summaryrefslogtreecommitdiffstats
path: root/sync
diff options
context:
space:
mode:
authorakalin@chromium.org <akalin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-03-15 09:35:42 +0000
committerakalin@chromium.org <akalin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-03-15 09:35:42 +0000
commitc1c32c85357f14756247b04b8b5ae41b05bf2e16 (patch)
tree58f25f64e1fa592e8daf276ef69901cd2218f929 /sync
parent63ee33bde2ec8471a70f0f0ec6a1962dd07fc8ab (diff)
downloadchromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.zip
chromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.tar.gz
chromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.tar.bz2
[Sync] Move 'sync' target to sync/
Also move related test files. Move WriteNode::UpdateEntryWithEncryption to nigori_util.h. Clean up defines and dependencies. In particular, get rid of SYNC_ENGINE_VERSION_STRING and hard-code the string in the single place it's used. Rename data_encryption.* to data_encryption_win.* and add a pragma for crypt32.lib. Clean up exit-time constructor warnings in sync{able,er}_unittest.cc. Remove some unused files. BUG=117585 TEST= TBR=jhawkins@chromium.org Review URL: https://chromiumcodereview.appspot.com/9699057 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@126872 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync')
-rw-r--r--sync/DEPS13
-rw-r--r--sync/engine/DEPS8
-rw-r--r--sync/engine/apply_updates_command.cc86
-rw-r--r--sync/engine/apply_updates_command.h32
-rw-r--r--sync/engine/apply_updates_command_unittest.cc1007
-rw-r--r--sync/engine/build_commit_command.cc255
-rw-r--r--sync/engine/build_commit_command.h51
-rw-r--r--sync/engine/build_commit_command_unittest.cc98
-rw-r--r--sync/engine/cleanup_disabled_types_command.cc70
-rw-r--r--sync/engine/cleanup_disabled_types_command.h46
-rw-r--r--sync/engine/cleanup_disabled_types_command_unittest.cc77
-rw-r--r--sync/engine/clear_data_command.cc77
-rw-r--r--sync/engine/clear_data_command.h31
-rw-r--r--sync/engine/clear_data_command_unittest.cc117
-rw-r--r--sync/engine/conflict_resolver.cc404
-rw-r--r--sync/engine/conflict_resolver.h89
-rw-r--r--sync/engine/download_updates_command.cc130
-rw-r--r--sync/engine/download_updates_command.h65
-rw-r--r--sync/engine/download_updates_command_unittest.cc74
-rw-r--r--sync/engine/get_commit_ids_command.cc434
-rw-r--r--sync/engine/get_commit_ids_command.h126
-rw-r--r--sync/engine/model_changing_syncer_command.cc59
-rw-r--r--sync/engine/model_changing_syncer_command.h85
-rw-r--r--sync/engine/model_changing_syncer_command_unittest.cc77
-rw-r--r--sync/engine/model_safe_worker.cc75
-rw-r--r--sync/engine/model_safe_worker.h112
-rw-r--r--sync/engine/model_safe_worker_unittest.cc55
-rw-r--r--sync/engine/net/DEPS3
-rw-r--r--sync/engine/net/server_connection_manager.cc414
-rw-r--r--sync/engine/net/server_connection_manager.h393
-rw-r--r--sync/engine/net/url_translator.cc48
-rw-r--r--sync/engine/net/url_translator.h28
-rw-r--r--sync/engine/nigori_util.cc244
-rw-r--r--sync/engine/nigori_util.h75
-rw-r--r--sync/engine/nigori_util_unittest.cc48
-rw-r--r--sync/engine/nudge_source.cc27
-rw-r--r--sync/engine/nudge_source.h27
-rw-r--r--sync/engine/passive_model_worker.cc28
-rw-r--r--sync/engine/passive_model_worker.h40
-rw-r--r--sync/engine/polling_constants.cc26
-rw-r--r--sync/engine/polling_constants.h20
-rw-r--r--sync/engine/post_commit_message_command.cc49
-rw-r--r--sync/engine/post_commit_message_command.h28
-rw-r--r--sync/engine/process_commit_response_command.cc482
-rw-r--r--sync/engine/process_commit_response_command.h101
-rw-r--r--sync/engine/process_commit_response_command_unittest.cc437
-rw-r--r--sync/engine/process_updates_command.cc183
-rw-r--r--sync/engine/process_updates_command.h55
-rw-r--r--sync/engine/process_updates_command_unittest.cc52
-rw-r--r--sync/engine/resolve_conflicts_command.cc40
-rw-r--r--sync/engine/resolve_conflicts_command.h33
-rw-r--r--sync/engine/resolve_conflicts_command_unittest.cc51
-rw-r--r--sync/engine/store_timestamps_command.cc59
-rw-r--r--sync/engine/store_timestamps_command.h41
-rw-r--r--sync/engine/sync_scheduler.cc1233
-rw-r--r--sync/engine/sync_scheduler.h421
-rw-r--r--sync/engine/sync_scheduler_unittest.cc1165
-rw-r--r--sync/engine/sync_scheduler_whitebox_unittest.cc276
-rw-r--r--sync/engine/syncer.cc345
-rw-r--r--sync/engine/syncer.h116
-rw-r--r--sync/engine/syncer_command.cc31
-rw-r--r--sync/engine/syncer_command.h48
-rw-r--r--sync/engine/syncer_proto_util.cc537
-rw-r--r--sync/engine/syncer_proto_util.h138
-rw-r--r--sync/engine/syncer_proto_util_unittest.cc298
-rw-r--r--sync/engine/syncer_types.cc15
-rw-r--r--sync/engine/syncer_types.h158
-rw-r--r--sync/engine/syncer_unittest.cc4523
-rw-r--r--sync/engine/syncer_util.cc778
-rw-r--r--sync/engine/syncer_util.h137
-rw-r--r--sync/engine/syncproto.h87
-rw-r--r--sync/engine/syncproto_unittest.cc18
-rw-r--r--sync/engine/update_applicator.cc190
-rw-r--r--sync/engine/update_applicator.h110
-rw-r--r--sync/engine/verify_updates_command.cc139
-rw-r--r--sync/engine/verify_updates_command.h49
-rw-r--r--sync/engine/verify_updates_command_unittest.cc111
-rw-r--r--sync/js/DEPS3
-rw-r--r--sync/js/README.js48
-rw-r--r--sync/js/js_arg_list.cc27
-rw-r--r--sync/js/js_arg_list.h44
-rw-r--r--sync/js/js_arg_list_unittest.cc40
-rw-r--r--sync/js/js_backend.h41
-rw-r--r--sync/js/js_controller.h50
-rw-r--r--sync/js/js_event_details.cc27
-rw-r--r--sync/js/js_event_details.h45
-rw-r--r--sync/js/js_event_details_unittest.cc36
-rw-r--r--sync/js/js_event_handler.h30
-rw-r--r--sync/js/js_reply_handler.h30
-rw-r--r--sync/js/js_test_util.cc137
-rw-r--r--sync/js/js_test_util.h109
-rw-r--r--sync/js/sync_js_controller.cc83
-rw-r--r--sync/js/sync_js_controller.h81
-rw-r--r--sync/js/sync_js_controller_unittest.cc126
-rw-r--r--sync/protocol/DEPS3
-rw-r--r--sync/protocol/proto_enum_conversions.cc112
-rw-r--r--sync/protocol/proto_enum_conversions.h40
-rw-r--r--sync/protocol/proto_enum_conversions_unittest.cc62
-rw-r--r--sync/protocol/proto_value_conversions.cc413
-rw-r--r--sync/protocol/proto_value_conversions.h142
-rw-r--r--sync/protocol/proto_value_conversions_unittest.cc191
-rw-r--r--sync/protocol/service_constants.h23
-rw-r--r--sync/protocol/sync_protocol_error.cc63
-rw-r--r--sync/protocol/sync_protocol_error.h81
-rw-r--r--sync/sessions/DEPS7
-rw-r--r--sync/sessions/debug_info_getter.h27
-rw-r--r--sync/sessions/ordered_commit_set.cc110
-rw-r--r--sync/sessions/ordered_commit_set.h119
-rw-r--r--sync/sessions/ordered_commit_set_unittest.cc120
-rw-r--r--sync/sessions/session_state.cc324
-rw-r--r--sync/sessions/session_state.h329
-rw-r--r--sync/sessions/session_state_unittest.cc176
-rw-r--r--sync/sessions/status_controller.cc310
-rw-r--r--sync/sessions/status_controller.h297
-rw-r--r--sync/sessions/status_controller_unittest.cc198
-rw-r--r--sync/sessions/sync_session.cc253
-rw-r--r--sync/sessions/sync_session.h240
-rw-r--r--sync/sessions/sync_session_context.cc81
-rw-r--r--sync/sessions/sync_session_context.h208
-rw-r--r--sync/sessions/sync_session_context_unittest.cc45
-rw-r--r--sync/sessions/sync_session_unittest.cc596
-rw-r--r--sync/sessions/test_util.cc55
-rw-r--r--sync/sessions/test_util.h50
-rw-r--r--sync/sync.gyp351
-rw-r--r--sync/syncable/DEPS13
-rw-r--r--sync/syncable/blob.h19
-rw-r--r--sync/syncable/dir_open_result.h20
-rw-r--r--sync/syncable/directory_backing_store.cc1078
-rw-r--r--sync/syncable/directory_backing_store.h166
-rw-r--r--sync/syncable/directory_backing_store_unittest.cc2162
-rw-r--r--sync/syncable/directory_change_delegate.h45
-rw-r--r--sync/syncable/in_memory_directory_backing_store.cc32
-rw-r--r--sync/syncable/in_memory_directory_backing_store.h32
-rw-r--r--sync/syncable/model_type.cc542
-rw-r--r--sync/syncable/model_type.h175
-rw-r--r--sync/syncable/model_type_payload_map.cc100
-rw-r--r--sync/syncable/model_type_payload_map.h60
-rw-r--r--sync/syncable/model_type_payload_map_unittest.cc43
-rw-r--r--sync/syncable/model_type_test_util.cc52
-rw-r--r--sync/syncable/model_type_test_util.h26
-rw-r--r--sync/syncable/model_type_unittest.cc76
-rw-r--r--sync/syncable/on_disk_directory_backing_store.cc41
-rw-r--r--sync/syncable/on_disk_directory_backing_store.h30
-rw-r--r--sync/syncable/syncable-inl.h22
-rw-r--r--sync/syncable/syncable.cc2405
-rw-r--r--sync/syncable/syncable.h1349
-rw-r--r--sync/syncable/syncable_changes_version.h30
-rw-r--r--sync/syncable/syncable_columns.h74
-rw-r--r--sync/syncable/syncable_enum_conversions.cc164
-rw-r--r--sync/syncable/syncable_enum_conversions.h45
-rw-r--r--sync/syncable/syncable_enum_conversions_unittest.cc85
-rw-r--r--sync/syncable/syncable_id.cc77
-rw-r--r--sync/syncable/syncable_id.h134
-rw-r--r--sync/syncable/syncable_id_unittest.cc96
-rw-r--r--sync/syncable/syncable_mock.cc20
-rw-r--r--sync/syncable/syncable_mock.h47
-rw-r--r--sync/syncable/syncable_unittest.cc1741
-rw-r--r--sync/syncable/transaction_observer.h25
-rw-r--r--sync/test/DEPS4
-rw-r--r--sync/test/engine/fake_model_safe_worker_registrar.cc42
-rw-r--r--sync/test/engine/fake_model_safe_worker_registrar.h36
-rw-r--r--sync/test/engine/fake_model_worker.cc31
-rw-r--r--sync/test/engine/fake_model_worker.h43
-rw-r--r--sync/test/engine/mock_connection_manager.cc659
-rw-r--r--sync/test/engine/mock_connection_manager.h363
-rw-r--r--sync/test/engine/syncer_command_test.cc63
-rw-r--r--sync/test/engine/syncer_command_test.h233
-rw-r--r--sync/test/engine/test_directory_setter_upper.cc59
-rw-r--r--sync/test/engine/test_directory_setter_upper.h79
-rw-r--r--sync/test/engine/test_id_factory.h73
-rw-r--r--sync/test/engine/test_syncable_utils.cc62
-rw-r--r--sync/test/engine/test_syncable_utils.h41
-rw-r--r--sync/test/fake_encryptor.cc23
-rw-r--r--sync/test/fake_encryptor.h29
-rw-r--r--sync/test/fake_extensions_activity_monitor.cc31
-rw-r--r--sync/test/fake_extensions_activity_monitor.h33
-rw-r--r--sync/test/null_directory_change_delegate.cc29
-rw-r--r--sync/test/null_directory_change_delegate.h34
-rw-r--r--sync/test/null_transaction_observer.cc15
-rw-r--r--sync/test/null_transaction_observer.h21
-rw-r--r--sync/test/sessions/test_scoped_session_event_listener.h36
-rw-r--r--sync/test/test_directory_backing_store.cc41
-rw-r--r--sync/test/test_directory_backing_store.h53
-rw-r--r--sync/util/DEPS7
-rw-r--r--sync/util/cryptographer.cc447
-rw-r--r--sync/util/cryptographer.h247
-rw-r--r--sync/util/cryptographer_unittest.cc391
-rw-r--r--sync/util/data_encryption_win.cc60
-rw-r--r--sync/util/data_encryption_win.h20
-rw-r--r--sync/util/data_encryption_win_unittest.cc31
-rw-r--r--sync/util/data_type_histogram.h91
-rw-r--r--sync/util/data_type_histogram_unittest.cc63
-rw-r--r--sync/util/encryptor.h28
-rw-r--r--sync/util/enum_set.h286
-rw-r--r--sync/util/enum_set_unittest.cc195
-rw-r--r--sync/util/extensions_activity_monitor.cc16
-rw-r--r--sync/util/extensions_activity_monitor.h53
-rw-r--r--sync/util/get_session_name.cc73
-rw-r--r--sync/util/get_session_name.h28
-rw-r--r--sync/util/get_session_name_mac.h23
-rw-r--r--sync/util/get_session_name_mac.mm51
-rw-r--r--sync/util/get_session_name_unittest.cc48
-rw-r--r--sync/util/get_session_name_win.cc21
-rw-r--r--sync/util/get_session_name_win.h19
-rw-r--r--sync/util/immutable.h262
-rw-r--r--sync/util/immutable_unittest.cc244
-rw-r--r--sync/util/logging.cc18
-rw-r--r--sync/util/logging.h35
-rw-r--r--sync/util/nigori.cc256
-rw-r--r--sync/util/nigori.h83
-rw-r--r--sync/util/nigori_unittest.cc170
-rw-r--r--sync/util/protobuf_unittest.cc35
-rw-r--r--sync/util/report_unrecoverable_error_function.h19
-rw-r--r--sync/util/syncer_error.cc36
-rw-r--r--sync/util/syncer_error.h46
-rw-r--r--sync/util/test_unrecoverable_error_handler.cc23
-rw-r--r--sync/util/test_unrecoverable_error_handler.h28
-rw-r--r--sync/util/time.cc24
-rw-r--r--sync/util/time.h29
-rw-r--r--sync/util/unrecoverable_error_handler.h30
-rw-r--r--sync/util/unrecoverable_error_info.cc44
-rw-r--r--sync/util/unrecoverable_error_info.h41
-rw-r--r--sync/util/weak_handle.cc36
-rw-r--r--sync/util/weak_handle.h379
-rw-r--r--sync/util/weak_handle_unittest.cc326
225 files changed, 41212 insertions, 0 deletions
diff --git a/sync/DEPS b/sync/DEPS
new file mode 100644
index 0000000..e3938ed
--- /dev/null
+++ b/sync/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ # Repeat these from the top-level DEPS file so one can just run
+ #
+ # checkdeps.py sync
+ #
+ # to test.
+ "+base",
+ "+build",
+ "+testing",
+
+ # Force subdirectories to explicitly define DEPS.
+ "-sync",
+]
diff --git a/sync/engine/DEPS b/sync/engine/DEPS
new file mode 100644
index 0000000..a22ed7d
--- /dev/null
+++ b/sync/engine/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+ "+googleurl",
+ "+sync/protocol",
+ "+sync/sessions",
+ "+sync/syncable",
+ "+sync/test",
+ "+sync/util",
+]
diff --git a/sync/engine/apply_updates_command.cc b/sync/engine/apply_updates_command.cc
new file mode 100644
index 0000000..ce4f14f
--- /dev/null
+++ b/sync/engine/apply_updates_command.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/apply_updates_command.h"
+
+#include "base/location.h"
+#include "sync/engine/update_applicator.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+
+ApplyUpdatesCommand::ApplyUpdatesCommand() {}
+ApplyUpdatesCommand::~ApplyUpdatesCommand() {}
+
+std::set<ModelSafeGroup> ApplyUpdatesCommand::GetGroupsToChange(
+ const sessions::SyncSession& session) const {
+ std::set<ModelSafeGroup> groups_with_unapplied_updates;
+
+ syncable::FullModelTypeSet server_types_with_unapplied_updates;
+ {
+ syncable::Directory* dir = session.context()->directory();
+ syncable::ReadTransaction trans(FROM_HERE, dir);
+ server_types_with_unapplied_updates =
+ dir->GetServerTypesWithUnappliedUpdates(&trans);
+ }
+
+ for (syncable::FullModelTypeSet::Iterator it =
+ server_types_with_unapplied_updates.First(); it.Good(); it.Inc()) {
+ groups_with_unapplied_updates.insert(
+ GetGroupForModelType(it.Get(), session.routing_info()));
+ }
+
+ return groups_with_unapplied_updates;
+}
+
+SyncerError ApplyUpdatesCommand::ModelChangingExecuteImpl(
+ SyncSession* session) {
+ syncable::Directory* dir = session->context()->directory();
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+
+ // Compute server types with unapplied updates that fall under our
+ // group restriction.
+ const syncable::FullModelTypeSet server_types_with_unapplied_updates =
+ dir->GetServerTypesWithUnappliedUpdates(&trans);
+ syncable::FullModelTypeSet server_type_restriction;
+ for (syncable::FullModelTypeSet::Iterator it =
+ server_types_with_unapplied_updates.First(); it.Good(); it.Inc()) {
+ if (GetGroupForModelType(it.Get(), session->routing_info()) ==
+ session->status_controller().group_restriction()) {
+ server_type_restriction.Put(it.Get());
+ }
+ }
+
+ syncable::Directory::UnappliedUpdateMetaHandles handles;
+ dir->GetUnappliedUpdateMetaHandles(
+ &trans, server_type_restriction, &handles);
+
+ UpdateApplicator applicator(
+ session->context()->resolver(),
+ dir->GetCryptographer(&trans),
+ handles.begin(), handles.end(), session->routing_info(),
+ session->status_controller().group_restriction());
+ while (applicator.AttemptOneApplication(&trans)) {}
+ applicator.SaveProgressIntoSessionState(
+ session->mutable_status_controller()->mutable_conflict_progress(),
+ session->mutable_status_controller()->mutable_update_progress());
+
+ // This might be the first time we've fully completed a sync cycle, for
+ // some subset of the currently synced datatypes.
+ const sessions::StatusController& status(session->status_controller());
+ if (status.ServerSaysNothingMoreToDownload()) {
+ for (syncable::ModelTypeSet::Iterator it =
+ status.updates_request_types().First(); it.Good(); it.Inc()) {
+ // This gets persisted to the directory's backing store.
+ dir->set_initial_sync_ended_for_type(it.Get(), true);
+ }
+ }
+
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/apply_updates_command.h b/sync/engine/apply_updates_command.h
new file mode 100644
index 0000000..4282dc1
--- /dev/null
+++ b/sync/engine/apply_updates_command.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+#define SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/model_changing_syncer_command.h"
+
+namespace browser_sync {
+
+class ApplyUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ ApplyUpdatesCommand();
+ virtual ~ApplyUpdatesCommand();
+
+ protected:
+ // ModelChangingSyncerCommand implementation.
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const OVERRIDE;
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
diff --git a/sync/engine/apply_updates_command_unittest.cc b/sync/engine/apply_updates_command_unittest.cc
new file mode 100644
index 0000000..6ff6dae
--- /dev/null
+++ b/sync/engine/apply_updates_command_unittest.cc
@@ -0,0 +1,1007 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/format_macros.h"
+#include "base/location.h"
+#include "base/stringprintf.h"
+#include "sync/engine/apply_updates_command.h"
+#include "sync/engine/nigori_util.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/util/cryptographer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using std::string;
+using syncable::Entry;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::ReadTransaction;
+using syncable::UNITTEST;
+using syncable::WriteTransaction;
+
+namespace {
+sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
+ sync_pb::EntitySpecifics result;
+ AddDefaultFieldValue(syncable::BOOKMARKS, &result);
+ return result;
+}
+} // namespace
+
+// A test fixture for tests exercising ApplyUpdatesCommand.
+class ApplyUpdatesCommandTest : public SyncerCommandTest {
+ public:
+ protected:
+ ApplyUpdatesCommandTest() : next_revision_(1) {}
+ virtual ~ApplyUpdatesCommandTest() {}
+
+ virtual void SetUp() {
+ workers()->clear();
+ mutable_routing_info()->clear();
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
+ (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+ (*mutable_routing_info())[syncable::PASSWORDS] = GROUP_PASSWORD;
+ (*mutable_routing_info())[syncable::NIGORI] = GROUP_PASSIVE;
+ SyncerCommandTest::SetUp();
+ ExpectNoGroupsToChange(apply_updates_command_);
+ }
+
+ // Create a new unapplied folder node with a parent.
+ void CreateUnappliedNewItemWithParent(
+ const string& item_id,
+ const sync_pb::EntitySpecifics& specifics,
+ const string& parent_id) {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
+ Id::CreateFromServerId(item_id));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::SERVER_VERSION, next_revision_++);
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+
+ entry.Put(syncable::SERVER_NON_UNIQUE_NAME, item_id);
+ entry.Put(syncable::SERVER_PARENT_ID, Id::CreateFromServerId(parent_id));
+ entry.Put(syncable::SERVER_IS_DIR, true);
+ entry.Put(syncable::SERVER_SPECIFICS, specifics);
+ }
+
+ // Create a new unapplied update without a parent.
+ void CreateUnappliedNewItem(const string& item_id,
+ const sync_pb::EntitySpecifics& specifics,
+ bool is_unique) {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
+ Id::CreateFromServerId(item_id));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::SERVER_VERSION, next_revision_++);
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_NON_UNIQUE_NAME, item_id);
+ entry.Put(syncable::SERVER_PARENT_ID, syncable::GetNullId());
+ entry.Put(syncable::SERVER_IS_DIR, false);
+ entry.Put(syncable::SERVER_SPECIFICS, specifics);
+ if (is_unique) // For top-level nodes.
+ entry.Put(syncable::UNIQUE_SERVER_TAG, item_id);
+ }
+
+ // Create an unsynced item in the database. If item_id is a local ID, it
+ // will be treated as a create-new. Otherwise, if it's a server ID, we'll
+ // fake the server data so that it looks like it exists on the server.
+ // Returns the methandle of the created item in |metahandle_out| if not NULL.
+ void CreateUnsyncedItem(const Id& item_id,
+ const Id& parent_id,
+ const string& name,
+ bool is_folder,
+ syncable::ModelType model_type,
+ int64* metahandle_out) {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ Id predecessor_id;
+ ASSERT_TRUE(
+ directory()->GetLastChildIdForTest(&trans, parent_id, &predecessor_id));
+ MutableEntry entry(&trans, syncable::CREATE, parent_id, name);
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::ID, item_id);
+ entry.Put(syncable::BASE_VERSION,
+ item_id.ServerKnows() ? next_revision_++ : 0);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::IS_DIR, is_folder);
+ entry.Put(syncable::IS_DEL, false);
+ entry.Put(syncable::PARENT_ID, parent_id);
+ CHECK(entry.PutPredecessor(predecessor_id));
+ sync_pb::EntitySpecifics default_specifics;
+ syncable::AddDefaultFieldValue(model_type, &default_specifics);
+ entry.Put(syncable::SPECIFICS, default_specifics);
+ if (item_id.ServerKnows()) {
+ entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
+ entry.Put(syncable::SERVER_IS_DIR, is_folder);
+ entry.Put(syncable::SERVER_PARENT_ID, parent_id);
+ entry.Put(syncable::SERVER_IS_DEL, false);
+ }
+ if (metahandle_out)
+ *metahandle_out = entry.Get(syncable::META_HANDLE);
+ }
+
+ // Creates an item that is both unsynced an an unapplied update. Returns the
+ // metahandle of the created item.
+ int64 CreateUnappliedAndUnsyncedItem(const string& name,
+ syncable::ModelType model_type) {
+ int64 metahandle = 0;
+ CreateUnsyncedItem(id_factory_.MakeServer(name), id_factory_.root(), name,
+ false, model_type, &metahandle);
+
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_HANDLE, metahandle);
+ if (!entry.good()) {
+ ADD_FAILURE();
+ return syncable::kInvalidMetaHandle;
+ }
+
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_VERSION, GetNextRevision());
+
+ return metahandle;
+ }
+
+
+ // Creates an item that has neither IS_UNSYNED or IS_UNAPPLIED_UPDATE. The
+ // item is known to both the server and client. Returns the metahandle of
+ // the created item.
+ int64 CreateSyncedItem(const std::string& name, syncable::ModelType
+ model_type, bool is_folder) {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+
+ syncable::Id parent_id(id_factory_.root());
+ syncable::Id item_id(id_factory_.MakeServer(name));
+ int64 version = GetNextRevision();
+
+ sync_pb::EntitySpecifics default_specifics;
+ syncable::AddDefaultFieldValue(model_type, &default_specifics);
+
+ MutableEntry entry(&trans, syncable::CREATE, parent_id, name);
+ if (!entry.good()) {
+ ADD_FAILURE();
+ return syncable::kInvalidMetaHandle;
+ }
+
+ entry.Put(syncable::ID, item_id);
+ entry.Put(syncable::BASE_VERSION, version);
+ entry.Put(syncable::IS_UNSYNCED, false);
+ entry.Put(syncable::NON_UNIQUE_NAME, name);
+ entry.Put(syncable::IS_DIR, is_folder);
+ entry.Put(syncable::IS_DEL, false);
+ entry.Put(syncable::PARENT_ID, parent_id);
+
+ if (!entry.PutPredecessor(id_factory_.root())) {
+ ADD_FAILURE();
+ return syncable::kInvalidMetaHandle;
+ }
+ entry.Put(syncable::SPECIFICS, default_specifics);
+
+ entry.Put(syncable::SERVER_VERSION, GetNextRevision());
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_NON_UNIQUE_NAME, "X");
+ entry.Put(syncable::SERVER_PARENT_ID, id_factory_.MakeServer("Y"));
+ entry.Put(syncable::SERVER_IS_DIR, is_folder);
+ entry.Put(syncable::SERVER_IS_DEL, false);
+ entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
+ entry.Put(syncable::SERVER_PARENT_ID, parent_id);
+
+ return entry.Get(syncable::META_HANDLE);
+ }
+
+ int64 GetNextRevision() {
+ return next_revision_++;
+ }
+
+ ApplyUpdatesCommand apply_updates_command_;
+ FakeEncryptor encryptor_;
+ TestIdFactory id_factory_;
+ private:
+ int64 next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommandTest);
+};
+
+TEST_F(ApplyUpdatesCommandTest, Simple) {
+ string root_server_id = syncable::GetNullId().GetServerId();
+ CreateUnappliedNewItemWithParent("parent",
+ DefaultBookmarkSpecifics(),
+ root_server_id);
+ CreateUnappliedNewItemWithParent("child",
+ DefaultBookmarkSpecifics(),
+ "parent");
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(2, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(0, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(0, status->conflict_progress()->HierarchyConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(2, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "All items should have been successfully applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, UpdateWithChildrenBeforeParents) {
+ // Set a bunch of updates which are difficult to apply in the order
+ // they're received due to dependencies on other unseen items.
+ string root_server_id = syncable::GetNullId().GetServerId();
+ CreateUnappliedNewItemWithParent("a_child_created_first",
+ DefaultBookmarkSpecifics(),
+ "parent");
+ CreateUnappliedNewItemWithParent("x_child_created_first",
+ DefaultBookmarkSpecifics(),
+ "parent");
+ CreateUnappliedNewItemWithParent("parent",
+ DefaultBookmarkSpecifics(),
+ root_server_id);
+ CreateUnappliedNewItemWithParent("a_child_created_second",
+ DefaultBookmarkSpecifics(),
+ "parent");
+ CreateUnappliedNewItemWithParent("x_child_created_second",
+ DefaultBookmarkSpecifics(),
+ "parent");
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(5, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts, even if out-of-order";
+ EXPECT_EQ(5, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "All updates should have been successfully applied";
+}
+
+// Runs the ApplyUpdatesCommand on an item that has both local and remote
+// modifications (IS_UNSYNCED and IS_UNAPPLIED_UPDATE). We expect the command
+// to detect that this update can't be applied because it is in a CONFLICT
+// state.
+TEST_F(ApplyUpdatesCommandTest, SimpleConflict) {
+ CreateUnappliedAndUnsyncedItem("item", syncable::BOOKMARKS);
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(1, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "Unsynced and unapplied item should be a simple conflict";
+}
+
+// Runs the ApplyUpdatesCommand on an item that has both local and remote
+// modifications *and* the remote modification cannot be applied without
+// violating the tree constraints. We expect the command to detect that this
+// update can't be applied and that this situation can't be resolved with the
+// simple conflict processing logic; it is in a CONFLICT_HIERARCHY state.
+TEST_F(ApplyUpdatesCommandTest, HierarchyAndSimpleConflict) {
+ // Create a simply-conflicting item. It will start with valid parent ids.
+ int64 handle = CreateUnappliedAndUnsyncedItem("orphaned_by_server",
+ syncable::BOOKMARKS);
+ {
+ // Manually set the SERVER_PARENT_ID to bad value.
+ // A bad parent indicates a hierarchy conflict.
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(entry.good());
+
+ entry.Put(syncable::SERVER_PARENT_ID,
+ id_factory_.MakeServer("bogus_parent"));
+ }
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize());
+
+ // An update that is both a simple conflict and a hierarchy conflict should be
+ // treated as a hierarchy conflict.
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(1, status->conflict_progress()->HierarchyConflictingItemsSize());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize());
+}
+
+
+// Runs the ApplyUpdatesCommand on an item with remote modifications that would
+// create a directory loop if the update were applied. We expect the command to
+// detect that this update can't be applied because it is in a
+// CONFLICT_HIERARCHY state.
+TEST_F(ApplyUpdatesCommandTest, HierarchyConflictDirectoryLoop) {
+ // Item 'X' locally has parent of 'root'. Server is updating it to have
+ // parent of 'Y'.
+ {
+ // Create it as a child of root node.
+ int64 handle = CreateSyncedItem("X", syncable::BOOKMARKS, true);
+
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(entry.good());
+
+ // Re-parent from root to "Y"
+ entry.Put(syncable::SERVER_VERSION, GetNextRevision());
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_PARENT_ID, id_factory_.MakeServer("Y"));
+ }
+
+ // Item 'Y' is child of 'X'.
+ CreateUnsyncedItem(id_factory_.MakeServer("Y"), id_factory_.MakeServer("X"),
+ "Y", true, syncable::BOOKMARKS, NULL);
+
+ // If the server's update were applied, we would have X be a child of Y, and Y
+ // as a child of X. That's a directory loop. The UpdateApplicator should
+ // prevent the update from being applied and note that this is a hierarchy
+ // conflict.
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize());
+
+ // This should count as a hierarchy conflict.
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(1, status->conflict_progress()->HierarchyConflictingItemsSize());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize());
+}
+
+// Runs the ApplyUpdatesCommand on a directory where the server sent us an
+// update to add a child to a locally deleted (and unsynced) parent. We expect
+// the command to not apply the update and to indicate the update is in a
+// CONFLICT_HIERARCHY state.
+TEST_F(ApplyUpdatesCommandTest, HierarchyConflictDeletedParent) {
+ // Create a locally deleted parent item.
+ int64 parent_handle;
+ CreateUnsyncedItem(Id::CreateFromServerId("parent"), id_factory_.root(),
+ "parent", true, syncable::BOOKMARKS, &parent_handle);
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_HANDLE, parent_handle);
+ entry.Put(syncable::IS_DEL, true);
+ }
+
+ // Create an incoming child from the server.
+ CreateUnappliedNewItemWithParent("child", DefaultBookmarkSpecifics(),
+ "parent");
+
+ // The server's update may seem valid to some other client, but on this client
+ // that new item's parent no longer exists. The update should not be applied
+ // and the update applicator should indicate this is a hierarchy conflict.
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+
+ // This should count as a hierarchy conflict.
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(1, status->conflict_progress()->HierarchyConflictingItemsSize());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize());
+}
+
+// Runs the ApplyUpdatesCommand on a directory where the server is trying to
+// delete a folder that has a recently added (and unsynced) child. We expect
+// the command to not apply the update because it is in a CONFLICT_HIERARCHY
+// state.
+TEST_F(ApplyUpdatesCommandTest, HierarchyConflictDeleteNonEmptyDirectory) {
+ // Create a server-deleted directory.
+ {
+ // Create it as a child of root node.
+ int64 handle = CreateSyncedItem("parent", syncable::BOOKMARKS, true);
+
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(entry.good());
+
+ // Delete it on the server.
+ entry.Put(syncable::SERVER_VERSION, GetNextRevision());
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_PARENT_ID, id_factory_.root());
+ entry.Put(syncable::SERVER_IS_DEL, true);
+ }
+
+ // Create a local child of the server-deleted directory.
+ CreateUnsyncedItem(id_factory_.MakeServer("child"),
+ id_factory_.MakeServer("parent"), "child", false,
+ syncable::BOOKMARKS, NULL);
+
+ // The server's request to delete the directory must be ignored, otherwise our
+ // unsynced new child would be orphaned. This is a hierarchy conflict.
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+
+ // This should count as a hierarchy conflict.
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(1, status->conflict_progress()->HierarchyConflictingItemsSize());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize());
+}
+
+// Runs the ApplyUpdatesCommand on a server-created item that has a locally
+// unknown parent. We expect the command to not apply the update because the
+// item is in a CONFLICT_HIERARCHY state.
+TEST_F(ApplyUpdatesCommandTest, HierarchyConflictUnknownParent) {
+ // We shouldn't be able to do anything with either of these items.
+ CreateUnappliedNewItemWithParent("some_item",
+ DefaultBookmarkSpecifics(),
+ "unknown_parent");
+ CreateUnappliedNewItemWithParent("some_other_item",
+ DefaultBookmarkSpecifics(),
+ "some_item");
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(2, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "Updates with unknown parent should not be treated as 'simple'"
+ << " conflicts";
+ EXPECT_EQ(2, status->conflict_progress()->HierarchyConflictingItemsSize())
+ << "All updates with an unknown ancestors should be in conflict";
+ EXPECT_EQ(0, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "No item with an unknown ancestor should be applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, ItemsBothKnownAndUnknown) {
+ // See what happens when there's a mixture of good and bad updates.
+ string root_server_id = syncable::GetNullId().GetServerId();
+ CreateUnappliedNewItemWithParent("first_unknown_item",
+ DefaultBookmarkSpecifics(),
+ "unknown_parent");
+ CreateUnappliedNewItemWithParent("first_known_item",
+ DefaultBookmarkSpecifics(),
+ root_server_id);
+ CreateUnappliedNewItemWithParent("second_unknown_item",
+ DefaultBookmarkSpecifics(),
+ "unknown_parent");
+ CreateUnappliedNewItemWithParent("second_known_item",
+ DefaultBookmarkSpecifics(),
+ "first_known_item");
+ CreateUnappliedNewItemWithParent("third_known_item",
+ DefaultBookmarkSpecifics(),
+ "fourth_known_item");
+ CreateUnappliedNewItemWithParent("fourth_known_item",
+ DefaultBookmarkSpecifics(),
+ root_server_id);
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_UI);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(6, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(2, status->conflict_progress()->HierarchyConflictingItemsSize())
+ << "The updates with unknown ancestors should be in conflict";
+ EXPECT_EQ(4, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The updates with known ancestors should be successfully applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, DecryptablePassword) {
+ // Decryptable password updates should be applied.
+ Cryptographer* cryptographer;
+ {
+ // Storing the cryptographer separately is bad, but for this test we
+ // know it's safe.
+ ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ }
+
+ browser_sync::KeyParams params = {"localhost", "dummy", "foobar"};
+ cryptographer->AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com");
+
+ cryptographer->Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ CreateUnappliedNewItem("item", specifics, false);
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSWORD);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSWORD);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "No update should be in conflict because they're all decryptable";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The updates that can be decrypted should be applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, UndecryptableData) {
+ // Undecryptable updates should not be applied.
+ sync_pb::EntitySpecifics encrypted_bookmark;
+ encrypted_bookmark.mutable_encrypted();
+ AddDefaultFieldValue(syncable::BOOKMARKS, &encrypted_bookmark);
+ string root_server_id = syncable::GetNullId().GetServerId();
+ CreateUnappliedNewItemWithParent("folder",
+ encrypted_bookmark,
+ root_server_id);
+ CreateUnappliedNewItem("item2", encrypted_bookmark, false);
+ sync_pb::EntitySpecifics encrypted_password;
+ encrypted_password.mutable_password();
+ CreateUnappliedNewItem("item3", encrypted_password, false);
+
+ ExpectGroupsToChange(apply_updates_command_, GROUP_UI, GROUP_PASSWORD);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ EXPECT_TRUE(status->HasConflictingUpdates())
+ << "Updates that can't be decrypted should trigger the syncer to have "
+ << "conflicting updates.";
+ {
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(2, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The updates that can't be decrypted should not be in regular "
+ << "conflict";
+ EXPECT_EQ(2, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "The updates that can't be decrypted should be in encryption "
+ << "conflict";
+ EXPECT_EQ(0, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "No update that can't be decrypted should be applied";
+ }
+ {
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSWORD);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The updates that can't be decrypted should not be in regular "
+ << "conflict";
+ EXPECT_EQ(1, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "The updates that can't be decrypted should be in encryption "
+ << "conflict";
+ EXPECT_EQ(0, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "No update that can't be decrypted should be applied";
+ }
+}
+
+TEST_F(ApplyUpdatesCommandTest, SomeUndecryptablePassword) {
+ // Only decryptable password updates should be applied.
+ {
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com/1");
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Cryptographer* cryptographer = directory()->GetCryptographer(&trans);
+
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ cryptographer->AddKey(params);
+
+ cryptographer->Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ }
+ CreateUnappliedNewItem("item1", specifics, false);
+ }
+ {
+ // Create a new cryptographer, independent of the one in the session.
+ Cryptographer cryptographer(&encryptor_);
+ KeyParams params = {"localhost", "dummy", "bazqux"};
+ cryptographer.AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::PasswordSpecificsData data;
+ data.set_origin("http://example.com/2");
+
+ cryptographer.Encrypt(data,
+ specifics.mutable_password()->mutable_encrypted());
+ CreateUnappliedNewItem("item2", specifics, false);
+ }
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSWORD);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ EXPECT_TRUE(status->HasConflictingUpdates())
+ << "Updates that can't be decrypted should trigger the syncer to have "
+ << "conflicting updates.";
+ {
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSWORD);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(2, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The updates that can't be decrypted should not be in regular "
+ << "conflict";
+ EXPECT_EQ(1, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "The updates that can't be decrypted should be in encryption "
+ << "conflict";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The undecryptable password update shouldn't be applied";
+ }
+}
+
+TEST_F(ApplyUpdatesCommandTest, NigoriUpdate) {
+ // Storing the cryptographer separately is bad, but for this test we
+ // know it's safe.
+ Cryptographer* cryptographer;
+ syncable::ModelTypeSet encrypted_types;
+ encrypted_types.Put(syncable::PASSWORDS);
+ encrypted_types.Put(syncable::NIGORI);
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ EXPECT_TRUE(cryptographer->GetEncryptedTypes().Equals(encrypted_types));
+ }
+
+ // Nigori node updates should update the Cryptographer.
+ Cryptographer other_cryptographer(&encryptor_);
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ other_cryptographer.AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ encrypted_types.Put(syncable::BOOKMARKS);
+ CreateUnappliedNewItem(syncable::ModelTypeToRootTag(syncable::NIGORI),
+ specifics, true);
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSIVE);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The nigori update shouldn't be in conflict";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The nigori update should be applied";
+
+ EXPECT_FALSE(cryptographer->is_ready());
+ EXPECT_TRUE(cryptographer->has_pending_keys());
+ EXPECT_TRUE(
+ cryptographer->GetEncryptedTypes()
+ .Equals(syncable::ModelTypeSet::All()));
+}
+
+TEST_F(ApplyUpdatesCommandTest, NigoriUpdateForDisabledTypes) {
+ // Storing the cryptographer separately is bad, but for this test we
+ // know it's safe.
+ Cryptographer* cryptographer;
+ syncable::ModelTypeSet encrypted_types;
+ encrypted_types.Put(syncable::PASSWORDS);
+ encrypted_types.Put(syncable::NIGORI);
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ EXPECT_TRUE(cryptographer->GetEncryptedTypes().Equals(encrypted_types));
+ }
+
+ // Nigori node updates should update the Cryptographer.
+ Cryptographer other_cryptographer(&encryptor_);
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ other_cryptographer.AddKey(params);
+
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_sessions(true);
+ nigori->set_encrypt_themes(true);
+ encrypted_types.Put(syncable::SESSIONS);
+ encrypted_types.Put(syncable::THEMES);
+ CreateUnappliedNewItem(syncable::ModelTypeToRootTag(syncable::NIGORI),
+ specifics, true);
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSIVE);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The nigori update shouldn't be in conflict";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The nigori update should be applied";
+
+ EXPECT_FALSE(cryptographer->is_ready());
+ EXPECT_TRUE(cryptographer->has_pending_keys());
+ EXPECT_TRUE(
+ cryptographer->GetEncryptedTypes()
+ .Equals(syncable::ModelTypeSet::All()));
+}
+
+// Create some local unsynced and unencrypted data. Apply a nigori update that
+// turns on encryption for the unsynced data. Ensure we properly encrypt the
+// data as part of the nigori update. Apply another nigori update with no
+// changes. Ensure we ignore already-encrypted unsynced data and that nothing
+// breaks.
+TEST_F(ApplyUpdatesCommandTest, EncryptUnsyncedChanges) {
+ // Storing the cryptographer separately is bad, but for this test we
+ // know it's safe.
+ Cryptographer* cryptographer;
+ syncable::ModelTypeSet encrypted_types;
+ encrypted_types.Put(syncable::PASSWORDS);
+ encrypted_types.Put(syncable::NIGORI);
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ EXPECT_TRUE(cryptographer->GetEncryptedTypes().Equals(encrypted_types));
+
+ // With default encrypted_types, this should be true.
+ EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_TRUE(handles.empty());
+ }
+
+ // Create unsynced bookmarks without encryption.
+ // First item is a folder
+ Id folder_id = id_factory_.NewLocalId();
+ CreateUnsyncedItem(folder_id, id_factory_.root(), "folder",
+ true, syncable::BOOKMARKS, NULL);
+ // Next five items are children of the folder
+ size_t i;
+ size_t batch_s = 5;
+ for (i = 0; i < batch_s; ++i) {
+ CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
+ base::StringPrintf("Item %"PRIuS"", i), false,
+ syncable::BOOKMARKS, NULL);
+ }
+ // Next five items are children of the root.
+ for (; i < 2*batch_s; ++i) {
+ CreateUnsyncedItem(id_factory_.NewLocalId(), id_factory_.root(),
+ base::StringPrintf("Item %"PRIuS"", i), false,
+ syncable::BOOKMARKS, NULL);
+ }
+
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ cryptographer->AddKey(params);
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ cryptographer->GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ encrypted_types.Put(syncable::BOOKMARKS);
+ CreateUnappliedNewItem(syncable::ModelTypeToRootTag(syncable::NIGORI),
+ specifics, true);
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+ EXPECT_TRUE(cryptographer->is_ready());
+
+ {
+ // Ensure we have unsynced nodes that aren't properly encrypted.
+ ReadTransaction trans(FROM_HERE, directory());
+ EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_EQ(2*batch_s+1, handles.size());
+ }
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSIVE);
+ apply_updates_command_.ExecuteImpl(session());
+
+ {
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "No updates should be in conflict";
+ EXPECT_EQ(0, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "No updates should be in conflict";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The nigori update should be applied";
+ }
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+ EXPECT_TRUE(cryptographer->is_ready());
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+
+ // If ProcessUnsyncedChangesForEncryption worked, all our unsynced changes
+ // should be encrypted now.
+ EXPECT_TRUE(syncable::ModelTypeSet::All().Equals(
+ cryptographer->GetEncryptedTypes()));
+ EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_EQ(2*batch_s+1, handles.size());
+ }
+
+ // Simulate another nigori update that doesn't change anything.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, syncable::GET_BY_SERVER_TAG,
+ syncable::ModelTypeToRootTag(syncable::NIGORI));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::SERVER_VERSION, GetNextRevision());
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ }
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSIVE);
+ apply_updates_command_.ExecuteImpl(session());
+ {
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(2, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "No updates should be in conflict";
+ EXPECT_EQ(0, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "No updates should be in conflict";
+ EXPECT_EQ(2, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The nigori update should be applied";
+ }
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+ EXPECT_TRUE(cryptographer->is_ready());
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+
+ // All our changes should still be encrypted.
+ EXPECT_TRUE(syncable::ModelTypeSet::All().Equals(
+ cryptographer->GetEncryptedTypes()));
+ EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_EQ(2*batch_s+1, handles.size());
+ }
+}
+
+TEST_F(ApplyUpdatesCommandTest, CannotEncryptUnsyncedChanges) {
+ // Storing the cryptographer separately is bad, but for this test we
+ // know it's safe.
+ Cryptographer* cryptographer;
+ syncable::ModelTypeSet encrypted_types;
+ encrypted_types.Put(syncable::PASSWORDS);
+ encrypted_types.Put(syncable::NIGORI);
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ cryptographer = directory()->GetCryptographer(&trans);
+ EXPECT_TRUE(cryptographer->GetEncryptedTypes().Equals(encrypted_types));
+
+ // With default encrypted_types, this should be true.
+ EXPECT_TRUE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_TRUE(handles.empty());
+ }
+
+ // Create unsynced bookmarks without encryption.
+ // First item is a folder
+ Id folder_id = id_factory_.NewLocalId();
+ CreateUnsyncedItem(folder_id, id_factory_.root(), "folder", true,
+ syncable::BOOKMARKS, NULL);
+ // Next five items are children of the folder
+ size_t i;
+ size_t batch_s = 5;
+ for (i = 0; i < batch_s; ++i) {
+ CreateUnsyncedItem(id_factory_.NewLocalId(), folder_id,
+ base::StringPrintf("Item %"PRIuS"", i), false,
+ syncable::BOOKMARKS, NULL);
+ }
+ // Next five items are children of the root.
+ for (; i < 2*batch_s; ++i) {
+ CreateUnsyncedItem(id_factory_.NewLocalId(), id_factory_.root(),
+ base::StringPrintf("Item %"PRIuS"", i), false,
+ syncable::BOOKMARKS, NULL);
+ }
+
+ // We encrypt with new keys, triggering the local cryptographer to be unready
+ // and unable to decrypt data (once updated).
+ Cryptographer other_cryptographer(&encryptor_);
+ KeyParams params = {"localhost", "dummy", "foobar"};
+ other_cryptographer.AddKey(params);
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ encrypted_types.Put(syncable::BOOKMARKS);
+ CreateUnappliedNewItem(syncable::ModelTypeToRootTag(syncable::NIGORI),
+ specifics, true);
+ EXPECT_FALSE(cryptographer->has_pending_keys());
+
+ {
+ // Ensure we have unsynced nodes that aren't properly encrypted.
+ ReadTransaction trans(FROM_HERE, directory());
+ EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_EQ(2*batch_s+1, handles.size());
+ }
+
+ ExpectGroupToChange(apply_updates_command_, GROUP_PASSIVE);
+ apply_updates_command_.ExecuteImpl(session());
+
+ sessions::StatusController* status = session()->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+ ASSERT_TRUE(status->update_progress());
+ EXPECT_EQ(1, status->update_progress()->AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ ASSERT_TRUE(status->conflict_progress());
+ EXPECT_EQ(0, status->conflict_progress()->SimpleConflictingItemsSize())
+ << "The unsynced changes don't trigger a blocking conflict with the "
+ << "nigori update.";
+ EXPECT_EQ(0, status->conflict_progress()->EncryptionConflictingItemsSize())
+ << "The unsynced changes don't trigger an encryption conflict with the "
+ << "nigori update.";
+ EXPECT_EQ(1, status->update_progress()->SuccessfullyAppliedUpdateCount())
+ << "The nigori update should be applied";
+ EXPECT_FALSE(cryptographer->is_ready());
+ EXPECT_TRUE(cryptographer->has_pending_keys());
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+
+ // Since we have pending keys, we would have failed to encrypt, but the
+ // cryptographer should be updated.
+ EXPECT_FALSE(VerifyUnsyncedChangesAreEncrypted(&trans, encrypted_types));
+ EXPECT_TRUE(cryptographer->GetEncryptedTypes().Equals(
+ syncable::ModelTypeSet().All()));
+ EXPECT_FALSE(cryptographer->is_ready());
+ EXPECT_TRUE(cryptographer->has_pending_keys());
+
+ Syncer::UnsyncedMetaHandles handles;
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ EXPECT_EQ(2*batch_s+1, handles.size());
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
new file mode 100644
index 0000000..86d1d8b
--- /dev/null
+++ b/sync/engine/build_commit_command.cc
@@ -0,0 +1,255 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/build_commit_command.h"
+
+#include <limits>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/string_util.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_changes_version.h"
+#include "sync/util/time.h"
+
+using std::set;
+using std::string;
+using std::vector;
+using syncable::Entry;
+using syncable::IS_DEL;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::SPECIFICS;
+using syncable::UNSPECIFIED;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+
+// static
+int64 BuildCommitCommand::GetFirstPosition() {
+ return std::numeric_limits<int64>::min();
+}
+
+// static
+int64 BuildCommitCommand::GetLastPosition() {
+ return std::numeric_limits<int64>::max();
+}
+
+// static
+int64 BuildCommitCommand::GetGap() {
+ return 1LL << 20;
+}
+
+BuildCommitCommand::BuildCommitCommand() {}
+BuildCommitCommand::~BuildCommitCommand() {}
+
+void BuildCommitCommand::AddExtensionsActivityToMessage(
+ SyncSession* session, CommitMessage* message) {
+ // We only send ExtensionsActivity to the server if bookmarks are being
+ // committed.
+ ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
+ if (!session->status_controller().HasBookmarkCommitActivity()) {
+ // Return the records to the activity monitor.
+ monitor->PutRecords(session->extensions_activity());
+ session->mutable_extensions_activity()->clear();
+ return;
+ }
+ const ExtensionsActivityMonitor::Records& records =
+ session->extensions_activity();
+ for (ExtensionsActivityMonitor::Records::const_iterator it = records.begin();
+ it != records.end(); ++it) {
+ sync_pb::ChromiumExtensionsActivity* activity_message =
+ message->add_extensions_activity();
+ activity_message->set_extension_id(it->second.extension_id);
+ activity_message->set_bookmark_writes_since_last_commit(
+ it->second.bookmark_write_count);
+ }
+}
+
+namespace {
+void SetEntrySpecifics(MutableEntry* meta_entry, SyncEntity* sync_entry) {
+ // Add the new style extension and the folder bit.
+ sync_entry->mutable_specifics()->CopyFrom(meta_entry->Get(SPECIFICS));
+ sync_entry->set_folder(meta_entry->Get(syncable::IS_DIR));
+
+ DCHECK(meta_entry->GetModelType() == sync_entry->GetModelType());
+}
+} // namespace
+
+SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
+ ClientToServerMessage message;
+ message.set_share(session->context()->account_name());
+ message.set_message_contents(ClientToServerMessage::COMMIT);
+
+ CommitMessage* commit_message = message.mutable_commit();
+ commit_message->set_cache_guid(
+ session->write_transaction()->directory()->cache_guid());
+ AddExtensionsActivityToMessage(session, commit_message);
+ SyncerProtoUtil::AddRequestBirthday(
+ session->write_transaction()->directory(), &message);
+
+ // Cache previously computed position values. Because |commit_ids|
+ // is already in sibling order, we should always hit this map after
+ // the first sibling in a consecutive run of commit items. The
+ // entries in this map are (low, high) values describing the
+ // space of positions that are immediate successors of the item
+ // whose ID is the map's key.
+ std::map<Id, std::pair<int64, int64> > position_map;
+
+ const vector<Id>& commit_ids = session->status_controller().commit_ids();
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ Id id = commit_ids[i];
+ SyncEntity* sync_entry =
+ static_cast<SyncEntity*>(commit_message->add_entries());
+ sync_entry->set_id(id);
+ MutableEntry meta_entry(session->write_transaction(),
+ syncable::GET_BY_ID,
+ id);
+ CHECK(meta_entry.good());
+ // This is the only change we make to the entry in this function.
+ meta_entry.Put(syncable::SYNCING, true);
+
+ DCHECK(0 != session->routing_info().count(meta_entry.GetModelType()))
+ << "Committing change to datatype that's not actively enabled.";
+
+ string name = meta_entry.Get(syncable::NON_UNIQUE_NAME);
+ CHECK(!name.empty()); // Make sure this isn't an update.
+ TruncateUTF8ToByteSize(name, 255, &name);
+ sync_entry->set_name(name);
+
+ // Set the non_unique_name. If we set it, the server ignores
+ // the |name| value (using |non_unique_name| instead), and will return
+ // in the CommitResponse a unique name if one is generated.
+ // We send both because it may aid in logging.
+ sync_entry->set_non_unique_name(name);
+
+ if (!meta_entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
+ sync_entry->set_client_defined_unique_tag(
+ meta_entry.Get(syncable::UNIQUE_CLIENT_TAG));
+ }
+
+ // Deleted items with server-unknown parent ids can be a problem, so we set
+ // the parent to 0. (TODO(sync): Still true in protocol?).
+ Id new_parent_id;
+ if (meta_entry.Get(syncable::IS_DEL) &&
+ !meta_entry.Get(syncable::PARENT_ID).ServerKnows()) {
+ new_parent_id = session->write_transaction()->root_id();
+ } else {
+ new_parent_id = meta_entry.Get(syncable::PARENT_ID);
+ }
+ sync_entry->set_parent_id(new_parent_id);
+
+ // If our parent has changed, send up the old one so the server
+ // can correctly deal with multiple parents.
+ // TODO(nick): With the server keeping track of the primary sync parent,
+ // it should not be necessary to provide the old_parent_id: the version
+ // number should suffice.
+ if (new_parent_id != meta_entry.Get(syncable::SERVER_PARENT_ID) &&
+ 0 != meta_entry.Get(syncable::BASE_VERSION) &&
+ syncable::CHANGES_VERSION != meta_entry.Get(syncable::BASE_VERSION)) {
+ sync_entry->set_old_parent_id(meta_entry.Get(syncable::SERVER_PARENT_ID));
+ }
+
+ int64 version = meta_entry.Get(syncable::BASE_VERSION);
+ if (syncable::CHANGES_VERSION == version || 0 == version) {
+ // Undeletions are only supported for items that have a client tag.
+ DCHECK(!id.ServerKnows() ||
+ !meta_entry.Get(syncable::UNIQUE_CLIENT_TAG).empty())
+ << meta_entry;
+
+ // Version 0 means to create or undelete an object.
+ sync_entry->set_version(0);
+ } else {
+ DCHECK(id.ServerKnows()) << meta_entry;
+ sync_entry->set_version(meta_entry.Get(syncable::BASE_VERSION));
+ }
+ sync_entry->set_ctime(TimeToProtoTime(meta_entry.Get(syncable::CTIME)));
+ sync_entry->set_mtime(TimeToProtoTime(meta_entry.Get(syncable::MTIME)));
+
+ // Deletion is final on the server, let's move things and then delete them.
+ if (meta_entry.Get(IS_DEL)) {
+ sync_entry->set_deleted(true);
+ } else {
+ if (meta_entry.Get(SPECIFICS).has_bookmark()) {
+ // Common data in both new and old protocol.
+ const Id& prev_id = meta_entry.Get(syncable::PREV_ID);
+ string prev_id_string =
+ prev_id.IsRoot() ? string() : prev_id.GetServerId();
+ sync_entry->set_insert_after_item_id(prev_id_string);
+
+ // Compute a numeric position based on what we know locally.
+ std::pair<int64, int64> position_block(
+ GetFirstPosition(), GetLastPosition());
+ std::map<Id, std::pair<int64, int64> >::iterator prev_pos =
+ position_map.find(prev_id);
+ if (prev_pos != position_map.end()) {
+ position_block = prev_pos->second;
+ position_map.erase(prev_pos);
+ } else {
+ position_block = std::make_pair(
+ FindAnchorPosition(syncable::PREV_ID, meta_entry),
+ FindAnchorPosition(syncable::NEXT_ID, meta_entry));
+ }
+ position_block.first = InterpolatePosition(position_block.first,
+ position_block.second);
+
+ position_map[id] = position_block;
+ sync_entry->set_position_in_parent(position_block.first);
+ }
+ SetEntrySpecifics(&meta_entry, sync_entry);
+ }
+ }
+ session->mutable_status_controller()->
+ mutable_commit_message()->CopyFrom(message);
+
+ return SYNCER_OK;
+}
+
+// Walks the sibling chain of |entry| in |direction| (syncable::PREV_ID or
+// syncable::NEXT_ID) until it reaches a sibling whose position the server
+// already knows about, and returns that sibling's server position.  If the
+// walk hits the root first (an open-ended range), returns the extremal
+// position for that direction.
+int64 BuildCommitCommand::FindAnchorPosition(syncable::IdField direction,
+                                             const syncable::Entry& entry) {
+  Id next_id = entry.Get(direction);
+  while (!next_id.IsRoot()) {
+    Entry next_entry(entry.trans(),
+                     syncable::GET_BY_ID,
+                     next_id);
+    // Only a sibling that is neither locally modified nor pending an update
+    // has a server position we can safely anchor to.
+    if (!next_entry.Get(IS_UNSYNCED) && !next_entry.Get(IS_UNAPPLIED_UPDATE)) {
+      return next_entry.Get(SERVER_POSITION_IN_PARENT);
+    }
+    next_id = next_entry.Get(direction);
+  }
+  // No synced sibling exists in this direction; treat the range as open.
+  return
+      direction == syncable::PREV_ID ?
+      GetFirstPosition() : GetLastPosition();
+}
+
+// Picks a position value between the anchors |lo| and |hi| (as produced by
+// FindAnchorPosition), leaving room for future insertions on either side.
+int64 BuildCommitCommand::InterpolatePosition(const int64 lo,
+                                              const int64 hi) {
+  DCHECK_LE(lo, hi);
+
+  // The first item to be added under a parent gets a position of zero.
+  if (lo == GetFirstPosition() && hi == GetLastPosition())
+    return 0;
+
+  // For small gaps, we do linear interpolation.  For larger gaps,
+  // we use an additive offset of |GetGap()|.  We are careful to avoid
+  // signed integer overflow: |delta| is computed in unsigned arithmetic,
+  // where wraparound is well defined.
+  uint64 delta = static_cast<uint64>(hi) - static_cast<uint64>(lo);
+  if (delta <= static_cast<uint64>(GetGap()*2))
+    return lo + (static_cast<int64>(delta) + 7) / 8;  // ceil(delta/8): stay
+                                                      // close to |lo|.
+  else if (lo == GetFirstPosition())
+    return hi - GetGap();  // Extend range just before successor.
+  else
+    return lo + GetGap();  // Use or extend range just after predecessor.
+}
+
+
+} // namespace browser_sync
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
new file mode 100644
index 0000000..d18c94b
--- /dev/null
+++ b/sync/engine/build_commit_command.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+#define SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "sync/engine/syncer_command.h"
+#include "sync/engine/syncproto.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+// A SyncerCommand that assembles the commit message to be sent to the sync
+// server from the session's unsynced items, including per-item names,
+// parent ids, versions, and numeric sibling positions.
+class BuildCommitCommand : public SyncerCommand {
+ public:
+  BuildCommitCommand();
+  virtual ~BuildCommitCommand();
+
+  // SyncerCommand implementation.
+  virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(BuildCommitCommandTest, InterpolatePosition);
+
+  // Functions returning constants controlling range of values.
+  static int64 GetFirstPosition();
+  static int64 GetLastPosition();
+  static int64 GetGap();
+
+  // Copies recorded extensions activity into |message|.
+  // NOTE(review): inferred from the name -- confirm against the definition
+  // in build_commit_command.cc.
+  void AddExtensionsActivityToMessage(sessions::SyncSession* session,
+                                      CommitMessage* message);
+  // Helper for computing position.  Find the numeric position value
+  // of the closest already-synced entry.  |direction| must be one of
+  // NEXT_ID or PREV_ID; this parameter controls the search direction.
+  // For an open range (no predecessor or successor), the return
+  // value will be kFirstPosition or kLastPosition.
+  int64 FindAnchorPosition(syncable::IdField direction,
+                           const syncable::Entry& entry);
+  // Given two values of the type returned by FindAnchorPosition,
+  // compute a third value in between the two ranges.
+  int64 InterpolatePosition(int64 lo, int64 hi);
+
+  DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
diff --git a/sync/engine/build_commit_command_unittest.cc b/sync/engine/build_commit_command_unittest.cc
new file mode 100644
index 0000000..f0a5bea
--- /dev/null
+++ b/sync/engine/build_commit_command_unittest.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/build_commit_command.h"
+#include "sync/test/engine/syncer_command_test.h"
+
+namespace browser_sync {
+
+// A test fixture for tests exercising BuildCommitCommand.
+// (The original comment said "ClearDataCommandTest" -- a copy-paste error.)
+class BuildCommitCommandTest : public SyncerCommandTest {
+ protected:
+  BuildCommitCommandTest() {}
+  // The command under test.
+  BuildCommitCommand command_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BuildCommitCommandTest);
+};
+
+// Exercises InterpolatePosition over dense ranges, sparse ranges, the
+// extremes of the position space, and values around zero.
+TEST_F(BuildCommitCommandTest, InterpolatePosition) {
+  EXPECT_LT(BuildCommitCommand::GetFirstPosition(),
+            BuildCommitCommand::GetLastPosition());
+
+  // Dense ranges: the result advances by ceil(delta/8) past |lo|.
+  EXPECT_EQ(10, command_.InterpolatePosition(10, 10));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 11));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 12));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 13));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 14));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 15));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 16));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 17));
+  EXPECT_EQ(11, command_.InterpolatePosition(10, 18));
+  EXPECT_EQ(12, command_.InterpolatePosition(10, 19));
+  EXPECT_EQ(12, command_.InterpolatePosition(10, 20));
+
+  // Sparse ranges: the result is |lo| plus the fixed gap.
+  EXPECT_EQ(0x32535ffe3dc97LL + BuildCommitCommand::GetGap(),
+      command_.InterpolatePosition(0x32535ffe3dc97LL, 0x61abcd323122cLL));
+  EXPECT_EQ(~0x61abcd323122cLL + BuildCommitCommand::GetGap(),
+      command_.InterpolatePosition(~0x61abcd323122cLL, ~0x32535ffe3dc97LL));
+
+  // Lower limits
+  EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 0x20,
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition(),
+                BuildCommitCommand::GetFirstPosition() + 0x100));
+  EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 2,
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition() + 1,
+                BuildCommitCommand::GetFirstPosition() + 2));
+  EXPECT_EQ(BuildCommitCommand::GetFirstPosition() +
+            BuildCommitCommand::GetGap()/8 + 1,
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition() + 1,
+                BuildCommitCommand::GetFirstPosition() + 1 +
+                BuildCommitCommand::GetGap()));
+
+  // Extremal cases.
+  EXPECT_EQ(0,
+      command_.InterpolatePosition(BuildCommitCommand::GetFirstPosition(),
+                                   BuildCommitCommand::GetLastPosition()));
+  EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
+            BuildCommitCommand::GetGap(),
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition() + 1,
+                BuildCommitCommand::GetLastPosition()));
+  EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
+            BuildCommitCommand::GetGap(),
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition() + 1,
+                BuildCommitCommand::GetLastPosition() - 1));
+  EXPECT_EQ(BuildCommitCommand::GetLastPosition() - 1 -
+            BuildCommitCommand::GetGap(),
+            command_.InterpolatePosition(
+                BuildCommitCommand::GetFirstPosition(),
+                BuildCommitCommand::GetLastPosition() - 1));
+
+  // Edge cases around zero.
+  EXPECT_EQ(BuildCommitCommand::GetGap(),
+      command_.InterpolatePosition(0, BuildCommitCommand::GetLastPosition()));
+  EXPECT_EQ(BuildCommitCommand::GetGap() + 1,
+      command_.InterpolatePosition(1, BuildCommitCommand::GetLastPosition()));
+  EXPECT_EQ(BuildCommitCommand::GetGap() - 1,
+      command_.InterpolatePosition(-1, BuildCommitCommand::GetLastPosition()));
+  EXPECT_EQ(-BuildCommitCommand::GetGap(),
+      command_.InterpolatePosition(BuildCommitCommand::GetFirstPosition(), 0));
+  EXPECT_EQ(-BuildCommitCommand::GetGap() + 1,
+      command_.InterpolatePosition(BuildCommitCommand::GetFirstPosition(), 1));
+  EXPECT_EQ(-BuildCommitCommand::GetGap() - 1,
+      command_.InterpolatePosition(BuildCommitCommand::GetFirstPosition(), -1));
+  EXPECT_EQ(BuildCommitCommand::GetGap() / 8,
+      command_.InterpolatePosition(0, BuildCommitCommand::GetGap()));
+  EXPECT_EQ(BuildCommitCommand::GetGap() / 4,
+      command_.InterpolatePosition(0, BuildCommitCommand::GetGap()*2));
+  EXPECT_EQ(BuildCommitCommand::GetGap(),
+      command_.InterpolatePosition(0, BuildCommitCommand::GetGap()*2 + 1));
+}
+
+} // namespace browser_sync
+
+
diff --git a/sync/engine/cleanup_disabled_types_command.cc b/sync/engine/cleanup_disabled_types_command.cc
new file mode 100644
index 0000000..35e61cd
--- /dev/null
+++ b/sync/engine/cleanup_disabled_types_command.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/cleanup_disabled_types_command.h"
+
+#include <algorithm>
+
+#include "sync/sessions/sync_session.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+// The command is stateless; nothing to construct or tear down.
+CleanupDisabledTypesCommand::CleanupDisabledTypesCommand() {}
+CleanupDisabledTypesCommand::~CleanupDisabledTypesCommand() {}
+
+SyncerError CleanupDisabledTypesCommand::ExecuteImpl(
+    sessions::SyncSession* session) {
+  using syncable::ModelTypeSet;
+  using syncable::ModelTypeSetToString;
+  // A full directory purge is expensive, so we normally purge only types we
+  // have reason to believe were enabled before.  The exception is the very
+  // first session (no previous routing info): purging could theoretically
+  // have failed before a restart, so we pay for one full "deep clean" of
+  // every currently-disabled type to converge to the correct state.
+  //
+  //                      in_previous       | !in_previous
+  //                                        |
+  //  initial_sync_ended  should clean      | may have attempted cleanup
+  //  !initial_sync_ended should clean      | may have never been enabled, or
+  //                                        | could have been disabled before
+  //                                        | initial sync ended and cleanup
+  //                                        | may not have happened yet
+  //                                        | (failure, browser restart
+  //                                        | before another sync session,..)
+
+  const ModelTypeSet current_types =
+      GetRoutingInfoTypes(session->routing_info());
+  const ModelTypeSet prior_types =
+      GetRoutingInfoTypes(
+          session->context()->previous_session_routing_info());
+
+  // Candidates: every type that is not enabled right now...
+  ModelTypeSet purge_set = Difference(ModelTypeSet::All(), current_types);
+
+  // ...narrowed to the previously-enabled types, unless this is the first
+  // session of this process (empty |prior_types|).
+  if (!prior_types.Empty())
+    purge_set.RetainAll(prior_types);
+
+  DVLOG(1) << "enabled_types = " << ModelTypeSetToString(current_types)
+           << ", previous_enabled_types = "
+           << ModelTypeSetToString(prior_types)
+           << ", to_cleanup = " << ModelTypeSetToString(purge_set);
+
+  if (purge_set.Empty())
+    return SYNCER_OK;
+
+  session->context()->directory()->PurgeEntriesWithTypeIn(purge_set);
+  return SYNCER_OK;
+}
+
+} // namespace browser_sync
+
diff --git a/sync/engine/cleanup_disabled_types_command.h b/sync/engine/cleanup_disabled_types_command.h
new file mode 100644
index 0000000..c855814
--- /dev/null
+++ b/sync/engine/cleanup_disabled_types_command.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+#define SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+// A syncer command that purges (from memory and disk) entries belonging to
+// a ModelType or ServerModelType that the user has not elected to sync.
+//
+// This is done as part of a session to 1) ensure it does not block the UI,
+// and 2) avoid complicated races that could arise between a) deleting
+// things b) a sync session trying to use these things c) and the potential
+// re-enabling of the data type by the user before some scheduled deletion
+// took place. Here, we are safe to perform I/O synchronously and we know it
+// is a safe time to delete as we are in the only active session.
+//
+// The removal from memory is done synchronously, while the disk purge is left
+// to an asynchronous SaveChanges operation. However, all the updates for
+// meta data fields (such as initial_sync_ended) as well as the actual entry
+// deletions will be committed in a single sqlite transaction. Thus it is
+// possible that disabled types re-appear (in the sync db) after a reboot,
+// but things will remain in a consistent state. This kind of error case is
+// cared for in this command by retrying; see ExecuteImpl.
+class CleanupDisabledTypesCommand : public SyncerCommand {
+ public:
+  CleanupDisabledTypesCommand();
+  virtual ~CleanupDisabledTypesCommand();
+
+  // SyncerCommand implementation.  Purges the types that are currently
+  // disabled but were enabled in the previous session (or, on the first
+  // session, all disabled types); see the class comment for the retry
+  // rationale.
+  virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CleanupDisabledTypesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+
diff --git a/sync/engine/cleanup_disabled_types_command_unittest.cc b/sync/engine/cleanup_disabled_types_command_unittest.cc
new file mode 100644
index 0000000..daa1365
--- /dev/null
+++ b/sync/engine/cleanup_disabled_types_command_unittest.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "sync/engine/cleanup_disabled_types_command.h"
+
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type_test_util.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace browser_sync {
+
+namespace {
+
+using syncable::HasModelTypes;
+using syncable::ModelTypeSet;
+using testing::_;
+
+// Fixture that starts every test with BOOKMARKS as the only routed type.
+class CleanupDisabledTypesCommandTest : public MockDirectorySyncerCommandTest {
+ public:
+  CleanupDisabledTypesCommandTest() {}
+
+  virtual void SetUp() {
+    // Reset routing info so each test starts from a known single-type state.
+    mutable_routing_info()->clear();
+    (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_PASSIVE;
+    MockDirectorySyncerCommandTest::SetUp();
+  }
+
+ private:
+  // Added for consistency with the other test fixtures in this patch
+  // (BuildCommitCommandTest, ClearDataCommandTest), which all disallow
+  // copy and assign.
+  DISALLOW_COPY_AND_ASSIGN(CleanupDisabledTypesCommandTest);
+};
+
+// TODO(tim): Add syncer test to verify previous routing info is set.
+// On the first session there is no previous routing info, so the command
+// performs a "deep clean": every type except the currently-enabled one
+// (BOOKMARKS, set up by the fixture) should be purged.
+TEST_F(CleanupDisabledTypesCommandTest, NoPreviousRoutingInfo) {
+  CleanupDisabledTypesCommand command;
+  ModelTypeSet expected = ModelTypeSet::All();
+  expected.Remove(syncable::BOOKMARKS);
+  EXPECT_CALL(*mock_directory(),
+              PurgeEntriesWithTypeIn(HasModelTypes(expected)));
+  command.ExecuteImpl(session());
+}
+
+// Verifies that no purge occurs when every previously-enabled type is still
+// enabled (first with the previous set being a strict subset of the current
+// set, then with the two sets equal).
+TEST_F(CleanupDisabledTypesCommandTest, NoPurge) {
+  CleanupDisabledTypesCommand command;
+  EXPECT_CALL(*mock_directory(), PurgeEntriesWithTypeIn(_)).Times(0);
+
+  // Previous session: {BOOKMARKS}.  Current session: {BOOKMARKS, AUTOFILL}.
+  ModelSafeRoutingInfo prev(routing_info());
+  session()->context()->set_previous_session_routing_info(prev);
+  (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_PASSIVE;
+  command.ExecuteImpl(session());
+
+  // Previous session == current session.
+  // Fix: the original updated only the local |prev| copy and never pushed it
+  // back into the session context, so the second ExecuteImpl re-ran the
+  // first scenario instead of the intended "previous == current" case.
+  prev = routing_info();
+  session()->context()->set_previous_session_routing_info(prev);
+  command.ExecuteImpl(session());
+}
+
+// Verifies that exactly the types present in the previous session but
+// absent from the current one (PASSWORDS, PREFERENCES) are purged.
+TEST_F(CleanupDisabledTypesCommandTest, TypeDisabled) {
+  CleanupDisabledTypesCommand command;
+
+  // Current session: BOOKMARKS (fixture) + AUTOFILL, THEMES, EXTENSIONS.
+  (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_PASSIVE;
+  (*mutable_routing_info())[syncable::THEMES] = GROUP_PASSIVE;
+  (*mutable_routing_info())[syncable::EXTENSIONS] = GROUP_PASSIVE;
+
+  // Previous session additionally had PASSWORDS and PREFERENCES, which are
+  // now disabled.
+  ModelSafeRoutingInfo prev(routing_info());
+  prev[syncable::PASSWORDS] = GROUP_PASSIVE;
+  prev[syncable::PREFERENCES] = GROUP_PASSIVE;
+  session()->context()->set_previous_session_routing_info(prev);
+
+  const ModelTypeSet expected(syncable::PASSWORDS, syncable::PREFERENCES);
+  EXPECT_CALL(*mock_directory(),
+              PurgeEntriesWithTypeIn(HasModelTypes(expected)));
+  command.ExecuteImpl(session());
+}
+
+} // namespace
+
+} // namespace browser_sync
diff --git a/sync/engine/clear_data_command.cc b/sync/engine/clear_data_command.cc
new file mode 100644
index 0000000..e7d7343
--- /dev/null
+++ b/sync/engine/clear_data_command.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/clear_data_command.h"
+
+#include <string>
+
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/sync_session.h"
+
+namespace browser_sync {
+
+using sessions::StatusController;
+using sessions::SyncSession;
+using std::string;
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+
+
+// The command is stateless; nothing to construct or tear down.
+ClearDataCommand::ClearDataCommand() {}
+ClearDataCommand::~ClearDataCommand() {}
+
+// Sends a CLEAR_DATA request to the sync server.  On success, notifies
+// listeners with CLEAR_SERVER_DATA_SUCCEEDED and asks the session delegate
+// to stop syncing permanently; on failure, fires CLEAR_SERVER_DATA_FAILED
+// and leaves sync enabled so the operation can be retried.
+SyncerError ClearDataCommand::ExecuteImpl(SyncSession* session) {
+  ClientToServerMessage client_to_server_message;
+  ClientToServerResponse client_to_server_response;
+
+  client_to_server_message.set_share(session->context()->account_name());
+  client_to_server_message.set_message_contents(
+      ClientToServerMessage::CLEAR_DATA);
+
+  // Presence of the (empty) clear_user_data submessage marks the request.
+  client_to_server_message.mutable_clear_user_data();
+
+  SyncerProtoUtil::AddRequestBirthday(session->context()->directory(),
+                                      &client_to_server_message);
+
+  DVLOG(1) << "Clearing server data";
+
+  SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
+      client_to_server_message,
+      &client_to_server_response,
+      session);
+
+  DVLOG(1) << SyncerProtoUtil::ClientToServerResponseDebugString(
+      client_to_server_response);
+
+  // TODO(lipalani): This code is wrong. The response error codes it checks
+  // have been obsoleted. The only reason it hasn't caused problems is that
+  // this code is unreachable. We should do something to clean up this mess.
+  // See also: crbug.com/71616.
+  //
+  // Clear pending indicates that the server has received our clear message
+  if (result != SYNCER_OK || !client_to_server_response.has_error_code() ||
+      client_to_server_response.error_code() != sync_pb::SyncEnums::SUCCESS) {
+    // On failure, subsequent requests to the server will cause it to attempt
+    // to resume the clear. The client will handle disabling of sync in
+    // response to a store birthday error from the server.
+    SyncEngineEvent event(SyncEngineEvent::CLEAR_SERVER_DATA_FAILED);
+    session->context()->NotifyListeners(event);
+
+    LOG(ERROR) << "Error posting ClearData.";
+
+    return result;
+  }
+
+  SyncEngineEvent event(SyncEngineEvent::CLEAR_SERVER_DATA_SUCCEEDED);
+  session->context()->NotifyListeners(event);
+
+  // The user asked for their data to be removed; stop syncing on this client.
+  session->delegate()->OnShouldStopSyncingPermanently();
+
+  DVLOG(1) << "ClearData succeeded.";
+  return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/clear_data_command.h b/sync/engine/clear_data_command.h
new file mode 100644
index 0000000..5302871
--- /dev/null
+++ b/sync/engine/clear_data_command.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_CLEAR_DATA_COMMAND_H_
+#define SYNC_ENGINE_CLEAR_DATA_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/syncer_command.h"
+#include "sync/syncable/model_type.h"
+
+namespace browser_sync {
+
+// Clears server data associated with this user's account
+class ClearDataCommand : public SyncerCommand {
+ public:
+  ClearDataCommand();
+  virtual ~ClearDataCommand();
+
+  // SyncerCommand implementation.  Posts a CLEAR_DATA request to the server
+  // and notifies session listeners of success or failure.
+  virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ClearDataCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_CLEAR_DATA_COMMAND_H_
diff --git a/sync/engine/clear_data_command_unittest.cc b/sync/engine/clear_data_command_unittest.cc
new file mode 100644
index 0000000..b94d08a
--- /dev/null
+++ b/sync/engine/clear_data_command_unittest.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/clear_data_command.h"
+#include "sync/protocol/autofill_specifics.pb.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "sync/test/sessions/test_scoped_session_event_listener.h"
+
+namespace browser_sync {
+
+using sessions::TestScopedSessionEventListener;
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+
+// A test fixture for tests exercising ClearDataCommand.
+class ClearDataCommandTest : public SyncerCommandTest {
+ protected:
+  // Fix: initialize the flag in the constructor -- the original left it
+  // uninitialized, relying on each test to reset it before use.
+  ClearDataCommandTest() : on_should_stop_syncing_permanently_called_(false) {}
+
+  // The command under test.
+  ClearDataCommand command_;
+
+  // Delegate hook; records that the command requested sync be disabled.
+  // NOTE(review): presumably overrides a SyncerCommandTest delegate method;
+  // confirm against the fixture's base class.
+  virtual void OnShouldStopSyncingPermanently() {
+    on_should_stop_syncing_permanently_called_ = true;
+  }
+
+  bool on_should_stop_syncing_permanently_called_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ClearDataCommandTest);
+};
+
+// Listener that records which clear-data events were observed during a test.
+//
+// TODO: "Receieved" in the accessor names is a typo for "Received"; rename
+// together with all call sites in this file.
+class ClearEventHandler : public SyncEngineEventListener {
+ public:
+  ClearEventHandler() {
+    ResetReceivedEvents();
+  }
+  virtual ~ClearEventHandler() {}
+
+  bool ReceievedClearSuccessEvent() { return received_clear_success_event_; }
+  bool ReceievedClearFailedEvent() { return received_clear_failed_event_; }
+  void ResetReceivedEvents() {
+    received_clear_success_event_ = false;
+    received_clear_failed_event_ = false;
+  }
+
+  // SyncEngineEventListener implementation.
+  virtual void OnSyncEngineEvent(const SyncEngineEvent& event) {
+    if (event.what_happened == SyncEngineEvent::CLEAR_SERVER_DATA_FAILED) {
+      received_clear_failed_event_ = true;
+    } else if (event.what_happened ==
+               SyncEngineEvent::CLEAR_SERVER_DATA_SUCCEEDED) {
+      received_clear_success_event_ = true;
+    }
+  }
+
+ private:
+  bool received_clear_success_event_;
+  bool received_clear_failed_event_;
+
+  // Added for consistency with the file's other classes.
+  DISALLOW_COPY_AND_ASSIGN(ClearEventHandler);
+};
+
+// With the server unreachable, the command must send the request, fire the
+// failure event, and NOT disable sync.
+TEST_F(ClearDataCommandTest, ClearDataCommandExpectFailed) {
+  ConfigureMockServerConnection();
+  scoped_ptr<ClearEventHandler> handler(new ClearEventHandler());
+  TestScopedSessionEventListener reg(context(), handler.get());
+
+  directory()->set_store_birthday(mock_server()->store_birthday());
+  mock_server()->SetServerNotReachable();
+  on_should_stop_syncing_permanently_called_ = false;
+
+  command_.Execute(session());
+
+  // Expect that the client sent a clear request, received failure,
+  // fired a failure event, but did not disable sync.
+  //
+  // A failure event will be bubbled back to the user's UI, and the
+  // user can press "clear" again.
+  //
+  // We do not want to disable sync in the client because the user may
+  // incorrectly get the impression that their private data has been cleared
+  // from the server (from the fact that their data is gone on the client).
+  //
+  // Any subsequent GetUpdates/Commit requests or attempts to enable sync
+  // will cause the server to attempt to resume the clearing process (within
+  // a bounded window of time)
+  const sync_pb::ClientToServerMessage& r = mock_server()->last_request();
+  EXPECT_TRUE(r.has_clear_user_data());
+
+  EXPECT_TRUE(handler.get()->ReceievedClearFailedEvent());
+
+  EXPECT_FALSE(handler.get()->ReceievedClearSuccessEvent());
+  EXPECT_FALSE(on_should_stop_syncing_permanently_called_);
+}
+
+// With the server reporting SUCCESS, the command must send the request,
+// fire the success event, and permanently disable sync on this client.
+TEST_F(ClearDataCommandTest, ClearDataCommandExpectSuccess) {
+  ConfigureMockServerConnection();
+  scoped_ptr<ClearEventHandler> handler(new ClearEventHandler());
+  TestScopedSessionEventListener reg(context(), handler.get());
+
+  directory()->set_store_birthday(mock_server()->store_birthday());
+  mock_server()->SetClearUserDataResponseStatus(sync_pb::SyncEnums::SUCCESS);
+  on_should_stop_syncing_permanently_called_ = false;
+
+  command_.Execute(session());
+
+  // Expect that the client sent a clear request, fired off the success event
+  // in response, and disabled sync
+  const sync_pb::ClientToServerMessage& r = mock_server()->last_request();
+  EXPECT_TRUE(r.has_clear_user_data());
+
+  EXPECT_TRUE(handler->ReceievedClearSuccessEvent());
+  EXPECT_TRUE(on_should_stop_syncing_permanently_called_);
+
+  EXPECT_FALSE(handler->ReceievedClearFailedEvent());
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/conflict_resolver.cc b/sync/engine/conflict_resolver.cc
new file mode 100644
index 0000000..548613f
--- /dev/null
+++ b/sync/engine/conflict_resolver.cc
@@ -0,0 +1,404 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/conflict_resolver.h"
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <set>
+
+#include "base/location.h"
+#include "base/metrics/histogram.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/service_constants.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/cryptographer.h"
+
+using std::list;
+using std::map;
+using std::set;
+using syncable::BaseTransaction;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::GetModelTypeFromSpecifics;
+using syncable::Id;
+using syncable::IsRealDataType;
+using syncable::MutableEntry;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using sessions::ConflictProgress;
+using sessions::StatusController;
+
+namespace {
+
+const int SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT = 8;
+
+} // namespace
+
+// ConflictResolver holds no data members (see conflict_resolver.h), so
+// construction and destruction are trivial.
+ConflictResolver::ConflictResolver() {
+}
+
+ConflictResolver::~ConflictResolver() {
+}
+
+// Resolves a conflict by discarding the local modifications: clearing
+// IS_UNSYNCED removes the entry from the set of items to commit, leaving
+// any pending server update in place.
+void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
+  // An update matches local actions, merge the changes.
+  // This is a little fishy because we don't actually merge them.
+  // In the future we should do a 3-way merge.
+  // With IS_UNSYNCED false, changes should be merged.
+  entry->Put(syncable::IS_UNSYNCED, false);
+}
+
+// Resolves a conflict by discarding the pending server update: BASE_VERSION
+// is advanced to SERVER_VERSION and IS_UNAPPLIED_UPDATE is cleared.
+// IS_UNSYNCED is left untouched.
+// NOTE(review): |trans| is unused in this method; kept to match the
+// declared interface.
+void ConflictResolver::OverwriteServerChanges(WriteTransaction* trans,
+                                              MutableEntry* entry) {
+  // This is similar to an overwrite from the old client.
+  // This is equivalent to a scenario where we got the update before we'd
+  // made our local client changes.
+  // TODO(chron): This is really a general property clobber. We clobber
+  // the server side property. Perhaps we should actually do property merging.
+  entry->Put(syncable::BASE_VERSION, entry->Get(syncable::SERVER_VERSION));
+  entry->Put(syncable::IS_UNAPPLIED_UPDATE, false);
+}
+
+// Resolves one item that is in simple conflict, i.e. has both IS_UNSYNCED
+// and IS_UNAPPLIED_UPDATE set, choosing client-wins, server-wins, or
+// ignore-both depending on which visible properties actually differ.
+// Returns SYNC_PROGRESS if the resolution can advance syncing.
+ConflictResolver::ProcessSimpleConflictResult
+ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
+                                        const Id& id,
+                                        const Cryptographer* cryptographer,
+                                        StatusController* status) {
+  MutableEntry entry(trans, syncable::GET_BY_ID, id);
+  // Must be good as the entry won't have been cleaned up.
+  CHECK(entry.good());
+
+  // This function can only resolve simple conflicts. Simple conflicts have
+  // both IS_UNSYNCED and IS_UNAPPLIED_UPDATE set.
+  if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+      !entry.Get(syncable::IS_UNSYNCED)) {
+    // This is very unusual, but it can happen in tests. We may be able to
+    // assert NOTREACHED() here when those tests are updated.
+    return NO_SYNC_PROGRESS;
+  }
+
+  if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) {
+    // We've both deleted it, so let's just drop the need to commit/update
+    // this entry.
+    entry.Put(syncable::IS_UNSYNCED, false);
+    entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+    // We've made changes, but they won't help syncing progress.
+    // METRIC simple conflict resolved by merge.
+    return NO_SYNC_PROGRESS;
+  }
+
+  // This logic determines "client wins" vs. "server wins" strategy picking.
+  // By the time we get to this point, we rely on the following to be true:
+  // a) We can decrypt both the local and server data (else we'd be in
+  // conflict encryption and not attempting to resolve).
+  // b) All unsynced changes have been re-encrypted with the default key (
+  // occurs either in AttemptToUpdateEntry, SetPassphrase, or
+  // RefreshEncryption).
+  // c) Base_server_specifics having a valid datatype means that we received
+  // an undecryptable update that only changed specifics, and since then have
+  // not received any further non-specifics-only or decryptable updates.
+  // d) If the server_specifics match specifics, server_specifics are
+  // encrypted with the default key, and all other visible properties match,
+  // then we can safely ignore the local changes as redundant.
+  // e) Otherwise if the base_server_specifics match the server_specifics, no
+  // functional change must have been made server-side (else
+  // base_server_specifics would have been cleared), and we can therefore
+  // safely ignore the server changes as redundant.
+  // f) Otherwise, it's in general safer to ignore local changes, with the
+  // exception of deletion conflicts (choose to undelete) and conflicts
+  // where the non_unique_name or parent don't match.
+  if (!entry.Get(syncable::SERVER_IS_DEL)) {
+    // TODO(nick): The current logic is arbitrary; instead, it ought to be made
+    // consistent with the ModelAssociator behavior for a datatype. It would
+    // be nice if we could route this back to ModelAssociator code to pick one
+    // of three options: CLIENT, SERVER, or MERGE. Some datatypes (autofill)
+    // are easily mergeable.
+    // See http://crbug.com/77339.
+    bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) ==
+                        entry.Get(syncable::SERVER_NON_UNIQUE_NAME);
+    bool parent_matches = entry.Get(syncable::PARENT_ID) ==
+                          entry.Get(syncable::SERVER_PARENT_ID);
+    bool entry_deleted = entry.Get(syncable::IS_DEL);
+
+    // This positional check is meant to be necessary but not sufficient. As a
+    // result, it may be false even when the position hasn't changed, possibly
+    // resulting in unnecessary commits, but if it's true the position has
+    // definitely not changed. The check works by verifying that the prev id
+    // as calculated from the server position (which will ignore any
+    // unsynced/unapplied predecessors and be root for non-bookmark datatypes)
+    // matches the client prev id. Because we traverse chains of conflicting
+    // items in predecessor -> successor order, we don't need to also verify the
+    // successor matches (If it's in conflict, we'll verify it next. If it's
+    // not, then it should be taken into account already in the
+    // ComputePrevIdFromServerPosition calculation). This works even when there
+    // are chains of conflicting items.
+    //
+    // Example: Original sequence was abcde. Server changes to aCDbe, while
+    // client changes to aDCbe (C and D are in conflict). Locally, D's prev id
+    // is a, while C's prev id is D. On the other hand, the server prev id will
+    // ignore unsynced/unapplied items, so D's server prev id will also be a,
+    // just like C's. Because we traverse in client predecessor->successor
+    // order, we evaluate D first. Since prev id and server id match, we
+    // consider the position to have remained the same for D, and will unset
+    // its UNSYNCED/UNAPPLIED bits. When we evaluate C though, we'll see that
+    // the prev id is D locally while the server's prev id is a. C will
+    // therefore count as a positional conflict (and the local data will be
+    // overwritten by the server data typically). The final result will be
+    // aCDbe (the same as the server's view). Even though both C and D were
+    // modified, only one counted as being in actual conflict and was resolved
+    // with local/server wins.
+    //
+    // In general, when there are chains of positional conflicts, only the first
+    // item in chain (based on the client's point of view) will have both its
+    // server prev id and local prev id match. For all the rest the server prev
+    // id will be the predecessor of the first item in the chain, and therefore
+    // not match the local prev id.
+    //
+    // Similarly, chains of conflicts where the server and client info are the
+    // same are supported due to the predecessor->successor ordering. In this
+    // case, from the first item onward, we unset the UNSYNCED/UNAPPLIED bits as
+    // we decide that nothing changed. The subsequent item's server prev id will
+    // accurately match the local prev id because the predecessor is no longer
+    // UNSYNCED/UNAPPLIED.
+    // TODO(zea): simplify all this once we can directly compare server position
+    // to client position.
+    syncable::Id server_prev_id = entry.ComputePrevIdFromServerPosition(
+        entry.Get(syncable::SERVER_PARENT_ID));
+    bool needs_reinsertion = !parent_matches ||
+        server_prev_id != entry.Get(syncable::PREV_ID);
+    DVLOG_IF(1, needs_reinsertion) << "Insertion needed, server prev id is "
+        << server_prev_id << ", local prev id is "
+        << entry.Get(syncable::PREV_ID);
+    const sync_pb::EntitySpecifics& specifics =
+        entry.Get(syncable::SPECIFICS);
+    const sync_pb::EntitySpecifics& server_specifics =
+        entry.Get(syncable::SERVER_SPECIFICS);
+    const sync_pb::EntitySpecifics& base_server_specifics =
+        entry.Get(syncable::BASE_SERVER_SPECIFICS);
+    std::string decrypted_specifics, decrypted_server_specifics;
+    bool specifics_match = false;
+    bool server_encrypted_with_default_key = false;
+    if (specifics.has_encrypted()) {
+      DCHECK(cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted()));
+      decrypted_specifics = cryptographer->DecryptToString(
+          specifics.encrypted());
+    } else {
+      decrypted_specifics = specifics.SerializeAsString();
+    }
+    if (server_specifics.has_encrypted()) {
+      server_encrypted_with_default_key =
+          cryptographer->CanDecryptUsingDefaultKey(
+              server_specifics.encrypted());
+      decrypted_server_specifics = cryptographer->DecryptToString(
+          server_specifics.encrypted());
+    } else {
+      decrypted_server_specifics = server_specifics.SerializeAsString();
+    }
+    if (decrypted_server_specifics == decrypted_specifics &&
+        server_encrypted_with_default_key == specifics.has_encrypted()) {
+      specifics_match = true;
+    }
+    bool base_server_specifics_match = false;
+    if (server_specifics.has_encrypted() &&
+        IsRealDataType(GetModelTypeFromSpecifics(base_server_specifics))) {
+      std::string decrypted_base_server_specifics;
+      if (!base_server_specifics.has_encrypted()) {
+        decrypted_base_server_specifics =
+            base_server_specifics.SerializeAsString();
+      } else {
+        decrypted_base_server_specifics = cryptographer->DecryptToString(
+            base_server_specifics.encrypted());
+      }
+      if (decrypted_server_specifics == decrypted_base_server_specifics)
+        base_server_specifics_match = true;
+    }
+
+    // We manually merge nigori data.
+    if (entry.GetModelType() == syncable::NIGORI) {
+      // Create a new set of specifics based on the server specifics (which
+      // preserves their encryption keys). Named |merged_specifics| so it
+      // doesn't shadow the const reference |specifics| declared above.
+      sync_pb::EntitySpecifics merged_specifics =
+          entry.Get(syncable::SERVER_SPECIFICS);
+      sync_pb::NigoriSpecifics* server_nigori =
+          merged_specifics.mutable_nigori();
+      // Store the merged set of encrypted types (cryptographer->Update(..) will
+      // have merged the local types already).
+      cryptographer->UpdateNigoriFromEncryptedTypes(server_nigori);
+      // The local set of keys is already merged with the server's set within
+      // the cryptographer. If we don't have pending keys we can store the
+      // merged set back immediately. Else we preserve the server keys and will
+      // update the nigori when the user provides the pending passphrase via
+      // SetPassphrase(..).
+      if (cryptographer->is_ready()) {
+        cryptographer->GetKeys(server_nigori->mutable_encrypted());
+      }
+      // TODO(zea): Find a better way of doing this. As it stands, we have to
+      // update this code whenever we add a new non-cryptographer related field
+      // to the nigori node.
+      if (entry.Get(syncable::SPECIFICS).nigori().sync_tabs()) {
+        server_nigori->set_sync_tabs(true);
+      }
+      // We deliberately leave the server's device information. This client will
+      // add its own device information on restart.
+      entry.Put(syncable::SPECIFICS, merged_specifics);
+      DVLOG(1) << "Resolving simple conflict, merging nigori nodes: " << entry;
+      status->increment_num_server_overwrites();
+      OverwriteServerChanges(trans, &entry);
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                NIGORI_MERGE,
+                                CONFLICT_RESOLUTION_SIZE);
+    } else if (!entry_deleted && name_matches && parent_matches &&
+               specifics_match && !needs_reinsertion) {
+      DVLOG(1) << "Resolving simple conflict, everything matches, ignoring "
+               << "changes for: " << entry;
+      // This unsets both IS_UNSYNCED and IS_UNAPPLIED_UPDATE, and sets the
+      // BASE_VERSION to match the SERVER_VERSION. If we didn't also unset
+      // IS_UNAPPLIED_UPDATE, then we would lose unsynced positional data from
+      // adjacent entries when the server update gets applied and the item is
+      // re-inserted into the PREV_ID/NEXT_ID linked list. This is primarily
+      // an issue because we commit after applying updates, and is most
+      // commonly seen when positional changes are made while a passphrase
+      // is required (and hence there will be many encryption conflicts).
+      OverwriteServerChanges(trans, &entry);
+      IgnoreLocalChanges(&entry);
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                CHANGES_MATCH,
+                                CONFLICT_RESOLUTION_SIZE);
+    } else if (base_server_specifics_match) {
+      DVLOG(1) << "Resolving simple conflict, ignoring server encryption "
+               << "changes for: " << entry;
+      status->increment_num_server_overwrites();
+      OverwriteServerChanges(trans, &entry);
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                IGNORE_ENCRYPTION,
+                                CONFLICT_RESOLUTION_SIZE);
+    } else if (entry_deleted || !name_matches || !parent_matches) {
+      OverwriteServerChanges(trans, &entry);
+      status->increment_num_server_overwrites();
+      DVLOG(1) << "Resolving simple conflict, overwriting server changes "
+               << "for: " << entry;
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                OVERWRITE_SERVER,
+                                CONFLICT_RESOLUTION_SIZE);
+    } else {
+      DVLOG(1) << "Resolving simple conflict, ignoring local changes for: "
+               << entry;
+      IgnoreLocalChanges(&entry);
+      status->increment_num_local_overwrites();
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                OVERWRITE_LOCAL,
+                                CONFLICT_RESOLUTION_SIZE);
+    }
+    // Now that we've resolved the conflict, clear the prev server
+    // specifics.
+    entry.Put(syncable::BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
+    return SYNC_PROGRESS;
+  } else {  // SERVER_IS_DEL is true
+    // If a server deleted folder has local contents it should be a hierarchy
+    // conflict. Hierarchy conflicts should not be processed by this function.
+    // We could end up here if a change was made since we last tried to detect
+    // conflicts, which was during update application.
+    if (entry.Get(syncable::IS_DIR)) {
+      Directory::ChildHandles children;
+      trans->directory()->GetChildHandlesById(trans,
+                                              entry.Get(syncable::ID),
+                                              &children);
+      if (0 != children.size()) {
+        DVLOG(1) << "Entry is a server deleted directory with local contents, "
+                 << "should be a hierarchy conflict. (race condition).";
+        return NO_SYNC_PROGRESS;
+      }
+    }
+
+    // The entry is deleted on the server but still exists locally.
+    if (!entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
+      // If we've got a client-unique tag, we can undelete while retaining
+      // our present ID.
+      DCHECK_EQ(entry.Get(syncable::SERVER_VERSION), 0) << "For the server to "
+          "know to re-create, client-tagged items should revert to version 0 "
+          "when server-deleted.";
+      OverwriteServerChanges(trans, &entry);
+      status->increment_num_server_overwrites();
+      DVLOG(1) << "Resolving simple conflict, undeleting server entry: "
+               << entry;
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                OVERWRITE_SERVER,
+                                CONFLICT_RESOLUTION_SIZE);
+      // Clobber the versions, just in case the above DCHECK is violated.
+      entry.Put(syncable::SERVER_VERSION, 0);
+      entry.Put(syncable::BASE_VERSION, 0);
+    } else {
+      // Otherwise, we've got to undelete by creating a new locally
+      // uncommitted entry.
+      SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);
+
+      MutableEntry server_update(trans, syncable::GET_BY_ID, id);
+      CHECK(server_update.good());
+      CHECK(server_update.Get(syncable::META_HANDLE) !=
+            entry.Get(syncable::META_HANDLE))
+          << server_update << entry;
+      UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+                                UNDELETE,
+                                CONFLICT_RESOLUTION_SIZE);
+    }
+    return SYNC_PROGRESS;
+  }
+}
+
+// Iterates over all simple-conflict items, resolving each one. Chains of
+// conflicting items are resolved in predecessor -> successor order so that
+// the positional checks in ProcessSimpleConflict see already-resolved
+// predecessors. Returns true if any resolution made forward progress.
+bool ConflictResolver::ResolveConflicts(syncable::WriteTransaction* trans,
+                                        const Cryptographer* cryptographer,
+                                        const ConflictProgress& progress,
+                                        sessions::StatusController* status) {
+  bool forward_progress = false;
+  // Iterate over simple conflict items.
+  set<Id>::const_iterator conflicting_item_it;
+  set<Id> processed_items;
+  for (conflicting_item_it = progress.SimpleConflictingItemsBegin();
+       conflicting_item_it != progress.SimpleConflictingItemsEnd();
+       ++conflicting_item_it) {
+    Id id = *conflicting_item_it;
+    if (processed_items.count(id) > 0)
+      continue;
+
+    // We have a simple conflict. In order to check if positions have changed,
+    // we need to process conflicting predecessors before successors. Traverse
+    // backwards through all continuous conflicting predecessors, building a
+    // stack of items to resolve in predecessor->successor order, then process
+    // each item individually.
+    list<Id> predecessors;
+    Id prev_id = id;
+    do {
+      predecessors.push_back(prev_id);
+      Entry entry(trans, syncable::GET_BY_ID, prev_id);
+      // Any entry in conflict must be valid.
+      CHECK(entry.good());
+      Id new_prev_id = entry.Get(syncable::PREV_ID);
+      // Stop if the chain fails to advance, so we can't loop forever.
+      if (new_prev_id == prev_id)
+        break;
+      prev_id = new_prev_id;
+    } while (processed_items.count(prev_id) == 0 &&
+             progress.HasSimpleConflictItem(prev_id));  // Excludes root.
+    while (!predecessors.empty()) {
+      id = predecessors.back();
+      predecessors.pop_back();
+      switch (ProcessSimpleConflict(trans, id, cryptographer, status)) {
+        case NO_SYNC_PROGRESS:
+          break;
+        case SYNC_PROGRESS:
+          forward_progress = true;
+          break;
+      }
+      processed_items.insert(id);
+    }
+  }
+  return forward_progress;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/conflict_resolver.h b/sync/engine/conflict_resolver.h
new file mode 100644
index 0000000..d2d89d0
--- /dev/null
+++ b/sync/engine/conflict_resolver.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class that watches the syncer and attempts to resolve any conflicts that
+// occur.
+
+#ifndef SYNC_ENGINE_CONFLICT_RESOLVER_H_
+#define SYNC_ENGINE_CONFLICT_RESOLVER_H_
+#pragma once
+
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "sync/engine/syncer_types.h"
+
+namespace syncable {
+class BaseTransaction;
+class Id;
+class MutableEntry;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class Cryptographer;
+
+namespace sessions {
+class ConflictProgress;
+class StatusController;
+} // namespace sessions
+
+class ConflictResolver {
+  friend class SyncerTest;
+  FRIEND_TEST_ALL_PREFIXES(SyncerTest,
+                           ConflictResolverMergeOverwritesLocalEntry);
+ public:
+  // Enumeration of different conflict resolutions. Used for histogramming.
+  enum SimpleConflictResolutions {
+    OVERWRITE_LOCAL,     // Resolved by overwriting local changes.
+    OVERWRITE_SERVER,    // Resolved by overwriting server changes.
+    UNDELETE,            // Resolved by undeleting local item.
+    IGNORE_ENCRYPTION,   // Resolved by ignoring an encryption-only server
+                         // change.
+    NIGORI_MERGE,        // Resolved by merging nigori nodes.
+    CHANGES_MATCH,       // Resolved by ignoring both local and server
+                         // changes because they matched.
+    CONFLICT_RESOLUTION_SIZE,
+  };
+
+  ConflictResolver();
+  ~ConflictResolver();
+  // Called by the syncer at the end of an update/commit cycle.
+  // Returns true if the syncer should try to apply its updates again.
+  bool ResolveConflicts(syncable::WriteTransaction* trans,
+                        const Cryptographer* cryptographer,
+                        const sessions::ConflictProgress& progress,
+                        sessions::StatusController* status);
+
+ private:
+  enum ProcessSimpleConflictResult {
+    NO_SYNC_PROGRESS,  // No changes to advance syncing made.
+    SYNC_PROGRESS,     // Progress made.
+  };
+
+  // Marks |entry| as no longer needing a commit.
+  void IgnoreLocalChanges(syncable::MutableEntry* entry);
+  // Discards |entry|'s pending server update in favor of the local data.
+  void OverwriteServerChanges(syncable::WriteTransaction* trans,
+                              syncable::MutableEntry* entry);
+
+  // Resolves a single item that has both IS_UNSYNCED and IS_UNAPPLIED_UPDATE
+  // set.
+  ProcessSimpleConflictResult ProcessSimpleConflict(
+      syncable::WriteTransaction* trans,
+      const syncable::Id& id,
+      const Cryptographer* cryptographer,
+      sessions::StatusController* status);
+
+  DISALLOW_COPY_AND_ASSIGN(ConflictResolver);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_CONFLICT_RESOLVER_H_
diff --git a/sync/engine/download_updates_command.cc b/sync/engine/download_updates_command.cc
new file mode 100644
index 0000000..385649c
--- /dev/null
+++ b/sync/engine/download_updates_command.cc
@@ -0,0 +1,130 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/download_updates_command.h"
+
+#include <string>
+
+#include "base/command_line.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/syncable/model_type_payload_map.h"
+#include "sync/syncable/syncable.h"
+
+using sync_pb::DebugInfo;
+
+namespace browser_sync {
+using sessions::StatusController;
+using sessions::SyncSession;
+using std::string;
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+using syncable::ModelTypeSet;
+using syncable::ModelTypeSetToString;
+
+// |create_mobile_bookmarks_folder| is simply recorded here; ExecuteImpl()
+// forwards it to the server in each GetUpdates request.
+DownloadUpdatesCommand::DownloadUpdatesCommand(
+    bool create_mobile_bookmarks_folder)
+    : create_mobile_bookmarks_folder_(create_mobile_bookmarks_folder) {}
+
+DownloadUpdatesCommand::~DownloadUpdatesCommand() {}
+
+// Builds and posts a single GetUpdates request covering every enabled type,
+// then stores the server's response on the session's StatusController.
+SyncerError DownloadUpdatesCommand::ExecuteImpl(SyncSession* session) {
+  ClientToServerMessage client_to_server_message;
+  ClientToServerResponse update_response;
+
+  client_to_server_message.set_share(session->context()->account_name());
+  client_to_server_message.set_message_contents(
+      ClientToServerMessage::GET_UPDATES);
+  GetUpdatesMessage* get_updates =
+      client_to_server_message.mutable_get_updates();
+  get_updates->set_create_mobile_bookmarks_folder(
+      create_mobile_bookmarks_folder_);
+
+  syncable::Directory* dir = session->context()->directory();
+
+  // Request updates for all enabled types.
+  const ModelTypeSet enabled_types =
+      GetRoutingInfoTypes(session->routing_info());
+  DVLOG(1) << "Getting updates for types "
+           << ModelTypeSetToString(enabled_types);
+  DCHECK(!enabled_types.Empty());
+
+  // Attach each type's download progress marker, plus any notification
+  // payload that arrived for it, as the per-type resume point / hint.
+  const syncable::ModelTypePayloadMap& type_payload_map =
+      session->source().types;
+  for (ModelTypeSet::Iterator it = enabled_types.First();
+       it.Good(); it.Inc()) {
+    sync_pb::DataTypeProgressMarker* progress_marker =
+        get_updates->add_from_progress_marker();
+    dir->GetDownloadProgress(it.Get(), progress_marker);
+
+    // Set notification hint if present.
+    syncable::ModelTypePayloadMap::const_iterator type_payload =
+        type_payload_map.find(it.Get());
+    if (type_payload != type_payload_map.end()) {
+      progress_marker->set_notification_hint(type_payload->second);
+    }
+  }
+
+  // We want folders for our associated types, always. If we were to set
+  // this to false, the server would send just the non-container items
+  // (e.g. Bookmark URLs but not their containing folders).
+  get_updates->set_fetch_folders(true);
+
+  // Set GetUpdatesMessage.GetUpdatesCallerInfo information.
+  get_updates->mutable_caller_info()->set_source(
+      session->source().updates_source);
+  get_updates->mutable_caller_info()->set_notifications_enabled(
+      session->context()->notifications_enabled());
+
+  SyncerProtoUtil::AddRequestBirthday(dir, &client_to_server_message);
+
+  DebugInfo* debug_info = client_to_server_message.mutable_debug_info();
+
+  AppendClientDebugInfoIfNeeded(session, debug_info);
+
+  SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
+      client_to_server_message,
+      &update_response,
+      session);
+
+  DVLOG(2) << SyncerProtoUtil::ClientToServerResponseDebugString(
+      update_response);
+
+  StatusController* status = session->mutable_status_controller();
+  status->set_updates_request_types(enabled_types);
+  if (result != SYNCER_OK) {
+    status->mutable_updates_response()->Clear();
+    LOG(ERROR) << "PostClientToServerMessage() failed during GetUpdates";
+    return result;
+  }
+
+  status->mutable_updates_response()->CopyFrom(update_response);
+
+  DVLOG(1) << "GetUpdates"
+           << " returned " << update_response.get_updates().entries_size()
+           << " updates and indicated "
+           << update_response.get_updates().changes_remaining()
+           << " updates left on server.";
+  return result;
+}
+
+// Copies accumulated client-side debug info into |debug_info|, at most once
+// per sync cycle (tracked via StatusController::debug_info_sent()).
+void DownloadUpdatesCommand::AppendClientDebugInfoIfNeeded(
+    sessions::SyncSession* session,
+    DebugInfo* debug_info) {
+  // We want to send the debug info only once per sync cycle. Check if it has
+  // already been sent.
+  if (!session->status_controller().debug_info_sent()) {
+    DVLOG(1) << "Sending client debug info ...";
+    // The getter may be NULL in some unit tests.
+    if (session->context()->debug_info_getter()) {
+      session->context()->debug_info_getter()->GetAndClearDebugInfo(
+          debug_info);
+    }
+    // Mark as sent even when no getter was present, so we don't retry
+    // within this cycle.
+    session->mutable_status_controller()->set_debug_info_sent();
+  }
+}
+
+
+} // namespace browser_sync
diff --git a/sync/engine/download_updates_command.h b/sync/engine/download_updates_command.h
new file mode 100644
index 0000000..9e71610
--- /dev/null
+++ b/sync/engine/download_updates_command.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+#define SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/syncer_command.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/syncable/model_type.h"
+
+namespace sync_pb {
+class EntitySpecifics;
+}
+
+namespace browser_sync {
+
+// Determine the enabled datatypes, download a batch of updates for them
+// from the server, place the result in the SyncSession for further processing.
+//
+// The main inputs to this operation are the download_progress state
+// in the syncable::Directory, and the set of enabled types as indicated by
+// the SyncSession. DownloadUpdatesCommand will fetch updates for
+// all the enabled types, using download_progress to indicate the starting
+// point to the server. DownloadUpdatesCommand stores the server response
+// in the SyncSession. Only one server request is performed per Execute
+// operation. A loop that causes multiple Execute operations within a sync
+// session can be found in the Syncer logic. When looping, the
+// DownloadUpdatesCommand consumes the information stored by the
+// StoreTimestampsCommand.
+//
+// In practice, DownloadUpdatesCommand should loop until all updates are
+// downloaded for all enabled datatypes (i.e., until the server indicates
+// changes_remaining == 0 in the GetUpdates response), or until an error
+// is encountered.
+class DownloadUpdatesCommand : public SyncerCommand {
+ public:
+  // |create_mobile_bookmarks_folder| controls whether or not to
+  // create the mobile bookmarks folder if it's not already created.
+  // Should be set to true only by mobile clients.
+  explicit DownloadUpdatesCommand(bool create_mobile_bookmarks_folder);
+  virtual ~DownloadUpdatesCommand();
+
+  // SyncerCommand implementation.
+  virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(DownloadUpdatesCommandTest, VerifyAppendDebugInfo);
+  // Appends accumulated client debug info to |debug_info|, at most once per
+  // sync cycle.
+  void AppendClientDebugInfoIfNeeded(sessions::SyncSession* session,
+                                     sync_pb::DebugInfo* debug_info);
+
+  // Sent to the server with each GetUpdates request; see constructor.
+  const bool create_mobile_bookmarks_folder_;
+
+  DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+
diff --git a/sync/engine/download_updates_command_unittest.cc b/sync/engine/download_updates_command_unittest.cc
new file mode 100644
index 0000000..9a6c280
--- /dev/null
+++ b/sync/engine/download_updates_command_unittest.cc
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/download_updates_command.h"
+#include "sync/protocol/autofill_specifics.pb.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+
+using ::testing::_;
+namespace browser_sync {
+
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+
+// A test fixture for tests exercising DownloadUpdatesCommandTest.
+// A test fixture for tests exercising DownloadUpdatesCommand.
+class DownloadUpdatesCommandTest : public SyncerCommandTest {
+ protected:
+  DownloadUpdatesCommandTest()
+      : command_(true /* create_mobile_bookmarks_folder */) {}
+
+  virtual void SetUp() {
+    workers()->clear();
+    mutable_routing_info()->clear();
+    // Route AUTOFILL to a GROUP_DB worker and BOOKMARKS/PREFERENCES to a
+    // GROUP_UI worker, so requests span multiple model-safe groups.
+    workers()->push_back(
+        make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
+    workers()->push_back(
+        make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+    (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
+    (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+    (*mutable_routing_info())[syncable::PREFERENCES] = GROUP_UI;
+    SyncerCommandTest::SetUp();
+  }
+
+  // The command under test, configured to request the mobile bookmarks
+  // folder.
+  DownloadUpdatesCommand command_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesCommandTest);
+};
+
+TEST_F(DownloadUpdatesCommandTest, ExecuteNoPayloads) {
+  ConfigureMockServerConnection();
+  // With no notification payloads in the source, the request should simply
+  // cover every routed type.
+  mock_server()->ExpectGetUpdatesRequestTypes(
+      GetRoutingInfoTypes(routing_info()));
+  command_.ExecuteImpl(session());
+}
+
+TEST_F(DownloadUpdatesCommandTest, ExecuteWithPayloads) {
+  ConfigureMockServerConnection();
+  // Attach a notification payload to each routed type; they should show up
+  // as per-type notification hints on the GetUpdates request.
+  sessions::SyncSourceInfo source;
+  source.types[syncable::AUTOFILL] = "autofill_payload";
+  source.types[syncable::BOOKMARKS] = "bookmark_payload";
+  source.types[syncable::PREFERENCES] = "preferences_payload";
+  mock_server()->ExpectGetUpdatesRequestTypes(
+      GetRoutingInfoTypes(routing_info()));
+  mock_server()->ExpectGetUpdatesRequestPayloads(source.types);
+  command_.ExecuteImpl(session(source));
+}
+
+TEST_F(DownloadUpdatesCommandTest, VerifyAppendDebugInfo) {
+  sync_pb::DebugInfo debug_info;
+  // The first call in a sync cycle should fetch (and clear) the debug info
+  // from the getter exactly once.
+  EXPECT_CALL(*(mock_debug_info_getter()), GetAndClearDebugInfo(_))
+      .Times(1);
+  command_.AppendClientDebugInfoIfNeeded(session(), &debug_info);
+
+  // Now try to add it once more and make sure |GetAndClearDebugInfo| is not
+  // called.
+  command_.AppendClientDebugInfoIfNeeded(session(), &debug_info);
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/get_commit_ids_command.cc b/sync/engine/get_commit_ids_command.cc
new file mode 100644
index 0000000..55d7e23
--- /dev/null
+++ b/sync/engine/get_commit_ids_command.cc
@@ -0,0 +1,434 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/get_commit_ids_command.h"
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "sync/engine/nigori_util.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/cryptographer.h"
+
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+
+using sessions::OrderedCommitSet;
+using sessions::SyncSession;
+using sessions::StatusController;
+
+GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
+ : requested_commit_batch_size_(commit_batch_size) {}
+
+GetCommitIdsCommand::~GetCommitIdsCommand() {}
+
+SyncerError GetCommitIdsCommand::ExecuteImpl(SyncSession* session) {
+ // Gather the full set of unsynced items and store it in the session. They
+ // are not in the correct order for commit.
+ std::set<int64> ready_unsynced_set;
+ syncable::Directory::UnsyncedMetaHandles all_unsynced_handles;
+ SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
+ &all_unsynced_handles);
+
+ syncable::ModelTypeSet encrypted_types;
+ bool passphrase_missing = false;
+ Cryptographer* cryptographer =
+ session->context()->
+ directory()->GetCryptographer(session->write_transaction());
+ if (cryptographer) {
+ encrypted_types = cryptographer->GetEncryptedTypes();
+ passphrase_missing = cryptographer->has_pending_keys();
+ };
+
+ const syncable::ModelTypeSet throttled_types =
+ session->context()->GetThrottledTypes();
+ // We filter out all unready entries from the set of unsynced handles. This
+ // new set of ready and unsynced items (which excludes throttled items as
+ // well) is then what we use to determine what is a candidate for commit.
+ FilterUnreadyEntries(session->write_transaction(),
+ throttled_types,
+ encrypted_types,
+ passphrase_missing,
+ all_unsynced_handles,
+ &ready_unsynced_set);
+
+ BuildCommitIds(session->write_transaction(),
+ session->routing_info(),
+ ready_unsynced_set);
+
+ StatusController* status = session->mutable_status_controller();
+ syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
+ ready_unsynced_set.begin(), ready_unsynced_set.end());
+ status->set_unsynced_handles(ready_unsynced_vector);
+ const vector<syncable::Id>& verified_commit_ids =
+ ordered_commit_set_->GetAllCommitIds();
+
+ for (size_t i = 0; i < verified_commit_ids.size(); i++)
+ DVLOG(1) << "Debug commit batch result:" << verified_commit_ids[i];
+
+ status->set_commit_set(*ordered_commit_set_.get());
+ return SYNCER_OK;
+}
+
+namespace {
+
+bool IsEntryInConflict(const syncable::Entry& entry) {
+ if (entry.Get(syncable::IS_UNSYNCED) &&
+ entry.Get(syncable::SERVER_VERSION) > 0 &&
+ (entry.Get(syncable::SERVER_VERSION) >
+ entry.Get(syncable::BASE_VERSION))) {
+ // The local and server versions don't match. The item must be in
+ // conflict, so there's no point in attempting to commit.
+ DCHECK(entry.Get(syncable::IS_UNAPPLIED_UPDATE));
+ DVLOG(1) << "Excluding entry from commit due to version mismatch "
+ << entry;
+ return true;
+ }
+ return false;
+}
+
+// An entry is not considered ready for commit if any are true:
+// 1. It's in conflict.
+// 2. It requires encryption (either the type is encrypted but a passphrase
+// is missing from the cryptographer, or the entry itself wasn't properly
+// encrypted).
+//     3. Its type is currently throttled.
+//     4. It's a delete but was never committed to the server.
+bool IsEntryReadyForCommit(syncable::ModelTypeSet throttled_types,
+ syncable::ModelTypeSet encrypted_types,
+ bool passphrase_missing,
+ const syncable::Entry& entry) {
+ DCHECK(entry.Get(syncable::IS_UNSYNCED));
+ if (IsEntryInConflict(entry))
+ return false;
+
+ const syncable::ModelType type = entry.GetModelType();
+ // We special case the nigori node because even though it is considered an
+ // "encrypted type", not all nigori node changes require valid encryption
+ // (ex: sync_tabs).
+ if ((type != syncable::NIGORI) &&
+ encrypted_types.Has(type) &&
+ (passphrase_missing ||
+ syncable::EntryNeedsEncryption(encrypted_types, entry))) {
+ // This entry requires encryption but is not properly encrypted (possibly
+ // due to the cryptographer not being initialized or the user hasn't
+ // provided the most recent passphrase).
+ DVLOG(1) << "Excluding entry from commit due to lack of encryption "
+ << entry;
+ return false;
+ }
+
+ // Look at the throttled types.
+ if (throttled_types.Has(type))
+ return false;
+
+ // Drop deleted uncommitted entries.
+ if (entry.Get(syncable::IS_DEL) && !entry.Get(syncable::ID).ServerKnows()) {
+ // TODO(zea): These will remain unsynced indefinitely. This is harmless,
+ // but we should clean them up somewhere.
+ DVLOG(1) << "Ignoring deleted and uncommitted item." << entry;
+ return false;
+ }
+
+ // Extra validity checks.
+ syncable::Id id = entry.Get(syncable::ID);
+ if (id == entry.Get(syncable::PARENT_ID)) {
+ CHECK(id.IsRoot()) << "Non-root item is self parenting." << entry;
+ // If the root becomes unsynced it can cause us problems.
+ NOTREACHED() << "Root item became unsynced " << entry;
+ return false;
+ }
+
+ if (entry.IsRoot()) {
+ NOTREACHED() << "Permanent item became unsynced " << entry;
+ return false;
+ }
+
+ DVLOG(2) << "Entry is ready for commit: " << entry;
+ return true;
+}
+
+} // namespace
+
+void GetCommitIdsCommand::FilterUnreadyEntries(
+ syncable::BaseTransaction* trans,
+ syncable::ModelTypeSet throttled_types,
+ syncable::ModelTypeSet encrypted_types,
+ bool passphrase_missing,
+ const syncable::Directory::UnsyncedMetaHandles& unsynced_handles,
+ std::set<int64>* ready_unsynced_set) {
+ for (syncable::Directory::UnsyncedMetaHandles::const_iterator iter =
+ unsynced_handles.begin(); iter != unsynced_handles.end(); ++iter) {
+ syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter);
+ if (IsEntryReadyForCommit(throttled_types,
+ encrypted_types,
+ passphrase_missing,
+ entry)) {
+ ready_unsynced_set->insert(*iter);
+ }
+ }
+}
+
+bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ sessions::OrderedCommitSet* result) const {
+ OrderedCommitSet item_dependencies(routes);
+ syncable::Id parent_id = item.Get(syncable::PARENT_ID);
+
+ // Climb the tree adding entries leaf -> root.
+ while (!parent_id.ServerKnows()) {
+ syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+ CHECK(parent.good()) << "Bad user-only parent in item path.";
+ int64 handle = parent.Get(syncable::META_HANDLE);
+ if (ordered_commit_set_->HaveCommitItem(handle)) {
+ // We've already added this parent (and therefore all of its parents).
+ // We can return early.
+ break;
+ }
+ if (!AddItemThenPredecessors(trans, ready_unsynced_set, parent,
+ &item_dependencies)) {
+ // There was a parent/predecessor in conflict. We return without adding
+ // anything to |ordered_commit_set_|.
+ DVLOG(1) << "Parent or parent's predecessor was in conflict, omitting "
+ << item;
+ return false;
+ }
+ parent_id = parent.Get(syncable::PARENT_ID);
+ }
+
+ // Reverse what we added to get the correct order.
+ result->AppendReverse(item_dependencies);
+ return true;
+}
+
+bool GetCommitIdsCommand::AddItem(const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ OrderedCommitSet* result) const {
+ DCHECK(item.Get(syncable::IS_UNSYNCED));
+ // An item in conflict means that dependent items (successors and children)
+ // cannot be added either.
+ if (IsEntryInConflict(item))
+ return false;
+ int64 item_handle = item.Get(syncable::META_HANDLE);
+ if (ready_unsynced_set.count(item_handle) == 0) {
+ // It's not in conflict, but not ready for commit. Just return true without
+ // adding it to the commit set.
+ return true;
+ }
+ result->AddCommitItem(item_handle, item.Get(syncable::ID),
+ item.GetModelType());
+ return true;
+}
+
+bool GetCommitIdsCommand::AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ OrderedCommitSet* result) const {
+ int64 item_handle = item.Get(syncable::META_HANDLE);
+ if (ordered_commit_set_->HaveCommitItem(item_handle)) {
+ // We've already added this item to the commit set, and so must have
+ // already added the predecessors as well.
+ return true;
+ }
+ if (!AddItem(ready_unsynced_set, item, result))
+ return false; // Item is in conflict.
+ if (item.Get(syncable::IS_DEL))
+ return true; // Deleted items have no predecessors.
+
+ syncable::Id prev_id = item.Get(syncable::PREV_ID);
+ while (!prev_id.IsRoot()) {
+ syncable::Entry prev(trans, syncable::GET_BY_ID, prev_id);
+ CHECK(prev.good()) << "Bad id when walking predecessors.";
+ if (!prev.Get(syncable::IS_UNSYNCED))
+ break;
+ int64 handle = prev.Get(syncable::META_HANDLE);
+ if (ordered_commit_set_->HaveCommitItem(handle)) {
+ // We've already added this item to the commit set, and so must have
+ // already added the predecessors as well.
+ return true;
+ }
+ if (!AddItem(ready_unsynced_set, prev, result))
+ return false; // Item is in conflict.
+ prev_id = prev.Get(syncable::PREV_ID);
+ }
+ return true;
+}
+
+bool GetCommitIdsCommand::AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ OrderedCommitSet* result) const {
+ OrderedCommitSet item_dependencies(routes);
+ if (!AddItemThenPredecessors(trans, ready_unsynced_set, item,
+ &item_dependencies)) {
+ // Either the item or its predecessors are in conflict, so don't add any
+ // items to the commit set.
+ DVLOG(1) << "Predecessor was in conflict, omitting " << item;
+ return false;
+ }
+
+ // Reverse what we added to get the correct order.
+ result->AppendReverse(item_dependencies);
+ return true;
+}
+
+bool GetCommitIdsCommand::IsCommitBatchFull() const {
+ return ordered_commit_set_->Size() >= requested_commit_batch_size_;
+}
+
+void GetCommitIdsCommand::AddCreatesAndMoves(
+ syncable::WriteTransaction* write_transaction,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set) {
+ // Add moves and creates, and prepend their uncommitted parents.
+ for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
+ !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
+ int64 metahandle = *iter;
+ if (ordered_commit_set_->HaveCommitItem(metahandle))
+ continue;
+
+ syncable::Entry entry(write_transaction,
+ syncable::GET_BY_HANDLE,
+ metahandle);
+ if (!entry.Get(syncable::IS_DEL)) {
+ // We only commit an item + its dependencies if it and all its
+ // dependencies are not in conflict.
+ OrderedCommitSet item_dependencies(routes);
+ if (AddUncommittedParentsAndTheirPredecessors(
+ write_transaction,
+ routes,
+ ready_unsynced_set,
+ entry,
+ &item_dependencies) &&
+ AddPredecessorsThenItem(write_transaction,
+ routes,
+ ready_unsynced_set,
+ entry,
+ &item_dependencies)) {
+ ordered_commit_set_->Append(item_dependencies);
+ }
+ }
+ }
+
+ // It's possible that we overcommitted while trying to expand dependent
+ // items. If so, truncate the set down to the allowed size.
+ ordered_commit_set_->Truncate(requested_commit_batch_size_);
+}
+
+void GetCommitIdsCommand::AddDeletes(
+ syncable::WriteTransaction* write_transaction,
+ const std::set<int64>& ready_unsynced_set) {
+ set<syncable::Id> legal_delete_parents;
+
+ for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
+ !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
+ int64 metahandle = *iter;
+ if (ordered_commit_set_->HaveCommitItem(metahandle))
+ continue;
+
+ syncable::Entry entry(write_transaction, syncable::GET_BY_HANDLE,
+ metahandle);
+
+ if (entry.Get(syncable::IS_DEL)) {
+ syncable::Entry parent(write_transaction, syncable::GET_BY_ID,
+ entry.Get(syncable::PARENT_ID));
+ // If the parent is deleted and unsynced, then any children of that
+ // parent don't need to be added to the delete queue.
+ //
+ // Note: the parent could be synced if there was an update deleting a
+      // folder when we had deleted all items in it.
+ // We may get more updates, or we may want to delete the entry.
+ if (parent.good() &&
+ parent.Get(syncable::IS_DEL) &&
+ parent.Get(syncable::IS_UNSYNCED)) {
+ // However, if an entry is moved, these rules can apply differently.
+ //
+ // If the entry was moved, then the destination parent was deleted,
+ // then we'll miss it in the roll up. We have to add it in manually.
+ // TODO(chron): Unit test for move / delete cases:
+ // Case 1: Locally moved, then parent deleted
+ // Case 2: Server moved, then locally issue recursive delete.
+ if (entry.Get(syncable::ID).ServerKnows() &&
+ entry.Get(syncable::PARENT_ID) !=
+ entry.Get(syncable::SERVER_PARENT_ID)) {
+ DVLOG(1) << "Inserting moved and deleted entry, will be missed by "
+ << "delete roll." << entry.Get(syncable::ID);
+
+ ordered_commit_set_->AddCommitItem(metahandle,
+ entry.Get(syncable::ID),
+ entry.GetModelType());
+ }
+
+ // Skip this entry since it's a child of a parent that will be
+ // deleted. The server will unroll the delete and delete the
+ // child as well.
+ continue;
+ }
+
+ legal_delete_parents.insert(entry.Get(syncable::PARENT_ID));
+ }
+ }
+
+ // We could store all the potential entries with a particular parent during
+ // the above scan, but instead we rescan here. This is less efficient, but
+ // we're dropping memory alloc/dealloc in favor of linear scans of recently
+ // examined entries.
+ //
+ // Scan through the UnsyncedMetaHandles again. If we have a deleted
+ // entry, then check if the parent is in legal_delete_parents.
+ //
+ // Parent being in legal_delete_parents means for the child:
+ // a recursive delete is not currently happening (no recent deletes in same
+ // folder)
+ // parent did expect at least one old deleted child
+ // parent was not deleted
+ for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
+ !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
+ int64 metahandle = *iter;
+ if (ordered_commit_set_->HaveCommitItem(metahandle))
+ continue;
+ syncable::MutableEntry entry(write_transaction, syncable::GET_BY_HANDLE,
+ metahandle);
+ if (entry.Get(syncable::IS_DEL)) {
+ syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
+ if (legal_delete_parents.count(parent_id)) {
+ ordered_commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID),
+ entry.GetModelType());
+ }
+ }
+ }
+}
+
+void GetCommitIdsCommand::BuildCommitIds(
+ syncable::WriteTransaction* write_transaction,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set) {
+ ordered_commit_set_.reset(new OrderedCommitSet(routes));
+ // Commits follow these rules:
+ // 1. Moves or creates are preceded by needed folder creates, from
+ // root to leaf. For folders whose contents are ordered, moves
+ // and creates appear in order.
+ // 2. Moves/Creates before deletes.
+ // 3. Deletes, collapsed.
+ // We commit deleted moves under deleted items as moves when collapsing
+ // delete trees.
+
+ // Add moves and creates, and prepend their uncommitted parents.
+ AddCreatesAndMoves(write_transaction, routes, ready_unsynced_set);
+
+ // Add all deletes.
+ AddDeletes(write_transaction, ready_unsynced_set);
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/get_commit_ids_command.h b/sync/engine/get_commit_ids_command.h
new file mode 100644
index 0000000..0a277f9
--- /dev/null
+++ b/sync/engine/get_commit_ids_command.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+#define SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+#pragma once
+
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "sync/engine/syncer_command.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/sessions/ordered_commit_set.h"
+#include "sync/sessions/sync_session.h"
+
+using std::pair;
+using std::vector;
+
+namespace browser_sync {
+
+class GetCommitIdsCommand : public SyncerCommand {
+ friend class SyncerTest;
+
+ public:
+ explicit GetCommitIdsCommand(int commit_batch_size);
+ virtual ~GetCommitIdsCommand();
+
+ // SyncerCommand implementation.
+ virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ // Builds a vector of IDs that should be committed.
+ void BuildCommitIds(syncable::WriteTransaction* write_transaction,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set);
+
+ // Fill |ready_unsynced_set| with all entries from |unsynced_handles| that
+ // are ready to commit.
+ // An entry is not considered ready for commit if any are true:
+ // 1. It's in conflict.
+ // 2. It requires encryption (either the type is encrypted but a passphrase
+ // is missing from the cryptographer, or the entry itself wasn't properly
+ // encrypted).
+  //    3. Its type is currently throttled.
+  //    4. It's a delete but was never committed to the server.
+ void FilterUnreadyEntries(
+ syncable::BaseTransaction* trans,
+ syncable::ModelTypeSet throttled_types,
+ syncable::ModelTypeSet encrypted_types,
+ bool passphrase_missing,
+ const syncable::Directory::UnsyncedMetaHandles& unsynced_handles,
+ std::set<int64>* ready_unsynced_set);
+
+ private:
+ // Add all the uncommitted parents (and their predecessors) of |item| to
+ // |result| if they are ready to commit. Entries are added in root->child
+ // order and predecessor->successor order.
+ // Returns values:
+  //     False: if a dependent item was in conflict, and hence no child can be
+ // committed.
+ // True: if all parents and their predecessors were checked for commit
+ // readiness and were added to |result| as necessary.
+ bool AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ sessions::OrderedCommitSet* result) const;
+
+ // OrderedCommitSet helpers for adding predecessors in order.
+
+ // Adds |item| to |result| if it's ready for committing and was not already
+ // present.
+ // Prereq: |item| is unsynced.
+ // Returns values:
+ // False: if |item| was in conflict.
+ // True: if |item| was checked for commit readiness and added to |result|
+ // as necessary.
+ bool AddItem(const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ sessions::OrderedCommitSet* result) const;
+
+  // Adds item and all its unsynced predecessors to |result| as necessary, as
+ // long as no item was in conflict.
+ // Return values:
+ // False: if there was an entry in conflict.
+ // True: if all entries were checked for commit readiness and added to
+ // |result| as necessary.
+ bool AddItemThenPredecessors(syncable::BaseTransaction* trans,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ sessions::OrderedCommitSet* result) const;
+
+ // Appends all commit ready predecessors of |item|, followed by |item| itself,
+  // to |ordered_commit_set_|, iff item and all its predecessors are not in
+ // conflict.
+ // Return values:
+ // False: if there was an entry in conflict.
+ // True: if all entries were checked for commit readiness and added to
+ // |result| as necessary.
+ bool AddPredecessorsThenItem(syncable::BaseTransaction* trans,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set,
+ const syncable::Entry& item,
+ sessions::OrderedCommitSet* result) const;
+
+ bool IsCommitBatchFull() const;
+
+ void AddCreatesAndMoves(syncable::WriteTransaction* write_transaction,
+ const ModelSafeRoutingInfo& routes,
+ const std::set<int64>& ready_unsynced_set);
+
+ void AddDeletes(syncable::WriteTransaction* write_transaction,
+ const std::set<int64>& ready_unsynced_set);
+
+ scoped_ptr<sessions::OrderedCommitSet> ordered_commit_set_;
+
+ int requested_commit_batch_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
diff --git a/sync/engine/model_changing_syncer_command.cc b/sync/engine/model_changing_syncer_command.cc
new file mode 100644
index 0000000..9409746
--- /dev/null
+++ b/sync/engine/model_changing_syncer_command.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/model_changing_syncer_command.h"
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/sessions/sync_session.h"
+
+namespace browser_sync {
+
+SyncerError ModelChangingSyncerCommand::ExecuteImpl(
+ sessions::SyncSession* session) {
+ work_session_ = session;
+ SyncerError result = ModelNeutralExecuteImpl(work_session_);
+
+ if (result != SYNCER_OK)
+ return result;
+
+ const std::set<ModelSafeGroup>& groups_to_change =
+ GetGroupsToChange(*work_session_);
+ for (size_t i = 0; i < session->workers().size(); ++i) {
+ ModelSafeWorker* worker = work_session_->workers()[i];
+ ModelSafeGroup group = worker->GetModelSafeGroup();
+ // Skip workers whose group isn't active.
+ if (groups_to_change.count(group) == 0u) {
+ DVLOG(2) << "Skipping worker for group "
+ << ModelSafeGroupToString(group);
+ continue;
+ }
+
+ sessions::StatusController* status =
+ work_session_->mutable_status_controller();
+ sessions::ScopedModelSafeGroupRestriction r(status, group);
+ WorkCallback c = base::Bind(
+ &ModelChangingSyncerCommand::StartChangingModel,
+ // We wait until the callback is executed. So it is safe to use
+ // unretained.
+ base::Unretained(this));
+
+ SyncerError this_worker_result = worker->DoWorkAndWaitUntilDone(c);
+ // TODO(rlarocque): Figure out a better way to deal with errors from
+ // multiple models at once. See also: crbug.com/109422.
+ if (this_worker_result != SYNCER_OK)
+ result = this_worker_result;
+ }
+
+ return result;
+}
+
+SyncerError ModelChangingSyncerCommand::ModelNeutralExecuteImpl(
+ sessions::SyncSession* session) {
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/model_changing_syncer_command.h b/sync/engine/model_changing_syncer_command.h
new file mode 100644
index 0000000..d09e4b7
--- /dev/null
+++ b/sync/engine/model_changing_syncer_command.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+#define SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/syncer_command.h"
+
+namespace browser_sync {
+namespace sessions {
+class SyncSession;
+}
+
+// An abstract SyncerCommand which dispatches its Execute step to the
+// model-safe worker thread. Classes derived from ModelChangingSyncerCommand
+// instead of SyncerCommand must implement ModelChangingExecuteImpl instead of
+// ExecuteImpl, but otherwise, the contract is the same.
+//
+// A command should derive from ModelChangingSyncerCommand instead of
+// SyncerCommand whenever the operation might change any client-visible
+// fields on any syncable::Entry. If the operation involves creating a
+// WriteTransaction, this is a sign that ModelChangingSyncerCommand is likely
+// necessary.
+class ModelChangingSyncerCommand : public SyncerCommand {
+ public:
+ ModelChangingSyncerCommand() : work_session_(NULL) { }
+ virtual ~ModelChangingSyncerCommand() { }
+
+ // SyncerCommand implementation. Sets work_session to session.
+ virtual browser_sync::SyncerError ExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE;
+
+ // Wrapper so implementations don't worry about storing work_session.
+ SyncerError StartChangingModel() {
+ return ModelChangingExecuteImpl(work_session_);
+ }
+
+ std::set<ModelSafeGroup> GetGroupsToChangeForTest(
+ const sessions::SyncSession& session) const {
+ return GetGroupsToChange(session);
+ }
+
+ protected:
+ // This should return the set of groups in |session| that need to be
+ // changed. The returned set should be a subset of
+ // session.GetEnabledGroups(). Subclasses can guarantee this either
+ // by calling one of the session.GetEnabledGroups*() functions and
+ // filtering that, or using GetGroupForModelType() (which handles
+ // top-level/unspecified nodes) to project from model types to
+ // groups.
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const = 0;
+
+ // Sometimes, a command has work to do that needs to touch global state
+ // belonging to multiple ModelSafeGroups, but in a way that is known to be
+ // safe. This will be called once, prior to ModelChangingExecuteImpl,
+ // *without* a ModelSafeGroup restriction in place on the SyncSession.
+ // Returns true on success, false on failure.
+ // TODO(tim): Remove this (bug 36594).
+ virtual SyncerError ModelNeutralExecuteImpl(sessions::SyncSession* session);
+
+ // Abstract method to be implemented by subclasses to handle logic that
+ // operates on the model. This is invoked with a SyncSession ModelSafeGroup
+ // restriction in place so that bits of state belonging to data types
+ // running on an unsafe thread are siloed away.
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) = 0;
+
+ private:
+ // ExecuteImpl is expected to be run by SyncerCommand to set work_session.
+ // StartChangingModel is called to start this command running.
+ // Implementations will implement ModelChangingExecuteImpl and not
+ // worry about storing the session or setting it. They are given work_session.
+ sessions::SyncSession* work_session_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
diff --git a/sync/engine/model_changing_syncer_command_unittest.cc b/sync/engine/model_changing_syncer_command_unittest.cc
new file mode 100644
index 0000000..615ee51
--- /dev/null
+++ b/sync/engine/model_changing_syncer_command_unittest.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "sync/engine/model_changing_syncer_command.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+namespace {
+
+class FakeModelChangingSyncerCommand : public ModelChangingSyncerCommand {
+ public:
+ FakeModelChangingSyncerCommand() {}
+ virtual ~FakeModelChangingSyncerCommand() {}
+
+ const std::set<ModelSafeGroup>& changed_groups() const {
+ return changed_groups_;
+ }
+
+ protected:
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const OVERRIDE {
+ return session.GetEnabledGroups();
+ }
+
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE {
+ changed_groups_.insert(session->status_controller().group_restriction());
+ return SYNCER_OK;
+ }
+
+ private:
+ std::set<ModelSafeGroup> changed_groups_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeModelChangingSyncerCommand);
+};
+
+class ModelChangingSyncerCommandTest : public SyncerCommandTest {
+ protected:
+ ModelChangingSyncerCommandTest() {}
+ virtual ~ModelChangingSyncerCommandTest() {}
+
+ virtual void SetUp() {
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
+ (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+ (*mutable_routing_info())[syncable::PASSWORDS] = GROUP_PASSWORD;
+ SyncerCommandTest::SetUp();
+ }
+
+ FakeModelChangingSyncerCommand command_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommandTest);
+};
+
+TEST_F(ModelChangingSyncerCommandTest, Basic) {
+ ExpectGroupsToChange(command_, GROUP_UI, GROUP_PASSWORD, GROUP_PASSIVE);
+ EXPECT_TRUE(command_.changed_groups().empty());
+ command_.ExecuteImpl(session());
+ EXPECT_EQ(command_.GetGroupsToChangeForTest(*session()),
+ command_.changed_groups());
+}
+
+} // namespace
+
+} // namespace browser_sync
diff --git a/sync/engine/model_safe_worker.cc b/sync/engine/model_safe_worker.cc
new file mode 100644
index 0000000..ab1f750
--- /dev/null
+++ b/sync/engine/model_safe_worker.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/model_safe_worker.h"
+
+#include "base/json/json_writer.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+
+namespace browser_sync {
+
+base::DictionaryValue* ModelSafeRoutingInfoToValue(
+ const ModelSafeRoutingInfo& routing_info) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
+ it != routing_info.end(); ++it) {
+ dict->SetString(syncable::ModelTypeToString(it->first),
+ ModelSafeGroupToString(it->second));
+ }
+ return dict;
+}
+
+std::string ModelSafeRoutingInfoToString(
+ const ModelSafeRoutingInfo& routing_info) {
+ scoped_ptr<DictionaryValue> dict(ModelSafeRoutingInfoToValue(routing_info));
+ std::string json;
+ base::JSONWriter::Write(dict.get(), false, &json);
+ return json;
+}
+
+syncable::ModelTypeSet GetRoutingInfoTypes(
+ const ModelSafeRoutingInfo& routing_info) {
+ syncable::ModelTypeSet types;
+ for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
+ it != routing_info.end(); ++it) {
+ types.Put(it->first);
+ }
+ return types;
+}
+
+ModelSafeGroup GetGroupForModelType(const syncable::ModelType type,
+ const ModelSafeRoutingInfo& routes) {
+ ModelSafeRoutingInfo::const_iterator it = routes.find(type);
+ if (it == routes.end()) {
+ if (type != syncable::UNSPECIFIED && type != syncable::TOP_LEVEL_FOLDER)
+ LOG(WARNING) << "Entry does not belong to active ModelSafeGroup!";
+ return GROUP_PASSIVE;
+ }
+ return it->second;
+}
+
+std::string ModelSafeGroupToString(ModelSafeGroup group) {
+ switch (group) {
+ case GROUP_UI:
+ return "GROUP_UI";
+ case GROUP_DB:
+ return "GROUP_DB";
+ case GROUP_FILE:
+ return "GROUP_FILE";
+ case GROUP_HISTORY:
+ return "GROUP_HISTORY";
+ case GROUP_PASSIVE:
+ return "GROUP_PASSIVE";
+ case GROUP_PASSWORD:
+ return "GROUP_PASSWORD";
+ default:
+ NOTREACHED();
+ return "INVALID";
+ }
+}
+
+ModelSafeWorker::~ModelSafeWorker() {}
+
+} // namespace browser_sync
diff --git a/sync/engine/model_safe_worker.h b/sync/engine/model_safe_worker.h
new file mode 100644
index 0000000..ced63d6
--- /dev/null
+++ b/sync/engine/model_safe_worker.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+#define SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "sync/syncable/model_type.h"
+#include "sync/util/syncer_error.h"
+
+namespace base {
+class DictionaryValue;
+}  // namespace base
+
+namespace browser_sync {
+
+typedef base::Callback<enum SyncerError(void)> WorkCallback;
+
+enum ModelSafeGroup {
+ GROUP_PASSIVE = 0, // Models that are just "passively" being synced; e.g.
+ // changes to these models don't need to be pushed to a
+ // native model.
+ GROUP_UI, // Models that live on UI thread and are being synced.
+ GROUP_DB, // Models that live on DB thread and are being synced.
+ GROUP_FILE, // Models that live on FILE thread and are being synced.
+ GROUP_HISTORY, // Models that live on history thread and are being
+ // synced.
+ GROUP_PASSWORD, // Models that live on the password thread and are
+ // being synced. On windows and linux, this runs on the
+ // DB thread.
+ MODEL_SAFE_GROUP_COUNT,
+};
+
+std::string ModelSafeGroupToString(ModelSafeGroup group);
+
+// The Syncer uses a ModelSafeWorker for all tasks that could potentially
+// modify syncable entries (e.g under a WriteTransaction). The ModelSafeWorker
+// only knows how to do one thing, and that is take some work (in a fully
+// pre-bound callback) and have it performed (as in Run()) from a thread which
+// is guaranteed to be "model-safe", where "safe" refers to not allowing us to
+// cause an embedding application model to fall out of sync with the
+// syncable::Directory due to a race.
+class ModelSafeWorker : public base::RefCountedThreadSafe<ModelSafeWorker> {
+ public:
+ // Any time the Syncer performs model modifications (e.g employing a
+ // WriteTransaction), it should be done by this method to ensure it is done
+ // from a model-safe thread.
+ virtual SyncerError DoWorkAndWaitUntilDone(const WorkCallback& work) = 0;
+
+ virtual ModelSafeGroup GetModelSafeGroup() = 0;
+
+ protected:
+ virtual ~ModelSafeWorker();
+
+ private:
+ friend class base::RefCountedThreadSafe<ModelSafeWorker>;
+};
+
+// A map that details which ModelSafeGroup each syncable::ModelType
+// belongs to. Routing info can change in response to the user enabling /
+// disabling sync for certain types, as well as model association completions.
+typedef std::map<syncable::ModelType, ModelSafeGroup>
+ ModelSafeRoutingInfo;
+
+// Caller takes ownership of return value.
+base::DictionaryValue* ModelSafeRoutingInfoToValue(
+ const ModelSafeRoutingInfo& routing_info);
+
+std::string ModelSafeRoutingInfoToString(
+ const ModelSafeRoutingInfo& routing_info);
+
+syncable::ModelTypeSet GetRoutingInfoTypes(
+ const ModelSafeRoutingInfo& routing_info);
+
+ModelSafeGroup GetGroupForModelType(const syncable::ModelType type,
+ const ModelSafeRoutingInfo& routes);
+
+// Maintain the up-to-date state regarding which ModelSafeWorkers exist and
+// which types get routed to which worker. When a sync session begins, it will
+// snapshot the state at that instant, and will use that for the entire
+// session. This means if a model becomes synced (or unsynced) by the user
+// during a sync session, that session will complete and be unaware of this
+// change -- it will only get picked up for the next session.
+// TODO(tim): That's really the only way I can make sense of it in the Syncer
+// HOWEVER, it is awkward for running ModelAssociation. We need to make sure
+// we don't run such a thing until an active session wraps up.
+class ModelSafeWorkerRegistrar {
+ public:
+ ModelSafeWorkerRegistrar() { }
+ // Get the current list of active ModelSafeWorkers. Should be threadsafe.
+ virtual void GetWorkers(std::vector<ModelSafeWorker*>* out) = 0;
+
+ // Get the current routing information for all enabled model types.
+ // If a model type is not enabled (that is, if the syncer should not
+ // be trying to sync it), it is not in this map.
+ virtual void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) = 0;
+ protected:
+ virtual ~ModelSafeWorkerRegistrar() {}
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ModelSafeWorkerRegistrar);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_MODEL_SAFE_WORKER_H_
diff --git a/sync/engine/model_safe_worker_unittest.cc b/sync/engine/model_safe_worker_unittest.cc
new file mode 100644
index 0000000..741c724
--- /dev/null
+++ b/sync/engine/model_safe_worker_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/model_safe_worker.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+class ModelSafeWorkerTest : public ::testing::Test {
+};
+
+TEST_F(ModelSafeWorkerTest, ModelSafeRoutingInfoToValue) {
+ ModelSafeRoutingInfo routing_info;
+ routing_info[syncable::BOOKMARKS] = GROUP_PASSIVE;
+ routing_info[syncable::NIGORI] = GROUP_UI;
+ routing_info[syncable::PREFERENCES] = GROUP_DB;
+ DictionaryValue expected_value;
+ expected_value.SetString("Bookmarks", "GROUP_PASSIVE");
+ expected_value.SetString("Encryption keys", "GROUP_UI");
+ expected_value.SetString("Preferences", "GROUP_DB");
+ scoped_ptr<DictionaryValue> value(
+ ModelSafeRoutingInfoToValue(routing_info));
+ EXPECT_TRUE(value->Equals(&expected_value));
+}
+
+TEST_F(ModelSafeWorkerTest, ModelSafeRoutingInfoToString) {
+ ModelSafeRoutingInfo routing_info;
+ routing_info[syncable::BOOKMARKS] = GROUP_PASSIVE;
+ routing_info[syncable::NIGORI] = GROUP_UI;
+ routing_info[syncable::PREFERENCES] = GROUP_DB;
+ EXPECT_EQ(
+ "{\"Bookmarks\":\"GROUP_PASSIVE\",\"Encryption keys\":\"GROUP_UI\","
+ "\"Preferences\":\"GROUP_DB\"}",
+ ModelSafeRoutingInfoToString(routing_info));
+}
+
+TEST_F(ModelSafeWorkerTest, GetRoutingInfoTypes) {
+ ModelSafeRoutingInfo routing_info;
+ routing_info[syncable::BOOKMARKS] = GROUP_PASSIVE;
+ routing_info[syncable::NIGORI] = GROUP_UI;
+ routing_info[syncable::PREFERENCES] = GROUP_DB;
+ const syncable::ModelTypeSet expected_types(
+ syncable::BOOKMARKS,
+ syncable::NIGORI,
+ syncable::PREFERENCES);
+ EXPECT_TRUE(GetRoutingInfoTypes(routing_info).Equals(expected_types));
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/engine/net/DEPS b/sync/engine/net/DEPS
new file mode 100644
index 0000000..8fa9d48
--- /dev/null
+++ b/sync/engine/net/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+net",
+]
diff --git a/sync/engine/net/server_connection_manager.cc b/sync/engine/net/server_connection_manager.cc
new file mode 100644
index 0000000..94d01f2
--- /dev/null
+++ b/sync/engine/net/server_connection_manager.cc
@@ -0,0 +1,414 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/net/server_connection_manager.h"
+
+#include <errno.h>
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "base/command_line.h"
+#include "build/build_config.h"
+#include "googleurl/src/gurl.h"
+#include "net/http/http_status_code.h"
+#include "sync/engine/net/url_translator.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/sync.pb.h"
+
+namespace browser_sync {
+
+using std::ostream;
+using std::string;
+using std::vector;
+
+static const char kSyncServerSyncPath[] = "/command/";
+
+// At the /time/ path of the sync server, we expect to find a very simple
+// time of day service that we can use to synchronize the local clock with
+// server time.
+static const char kSyncServerGetTimePath[] = "/time";
+
+HttpResponse::HttpResponse()
+ : response_code(kUnsetResponseCode),
+ content_length(kUnsetContentLength),
+ payload_length(kUnsetPayloadLength),
+ server_status(NONE) {}
+
+#define ENUM_CASE(x) case x: return #x; break
+
+const char* HttpResponse::GetServerConnectionCodeString(
+ ServerConnectionCode code) {
+ switch (code) {
+ ENUM_CASE(NONE);
+ ENUM_CASE(CONNECTION_UNAVAILABLE);
+ ENUM_CASE(IO_ERROR);
+ ENUM_CASE(SYNC_SERVER_ERROR);
+ ENUM_CASE(SYNC_AUTH_ERROR);
+ ENUM_CASE(SERVER_CONNECTION_OK);
+ ENUM_CASE(RETRY);
+ }
+ NOTREACHED();
+ return "";
+}
+
+#undef ENUM_CASE
+
+ServerConnectionManager::Connection::Connection(
+ ServerConnectionManager* scm) : scm_(scm) {
+}
+
+ServerConnectionManager::Connection::~Connection() {
+}
+
+bool ServerConnectionManager::Connection::ReadBufferResponse(
+ string* buffer_out,
+ HttpResponse* response,
+ bool require_response) {
+ if (net::HTTP_OK != response->response_code) {
+ response->server_status = HttpResponse::SYNC_SERVER_ERROR;
+ return false;
+ }
+
+ if (require_response && (1 > response->content_length))
+ return false;
+
+ const int64 bytes_read = ReadResponse(buffer_out,
+ static_cast<int>(response->content_length));
+ if (bytes_read != response->content_length) {
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+bool ServerConnectionManager::Connection::ReadDownloadResponse(
+ HttpResponse* response,
+ string* buffer_out) {
+ const int64 bytes_read = ReadResponse(buffer_out,
+ static_cast<int>(response->content_length));
+
+ if (bytes_read != response->content_length) {
+ LOG(ERROR) << "Mismatched content lengths, server claimed " <<
+ response->content_length << ", but sent " << bytes_read;
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+ServerConnectionManager::ScopedConnectionHelper::ScopedConnectionHelper(
+ ServerConnectionManager* manager, Connection* connection)
+ : manager_(manager), connection_(connection) {}
+
+ServerConnectionManager::ScopedConnectionHelper::~ScopedConnectionHelper() {
+ if (connection_.get())
+ manager_->OnConnectionDestroyed(connection_.get());
+ connection_.reset();
+}
+
+ServerConnectionManager::Connection*
+ServerConnectionManager::ScopedConnectionHelper::get() {
+ return connection_.get();
+}
+
+namespace {
+
+string StripTrailingSlash(const string& s) {
+ int stripped_end_pos = s.size();
+ if (s.at(stripped_end_pos - 1) == '/') {
+ stripped_end_pos = stripped_end_pos - 1;
+ }
+
+ return s.substr(0, stripped_end_pos);
+}
+
+} // namespace
+
+// TODO(chron): Use a GURL instead of string concatenation.
+string ServerConnectionManager::Connection::MakeConnectionURL(
+ const string& sync_server,
+ const string& path,
+ bool use_ssl) const {
+ string connection_url = (use_ssl ? "https://" : "http://");
+ connection_url += sync_server;
+ connection_url = StripTrailingSlash(connection_url);
+ connection_url += path;
+
+ return connection_url;
+}
+
+int ServerConnectionManager::Connection::ReadResponse(string* out_buffer,
+ int length) {
+ int bytes_read = buffer_.length();
+ CHECK(length <= bytes_read);
+ out_buffer->assign(buffer_);
+ return bytes_read;
+}
+
+ScopedServerStatusWatcher::ScopedServerStatusWatcher(
+ ServerConnectionManager* conn_mgr, HttpResponse* response)
+ : conn_mgr_(conn_mgr),
+ response_(response),
+ server_reachable_(conn_mgr->server_reachable_) {
+ response->server_status = conn_mgr->server_status_;
+}
+
+ScopedServerStatusWatcher::~ScopedServerStatusWatcher() {
+ if (conn_mgr_->server_status_ != response_->server_status) {
+ conn_mgr_->server_status_ = response_->server_status;
+ conn_mgr_->NotifyStatusChanged();
+ return;
+ }
+ // Notify if we've gone on or offline.
+ if (server_reachable_ != conn_mgr_->server_reachable_)
+ conn_mgr_->NotifyStatusChanged();
+}
+
+ServerConnectionManager::ServerConnectionManager(
+ const string& server,
+ int port,
+ bool use_ssl,
+ const string& user_agent)
+ : sync_server_(server),
+ sync_server_port_(port),
+ user_agent_(user_agent),
+ use_ssl_(use_ssl),
+ proto_sync_path_(kSyncServerSyncPath),
+ get_time_path_(kSyncServerGetTimePath),
+ server_status_(HttpResponse::NONE),
+ server_reachable_(false),
+ terminated_(false),
+ active_connection_(NULL) {
+}
+
+ServerConnectionManager::~ServerConnectionManager() {
+}
+
+ServerConnectionManager::Connection*
+ServerConnectionManager::MakeActiveConnection() {
+ base::AutoLock lock(terminate_connection_lock_);
+ DCHECK(!active_connection_);
+ if (terminated_)
+ return NULL;
+
+ active_connection_ = MakeConnection();
+ return active_connection_;
+}
+
+void ServerConnectionManager::OnConnectionDestroyed(Connection* connection) {
+ DCHECK(connection);
+ base::AutoLock lock(terminate_connection_lock_);
+ // |active_connection_| can be NULL already if it was aborted. Also,
+ // it can legitimately be a different Connection object if a new Connection
+ // was created after a previous one was Aborted and destroyed.
+ if (active_connection_ != connection)
+ return;
+
+ active_connection_ = NULL;
+}
+
+void ServerConnectionManager::NotifyStatusChanged() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ FOR_EACH_OBSERVER(ServerConnectionEventListener, listeners_,
+ OnServerConnectionEvent(
+ ServerConnectionEvent(server_status_, server_reachable_)));
+}
+
+bool ServerConnectionManager::PostBufferWithCachedAuth(
+ PostBufferParams* params, ScopedServerStatusWatcher* watcher) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ string path =
+ MakeSyncServerPath(proto_sync_path(), MakeSyncQueryString(client_id_));
+ return PostBufferToPath(params, path, auth_token(), watcher);
+}
+
+bool ServerConnectionManager::PostBufferToPath(PostBufferParams* params,
+ const string& path, const string& auth_token,
+ ScopedServerStatusWatcher* watcher) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(watcher != NULL);
+
+ if (auth_token.empty()) {
+ params->response.server_status = HttpResponse::SYNC_AUTH_ERROR;
+ return false;
+ }
+
+ // When our connection object falls out of scope, it clears itself from
+ // active_connection_.
+ ScopedConnectionHelper post(this, MakeActiveConnection());
+ if (!post.get()) {
+ params->response.server_status = HttpResponse::CONNECTION_UNAVAILABLE;
+ return false;
+ }
+
+ // Note that |post| may be aborted by now, which will just cause Init to fail
+ // with CONNECTION_UNAVAILABLE.
+ bool ok = post.get()->Init(
+ path.c_str(), auth_token, params->buffer_in, &params->response);
+
+ if (params->response.server_status == HttpResponse::SYNC_AUTH_ERROR)
+ InvalidateAndClearAuthToken();
+
+ if (!ok || net::HTTP_OK != params->response.response_code)
+ return false;
+
+ if (post.get()->ReadBufferResponse(
+ &params->buffer_out, &params->response, true)) {
+ params->response.server_status = HttpResponse::SERVER_CONNECTION_OK;
+ server_reachable_ = true;
+ return true;
+ }
+ return false;
+}
+
+bool ServerConnectionManager::CheckTime(int32* out_time) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Verify that the server really is reachable by checking the time. We need
+ // to do this because of wifi interstitials that intercept messages from the
+ // client and return HTTP OK instead of a redirect.
+ HttpResponse response;
+ ScopedServerStatusWatcher watcher(this, &response);
+ string post_body = "command=get_time";
+
+ for (int i = 0 ; i < 3; i++) {
+ ScopedConnectionHelper post(this, MakeActiveConnection());
+ if (!post.get())
+ break;
+
+ // Note that the server's get_time path doesn't require authentication.
+ string get_time_path =
+ MakeSyncServerPath(kSyncServerGetTimePath, post_body);
+ DVLOG(1) << "Requesting get_time from:" << get_time_path;
+
+ string blank_post_body;
+ bool ok = post.get()->Init(get_time_path.c_str(), blank_post_body,
+ blank_post_body, &response);
+ if (!ok) {
+ DVLOG(1) << "Unable to check the time";
+ continue;
+ }
+ string time_response;
+ time_response.resize(
+ static_cast<string::size_type>(response.content_length));
+ ok = post.get()->ReadDownloadResponse(&response, &time_response);
+ if (!ok || string::npos !=
+ time_response.find_first_not_of("0123456789")) {
+ LOG(ERROR) << "unable to read a non-numeric response from get_time:"
+ << time_response;
+ continue;
+ }
+ *out_time = atoi(time_response.c_str());
+ DVLOG(1) << "Server was reachable.";
+ return true;
+ }
+ return false;
+}
+
+bool ServerConnectionManager::IsServerReachable() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int32 time;
+ return CheckTime(&time);
+}
+
+bool ServerConnectionManager::IsUserAuthenticated() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return IsGoodReplyFromServer(server_status_);
+}
+
+bool ServerConnectionManager::CheckServerReachable() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const bool server_is_reachable = IsServerReachable();
+ if (server_reachable_ != server_is_reachable) {
+ server_reachable_ = server_is_reachable;
+ NotifyStatusChanged();
+ }
+ return server_is_reachable;
+}
+
+void ServerConnectionManager::SetServerParameters(const string& server_url,
+ int port,
+ bool use_ssl) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ sync_server_ = server_url;
+ sync_server_port_ = port;
+ use_ssl_ = use_ssl;
+}
+
+// Returns the current server parameters in server_url and port.
+void ServerConnectionManager::GetServerParameters(string* server_url,
+ int* port,
+ bool* use_ssl) const {
+ if (server_url != NULL)
+ *server_url = sync_server_;
+ if (port != NULL)
+ *port = sync_server_port_;
+ if (use_ssl != NULL)
+ *use_ssl = use_ssl_;
+}
+
+std::string ServerConnectionManager::GetServerHost() const {
+ string server_url;
+ int port;
+ bool use_ssl;
+ GetServerParameters(&server_url, &port, &use_ssl);
+ // For unit tests.
+ if (server_url.empty())
+ return std::string();
+ // We just want the hostname, so we don't need to switch on use_ssl.
+ server_url = "http://" + server_url;
+ GURL gurl(server_url);
+ DCHECK(gurl.is_valid()) << gurl;
+ return gurl.host();
+}
+
+void ServerConnectionManager::AddListener(
+ ServerConnectionEventListener* listener) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ listeners_.AddObserver(listener);
+}
+
+void ServerConnectionManager::RemoveListener(
+ ServerConnectionEventListener* listener) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ listeners_.RemoveObserver(listener);
+}
+
+ServerConnectionManager::Connection* ServerConnectionManager::MakeConnection()
+{
+ return NULL; // For testing.
+}
+
+void ServerConnectionManager::TerminateAllIO() {
+ base::AutoLock lock(terminate_connection_lock_);
+ terminated_ = true;
+ if (active_connection_)
+ active_connection_->Abort();
+
+ // Sever our ties to this connection object. Note that it still may exist,
+ // since we don't own it, but it has been neutered.
+ active_connection_ = NULL;
+}
+
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::Directory* directory,
+ const std::string& share) {
+ string birthday = directory->store_birthday();
+ if (!birthday.empty())
+ csm->set_store_birthday(birthday);
+ csm->set_share(share);
+ return true;
+}
+
+std::ostream& operator << (std::ostream& s, const struct HttpResponse& hr) {
+ s << " Response Code (bogus on error): " << hr.response_code;
+ s << " Content-Length (bogus on error): " << hr.content_length;
+ s << " Server Status: " << hr.server_status;
+ return s;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/net/server_connection_manager.h b/sync/engine/net/server_connection_manager.h
new file mode 100644
index 0000000..6347223
--- /dev/null
+++ b/sync/engine/net/server_connection_manager.h
@@ -0,0 +1,393 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+#define SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+#pragma once
+
+#include <iosfwd>
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/observer_list.h"
+#include "base/string_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/threading/thread_checker.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace syncable {
+class Directory;
+}
+
+namespace sync_pb {
+class ClientToServerMessage;
+}
+
+namespace browser_sync {
+
+class ClientToServerMessage;
+
+static const int32 kUnsetResponseCode = -1;
+static const int32 kUnsetContentLength = -1;
+static const int32 kUnsetPayloadLength = -1;
+
+// HttpResponse gathers the relevant output properties of an HTTP request.
+// Depending on the value of the server_status code, response_code, and
+// content_length may not be valid.
+struct HttpResponse {
+ enum ServerConnectionCode {
+ // For uninitialized state.
+ NONE,
+
+ // CONNECTION_UNAVAILABLE is returned when InternetConnect() fails.
+ CONNECTION_UNAVAILABLE,
+
+ // IO_ERROR is returned when reading/writing to a buffer has failed.
+ IO_ERROR,
+
+ // SYNC_SERVER_ERROR is returned when the HTTP status code indicates that
+    // a non-auth error has occurred.
+ SYNC_SERVER_ERROR,
+
+ // SYNC_AUTH_ERROR is returned when the HTTP status code indicates that an
+    // auth error has occurred (i.e. a 401 or sync-specific AUTH_INVALID
+ // response)
+ // TODO(tim): Caring about AUTH_INVALID is a layering violation. But
+ // this app-specific logic is being added as a stable branch hotfix so
+ // minimal changes prevail for the moment. Fix this! Bug 35060.
+ SYNC_AUTH_ERROR,
+
+ // All the following connection codes are valid responses from the server.
+ // Means the server is up. If you update this list, be sure to also update
+ // IsGoodReplyFromServer().
+
+ // SERVER_CONNECTION_OK is returned when request was handled correctly.
+ SERVER_CONNECTION_OK,
+
+ // RETRY is returned when a Commit request fails with a RETRY response from
+ // the server.
+ //
+ // TODO(idana): the server no longer returns RETRY so we should remove this
+ // value.
+ RETRY,
+ };
+
+ // The HTTP Status code.
+ int64 response_code;
+
+ // The value of the Content-length header.
+ int64 content_length;
+
+ // The size of a download request's payload.
+ int64 payload_length;
+
+ // Value of the Update-Client-Auth header.
+ std::string update_client_auth_header;
+
+ // Identifies the type of failure, if any.
+ ServerConnectionCode server_status;
+
+ HttpResponse();
+
+ static const char* GetServerConnectionCodeString(
+ ServerConnectionCode code);
+};
+
+inline bool IsGoodReplyFromServer(HttpResponse::ServerConnectionCode code) {
+ return code >= HttpResponse::SERVER_CONNECTION_OK;
+}
+
+struct ServerConnectionEvent {
+ HttpResponse::ServerConnectionCode connection_code;
+ bool server_reachable;
+ ServerConnectionEvent(HttpResponse::ServerConnectionCode code,
+ bool server_reachable) :
+ connection_code(code), server_reachable(server_reachable) {}
+};
+
+class ServerConnectionEventListener {
+ public:
+ virtual void OnServerConnectionEvent(const ServerConnectionEvent& event) = 0;
+ protected:
+ virtual ~ServerConnectionEventListener() {}
+};
+
+class ServerConnectionManager;
+// A helper class that automatically notifies when the status changes.
+// TODO(tim): This class shouldn't be exposed outside of the implementation,
+// bug 35060.
+class ScopedServerStatusWatcher : public base::NonThreadSafe {
+ public:
+ ScopedServerStatusWatcher(ServerConnectionManager* conn_mgr,
+ HttpResponse* response);
+ virtual ~ScopedServerStatusWatcher();
+ private:
+ ServerConnectionManager* const conn_mgr_;
+ HttpResponse* const response_;
+ bool server_reachable_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedServerStatusWatcher);
+};
+
+// Use this class to interact with the sync server.
+// The ServerConnectionManager currently supports POSTing protocol buffers.
+//
+class ServerConnectionManager {
+ public:
+ // buffer_in - will be POSTed
+ // buffer_out - string will be overwritten with response
+ struct PostBufferParams {
+ std::string buffer_in;
+ std::string buffer_out;
+ HttpResponse response;
+ };
+
+ // Abstract class providing network-layer functionality to the
+ // ServerConnectionManager. Subclasses implement this using an HTTP stack of
+ // their choice.
+ class Connection {
+ public:
+ explicit Connection(ServerConnectionManager* scm);
+ virtual ~Connection();
+
+ // Called to initialize and perform an HTTP POST.
+ virtual bool Init(const char* path,
+ const std::string& auth_token,
+ const std::string& payload,
+ HttpResponse* response) = 0;
+
+ // Immediately abandons a pending HTTP POST request and unblocks caller
+ // in Init.
+ virtual void Abort() = 0;
+
+ bool ReadBufferResponse(std::string* buffer_out, HttpResponse* response,
+ bool require_response);
+ bool ReadDownloadResponse(HttpResponse* response, std::string* buffer_out);
+
+ protected:
+ std::string MakeConnectionURL(const std::string& sync_server,
+ const std::string& path,
+ bool use_ssl) const;
+
+ void GetServerParams(std::string* server,
+ int* server_port,
+ bool* use_ssl) const {
+ server->assign(scm_->sync_server_);
+ *server_port = scm_->sync_server_port_;
+ *use_ssl = scm_->use_ssl_;
+ }
+
+ std::string buffer_;
+ ServerConnectionManager* scm_;
+
+ private:
+ int ReadResponse(void* buffer, int length);
+ int ReadResponse(std::string* buffer, int length);
+ };
+
+ ServerConnectionManager(const std::string& server,
+ int port,
+ bool use_ssl,
+ const std::string& user_agent);
+
+ virtual ~ServerConnectionManager();
+
+ // POSTS buffer_in and reads a response into buffer_out. Uses our currently
+ // set auth token in our headers.
+ //
+ // Returns true if executed successfully.
+ virtual bool PostBufferWithCachedAuth(PostBufferParams* params,
+ ScopedServerStatusWatcher* watcher);
+
+ // Checks the time on the server. Returns false if the request failed. |time|
+ // is an out parameter that stores the value returned from the server.
+ virtual bool CheckTime(int32* out_time);
+
+ // Returns true if sync_server_ is reachable. This method verifies that the
+ // server is pingable and that traffic can be sent to and from it.
+ virtual bool IsServerReachable();
+
+ // Returns true if user has been successfully authenticated.
+ virtual bool IsUserAuthenticated();
+
+ // Updates status and broadcasts events on change.
+ bool CheckServerReachable();
+
+ void AddListener(ServerConnectionEventListener* listener);
+ void RemoveListener(ServerConnectionEventListener* listener);
+
+ inline std::string user_agent() const { return user_agent_; }
+
+ inline HttpResponse::ServerConnectionCode server_status() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return server_status_;
+ }
+
+ inline bool server_reachable() const { return server_reachable_; }
+
+ const std::string client_id() const { return client_id_; }
+
+ // This changes the server info used by the connection manager. This allows
+ // a single client instance to talk to different backing servers. This is
+ // typically called during / after authentication so that the server url
+ // can be a function of the user's login id.
+ void SetServerParameters(const std::string& server_url,
+ int port,
+ bool use_ssl);
+
+ // Returns the current server parameters in server_url, port and use_ssl.
+ void GetServerParameters(std::string* server_url,
+ int* port,
+ bool* use_ssl) const;
+
+ std::string GetServerHost() const;
+
+ // Factory method to create an Connection object we can use for
+ // communication with the server.
+ virtual Connection* MakeConnection();
+
+ // Aborts any active HTTP POST request.
+ // We expect this to get called on a different thread than the valid
+ // ThreadChecker thread, as we want to kill any pending http traffic without
+ // having to wait for the request to complete.
+ void TerminateAllIO();
+
+ void set_client_id(const std::string& client_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(client_id_.empty());
+ client_id_.assign(client_id);
+ }
+
+  // Returns true if the auth token is successfully set and false otherwise.
+ bool set_auth_token(const std::string& auth_token) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (previously_invalidated_token != auth_token) {
+ auth_token_.assign(auth_token);
+ previously_invalidated_token = std::string();
+ return true;
+ }
+ return false;
+ }
+
+ void InvalidateAndClearAuthToken() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Copy over the token to previous invalid token.
+ if (!auth_token_.empty()) {
+ previously_invalidated_token.assign(auth_token_);
+ auth_token_ = std::string();
+ }
+ }
+
+ const std::string auth_token() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return auth_token_;
+ }
+
+ protected:
+ inline std::string proto_sync_path() const {
+ return proto_sync_path_;
+ }
+
+ std::string get_time_path() const {
+ return get_time_path_;
+ }
+
+ // NOTE: Tests rely on this protected function being virtual.
+ //
+ // Internal PostBuffer base function.
+ virtual bool PostBufferToPath(PostBufferParams*,
+ const std::string& path,
+ const std::string& auth_token,
+ ScopedServerStatusWatcher* watcher);
+
+ // Helper to check terminated flags and build a Connection object, installing
+ // it as the |active_connection_|. If this ServerConnectionManager has been
+ // terminated, this will return NULL.
+ Connection* MakeActiveConnection();
+
+ // Called by Connection objects as they are destroyed to allow the
+ // ServerConnectionManager to cleanup active connections.
+ void OnConnectionDestroyed(Connection* connection);
+
+ // The sync_server_ is the server that requests will be made to.
+ std::string sync_server_;
+
+ // The sync_server_port_ is the port that HTTP requests will be made on.
+ int sync_server_port_;
+
+ // The unique id of the user's client.
+ std::string client_id_;
+
+ // The user-agent string for HTTP.
+ std::string user_agent_;
+
+ // Indicates whether or not requests should be made using HTTPS.
+ bool use_ssl_;
+
+ // The paths we post to.
+ std::string proto_sync_path_;
+ std::string get_time_path_;
+
+ // The auth token to use in authenticated requests. Set by the AuthWatcher.
+ std::string auth_token_;
+
+ // The previous auth token that is invalid now.
+ std::string previously_invalidated_token;
+
+ ObserverList<ServerConnectionEventListener> listeners_;
+
+ HttpResponse::ServerConnectionCode server_status_;
+ bool server_reachable_;
+
+ base::ThreadChecker thread_checker_;
+
+ // Protects all variables below to allow bailing out of active connections.
+ base::Lock terminate_connection_lock_;
+
+ // If true, we've been told to terminate IO and expect to be destroyed
+ // shortly. No future network requests will be made.
+ bool terminated_;
+
+ // A non-owning pointer to any active http connection, so that we can abort
+ // it if necessary.
+ Connection* active_connection_;
+
+ private:
+ friend class Connection;
+ friend class ScopedServerStatusWatcher;
+
+ // A class to help deal with cleaning up active Connection objects when (for
+ // ex) multiple early-exits are present in some scope. ScopedConnectionHelper
+ // informs the ServerConnectionManager before the Connection object it takes
+ // ownership of is destroyed.
+ class ScopedConnectionHelper {
+ public:
+ // |manager| must outlive this. Takes ownership of |connection|.
+ ScopedConnectionHelper(ServerConnectionManager* manager,
+ Connection* connection);
+ ~ScopedConnectionHelper();
+ Connection* get();
+ private:
+ ServerConnectionManager* manager_;
+ scoped_ptr<Connection> connection_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedConnectionHelper);
+ };
+
+ void NotifyStatusChanged();
+
+ DISALLOW_COPY_AND_ASSIGN(ServerConnectionManager);
+};
+
+// Fills a ClientToServerMessage with the appropriate share and birthday
+// settings.
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::Directory* manager,
+ const std::string& share);
+
+std::ostream& operator<<(std::ostream& s, const struct HttpResponse& hr);
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
diff --git a/sync/engine/net/url_translator.cc b/sync/engine/net/url_translator.cc
new file mode 100644
index 0000000..90b7794
--- /dev/null
+++ b/sync/engine/net/url_translator.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the definition of a few helper functions used for generating sync
+// URLs.
+
+#include "sync/engine/net/url_translator.h"
+
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "net/base/escape.h"
+
+using std::string;
+
+namespace browser_sync {
+
+namespace {
+// Parameters that the server understands. (here, a-Z)
+const char kParameterAuthToken[] = "auth";
+const char kParameterClientID[] = "client_id";
+}
+
+// Convenience wrappers around CgiEscapePath().
+string CgiEscapeString(const char* src) {
+ return CgiEscapeString(string(src));
+}
+
// Escapes |src| for embedding in a URL query component. The 'true' argument
// selects use_plus-style escaping (spaces become '+') — see net/base/escape.h
// to confirm.
string CgiEscapeString(const string& src) {
  return net::EscapeUrlEncodedData(src, true);
}
+
// This method appends the query string to the sync server path, joining the
// two with a '?' separator.
std::string MakeSyncServerPath(const std::string& path,
                               const std::string& query_string) {
  return path + "?" + query_string;
}
+
+string MakeSyncQueryString(const string& client_id) {
+ string query;
+ query += kParameterClientID;
+ query += "=" + CgiEscapeString(client_id);
+ return query;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/net/url_translator.h b/sync/engine/net/url_translator.h
new file mode 100644
index 0000000..65b7346
--- /dev/null
+++ b/sync/engine/net/url_translator.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the declaration of a few helper functions used for generating sync
+// URLs.
+
#ifndef SYNC_ENGINE_NET_URL_TRANSLATOR_H_
#define SYNC_ENGINE_NET_URL_TRANSLATOR_H_
#pragma once

#include <string>

namespace browser_sync {

// Convenience wrappers around CgiEscapePath(), used by gaia_auth.
// Escapes |src| so it can be embedded in a URL query string.
std::string CgiEscapeString(const char* src);
std::string CgiEscapeString(const std::string& src);

// This method appends the query string to the sync server path, joining the
// two with a '?' separator.
std::string MakeSyncServerPath(const std::string& path,
                               const std::string& query_string);

// Builds the "client_id=<escaped id>" query string for a sync request.
std::string MakeSyncQueryString(const std::string& client_id);

}  // namespace browser_sync

#endif  // SYNC_ENGINE_NET_URL_TRANSLATOR_H_
diff --git a/sync/engine/nigori_util.cc b/sync/engine/nigori_util.cc
new file mode 100644
index 0000000..1b6d42a
--- /dev/null
+++ b/sync/engine/nigori_util.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/nigori_util.h"
+
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "base/json/json_writer.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/cryptographer.h"
+
+namespace syncable {
+
// Walks every unsynced entry and (re-)encrypts its specifics so the entry
// matches the cryptographer's current set of encrypted types. Returns false
// if any entry could not be updated.
bool ProcessUnsyncedChangesForEncryption(
    WriteTransaction* const trans,
    browser_sync::Cryptographer* cryptographer) {
  DCHECK(cryptographer->is_ready());
  // Get list of all datatypes with unsynced changes. It's possible that our
  // local changes need to be encrypted if encryption for that datatype was
  // just turned on (and vice versa).
  // Note: we do not attempt to re-encrypt data with a new key here as key
  // changes in this code path are likely due to consistency issues (we have
  // to be updated to a key we already have, e.g. an old key).
  std::vector<int64> handles;
  browser_sync::SyncerUtil::GetUnsyncedEntries(trans, &handles);
  for (size_t i = 0; i < handles.size(); ++i) {
    MutableEntry entry(trans, GET_BY_HANDLE, handles[i]);
    const sync_pb::EntitySpecifics& specifics = entry.Get(SPECIFICS);
    // Ignore types that don't need encryption or entries that are already
    // encrypted.
    if (!SpecificsNeedsEncryption(cryptographer->GetEncryptedTypes(),
                                  specifics)) {
      continue;
    }
    if (!UpdateEntryWithEncryption(cryptographer, specifics, &entry)) {
      NOTREACHED();
      return false;
    }
  }
  return true;
}
+
+bool VerifyUnsyncedChangesAreEncrypted(
+ BaseTransaction* const trans,
+ ModelTypeSet encrypted_types) {
+ std::vector<int64> handles;
+ browser_sync::SyncerUtil::GetUnsyncedEntries(trans, &handles);
+ for (size_t i = 0; i < handles.size(); ++i) {
+ Entry entry(trans, GET_BY_HANDLE, handles[i]);
+ if (!entry.good()) {
+ NOTREACHED();
+ return false;
+ }
+ if (EntryNeedsEncryption(encrypted_types, entry))
+ return false;
+ }
+ return true;
+}
+
// Returns true if |entry| should be encrypted per |encrypted_types| but is
// not yet fully scrubbed: either its specifics lack an encrypted blob, or its
// non-unique name still holds potentially sensitive plaintext.
bool EntryNeedsEncryption(ModelTypeSet encrypted_types,
                          const Entry& entry) {
  if (!entry.Get(UNIQUE_SERVER_TAG).empty())
    return false;  // We don't encrypt unique server nodes.
  syncable::ModelType type = entry.GetModelType();
  // Passwords and the nigori node use their own encryption schemes.
  if (type == PASSWORDS || type == NIGORI)
    return false;
  // Checking NON_UNIQUE_NAME is not necessary for the correctness of encrypting
  // the data, nor for determining if data is encrypted. We simply ensure it has
  // been overwritten to avoid any possible leaks of sensitive data.
  return SpecificsNeedsEncryption(encrypted_types, entry.Get(SPECIFICS)) ||
      (encrypted_types.Has(type) &&
       entry.Get(NON_UNIQUE_NAME) != kEncryptedString);
}
+
+bool SpecificsNeedsEncryption(ModelTypeSet encrypted_types,
+ const sync_pb::EntitySpecifics& specifics) {
+ const ModelType type = GetModelTypeFromSpecifics(specifics);
+ if (type == PASSWORDS || type == NIGORI)
+ return false; // These types have their own encryption schemes.
+ if (!encrypted_types.Has(type))
+ return false; // This type does not require encryption
+ return !specifics.has_encrypted();
+}
+
// Mainly for testing.
// Breadth-first walk over every node of |type| beneath its root folder,
// verifying that each non-permanent item's encryption state matches
// |is_encrypted| — and, when encrypted, that the name has been scrubbed and
// the blob decrypts with the cryptographer's default key.
bool VerifyDataTypeEncryptionForTest(
    BaseTransaction* const trans,
    browser_sync::Cryptographer* cryptographer,
    ModelType type,
    bool is_encrypted) {
  // Passwords and nigori use their own encryption schemes; callers should not
  // ask about them here.
  if (type == PASSWORDS || type == NIGORI) {
    NOTREACHED();
    return true;
  }
  std::string type_tag = ModelTypeToRootTag(type);
  Entry type_root(trans, GET_BY_SERVER_TAG, type_tag);
  if (!type_root.good()) {
    NOTREACHED();
    return false;
  }

  std::queue<Id> to_visit;
  Id id_string;
  if (!trans->directory()->GetFirstChildId(
          trans, type_root.Get(ID), &id_string)) {
    NOTREACHED();
    return false;
  }
  to_visit.push(id_string);
  while (!to_visit.empty()) {
    id_string = to_visit.front();
    to_visit.pop();
    // Root ids act as sentinels (e.g. the end of a NEXT_ID chain); nothing
    // to check for them.
    if (id_string.IsRoot())
      continue;

    Entry child(trans, GET_BY_ID, id_string);
    if (!child.good()) {
      NOTREACHED();
      return false;
    }
    if (child.Get(IS_DIR)) {
      Id child_id_string;
      if (!trans->directory()->GetFirstChildId(
              trans, child.Get(ID), &child_id_string)) {
        NOTREACHED();
        return false;
      }
      // Traverse the children.
      to_visit.push(child_id_string);
    }
    const sync_pb::EntitySpecifics& specifics = child.Get(SPECIFICS);
    DCHECK_EQ(type, child.GetModelType());
    DCHECK_EQ(type, GetModelTypeFromSpecifics(specifics));
    // We don't encrypt the server's permanent items.
    if (child.Get(UNIQUE_SERVER_TAG).empty()) {
      if (specifics.has_encrypted() != is_encrypted)
        return false;
      if (specifics.has_encrypted()) {
        if (child.Get(NON_UNIQUE_NAME) != kEncryptedString)
          return false;
        if (!cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted()))
          return false;
      }
    }
    // Push the successor.
    to_visit.push(child.Get(NEXT_ID));
  }
  return true;
}
+
// Writes |new_specifics| into |entry|, producing an encrypted blob when the
// cryptographer's encrypted types (or the entry's previously-encrypted state)
// require it. When an encrypted blob is produced, the potentially sensitive
// NON_UNIQUE_NAME — and, for bookmarks, the plaintext title/url — are
// overwritten with kEncryptedString. Returns false (without modifying
// |entry|) if encryption fails or |new_specifics| already carries an
// encrypted blob.
bool UpdateEntryWithEncryption(
    browser_sync::Cryptographer* cryptographer,
    const sync_pb::EntitySpecifics& new_specifics,
    syncable::MutableEntry* entry) {
  syncable::ModelType type = syncable::GetModelTypeFromSpecifics(new_specifics);
  DCHECK_GE(type, syncable::FIRST_REAL_MODEL_TYPE);
  const sync_pb::EntitySpecifics& old_specifics = entry->Get(SPECIFICS);
  const syncable::ModelTypeSet encrypted_types =
      cryptographer->GetEncryptedTypes();
  // It's possible the nigori lost the set of encrypted types. If the current
  // specifics are already encrypted, we want to ensure we continue encrypting.
  bool was_encrypted = old_specifics.has_encrypted();
  sync_pb::EntitySpecifics generated_specifics;
  if (new_specifics.has_encrypted()) {
    NOTREACHED() << "New specifics already has an encrypted blob.";
    return false;
  }
  if ((!SpecificsNeedsEncryption(encrypted_types, new_specifics) &&
       !was_encrypted) ||
      !cryptographer->is_initialized()) {
    // No encryption required or we are unable to encrypt.
    generated_specifics.CopyFrom(new_specifics);
  } else {
    // Encrypt new_specifics into generated_specifics.
    if (VLOG_IS_ON(2)) {
      scoped_ptr<DictionaryValue> value(entry->ToValue());
      std::string info;
      base::JSONWriter::Write(value.get(), true, &info);
      DVLOG(2) << "Encrypting specifics of type "
               << syncable::ModelTypeToString(type)
               << " with content: "
               << info;
    }
    // Only copy over the old specifics if it is of the right type and already
    // encrypted. The first time we encrypt a node we start from scratch, hence
    // removing all the unencrypted data, but from then on we only want to
    // update the node if the data changes or the encryption key changes.
    if (syncable::GetModelTypeFromSpecifics(old_specifics) == type &&
        was_encrypted) {
      generated_specifics.CopyFrom(old_specifics);
    } else {
      syncable::AddDefaultFieldValue(type, &generated_specifics);
    }
    // Does not change anything if underlying encrypted blob was already up
    // to date and encrypted with the default key.
    if (!cryptographer->Encrypt(new_specifics,
                                generated_specifics.mutable_encrypted())) {
      NOTREACHED() << "Could not encrypt data for node of type "
                   << syncable::ModelTypeToString(type);
      return false;
    }
  }

  // It's possible this entry was encrypted but didn't properly overwrite the
  // non_unique_name (see crbug.com/96314).
  bool encrypted_without_overwriting_name = (was_encrypted &&
      entry->Get(syncable::NON_UNIQUE_NAME) != kEncryptedString);

  // If we're encrypted but the name wasn't overwritten properly we still want
  // to rewrite the entry, irrespective of whether the specifics match.
  if (!encrypted_without_overwriting_name &&
      old_specifics.SerializeAsString() ==
          generated_specifics.SerializeAsString()) {
    DVLOG(2) << "Specifics of type " << syncable::ModelTypeToString(type)
             << " already match, dropping change.";
    return true;
  }

  if (generated_specifics.has_encrypted()) {
    // Overwrite the possibly sensitive non-specifics data.
    entry->Put(syncable::NON_UNIQUE_NAME, kEncryptedString);
    // For bookmarks we actually put bogus data into the unencrypted specifics,
    // else the server will try to do it for us.
    if (type == syncable::BOOKMARKS) {
      sync_pb::BookmarkSpecifics* bookmark_specifics =
          generated_specifics.mutable_bookmark();
      if (!entry->Get(syncable::IS_DIR))
        bookmark_specifics->set_url(kEncryptedString);
      bookmark_specifics->set_title(kEncryptedString);
    }
  }
  entry->Put(syncable::SPECIFICS, generated_specifics);
  DVLOG(1) << "Overwriting specifics of type "
           << syncable::ModelTypeToString(type)
           << " and marking for syncing.";
  syncable::MarkForSyncing(entry);
  return true;
}
+
+} // namespace syncable
diff --git a/sync/engine/nigori_util.h b/sync/engine/nigori_util.h
new file mode 100644
index 0000000..67174c3
--- /dev/null
+++ b/sync/engine/nigori_util.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Various utility methods for nigori-based multi-type encryption.
+
#ifndef SYNC_ENGINE_NIGORI_UTIL_H_
#define SYNC_ENGINE_NIGORI_UTIL_H_
#pragma once

#include "base/compiler_specific.h"
#include "sync/protocol/nigori_specifics.pb.h"
#include "sync/syncable/model_type.h"

namespace browser_sync {
class Cryptographer;
}

namespace sync_pb {
class EntitySpecifics;
}

namespace syncable {

// Value written over the scrubbed name/fields of encrypted entries.
// Note: a namespace-scope const array has internal linkage, so every
// translation unit including this header gets its own copy.
const char kEncryptedString[] = "encrypted";

class BaseTransaction;
class Entry;
class MutableEntry;
class WriteTransaction;

// Check if our unsynced changes are encrypted if they need to be based on
// |encrypted_types|.
// Returns: true if all unsynced data that should be encrypted is.
//          false if some unsynced changes need to be encrypted.
// This method is similar to ProcessUnsyncedChangesForEncryption but does not
// modify the data and does not care if data is unnecessarily encrypted.
bool VerifyUnsyncedChangesAreEncrypted(
    BaseTransaction* const trans,
    ModelTypeSet encrypted_types);

// Processes all unsynced changes and ensures they are appropriately encrypted
// or unencrypted, based on |encrypted_types|.
bool ProcessUnsyncedChangesForEncryption(
    WriteTransaction* const trans,
    browser_sync::Cryptographer* cryptographer);

// Returns true if the entry requires encryption but is not encrypted, false
// otherwise. Note: this does not check that already encrypted entries are
// encrypted with the proper key.
bool EntryNeedsEncryption(ModelTypeSet encrypted_types,
                          const Entry& entry);

// Same as EntryNeedsEncryption, but looks at specifics.
bool SpecificsNeedsEncryption(ModelTypeSet encrypted_types,
                              const sync_pb::EntitySpecifics& specifics);

// Verifies all data of type |type| is encrypted appropriately.
bool VerifyDataTypeEncryptionForTest(
    BaseTransaction* const trans,
    browser_sync::Cryptographer* cryptographer,
    ModelType type,
    bool is_encrypted) WARN_UNUSED_RESULT;

// Stores |new_specifics| into |entry|, encrypting if necessary.
// Returns false if an error encrypting occurred (does not modify |entry|).
// Note: gracefully handles new_specifics aliasing with entry->Get(SPECIFICS).
bool UpdateEntryWithEncryption(
    browser_sync::Cryptographer* cryptographer,
    const sync_pb::EntitySpecifics& new_specifics,
    MutableEntry* entry);

}  // namespace syncable

#endif  // SYNC_ENGINE_NIGORI_UTIL_H_
diff --git a/sync/engine/nigori_util_unittest.cc b/sync/engine/nigori_util_unittest.cc
new file mode 100644
index 0000000..2195c78
--- /dev/null
+++ b/sync/engine/nigori_util_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/nigori_util.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/cryptographer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncable {
+
+typedef testing::Test NigoriUtilTest;
+
// Covers the SpecificsNeedsEncryption decision table: empty specifics, a type
// outside the encrypted set (PREFERENCES), a type inside the set (BOOKMARKS)
// before and after an encrypted blob is attached, and a type with its own
// encryption scheme (PASSWORDS).
TEST(NigoriUtilTest, SpecificsNeedsEncryption) {
  ModelTypeSet encrypted_types;
  encrypted_types.Put(BOOKMARKS);
  encrypted_types.Put(PASSWORDS);

  sync_pb::EntitySpecifics specifics;
  EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), specifics));
  EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, specifics));

  // PREFERENCES is not in |encrypted_types|, so no encryption is needed.
  AddDefaultFieldValue(PREFERENCES, &specifics);
  EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, specifics));

  sync_pb::EntitySpecifics bookmark_specifics;
  AddDefaultFieldValue(BOOKMARKS, &bookmark_specifics);
  EXPECT_TRUE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));

  bookmark_specifics.mutable_bookmark()->set_title("title");
  bookmark_specifics.mutable_bookmark()->set_url("url");
  EXPECT_TRUE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));
  EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), bookmark_specifics));

  // Attaching an encrypted blob satisfies the encryption requirement.
  bookmark_specifics.mutable_encrypted();
  EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, bookmark_specifics));
  EXPECT_FALSE(SpecificsNeedsEncryption(ModelTypeSet(), bookmark_specifics));

  // Passwords use their own encryption scheme and are always exempt here.
  sync_pb::EntitySpecifics password_specifics;
  AddDefaultFieldValue(PASSWORDS, &password_specifics);
  EXPECT_FALSE(SpecificsNeedsEncryption(encrypted_types, password_specifics));
}
+
+// ProcessUnsyncedChangesForEncryption and other methods that rely on the syncer
+// are tested in apply_updates_command_unittest.cc
+
+} // namespace syncable
diff --git a/sync/engine/nudge_source.cc b/sync/engine/nudge_source.cc
new file mode 100644
index 0000000..50cd7eb
--- /dev/null
+++ b/sync/engine/nudge_source.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/nudge_source.h"
+
+#include "base/logging.h"
+
+namespace browser_sync {
+
+#define ENUM_CASE(x) case x: return #x; break
+
+const char* GetNudgeSourceString(NudgeSource nudge_source) {
+ switch (nudge_source) {
+ ENUM_CASE(NUDGE_SOURCE_UNKNOWN);
+ ENUM_CASE(NUDGE_SOURCE_NOTIFICATION);
+ ENUM_CASE(NUDGE_SOURCE_LOCAL);
+ ENUM_CASE(NUDGE_SOURCE_CONTINUATION);
+ ENUM_CASE(NUDGE_SOURCE_LOCAL_REFRESH);
+ };
+ NOTREACHED();
+ return "";
+}
+
+#undef ENUM_CASE
+
+} // namespace browser_sync
diff --git a/sync/engine/nudge_source.h b/sync/engine/nudge_source.h
new file mode 100644
index 0000000..f3488f4
--- /dev/null
+++ b/sync/engine/nudge_source.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_NUDGE_SOURCE_H_
+#define SYNC_ENGINE_NUDGE_SOURCE_H_
+#pragma once
+
+namespace browser_sync {
+
// Reason a sync nudge (a request to start a sync cycle) was generated.
enum NudgeSource {
  NUDGE_SOURCE_UNKNOWN = 0,
  // We received an invalidation message and are nudging to check for updates.
  NUDGE_SOURCE_NOTIFICATION,
  // A local change occurred (e.g. bookmark moved).
  NUDGE_SOURCE_LOCAL,
  // A previous sync cycle did not fully complete (e.g. HTTP error).
  NUDGE_SOURCE_CONTINUATION,
  // A local event is triggering an optimistic datatype refresh.
  NUDGE_SOURCE_LOCAL_REFRESH,
};

// Returns the symbolic name of |nudge_source| (e.g. "NUDGE_SOURCE_LOCAL").
const char* GetNudgeSourceString(NudgeSource nudge_source);
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_NUDGE_SOURCE_H_
diff --git a/sync/engine/passive_model_worker.cc b/sync/engine/passive_model_worker.cc
new file mode 100644
index 0000000..0b0d423
--- /dev/null
+++ b/sync/engine/passive_model_worker.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/passive_model_worker.h"
+
+#include "base/message_loop.h"
+
+namespace browser_sync {
+
PassiveModelWorker::PassiveModelWorker(const MessageLoop* sync_loop)
    : sync_loop_(sync_loop) {}

PassiveModelWorker::~PassiveModelWorker() {
}

// Runs |work| synchronously on the calling thread; passive types do their
// work directly on the sync loop, so no thread hop is needed.
SyncerError PassiveModelWorker::DoWorkAndWaitUntilDone(
    const WorkCallback& work) {
  // Must be invoked on the loop this worker was constructed with.
  DCHECK_EQ(MessageLoop::current(), sync_loop_);
  // Simply do the work on the current thread.
  return work.Run();
}

ModelSafeGroup PassiveModelWorker::GetModelSafeGroup() {
  return GROUP_PASSIVE;
}
+
+} // namespace browser_sync
diff --git a/sync/engine/passive_model_worker.h b/sync/engine/passive_model_worker.h
new file mode 100644
index 0000000..a8b89f7
--- /dev/null
+++ b/sync/engine/passive_model_worker.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_PASSIVE_MODEL_WORKER_H_
+#define SYNC_ENGINE_PASSIVE_MODEL_WORKER_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/util/syncer_error.h"
+
+class MessageLoop;
+
+namespace browser_sync {
+
// Implementation of ModelSafeWorker for passive types. All work is
// done on the same thread DoWorkAndWaitUntilDone (i.e., the sync
// thread).
class PassiveModelWorker : public ModelSafeWorker {
 public:
  // |sync_loop| must outlive this worker; it is only used to DCHECK that
  // work runs on the right loop.
  explicit PassiveModelWorker(const MessageLoop* sync_loop);

  // ModelSafeWorker implementation. Called on the sync thread.
  virtual SyncerError DoWorkAndWaitUntilDone(
      const WorkCallback& work) OVERRIDE;
  virtual ModelSafeGroup GetModelSafeGroup() OVERRIDE;

 private:
  // Private dtor: presumably destroyed via the ModelSafeWorker base
  // (reference counting?) — confirm against model_safe_worker.h.
  virtual ~PassiveModelWorker();

  const MessageLoop* const sync_loop_;

  DISALLOW_COPY_AND_ASSIGN(PassiveModelWorker);
};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_PASSIVE_MODEL_WORKER_H_
diff --git a/sync/engine/polling_constants.cc b/sync/engine/polling_constants.cc
new file mode 100644
index 0000000..c1f8e37
--- /dev/null
+++ b/sync/engine/polling_constants.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "sync/engine/polling_constants.h"
+
+namespace browser_sync {
+
// Server can overwrite these values via client commands.
// Standard short poll. This is used when XMPP is off.
// We use high values here to ensure that failure to receive poll updates from
// the server doesn't result in rapid-fire polling from the client due to low
// local limits.
const int64 kDefaultShortPollIntervalSeconds = 3600 * 8;  // 8 hours.
// Long poll is used when XMPP is on.
const int64 kDefaultLongPollIntervalSeconds = 3600 * 12;  // 12 hours.

// Maximum interval for exponential backoff.
const int64 kMaxBackoffSeconds = 60 * 60 * 4;  // 4 hours.

// Backoff interval randomization factor.
const int kBackoffRandomizationFactor = 2;
+
+} // namespace browser_sync
+
diff --git a/sync/engine/polling_constants.h b/sync/engine/polling_constants.h
new file mode 100644
index 0000000..ff6650c
--- /dev/null
+++ b/sync/engine/polling_constants.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Constants used by SyncerThread when polling servers for updates.
+
+#ifndef SYNC_ENGINE_POLLING_CONSTANTS_H_
+#define SYNC_ENGINE_POLLING_CONSTANTS_H_
+#pragma once
+
+namespace browser_sync {
+
+extern const int64 kDefaultShortPollIntervalSeconds;
+extern const int64 kDefaultLongPollIntervalSeconds;
+extern const int64 kMaxBackoffSeconds;
+extern const int kBackoffRandomizationFactor;
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_POLLING_CONSTANTS_H_
diff --git a/sync/engine/post_commit_message_command.cc b/sync/engine/post_commit_message_command.cc
new file mode 100644
index 0000000..8f51f20
--- /dev/null
+++ b/sync/engine/post_commit_message_command.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/post_commit_message_command.h"
+
+#include <vector>
+
+#include "base/location.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/sync_session.h"
+
+using std::vector;
+
+namespace browser_sync {
+
PostCommitMessageCommand::PostCommitMessageCommand() {}
PostCommitMessageCommand::~PostCommitMessageCommand() {}

// Sends the commit message assembled earlier in the sync cycle to the server.
// On success, marks items committed and stores the server's response on the
// status controller. On failure, clears the transient SYNCING bit on every
// entry we attempted to commit and propagates the error.
SyncerError PostCommitMessageCommand::ExecuteImpl(
    sessions::SyncSession* session) {
  if (session->status_controller().commit_ids().empty())
    return SYNCER_OK;  // Nothing to commit.
  ClientToServerResponse response;
  syncable::Directory* dir = session->context()->directory();
  sessions::StatusController* status = session->mutable_status_controller();
  SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
      status->commit_message(), &response, session);
  if (result != SYNCER_OK) {
    // None of our changes got through.  Clear the SYNCING bit which was
    // set to true during BuildCommitCommand, and which may still be true.
    // Not to be confused with IS_UNSYNCED, this bit is used to detect local
    // changes to items that happen during the server Commit operation.
    syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
    const vector<syncable::Id>& commit_ids = status->commit_ids();
    for (size_t i = 0; i < commit_ids.size(); i++) {
      syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
      entry.Put(syncable::SYNCING, false);
    }
    return result;
  }

  status->set_items_committed();
  status->mutable_commit_response()->CopyFrom(response);
  return SYNCER_OK;
}
+
+} // namespace browser_sync
diff --git a/sync/engine/post_commit_message_command.h b/sync/engine/post_commit_message_command.h
new file mode 100644
index 0000000..50fae39
--- /dev/null
+++ b/sync/engine/post_commit_message_command.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+#define SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
// Syncer command that posts the commit message built earlier in the cycle to
// the sync server and records the server's response on the session's status
// controller.
class PostCommitMessageCommand : public SyncerCommand {
 public:
  PostCommitMessageCommand();
  virtual ~PostCommitMessageCommand();

  // SyncerCommand implementation.
  virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/process_commit_response_command.cc
new file mode 100644
index 0000000..b82ff6d
--- /dev/null
+++ b/sync/engine/process_commit_response_command.cc
@@ -0,0 +1,482 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/process_commit_response_command.h"
+
+#include <cstddef>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/location.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/time.h"
+
+using syncable::WriteTransaction;
+using syncable::MutableEntry;
+using syncable::Entry;
+
+using std::set;
+using std::string;
+using std::vector;
+
+using syncable::BASE_VERSION;
+using syncable::GET_BY_ID;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::PARENT_ID;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SYNCER;
+using syncable::SYNCING;
+
+namespace browser_sync {
+
+using sessions::OrderedCommitSet;
+using sessions::StatusController;
+using sessions::SyncSession;
+using sessions::ConflictProgress;
+
+// Trivial constructor/destructor: this command keeps no state of its own;
+// everything it needs lives in the per-session StatusController.
+ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
+ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
+
+// Returns the set of ModelSafeGroups that contain at least one item in the
+// current commit batch.  The ModelChangingSyncerCommand machinery uses this
+// to run ModelChangingExecuteImpl once per affected group.
+std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
+    const sessions::SyncSession& session) const {
+  std::set<ModelSafeGroup> groups_with_commits;
+
+  syncable::Directory* dir = session.context()->directory();
+  syncable::ReadTransaction trans(FROM_HERE, dir);
+  const StatusController& status = session.status_controller();
+  // Map each committed item's model type to its model-safe group, collapsing
+  // duplicates via the set.
+  for (size_t i = 0; i < status.commit_ids().size(); ++i) {
+    groups_with_commits.insert(
+        GetGroupForModelType(status.GetUnrestrictedCommitModelTypeAt(i),
+                             session.routing_info()));
+  }
+
+  return groups_with_commits;
+}
+
+// Validates the structure of the commit response before any model-changing
+// work happens: the response must contain a commit body, and it must carry
+// exactly one entry response per item we attempted to commit.  Returns
+// SERVER_RESPONSE_VALIDATION_FAILED on any structural mismatch.
+SyncerError ProcessCommitResponseCommand::ModelNeutralExecuteImpl(
+    sessions::SyncSession* session) {
+  const StatusController& status = session->status_controller();
+  const ClientToServerResponse& response(status.commit_response());
+  const vector<syncable::Id>& commit_ids(status.commit_ids());
+
+  if (!response.has_commit()) {
+    // TODO(sync): What if we didn't try to commit anything?
+    LOG(WARNING) << "Commit response has no commit body!";
+    return SERVER_RESPONSE_VALIDATION_FAILED;
+  }
+
+  const CommitResponse& cr = response.commit();
+  int commit_count = commit_ids.size();
+  if (cr.entryresponse_size() != commit_count) {
+    // Entry-count mismatch: dump every per-entry response type (and error
+    // message, when present) to aid server-side debugging.
+    LOG(ERROR) << "Commit response has wrong number of entries! Expected:" <<
+               commit_count << " Got:" << cr.entryresponse_size();
+    for (int i = 0 ; i < cr.entryresponse_size() ; i++) {
+      LOG(ERROR) << "Response #" << i << " Value: " <<
+                 cr.entryresponse(i).response_type();
+      if (cr.entryresponse(i).has_error_message())
+        LOG(ERROR) << " " << cr.entryresponse(i).error_message();
+    }
+    return SERVER_RESPONSE_VALIDATION_FAILED;
+  }
+  return SYNCER_OK;
+}
+
+// Per-group entry point: processes the commit response, then handles the
+// extensions-activity bookkeeping.  If bookmark commit activity was recorded
+// for this cycle but no bookmark commit succeeded, the recorded activity is
+// handed back to the ExtensionsActivityMonitor and the session's copy is
+// cleared — presumably so it can be re-attached to a later commit attempt
+// (NOTE(review): intent inferred from the put-back; confirm against monitor
+// semantics).
+SyncerError ProcessCommitResponseCommand::ModelChangingExecuteImpl(
+    SyncSession* session) {
+  SyncerError result = ProcessCommitResponse(session);
+  ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
+  if (session->status_controller().HasBookmarkCommitActivity() &&
+      session->status_controller().syncer_status()
+          .num_successful_bookmark_commits == 0) {
+    monitor->PutRecords(session->extensions_activity());
+    session->mutable_extensions_activity()->clear();
+  }
+  return result;
+}
+
+// Walks this group's projection of the commit batch, dispatches each entry
+// response to ProcessSingleCommitResponse, and tallies the outcomes into
+// success / conflict / transient-error / hard-error counters.  The final
+// SyncerError is derived from those counters, with conflicts counted as
+// successes (see comment below).
+SyncerError ProcessCommitResponseCommand::ProcessCommitResponse(
+    SyncSession* session) {
+  syncable::Directory* dir = session->context()->directory();
+
+  StatusController* status = session->mutable_status_controller();
+  const ClientToServerResponse& response(status->commit_response());
+  const CommitResponse& cr = response.commit();
+  const sync_pb::CommitMessage& commit_message =
+      status->commit_message().commit();
+
+  // If we try to commit a parent and child together and the parent conflicts
+  // the child will have a bad parent causing an error. As this is not a
+  // critical error, we trap it and don't LOG(ERROR). To enable this we keep
+  // a map of conflicting new folders.
+  int transient_error_commits = 0;
+  int conflicting_commits = 0;
+  int error_commits = 0;
+  int successes = 0;
+  set<syncable::Id> conflicting_new_folder_ids;
+  set<syncable::Id> deleted_folders;
+  ConflictProgress* conflict_progress = status->mutable_conflict_progress();
+  // |proj| selects the indices of this group's items within the full batch.
+  OrderedCommitSet::Projection proj = status->commit_id_projection();
+  if (!proj.empty()) { // Scope for WriteTransaction.
+    WriteTransaction trans(FROM_HERE, SYNCER, dir);
+    for (size_t i = 0; i < proj.size(); i++) {
+      CommitResponse::ResponseType response_type =
+          ProcessSingleCommitResponse(&trans, cr.entryresponse(proj[i]),
+                                      commit_message.entries(proj[i]),
+                                      status->GetCommitIdAt(proj[i]),
+                                      &conflicting_new_folder_ids,
+                                      &deleted_folders);
+      switch (response_type) {
+        case CommitResponse::INVALID_MESSAGE:
+          ++error_commits;
+          break;
+        case CommitResponse::CONFLICT:
+          ++conflicting_commits;
+          // Only server CONFLICT responses will activate conflict resolution.
+          conflict_progress->AddServerConflictingItemById(
+              status->GetCommitIdAt(proj[i]));
+          break;
+        case CommitResponse::SUCCESS:
+          // TODO(sync): worry about sync_rate_ rate calc?
+          ++successes;
+          if (status->GetCommitModelTypeAt(proj[i]) == syncable::BOOKMARKS)
+            status->increment_num_successful_bookmark_commits();
+          status->increment_num_successful_commits();
+          break;
+        case CommitResponse::OVER_QUOTA:
+          // We handle over quota like a retry, which is same as transient.
+        case CommitResponse::RETRY:
+        case CommitResponse::TRANSIENT_ERROR:
+          ++transient_error_commits;
+          break;
+        default:
+          LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
+      }
+    }
+  }
+
+  // Mark children of any folders deleted in this batch as synced.
+  SyncerUtil::MarkDeletedChildrenSynced(dir, &deleted_folders);
+
+  int commit_count = static_cast<int>(proj.size());
+  if (commit_count == (successes + conflicting_commits)) {
+    // We consider conflicting commits as a success because things will work out
+    // on their own when we receive them. Flags will be set so that
+    // HasMoreToSync() will cause SyncScheduler to enter another sync cycle to
+    // handle this condition.
+    return SYNCER_OK;
+  } else if (error_commits > 0) {
+    return SERVER_RETURN_UNKNOWN_ERROR;
+  } else if (transient_error_commits > 0) {
+    return SERVER_RETURN_TRANSIENT_ERROR;
+  } else {
+    // Every response type increments exactly one counter, so reaching here
+    // means the counters don't add up — a logic error.
+    LOG(FATAL) << "Inconsistent counts when processing commit response";
+    return SYNCER_OK;
+  }
+}
+
+// Free helper: logs the server-supplied error message attached to a commit
+// entry response at WARNING level, or notes that no message was provided.
+void LogServerError(const CommitResponse_EntryResponse& res) {
+  if (res.has_error_message())
+    LOG(WARNING) << "  " << res.error_message();
+  else
+    LOG(WARNING) << "  No detailed error message returned from server";
+}
+
+// Handles one entry of the commit response.  Clears the entry's SYNCING bit
+// (remembering whether it was set), classifies the server's response type,
+// and on SUCCESS delegates to ProcessSuccessfulCommitResponse to fold the
+// server's view back into the local entry.  Returns the (possibly
+// reclassified) response type for the caller's tally.
+CommitResponse::ResponseType
+ProcessCommitResponseCommand::ProcessSingleCommitResponse(
+    syncable::WriteTransaction* trans,
+    const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
+    const sync_pb::SyncEntity& commit_request_entry,
+    const syncable::Id& pre_commit_id,
+    std::set<syncable::Id>* conflicting_new_folder_ids,
+    set<syncable::Id>* deleted_folders) {
+
+  const CommitResponse_EntryResponse& server_entry =
+      *static_cast<const CommitResponse_EntryResponse*>(&pb_server_entry);
+  MutableEntry local_entry(trans, GET_BY_ID, pre_commit_id);
+  CHECK(local_entry.good());
+  // Whether the item changed locally mid-commit is encoded in SYNCING: it was
+  // set when the commit message was built, so if it's still set nothing
+  // touched the item since.
+  bool syncing_was_set = local_entry.Get(SYNCING);
+  local_entry.Put(SYNCING, false);
+
+  CommitResponse::ResponseType response = (CommitResponse::ResponseType)
+      server_entry.response_type();
+  if (!CommitResponse::ResponseType_IsValid(response)) {
+    LOG(ERROR) << "Commit response has unknown response type! Possibly out "
+                  "of date client?";
+    return CommitResponse::INVALID_MESSAGE;
+  }
+  if (CommitResponse::TRANSIENT_ERROR == response) {
+    DVLOG(1) << "Transient Error Committing: " << local_entry;
+    LogServerError(server_entry);
+    return CommitResponse::TRANSIENT_ERROR;
+  }
+  if (CommitResponse::INVALID_MESSAGE == response) {
+    LOG(ERROR) << "Error Commiting: " << local_entry;
+    LogServerError(server_entry);
+    return response;
+  }
+  if (CommitResponse::CONFLICT == response) {
+    DVLOG(1) << "Conflict Committing: " << local_entry;
+    // TODO(nick): conflicting_new_folder_ids is a purposeless anachronism.
+    if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
+      conflicting_new_folder_ids->insert(pre_commit_id);
+    }
+    return response;
+  }
+  if (CommitResponse::RETRY == response) {
+    DVLOG(1) << "Retry Committing: " << local_entry;
+    return response;
+  }
+  if (CommitResponse::OVER_QUOTA == response) {
+    LOG(WARNING) << "Hit deprecated OVER_QUOTA Committing: " << local_entry;
+    return response;
+  }
+  if (!server_entry.has_id_string()) {
+    LOG(ERROR) << "Commit response has no id";
+    return CommitResponse::INVALID_MESSAGE;
+  }
+
+  // Implied by the IsValid call above, but here for clarity.
+  DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
+  // Check to see if we've been given the ID of an existing entry. If so treat
+  // it as an error response and retry later.
+  if (pre_commit_id != server_entry.id()) {
+    Entry e(trans, GET_BY_ID, server_entry.id());
+    if (e.good()) {
+      LOG(ERROR) << "Got duplicate id when commiting id: " << pre_commit_id <<
+                 ". Treating as an error return";
+      return CommitResponse::INVALID_MESSAGE;
+    }
+  }
+
+  if (server_entry.version() == 0) {
+    LOG(WARNING) << "Server returned a zero version on a commit response.";
+  }
+
+  ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
+      pre_commit_id, &local_entry, syncing_was_set, deleted_folders);
+  return response;
+}
+
+// Returns the item's post-commit name: the server's override from the entry
+// response if it provided one, otherwise the name we committed.
+const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
+    const sync_pb::SyncEntity& committed_entry,
+    const CommitResponse_EntryResponse& entry_response) {
+  const string& response_name =
+      SyncerProtoUtil::NameFromCommitEntryResponse(entry_response);
+  if (!response_name.empty())
+    return response_name;
+  return SyncerProtoUtil::NameFromSyncEntity(committed_entry);
+}
+
+// Applies the server-assigned version to BASE_VERSION and SERVER_VERSION
+// after a successful commit.  Returns false (leaving the entry untouched)
+// when the returned version is implausible: zero for a freshly created item,
+// or lower than the old base version for an existing one.
+bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
+    const sync_pb::SyncEntity& committed_entry,
+    const CommitResponse_EntryResponse& entry_response,
+    const syncable::Id& pre_commit_id,
+    syncable::MutableEntry* local_entry) {
+  int64 old_version = local_entry->Get(BASE_VERSION);
+  int64 new_version = entry_response.version();
+  bool bad_commit_version = false;
+  if (committed_entry.deleted() &&
+      !local_entry->Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
+    // If the item was deleted, and it's undeletable (uses the client tag),
+    // change the version back to zero. We must set the version to zero so
+    // that the server knows to re-create the item if it gets committed
+    // later for undeletion.
+    new_version = 0;
+  } else if (!pre_commit_id.ServerKnows()) {
+    bad_commit_version = 0 == new_version;
+  } else {
+    bad_commit_version = old_version > new_version;
+  }
+  if (bad_commit_version) {
+    LOG(ERROR) << "Bad version in commit return for " << *local_entry
+               << " new_id:" << entry_response.id() << " new_version:"
+               << entry_response.version();
+    return false;
+  }
+
+  // Update the base version and server version.  The base version must change
+  // here, even if syncing_was_set is false; that's because local changes were
+  // on top of the successfully committed version.
+  local_entry->Put(BASE_VERSION, new_version);
+  DVLOG(1) << "Commit is changing base version of " << local_entry->Get(ID)
+           << " to: " << new_version;
+  local_entry->Put(SERVER_VERSION, new_version);
+  return true;
+}
+
+// If the server returned a different ID than the one we committed with,
+// rewrites the entry's ID (and its children's references to it).  Returns
+// false if the server-assigned ID already belongs to another entry — a clash
+// the caller is expected to have trapped earlier.
+bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
+    const CommitResponse_EntryResponse& entry_response,
+    const syncable::Id& pre_commit_id,
+    syncable::MutableEntry* local_entry) {
+  syncable::WriteTransaction* trans = local_entry->write_transaction();
+  if (entry_response.id() != pre_commit_id) {
+    if (pre_commit_id.ServerKnows()) {
+      // The server can sometimes generate a new ID on commit; for example,
+      // when committing an undeletion.
+      DVLOG(1) << " ID changed while committing an old entry. "
+               << pre_commit_id << " became " << entry_response.id() << ".";
+    }
+    MutableEntry same_id(trans, GET_BY_ID, entry_response.id());
+    // We should trap this before this function.
+    if (same_id.good()) {
+      LOG(ERROR) << "ID clash with id " << entry_response.id()
+                 << " during commit " << same_id;
+      return false;
+    }
+    SyncerUtil::ChangeEntryIDAndUpdateChildren(
+        trans, local_entry, entry_response.id());
+    DVLOG(1) << "Changing ID to " << entry_response.id();
+  }
+  return true;
+}
+
+// Rewrites the SERVER_* fields of |local_entry| to mirror the state the
+// server now holds for this item, combining data from the committed entity
+// and any overriding fields in the entry response.
+void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
+    const sync_pb::SyncEntity& committed_entry,
+    const CommitResponse_EntryResponse& entry_response,
+    syncable::MutableEntry* local_entry) {
+
+  // We just committed an entry successfully, and now we want to make our view
+  // of the server state consistent with the server state. We must be careful;
+  // |entry_response| and |committed_entry| have some identically named
+  // fields.  We only want to consider fields from |committed_entry| when there
+  // is not an overriding field in the |entry_response|.  We do not want to
+  // update the server data from the local data in the entry -- it's possible
+  // that the local data changed during the commit, and even if not, the server
+  // has the last word on the values of several properties.
+
+  local_entry->Put(SERVER_IS_DEL, committed_entry.deleted());
+  if (committed_entry.deleted()) {
+    // Don't clobber any other fields of deleted objects.
+    return;
+  }
+
+  local_entry->Put(syncable::SERVER_IS_DIR,
+                   (committed_entry.folder() ||
+                    committed_entry.bookmarkdata().bookmark_folder()));
+  local_entry->Put(syncable::SERVER_SPECIFICS,
+                   committed_entry.specifics());
+  local_entry->Put(syncable::SERVER_MTIME,
+                   ProtoTimeToTime(committed_entry.mtime()));
+  local_entry->Put(syncable::SERVER_CTIME,
+                   ProtoTimeToTime(committed_entry.ctime()));
+  local_entry->Put(syncable::SERVER_POSITION_IN_PARENT,
+                   entry_response.position_in_parent());
+  // TODO(nick): The server doesn't set entry_response.server_parent_id in
+  // practice; to update SERVER_PARENT_ID appropriately here we'd need to
+  // get the post-commit ID of the parent indicated by
+  // committed_entry.parent_id_string(). That should be inferrable from the
+  // information we have, but it's a bit convoluted to pull it out directly.
+  // Getting this right is important: SERVER_PARENT_ID gets fed back into
+  // old_parent_id during the next commit.
+  local_entry->Put(syncable::SERVER_PARENT_ID,
+                   local_entry->Get(syncable::PARENT_ID));
+  local_entry->Put(syncable::SERVER_NON_UNIQUE_NAME,
+                   GetResultingPostCommitName(committed_entry, entry_response));
+
+  if (local_entry->Get(IS_UNAPPLIED_UPDATE)) {
+    // This shouldn't happen; an unapplied update shouldn't be committed, and
+    // if it were, the commit should have failed.  But if it does happen: we've
+    // just overwritten the update info, so clear the flag.
+    local_entry->Put(IS_UNAPPLIED_UPDATE, false);
+  }
+}
+
+// Applies server-side overrides (name, position) to the client-visible
+// fields.  Only called when the item did not change locally during the
+// commit (see the syncing_was_set check in the caller).
+void ProcessCommitResponseCommand::OverrideClientFieldsAfterCommit(
+    const sync_pb::SyncEntity& committed_entry,
+    const CommitResponse_EntryResponse& entry_response,
+    syncable::MutableEntry* local_entry) {
+  if (committed_entry.deleted()) {
+    // If an entry's been deleted, nothing else matters.
+    DCHECK(local_entry->Get(IS_DEL));
+    return;
+  }
+
+  // Update the name.
+  const string& server_name =
+      GetResultingPostCommitName(committed_entry, entry_response);
+  const string& old_name =
+      local_entry->Get(syncable::NON_UNIQUE_NAME);
+
+  if (!server_name.empty() && old_name != server_name) {
+    DVLOG(1) << "During commit, server changed name: " << old_name
+             << " to new name: " << server_name;
+    local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
+  }
+
+  // The server has the final say on positioning, so apply the absolute
+  // position that it returns.
+  if (entry_response.has_position_in_parent()) {
+    // The SERVER_ field should already have been written.
+    DCHECK_EQ(entry_response.position_in_parent(),
+        local_entry->Get(SERVER_POSITION_IN_PARENT));
+
+    // We just committed successfully, so we assume that the position
+    // value we got applies to the PARENT_ID we submitted.
+    syncable::Id new_prev = local_entry->ComputePrevIdFromServerPosition(
+        local_entry->Get(PARENT_ID));
+    if (!local_entry->PutPredecessor(new_prev)) {
+      // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
+      NOTREACHED();
+    }
+  }
+}
+
+// Folds a SUCCESS commit response into the local entry: updates versions,
+// applies any server-assigned ID, refreshes the SERVER_* mirror fields, and
+// — only if the item didn't change locally mid-commit — clears IS_UNSYNCED
+// and applies server overrides to the client-visible fields.  Deleted
+// folders are recorded in |deleted_folders| for the caller's post-pass.
+void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+    const sync_pb::SyncEntity& committed_entry,
+    const CommitResponse_EntryResponse& entry_response,
+    const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+    bool syncing_was_set, set<syncable::Id>* deleted_folders) {
+  DCHECK(local_entry->Get(IS_UNSYNCED));
+
+  // Update SERVER_VERSION and BASE_VERSION.
+  if (!UpdateVersionAfterCommit(committed_entry, entry_response, pre_commit_id,
+                                local_entry)) {
+    LOG(ERROR) << "Bad version in commit return for " << *local_entry
+               << " new_id:" << entry_response.id() << " new_version:"
+               << entry_response.version();
+    return;
+  }
+
+  // If the server gave us a new ID, apply it.
+  if (!ChangeIdAfterCommit(entry_response, pre_commit_id, local_entry)) {
+    return;
+  }
+
+  // Update our stored copy of the server state.
+  UpdateServerFieldsAfterCommit(committed_entry, entry_response, local_entry);
+
+  // If the item doesn't need to be committed again (an item might need to be
+  // committed again if it changed locally during the commit), we can remove
+  // it from the unsynced list.  Also, we should change the locally-
+  // visible properties to apply any canonicalizations or fixups
+  // that the server introduced during the commit.
+  if (syncing_was_set) {
+    OverrideClientFieldsAfterCommit(committed_entry, entry_response,
+                                    local_entry);
+    local_entry->Put(IS_UNSYNCED, false);
+  }
+
+  // Make a note of any deleted folders, whose children would have
+  // been recursively deleted.
+  // TODO(nick): Here, commit_message.deleted() would be more correct than
+  // local_entry->Get(IS_DEL).  For example, an item could be renamed, and then
+  // deleted during the commit of the rename.  Unit test & fix.
+  if (local_entry->Get(IS_DIR) && local_entry->Get(IS_DEL)) {
+    deleted_folders->insert(local_entry->Get(ID));
+  }
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/process_commit_response_command.h b/sync/engine/process_commit_response_command.h
new file mode 100644
index 0000000..8e288de
--- /dev/null
+++ b/sync/engine/process_commit_response_command.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+#define SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+#pragma once
+
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "sync/engine/model_changing_syncer_command.h"
+#include "sync/engine/syncproto.h"
+
+namespace syncable {
+class Id;
+class WriteTransaction;
+class MutableEntry;
+}
+
+namespace browser_sync {
+
+// A ModelChangingSyncerCommand that interprets the server's response to a
+// commit message: it validates the response structure, then, per model-safe
+// group, applies each entry's outcome (success, conflict, error, retry) back
+// onto the corresponding syncable entry.
+class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
+ public:
+
+  ProcessCommitResponseCommand();
+  virtual ~ProcessCommitResponseCommand();
+
+ protected:
+  // ModelChangingSyncerCommand implementation.
+  virtual std::set<ModelSafeGroup> GetGroupsToChange(
+      const sessions::SyncSession& session) const OVERRIDE;
+  virtual SyncerError ModelNeutralExecuteImpl(
+      sessions::SyncSession* session) OVERRIDE;
+  virtual SyncerError ModelChangingExecuteImpl(
+      sessions::SyncSession* session) OVERRIDE;
+
+ private:
+  // Handles one entry of the commit response; returns the server's response
+  // type (possibly reclassified as INVALID_MESSAGE on malformed input).
+  CommitResponse::ResponseType ProcessSingleCommitResponse(
+      syncable::WriteTransaction* trans,
+      const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
+      const sync_pb::SyncEntity& pb_committed_entry,
+      const syncable::Id& pre_commit_id,
+      std::set<syncable::Id>* conflicting_new_directory_ids,
+      std::set<syncable::Id>* deleted_folders);
+
+  // Actually does the work of execute.
+  SyncerError ProcessCommitResponse(sessions::SyncSession* session);
+
+  // Folds a successful commit response into the local entry (versions, ID,
+  // server fields, and — if unchanged locally — client fields).
+  void ProcessSuccessfulCommitResponse(
+      const sync_pb::SyncEntity& committed_entry,
+      const CommitResponse_EntryResponse& entry_response,
+      const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+      bool syncing_was_set, std::set<syncable::Id>* deleted_folders);
+
+  // Update the BASE_VERSION and SERVER_VERSION, post-commit.
+  // Helper for ProcessSuccessfulCommitResponse.
+  bool UpdateVersionAfterCommit(
+      const sync_pb::SyncEntity& committed_entry,
+      const CommitResponse_EntryResponse& entry_response,
+      const syncable::Id& pre_commit_id,
+      syncable::MutableEntry* local_entry);
+
+  // If the server generated an ID for us during a commit, apply the new ID.
+  // Helper for ProcessSuccessfulCommitResponse.
+  bool ChangeIdAfterCommit(
+      const CommitResponse_EntryResponse& entry_response,
+      const syncable::Id& pre_commit_id,
+      syncable::MutableEntry* local_entry);
+
+  // Update the SERVER_ fields to reflect the server state after committing.
+  // Helper for ProcessSuccessfulCommitResponse.
+  void UpdateServerFieldsAfterCommit(
+      const sync_pb::SyncEntity& committed_entry,
+      const CommitResponse_EntryResponse& entry_response,
+      syncable::MutableEntry* local_entry);
+
+  // The server can override some values during a commit; the overridden values
+  // are returned as fields in the CommitResponse_EntryResponse.  This method
+  // stores the fields back in the client-visible (i.e. not the SERVER_* fields)
+  // fields of the entry.  This should only be done if the item did not change
+  // locally while the commit was in flight.
+  // Helper for ProcessSuccessfulCommitResponse.
+  void OverrideClientFieldsAfterCommit(
+      const sync_pb::SyncEntity& committed_entry,
+      const CommitResponse_EntryResponse& entry_response,
+      syncable::MutableEntry* local_entry);
+
+  // Helper to extract the final name from the protobufs.
+  const std::string& GetResultingPostCommitName(
+      const sync_pb::SyncEntity& committed_entry,
+      const CommitResponse_EntryResponse& entry_response);
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
new file mode 100644
index 0000000..517bc51
--- /dev/null
+++ b/sync/engine/process_commit_response_command_unittest.cc
@@ -0,0 +1,437 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/location.h"
+#include "base/stringprintf.h"
+#include "sync/engine/process_commit_response_command.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using std::string;
+using syncable::BASE_VERSION;
+using syncable::Entry;
+using syncable::IS_DIR;
+using syncable::IS_UNSYNCED;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::NON_UNIQUE_NAME;
+using syncable::ReadTransaction;
+using syncable::UNITTEST;
+using syncable::WriteTransaction;
+
+// A test fixture for tests exercising ProcessCommitResponseCommand.
+class ProcessCommitResponseCommandTest : public SyncerCommandTest {
+ public:
+  virtual void SetUp() {
+    workers()->clear();
+    mutable_routing_info()->clear();
+
+    // Route bookmarks and prefs to GROUP_UI, autofill to GROUP_DB, so tests
+    // can exercise multi-group commit-id projections.
+    workers()->push_back(
+        make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
+    workers()->push_back(
+        make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+    (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+    (*mutable_routing_info())[syncable::PREFERENCES] = GROUP_UI;
+    (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
+
+    commit_set_.reset(new sessions::OrderedCommitSet(routing_info()));
+    SyncerCommandTest::SetUp();
+    // Need to explicitly use this-> to avoid obscure template
+    // warning.
+    this->ExpectNoGroupsToChange(command_);
+  }
+
+ protected:
+
+  ProcessCommitResponseCommandTest()
+      : next_old_revision_(1),
+        next_new_revision_(4000),
+        next_server_position_(10000) {
+  }
+
+  // Asserts an entry's name, model type, parent, and that it has a positive
+  // server base revision.
+  void CheckEntry(Entry* e, const std::string& name,
+                  syncable::ModelType model_type, const Id& parent_id) {
+    EXPECT_TRUE(e->good());
+    ASSERT_EQ(name, e->Get(NON_UNIQUE_NAME));
+    ASSERT_EQ(model_type, e->GetModelType());
+    ASSERT_EQ(parent_id, e->Get(syncable::PARENT_ID));
+    ASSERT_LT(0, e->Get(BASE_VERSION))
+        << "Item should have a valid (positive) server base revision";
+  }
+
+  // Create an unsynced item in the database.  If item_id is a local ID, it
+  // will be treated as a create-new.  Otherwise, if it's a server ID, we'll
+  // fake the server data so that it looks like it exists on the server.
+  // Returns the metahandle of the created item in |metahandle_out| if not NULL.
+  void CreateUnsyncedItem(const Id& item_id,
+                          const Id& parent_id,
+                          const string& name,
+                          bool is_folder,
+                          syncable::ModelType model_type,
+                          int64* metahandle_out) {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    Id predecessor_id;
+    ASSERT_TRUE(
+        directory()->GetLastChildIdForTest(&trans, parent_id, &predecessor_id));
+    MutableEntry entry(&trans, syncable::CREATE, parent_id, name);
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::ID, item_id);
+    entry.Put(syncable::BASE_VERSION,
+              item_id.ServerKnows() ? next_old_revision_++ : 0);
+    entry.Put(syncable::IS_UNSYNCED, true);
+    entry.Put(syncable::IS_DIR, is_folder);
+    entry.Put(syncable::IS_DEL, false);
+    entry.Put(syncable::PARENT_ID, parent_id);
+    entry.PutPredecessor(predecessor_id);
+    sync_pb::EntitySpecifics default_specifics;
+    syncable::AddDefaultFieldValue(model_type, &default_specifics);
+    entry.Put(syncable::SPECIFICS, default_specifics);
+    if (item_id.ServerKnows()) {
+      entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
+      entry.Put(syncable::SERVER_IS_DIR, is_folder);
+      entry.Put(syncable::SERVER_PARENT_ID, parent_id);
+      entry.Put(syncable::SERVER_IS_DEL, false);
+    }
+    if (metahandle_out)
+      *metahandle_out = entry.Get(syncable::META_HANDLE);
+  }
+
+  // Create a new unsynced item in the database, and synthesize a commit
+  // record and a commit response for it in the syncer session.  If item_id
+  // is a local ID, the item will be a create operation.  Otherwise, it
+  // will be an edit.
+  void CreateUnprocessedCommitResult(const Id& item_id,
+                                     const Id& parent_id,
+                                     const string& name,
+                                     syncable::ModelType model_type) {
+    sessions::StatusController* sync_state =
+        session()->mutable_status_controller();
+    bool is_folder = true;
+    int64 metahandle = 0;
+    CreateUnsyncedItem(item_id, parent_id, name, is_folder, model_type,
+                       &metahandle);
+
+    // ProcessCommitResponseCommand consumes commit_ids from the session
+    // state, so we need to update that.  O(n^2) because it's a test.
+    commit_set_->AddCommitItem(metahandle, item_id, model_type);
+    sync_state->set_commit_set(*commit_set_.get());
+
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::SYNCING, true);
+
+    // ProcessCommitResponseCommand looks at both the commit message as well
+    // as the commit response, so we need to synthesize both here.
+    sync_pb::ClientToServerMessage* commit =
+        sync_state->mutable_commit_message();
+    commit->set_message_contents(ClientToServerMessage::COMMIT);
+    SyncEntity* entity = static_cast<SyncEntity*>(
+        commit->mutable_commit()->add_entries());
+    entity->set_non_unique_name(name);
+    entity->set_folder(is_folder);
+    entity->set_parent_id(parent_id);
+    entity->set_version(entry.Get(syncable::BASE_VERSION));
+    entity->mutable_specifics()->CopyFrom(entry.Get(syncable::SPECIFICS));
+    entity->set_id(item_id);
+
+    sync_pb::ClientToServerResponse* response =
+        sync_state->mutable_commit_response();
+    response->set_error_code(sync_pb::SyncEnums::SUCCESS);
+    sync_pb::CommitResponse_EntryResponse* entry_response =
+        response->mutable_commit()->add_entryresponse();
+    entry_response->set_response_type(CommitResponse::SUCCESS);
+    entry_response->set_name("Garbage.");
+    entry_response->set_non_unique_name(entity->name());
+    if (item_id.ServerKnows())
+      entry_response->set_id_string(entity->id_string());
+    else
+      entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
+    entry_response->set_version(next_new_revision_++);
+    entry_response->set_position_in_parent(next_server_position_++);
+
+    // If the ID of our parent item committed earlier in the batch was
+    // rewritten, rewrite it in the entry response.  This matches
+    // the server behavior.
+    entry_response->set_parent_id_string(entity->parent_id_string());
+    for (int i = 0; i < commit->commit().entries_size(); ++i) {
+      if (commit->commit().entries(i).id_string() ==
+          entity->parent_id_string()) {
+        entry_response->set_parent_id_string(
+            response->commit().entryresponse(i).id_string());
+      }
+    }
+  }
+
+  // Overwrites the response type of the most recently added entry response.
+  void SetLastErrorCode(CommitResponse::ResponseType error_code) {
+    sessions::StatusController* sync_state =
+        session()->mutable_status_controller();
+    sync_pb::ClientToServerResponse* response =
+        sync_state->mutable_commit_response();
+    sync_pb::CommitResponse_EntryResponse* entry_response =
+        response->mutable_commit()->mutable_entryresponse(
+            response->mutable_commit()->entryresponse_size() - 1);
+    entry_response->set_response_type(error_code);
+  }
+
+  ProcessCommitResponseCommand command_;  // Command under test.
+  TestIdFactory id_factory_;              // Generates local/server test IDs.
+  scoped_ptr<sessions::OrderedCommitSet> commit_set_;
+ private:
+  // Monotonic counters used to synthesize plausible server values.
+  int64 next_old_revision_;
+  int64 next_new_revision_;
+  int64 next_server_position_;
+  DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommandTest);
+};
+
+// Commits a mix of bookmark, preference, and autofill items (spanning
+// GROUP_UI and GROUP_DB) in one batch, then verifies that the command
+// processed each group's projection: local IDs were replaced by server IDs
+// and sibling ordering was preserved.
+TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
+  Id bookmark_folder_id = id_factory_.NewLocalId();
+  Id bookmark_id1 = id_factory_.NewLocalId();
+  Id bookmark_id2 = id_factory_.NewLocalId();
+  Id pref_id1 = id_factory_.NewLocalId(), pref_id2 = id_factory_.NewLocalId();
+  Id autofill_id1 = id_factory_.NewLocalId();
+  Id autofill_id2 = id_factory_.NewLocalId();
+  CreateUnprocessedCommitResult(bookmark_folder_id, id_factory_.root(),
+                                "A bookmark folder", syncable::BOOKMARKS);
+  CreateUnprocessedCommitResult(bookmark_id1, bookmark_folder_id,
+                                "bookmark 1", syncable::BOOKMARKS);
+  CreateUnprocessedCommitResult(bookmark_id2, bookmark_folder_id,
+                                "bookmark 2", syncable::BOOKMARKS);
+  CreateUnprocessedCommitResult(pref_id1, id_factory_.root(),
+                                "Pref 1", syncable::PREFERENCES);
+  CreateUnprocessedCommitResult(pref_id2, id_factory_.root(),
+                                "Pref 2", syncable::PREFERENCES);
+  CreateUnprocessedCommitResult(autofill_id1, id_factory_.root(),
+                                "Autofill 1", syncable::AUTOFILL);
+  CreateUnprocessedCommitResult(autofill_id2, id_factory_.root(),
+                                "Autofill 2", syncable::AUTOFILL);
+
+  ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
+  command_.ExecuteImpl(session());
+
+  // The folder's local ID must have been rewritten to a server ID.
+  ReadTransaction trans(FROM_HERE, directory());
+  Id new_fid;
+  ASSERT_TRUE(directory()->GetFirstChildId(
+          &trans, id_factory_.root(), &new_fid));
+  ASSERT_FALSE(new_fid.IsRoot());
+  EXPECT_TRUE(new_fid.ServerKnows());
+  EXPECT_FALSE(bookmark_folder_id.ServerKnows());
+  EXPECT_FALSE(new_fid == bookmark_folder_id);
+  Entry b_folder(&trans, syncable::GET_BY_ID, new_fid);
+  ASSERT_TRUE(b_folder.good());
+  ASSERT_EQ("A bookmark folder", b_folder.Get(NON_UNIQUE_NAME))
+      << "Name of bookmark folder should not change.";
+  ASSERT_LT(0, b_folder.Get(BASE_VERSION))
+      << "Bookmark folder should have a valid (positive) server base revision";
+
+  // Look at the two bookmarks in bookmark_folder.
+  Id cid;
+  ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
+  Entry b1(&trans, syncable::GET_BY_ID, cid);
+  Entry b2(&trans, syncable::GET_BY_ID, b1.Get(syncable::NEXT_ID));
+  CheckEntry(&b1, "bookmark 1", syncable::BOOKMARKS, new_fid);
+  CheckEntry(&b2, "bookmark 2", syncable::BOOKMARKS, new_fid);
+  ASSERT_TRUE(b2.Get(syncable::NEXT_ID).IsRoot());
+
+  // Look at the prefs and autofill items.
+  Entry p1(&trans, syncable::GET_BY_ID, b_folder.Get(syncable::NEXT_ID));
+  Entry p2(&trans, syncable::GET_BY_ID, p1.Get(syncable::NEXT_ID));
+  CheckEntry(&p1, "Pref 1", syncable::PREFERENCES, id_factory_.root());
+  CheckEntry(&p2, "Pref 2", syncable::PREFERENCES, id_factory_.root());
+
+  Entry a1(&trans, syncable::GET_BY_ID, p2.Get(syncable::NEXT_ID));
+  Entry a2(&trans, syncable::GET_BY_ID, a1.Get(syncable::NEXT_ID));
+  CheckEntry(&a1, "Autofill 1", syncable::AUTOFILL, id_factory_.root());
+  CheckEntry(&a2, "Autofill 2", syncable::AUTOFILL, id_factory_.root());
+  ASSERT_TRUE(a2.Get(syncable::NEXT_ID).IsRoot());
+}
+
+// In this test, we test processing a commit response for a commit batch that
+// includes a newly created folder and some (but not all) of its children.
+// In particular, the folder has 50 children, which alternate between being
+// new items and preexisting items. This mixture of new and old is meant to
+// be a torture test of the code in ProcessCommitResponseCommand that changes
+// an item's ID from a local ID to a server-generated ID on the first commit.
+// We commit only the first 25 children in the sibling order, leaving the
+// second 25 children as unsynced items. http://crbug.com/33081 describes
+// how this scenario used to fail, reversing the order for the second half
+// of the children.
+TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
+ // Create the parent folder, a new item whose ID will change on commit.
+ Id folder_id = id_factory_.NewLocalId();
+ CreateUnprocessedCommitResult(folder_id, id_factory_.root(), "A",
+ syncable::BOOKMARKS);
+
+ // Verify that the item is reachable.
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Id child_id;
+ ASSERT_TRUE(directory()->GetFirstChildId(
+ &trans, id_factory_.root(), &child_id));
+ ASSERT_EQ(folder_id, child_id);
+ }
+
+ // The first 25 children of the parent folder will be part of the commit
+ // batch.
+ int batch_size = 25;
+ int i = 0;
+ for (; i < batch_size; ++i) {
+ // Alternate between new and old child items, just for kicks.
+ Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
+ CreateUnprocessedCommitResult(
+ id, folder_id, base::StringPrintf("Item %d", i), syncable::BOOKMARKS);
+ }
+ // The second 25 children will be unsynced items but NOT part of the commit
+ // batch. When the ID of the parent folder changes during the commit,
+ // these items PARENT_ID should be updated, and their ordering should be
+ // preserved.
+ for (; i < 2*batch_size; ++i) {
+ // Alternate between new and old child items, just for kicks.
+ Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
+ CreateUnsyncedItem(id, folder_id, base::StringPrintf("Item %d", i),
+ false, syncable::BOOKMARKS, NULL);
+ }
+
+ // Process the commit response for the parent folder and the first
+ // 25 items. This should apply the values indicated by
+ // each CommitResponse_EntryResponse to the syncable Entries. All new
+ // items in the commit batch should have their IDs changed to server IDs.
+ ExpectGroupToChange(command_, GROUP_UI);
+ command_.ExecuteImpl(session());
+
+ ReadTransaction trans(FROM_HERE, directory());
+ // Lookup the parent folder by finding a child of the root. We can't use
+ // folder_id here, because it changed during the commit.
+ Id new_fid;
+ ASSERT_TRUE(directory()->GetFirstChildId(
+ &trans, id_factory_.root(), &new_fid));
+ ASSERT_FALSE(new_fid.IsRoot());
+ EXPECT_TRUE(new_fid.ServerKnows());
+ EXPECT_FALSE(folder_id.ServerKnows());
+ EXPECT_TRUE(new_fid != folder_id);
+ Entry parent(&trans, syncable::GET_BY_ID, new_fid);
+ ASSERT_TRUE(parent.good());
+ ASSERT_EQ("A", parent.Get(NON_UNIQUE_NAME))
+ << "Name of parent folder should not change.";
+ ASSERT_LT(0, parent.Get(BASE_VERSION))
+ << "Parent should have a valid (positive) server base revision";
+
+ Id cid;
+ ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
+ int child_count = 0;
+ // Now loop over all the children of the parent folder, verifying
+ // that they are in their original order by checking to see that their
+ // names are still sequential.
+ while (!cid.IsRoot()) {
+ SCOPED_TRACE(::testing::Message("Examining item #") << child_count);
+ Entry c(&trans, syncable::GET_BY_ID, cid);
+ DCHECK(c.good());
+ ASSERT_EQ(base::StringPrintf("Item %d", child_count),
+ c.Get(NON_UNIQUE_NAME));
+ ASSERT_EQ(new_fid, c.Get(syncable::PARENT_ID));
+ if (child_count < batch_size) {
+ ASSERT_FALSE(c.Get(IS_UNSYNCED)) << "Item should be committed";
+ ASSERT_TRUE(cid.ServerKnows());
+ ASSERT_LT(0, c.Get(BASE_VERSION));
+ } else {
+ ASSERT_TRUE(c.Get(IS_UNSYNCED)) << "Item should be uncommitted";
+ // We alternated between creates and edits; double check that these items
+ // have been preserved.
+ if (child_count % 4 < 2) {
+ ASSERT_FALSE(cid.ServerKnows());
+ ASSERT_GE(0, c.Get(BASE_VERSION));
+ } else {
+ ASSERT_TRUE(cid.ServerKnows());
+ ASSERT_LT(0, c.Get(BASE_VERSION));
+ }
+ }
+ cid = c.Get(syncable::NEXT_ID);
+ child_count++;
+ }
+ ASSERT_EQ(batch_size*2, child_count)
+ << "Too few or too many children in parent folder after commit.";
+}
+
+// This test fixture runs across a Cartesian product of per-type fail/success
+// possibilities.
+enum {
+ TEST_PARAM_BOOKMARK_ENABLE_BIT,
+ TEST_PARAM_AUTOFILL_ENABLE_BIT,
+ TEST_PARAM_BIT_COUNT
+};
+class MixedResult :
+ public ProcessCommitResponseCommandTest,
+ public ::testing::WithParamInterface<int> {
+ protected:
+ bool ShouldFailBookmarkCommit() {
+ return (GetParam() & (1 << TEST_PARAM_BOOKMARK_ENABLE_BIT)) == 0;
+ }
+ bool ShouldFailAutofillCommit() {
+ return (GetParam() & (1 << TEST_PARAM_AUTOFILL_ENABLE_BIT)) == 0;
+ }
+};
+INSTANTIATE_TEST_CASE_P(ProcessCommitResponse,
+ MixedResult,
+ testing::Range(0, 1 << TEST_PARAM_BIT_COUNT));
+
+// This test commits 2 items (one bookmark, one autofill) and validates what
+// happens to the extensions activity records. Commits could fail or succeed,
+// depending on the test parameter.
+TEST_P(MixedResult, ExtensionActivity) {
+ EXPECT_NE(routing_info().find(syncable::BOOKMARKS)->second,
+ routing_info().find(syncable::AUTOFILL)->second)
+ << "To not be lame, this test requires more than one active group.";
+
+ // Bookmark item setup.
+ CreateUnprocessedCommitResult(id_factory_.NewServerId(),
+ id_factory_.root(), "Some bookmark", syncable::BOOKMARKS);
+ if (ShouldFailBookmarkCommit())
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+ // Autofill item setup.
+ CreateUnprocessedCommitResult(id_factory_.NewServerId(),
+ id_factory_.root(), "Some autofill", syncable::AUTOFILL);
+ if (ShouldFailAutofillCommit())
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+
+ // Put some extensions activity in the session.
+ {
+ ExtensionsActivityMonitor::Records* records =
+ session()->mutable_extensions_activity();
+ (*records)["ABC"].extension_id = "ABC";
+ (*records)["ABC"].bookmark_write_count = 2049U;
+ (*records)["xyz"].extension_id = "xyz";
+ (*records)["xyz"].bookmark_write_count = 4U;
+ }
+ ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
+ command_.ExecuteImpl(session());
+
+ ExtensionsActivityMonitor::Records final_monitor_records;
+ context()->extensions_monitor()->GetAndClearRecords(&final_monitor_records);
+
+ if (ShouldFailBookmarkCommit()) {
+ ASSERT_EQ(2U, final_monitor_records.size())
+ << "Should restore records after unsuccessful bookmark commit.";
+ EXPECT_EQ("ABC", final_monitor_records["ABC"].extension_id);
+ EXPECT_EQ("xyz", final_monitor_records["xyz"].extension_id);
+ EXPECT_EQ(2049U, final_monitor_records["ABC"].bookmark_write_count);
+ EXPECT_EQ(4U, final_monitor_records["xyz"].bookmark_write_count);
+ } else {
+ EXPECT_TRUE(final_monitor_records.empty())
+ << "Should not restore records after successful bookmark commit.";
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
new file mode 100644
index 0000000..7262831
--- /dev/null
+++ b/sync/engine/process_updates_command.cc
@@ -0,0 +1,183 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/process_updates_command.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/location.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/cryptographer.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::StatusController;
+using sessions::UpdateProgress;
+
+ProcessUpdatesCommand::ProcessUpdatesCommand() {}
+ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
+
+std::set<ModelSafeGroup> ProcessUpdatesCommand::GetGroupsToChange(
+ const sessions::SyncSession& session) const {
+ return session.GetEnabledGroupsWithVerifiedUpdates();
+}
+
+SyncerError ProcessUpdatesCommand::ModelChangingExecuteImpl(
+ SyncSession* session) {
+ syncable::Directory* dir = session->context()->directory();
+
+ const sessions::UpdateProgress* progress =
+ session->status_controller().update_progress();
+ if (!progress)
+ return SYNCER_OK; // Nothing to do.
+
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+ vector<sessions::VerifiedUpdate>::const_iterator it;
+ for (it = progress->VerifiedUpdatesBegin();
+ it != progress->VerifiedUpdatesEnd();
+ ++it) {
+ const sync_pb::SyncEntity& update = it->second;
+
+ if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
+ continue;
+ switch (ProcessUpdate(update,
+ dir->GetCryptographer(&trans),
+ &trans)) {
+ case SUCCESS_PROCESSED:
+ case SUCCESS_STORED:
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ }
+
+ StatusController* status = session->mutable_status_controller();
+ status->mutable_update_progress()->ClearVerifiedUpdates();
+ return SYNCER_OK;
+}
+
+namespace {
+// Returns true if the entry is still ok to process.
+bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
+ syncable::MutableEntry* same_id) {
+
+ const bool deleted = entry.has_deleted() && entry.deleted();
+ const bool is_directory = entry.IsFolder();
+ const syncable::ModelType model_type = entry.GetModelType();
+
+ return VERIFY_SUCCESS == SyncerUtil::VerifyUpdateConsistency(trans,
+ entry,
+ same_id,
+ deleted,
+ is_directory,
+ model_type);
+}
+} // namespace
+
+// Process a single update. Will avoid touching global state.
+ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+ const sync_pb::SyncEntity& proto_update,
+ const Cryptographer* cryptographer,
+ syncable::WriteTransaction* const trans) {
+
+ const SyncEntity& update = *static_cast<const SyncEntity*>(&proto_update);
+ syncable::Id server_id = update.id();
+ const std::string name = SyncerProtoUtil::NameFromSyncEntity(update);
+
+  // Look to see if there's a local item that should receive this update,
+ // maybe due to a duplicate client tag or a lost commit response.
+ syncable::Id local_id = SyncerUtil::FindLocalIdToUpdate(trans, update);
+
+ // FindLocalEntryToUpdate has veto power.
+ if (local_id.IsNull()) {
+ return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ }
+
+ SyncerUtil::CreateNewEntry(trans, local_id);
+
+  // We take a two-step approach. First we store the entry's data in the
+  // server fields of a local entry and then move the data to the local fields.
+ syncable::MutableEntry target_entry(trans, syncable::GET_BY_ID, local_id);
+
+ // We need to run the Verify checks again; the world could have changed
+ // since VerifyUpdatesCommand.
+ if (!ReverifyEntry(trans, update, &target_entry)) {
+ return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ }
+
+ // If we're repurposing an existing local entry with a new server ID,
+ // change the ID now, after we're sure that the update can succeed.
+ if (local_id != server_id) {
+ DCHECK(!update.deleted());
+ SyncerUtil::ChangeEntryIDAndUpdateChildren(trans, &target_entry,
+ server_id);
+ // When IDs change, versions become irrelevant. Forcing BASE_VERSION
+ // to zero would ensure that this update gets applied, but would indicate
+ // creation or undeletion if it were committed that way. Instead, prefer
+ // forcing BASE_VERSION to entry.version() while also forcing
+ // IS_UNAPPLIED_UPDATE to true. If the item is UNSYNCED, it's committable
+ // from the new state; it may commit before the conflict resolver gets
+ // a crack at it.
+ if (target_entry.Get(syncable::IS_UNSYNCED) ||
+ target_entry.Get(syncable::BASE_VERSION) > 0) {
+      // If either of these conditions is met, then we can expect valid client
+ // fields for this entry. When BASE_VERSION is positive, consistency is
+ // enforced on the client fields at update-application time. Otherwise,
+ // we leave the BASE_VERSION field alone; it'll get updated the first time
+ // we successfully apply this update.
+ target_entry.Put(syncable::BASE_VERSION, update.version());
+ }
+ // Force application of this update, no matter what.
+ target_entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ }
+
+ // If this is a newly received undecryptable update, and the only thing that
+ // has changed are the specifics, store the original decryptable specifics,
+ // (on which any current or future local changes are based) before we
+ // overwrite SERVER_SPECIFICS.
+ // MTIME, CTIME, and NON_UNIQUE_NAME are not enforced.
+ if (!update.deleted() && !target_entry.Get(syncable::SERVER_IS_DEL) &&
+ (update.parent_id() == target_entry.Get(syncable::SERVER_PARENT_ID)) &&
+ (update.position_in_parent() ==
+ target_entry.Get(syncable::SERVER_POSITION_IN_PARENT)) &&
+ update.has_specifics() && update.specifics().has_encrypted() &&
+ !cryptographer->CanDecrypt(update.specifics().encrypted())) {
+ sync_pb::EntitySpecifics prev_specifics =
+ target_entry.Get(syncable::SERVER_SPECIFICS);
+ // We only store the old specifics if they were decryptable and applied and
+ // there is no BASE_SERVER_SPECIFICS already. Else do nothing.
+ if (!target_entry.Get(syncable::IS_UNAPPLIED_UPDATE) &&
+ !syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics(
+ target_entry.Get(syncable::BASE_SERVER_SPECIFICS))) &&
+ (!prev_specifics.has_encrypted() ||
+ cryptographer->CanDecrypt(prev_specifics.encrypted()))) {
+ DVLOG(2) << "Storing previous server specifcs: "
+ << prev_specifics.SerializeAsString();
+ target_entry.Put(syncable::BASE_SERVER_SPECIFICS, prev_specifics);
+ }
+ } else if (syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics(
+ target_entry.Get(syncable::BASE_SERVER_SPECIFICS)))) {
+ // We have a BASE_SERVER_SPECIFICS, but a subsequent non-specifics-only
+ // change arrived. As a result, we can't use the specifics alone to detect
+ // changes, so we clear BASE_SERVER_SPECIFICS.
+ target_entry.Put(syncable::BASE_SERVER_SPECIFICS,
+ sync_pb::EntitySpecifics());
+ }
+
+ SyncerUtil::UpdateServerFieldsFromUpdate(&target_entry, update, name);
+
+ return SUCCESS_PROCESSED;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/process_updates_command.h b/sync/engine/process_updates_command.h
new file mode 100644
index 0000000..f5f430f
--- /dev/null
+++ b/sync/engine/process_updates_command.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+#define SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/model_changing_syncer_command.h"
+#include "sync/engine/syncer_types.h"
+
+namespace syncable {
+class WriteTransaction;
+}
+
+namespace sync_pb {
+class SyncEntity;
+}
+
+namespace browser_sync {
+
+class Cryptographer;
+
+// A syncer command for processing updates.
+//
+// Preconditions - updates in the SyncerSession have been downloaded
+// and verified.
+//
+// Postconditions - All of the verified SyncEntity data will be copied to
+// the server fields of the corresponding syncable entries.
+// TODO(tim): This should not be ModelChanging (bug 36592).
+class ProcessUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ ProcessUpdatesCommand();
+ virtual ~ProcessUpdatesCommand();
+
+ protected:
+ // ModelChangingSyncerCommand implementation.
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const OVERRIDE;
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE;
+
+ private:
+ ServerUpdateProcessingResult ProcessUpdate(
+ const sync_pb::SyncEntity& proto_update,
+ const Cryptographer* cryptographer,
+ syncable::WriteTransaction* const trans);
+ DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
diff --git a/sync/engine/process_updates_command_unittest.cc b/sync/engine/process_updates_command_unittest.cc
new file mode 100644
index 0000000..1c6a786
--- /dev/null
+++ b/sync/engine/process_updates_command_unittest.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "sync/engine/process_updates_command.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+namespace {
+
+class ProcessUpdatesCommandTest : public SyncerCommandTest {
+ protected:
+ ProcessUpdatesCommandTest() {}
+ virtual ~ProcessUpdatesCommandTest() {}
+
+ virtual void SetUp() {
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
+ (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+ (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
+ SyncerCommandTest::SetUp();
+ }
+
+ ProcessUpdatesCommand command_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommandTest);
+};
+
+TEST_F(ProcessUpdatesCommandTest, GetGroupsToChange) {
+ ExpectNoGroupsToChange(command_);
+ // Add a verified update for GROUP_DB.
+ session()->mutable_status_controller()->
+ GetUnrestrictedMutableUpdateProgressForTest(GROUP_DB)->
+ AddVerifyResult(VerifyResult(), sync_pb::SyncEntity());
+ ExpectGroupToChange(command_, GROUP_DB);
+}
+
+} // namespace
+
+} // namespace browser_sync
diff --git a/sync/engine/resolve_conflicts_command.cc b/sync/engine/resolve_conflicts_command.cc
new file mode 100644
index 0000000..457e4b7
--- /dev/null
+++ b/sync/engine/resolve_conflicts_command.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/resolve_conflicts_command.h"
+
+#include "sync/engine/conflict_resolver.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+ResolveConflictsCommand::ResolveConflictsCommand() {}
+ResolveConflictsCommand::~ResolveConflictsCommand() {}
+
+std::set<ModelSafeGroup> ResolveConflictsCommand::GetGroupsToChange(
+ const sessions::SyncSession& session) const {
+ return session.GetEnabledGroupsWithConflicts();
+}
+
+SyncerError ResolveConflictsCommand::ModelChangingExecuteImpl(
+ sessions::SyncSession* session) {
+ ConflictResolver* resolver = session->context()->resolver();
+ DCHECK(resolver);
+
+ syncable::Directory* dir = session->context()->directory();
+ sessions::StatusController* status = session->mutable_status_controller();
+ const sessions::ConflictProgress* progress = status->conflict_progress();
+ if (!progress)
+ return SYNCER_OK; // Nothing to do.
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+ const Cryptographer* cryptographer = dir->GetCryptographer(&trans);
+ status->update_conflicts_resolved(
+ resolver->ResolveConflicts(&trans, cryptographer, *progress, status));
+
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/resolve_conflicts_command.h b/sync/engine/resolve_conflicts_command.h
new file mode 100644
index 0000000..8c2c87e4
--- /dev/null
+++ b/sync/engine/resolve_conflicts_command.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+#define SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "sync/engine/model_changing_syncer_command.h"
+
+namespace browser_sync {
+
+class ResolveConflictsCommand : public ModelChangingSyncerCommand {
+ public:
+ ResolveConflictsCommand();
+ virtual ~ResolveConflictsCommand();
+
+ protected:
+ // ModelChangingSyncerCommand implementation.
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const OVERRIDE;
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ResolveConflictsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
diff --git a/sync/engine/resolve_conflicts_command_unittest.cc b/sync/engine/resolve_conflicts_command_unittest.cc
new file mode 100644
index 0000000..079683a
--- /dev/null
+++ b/sync/engine/resolve_conflicts_command_unittest.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "sync/engine/resolve_conflicts_command.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+namespace {
+
+class ResolveConflictsCommandTest : public SyncerCommandTest {
+ protected:
+ ResolveConflictsCommandTest() {}
+ virtual ~ResolveConflictsCommandTest() {}
+
+ virtual void SetUp() {
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+ workers()->push_back(
+ make_scoped_refptr(new FakeModelWorker(GROUP_PASSWORD)));
+ (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+ (*mutable_routing_info())[syncable::PASSWORDS] = GROUP_PASSWORD;
+ SyncerCommandTest::SetUp();
+ }
+
+ ResolveConflictsCommand command_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ResolveConflictsCommandTest);
+};
+
+TEST_F(ResolveConflictsCommandTest, GetGroupsToChange) {
+ ExpectNoGroupsToChange(command_);
+ // Put GROUP_PASSWORD in conflict.
+ session()->mutable_status_controller()->
+ GetUnrestrictedMutableConflictProgressForTest(GROUP_PASSWORD)->
+ AddSimpleConflictingItemById(syncable::Id());
+ ExpectGroupToChange(command_, GROUP_PASSWORD);
+}
+
+} // namespace
+
+} // namespace browser_sync
diff --git a/sync/engine/store_timestamps_command.cc b/sync/engine/store_timestamps_command.cc
new file mode 100644
index 0000000..e35e6c7
--- /dev/null
+++ b/sync/engine/store_timestamps_command.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/store_timestamps_command.h"
+
+#include "sync/sessions/status_controller.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+StoreTimestampsCommand::StoreTimestampsCommand() {}
+StoreTimestampsCommand::~StoreTimestampsCommand() {}
+
+SyncerError StoreTimestampsCommand::ExecuteImpl(
+ sessions::SyncSession* session) {
+ syncable::Directory* dir = session->context()->directory();
+
+ const GetUpdatesResponse& updates =
+ session->status_controller().updates_response().get_updates();
+
+ sessions::StatusController* status = session->mutable_status_controller();
+
+ // Update the progress marker tokens from the server result. If a marker
+ // was omitted for any one type, that indicates no change from the previous
+ // state.
+ syncable::ModelTypeSet forward_progress_types;
+ for (int i = 0; i < updates.new_progress_marker_size(); ++i) {
+ syncable::ModelType model =
+ syncable::GetModelTypeFromSpecificsFieldNumber(
+ updates.new_progress_marker(i).data_type_id());
+ if (model == syncable::UNSPECIFIED || model == syncable::TOP_LEVEL_FOLDER) {
+ NOTREACHED() << "Unintelligible server response.";
+ continue;
+ }
+ forward_progress_types.Put(model);
+ dir->SetDownloadProgress(model, updates.new_progress_marker(i));
+ }
+ DCHECK(!forward_progress_types.Empty() ||
+ updates.changes_remaining() == 0);
+ if (VLOG_IS_ON(1)) {
+ DVLOG_IF(1, !forward_progress_types.Empty())
+ << "Get Updates got new progress marker for types: "
+ << syncable::ModelTypeSetToString(forward_progress_types)
+ << " out of possible: "
+ << syncable::ModelTypeSetToString(status->updates_request_types());
+ }
+ if (updates.has_changes_remaining()) {
+ int64 changes_left = updates.changes_remaining();
+ DVLOG(1) << "Changes remaining: " << changes_left;
+ status->set_num_server_changes_remaining(changes_left);
+ }
+
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/store_timestamps_command.h b/sync/engine/store_timestamps_command.h
new file mode 100644
index 0000000..ec57052
--- /dev/null
+++ b/sync/engine/store_timestamps_command.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
+#define SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/engine/syncer_command.h"
+#include "sync/engine/syncer_types.h"
+
+namespace browser_sync {
+
+// A syncer command that extracts the changelog timestamp information from
+// a GetUpdatesResponse (fetched in DownloadUpdatesCommand) and stores
+// it in the directory. This is meant to run immediately after
+// ProcessUpdatesCommand.
+//
+// Preconditions - all updates in the SyncerSession have been stored in the
+// database, meaning it is safe to update the persisted
+// timestamps.
+//
+// Postconditions - The next_timestamp returned by the server will be
+// saved into the directory (where it will be used
+// the next time that DownloadUpdatesCommand runs).
+class StoreTimestampsCommand : public SyncerCommand {
+ public:
+ StoreTimestampsCommand();
+ virtual ~StoreTimestampsCommand();
+
+ // SyncerCommand implementation.
+ virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StoreTimestampsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_STORE_TIMESTAMPS_COMMAND_H_
diff --git a/sync/engine/sync_scheduler.cc b/sync/engine/sync_scheduler.cc
new file mode 100644
index 0000000..f03b727
--- /dev/null
+++ b/sync/engine/sync_scheduler.cc
@@ -0,0 +1,1233 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/sync_scheduler.h"
+
+#include <algorithm>
+#include <cstring>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/rand_util.h"
+#include "sync/engine/syncer.h"
+#include "sync/protocol/proto_enum_conversions.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/util/data_type_histogram.h"
+#include "sync/util/logging.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::SyncSessionSnapshot;
+using sessions::SyncSourceInfo;
+using syncable::ModelTypeSet;
+using syncable::ModelTypeSetToString;
+using syncable::ModelTypePayloadMap;
+using sync_pb::GetUpdatesCallerInfo;
+
+namespace {
+bool ShouldRequestEarlyExit(
+ const browser_sync::SyncProtocolError& error) {
+ switch (error.error_type) {
+ case browser_sync::SYNC_SUCCESS:
+ case browser_sync::MIGRATION_DONE:
+ case browser_sync::THROTTLED:
+ case browser_sync::TRANSIENT_ERROR:
+ return false;
+ case browser_sync::NOT_MY_BIRTHDAY:
+ case browser_sync::CLEAR_PENDING:
+ // If we send terminate sync early then |sync_cycle_ended| notification
+ // would not be sent. If there were no actions then |ACTIONABLE_ERROR|
+      // notification wouldn't be sent either. Then the UI layer would be left
+ // waiting forever. So assert we would send something.
+ DCHECK(error.action != browser_sync::UNKNOWN_ACTION);
+ return true;
+ case browser_sync::INVALID_CREDENTIAL:
+      // The notification for this is handled by |PostAndProcessHeaders|.
+      // Server does not have to send any action for this.
+ return true;
+ // Make the default a NOTREACHED. So if a new error is introduced we
+ // think about its expected functionality.
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+bool IsActionableError(
+ const browser_sync::SyncProtocolError& error) {
+ return (error.action != browser_sync::UNKNOWN_ACTION);
+}
+} // namespace
+
+SyncScheduler::DelayProvider::DelayProvider() {}
+SyncScheduler::DelayProvider::~DelayProvider() {}
+
+SyncScheduler::WaitInterval::WaitInterval()
+ : mode(UNKNOWN),
+ had_nudge(false) {
+}
+
+SyncScheduler::WaitInterval::~WaitInterval() {}
+
+#define ENUM_CASE(x) case x: return #x; break;
+
+const char* SyncScheduler::WaitInterval::GetModeString(Mode mode) {
+ switch (mode) {
+ ENUM_CASE(UNKNOWN);
+ ENUM_CASE(EXPONENTIAL_BACKOFF);
+ ENUM_CASE(THROTTLED);
+ }
+ NOTREACHED();
+ return "";
+}
+
+SyncScheduler::SyncSessionJob::SyncSessionJob()
+ : purpose(UNKNOWN),
+ is_canary_job(false) {
+}
+
+SyncScheduler::SyncSessionJob::~SyncSessionJob() {}
+
+SyncScheduler::SyncSessionJob::SyncSessionJob(SyncSessionJobPurpose purpose,
+ base::TimeTicks start,
+ linked_ptr<sessions::SyncSession> session, bool is_canary_job,
+ const tracked_objects::Location& from_here) : purpose(purpose),
+ scheduled_start(start),
+ session(session),
+ is_canary_job(is_canary_job),
+ from_here(from_here) {
+}
+
+const char* SyncScheduler::SyncSessionJob::GetPurposeString(
+ SyncScheduler::SyncSessionJob::SyncSessionJobPurpose purpose) {
+ switch (purpose) {
+ ENUM_CASE(UNKNOWN);
+ ENUM_CASE(POLL);
+ ENUM_CASE(NUDGE);
+ ENUM_CASE(CLEAR_USER_DATA);
+ ENUM_CASE(CONFIGURATION);
+ ENUM_CASE(CLEANUP_DISABLED_TYPES);
+ }
+ NOTREACHED();
+ return "";
+}
+
+TimeDelta SyncScheduler::DelayProvider::GetDelay(
+ const base::TimeDelta& last_delay) {
+ return SyncScheduler::GetRecommendedDelay(last_delay);
+}
+
+GetUpdatesCallerInfo::GetUpdatesSource GetUpdatesFromNudgeSource(
+ NudgeSource source) {
+ switch (source) {
+ case NUDGE_SOURCE_NOTIFICATION:
+ return GetUpdatesCallerInfo::NOTIFICATION;
+ case NUDGE_SOURCE_LOCAL:
+ return GetUpdatesCallerInfo::LOCAL;
+ case NUDGE_SOURCE_CONTINUATION:
+ return GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION;
+ case NUDGE_SOURCE_LOCAL_REFRESH:
+ return GetUpdatesCallerInfo::DATATYPE_REFRESH;
+ case NUDGE_SOURCE_UNKNOWN:
+ return GetUpdatesCallerInfo::UNKNOWN;
+ default:
+ NOTREACHED();
+ return GetUpdatesCallerInfo::UNKNOWN;
+ }
+}
+
+SyncScheduler::WaitInterval::WaitInterval(Mode mode, TimeDelta length)
+ : mode(mode), had_nudge(false), length(length) { }
+
+// Helper macros to log with the syncer thread name; useful when there
+// are multiple syncer threads involved.
+
+#define SLOG(severity) LOG(severity) << name_ << ": "
+
+#define SDVLOG(verbose_level) DVLOG(verbose_level) << name_ << ": "
+
+#define SDVLOG_LOC(from_here, verbose_level) \
+ DVLOG_LOC(from_here, verbose_level) << name_ << ": "
+
+namespace {
+
+const int kDefaultSessionsCommitDelaySeconds = 10;
+
+bool IsConfigRelatedUpdateSourceValue(
+ GetUpdatesCallerInfo::GetUpdatesSource source) {
+ switch (source) {
+ case GetUpdatesCallerInfo::RECONFIGURATION:
+ case GetUpdatesCallerInfo::MIGRATION:
+ case GetUpdatesCallerInfo::NEW_CLIENT:
+ case GetUpdatesCallerInfo::NEWLY_SUPPORTED_DATATYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+SyncScheduler::SyncScheduler(const std::string& name,
+ sessions::SyncSessionContext* context,
+ Syncer* syncer)
+ : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ weak_ptr_factory_for_weak_handle_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ weak_handle_this_(MakeWeakHandle(
+ weak_ptr_factory_for_weak_handle_.GetWeakPtr())),
+ name_(name),
+ sync_loop_(MessageLoop::current()),
+ started_(false),
+ syncer_short_poll_interval_seconds_(
+ TimeDelta::FromSeconds(kDefaultShortPollIntervalSeconds)),
+ syncer_long_poll_interval_seconds_(
+ TimeDelta::FromSeconds(kDefaultLongPollIntervalSeconds)),
+ sessions_commit_delay_(
+ TimeDelta::FromSeconds(kDefaultSessionsCommitDelaySeconds)),
+ mode_(NORMAL_MODE),
+ // Start with assuming everything is fine with the connection.
+ // At the end of the sync cycle we would have the correct status.
+ server_connection_ok_(true),
+ connection_code_(HttpResponse::SERVER_CONNECTION_OK),
+ delay_provider_(new DelayProvider()),
+ syncer_(syncer),
+ session_context_(context) {
+ DCHECK(sync_loop_);
+}
+
+SyncScheduler::~SyncScheduler() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ StopImpl(base::Closure());
+}
+
+void SyncScheduler::OnCredentialsUpdated() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+
+ // TODO(lipalani): crbug.com/106262. One issue here is that if after
+ // the auth error we happened to do gettime and it succeeded then
+ // the |connection_code_| would be briefly OK however it would revert
+ // back to SYNC_AUTH_ERROR at the end of the sync cycle. The
+ // referenced bug explores the option of removing gettime calls
+  // altogether.
+ if (HttpResponse::SYNC_AUTH_ERROR == connection_code_) {
+ OnServerConnectionErrorFixed();
+ }
+}
+
+void SyncScheduler::OnConnectionStatusChange() {
+ if (HttpResponse::CONNECTION_UNAVAILABLE == connection_code_) {
+ // Optimistically assume that the connection is fixed and try
+ // connecting.
+ OnServerConnectionErrorFixed();
+ }
+}
+
+void SyncScheduler::OnServerConnectionErrorFixed() {
+ DCHECK(!server_connection_ok_);
+ connection_code_ = HttpResponse::SERVER_CONNECTION_OK;
+ server_connection_ok_ = true;
+ PostTask(FROM_HERE, "DoCanaryJob",
+ base::Bind(&SyncScheduler::DoCanaryJob,
+ weak_ptr_factory_.GetWeakPtr()));
+
+}
+
+void SyncScheduler::UpdateServerConnectionManagerStatus(
+ HttpResponse::ServerConnectionCode code) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SDVLOG(2) << "New server connection code: "
+ << HttpResponse::GetServerConnectionCodeString(code);
+ bool old_server_connection_ok = server_connection_ok_;
+
+ connection_code_ = code;
+
+ // Note, be careful when adding cases here because if the SyncScheduler
+ // thinks there is no valid connection as determined by this method, it
+ // will drop out of *all* forward progress sync loops (it won't poll and it
+ // will queue up Talk notifications but not actually call SyncShare) until
+ // some external action causes a ServerConnectionManager to broadcast that
+ // a valid connection has been re-established
+ if (HttpResponse::CONNECTION_UNAVAILABLE == code ||
+ HttpResponse::SYNC_AUTH_ERROR == code) {
+ server_connection_ok_ = false;
+ SDVLOG(2) << "Sync auth error or unavailable connection: "
+ << "server connection is down";
+ } else if (HttpResponse::SERVER_CONNECTION_OK == code) {
+ server_connection_ok_ = true;
+ SDVLOG(2) << "Sync server connection is ok: "
+ << "server connection is up, doing canary job";
+ }
+
+ if (old_server_connection_ok != server_connection_ok_) {
+ const char* transition =
+ server_connection_ok_ ? "down -> up" : "up -> down";
+ SDVLOG(2) << "Server connection changed: " << transition;
+ }
+}
+
+void SyncScheduler::Start(Mode mode, const base::Closure& callback) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ std::string thread_name = MessageLoop::current()->thread_name();
+ if (thread_name.empty())
+ thread_name = "<Main thread>";
+ SDVLOG(2) << "Start called from thread "
+ << thread_name << " with mode " << GetModeString(mode);
+ if (!started_) {
+ started_ = true;
+ PostTask(FROM_HERE, "SendInitialSnapshot",
+ base::Bind(&SyncScheduler::SendInitialSnapshot,
+ weak_ptr_factory_.GetWeakPtr()));
+ }
+ PostTask(FROM_HERE, "StartImpl",
+ base::Bind(&SyncScheduler::StartImpl,
+ weak_ptr_factory_.GetWeakPtr(), mode, callback));
+}
+
+void SyncScheduler::SendInitialSnapshot() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ scoped_ptr<SyncSession> dummy(new SyncSession(session_context_.get(), this,
+ SyncSourceInfo(), ModelSafeRoutingInfo(),
+ std::vector<ModelSafeWorker*>()));
+ SyncEngineEvent event(SyncEngineEvent::STATUS_CHANGED);
+ sessions::SyncSessionSnapshot snapshot(dummy->TakeSnapshot());
+ event.snapshot = &snapshot;
+ session_context_->NotifyListeners(event);
+}
+
+void SyncScheduler::StartImpl(Mode mode, const base::Closure& callback) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SDVLOG(2) << "In StartImpl with mode " << GetModeString(mode);
+
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(!session_context_->account_name().empty());
+ DCHECK(syncer_.get());
+ Mode old_mode = mode_;
+ mode_ = mode;
+ AdjustPolling(NULL); // Will kick start poll timer if needed.
+ if (!callback.is_null())
+ callback.Run();
+
+ if (old_mode != mode_) {
+ // We just changed our mode. See if there are any pending jobs that we could
+ // execute in the new mode.
+ DoPendingJobIfPossible(false);
+ }
+}
+
+SyncScheduler::JobProcessDecision SyncScheduler::DecideWhileInWaitInterval(
+ const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(wait_interval_.get());
+ DCHECK_NE(job.purpose, SyncSessionJob::CLEAR_USER_DATA);
+ DCHECK_NE(job.purpose, SyncSessionJob::CLEANUP_DISABLED_TYPES);
+
+ SDVLOG(2) << "DecideWhileInWaitInterval with WaitInterval mode "
+ << WaitInterval::GetModeString(wait_interval_->mode)
+ << (wait_interval_->had_nudge ? " (had nudge)" : "")
+ << (job.is_canary_job ? " (canary)" : "");
+
+ if (job.purpose == SyncSessionJob::POLL)
+ return DROP;
+
+ DCHECK(job.purpose == SyncSessionJob::NUDGE ||
+ job.purpose == SyncSessionJob::CONFIGURATION);
+ if (wait_interval_->mode == WaitInterval::THROTTLED)
+ return SAVE;
+
+ DCHECK_EQ(wait_interval_->mode, WaitInterval::EXPONENTIAL_BACKOFF);
+ if (job.purpose == SyncSessionJob::NUDGE) {
+ if (mode_ == CONFIGURATION_MODE)
+ return SAVE;
+
+ // If we already had one nudge then just drop this nudge. We will retry
+ // later when the timer runs out.
+ if (!job.is_canary_job)
+ return wait_interval_->had_nudge ? DROP : CONTINUE;
+ else // We are here because timer ran out. So retry.
+ return CONTINUE;
+ }
+ return job.is_canary_job ? CONTINUE : SAVE;
+}
+
+SyncScheduler::JobProcessDecision SyncScheduler::DecideOnJob(
+ const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ if (job.purpose == SyncSessionJob::CLEAR_USER_DATA ||
+ job.purpose == SyncSessionJob::CLEANUP_DISABLED_TYPES)
+ return CONTINUE;
+
+ // See if our type is throttled.
+ syncable::ModelTypeSet throttled_types =
+ session_context_->GetThrottledTypes();
+ if (job.purpose == SyncSessionJob::NUDGE &&
+ job.session->source().updates_source == GetUpdatesCallerInfo::LOCAL) {
+ syncable::ModelTypeSet requested_types;
+ for (ModelTypePayloadMap::const_iterator i =
+ job.session->source().types.begin();
+ i != job.session->source().types.end();
+ ++i) {
+ requested_types.Put(i->first);
+ }
+
+ if (!requested_types.Empty() && throttled_types.HasAll(requested_types))
+ return SAVE;
+ }
+
+ if (wait_interval_.get())
+ return DecideWhileInWaitInterval(job);
+
+ if (mode_ == CONFIGURATION_MODE) {
+ if (job.purpose == SyncSessionJob::NUDGE)
+ return SAVE;
+ else if (job.purpose == SyncSessionJob::CONFIGURATION)
+ return CONTINUE;
+ else
+ return DROP;
+ }
+
+ // We are in normal mode.
+ DCHECK_EQ(mode_, NORMAL_MODE);
+ DCHECK_NE(job.purpose, SyncSessionJob::CONFIGURATION);
+
+ // Freshness condition
+ if (job.scheduled_start < last_sync_session_end_time_) {
+ SDVLOG(2) << "Dropping job because of freshness";
+ return DROP;
+ }
+
+ if (server_connection_ok_)
+ return CONTINUE;
+
+ SDVLOG(2) << "Bad server connection. Using that to decide on job.";
+ return job.purpose == SyncSessionJob::NUDGE ? SAVE : DROP;
+}
+
+void SyncScheduler::InitOrCoalescePendingJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(job.purpose != SyncSessionJob::CONFIGURATION);
+ if (pending_nudge_.get() == NULL) {
+ SDVLOG(2) << "Creating a pending nudge job";
+ SyncSession* s = job.session.get();
+ scoped_ptr<SyncSession> session(new SyncSession(s->context(),
+ s->delegate(), s->source(), s->routing_info(), s->workers()));
+
+ SyncSessionJob new_job(SyncSessionJob::NUDGE, job.scheduled_start,
+ make_linked_ptr(session.release()), false, job.from_here);
+ pending_nudge_.reset(new SyncSessionJob(new_job));
+
+ return;
+ }
+
+ SDVLOG(2) << "Coalescing a pending nudge";
+ pending_nudge_->session->Coalesce(*(job.session.get()));
+ pending_nudge_->scheduled_start = job.scheduled_start;
+
+ // Unfortunately the nudge location cannot be modified. So it stores the
+ // location of the first caller.
+}
+
+bool SyncScheduler::ShouldRunJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(started_);
+
+ JobProcessDecision decision = DecideOnJob(job);
+ SDVLOG(2) << "Should run "
+ << SyncSessionJob::GetPurposeString(job.purpose)
+ << " job in mode " << GetModeString(mode_)
+ << ": " << GetDecisionString(decision);
+ if (decision != SAVE)
+ return decision == CONTINUE;
+
+ DCHECK(job.purpose == SyncSessionJob::NUDGE || job.purpose ==
+ SyncSessionJob::CONFIGURATION);
+
+ SaveJob(job);
+ return false;
+}
+
+void SyncScheduler::SaveJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK_NE(job.purpose, SyncSessionJob::CLEAR_USER_DATA);
+ // TODO(sync): Should we also check that job.purpose !=
+ // CLEANUP_DISABLED_TYPES? (See http://crbug.com/90868.)
+ if (job.purpose == SyncSessionJob::NUDGE) {
+ SDVLOG(2) << "Saving a nudge job";
+ InitOrCoalescePendingJob(job);
+ } else if (job.purpose == SyncSessionJob::CONFIGURATION){
+ SDVLOG(2) << "Saving a configuration job";
+ DCHECK(wait_interval_.get());
+ DCHECK(mode_ == CONFIGURATION_MODE);
+
+ SyncSession* old = job.session.get();
+ SyncSession* s(new SyncSession(session_context_.get(), this,
+ old->source(), old->routing_info(), old->workers()));
+ SyncSessionJob new_job(job.purpose, TimeTicks::Now(),
+ make_linked_ptr(s), false, job.from_here);
+ wait_interval_->pending_configure_job.reset(new SyncSessionJob(new_job));
+ } // drop the rest.
+ // TODO(sync): Is it okay to drop the rest? It's weird that
+ // SaveJob() only does what it says sometimes. (See
+ // http://crbug.com/90868.)
+}
+
+// Functor for std::find_if to search by ModelSafeGroup.
+struct ModelSafeWorkerGroupIs {
+ explicit ModelSafeWorkerGroupIs(ModelSafeGroup group) : group(group) {}
+ bool operator()(ModelSafeWorker* w) {
+ return group == w->GetModelSafeGroup();
+ }
+ ModelSafeGroup group;
+};
+
+void SyncScheduler::ScheduleClearUserData() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ PostTask(FROM_HERE, "ScheduleClearUserDataImpl",
+ base::Bind(&SyncScheduler::ScheduleClearUserDataImpl,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+// TODO(sync): Remove the *Impl methods for the other Schedule*
+// functions, too.
+void SyncScheduler::ScheduleCleanupDisabledTypes() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SyncSessionJob job(SyncSessionJob::CLEANUP_DISABLED_TYPES, TimeTicks::Now(),
+ make_linked_ptr(CreateSyncSession(SyncSourceInfo())),
+ false,
+ FROM_HERE);
+ ScheduleSyncSessionJob(job);
+}
+
+void SyncScheduler::ScheduleNudge(
+ const TimeDelta& delay,
+ NudgeSource source, ModelTypeSet types,
+ const tracked_objects::Location& nudge_location) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SDVLOG_LOC(nudge_location, 2)
+ << "Nudge scheduled with delay " << delay.InMilliseconds() << " ms, "
+ << "source " << GetNudgeSourceString(source) << ", "
+ << "types " << ModelTypeSetToString(types);
+
+ ModelTypePayloadMap types_with_payloads =
+ syncable::ModelTypePayloadMapFromEnumSet(types, std::string());
+ PostTask(nudge_location, "ScheduleNudgeImpl",
+ base::Bind(&SyncScheduler::ScheduleNudgeImpl,
+ weak_ptr_factory_.GetWeakPtr(),
+ delay,
+ GetUpdatesFromNudgeSource(source),
+ types_with_payloads,
+ false,
+ nudge_location));
+}
+
+void SyncScheduler::ScheduleNudgeWithPayloads(
+ const TimeDelta& delay,
+ NudgeSource source, const ModelTypePayloadMap& types_with_payloads,
+ const tracked_objects::Location& nudge_location) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SDVLOG_LOC(nudge_location, 2)
+ << "Nudge scheduled with delay " << delay.InMilliseconds() << " ms, "
+ << "source " << GetNudgeSourceString(source) << ", "
+ << "payloads "
+ << syncable::ModelTypePayloadMapToString(types_with_payloads);
+
+ PostTask(nudge_location, "ScheduleNudgeImpl",
+ base::Bind(&SyncScheduler::ScheduleNudgeImpl,
+ weak_ptr_factory_.GetWeakPtr(),
+ delay,
+ GetUpdatesFromNudgeSource(source),
+ types_with_payloads,
+ false,
+ nudge_location));
+}
+
+void SyncScheduler::ScheduleClearUserDataImpl() {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ SyncSessionJob job(SyncSessionJob::CLEAR_USER_DATA, TimeTicks::Now(),
+ make_linked_ptr(CreateSyncSession(SyncSourceInfo())),
+ false,
+ FROM_HERE);
+
+ ScheduleSyncSessionJob(job);
+}
+
+void SyncScheduler::ScheduleNudgeImpl(
+ const TimeDelta& delay,
+ GetUpdatesCallerInfo::GetUpdatesSource source,
+ const ModelTypePayloadMap& types_with_payloads,
+ bool is_canary_job, const tracked_objects::Location& nudge_location) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+
+ SDVLOG_LOC(nudge_location, 2)
+ << "In ScheduleNudgeImpl with delay "
+ << delay.InMilliseconds() << " ms, "
+ << "source " << GetUpdatesSourceString(source) << ", "
+ << "payloads "
+ << syncable::ModelTypePayloadMapToString(types_with_payloads)
+ << (is_canary_job ? " (canary)" : "");
+
+ SyncSourceInfo info(source, types_with_payloads);
+
+ SyncSession* session(CreateSyncSession(info));
+ SyncSessionJob job(SyncSessionJob::NUDGE, TimeTicks::Now() + delay,
+ make_linked_ptr(session), is_canary_job,
+ nudge_location);
+
+ session = NULL;
+ if (!ShouldRunJob(job))
+ return;
+
+ if (pending_nudge_.get()) {
+ if (IsBackingOff() && delay > TimeDelta::FromSeconds(1)) {
+ SDVLOG(2) << "Dropping the nudge because we are in backoff";
+ return;
+ }
+
+ SDVLOG(2) << "Coalescing pending nudge";
+ pending_nudge_->session->Coalesce(*(job.session.get()));
+
+ SDVLOG(2) << "Rescheduling pending nudge";
+ SyncSession* s = pending_nudge_->session.get();
+ job.session.reset(new SyncSession(s->context(), s->delegate(),
+ s->source(), s->routing_info(), s->workers()));
+
+ // Choose the start time as the earliest of the 2.
+ job.scheduled_start = std::min(job.scheduled_start,
+ pending_nudge_->scheduled_start);
+ pending_nudge_.reset();
+ }
+
+ // TODO(zea): Consider adding separate throttling/backoff for datatype
+ // refresh requests.
+ ScheduleSyncSessionJob(job);
+}
+
+// Helper to extract the routing info and workers corresponding to types in
+// |types| from |registrar|.
+void GetModelSafeParamsForTypes(ModelTypeSet types,
+ ModelSafeWorkerRegistrar* registrar, ModelSafeRoutingInfo* routes,
+ std::vector<ModelSafeWorker*>* workers) {
+ ModelSafeRoutingInfo r_tmp;
+ std::vector<ModelSafeWorker*> w_tmp;
+ registrar->GetModelSafeRoutingInfo(&r_tmp);
+ registrar->GetWorkers(&w_tmp);
+
+ bool passive_group_added = false;
+
+ typedef std::vector<ModelSafeWorker*>::const_iterator iter;
+ for (ModelTypeSet::Iterator it = types.First();
+ it.Good(); it.Inc()) {
+ const syncable::ModelType t = it.Get();
+ DCHECK_EQ(1U, r_tmp.count(t));
+ (*routes)[t] = r_tmp[t];
+ iter w_tmp_it = std::find_if(w_tmp.begin(), w_tmp.end(),
+ ModelSafeWorkerGroupIs(r_tmp[t]));
+ if (w_tmp_it != w_tmp.end()) {
+ iter workers_it = std::find_if(workers->begin(), workers->end(),
+ ModelSafeWorkerGroupIs(r_tmp[t]));
+ if (workers_it == workers->end())
+ workers->push_back(*w_tmp_it);
+
+ if (r_tmp[t] == GROUP_PASSIVE)
+ passive_group_added = true;
+ } else {
+ NOTREACHED();
+ }
+ }
+
+ // Always add group passive.
+ if (passive_group_added == false) {
+ iter it = std::find_if(w_tmp.begin(), w_tmp.end(),
+ ModelSafeWorkerGroupIs(GROUP_PASSIVE));
+ if (it != w_tmp.end())
+ workers->push_back(*it);
+ else
+ NOTREACHED();
+ }
+}
+
+void SyncScheduler::ScheduleConfig(
+ ModelTypeSet types,
+ GetUpdatesCallerInfo::GetUpdatesSource source) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(IsConfigRelatedUpdateSourceValue(source));
+ SDVLOG(2) << "Scheduling a config";
+ ModelSafeRoutingInfo routes;
+ std::vector<ModelSafeWorker*> workers;
+ GetModelSafeParamsForTypes(types, session_context_->registrar(),
+ &routes, &workers);
+
+ PostTask(FROM_HERE, "ScheduleConfigImpl",
+ base::Bind(&SyncScheduler::ScheduleConfigImpl,
+ weak_ptr_factory_.GetWeakPtr(),
+ routes,
+ workers,
+ source));
+}
+
+void SyncScheduler::ScheduleConfigImpl(
+ const ModelSafeRoutingInfo& routing_info,
+ const std::vector<ModelSafeWorker*>& workers,
+ const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+
+ SDVLOG(2) << "In ScheduleConfigImpl";
+ // TODO(tim): config-specific GetUpdatesCallerInfo value?
+ SyncSession* session = new SyncSession(session_context_.get(), this,
+ SyncSourceInfo(source,
+ syncable::ModelTypePayloadMapFromRoutingInfo(
+ routing_info, std::string())),
+ routing_info, workers);
+ SyncSessionJob job(SyncSessionJob::CONFIGURATION, TimeTicks::Now(),
+ make_linked_ptr(session),
+ false,
+ FROM_HERE);
+ ScheduleSyncSessionJob(job);
+}
+
+const char* SyncScheduler::GetModeString(SyncScheduler::Mode mode) {
+ switch (mode) {
+ ENUM_CASE(CONFIGURATION_MODE);
+ ENUM_CASE(NORMAL_MODE);
+ }
+ return "";
+}
+
+const char* SyncScheduler::GetDecisionString(
+ SyncScheduler::JobProcessDecision mode) {
+ switch (mode) {
+ ENUM_CASE(CONTINUE);
+ ENUM_CASE(SAVE);
+ ENUM_CASE(DROP);
+ }
+ return "";
+}
+
+void SyncScheduler::PostTask(
+ const tracked_objects::Location& from_here,
+ const char* name, const base::Closure& task) {
+ SDVLOG_LOC(from_here, 3) << "Posting " << name << " task";
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ if (!started_) {
+ SDVLOG(1) << "Not posting task as scheduler is stopped.";
+ return;
+ }
+ sync_loop_->PostTask(from_here, task);
+}
+
+void SyncScheduler::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ const char* name, const base::Closure& task, base::TimeDelta delay) {
+ SDVLOG_LOC(from_here, 3) << "Posting " << name << " task with "
+ << delay.InMilliseconds() << " ms delay";
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ if (!started_) {
+ SDVLOG(1) << "Not posting task as scheduler is stopped.";
+ return;
+ }
+ sync_loop_->PostDelayedTask(from_here, task, delay);
+}
+
+void SyncScheduler::ScheduleSyncSessionJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ TimeDelta delay = job.scheduled_start - TimeTicks::Now();
+ if (delay < TimeDelta::FromMilliseconds(0))
+ delay = TimeDelta::FromMilliseconds(0);
+ SDVLOG_LOC(job.from_here, 2)
+ << "In ScheduleSyncSessionJob with "
+ << SyncSessionJob::GetPurposeString(job.purpose)
+ << " job and " << delay.InMilliseconds() << " ms delay";
+
+ if (job.purpose == SyncSessionJob::NUDGE) {
+ SDVLOG_LOC(job.from_here, 2) << "Resetting pending_nudge";
+ DCHECK(!pending_nudge_.get() || pending_nudge_->session.get() ==
+ job.session);
+ pending_nudge_.reset(new SyncSessionJob(job));
+ }
+ PostDelayedTask(job.from_here, "DoSyncSessionJob",
+ base::Bind(&SyncScheduler::DoSyncSessionJob,
+ weak_ptr_factory_.GetWeakPtr(),
+ job),
+ delay);
+}
+
+void SyncScheduler::SetSyncerStepsForPurpose(
+ SyncSessionJob::SyncSessionJobPurpose purpose,
+ SyncerStep* start, SyncerStep* end) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ switch (purpose) {
+ case SyncSessionJob::CONFIGURATION:
+ *start = DOWNLOAD_UPDATES;
+ *end = APPLY_UPDATES;
+ return;
+ case SyncSessionJob::CLEAR_USER_DATA:
+ *start = CLEAR_PRIVATE_DATA;
+ *end = CLEAR_PRIVATE_DATA;
+ return;
+ case SyncSessionJob::NUDGE:
+ case SyncSessionJob::POLL:
+ *start = SYNCER_BEGIN;
+ *end = SYNCER_END;
+ return;
+ case SyncSessionJob::CLEANUP_DISABLED_TYPES:
+ *start = CLEANUP_DISABLED_TYPES;
+ *end = CLEANUP_DISABLED_TYPES;
+ return;
+ default:
+ NOTREACHED();
+ *start = SYNCER_END;
+ *end = SYNCER_END;
+ return;
+ }
+}
+
+void SyncScheduler::DoSyncSessionJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ if (!ShouldRunJob(job)) {
+ SLOG(WARNING)
+ << "Not executing "
+ << SyncSessionJob::GetPurposeString(job.purpose) << " job from "
+ << GetUpdatesSourceString(job.session->source().updates_source);
+ return;
+ }
+
+ if (job.purpose == SyncSessionJob::NUDGE) {
+ if (pending_nudge_.get() == NULL ||
+ pending_nudge_->session != job.session) {
+ SDVLOG(2) << "Dropping a nudge in "
+ << "DoSyncSessionJob because another nudge was scheduled";
+      return; // Another nudge must have been scheduled in the meantime.
+ }
+ pending_nudge_.reset();
+
+ // Create the session with the latest model safe table and use it to purge
+ // and update any disabled or modified entries in the job.
+ scoped_ptr<SyncSession> session(CreateSyncSession(job.session->source()));
+
+ job.session->RebaseRoutingInfoWithLatest(*session);
+ }
+ SDVLOG(2) << "DoSyncSessionJob with "
+ << SyncSessionJob::GetPurposeString(job.purpose) << " job";
+
+ SyncerStep begin(SYNCER_END);
+ SyncerStep end(SYNCER_END);
+ SetSyncerStepsForPurpose(job.purpose, &begin, &end);
+
+ bool has_more_to_sync = true;
+ while (ShouldRunJob(job) && has_more_to_sync) {
+ SDVLOG(2) << "Calling SyncShare.";
+ // Synchronously perform the sync session from this thread.
+ syncer_->SyncShare(job.session.get(), begin, end);
+ has_more_to_sync = job.session->HasMoreToSync();
+ if (has_more_to_sync)
+ job.session->PrepareForAnotherSyncCycle();
+ }
+ SDVLOG(2) << "Done SyncShare looping.";
+
+ FinishSyncSessionJob(job);
+}
+
+void SyncScheduler::UpdateCarryoverSessionState(
+ const SyncSessionJob& old_job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ if (old_job.purpose == SyncSessionJob::CONFIGURATION) {
+ // Whatever types were part of a configuration task will have had updates
+ // downloaded. For that reason, we make sure they get recorded in the
+ // event that they get disabled at a later time.
+ ModelSafeRoutingInfo r(session_context_->previous_session_routing_info());
+ if (!r.empty()) {
+ ModelSafeRoutingInfo temp_r;
+ ModelSafeRoutingInfo old_info(old_job.session->routing_info());
+ std::set_union(r.begin(), r.end(), old_info.begin(), old_info.end(),
+ std::insert_iterator<ModelSafeRoutingInfo>(temp_r, temp_r.begin()));
+ session_context_->set_previous_session_routing_info(temp_r);
+ }
+ } else {
+ session_context_->set_previous_session_routing_info(
+ old_job.session->routing_info());
+ }
+}
+
+void SyncScheduler::FinishSyncSessionJob(const SyncSessionJob& job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ // Update timing information for how often datatypes are triggering nudges.
+ base::TimeTicks now = TimeTicks::Now();
+ if (!last_sync_session_end_time_.is_null()) {
+ ModelTypePayloadMap::const_iterator iter;
+ for (iter = job.session->source().types.begin();
+ iter != job.session->source().types.end();
+ ++iter) {
+#define PER_DATA_TYPE_MACRO(type_str) \
+ SYNC_FREQ_HISTOGRAM("Sync.Freq" type_str, \
+ now - last_sync_session_end_time_);
+ SYNC_DATA_TYPE_HISTOGRAM(iter->first);
+#undef PER_DATA_TYPE_MACRO
+ }
+ }
+ last_sync_session_end_time_ = now;
+
+ // Now update the status of the connection from SCM. We need this
+ // to decide whether we need to save/run future jobs. The notifications
+ // from SCM are not reliable.
+ // TODO(rlarocque): crbug.com/110954
+ // We should get rid of the notifications and
+ // it is probably not needed to maintain this status variable
+ // in 2 places. We should query it directly from SCM when needed.
+ // But that would need little more refactoring(including a method to
+ // query if the auth token is invalid) from SCM side.
+ ServerConnectionManager* scm = session_context_->connection_manager();
+ UpdateServerConnectionManagerStatus(scm->server_status());
+
+ UpdateCarryoverSessionState(job);
+ if (IsSyncingCurrentlySilenced()) {
+ SDVLOG(2) << "We are currently throttled; not scheduling the next sync.";
+ // TODO(sync): Investigate whether we need to check job.purpose
+ // here; see DCHECKs in SaveJob(). (See http://crbug.com/90868.)
+ SaveJob(job);
+ return; // Nothing to do.
+ }
+
+ SDVLOG(2) << "Updating the next polling time after SyncMain";
+ ScheduleNextSync(job);
+}
+
+void SyncScheduler::ScheduleNextSync(const SyncSessionJob& old_job) {
+ DCHECK_EQ(MessageLoop::current(), sync_loop_);
+ DCHECK(!old_job.session->HasMoreToSync());
+
+ AdjustPolling(&old_job);
+
+ if (old_job.session->Succeeded()) {
+ // Success implies backoff relief. Note that if this was a
+ // "one-off" job (i.e. purpose ==
+ // SyncSessionJob::{CLEAR_USER_DATA,CLEANUP_DISABLED_TYPES}), if
+ // there was work to do before it ran this wont have changed, as
+ // jobs like this don't run a full sync cycle. So we don't need
+ // special code here.
+ wait_interval_.reset();
+ SDVLOG(2) << "Job succeeded so not scheduling more jobs";
+ return;
+ }
+
+ if (old_job.purpose == SyncSessionJob::POLL) {
+ return; // We don't retry POLL jobs.
+ }
+
+ // TODO(rlarocque): There's no reason why we should blindly backoff and retry
+ // if we don't succeed. Some types of errors are not likely to disappear on
+ // their own. With the return values now available in the old_job.session, we
+ // should be able to detect such errors and only retry when we detect
+ // transient errors.
+
+ if (IsBackingOff() && wait_interval_->timer.IsRunning() &&
+ mode_ == NORMAL_MODE) {
+ // When in normal mode, we allow up to one nudge per backoff interval. It
+ // appears that this was our nudge for this interval, and it failed.
+ //
+ // Note: This does not prevent us from running canary jobs. For example, an
+ // IP address change might still result in another nudge being executed
+ // during this backoff interval.
+ SDVLOG(2) << "A nudge during backoff failed";
+
+ DCHECK_EQ(SyncSessionJob::NUDGE, old_job.purpose);
+ DCHECK(!wait_interval_->had_nudge);
+
+ wait_interval_->had_nudge = true;
+ InitOrCoalescePendingJob(old_job);
+ RestartWaiting();
+ } else {
+ // Either this is the first failure or a consecutive failure after our
+ // backoff timer expired. We handle it the same way in either case.
+ SDVLOG(2) << "Non-'backoff nudge' SyncShare job failed";
+ HandleContinuationError(old_job);
+ }
+}
+
+// (Re)configures the repeating poll timer. The short interval is used when
+// notifications are disabled, the long interval when they are enabled.
+// |old_job| may be NULL (e.g. when called at Start time).
+void SyncScheduler::AdjustPolling(const SyncSessionJob* old_job) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+
+  TimeDelta poll = (!session_context_->notifications_enabled()) ?
+      syncer_short_poll_interval_seconds_ :
+      syncer_long_poll_interval_seconds_;
+  bool rate_changed = !poll_timer_.IsRunning() ||
+                       poll != poll_timer_.GetCurrentDelay();
+
+  // A non-poll job just ran and the rate is unchanged: push the next poll
+  // out a full interval by restarting the timer.
+  if (old_job && old_job->purpose != SyncSessionJob::POLL && !rate_changed)
+    poll_timer_.Reset();
+
+  if (!rate_changed)
+    return;
+
+  // Adjust poll rate.
+  poll_timer_.Stop();
+  poll_timer_.Start(FROM_HERE, poll, this, &SyncScheduler::PollTimerCallback);
+}
+
+// (Re)starts |wait_interval_|'s one-shot timer; DoCanaryJob runs when the
+// interval elapses. Requires a live wait interval.
+void SyncScheduler::RestartWaiting() {
+  CHECK(wait_interval_.get());
+  wait_interval_->timer.Stop();
+  wait_interval_->timer.Start(FROM_HERE, wait_interval_->length,
+                              this, &SyncScheduler::DoCanaryJob);
+}
+
+// Handles a failed job that is not a backoff-limited nudge: enters (or
+// extends) exponential backoff and saves |old_job| so it can be retried
+// when the interval expires.
+void SyncScheduler::HandleContinuationError(
+    const SyncSessionJob& old_job) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  if (DCHECK_IS_ON()) {
+    if (IsBackingOff()) {
+      DCHECK(wait_interval_->timer.IsRunning() || old_job.is_canary_job);
+    }
+  }
+
+  // Grow the backoff from the current interval, or seed it from one second
+  // if this is the first failure.
+  TimeDelta length = delay_provider_->GetDelay(
+      IsBackingOff() ? wait_interval_->length : TimeDelta::FromSeconds(1));
+
+  SDVLOG(2) << "In handle continuation error with "
+            << SyncSessionJob::GetPurposeString(old_job.purpose)
+            << " job. The time delta(ms) is "
+            << length.InMilliseconds();
+
+  // This will reset the had_nudge variable as well.
+  wait_interval_.reset(new WaitInterval(WaitInterval::EXPONENTIAL_BACKOFF,
+                                        length));
+  if (old_job.purpose == SyncSessionJob::CONFIGURATION) {
+    // Configuration jobs get a fresh session (same source/routes/workers)
+    // stashed in the wait interval's dedicated slot.
+    SyncSession* old = old_job.session.get();
+    SyncSession* s(new SyncSession(session_context_.get(), this,
+        old->source(), old->routing_info(), old->workers()));
+    SyncSessionJob job(old_job.purpose, TimeTicks::Now() + length,
+                      make_linked_ptr(s), false, FROM_HERE);
+    wait_interval_->pending_configure_job.reset(new SyncSessionJob(job));
+  } else {
+    // We are not in configuration mode. So wait_interval's pending job
+    // should be null.
+    DCHECK(wait_interval_->pending_configure_job.get() == NULL);
+
+    // TODO(lipalani) - handle clear user data.
+    InitOrCoalescePendingJob(old_job);
+  }
+  RestartWaiting();
+}
+
+// static
+// Computes the next backoff delay from |last_delay|, randomized +/- to avoid
+// synchronized client retries (DDOS avoidance), and capped at
+// kMaxBackoffSeconds.
+TimeDelta SyncScheduler::GetRecommendedDelay(const TimeDelta& last_delay) {
+  if (last_delay.InSeconds() >= kMaxBackoffSeconds)
+    return TimeDelta::FromSeconds(kMaxBackoffSeconds);
+
+  // This calculates approx. base_delay_seconds * 2 +/- base_delay_seconds / 2
+  // (exactly that when kBackoffRandomizationFactor == 2; |last_delay| is the
+  // "base_delay" referred to here).
+  int64 backoff_s =
+      std::max(static_cast<int64>(1),
+               last_delay.InSeconds() * kBackoffRandomizationFactor);
+
+  // Flip a coin to randomize backoff interval by +/- 50%.
+  int rand_sign = base::RandInt(0, 1) * 2 - 1;
+
+  // Truncation is adequate for rounding here.
+  backoff_s = backoff_s +
+      (rand_sign * (last_delay.InSeconds() / kBackoffRandomizationFactor));
+
+  // Cap the backoff interval.
+  backoff_s = std::max(static_cast<int64>(1),
+                       std::min(backoff_s, kMaxBackoffSeconds));
+
+  return TimeDelta::FromSeconds(backoff_s);
+}
+
+// Callable from any thread: asks the syncer to bail out early, then posts
+// StopImpl to the sync loop via the weak handle. |callback| runs from
+// StopImpl once shutdown is complete.
+void SyncScheduler::RequestStop(const base::Closure& callback) {
+  syncer_->RequestEarlyExit();  // Safe to call from any thread.
+  DCHECK(weak_handle_this_.IsInitialized());
+  SDVLOG(3) << "Posting StopImpl";
+  weak_handle_this_.Call(FROM_HERE,
+                         &SyncScheduler::StopImpl,
+                         callback);
+}
+
+// Sync-loop half of RequestStop: cancels pending weak-ptr tasks, drops any
+// wait interval, stops polling, and finally runs |callback| (if any).
+void SyncScheduler::StopImpl(const base::Closure& callback) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  SDVLOG(2) << "StopImpl called";
+
+  // Kill any in-flight method calls.
+  weak_ptr_factory_.InvalidateWeakPtrs();
+  wait_interval_.reset();
+  poll_timer_.Stop();
+  if (started_) {
+    started_ = false;
+  }
+  if (!callback.is_null())
+    callback.Run();
+}
+
+// Timer/unthrottle callback: retries any pending job with canary status,
+// which lets it run even while a backoff interval is still active.
+void SyncScheduler::DoCanaryJob() {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  SDVLOG(2) << "Do canary job";
+  DoPendingJobIfPossible(true);
+}
+
+// Executes the saved job appropriate for the current mode, if any:
+// the pending configure job in CONFIGURATION_MODE, or the pending nudge in
+// NORMAL_MODE. Called whenever conditions that gate jobs may have changed.
+void SyncScheduler::DoPendingJobIfPossible(bool is_canary_job) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  SyncSessionJob* job_to_execute = NULL;
+  if (mode_ == CONFIGURATION_MODE && wait_interval_.get()
+      && wait_interval_->pending_configure_job.get()) {
+    SDVLOG(2) << "Found pending configure job";
+    job_to_execute = wait_interval_->pending_configure_job.get();
+  } else if (mode_ == NORMAL_MODE && pending_nudge_.get()) {
+    SDVLOG(2) << "Found pending nudge job";
+    // Pending jobs mostly have time from the past. Reset it so this job
+    // will get executed.
+    if (pending_nudge_->scheduled_start < TimeTicks::Now())
+      pending_nudge_->scheduled_start = TimeTicks::Now();
+
+    scoped_ptr<SyncSession> session(CreateSyncSession(
+        pending_nudge_->session->source()));
+
+    // Also the routing info might have been changed since we cached the
+    // pending nudge. Update it by coalescing to the latest.
+    pending_nudge_->session->Coalesce(*(session.get()));
+    // The pending nudge would be cleared in the DoSyncSessionJob function.
+    job_to_execute = pending_nudge_.get();
+  }
+
+  if (job_to_execute != NULL) {
+    SDVLOG(2) << "Executing pending job";
+    // Run a copy so the canary flag does not stick to the saved job.
+    SyncSessionJob copy = *job_to_execute;
+    copy.is_canary_job = is_canary_job;
+    DoSyncSessionJob(copy);
+  }
+}
+
+// Builds a new SyncSession for |source| using the registrar's current
+// routing info and workers. Caller owns the returned session.
+SyncSession* SyncScheduler::CreateSyncSession(const SyncSourceInfo& source) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  ModelSafeRoutingInfo routes;
+  std::vector<ModelSafeWorker*> workers;
+  session_context_->registrar()->GetModelSafeRoutingInfo(&routes);
+  DVLOG(2) << "Creating sync session with routes "
+           << ModelSafeRoutingInfoToString(routes);
+  session_context_->registrar()->GetWorkers(&workers);
+  SyncSourceInfo info(source);
+
+  SyncSession* session(new SyncSession(session_context_.get(), this, info,
+      routes, workers));
+
+  return session;
+}
+
+// Fired by |poll_timer_|: schedules a PERIODIC (POLL) sync session job.
+void SyncScheduler::PollTimerCallback() {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  // NOTE(review): |r| is default-constructed and never populated, so
+  // |types_with_payloads| is empty here — presumably intentional for a
+  // periodic poll; confirm.
+  ModelSafeRoutingInfo r;
+  ModelTypePayloadMap types_with_payloads =
+      syncable::ModelTypePayloadMapFromRoutingInfo(r, std::string());
+  SyncSourceInfo info(GetUpdatesCallerInfo::PERIODIC, types_with_payloads);
+  SyncSession* s = CreateSyncSession(info);
+
+  SyncSessionJob job(SyncSessionJob::POLL, TimeTicks::Now(),
+                     make_linked_ptr(s),
+                     false,
+                     FROM_HERE);
+
+  ScheduleSyncSessionJob(job);
+}
+
+// Fired when a server-imposed THROTTLED interval expires: runs any pending
+// job as a canary, then clears the wait interval.
+void SyncScheduler::Unthrottle() {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  DCHECK_EQ(WaitInterval::THROTTLED, wait_interval_->mode);
+  SDVLOG(2) << "Unthrottled.";
+  DoCanaryJob();
+  wait_interval_.reset();
+}
+
+// Broadcasts a SyncEngineEvent with |cause| to all listeners registered
+// with the session context.
+void SyncScheduler::Notify(SyncEngineEvent::EventCause cause) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  session_context_->NotifyListeners(SyncEngineEvent(cause));
+}
+
+// True iff we are currently in an EXPONENTIAL_BACKOFF wait interval.
+bool SyncScheduler::IsBackingOff() const {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  return wait_interval_.get() && wait_interval_->mode ==
+      WaitInterval::EXPONENTIAL_BACKOFF;
+}
+
+// SyncSession::Delegate: the server throttled us until |silenced_until|.
+// Enters a THROTTLED wait interval whose timer fires Unthrottle.
+void SyncScheduler::OnSilencedUntil(const base::TimeTicks& silenced_until) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  wait_interval_.reset(new WaitInterval(WaitInterval::THROTTLED,
+                                        silenced_until - TimeTicks::Now()));
+  wait_interval_->timer.Start(FROM_HERE, wait_interval_->length, this,
+      &SyncScheduler::Unthrottle);
+}
+
+// SyncSession::Delegate: true iff a server-imposed THROTTLED interval is
+// currently in effect.
+bool SyncScheduler::IsSyncingCurrentlySilenced() {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  return wait_interval_.get() && wait_interval_->mode ==
+      WaitInterval::THROTTLED;
+}
+
+// SyncSession::Delegate: server pushed a new short poll interval. Takes
+// effect the next time AdjustPolling runs.
+void SyncScheduler::OnReceivedShortPollIntervalUpdate(
+    const base::TimeDelta& new_interval) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  syncer_short_poll_interval_seconds_ = new_interval;
+}
+
+// SyncSession::Delegate: server pushed a new long poll interval. Takes
+// effect the next time AdjustPolling runs.
+void SyncScheduler::OnReceivedLongPollIntervalUpdate(
+    const base::TimeDelta& new_interval) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  syncer_long_poll_interval_seconds_ = new_interval;
+}
+
+// SyncSession::Delegate: server pushed a new sessions commit delay.
+void SyncScheduler::OnReceivedSessionsCommitDelay(
+    const base::TimeDelta& new_delay) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  sessions_commit_delay_ = new_delay;
+}
+
+// SyncSession::Delegate: server told us to stop syncing for good. Aborts
+// the syncer and notifies listeners.
+void SyncScheduler::OnShouldStopSyncingPermanently() {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  SDVLOG(2) << "OnShouldStopSyncingPermanently";
+  syncer_->RequestEarlyExit();  // Thread-safe.
+  Notify(SyncEngineEvent::STOP_SYNCING_PERMANENTLY);
+}
+
+// Broadcasts an ACTIONABLE_ERROR event carrying a copy of |snap|.
+void SyncScheduler::OnActionableError(
+    const sessions::SyncSessionSnapshot& snap) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  SDVLOG(2) << "OnActionableError";
+  SyncEngineEvent event(SyncEngineEvent::ACTIONABLE_ERROR);
+  sessions::SyncSessionSnapshot snapshot(snap);
+  // NOTE(review): |event.snapshot| points at the stack-local copy above;
+  // this assumes NotifyListeners consumes the event synchronously and no
+  // listener retains the pointer — confirm.
+  event.snapshot = &snapshot;
+  session_context_->NotifyListeners(event);
+}
+
+// SyncSession::Delegate: inspects the protocol error in |snapshot|;
+// may abort the syncer early and/or surface an actionable error.
+void SyncScheduler::OnSyncProtocolError(
+    const sessions::SyncSessionSnapshot& snapshot) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  if (ShouldRequestEarlyExit(snapshot.errors.sync_protocol_error)) {
+    SDVLOG(2) << "Sync Scheduler requesting early exit.";
+    syncer_->RequestEarlyExit();  // Thread-safe.
+  }
+  if (IsActionableError(snapshot.errors.sync_protocol_error))
+    OnActionableError(snapshot);
+}
+
+// Forwards the notifications-enabled flag to the session context (this also
+// influences which poll interval AdjustPolling picks).
+void SyncScheduler::set_notifications_enabled(bool notifications_enabled) {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  session_context_->set_notifications_enabled(notifications_enabled);
+}
+
+// Accessor for the server-tweakable sessions commit delay.
+base::TimeDelta SyncScheduler::sessions_commit_delay() const {
+  DCHECK_EQ(MessageLoop::current(), sync_loop_);
+  return sessions_commit_delay_;
+}
+
+#undef SDVLOG_LOC
+
+#undef SDVLOG
+
+#undef SLOG
+
+#undef ENUM_CASE
+
+} // browser_sync
diff --git a/sync/engine/sync_scheduler.h b/sync/engine/sync_scheduler.h
new file mode 100644
index 0000000..8540484
--- /dev/null
+++ b/sync/engine/sync_scheduler.h
@@ -0,0 +1,421 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class to schedule syncer tasks intelligently.
+#ifndef SYNC_ENGINE_SYNC_SCHEDULER_H_
+#define SYNC_ENGINE_SYNC_SCHEDULER_H_
+#pragma once
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/observer_list.h"
+#include "base/time.h"
+#include "base/timer.h"
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/engine/nudge_source.h"
+#include "sync/engine/polling_constants.h"
+#include "sync/engine/syncer.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/model_type_payload_map.h"
+#include "sync/util/weak_handle.h"
+
+class MessageLoop;
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace browser_sync {
+
+struct ServerConnectionEvent;
+
+// Schedules and throttles syncer work: nudges, polls, configuration
+// downloads, and one-off cleanup jobs, with exponential backoff and
+// server-directed throttling. All methods (except RequestStop /
+// RequestEarlyExit paths) must run on the sync loop.
+class SyncScheduler : public sessions::SyncSession::Delegate {
+ public:
+  enum Mode {
+    // In this mode, the thread only performs configuration tasks.  This is
+    // designed to make the case where we want to download updates for a
+    // specific type only, and not continue syncing until we are moved into
+    // normal mode.
+    CONFIGURATION_MODE,
+    // Resumes polling and allows nudges, drops configuration tasks.  Runs
+    // through entire sync cycle.
+    NORMAL_MODE,
+  };
+
+  // All methods of SyncScheduler must be called on the same thread
+  // (except for RequestEarlyExit()).
+
+  // |name| is a display string to identify the syncer thread.  Takes
+  // |ownership of both |context| and |syncer|.
+  SyncScheduler(const std::string& name,
+                sessions::SyncSessionContext* context, Syncer* syncer);
+
+  // Calls Stop().
+  virtual ~SyncScheduler();
+
+  // Start the scheduler with the given mode.  If the scheduler is
+  // already started, switch to the given mode, although some
+  // scheduled tasks from the old mode may still run.  If non-NULL,
+  // |callback| will be invoked when the mode has been changed to
+  // |mode|.  Takes ownership of |callback|.
+  void Start(Mode mode, const base::Closure& callback);
+
+  // Request that any running syncer task stop as soon as possible and
+  // cancel all scheduled tasks. This function can be called from any thread,
+  // and should in fact be called from a thread that isn't the sync loop to
+  // allow preempting ongoing sync cycles.
+  // Invokes |callback| from the sync loop once syncer is idle and all tasks
+  // are cancelled.
+  void RequestStop(const base::Closure& callback);
+
+  // The meat and potatoes.
+  void ScheduleNudge(const base::TimeDelta& delay, NudgeSource source,
+                     syncable::ModelTypeSet types,
+                     const tracked_objects::Location& nudge_location);
+  void ScheduleNudgeWithPayloads(
+      const base::TimeDelta& delay, NudgeSource source,
+      const syncable::ModelTypePayloadMap& types_with_payloads,
+      const tracked_objects::Location& nudge_location);
+
+  // Note: The source argument of this function must come from the subset of
+  // GetUpdatesCallerInfo values related to configurations.
+  void ScheduleConfig(
+      syncable::ModelTypeSet types,
+      sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source);
+
+  void ScheduleClearUserData();
+  // If this is called before Start(), the cleanup is guaranteed to
+  // happen before the Start finishes.
+  //
+  // TODO(akalin): Figure out how to test this.
+  void ScheduleCleanupDisabledTypes();
+
+  // Change status of notifications in the SyncSessionContext.
+  void set_notifications_enabled(bool notifications_enabled);
+
+  base::TimeDelta sessions_commit_delay() const;
+
+  // DDOS avoidance function. Calculates how long we should wait before trying
+  // again after a failed sync attempt, where the last delay was |base_delay|.
+  // TODO(tim): Look at URLRequestThrottlerEntryInterface.
+  static base::TimeDelta GetRecommendedDelay(const base::TimeDelta& base_delay);
+
+  // Called when credentials are updated by the user.
+  void OnCredentialsUpdated();
+
+  // Called when the network layer detects a connection status change.
+  void OnConnectionStatusChange();
+
+  // SyncSession::Delegate implementation.
+  virtual void OnSilencedUntil(
+      const base::TimeTicks& silenced_until) OVERRIDE;
+  virtual bool IsSyncingCurrentlySilenced() OVERRIDE;
+  virtual void OnReceivedShortPollIntervalUpdate(
+      const base::TimeDelta& new_interval) OVERRIDE;
+  virtual void OnReceivedLongPollIntervalUpdate(
+      const base::TimeDelta& new_interval) OVERRIDE;
+  virtual void OnReceivedSessionsCommitDelay(
+      const base::TimeDelta& new_delay) OVERRIDE;
+  virtual void OnShouldStopSyncingPermanently() OVERRIDE;
+  virtual void OnSyncProtocolError(
+      const sessions::SyncSessionSnapshot& snapshot) OVERRIDE;
+
+ private:
+  enum JobProcessDecision {
+    // Indicates we should continue with the current job.
+    CONTINUE,
+    // Indicates that we should save it to be processed later.
+    SAVE,
+    // Indicates we should drop this job.
+    DROP,
+  };
+
+  // Internal unit of work: a sync session plus the scheduling metadata
+  // (purpose, start time, canary status, origin) needed to run or defer it.
+  struct SyncSessionJob {
+    // An enum used to describe jobs for scheduling purposes.
+    enum SyncSessionJobPurpose {
+      // Uninitialized state, should never be hit in practice.
+      UNKNOWN = -1,
+      // Our poll timer schedules POLL jobs periodically based on a server
+      // assigned poll interval.
+      POLL,
+      // A nudge task can come from a variety of components needing to force
+      // a sync.  The source is inferable from |session.source()|.
+      NUDGE,
+      // The user invoked a function in the UI to clear their entire account
+      // and stop syncing (globally).
+      CLEAR_USER_DATA,
+      // Typically used for fetching updates for a subset of the enabled types
+      // during initial sync or reconfiguration.  We don't run all steps of
+      // the sync cycle for these (e.g. CleanupDisabledTypes is skipped).
+      CONFIGURATION,
+      // The user disabled some types and we have to clean up the data
+      // for those.
+      CLEANUP_DISABLED_TYPES,
+    };
+    SyncSessionJob();
+    SyncSessionJob(SyncSessionJobPurpose purpose, base::TimeTicks start,
+        linked_ptr<sessions::SyncSession> session, bool is_canary_job,
+        const tracked_objects::Location& nudge_location);
+    ~SyncSessionJob();
+    static const char* GetPurposeString(SyncSessionJobPurpose purpose);
+
+    SyncSessionJobPurpose purpose;
+    base::TimeTicks scheduled_start;
+    linked_ptr<sessions::SyncSession> session;
+    bool is_canary_job;
+
+    // This is the location the job came from.  Used for debugging.
+    // In case of multiple nudges getting coalesced this stores the
+    // first location that came in.
+    tracked_objects::Location from_here;
+  };
+  friend class SyncSchedulerTest;
+  friend class SyncSchedulerWhiteboxTest;
+
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+      DropNudgeWhileExponentialBackOff);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, SaveNudge);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+                           SaveNudgeWhileTypeThrottled);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueNudge);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, DropPoll);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinuePoll);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest, ContinueConfiguration);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+                           SaveConfigurationWhileThrottled);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+                           SaveNudgeWhileThrottled);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+                           ContinueClearUserDataUnderAllCircumstances);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+                           ContinueCanaryJobConfig);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerWhiteboxTest,
+      ContinueNudgeWhileExponentialBackOff);
+  FRIEND_TEST_ALL_PREFIXES(SyncSchedulerTest, TransientPollFailure);
+
+  // A component used to get time delays associated with exponential backoff.
+  // Encapsulated into a class to facilitate testing.
+  class DelayProvider {
+   public:
+    DelayProvider();
+    virtual base::TimeDelta GetDelay(const base::TimeDelta& last_delay);
+    virtual ~DelayProvider();
+   private:
+    DISALLOW_COPY_AND_ASSIGN(DelayProvider);
+  };
+
+  struct WaitInterval {
+    enum Mode {
+      // Uninitialized state, should not be set in practice.
+      UNKNOWN = -1,
+      // A wait interval whose duration has been affected by exponential
+      // backoff.
+      // EXPONENTIAL_BACKOFF intervals are nudge-rate limited to 1 per interval.
+      EXPONENTIAL_BACKOFF,
+      // A server-initiated throttled interval.  We do not allow any syncing
+      // during such an interval.
+      THROTTLED,
+    };
+    WaitInterval();
+    ~WaitInterval();
+    WaitInterval(Mode mode, base::TimeDelta length);
+
+    static const char* GetModeString(Mode mode);
+
+    Mode mode;
+
+    // This bool is set to true if we have observed a nudge during this
+    // interval and mode == EXPONENTIAL_BACKOFF.
+    bool had_nudge;
+    base::TimeDelta length;
+    base::OneShotTimer<SyncScheduler> timer;
+
+    // Configure jobs are saved only when backing off or throttling.  So we
+    // expose the pointer here.
+    scoped_ptr<SyncSessionJob> pending_configure_job;
+  };
+
+  static const char* GetModeString(Mode mode);
+
+  static const char* GetDecisionString(JobProcessDecision decision);
+
+  // Helpers that log before posting to |sync_loop_|.  These will only post
+  // the task in between calls to Start/Stop.
+  void PostTask(const tracked_objects::Location& from_here,
+                const char* name,
+                const base::Closure& task);
+  void PostDelayedTask(const tracked_objects::Location& from_here,
+                       const char* name,
+                       const base::Closure& task,
+                       base::TimeDelta delay);
+
+  // Helper to assemble a job and post a delayed task to sync.
+  void ScheduleSyncSessionJob(const SyncSessionJob& job);
+
+  // Invoke the Syncer to perform a sync.
+  void DoSyncSessionJob(const SyncSessionJob& job);
+
+  // Called after the Syncer has performed the sync represented by |job|, to
+  // reset our state.
+  void FinishSyncSessionJob(const SyncSessionJob& job);
+
+  // Record important state that might be needed in future syncs, such as which
+  // data types may require cleanup.
+  void UpdateCarryoverSessionState(const SyncSessionJob& old_job);
+
+  // Helper to FinishSyncSessionJob to schedule the next sync operation.
+  void ScheduleNextSync(const SyncSessionJob& old_job);
+
+  // Helper to configure polling intervals. Used by Start and ScheduleNextSync.
+  void AdjustPolling(const SyncSessionJob* old_job);
+
+  // Helper to restart waiting with |wait_interval_|'s timer.
+  void RestartWaiting();
+
+  // Helper to ScheduleNextSync in case of consecutive sync errors.
+  void HandleContinuationError(const SyncSessionJob& old_job);
+
+  // Determines if it is legal to run |job| by checking current
+  // operational mode, backoff or throttling, freshness
+  // (so we don't make redundant syncs), and connection.
+  bool ShouldRunJob(const SyncSessionJob& job);
+
+  // Decide whether we should CONTINUE, SAVE or DROP the job.
+  JobProcessDecision DecideOnJob(const SyncSessionJob& job);
+
+  // Decide on whether to CONTINUE, SAVE or DROP the job when we are in
+  // backoff mode.
+  JobProcessDecision DecideWhileInWaitInterval(const SyncSessionJob& job);
+
+  // Saves the job for future execution. Note: It drops all the poll jobs.
+  void SaveJob(const SyncSessionJob& job);
+
+  // Coalesces the current job with the pending nudge.
+  void InitOrCoalescePendingJob(const SyncSessionJob& job);
+
+  // 'Impl' here refers to real implementation of public functions, running on
+  // |thread_|.
+  void StartImpl(Mode mode, const base::Closure& callback);
+  void StopImpl(const base::Closure& callback);
+  void ScheduleNudgeImpl(
+      const base::TimeDelta& delay,
+      sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source,
+      const syncable::ModelTypePayloadMap& types_with_payloads,
+      bool is_canary_job, const tracked_objects::Location& nudge_location);
+  void ScheduleConfigImpl(const ModelSafeRoutingInfo& routing_info,
+      const std::vector<ModelSafeWorker*>& workers,
+      const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource source);
+  void ScheduleClearUserDataImpl();
+
+  // Returns true if the client is currently in exponential backoff.
+  bool IsBackingOff() const;
+
+  // Helper to signal all listeners registered with |session_context_|.
+  void Notify(SyncEngineEvent::EventCause cause);
+
+  // Callback to change backoff state.
+  void DoCanaryJob();
+  void Unthrottle();
+
+  // Executes the pending job. Called whenever an event occurs that may
+  // change conditions permitting a job to run. Like when network connection is
+  // re-established, mode changes etc.
+  void DoPendingJobIfPossible(bool is_canary_job);
+
+  // Called when the root cause of the current connection error is fixed.
+  void OnServerConnectionErrorFixed();
+
+  // The pointer is owned by the caller.
+  browser_sync::sessions::SyncSession* CreateSyncSession(
+      const browser_sync::sessions::SyncSourceInfo& info);
+
+  // Creates a session for a poll and performs the sync.
+  void PollTimerCallback();
+
+  // Assign |start| and |end| to appropriate SyncerStep values for the
+  // specified |purpose|.
+  void SetSyncerStepsForPurpose(SyncSessionJob::SyncSessionJobPurpose purpose,
+                                SyncerStep* start,
+                                SyncerStep* end);
+
+  // Used to update |server_connection_ok_|, see below.
+  void UpdateServerConnectionManagerStatus(
+      HttpResponse::ServerConnectionCode code);
+
+  // Called once the first time thread_ is started to broadcast an initial
+  // session snapshot containing data like initial_sync_ended.  Important when
+  // the client starts up and does not need to perform an initial sync.
+  void SendInitialSnapshot();
+
+  virtual void OnActionableError(const sessions::SyncSessionSnapshot& snapshot);
+
+  // NOTE(review): declared before the other members, so it is destroyed
+  // *last*; weak-ptr factories are conventionally declared last so pending
+  // callbacks are invalidated before member teardown — confirm intended.
+  base::WeakPtrFactory<SyncScheduler> weak_ptr_factory_;
+
+  // A second factory specially for weak_handle_this_, to allow the handle
+  // to be const and alleviate threading concerns.
+  base::WeakPtrFactory<SyncScheduler> weak_ptr_factory_for_weak_handle_;
+
+  // For certain methods that need to worry about X-thread posting.
+  const WeakHandle<SyncScheduler> weak_handle_this_;
+
+  // Used for logging.
+  const std::string name_;
+
+  // The message loop this object is on.  Almost all methods have to
+  // be called on this thread.
+  MessageLoop* const sync_loop_;
+
+  // Set in Start(), unset in Stop().
+  bool started_;
+
+  // Modifiable versions of kDefaultLongPollIntervalSeconds which can be
+  // updated by the server.
+  base::TimeDelta syncer_short_poll_interval_seconds_;
+  base::TimeDelta syncer_long_poll_interval_seconds_;
+
+  // Server-tweakable sessions commit delay.
+  base::TimeDelta sessions_commit_delay_;
+
+  // Periodic timer for polling.  See AdjustPolling.
+  base::RepeatingTimer<SyncScheduler> poll_timer_;
+
+  // The mode of operation.
+  Mode mode_;
+
+  // TODO(tim): Bug 26339. This needs to track more than just time I think,
+  // since the nudges could be for different types. Current impl doesn't care.
+  base::TimeTicks last_sync_session_end_time_;
+
+  // Have we observed a valid server connection?
+  bool server_connection_ok_;
+
+  // The latest connection code we got while trying to connect.
+  HttpResponse::ServerConnectionCode connection_code_;
+
+  // Tracks in-flight nudges so we can coalesce.
+  scoped_ptr<SyncSessionJob> pending_nudge_;
+
+  // Current wait state.  Null if we're not in backoff and not throttled.
+  scoped_ptr<WaitInterval> wait_interval_;
+
+  scoped_ptr<DelayProvider> delay_provider_;
+
+  // Invoked to run through the sync cycle.
+  scoped_ptr<Syncer> syncer_;
+
+  scoped_ptr<sessions::SyncSessionContext> session_context_;
+
+  DISALLOW_COPY_AND_ASSIGN(SyncScheduler);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNC_SCHEDULER_H_
diff --git a/sync/engine/sync_scheduler_unittest.cc b/sync/engine/sync_scheduler_unittest.cc
new file mode 100644
index 0000000..d90ce23
--- /dev/null
+++ b/sync/engine/sync_scheduler_unittest.cc
@@ -0,0 +1,1165 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop.h"
+#include "base/test/test_timeouts.h"
+#include "sync/engine/sync_scheduler.h"
+#include "sync/engine/syncer.h"
+#include "sync/sessions/test_util.h"
+#include "sync/test/engine/fake_model_safe_worker_registrar.h"
+#include "sync/test/engine/mock_connection_manager.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/fake_extensions_activity_monitor.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+using testing::_;
+using testing::AtLeast;
+using testing::DoAll;
+using testing::Eq;
+using testing::Invoke;
+using testing::Mock;
+using testing::Return;
+using testing::WithArg;
+
+namespace browser_sync {
+using sessions::SyncSession;
+using sessions::SyncSessionContext;
+using sessions::SyncSessionSnapshot;
+using syncable::ModelTypeSet;
+using sync_pb::GetUpdatesCallerInfo;
+
+// gmock Syncer that lets tests intercept and script SyncShare calls.
+class MockSyncer : public Syncer {
+ public:
+  MOCK_METHOD3(SyncShare, void(sessions::SyncSession*, SyncerStep,
+                               SyncerStep));
+};
+
+// Used when tests want to record syncing activity to examine later.
+// |times| and |snapshots| are parallel: entry i describes SyncShare call i.
+struct SyncShareRecords {
+  std::vector<TimeTicks> times;
+  std::vector<linked_ptr<SyncSessionSnapshot> > snapshots;
+};
+
+// Immediately exits the current message loop.
+void QuitLoopNow() {
+  // We use QuitNow() instead of Quit() as the latter may get stalled
+  // indefinitely in the presence of repeated timers with low delays
+  // and a slow test (e.g., ThrottlingDoesThrottle [which has a poll
+  // delay of 5ms] run under TSAN on the trybots).
+  MessageLoop::current()->QuitNow();
+}
+
+// Runs the current message loop until something calls Quit/QuitNow.
+void RunLoop() {
+  MessageLoop::current()->Run();
+}
+
+// Runs the message loop until the posted QuitLoopNow task is reached,
+// draining exactly one "round" of already-queued work.
+void PumpLoop() {
+  // Do it this way instead of RunAllPending to pump loop exactly once
+  // (necessary in the presence of timers; see comment in
+  // QuitLoopNow).
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&QuitLoopNow));
+  RunLoop();
+}
+
+// Convenient to use in tests wishing to analyze SyncShare calls over time.
+static const size_t kMinNumSamples = 5;
+
+// Fixture wiring a real SyncScheduler to a MockSyncer, a mock connection
+// manager, and a fake registrar/test directory, all on a single MessageLoop.
+class SyncSchedulerTest : public testing::Test {
+ public:
+  SyncSchedulerTest()
+      : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+        context_(NULL),
+        syncer_(NULL),
+        delay_(NULL) {}
+
+  class MockDelayProvider : public SyncScheduler::DelayProvider {
+   public:
+    MOCK_METHOD1(GetDelay, TimeDelta(const TimeDelta&));
+  };
+
+  virtual void SetUp() {
+    dir_maker_.SetUp();
+    // |syncer_| and |context_| ownership passes to the scheduler below.
+    syncer_ = new MockSyncer();
+    delay_ = NULL;
+    ModelSafeRoutingInfo routing_info;
+    routing_info[syncable::BOOKMARKS] = GROUP_UI;
+    routing_info[syncable::AUTOFILL] = GROUP_DB;
+    routing_info[syncable::THEMES] = GROUP_UI;
+    routing_info[syncable::NIGORI] = GROUP_PASSIVE;
+    registrar_.reset(new FakeModelSafeWorkerRegistrar(routing_info));
+    connection_.reset(new MockConnectionManager(directory()));
+    connection_->SetServerReachable();
+    context_ = new SyncSessionContext(
+        connection_.get(), directory(), registrar_.get(),
+        &extensions_activity_monitor_,
+        std::vector<SyncEngineEventListener*>(), NULL);
+    context_->set_notifications_enabled(true);
+    context_->set_account_name("Test");
+    scheduler_.reset(
+        new SyncScheduler("TestSyncScheduler", context_, syncer_));
+  }
+
+  SyncScheduler* scheduler() { return scheduler_.get(); }
+  MockSyncer* syncer() { return syncer_; }
+  MockDelayProvider* delay() { return delay_; }
+  MockConnectionManager* connection() { return connection_.get(); }
+  TimeDelta zero() { return TimeDelta::FromSeconds(0); }
+  TimeDelta timeout() {
+    return TimeDelta::FromMilliseconds(TestTimeouts::action_timeout_ms());
+  }
+
+  virtual void TearDown() {
+    PumpLoop();
+    scheduler_.reset();
+    PumpLoop();
+    dir_maker_.TearDown();
+  }
+
+  // Asserts that recorded SyncShare calls look like a periodic poll run:
+  // at least |min_num_samples| samples, each no earlier than its ideal
+  // slot and sourced from PERIODIC.
+  void AnalyzePollRun(const SyncShareRecords& records, size_t min_num_samples,
+      const TimeTicks& optimal_start, const TimeDelta& poll_interval) {
+    const std::vector<TimeTicks>& data(records.times);
+    EXPECT_GE(data.size(), min_num_samples);
+    for (size_t i = 0; i < data.size(); i++) {
+      SCOPED_TRACE(testing::Message() << "SyncShare # (" << i << ")");
+      TimeTicks optimal_next_sync = optimal_start + poll_interval * i;
+      EXPECT_GE(data[i], optimal_next_sync);
+      EXPECT_EQ(GetUpdatesCallerInfo::PERIODIC,
+                records.snapshots[i]->source.updates_source);
+    }
+  }
+
+  void DoQuitLoopNow() {
+    QuitLoopNow();
+  }
+
+  void StartSyncScheduler(SyncScheduler::Mode mode) {
+    scheduler()->Start(
+        mode,
+        base::Bind(&SyncSchedulerTest::DoQuitLoopNow,
+                   weak_ptr_factory_.GetWeakPtr()));
+  }
+
+  // This stops the scheduler synchronously.
+  void StopSyncScheduler() {
+    scheduler()->RequestStop(base::Bind(&SyncSchedulerTest::DoQuitLoopNow,
+                             weak_ptr_factory_.GetWeakPtr()));
+    RunLoop();
+  }
+
+  // Starts the scheduler, fires one local nudge, and reports whether the
+  // scheduler ended up backing off.
+  bool RunAndGetBackoff() {
+    ModelTypeSet nudge_types;
+    StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+    RunLoop();
+
+    scheduler()->ScheduleNudge(
+        zero(), NUDGE_SOURCE_LOCAL, nudge_types, FROM_HERE);
+    RunLoop();
+
+    return scheduler()->IsBackingOff();
+  }
+
+  void UseMockDelayProvider() {
+    // Scheduler takes ownership; |delay_| remains a raw observing pointer.
+    delay_ = new MockDelayProvider();
+    scheduler_->delay_provider_.reset(delay_);
+  }
+
+  // Compare a ModelTypeSet to a ModelTypePayloadMap, ignoring
+  // payload values.
+  bool CompareModelTypeSetToModelTypePayloadMap(
+      ModelTypeSet lhs,
+      const syncable::ModelTypePayloadMap& rhs) {
+    size_t count = 0;
+    for (syncable::ModelTypePayloadMap::const_iterator i = rhs.begin();
+         i != rhs.end(); ++i, ++count) {
+      if (!lhs.Has(i->first))
+        return false;
+    }
+    if (lhs.Size() != count)
+      return false;
+    return true;
+  }
+
+  SyncSessionContext* context() { return context_; }
+
+ private:
+  syncable::Directory* directory() {
+    return dir_maker_.directory();
+  }
+
+  base::WeakPtrFactory<SyncSchedulerTest> weak_ptr_factory_;
+  MessageLoop message_loop_;
+  TestDirectorySetterUpper dir_maker_;
+  scoped_ptr<SyncScheduler> scheduler_;
+  scoped_ptr<MockConnectionManager> connection_;
+  SyncSessionContext* context_;
+  MockSyncer* syncer_;
+  MockDelayProvider* delay_;
+  scoped_ptr<FakeModelSafeWorkerRegistrar> registrar_;
+  FakeExtensionsActivityMonitor extensions_activity_monitor_;
+};
+
+// Variant fixture that forces a 1ms backoff delay so failure paths enter
+// backoff quickly, and stops the scheduler before base teardown.
+class BackoffTriggersSyncSchedulerTest : public SyncSchedulerTest {
+  void SetUp() {
+    SyncSchedulerTest::SetUp();
+    UseMockDelayProvider();
+    EXPECT_CALL(*delay(), GetDelay(_))
+        .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
+  }
+
+  void TearDown() {
+    StopSyncScheduler();
+    SyncSchedulerTest::TearDown();
+  }
+};
+
+void RecordSyncShareImpl(SyncSession* s, SyncShareRecords* record) {  // Logs time + snapshot of |s|.
+  record->times.push_back(TimeTicks::Now());
+  record->snapshots.push_back(make_linked_ptr(new SyncSessionSnapshot(
+      s->TakeSnapshot())));
+}
+
+ACTION_P(RecordSyncShare, record) {  // Records the session, then quits the loop.
+  RecordSyncShareImpl(arg0, record);
+  QuitLoopNow();
+}
+
+ACTION_P2(RecordSyncShareMultiple, record, quit_after) {  // Quits only once |quit_after| records exist.
+  RecordSyncShareImpl(arg0, record);
+  EXPECT_LE(record->times.size(), quit_after);
+  if (record->times.size() >= quit_after) {
+    QuitLoopNow();
+  }
+}
+
+ACTION(AddFailureAndQuitLoopNow) {  // Any call reaching this action is itself a test failure.
+  ADD_FAILURE();
+  QuitLoopNow();
+}
+
+ACTION(QuitLoopNowAction) {  // Gmock-action wrapper around QuitLoopNow().
+  QuitLoopNow();
+}
+
+// Test nudge scheduling.
+TEST_F(SyncSchedulerTest, Nudge) {
+ SyncShareRecords records;
+ ModelTypeSet model_types(syncable::BOOKMARKS);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))))
+ .RetiresOnSaturation();
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, model_types, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records.snapshots[0]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ records.snapshots[0]->source.updates_source);
+
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // Make sure a second, later, nudge is unaffected by first (no coalescing).
+ SyncShareRecords records2;
+ model_types.Remove(syncable::BOOKMARKS);
+ model_types.Put(syncable::AUTOFILL);
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records2))));
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, model_types, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, records2.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records2.snapshots[0]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ records2.snapshots[0]->source.updates_source);
+}
+
+// Make sure a regular config command is scheduled fine in the absence of any
+// errors.
+TEST_F(SyncSchedulerTest, Config) {
+ SyncShareRecords records;
+ const ModelTypeSet model_types(syncable::BOOKMARKS);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))));
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleConfig(
+ model_types, GetUpdatesCallerInfo::RECONFIGURATION);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records.snapshots[0]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::RECONFIGURATION,
+ records.snapshots[0]->source.updates_source);
+}
+
+// Simulate a failure and make sure the config request is retried.
+TEST_F(SyncSchedulerTest, ConfigWithBackingOff) {
+ UseMockDelayProvider();
+ EXPECT_CALL(*delay(), GetDelay(_))
+ .WillRepeatedly(Return(TimeDelta::FromMilliseconds(1)));
+ SyncShareRecords records;
+ const ModelTypeSet model_types(syncable::BOOKMARKS);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))));
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ ASSERT_EQ(0U, records.snapshots.size());
+ scheduler()->ScheduleConfig(
+ model_types, GetUpdatesCallerInfo::RECONFIGURATION);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ RunLoop();
+
+ ASSERT_EQ(2U, records.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records.snapshots[1]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::RECONFIGURATION,
+ records.snapshots[1]->source.updates_source);
+}
+
+// Issue 2 config commands. Second one right after the first has failed
+// and make sure LATEST is executed.
+TEST_F(SyncSchedulerTest, MultipleConfigWithBackingOff) {
+ const ModelTypeSet
+ model_types1(syncable::BOOKMARKS),
+ model_types2(syncable::AUTOFILL);
+ UseMockDelayProvider();
+ EXPECT_CALL(*delay(), GetDelay(_))
+ .WillRepeatedly(Return(TimeDelta::FromMilliseconds(30)));
+ SyncShareRecords records;
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))));
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ ASSERT_EQ(0U, records.snapshots.size());
+ scheduler()->ScheduleConfig(
+ model_types1, GetUpdatesCallerInfo::RECONFIGURATION);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ scheduler()->ScheduleConfig(
+ model_types2, GetUpdatesCallerInfo::RECONFIGURATION);
+ RunLoop();
+
+ ASSERT_EQ(2U, records.snapshots.size());
+ RunLoop();
+
+ ASSERT_EQ(3U, records.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types2,
+ records.snapshots[2]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::RECONFIGURATION,
+ records.snapshots[2]->source.updates_source);
+}
+
+// Issue a nudge when the config has failed. Make sure both the config and
+// nudge are executed.
+TEST_F(SyncSchedulerTest, NudgeWithConfigWithBackingOff) {
+ const ModelTypeSet model_types(syncable::BOOKMARKS);
+ UseMockDelayProvider();
+ EXPECT_CALL(*delay(), GetDelay(_))
+ .WillRepeatedly(Return(TimeDelta::FromMilliseconds(50)));
+ SyncShareRecords records;
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))));
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ ASSERT_EQ(0U, records.snapshots.size());
+ scheduler()->ScheduleConfig(
+ model_types, GetUpdatesCallerInfo::RECONFIGURATION);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, model_types, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(2U, records.snapshots.size());
+ RunLoop();
+
+ // Now change the mode so nudge can execute.
+ ASSERT_EQ(3U, records.snapshots.size());
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ ASSERT_EQ(4U, records.snapshots.size());
+
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records.snapshots[2]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::RECONFIGURATION,
+ records.snapshots[2]->source.updates_source);
+
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(model_types,
+ records.snapshots[3]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ records.snapshots[3]->source.updates_source);
+
+}
+
+// Test that nudges are coalesced.
+TEST_F(SyncSchedulerTest, NudgeCoalescing) {
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ SyncShareRecords r;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&r))));
+ const ModelTypeSet
+ types1(syncable::BOOKMARKS),
+ types2(syncable::AUTOFILL),
+ types3(syncable::THEMES);
+ TimeDelta delay = zero();
+ TimeTicks optimal_time = TimeTicks::Now() + delay;
+ scheduler()->ScheduleNudge(
+ delay, NUDGE_SOURCE_UNKNOWN, types1, FROM_HERE);
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, types2, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, r.snapshots.size());
+ EXPECT_GE(r.times[0], optimal_time);
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(
+ Union(types1, types2), r.snapshots[0]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ r.snapshots[0]->source.updates_source);
+
+ Mock::VerifyAndClearExpectations(syncer());
+
+ SyncShareRecords r2;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&r2))));
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_NOTIFICATION, types3, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, r2.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(types3,
+ r2.snapshots[0]->source.types));
+ EXPECT_EQ(GetUpdatesCallerInfo::NOTIFICATION,
+ r2.snapshots[0]->source.updates_source);
+}
+
+// Test that nudges are coalesced.
+TEST_F(SyncSchedulerTest, NudgeCoalescingWithDifferentTimings) {
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ SyncShareRecords r;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&r))));
+ syncable::ModelTypeSet types1(syncable::BOOKMARKS),
+ types2(syncable::AUTOFILL), types3;
+
+ // Create a huge time delay.
+ TimeDelta delay = TimeDelta::FromDays(1);
+
+ scheduler()->ScheduleNudge(
+ delay, NUDGE_SOURCE_UNKNOWN, types1, FROM_HERE);
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_UNKNOWN, types2, FROM_HERE);
+
+ TimeTicks min_time = TimeTicks::Now();
+ TimeTicks max_time = TimeTicks::Now() + delay;
+
+ RunLoop();
+
+ // Make sure the sync has happened.
+ ASSERT_EQ(1U, r.snapshots.size());
+ EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(
+ Union(types1, types2), r.snapshots[0]->source.types));
+
+ // Make sure the sync happened at the right time.
+ EXPECT_GE(r.times[0], min_time);
+ EXPECT_LE(r.times[0], max_time);
+}
+
+// Test nudge scheduling.
+TEST_F(SyncSchedulerTest, NudgeWithPayloads) {
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ SyncShareRecords records;
+ syncable::ModelTypePayloadMap model_types_with_payloads;
+ model_types_with_payloads[syncable::BOOKMARKS] = "test";
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records))))
+ .RetiresOnSaturation();
+ scheduler()->ScheduleNudgeWithPayloads(
+ zero(), NUDGE_SOURCE_LOCAL, model_types_with_payloads, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, records.snapshots.size());
+ EXPECT_EQ(model_types_with_payloads, records.snapshots[0]->source.types);
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ records.snapshots[0]->source.updates_source);
+
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // Make sure a second, later, nudge is unaffected by first (no coalescing).
+ SyncShareRecords records2;
+ model_types_with_payloads.erase(syncable::BOOKMARKS);
+ model_types_with_payloads[syncable::AUTOFILL] = "test2";
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&records2))));
+ scheduler()->ScheduleNudgeWithPayloads(
+ zero(), NUDGE_SOURCE_LOCAL, model_types_with_payloads, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, records2.snapshots.size());
+ EXPECT_EQ(model_types_with_payloads, records2.snapshots[0]->source.types);
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ records2.snapshots[0]->source.updates_source);
+}
+
+// Test that nudges are coalesced.
+TEST_F(SyncSchedulerTest, NudgeWithPayloadsCoalescing) {
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ SyncShareRecords r;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&r))));
+ syncable::ModelTypePayloadMap types1, types2, types3;
+ types1[syncable::BOOKMARKS] = "test1";
+ types2[syncable::AUTOFILL] = "test2";
+ types3[syncable::THEMES] = "test3";
+ TimeDelta delay = zero();
+ TimeTicks optimal_time = TimeTicks::Now() + delay;
+ scheduler()->ScheduleNudgeWithPayloads(
+ delay, NUDGE_SOURCE_UNKNOWN, types1, FROM_HERE);
+ scheduler()->ScheduleNudgeWithPayloads(
+ zero(), NUDGE_SOURCE_LOCAL, types2, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, r.snapshots.size());
+ EXPECT_GE(r.times[0], optimal_time);
+ syncable::ModelTypePayloadMap coalesced_types;
+ syncable::CoalescePayloads(&coalesced_types, types1);
+ syncable::CoalescePayloads(&coalesced_types, types2);
+ EXPECT_EQ(coalesced_types, r.snapshots[0]->source.types);
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ r.snapshots[0]->source.updates_source);
+
+ Mock::VerifyAndClearExpectations(syncer());
+
+ SyncShareRecords r2;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShare(&r2))));
+ scheduler()->ScheduleNudgeWithPayloads(
+ zero(), NUDGE_SOURCE_NOTIFICATION, types3, FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(1U, r2.snapshots.size());
+ EXPECT_EQ(types3, r2.snapshots[0]->source.types);
+ EXPECT_EQ(GetUpdatesCallerInfo::NOTIFICATION,
+ r2.snapshots[0]->source.updates_source);
+}
+
+// Test that polling works as expected.
+TEST_F(SyncSchedulerTest, Polling) {
+ SyncShareRecords records;
+ TimeDelta poll_interval(TimeDelta::FromMilliseconds(30));
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(AtLeast(kMinNumSamples))
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShareMultiple(&records, kMinNumSamples))));
+
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll_interval);
+
+ TimeTicks optimal_start = TimeTicks::Now() + poll_interval;
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ RunLoop();
+
+ StopSyncScheduler();
+ AnalyzePollRun(records, kMinNumSamples, optimal_start, poll_interval);
+}
+
+// Test that the short poll interval is used.
+TEST_F(SyncSchedulerTest, PollNotificationsDisabled) {
+ SyncShareRecords records;
+ TimeDelta poll_interval(TimeDelta::FromMilliseconds(30));
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(AtLeast(kMinNumSamples))
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShareMultiple(&records, kMinNumSamples))));
+
+ scheduler()->OnReceivedShortPollIntervalUpdate(poll_interval);
+ scheduler()->set_notifications_enabled(false);
+
+ TimeTicks optimal_start = TimeTicks::Now() + poll_interval;
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ RunLoop();
+
+ StopSyncScheduler();
+ AnalyzePollRun(records, kMinNumSamples, optimal_start, poll_interval);
+}
+
+// Test that polling intervals are updated when needed.
+TEST_F(SyncSchedulerTest, PollIntervalUpdate) {
+ SyncShareRecords records;
+ TimeDelta poll1(TimeDelta::FromMilliseconds(120));
+ TimeDelta poll2(TimeDelta::FromMilliseconds(30));
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll1);
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(AtLeast(kMinNumSamples))
+ .WillOnce(WithArg<0>(
+ sessions::test_util::SimulatePollIntervalUpdate(poll2)))
+ .WillRepeatedly(
+ DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(
+ RecordSyncShareMultiple(&records, kMinNumSamples))));
+
+ TimeTicks optimal_start = TimeTicks::Now() + poll1 + poll2;
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ RunLoop();
+
+ StopSyncScheduler();
+ AnalyzePollRun(records, kMinNumSamples, optimal_start, poll2);
+}
+
+// Test that the sessions commit delay is updated when needed.
+TEST_F(SyncSchedulerTest, SessionsCommitDelay) {
+ SyncShareRecords records;
+ TimeDelta delay1(TimeDelta::FromMilliseconds(120));
+ TimeDelta delay2(TimeDelta::FromMilliseconds(30));
+ scheduler()->OnReceivedSessionsCommitDelay(delay1);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(
+ DoAll(
+ WithArg<0>(
+ sessions::test_util::SimulateSessionsCommitDelayUpdate(
+ delay2)),
+ Invoke(sessions::test_util::SimulateSuccess),
+ QuitLoopNowAction()));
+
+ EXPECT_EQ(delay1, scheduler()->sessions_commit_delay());
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ EXPECT_EQ(delay1, scheduler()->sessions_commit_delay());
+ const ModelTypeSet model_types(syncable::BOOKMARKS);
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, model_types, FROM_HERE);
+ RunLoop();
+
+ EXPECT_EQ(delay2, scheduler()->sessions_commit_delay());
+ StopSyncScheduler();
+}
+
+// Test that a sync session is run through to completion.
+TEST_F(SyncSchedulerTest, HasMoreToSync) {
+  EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+      .WillOnce(Invoke(sessions::test_util::SimulateHasMoreToSync))  // First pass reports more work.
+      .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+                      QuitLoopNowAction()));
+  StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+  RunLoop();
+
+  scheduler()->ScheduleNudge(
+      zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(), FROM_HERE);
+  RunLoop();
+  // If more nudges are scheduled, they'll be waited on by TearDown, and would
+  // cause our expectation to break.
+}
+
+// Test that no syncing occurs when throttled.
+TEST_F(SyncSchedulerTest, ThrottlingDoesThrottle) {
+ const ModelTypeSet types(syncable::BOOKMARKS);
+ TimeDelta poll(TimeDelta::FromMilliseconds(5));
+ TimeDelta throttle(TimeDelta::FromMinutes(10));
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(WithArg<0>(sessions::test_util::SimulateThrottled(throttle)))
+ .WillRepeatedly(AddFailureAndQuitLoopNow());
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ PumpLoop();
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleConfig(
+ types, GetUpdatesCallerInfo::RECONFIGURATION);
+ PumpLoop();
+}
+
+TEST_F(SyncSchedulerTest, ThrottlingExpires) {
+ SyncShareRecords records;
+ TimeDelta poll(TimeDelta::FromMilliseconds(15));
+ TimeDelta throttle1(TimeDelta::FromMilliseconds(150));
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+
+ ::testing::InSequence seq;
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(WithArg<0>(sessions::test_util::SimulateThrottled(throttle1)))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ WithArg<0>(RecordSyncShareMultiple(&records, kMinNumSamples))));
+
+ TimeTicks optimal_start = TimeTicks::Now() + poll + throttle1;
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ RunLoop();
+
+ StopSyncScheduler();
+ AnalyzePollRun(records, kMinNumSamples, optimal_start, poll);
+}
+
+// Test nudges / polls don't run in config mode and config tasks do.
+TEST_F(SyncSchedulerTest, ConfigurationMode) {
+  TimeDelta poll(TimeDelta::FromMilliseconds(15));
+  SyncShareRecords records;
+  scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+  EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+      .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),  // Was "(A, B)": the comma operator silently dropped SimulateSuccess.
+                      WithArg<0>(RecordSyncShare(&records))));
+
+  StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+  RunLoop();
+
+  const ModelTypeSet nudge_types(syncable::AUTOFILL);
+  scheduler()->ScheduleNudge(
+      zero(), NUDGE_SOURCE_LOCAL, nudge_types, FROM_HERE);
+  scheduler()->ScheduleNudge(
+      zero(), NUDGE_SOURCE_LOCAL, nudge_types, FROM_HERE);
+
+  const ModelTypeSet config_types(syncable::BOOKMARKS);
+
+  scheduler()->ScheduleConfig(
+      config_types, GetUpdatesCallerInfo::RECONFIGURATION);
+  RunLoop();
+
+  ASSERT_EQ(1U, records.snapshots.size());
+  EXPECT_TRUE(CompareModelTypeSetToModelTypePayloadMap(config_types,
+      records.snapshots[0]->source.types));
+}
+
+// Have the syncer fail during commit. Expect that the scheduler enters
+// backoff.
+TEST_F(BackoffTriggersSyncSchedulerTest, FailCommitOnce) {
+  EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+      .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+                      QuitLoopNowAction()));
+  EXPECT_TRUE(RunAndGetBackoff());
+}
+
+// Have the syncer fail during download updates and succeed on the first
+// retry. Expect that this clears the backoff state.
+TEST_F(BackoffTriggersSyncSchedulerTest, FailDownloadOnceThenSucceed) {
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(Invoke(sessions::test_util::SimulateDownloadUpdatesFailed))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ QuitLoopNowAction()));
+ EXPECT_FALSE(RunAndGetBackoff());
+}
+
+// Have the syncer fail during commit and succeed on the first retry. Expect
+// that this clears the backoff state.
+TEST_F(BackoffTriggersSyncSchedulerTest, FailCommitOnceThenSucceed) {
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(Invoke(sessions::test_util::SimulateCommitFailed))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ QuitLoopNowAction()));
+ EXPECT_FALSE(RunAndGetBackoff());
+}
+
+// Have the syncer fail to download updates and fail again on the retry.
+// Expect this will leave the scheduler in backoff.
+TEST_F(BackoffTriggersSyncSchedulerTest, FailDownloadTwice) {
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(Invoke(sessions::test_util::SimulateDownloadUpdatesFailed))
+ .WillRepeatedly(DoAll(
+ Invoke(sessions::test_util::SimulateDownloadUpdatesFailed),
+ QuitLoopNowAction()));
+ EXPECT_TRUE(RunAndGetBackoff());
+}
+
+// Test that no polls or extraneous nudges occur when in backoff.
+TEST_F(SyncSchedulerTest, BackoffDropsJobs) {
+ SyncShareRecords r;
+ TimeDelta poll(TimeDelta::FromMilliseconds(5));
+ const ModelTypeSet types(syncable::BOOKMARKS);
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+ UseMockDelayProvider();
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(1)
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ RecordSyncShareMultiple(&r, 1U)));
+ EXPECT_CALL(*delay(), GetDelay(_)).
+ WillRepeatedly(Return(TimeDelta::FromDays(1)));
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // This nudge should fail and put us into backoff. Thanks to our mock
+ // GetDelay() setup above, this will be a long backoff.
+ scheduler()->ScheduleNudge(zero(), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ RunLoop();
+
+ Mock::VerifyAndClearExpectations(syncer());
+ ASSERT_EQ(1U, r.snapshots.size());
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL, r.snapshots[0]->source.updates_source);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(1)
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ RecordSyncShare(&r)));
+
+ // We schedule a nudge with enough delay (10X poll interval) that at least
+ // one or two polls would have taken place. The nudge should succeed.
+ scheduler()->ScheduleNudge(poll * 10, NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ RunLoop();
+
+ Mock::VerifyAndClearExpectations(syncer());
+ Mock::VerifyAndClearExpectations(delay());
+ ASSERT_EQ(2U, r.snapshots.size());
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL, r.snapshots[1]->source.updates_source);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(0);
+ EXPECT_CALL(*delay(), GetDelay(_)).Times(0);
+
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleConfig(
+ types, GetUpdatesCallerInfo::RECONFIGURATION);
+ PumpLoop();
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, types, FROM_HERE);
+ PumpLoop();
+}
+
+// Test that backoff is shaping traffic properly with consecutive errors.
+TEST_F(SyncSchedulerTest, BackoffElevation) {
+ SyncShareRecords r;
+ UseMockDelayProvider();
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(kMinNumSamples)
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ RecordSyncShareMultiple(&r, kMinNumSamples)));
+
+ const TimeDelta first = TimeDelta::FromSeconds(1);
+ const TimeDelta second = TimeDelta::FromMilliseconds(2);
+ const TimeDelta third = TimeDelta::FromMilliseconds(3);
+ const TimeDelta fourth = TimeDelta::FromMilliseconds(4);
+ const TimeDelta fifth = TimeDelta::FromMilliseconds(5);
+ const TimeDelta sixth = TimeDelta::FromDays(1);
+
+ EXPECT_CALL(*delay(), GetDelay(Eq(first))).WillOnce(Return(second))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delay(), GetDelay(Eq(second))).WillOnce(Return(third))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delay(), GetDelay(Eq(third))).WillOnce(Return(fourth))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delay(), GetDelay(Eq(fourth))).WillOnce(Return(fifth))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delay(), GetDelay(Eq(fifth))).WillOnce(Return(sixth));
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again with a nudge.
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(), FROM_HERE);
+ RunLoop();
+
+ ASSERT_EQ(kMinNumSamples, r.snapshots.size());
+ EXPECT_GE(r.times[1] - r.times[0], second);
+ EXPECT_GE(r.times[2] - r.times[1], third);
+ EXPECT_GE(r.times[3] - r.times[2], fourth);
+ EXPECT_GE(r.times[4] - r.times[3], fifth);
+}
+
+// Test that things go back to normal once a retry makes forward progress.
+TEST_F(SyncSchedulerTest, BackoffRelief) {
+ SyncShareRecords r;
+ const TimeDelta poll(TimeDelta::FromMilliseconds(10));
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+ UseMockDelayProvider();
+
+ const TimeDelta backoff = TimeDelta::FromMilliseconds(5);
+
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+ .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+ RecordSyncShareMultiple(&r, kMinNumSamples)))
+ .WillRepeatedly(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+ RecordSyncShareMultiple(&r, kMinNumSamples)));
+ EXPECT_CALL(*delay(), GetDelay(_)).WillOnce(Return(backoff));
+
+ // Optimal start for the post-backoff poll party.
+ TimeTicks optimal_start = TimeTicks::Now();
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ scheduler()->ScheduleNudge(zero(), NUDGE_SOURCE_LOCAL,
+ ModelTypeSet(), FROM_HERE);
+ RunLoop();
+
+ StopSyncScheduler();
+
+ EXPECT_EQ(kMinNumSamples, r.times.size());
+
+ // The first nudge ran as soon as possible. It failed.
+ TimeTicks optimal_job_time = optimal_start;
+ EXPECT_GE(r.times[0], optimal_job_time);
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ r.snapshots[0]->source.updates_source);
+
+ // It was followed by a successful retry nudge shortly afterward.
+ optimal_job_time = optimal_job_time + backoff;
+ EXPECT_GE(r.times[1], optimal_job_time);
+ EXPECT_EQ(GetUpdatesCallerInfo::LOCAL,
+ r.snapshots[1]->source.updates_source);
+ // After that, we went back to polling.
+ for (size_t i = 2; i < r.snapshots.size(); i++) {
+ optimal_job_time = optimal_job_time + poll;
+ SCOPED_TRACE(testing::Message() << "SyncShare # (" << i << ")");
+ EXPECT_GE(r.times[i], optimal_job_time);
+ EXPECT_EQ(GetUpdatesCallerInfo::PERIODIC,
+ r.snapshots[i]->source.updates_source);
+ }
+}
+
+// Test that poll failures are ignored. They should have no effect on
+// subsequent poll attempts, nor should they trigger a backoff/retry.
+TEST_F(SyncSchedulerTest, TransientPollFailure) {
+  SyncShareRecords r;
+  const TimeDelta poll_interval(TimeDelta::FromMilliseconds(10));
+  scheduler()->OnReceivedLongPollIntervalUpdate(poll_interval);
+  UseMockDelayProvider(); // Will cause test failure if backoff is initiated.
+
+  EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+      .WillOnce(DoAll(Invoke(sessions::test_util::SimulateCommitFailed),
+                      RecordSyncShare(&r)))
+      .WillOnce(DoAll(Invoke(sessions::test_util::SimulateSuccess),
+                      RecordSyncShare(&r)));
+
+  StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+  RunLoop();
+
+  // Run the unsuccessful poll. The failed poll should not trigger backoff.
+  RunLoop();
+  EXPECT_FALSE(scheduler()->IsBackingOff());
+
+  // Run the successful poll.
+  RunLoop();
+  EXPECT_FALSE(scheduler()->IsBackingOff());
+
+  // Verify that the two SyncShare() calls were made one poll interval apart.
+  ASSERT_EQ(2U, r.snapshots.size());
+  EXPECT_GE(r.times[1] - r.times[0], poll_interval);
+}
+
+TEST_F(SyncSchedulerTest, GetRecommendedDelay) {
+ EXPECT_LE(TimeDelta::FromSeconds(0),
+ SyncScheduler::GetRecommendedDelay(TimeDelta::FromSeconds(0)));
+ EXPECT_LE(TimeDelta::FromSeconds(1),
+ SyncScheduler::GetRecommendedDelay(TimeDelta::FromSeconds(1)));
+ EXPECT_LE(TimeDelta::FromSeconds(50),
+ SyncScheduler::GetRecommendedDelay(TimeDelta::FromSeconds(50)));
+ EXPECT_LE(TimeDelta::FromSeconds(10),
+ SyncScheduler::GetRecommendedDelay(TimeDelta::FromSeconds(10)));
+ EXPECT_EQ(TimeDelta::FromSeconds(kMaxBackoffSeconds),
+ SyncScheduler::GetRecommendedDelay(
+ TimeDelta::FromSeconds(kMaxBackoffSeconds)));
+ EXPECT_EQ(TimeDelta::FromSeconds(kMaxBackoffSeconds),
+ SyncScheduler::GetRecommendedDelay(
+ TimeDelta::FromSeconds(kMaxBackoffSeconds + 1)));
+}
+
+// Test that appropriate syncer steps are requested for each job type.
+TEST_F(SyncSchedulerTest, SyncerSteps) {
+ // Nudges.
+ EXPECT_CALL(*syncer(), SyncShare(_, SYNCER_BEGIN, SYNCER_END))
+ .Times(1);
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(), FROM_HERE);
+ PumpLoop();
+ // Pump again to run job.
+ PumpLoop();
+
+ StopSyncScheduler();
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // ClearUserData.
+ EXPECT_CALL(*syncer(), SyncShare(_, CLEAR_PRIVATE_DATA, CLEAR_PRIVATE_DATA))
+ .Times(1);
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleClearUserData();
+ PumpLoop();
+ PumpLoop();
+
+ StopSyncScheduler();
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // Configuration.
+ EXPECT_CALL(*syncer(), SyncShare(_, DOWNLOAD_UPDATES, APPLY_UPDATES));
+ StartSyncScheduler(SyncScheduler::CONFIGURATION_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleConfig(
+ ModelTypeSet(), GetUpdatesCallerInfo::RECONFIGURATION);
+ PumpLoop();
+ PumpLoop();
+
+ StopSyncScheduler();
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // Cleanup disabled types.
+ EXPECT_CALL(*syncer(),
+ SyncShare(_, CLEANUP_DISABLED_TYPES, CLEANUP_DISABLED_TYPES));
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleCleanupDisabledTypes();
+ // Only need to pump once, as ScheduleCleanupDisabledTypes()
+ // schedules the job directly.
+ PumpLoop();
+
+ StopSyncScheduler();
+ Mock::VerifyAndClearExpectations(syncer());
+
+ // Poll.
+ EXPECT_CALL(*syncer(), SyncShare(_, SYNCER_BEGIN, SYNCER_END))
+ .Times(AtLeast(1))
+ .WillRepeatedly(QuitLoopNowAction());
+ const TimeDelta poll(TimeDelta::FromMilliseconds(10));
+ scheduler()->OnReceivedLongPollIntervalUpdate(poll);
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ // Run again to wait for polling.
+ RunLoop();
+
+ StopSyncScheduler();
+ Mock::VerifyAndClearExpectations(syncer());
+}
+
+// Test config tasks don't run during normal mode.
+// TODO(tim): Implement this test and then the functionality!
+TEST_F(SyncSchedulerTest, DISABLED_NoConfigDuringNormal) {
+}
+
+// Test that starting the syncer thread without a valid connection doesn't
+// break things when a connection is detected.
+TEST_F(SyncSchedulerTest, StartWhenNotConnected) {
+  connection()->SetServerNotReachable();
+  EXPECT_CALL(*syncer(), SyncShare(_,_,_))
+      .WillOnce(Invoke(sessions::test_util::SimulateDownloadUpdatesFailed))
+      .WillOnce(QuitLoopNowAction());
+  StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+  MessageLoop::current()->RunAllPending();
+
+  scheduler()->ScheduleNudge(
+      zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(), FROM_HERE);
+  // The nudge should be saved until the server becomes reachable again.
+  MessageLoop::current()->RunAllPending();
+
+  connection()->SetServerReachable();
+  scheduler()->OnConnectionStatusChange();
+  MessageLoop::current()->RunAllPending();
+}
+
+TEST_F(SyncSchedulerTest, SetsPreviousRoutingInfo) {
+ ModelSafeRoutingInfo info;
+ EXPECT_TRUE(info == context()->previous_session_routing_info());
+ ModelSafeRoutingInfo expected;
+ context()->registrar()->GetModelSafeRoutingInfo(&expected);
+ ASSERT_FALSE(expected.empty());
+ EXPECT_CALL(*syncer(), SyncShare(_,_,_)).Times(1);
+
+ StartSyncScheduler(SyncScheduler::NORMAL_MODE);
+ RunLoop();
+
+ scheduler()->ScheduleNudge(
+ zero(), NUDGE_SOURCE_LOCAL, ModelTypeSet(), FROM_HERE);
+ PumpLoop();
+ // Pump again to run job.
+ PumpLoop();
+
+ StopSyncScheduler();
+
+ EXPECT_TRUE(expected == context()->previous_session_routing_info());
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/sync_scheduler_whitebox_unittest.cc b/sync/engine/sync_scheduler_whitebox_unittest.cc
new file mode 100644
index 0000000..35a0955
--- /dev/null
+++ b/sync/engine/sync_scheduler_whitebox_unittest.cc
@@ -0,0 +1,276 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop.h"
+#include "base/time.h"
+#include "sync/engine/sync_scheduler.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/sessions/test_util.h"
+#include "sync/test/engine/fake_model_safe_worker_registrar.h"
+#include "sync/test/engine/mock_connection_manager.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/fake_extensions_activity_monitor.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace browser_sync {
+using browser_sync::Syncer;
+using sessions::SyncSession;
+using sessions::SyncSessionContext;
+using sessions::SyncSourceInfo;
+using sync_pb::GetUpdatesCallerInfo;
+
+class SyncSchedulerWhiteboxTest : public testing::Test {
+ public:
+ virtual void SetUp() {
+ dir_maker_.SetUp();
+ Syncer* syncer = new Syncer();
+ ModelSafeRoutingInfo routes;
+ routes[syncable::BOOKMARKS] = GROUP_UI;
+ routes[syncable::NIGORI] = GROUP_PASSIVE;
+ registrar_.reset(new FakeModelSafeWorkerRegistrar(routes));
+ connection_.reset(new MockConnectionManager(NULL));
+ connection_->SetServerReachable();
+ context_ =
+ new SyncSessionContext(
+ connection_.get(), dir_maker_.directory(),
+ registrar_.get(), &extensions_activity_monitor_,
+ std::vector<SyncEngineEventListener*>(), NULL);
+ context_->set_notifications_enabled(true);
+ context_->set_account_name("Test");
+ scheduler_.reset(
+ new SyncScheduler("TestSyncSchedulerWhitebox", context_, syncer));
+ }
+
+ virtual void TearDown() {
+ scheduler_.reset();
+ }
+
+ void SetMode(SyncScheduler::Mode mode) {
+ scheduler_->mode_ = mode;
+ }
+
+ void SetLastSyncedTime(base::TimeTicks ticks) {
+ scheduler_->last_sync_session_end_time_ = ticks;
+ }
+
+ void SetServerConnection(bool connected) {
+ scheduler_->server_connection_ok_ = connected;
+ }
+
+ void ResetWaitInterval() {
+ scheduler_->wait_interval_.reset();
+ }
+
+ void SetWaitIntervalToThrottled() {
+ scheduler_->wait_interval_.reset(new SyncScheduler::WaitInterval(
+ SyncScheduler::WaitInterval::THROTTLED, TimeDelta::FromSeconds(1)));
+ }
+
+ void SetWaitIntervalToExponentialBackoff() {
+ scheduler_->wait_interval_.reset(
+ new SyncScheduler::WaitInterval(
+ SyncScheduler::WaitInterval::EXPONENTIAL_BACKOFF,
+ TimeDelta::FromSeconds(1)));
+ }
+
+ void SetWaitIntervalHadNudge(bool had_nudge) {
+ scheduler_->wait_interval_->had_nudge = had_nudge;
+ }
+
+ SyncScheduler::JobProcessDecision DecideOnJob(
+ const SyncScheduler::SyncSessionJob& job) {
+ return scheduler_->DecideOnJob(job);
+ }
+
+ void InitializeSyncerOnNormalMode() {
+ SetMode(SyncScheduler::NORMAL_MODE);
+ ResetWaitInterval();
+ SetServerConnection(true);
+ SetLastSyncedTime(base::TimeTicks::Now());
+ }
+
+ SyncScheduler::JobProcessDecision CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::SyncSessionJobPurpose purpose) {
+ SyncSession* s = scheduler_->CreateSyncSession(SyncSourceInfo());
+ SyncScheduler::SyncSessionJob job(purpose, TimeTicks::Now(),
+ make_linked_ptr(s),
+ false,
+ FROM_HERE);
+ return DecideOnJob(job);
+ }
+
+ SyncSessionContext* context() { return context_; }
+
+ protected:
+ scoped_ptr<SyncScheduler> scheduler_;
+
+ private:
+ MessageLoop message_loop_;
+ scoped_ptr<MockConnectionManager> connection_;
+ SyncSessionContext* context_;
+ scoped_ptr<FakeModelSafeWorkerRegistrar> registrar_;
+ FakeExtensionsActivityMonitor extensions_activity_monitor_;
+ TestDirectorySetterUpper dir_maker_;
+};
+
+TEST_F(SyncSchedulerWhiteboxTest, SaveNudge) {
+ InitializeSyncerOnNormalMode();
+
+ // Now set the mode to configure.
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+
+ SyncScheduler::JobProcessDecision decision =
+ CreateAndDecideJob(SyncScheduler::SyncSessionJob::NUDGE);
+
+ EXPECT_EQ(decision, SyncScheduler::SAVE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, SaveNudgeWhileTypeThrottled) {
+ InitializeSyncerOnNormalMode();
+
+ syncable::ModelTypeSet types;
+ types.Put(syncable::BOOKMARKS);
+
+ // Mark bookmarks as throttled.
+ context()->SetUnthrottleTime(types,
+ base::TimeTicks::Now() + base::TimeDelta::FromHours(2));
+
+ syncable::ModelTypePayloadMap types_with_payload;
+ types_with_payload[syncable::BOOKMARKS] = "";
+
+ SyncSourceInfo info(GetUpdatesCallerInfo::LOCAL, types_with_payload);
+ SyncSession* s = scheduler_->CreateSyncSession(info);
+
+ // Now schedule a nudge with just bookmarks and the change is local.
+ SyncScheduler::SyncSessionJob job(SyncScheduler::SyncSessionJob::NUDGE,
+ TimeTicks::Now(),
+ make_linked_ptr(s),
+ false,
+ FROM_HERE);
+
+ SyncScheduler::JobProcessDecision decision = DecideOnJob(job);
+ EXPECT_EQ(decision, SyncScheduler::SAVE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, ContinueNudge) {
+ InitializeSyncerOnNormalMode();
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::NUDGE);
+
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, DropPoll) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::POLL);
+
+ EXPECT_EQ(decision, SyncScheduler::DROP);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, ContinuePoll) {
+ InitializeSyncerOnNormalMode();
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::POLL);
+
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, ContinueConfiguration) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::CONFIGURATION);
+
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, SaveConfigurationWhileThrottled) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+
+ SetWaitIntervalToThrottled();
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::CONFIGURATION);
+
+ EXPECT_EQ(decision, SyncScheduler::SAVE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, SaveNudgeWhileThrottled) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+
+ SetWaitIntervalToThrottled();
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::NUDGE);
+
+ EXPECT_EQ(decision, SyncScheduler::SAVE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest,
+ ContinueClearUserDataUnderAllCircumstances) {
+ InitializeSyncerOnNormalMode();
+
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+ SetWaitIntervalToThrottled();
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::CLEAR_USER_DATA);
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+
+ SetMode(SyncScheduler::NORMAL_MODE);
+ SetWaitIntervalToExponentialBackoff();
+ decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::CLEAR_USER_DATA);
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, ContinueNudgeWhileExponentialBackOff) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::NORMAL_MODE);
+ SetWaitIntervalToExponentialBackoff();
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::NUDGE);
+
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, DropNudgeWhileExponentialBackOff) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::NORMAL_MODE);
+ SetWaitIntervalToExponentialBackoff();
+ SetWaitIntervalHadNudge(true);
+
+ SyncScheduler::JobProcessDecision decision = CreateAndDecideJob(
+ SyncScheduler::SyncSessionJob::NUDGE);
+
+ EXPECT_EQ(decision, SyncScheduler::DROP);
+}
+
+TEST_F(SyncSchedulerWhiteboxTest, ContinueCanaryJobConfig) {
+ InitializeSyncerOnNormalMode();
+ SetMode(SyncScheduler::CONFIGURATION_MODE);
+ SetWaitIntervalToExponentialBackoff();
+
+ struct SyncScheduler::SyncSessionJob job;
+ job.purpose = SyncScheduler::SyncSessionJob::CONFIGURATION;
+ job.scheduled_start = TimeTicks::Now();
+ job.is_canary_job = true;
+ SyncScheduler::JobProcessDecision decision = DecideOnJob(job);
+
+ EXPECT_EQ(decision, SyncScheduler::CONTINUE);
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
new file mode 100644
index 0000000..62c7af8
--- /dev/null
+++ b/sync/engine/syncer.cc
@@ -0,0 +1,345 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer.h"
+
+#include "base/debug/trace_event.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/time.h"
+#include "build/build_config.h"
+#include "sync/engine/apply_updates_command.h"
+#include "sync/engine/build_commit_command.h"
+#include "sync/engine/cleanup_disabled_types_command.h"
+#include "sync/engine/clear_data_command.h"
+#include "sync/engine/conflict_resolver.h"
+#include "sync/engine/download_updates_command.h"
+#include "sync/engine/get_commit_ids_command.h"
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/engine/post_commit_message_command.h"
+#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/process_updates_command.h"
+#include "sync/engine/resolve_conflicts_command.h"
+#include "sync/engine/store_timestamps_command.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncproto.h"
+#include "sync/engine/verify_updates_command.h"
+#include "sync/syncable/syncable-inl.h"
+#include "sync/syncable/syncable.h"
+
+using base::Time;
+using base::TimeDelta;
+using sync_pb::ClientCommand;
+using syncable::Blob;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NON_UNIQUE_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_VERSION;
+using syncable::SYNCER;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using sessions::ScopedSessionContextConflictResolver;
+using sessions::StatusController;
+using sessions::SyncSession;
+using sessions::ConflictProgress;
+
+#define ENUM_CASE(x) case x: return #x
+const char* SyncerStepToString(const SyncerStep step)
+{
+ switch (step) {
+ ENUM_CASE(SYNCER_BEGIN);
+ ENUM_CASE(CLEANUP_DISABLED_TYPES);
+ ENUM_CASE(DOWNLOAD_UPDATES);
+ ENUM_CASE(PROCESS_CLIENT_COMMAND);
+ ENUM_CASE(VERIFY_UPDATES);
+ ENUM_CASE(PROCESS_UPDATES);
+ ENUM_CASE(STORE_TIMESTAMPS);
+ ENUM_CASE(APPLY_UPDATES);
+ ENUM_CASE(BUILD_COMMIT_REQUEST);
+ ENUM_CASE(POST_COMMIT_MESSAGE);
+ ENUM_CASE(PROCESS_COMMIT_RESPONSE);
+ ENUM_CASE(RESOLVE_CONFLICTS);
+ ENUM_CASE(APPLY_UPDATES_TO_RESOLVE_CONFLICTS);
+ ENUM_CASE(CLEAR_PRIVATE_DATA);
+ ENUM_CASE(SYNCER_END);
+ }
+ NOTREACHED();
+ return "";
+}
+#undef ENUM_CASE
+
+Syncer::Syncer()
+ : early_exit_requested_(false) {
+}
+
+Syncer::~Syncer() {}
+
+bool Syncer::ExitRequested() {
+ base::AutoLock lock(early_exit_requested_lock_);
+ return early_exit_requested_;
+}
+
+void Syncer::RequestEarlyExit() {
+ base::AutoLock lock(early_exit_requested_lock_);
+ early_exit_requested_ = true;
+}
+
+void Syncer::SyncShare(sessions::SyncSession* session,
+ SyncerStep first_step,
+ SyncerStep last_step) {
+ ScopedSessionContextConflictResolver scoped(session->context(),
+ &resolver_);
+ session->mutable_status_controller()->UpdateStartTime();
+ SyncerStep current_step = first_step;
+
+ SyncerStep next_step = current_step;
+ while (!ExitRequested()) {
+ TRACE_EVENT1("sync", "SyncerStateMachine",
+ "state", SyncerStepToString(current_step));
+ DVLOG(1) << "Syncer step:" << SyncerStepToString(current_step);
+
+ switch (current_step) {
+ case SYNCER_BEGIN:
+ // This isn't perfect, as we can end up bundling extensions activity
+ // intended for the next session into the current one. We could do a
+ // test-and-reset as with the source, but note that also falls short if
+ // the commit request fails (e.g. due to lost connection), as we will
+ // fall all the way back to the syncer thread main loop in that case,
+ // creating a new session when a connection is established, losing the
+ // records set here on the original attempt. This should provide us
+ // with the right data "most of the time", and we're only using this
+ // for analysis purposes, so Law of Large Numbers FTW.
+ session->context()->extensions_monitor()->GetAndClearRecords(
+ session->mutable_extensions_activity());
+ session->context()->PruneUnthrottledTypes(base::TimeTicks::Now());
+ session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_BEGIN);
+
+ next_step = CLEANUP_DISABLED_TYPES;
+ break;
+ case CLEANUP_DISABLED_TYPES: {
+ CleanupDisabledTypesCommand cleanup;
+ cleanup.Execute(session);
+ next_step = DOWNLOAD_UPDATES;
+ break;
+ }
+ case DOWNLOAD_UPDATES: {
+ // TODO(akalin): We may want to propagate this switch up
+ // eventually.
+#if defined(OS_ANDROID)
+ const bool kCreateMobileBookmarksFolder = true;
+#else
+ const bool kCreateMobileBookmarksFolder = false;
+#endif
+ DownloadUpdatesCommand download_updates(kCreateMobileBookmarksFolder);
+ session->mutable_status_controller()->set_last_download_updates_result(
+ download_updates.Execute(session));
+ next_step = PROCESS_CLIENT_COMMAND;
+ break;
+ }
+ case PROCESS_CLIENT_COMMAND: {
+ ProcessClientCommand(session);
+ next_step = VERIFY_UPDATES;
+ break;
+ }
+ case VERIFY_UPDATES: {
+ VerifyUpdatesCommand verify_updates;
+ verify_updates.Execute(session);
+ next_step = PROCESS_UPDATES;
+ break;
+ }
+ case PROCESS_UPDATES: {
+ ProcessUpdatesCommand process_updates;
+ process_updates.Execute(session);
+ next_step = STORE_TIMESTAMPS;
+ break;
+ }
+ case STORE_TIMESTAMPS: {
+ StoreTimestampsCommand store_timestamps;
+ store_timestamps.Execute(session);
+ // We should download all of the updates before attempting to process
+ // them.
+ if (session->status_controller().ServerSaysNothingMoreToDownload() ||
+ !session->status_controller().download_updates_succeeded()) {
+ next_step = APPLY_UPDATES;
+ } else {
+ next_step = DOWNLOAD_UPDATES;
+ }
+ break;
+ }
+ case APPLY_UPDATES: {
+ ApplyUpdatesCommand apply_updates;
+ apply_updates.Execute(session);
+ if (last_step == APPLY_UPDATES) {
+ // We're in configuration mode, but we still need to run the
+ // SYNCER_END step.
+ last_step = SYNCER_END;
+ next_step = SYNCER_END;
+ } else {
+ next_step = BUILD_COMMIT_REQUEST;
+ }
+ break;
+ }
+ // These two steps are combined since they are executed within the same
+ // write transaction.
+ case BUILD_COMMIT_REQUEST: {
+ syncable::Directory* dir = session->context()->directory();
+ WriteTransaction trans(FROM_HERE, SYNCER, dir);
+ sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);
+
+ DVLOG(1) << "Getting the Commit IDs";
+ GetCommitIdsCommand get_commit_ids_command(
+ session->context()->max_commit_batch_size());
+ get_commit_ids_command.Execute(session);
+
+ if (!session->status_controller().commit_ids().empty()) {
+ DVLOG(1) << "Building a commit message";
+ BuildCommitCommand build_commit_command;
+ build_commit_command.Execute(session);
+
+ next_step = POST_COMMIT_MESSAGE;
+ } else {
+ next_step = RESOLVE_CONFLICTS;
+ }
+
+ break;
+ }
+ case POST_COMMIT_MESSAGE: {
+ PostCommitMessageCommand post_commit_command;
+ session->mutable_status_controller()->set_last_post_commit_result(
+ post_commit_command.Execute(session));
+ next_step = PROCESS_COMMIT_RESPONSE;
+ break;
+ }
+ case PROCESS_COMMIT_RESPONSE: {
+ ProcessCommitResponseCommand process_response_command;
+ session->mutable_status_controller()->
+ set_last_process_commit_response_result(
+ process_response_command.Execute(session));
+ next_step = RESOLVE_CONFLICTS;
+ break;
+ }
+ case RESOLVE_CONFLICTS: {
+ StatusController* status = session->mutable_status_controller();
+ status->reset_conflicts_resolved();
+ ResolveConflictsCommand resolve_conflicts_command;
+ resolve_conflicts_command.Execute(session);
+
+ // Has ConflictingUpdates includes both resolvable and unresolvable
+ // conflicts. If we have either, we want to attempt to reapply.
+ if (status->HasConflictingUpdates())
+ next_step = APPLY_UPDATES_TO_RESOLVE_CONFLICTS;
+ else
+ next_step = SYNCER_END;
+ break;
+ }
+ case APPLY_UPDATES_TO_RESOLVE_CONFLICTS: {
+ StatusController* status = session->mutable_status_controller();
+ DVLOG(1) << "Applying updates to resolve conflicts";
+ ApplyUpdatesCommand apply_updates;
+
+ // We only care to resolve conflicts again if we made progress on the
+ // simple conflicts.
+ int before_blocking_conflicting_updates =
+ status->TotalNumSimpleConflictingItems();
+ apply_updates.Execute(session);
+ int after_blocking_conflicting_updates =
+ status->TotalNumSimpleConflictingItems();
+ // If the following call sets the conflicts_resolved value to true,
+ // SyncSession::HasMoreToSync() will send us into another sync cycle
+ // after this one completes.
+ //
+ // TODO(rlarocque, 109072): Make conflict resolution not require
+ // extra sync cycles/GetUpdates.
+ status->update_conflicts_resolved(before_blocking_conflicting_updates >
+ after_blocking_conflicting_updates);
+ next_step = SYNCER_END;
+ break;
+ }
+ case CLEAR_PRIVATE_DATA: {
+ ClearDataCommand clear_data_command;
+ clear_data_command.Execute(session);
+ next_step = SYNCER_END;
+ break;
+ }
+ case SYNCER_END: {
+ session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_ENDED);
+ next_step = SYNCER_END;
+ break;
+ }
+ default:
+ LOG(ERROR) << "Unknown command: " << current_step;
+ }
+ DVLOG(2) << "last step: " << SyncerStepToString(last_step) << ", "
+ << "current step: " << SyncerStepToString(current_step) << ", "
+ << "next step: " << SyncerStepToString(next_step) << ", "
+ << "snapshot: " << session->TakeSnapshot().ToString();
+ if (last_step == current_step)
+ break;
+ current_step = next_step;
+ }
+}
+
+void Syncer::ProcessClientCommand(sessions::SyncSession* session) {
+ const ClientToServerResponse& response =
+ session->status_controller().updates_response();
+ if (!response.has_client_command())
+ return;
+ const ClientCommand& command = response.client_command();
+
+ // The server limits the number of items a client can commit in one batch.
+ if (command.has_max_commit_batch_size()) {
+ session->context()->set_max_commit_batch_size(
+ command.max_commit_batch_size());
+ }
+ if (command.has_set_sync_long_poll_interval()) {
+ session->delegate()->OnReceivedLongPollIntervalUpdate(
+ TimeDelta::FromSeconds(command.set_sync_long_poll_interval()));
+ }
+ if (command.has_set_sync_poll_interval()) {
+ session->delegate()->OnReceivedShortPollIntervalUpdate(
+ TimeDelta::FromSeconds(command.set_sync_poll_interval()));
+ }
+
+ if (command.has_sessions_commit_delay_seconds()) {
+ session->delegate()->OnReceivedSessionsCommitDelay(
+ TimeDelta::FromSeconds(command.sessions_commit_delay_seconds()));
+ }
+}
+
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
+ dest->Put(SERVER_NON_UNIQUE_NAME, src->Get(SERVER_NON_UNIQUE_NAME));
+ dest->Put(SERVER_PARENT_ID, src->Get(SERVER_PARENT_ID));
+ dest->Put(SERVER_MTIME, src->Get(SERVER_MTIME));
+ dest->Put(SERVER_CTIME, src->Get(SERVER_CTIME));
+ dest->Put(SERVER_VERSION, src->Get(SERVER_VERSION));
+ dest->Put(SERVER_IS_DIR, src->Get(SERVER_IS_DIR));
+ dest->Put(SERVER_IS_DEL, src->Get(SERVER_IS_DEL));
+ dest->Put(IS_UNAPPLIED_UPDATE, src->Get(IS_UNAPPLIED_UPDATE));
+ dest->Put(SERVER_SPECIFICS, src->Get(SERVER_SPECIFICS));
+ dest->Put(SERVER_POSITION_IN_PARENT, src->Get(SERVER_POSITION_IN_PARENT));
+}
+
+void ClearServerData(syncable::MutableEntry* entry) {
+ entry->Put(SERVER_NON_UNIQUE_NAME, "");
+ entry->Put(SERVER_PARENT_ID, syncable::GetNullId());
+ entry->Put(SERVER_MTIME, Time());
+ entry->Put(SERVER_CTIME, Time());
+ entry->Put(SERVER_VERSION, 0);
+ entry->Put(SERVER_IS_DIR, false);
+ entry->Put(SERVER_IS_DEL, false);
+ entry->Put(IS_UNAPPLIED_UPDATE, false);
+ entry->Put(SERVER_SPECIFICS, sync_pb::EntitySpecifics::default_instance());
+ entry->Put(SERVER_POSITION_IN_PARENT, 0);
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
new file mode 100644
index 0000000..13fc681
--- /dev/null
+++ b/sync/engine/syncer.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNCER_H_
+#define SYNC_ENGINE_SYNCER_H_
+#pragma once
+
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "sync/engine/conflict_resolver.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/util/extensions_activity_monitor.h"
+
+namespace syncable {
+class Entry;
+class MutableEntry;
+} // namespace syncable
+
+namespace browser_sync {
+
+enum SyncerStep {
+ SYNCER_BEGIN,
+ CLEANUP_DISABLED_TYPES,
+ DOWNLOAD_UPDATES,
+ PROCESS_CLIENT_COMMAND,
+ VERIFY_UPDATES,
+ PROCESS_UPDATES,
+ STORE_TIMESTAMPS,
+ APPLY_UPDATES,
+ BUILD_COMMIT_REQUEST,
+ POST_COMMIT_MESSAGE,
+ PROCESS_COMMIT_RESPONSE,
+ RESOLVE_CONFLICTS,
+ APPLY_UPDATES_TO_RESOLVE_CONFLICTS,
+ CLEAR_PRIVATE_DATA, // TODO(tim): Rename 'private' to 'user'.
+ SYNCER_END
+};
+
+// A Syncer provides a control interface for driving the individual steps
+// of the sync cycle. Each cycle (hopefully) moves the client into closer
+// synchronization with the server. The individual steps are modeled
+// as SyncerCommands, and the ordering of the steps is expressed using
+// the SyncerStep enum.
+//
+// A Syncer instance expects to run on a dedicated thread. Calls
+// to SyncShare() may take an unbounded amount of time, as SyncerCommands
+// may block on network i/o, on lock contention, or on tasks posted to
+// other threads.
+class Syncer {
+ public:
+ typedef std::vector<int64> UnsyncedMetaHandles;
+
+ Syncer();
+ virtual ~Syncer();
+
+ // Called by other threads to tell the syncer to stop what it's doing
+ // and return early from SyncShare, if possible.
+ bool ExitRequested();
+ void RequestEarlyExit();
+
+ // Runs a sync cycle from |first_step| to |last_step|.
+ virtual void SyncShare(sessions::SyncSession* session,
+ SyncerStep first_step,
+ SyncerStep last_step);
+
+ private:
+ // Implements the PROCESS_CLIENT_COMMAND syncer step.
+ void ProcessClientCommand(sessions::SyncSession* session);
+
+ bool early_exit_requested_;
+ base::Lock early_exit_requested_lock_;
+
+ ConflictResolver resolver_;
+
+ friend class SyncerTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, NameClashWithResolver);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, IllegalAndLegalUpdates);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingAndNewParent);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest,
+ TestCommitListOrderingAndNewParentAndChild);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingCounterexample);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingWithNesting);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingWithNewItems);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestGetUnsyncedAndSimpleCommit);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestPurgeWhileUnsynced);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestPurgeWhileUnapplied);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, UnappliedUpdateDuringCommit);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, DeletingEntryInFolder);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest,
+ LongChangelistCreatesFakeOrphanedEntries);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, QuicklyMergeDualCreatedHierarchy);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, LongChangelistWithApplicationConflict);
+ FRIEND_TEST_ALL_PREFIXES(SyncerTest, DeletingEntryWithLocalEdits);
+ FRIEND_TEST_ALL_PREFIXES(EntryCreatedInNewFolderTest,
+ EntryCreatedInNewFolderMidSync);
+
+ DISALLOW_COPY_AND_ASSIGN(Syncer);
+};
+
+// Utility function declarations.
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest);
+void ClearServerData(syncable::MutableEntry* entry);
+const char* SyncerStepToString(const SyncerStep);
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCER_H_
diff --git a/sync/engine/syncer_command.cc b/sync/engine/syncer_command.cc
new file mode 100644
index 0000000..e2e5138
--- /dev/null
+++ b/sync/engine/syncer_command.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer_command.h"
+
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/sessions/sync_session.h"
+
+namespace browser_sync {
+using sessions::SyncSession;
+
+SyncerCommand::SyncerCommand() {}
+SyncerCommand::~SyncerCommand() {}
+
+SyncerError SyncerCommand::Execute(SyncSession* session) {
+ SyncerError result = ExecuteImpl(session);
+ SendNotifications(session);
+ return result;
+}
+
+void SyncerCommand::SendNotifications(SyncSession* session) {
+ if (session->mutable_status_controller()->TestAndClearIsDirty()) {
+ SyncEngineEvent event(SyncEngineEvent::STATUS_CHANGED);
+ const sessions::SyncSessionSnapshot& snapshot(session->TakeSnapshot());
+ event.snapshot = &snapshot;
+ session->context()->NotifyListeners(event);
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer_command.h b/sync/engine/syncer_command.h
new file mode 100644
index 0000000..8095467
--- /dev/null
+++ b/sync/engine/syncer_command.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNCER_COMMAND_H_
+#define SYNC_ENGINE_SYNCER_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+
+#include "sync/util/syncer_error.h"
+
+namespace browser_sync {
+
+namespace sessions {
+class SyncSession;
+}
+
+// Implementation of a simple command pattern intended to be driven by the
+// Syncer. SyncerCommand is abstract and all subclasses must implement
+// ExecuteImpl(). This is done so that chunks of syncer operation can be unit
+// tested.
+//
+// Example Usage:
+//
+// SyncSession session = ...;
+// SyncerCommand *cmd = SomeCommandFactory.createCommand(...);
+// cmd->Execute(session);
+// delete cmd;
+
+class SyncerCommand {
+ public:
+ SyncerCommand();
+ virtual ~SyncerCommand();
+
+ // Execute dispatches to a derived class's ExecuteImpl.
+ SyncerError Execute(sessions::SyncSession* session);
+
+ // ExecuteImpl is where derived classes actually do work.
+ virtual SyncerError ExecuteImpl(sessions::SyncSession* session) = 0;
+ private:
+ void SendNotifications(sessions::SyncSession* session);
+ DISALLOW_COPY_AND_ASSIGN(SyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCER_COMMAND_H_
diff --git a/sync/engine/syncer_proto_util.cc b/sync/engine/syncer_proto_util.cc
new file mode 100644
index 0000000..c5041d2
--- /dev/null
+++ b/sync/engine/syncer_proto_util.cc
@@ -0,0 +1,537 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer_proto_util.h"
+
+#include "base/format_macros.h"
+#include "base/stringprintf.h"
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/protocol/service_constants.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/protocol/sync_enums.pb.h"
+#include "sync/protocol/sync_protocol_error.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable-inl.h"
+#include "sync/syncable/syncable.h"
+#include "sync/util/time.h"
+
+using browser_sync::SyncProtocolErrorType;
+using std::string;
+using std::stringstream;
+using syncable::BASE_VERSION;
+using syncable::CTIME;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNSYNCED;
+using syncable::MTIME;
+using syncable::PARENT_ID;
+
+namespace browser_sync {
+using sessions::SyncSession;
+
+namespace {
+
+// Time to backoff syncing after receiving a throttled response.
+const int kSyncDelayAfterThrottled = 2 * 60 * 60; // 2 hours
+
+void LogResponseProfilingData(const ClientToServerResponse& response) {
+ if (response.has_profiling_data()) {
+ stringstream response_trace;
+ response_trace << "Server response trace:";
+
+ if (response.profiling_data().has_user_lookup_time()) {
+ response_trace << " user lookup: "
+ << response.profiling_data().user_lookup_time() << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_write_time()) {
+ response_trace << " meta write: "
+ << response.profiling_data().meta_data_write_time()
+ << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_read_time()) {
+ response_trace << " meta read: "
+ << response.profiling_data().meta_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_write_time()) {
+ response_trace << " file write: "
+ << response.profiling_data().file_data_write_time()
+ << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_read_time()) {
+ response_trace << " file read: "
+ << response.profiling_data().file_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_total_request_time()) {
+ response_trace << " total time: "
+ << response.profiling_data().total_request_time() << "ms";
+ }
+ DVLOG(1) << response_trace.str();
+ }
+}
+
+SyncerError ServerConnectionErrorAsSyncerError(
+ const HttpResponse::ServerConnectionCode server_status) {
+ switch (server_status) {
+ case HttpResponse::CONNECTION_UNAVAILABLE:
+ return NETWORK_CONNECTION_UNAVAILABLE;
+ case HttpResponse::IO_ERROR:
+ return NETWORK_IO_ERROR;
+ case HttpResponse::SYNC_SERVER_ERROR:
+ // FIXME what does this mean?
+ return SYNC_SERVER_ERROR;
+ case HttpResponse::SYNC_AUTH_ERROR:
+ return SYNC_AUTH_ERROR;
+ case HttpResponse::RETRY:
+ return SERVER_RETURN_TRANSIENT_ERROR;
+ case HttpResponse::SERVER_CONNECTION_OK:
+ case HttpResponse::NONE:
+ default:
+ NOTREACHED();
+ return UNSET;
+ }
+}
+
+} // namespace
+
+// static
+void SyncerProtoUtil::HandleMigrationDoneResponse(
+ const sync_pb::ClientToServerResponse* response,
+ sessions::SyncSession* session) {
+ LOG_IF(ERROR, 0 >= response->migrated_data_type_id_size())
+ << "MIGRATION_DONE but no types specified.";
+ syncable::ModelTypeSet to_migrate;
+ for (int i = 0; i < response->migrated_data_type_id_size(); i++) {
+ to_migrate.Put(syncable::GetModelTypeFromSpecificsFieldNumber(
+ response->migrated_data_type_id(i)));
+ }
+ // TODO(akalin): This should be a set union.
+ session->mutable_status_controller()->
+ set_types_needing_local_migration(to_migrate);
+}
+
+// static
+bool SyncerProtoUtil::VerifyResponseBirthday(syncable::Directory* dir,
+ const ClientToServerResponse* response) {
+
+ std::string local_birthday = dir->store_birthday();
+
+ if (local_birthday.empty()) {
+ if (!response->has_store_birthday()) {
+ LOG(WARNING) << "Expected a birthday on first sync.";
+ return false;
+ }
+
+ DVLOG(1) << "New store birthday: " << response->store_birthday();
+ dir->set_store_birthday(response->store_birthday());
+ return true;
+ }
+
+ // Error situation, but we're not stuck.
+ if (!response->has_store_birthday()) {
+ LOG(WARNING) << "No birthday in server response?";
+ return true;
+ }
+
+ if (response->store_birthday() != local_birthday) {
+ LOG(WARNING) << "Birthday changed, showing syncer stuck";
+ return false;
+ }
+
+ return true;
+}
+
+// static
+void SyncerProtoUtil::AddRequestBirthday(syncable::Directory* dir,
+ ClientToServerMessage* msg) {
+ if (!dir->store_birthday().empty())
+ msg->set_store_birthday(dir->store_birthday());
+}
+
// static
// Serializes |msg|, posts it through |scm|, and parses the reply into
// |response|. Returns true only when the post succeeded, the reply parsed,
// and the reply did not carry an auth-level error code.
bool SyncerProtoUtil::PostAndProcessHeaders(ServerConnectionManager* scm,
                                            sessions::SyncSession* session,
                                            const ClientToServerMessage& msg,
                                            ClientToServerResponse* response) {
  ServerConnectionManager::PostBufferParams params;
  msg.SerializeToString(&params.buffer_in);

  ScopedServerStatusWatcher server_status_watcher(scm, &params.response);
  // Fills in params.buffer_out and params.response.
  if (!scm->PostBufferWithCachedAuth(&params, &server_status_watcher)) {
    LOG(WARNING) << "Error posting from syncer:" << params.response;
    return false;
  }

  // If the server handed back a fresh auth token, broadcast it so listeners
  // can pick it up.
  std::string new_token = params.response.update_client_auth_header;
  if (!new_token.empty()) {
    SyncEngineEvent event(SyncEngineEvent::UPDATED_TOKEN);
    event.updated_token = new_token;
    session->context()->NotifyListeners(event);
  }

  if (response->ParseFromString(params.buffer_out)) {
    // TODO(tim): This is an egregious layering violation (bug 35060).
    switch (response->error_code()) {
      case sync_pb::SyncEnums::ACCESS_DENIED:
      case sync_pb::SyncEnums::AUTH_INVALID:
      case sync_pb::SyncEnums::USER_NOT_ACTIVATED:
        // Fires on ScopedServerStatusWatcher
        params.response.server_status = HttpResponse::SYNC_AUTH_ERROR;
        return false;
      default:
        return true;
    }
  }

  // The reply could not be parsed as a ClientToServerResponse.
  return false;
}
+
+base::TimeDelta SyncerProtoUtil::GetThrottleDelay(
+ const sync_pb::ClientToServerResponse& response) {
+ base::TimeDelta throttle_delay =
+ base::TimeDelta::FromSeconds(kSyncDelayAfterThrottled);
+ if (response.has_client_command()) {
+ const sync_pb::ClientCommand& command = response.client_command();
+ if (command.has_throttle_delay_seconds()) {
+ throttle_delay =
+ base::TimeDelta::FromSeconds(command.throttle_delay_seconds());
+ }
+ }
+ return throttle_delay;
+}
+
+void SyncerProtoUtil::HandleThrottleError(
+ const SyncProtocolError& error,
+ const base::TimeTicks& throttled_until,
+ sessions::SyncSessionContext* context,
+ sessions::SyncSession::Delegate* delegate) {
+ DCHECK_EQ(error.error_type, browser_sync::THROTTLED);
+ if (error.error_data_types.Empty()) {
+ // No datatypes indicates the client should be completely throttled.
+ delegate->OnSilencedUntil(throttled_until);
+ } else {
+ context->SetUnthrottleTime(error.error_data_types, throttled_until);
+ }
+}
+
+namespace {
+
+// Helper function for an assertion in PostClientToServerMessage.
+bool IsVeryFirstGetUpdates(const ClientToServerMessage& message) {
+ if (!message.has_get_updates())
+ return false;
+ DCHECK_LT(0, message.get_updates().from_progress_marker_size());
+ for (int i = 0; i < message.get_updates().from_progress_marker_size(); ++i) {
+ if (!message.get_updates().from_progress_marker(i).token().empty())
+ return false;
+ }
+ return true;
+}
+
// Maps the wire-format error enum onto the local SyncProtocolErrorType.
// Note that the three auth-related wire codes all collapse into
// INVALID_CREDENTIAL locally.
SyncProtocolErrorType ConvertSyncProtocolErrorTypePBToLocalType(
    const sync_pb::SyncEnums::ErrorType& error_type) {
  switch (error_type) {
    case sync_pb::SyncEnums::SUCCESS:
      return browser_sync::SYNC_SUCCESS;
    case sync_pb::SyncEnums::NOT_MY_BIRTHDAY:
      return browser_sync::NOT_MY_BIRTHDAY;
    case sync_pb::SyncEnums::THROTTLED:
      return browser_sync::THROTTLED;
    case sync_pb::SyncEnums::CLEAR_PENDING:
      return browser_sync::CLEAR_PENDING;
    case sync_pb::SyncEnums::TRANSIENT_ERROR:
      return browser_sync::TRANSIENT_ERROR;
    case sync_pb::SyncEnums::MIGRATION_DONE:
      return browser_sync::MIGRATION_DONE;
    case sync_pb::SyncEnums::UNKNOWN:
      return browser_sync::UNKNOWN_ERROR;
    case sync_pb::SyncEnums::USER_NOT_ACTIVATED:
    case sync_pb::SyncEnums::AUTH_INVALID:
    case sync_pb::SyncEnums::ACCESS_DENIED:
      return browser_sync::INVALID_CREDENTIAL;
    default:
      NOTREACHED();
      return browser_sync::UNKNOWN_ERROR;
  }
}
+
// Maps the wire-format client-action enum onto the local ClientAction enum.
// Unrecognized values fall back to UNKNOWN_ACTION (with a NOTREACHED).
browser_sync::ClientAction ConvertClientActionPBToLocalClientAction(
    const sync_pb::ClientToServerResponse::Error::Action& action) {
  switch (action) {
    case ClientToServerResponse::Error::UPGRADE_CLIENT:
      return browser_sync::UPGRADE_CLIENT;
    case ClientToServerResponse::Error::CLEAR_USER_DATA_AND_RESYNC:
      return browser_sync::CLEAR_USER_DATA_AND_RESYNC;
    case ClientToServerResponse::Error::ENABLE_SYNC_ON_ACCOUNT:
      return browser_sync::ENABLE_SYNC_ON_ACCOUNT;
    case ClientToServerResponse::Error::STOP_AND_RESTART_SYNC:
      return browser_sync::STOP_AND_RESTART_SYNC;
    case ClientToServerResponse::Error::DISABLE_SYNC_ON_CLIENT:
      return browser_sync::DISABLE_SYNC_ON_CLIENT;
    case ClientToServerResponse::Error::UNKNOWN_ACTION:
      return browser_sync::UNKNOWN_ACTION;
    default:
      NOTREACHED();
      return browser_sync::UNKNOWN_ACTION;
  }
}
+
+browser_sync::SyncProtocolError ConvertErrorPBToLocalType(
+ const sync_pb::ClientToServerResponse::Error& error) {
+ browser_sync::SyncProtocolError sync_protocol_error;
+ sync_protocol_error.error_type = ConvertSyncProtocolErrorTypePBToLocalType(
+ error.error_type());
+ sync_protocol_error.error_description = error.error_description();
+ sync_protocol_error.url = error.url();
+ sync_protocol_error.action = ConvertClientActionPBToLocalClientAction(
+ error.action());
+
+ if (error.error_data_type_ids_size() > 0) {
+ // THROTTLED is currently the only error code that uses |error_data_types|.
+ DCHECK_EQ(error.error_type(), sync_pb::SyncEnums::THROTTLED);
+ for (int i = 0; i < error.error_data_type_ids_size(); ++i) {
+ sync_protocol_error.error_data_types.Put(
+ syncable::GetModelTypeFromSpecificsFieldNumber(
+ error.error_data_type_ids(i)));
+ }
+ }
+
+ return sync_protocol_error;
+}
+
+// TODO(lipalani) : Rename these function names as per the CR for issue 7740067.
+browser_sync::SyncProtocolError ConvertLegacyErrorCodeToNewError(
+ const sync_pb::SyncEnums::ErrorType& error_type) {
+ browser_sync::SyncProtocolError error;
+ error.error_type = ConvertSyncProtocolErrorTypePBToLocalType(error_type);
+ if (error_type == sync_pb::SyncEnums::CLEAR_PENDING ||
+ error_type == sync_pb::SyncEnums::NOT_MY_BIRTHDAY) {
+ error.action = browser_sync::DISABLE_SYNC_ON_CLIENT;
+ } // There is no other action we can compute for legacy server.
+ return error;
+}
+
+} // namespace
+
// static
// Posts |msg| to the sync server and interprets the reply. The birthday check
// runs first and overrides any server-reported error; otherwise the error is
// taken from the new-style Error submessage when present, or derived from the
// legacy |error_code| field. The resulting SyncProtocolError is stored on the
// session's status controller and reported to the delegate before being
// mapped to the returned SyncerError.
SyncerError SyncerProtoUtil::PostClientToServerMessage(
    const ClientToServerMessage& msg,
    ClientToServerResponse* response,
    SyncSession* session) {

  CHECK(response);
  DCHECK(!msg.get_updates().has_from_timestamp());  // Deprecated.
  DCHECK(!msg.get_updates().has_requested_types());  // Deprecated.
  DCHECK(msg.has_store_birthday() || IsVeryFirstGetUpdates(msg))
      << "Must call AddRequestBirthday to set birthday.";

  syncable::Directory* dir = session->context()->directory();

  if (!PostAndProcessHeaders(session->context()->connection_manager(), session,
                             msg, response)) {
    // There was an error establishing communication with the server.
    // We can not proceed beyond this point.
    const browser_sync::HttpResponse::ServerConnectionCode server_status =
        session->context()->connection_manager()->server_status();

    DCHECK_NE(server_status, browser_sync::HttpResponse::NONE);
    DCHECK_NE(server_status, browser_sync::HttpResponse::SERVER_CONNECTION_OK);

    return ServerConnectionErrorAsSyncerError(server_status);
  }

  browser_sync::SyncProtocolError sync_protocol_error;

  // Birthday mismatch overrides any error that is sent by the server.
  if (!VerifyResponseBirthday(dir, response)) {
    sync_protocol_error.error_type = browser_sync::NOT_MY_BIRTHDAY;
    sync_protocol_error.action =
        browser_sync::DISABLE_SYNC_ON_CLIENT;
  } else if (response->has_error()) {
    // This is a new server. Just get the error from the protocol.
    sync_protocol_error = ConvertErrorPBToLocalType(response->error());
  } else {
    // Legacy server implementation. Compute the error based on |error_code|.
    sync_protocol_error = ConvertLegacyErrorCodeToNewError(
        response->error_code());
  }

  // Now set the error into the status so the layers above us could read it.
  sessions::StatusController* status = session->mutable_status_controller();
  status->set_sync_protocol_error(sync_protocol_error);

  // Inform the delegate of the error we got.
  session->delegate()->OnSyncProtocolError(session->TakeSnapshot());

  // Now do any special handling for the error type and decide on the return
  // value.
  switch (sync_protocol_error.error_type) {
    case browser_sync::UNKNOWN_ERROR:
      LOG(WARNING) << "Sync protocol out-of-date. The server is using a more "
                   << "recent version.";
      return SERVER_RETURN_UNKNOWN_ERROR;
    case browser_sync::SYNC_SUCCESS:
      LogResponseProfilingData(*response);
      return SYNCER_OK;
    case browser_sync::THROTTLED:
      LOG(WARNING) << "Client silenced by server.";
      // Throttle either the whole client or specific datatypes until
      // now + the server-provided (or default) delay.
      HandleThrottleError(sync_protocol_error,
                          base::TimeTicks::Now() + GetThrottleDelay(*response),
                          session->context(),
                          session->delegate());
      return SERVER_RETURN_THROTTLED;
    case browser_sync::TRANSIENT_ERROR:
      return SERVER_RETURN_TRANSIENT_ERROR;
    case browser_sync::MIGRATION_DONE:
      HandleMigrationDoneResponse(response, session);
      return SERVER_RETURN_MIGRATION_DONE;
    case browser_sync::CLEAR_PENDING:
      return SERVER_RETURN_CLEAR_PENDING;
    case browser_sync::NOT_MY_BIRTHDAY:
      return SERVER_RETURN_NOT_MY_BIRTHDAY;
    default:
      NOTREACHED();
      return UNSET;
  }
}
+
// static
// Field-by-field equality between a local entry and a server entity. The
// CHECKed preconditions (same ID, same version, not unsynced) must hold
// before calling. Two deleted entries compare equal regardless of any other
// fields; mtime is only compared for non-directories.
bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
                              const SyncEntity& server_entry) {
  const std::string name = NameFromSyncEntity(server_entry);

  CHECK(local_entry.Get(ID) == server_entry.id()) <<
      " SyncerProtoUtil::Compare precondition not met.";
  CHECK(server_entry.version() == local_entry.Get(BASE_VERSION)) <<
      " SyncerProtoUtil::Compare precondition not met.";
  CHECK(!local_entry.Get(IS_UNSYNCED)) <<
      " SyncerProtoUtil::Compare precondition not met.";

  if (local_entry.Get(IS_DEL) && server_entry.deleted())
    return true;
  if (local_entry.Get(CTIME) != ProtoTimeToTime(server_entry.ctime())) {
    LOG(WARNING) << "ctime mismatch";
    return false;
  }

  // These checks are somewhat prolix, but they're easier to debug than a big
  // boolean statement.
  string client_name = local_entry.Get(syncable::NON_UNIQUE_NAME);
  if (client_name != name) {
    LOG(WARNING) << "Client name mismatch";
    return false;
  }
  if (local_entry.Get(PARENT_ID) != server_entry.parent_id()) {
    LOG(WARNING) << "Parent ID mismatch";
    return false;
  }
  if (local_entry.Get(IS_DIR) != server_entry.IsFolder()) {
    LOG(WARNING) << "Dir field mismatch";
    return false;
  }
  if (local_entry.Get(IS_DEL) != server_entry.deleted()) {
    LOG(WARNING) << "Deletion mismatch";
    return false;
  }
  if (!local_entry.Get(IS_DIR) &&
      (local_entry.Get(MTIME) != ProtoTimeToTime(server_entry.mtime()))) {
    LOG(WARNING) << "mtime mismatch";
    return false;
  }

  return true;
}
+
+// static
+void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
+ syncable::Blob* blob) {
+ syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
+ blob->swap(proto_blob);
+}
+
+// static
+bool SyncerProtoUtil::ProtoBytesEqualsBlob(const std::string& proto_bytes,
+ const syncable::Blob& blob) {
+ if (proto_bytes.size() != blob.size())
+ return false;
+ return std::equal(proto_bytes.begin(), proto_bytes.end(), blob.begin());
+}
+
+// static
+void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
+ std::string* proto_bytes) {
+ std::string blob_string(blob.begin(), blob.end());
+ proto_bytes->swap(blob_string);
+}
+
+// static
+const std::string& SyncerProtoUtil::NameFromSyncEntity(
+ const sync_pb::SyncEntity& entry) {
+ if (entry.has_non_unique_name())
+ return entry.non_unique_name();
+ return entry.name();
+}
+
+// static
+const std::string& SyncerProtoUtil::NameFromCommitEntryResponse(
+ const CommitResponse_EntryResponse& entry) {
+ if (entry.has_non_unique_name())
+ return entry.non_unique_name();
+ return entry.name();
+}
+
+std::string SyncerProtoUtil::SyncEntityDebugString(
+ const sync_pb::SyncEntity& entry) {
+ const std::string& mtime_str =
+ GetTimeDebugString(ProtoTimeToTime(entry.mtime()));
+ const std::string& ctime_str =
+ GetTimeDebugString(ProtoTimeToTime(entry.ctime()));
+ return base::StringPrintf(
+ "id: %s, parent_id: %s, "
+ "version: %"PRId64"d, "
+ "mtime: %" PRId64"d (%s), "
+ "ctime: %" PRId64"d (%s), "
+ "name: %s, sync_timestamp: %" PRId64"d, "
+ "%s ",
+ entry.id_string().c_str(),
+ entry.parent_id_string().c_str(),
+ entry.version(),
+ entry.mtime(), mtime_str.c_str(),
+ entry.ctime(), ctime_str.c_str(),
+ entry.name().c_str(), entry.sync_timestamp(),
+ entry.deleted() ? "deleted, ":"");
+}
+
+namespace {
+std::string GetUpdatesResponseString(
+ const sync_pb::GetUpdatesResponse& response) {
+ std::string output;
+ output.append("GetUpdatesResponse:\n");
+ for (int i = 0; i < response.entries_size(); i++) {
+ output.append(SyncerProtoUtil::SyncEntityDebugString(response.entries(i)));
+ output.append("\n");
+ }
+ return output;
+}
+} // namespace
+
+std::string SyncerProtoUtil::ClientToServerResponseDebugString(
+ const sync_pb::ClientToServerResponse& response) {
+ // Add more handlers as needed.
+ std::string output;
+ if (response.has_get_updates())
+ output.append(GetUpdatesResponseString(response.get_updates()));
+ return output;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer_proto_util.h b/sync/engine/syncer_proto_util.h
new file mode 100644
index 0000000..e8086b5
--- /dev/null
+++ b/sync/engine/syncer_proto_util.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+#define SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+#pragma once
+
+#include <string>
+
+#include "base/gtest_prod_util.h"
+#include "base/time.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/blob.h"
+#include "sync/syncable/model_type.h"
+#include "sync/util/syncer_error.h"
+
+namespace syncable {
+class Directory;
+class Entry;
+} // namespace syncable
+
+namespace sync_pb {
+class ClientToServerResponse;
+class EntitySpecifics;
+} // namespace sync_pb
+
+namespace browser_sync {
+
+namespace sessions {
+class SyncProtocolError;
+class SyncSessionContext;
+}
+
+class ClientToServerMessage;
+class ServerConnectionManager;
+class SyncEntity;
+class CommitResponse_EntryResponse;
+
// Stateless helpers shared by the syncer for building, posting, and
// interpreting client<->server protocol messages. All members are static;
// the class cannot be instantiated.
class SyncerProtoUtil {
 public:
  // Posts the given message and fills the buffer with the returned value.
  // Returns true on success.  Also handles store birthday verification: will
  // produce a SyncError if the birthday is incorrect.
  static SyncerError PostClientToServerMessage(
      const ClientToServerMessage& msg,
      sync_pb::ClientToServerResponse* response,
      sessions::SyncSession* session);

  // Compares a syncable Entry to SyncEntity, returns true iff the data is
  // identical.
  //
  // TODO(sync): The places where this function is used are arguable big causes
  // of the fragility, because there's a tendency to freak out the moment the
  // local and server values diverge. However, this almost always indicates a
  // sync bug somewhere earlier in the sync cycle.
  static bool Compare(const syncable::Entry& local_entry,
                      const SyncEntity& server_entry);

  // Utility methods for converting between syncable::Blobs and protobuf byte
  // fields.
  static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
                                     syncable::Blob* blob);
  static bool ProtoBytesEqualsBlob(const std::string& proto_bytes,
                                   const syncable::Blob& blob);
  static void CopyBlobIntoProtoBytes(const syncable::Blob& blob,
                                     std::string* proto_bytes);

  // Extract the name field from a sync entity.
  static const std::string& NameFromSyncEntity(
      const sync_pb::SyncEntity& entry);

  // Extract the name field from a commit entry response.
  static const std::string& NameFromCommitEntryResponse(
      const CommitResponse_EntryResponse& entry);

  // EntitySpecifics is used as a filter for the GetUpdates message to tell
  // the server which datatypes to send back.  This adds a datatype so that
  // it's included in the filter.
  static void AddToEntitySpecificDatatypesFilter(syncable::ModelType datatype,
      sync_pb::EntitySpecifics* filter);

  // Get a debug string representation of the client to server response.
  static std::string ClientToServerResponseDebugString(
      const sync_pb::ClientToServerResponse& response);

  // Get update contents as a string. Intended for logging, and intended
  // to have a smaller footprint than the protobuf's built-in pretty printer.
  static std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry);

  // Pull the birthday from the dir and put it into the msg.
  static void AddRequestBirthday(syncable::Directory* dir,
                                 ClientToServerMessage* msg);

 private:
  // Static-only class; not constructible.
  SyncerProtoUtil() {}

  // Helper functions for PostClientToServerMessage.

  // Verifies the store birthday, alerting/resetting as appropriate if there's
  // a mismatch.  Return false if the syncer should be stuck.
  static bool VerifyResponseBirthday(syncable::Directory* dir,
      const sync_pb::ClientToServerResponse* response);

  // Builds and sends a SyncEngineEvent to begin migration for types (specified
  // in notification).
  static void HandleMigrationDoneResponse(
      const sync_pb::ClientToServerResponse* response,
      sessions::SyncSession* session);

  // Post the message using the scm, and do some processing on the returned
  // headers. Decode the server response.
  static bool PostAndProcessHeaders(browser_sync::ServerConnectionManager* scm,
                                    sessions::SyncSession* session,
                                    const ClientToServerMessage& msg,
                                    sync_pb::ClientToServerResponse* response);

  // Computes the backoff delay for a THROTTLED response: the server-supplied
  // client-command delay when present, else a fixed default.
  static base::TimeDelta GetThrottleDelay(
      const sync_pb::ClientToServerResponse& response);

  // Applies a THROTTLED error to either the whole client (via |delegate|) or
  // specific datatypes (via |context|).
  static void HandleThrottleError(const SyncProtocolError& error,
                                  const base::TimeTicks& throttled_until,
                                  sessions::SyncSessionContext* context,
                                  sessions::SyncSession::Delegate* delegate);

  friend class SyncerProtoUtilTest;
  FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, AddRequestBirthday);
  FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, PostAndProcessHeaders);
  FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, VerifyResponseBirthday);
  FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, HandleThrottlingNoDatatypes);
  FRIEND_TEST_ALL_PREFIXES(SyncerProtoUtilTest, HandleThrottlingWithDatatypes);

  DISALLOW_COPY_AND_ASSIGN(SyncerProtoUtil);
};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
diff --git a/sync/engine/syncer_proto_util_unittest.cc b/sync/engine/syncer_proto_util_unittest.cc
new file mode 100644
index 0000000..d70a07b
--- /dev/null
+++ b/sync/engine/syncer_proto_util_unittest.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer_proto_util.h"
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "base/time.h"
+#include "sync/engine/syncproto.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/blob.h"
+#include "sync/syncable/model_type_test_util.h"
+#include "sync/syncable/syncable.h"
+#include "sync/test/engine/mock_connection_manager.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/protocol/sync_enums.pb.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::Blob;
+using ::testing::_;
+
+namespace browser_sync {
+using sessions::SyncSessionContext;
+
// GMock SyncSessionContext used to verify per-datatype throttling calls.
class MockSyncSessionContext : public SyncSessionContext {
 public:
  MockSyncSessionContext() {}
  ~MockSyncSessionContext() {}
  MOCK_METHOD2(SetUnthrottleTime, void(syncable::ModelTypeSet,
                                       const base::TimeTicks&));
};
+
+class MockDelegate : public sessions::SyncSession::Delegate {
+ public:
+ MockDelegate() {}
+ ~MockDelegate() {}
+
+ MOCK_METHOD0(IsSyncingCurrentlySilenced, bool());
+ MOCK_METHOD1(OnReceivedShortPollIntervalUpdate, void(const base::TimeDelta&));
+ MOCK_METHOD1(OnReceivedLongPollIntervalUpdate ,void(const base::TimeDelta&));
+ MOCK_METHOD1(OnReceivedSessionsCommitDelay, void(const base::TimeDelta&));
+ MOCK_METHOD1(OnSyncProtocolError, void(const sessions::SyncSessionSnapshot&));
+ MOCK_METHOD0(OnShouldStopSyncingPermanently, void());
+ MOCK_METHOD1(OnSilencedUntil, void(const base::TimeTicks&));
+};
+
+TEST(SyncerProtoUtil, TestBlobToProtocolBufferBytesUtilityFunctions) {
+ unsigned char test_data1[] = {1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data2[] = {1, 99, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data3[] = {99, 2, 3, 4, 5, 6, 7, 8};
+
+ syncable::Blob test_blob1, test_blob2, test_blob3;
+ for (size_t i = 0; i < arraysize(test_data1); ++i)
+ test_blob1.push_back(test_data1[i]);
+ for (size_t i = 0; i < arraysize(test_data2); ++i)
+ test_blob2.push_back(test_data2[i]);
+ for (size_t i = 0; i < arraysize(test_data3); ++i)
+ test_blob3.push_back(test_data3[i]);
+
+ std::string test_message1(reinterpret_cast<char*>(test_data1),
+ arraysize(test_data1));
+ std::string test_message2(reinterpret_cast<char*>(test_data2),
+ arraysize(test_data2));
+ std::string test_message3(reinterpret_cast<char*>(test_data3),
+ arraysize(test_data3));
+
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob1));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob2));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob3));
+
+ Blob blob1_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+ SyncerProtoUtil::CopyProtoBytesIntoBlob(test_message1, &blob1_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+
+ std::string message2_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+ SyncerProtoUtil::CopyBlobIntoProtoBytes(test_blob2, &message2_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the name
+// field is provided.
+TEST(SyncerProtoUtil, NameExtractionOneName) {
+ SyncEntity one_name_entity;
+ CommitResponse_EntryResponse one_name_response;
+
+ const std::string one_name_string("Eggheadednesses");
+ one_name_entity.set_name(one_name_string);
+ one_name_response.set_name(one_name_string);
+
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a);
+}
+
+TEST(SyncerProtoUtil, NameExtractionOneUniqueName) {
+ SyncEntity one_name_entity;
+ CommitResponse_EntryResponse one_name_response;
+
+ const std::string one_name_string("Eggheadednesses");
+
+ one_name_entity.set_non_unique_name(one_name_string);
+ one_name_response.set_non_unique_name(one_name_string);
+
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a);
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the name
+// field and the non_unique_name fields are provided.
+// Should prioritize non_unique_name.
+TEST(SyncerProtoUtil, NameExtractionTwoNames) {
+ SyncEntity two_name_entity;
+ CommitResponse_EntryResponse two_name_response;
+
+ const std::string neuro("Neuroanatomists");
+ const std::string oxyphen("Oxyphenbutazone");
+
+ two_name_entity.set_name(oxyphen);
+ two_name_entity.set_non_unique_name(neuro);
+
+ two_name_response.set_name(oxyphen);
+ two_name_response.set_non_unique_name(neuro);
+
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
+ EXPECT_EQ(neuro, name_a);
+}
+
// Fixture providing a fresh test syncable::Directory (and a MessageLoop) for
// the tests that need directory state, e.g. birthday handling.
class SyncerProtoUtilTest : public testing::Test {
 public:
  virtual void SetUp() {
    dir_maker_.SetUp();
  }

  virtual void TearDown() {
    dir_maker_.TearDown();
  }

  // The directory created by |dir_maker_| for the current test.
  syncable::Directory* directory() {
    return dir_maker_.directory();
  }

 protected:
  MessageLoop message_loop_;
  TestDirectorySetterUpper dir_maker_;
};
+
// Walks VerifyResponseBirthday through its cases: no birthday anywhere (fail),
// first-sync adoption, missing server birthday with local one set (tolerated),
// and an outright mismatch (fail).
TEST_F(SyncerProtoUtilTest, VerifyResponseBirthday) {
  // Both sides empty
  EXPECT_TRUE(directory()->store_birthday().empty());
  ClientToServerResponse response;
  EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(directory(), &response));

  // Remote set, local empty
  response.set_store_birthday("flan");
  EXPECT_TRUE(SyncerProtoUtil::VerifyResponseBirthday(directory(), &response));
  EXPECT_EQ(directory()->store_birthday(), "flan");

  // Remote empty, local set.
  response.clear_store_birthday();
  EXPECT_TRUE(SyncerProtoUtil::VerifyResponseBirthday(directory(), &response));
  EXPECT_EQ(directory()->store_birthday(), "flan");

  // Doesn't match
  response.set_store_birthday("meat");
  EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(directory(), &response));

  // An error code on the response does not change the mismatch verdict.
  response.set_error_code(sync_pb::SyncEnums::CLEAR_PENDING);
  EXPECT_FALSE(SyncerProtoUtil::VerifyResponseBirthday(directory(), &response));
}
+
// AddRequestBirthday should leave the message untouched when the directory
// has no birthday, and copy the birthday through when it does.
TEST_F(SyncerProtoUtilTest, AddRequestBirthday) {
  EXPECT_TRUE(directory()->store_birthday().empty());
  ClientToServerMessage msg;
  SyncerProtoUtil::AddRequestBirthday(directory(), &msg);
  EXPECT_FALSE(msg.has_store_birthday());

  directory()->set_store_birthday("meat");
  SyncerProtoUtil::AddRequestBirthday(directory(), &msg);
  EXPECT_EQ(msg.store_birthday(), "meat");
}
+
// Fake ServerConnectionManager whose PostBufferWithCachedAuth can be steered
// to fail outright (|send_error_|) or to return an ACCESS_DENIED response
// (|access_denied_|); otherwise it returns an empty, successful response.
class DummyConnectionManager : public browser_sync::ServerConnectionManager {
 public:
  DummyConnectionManager()
      : ServerConnectionManager("unused", 0, false, "version"),
        send_error_(false),
        access_denied_(false) {}

  virtual ~DummyConnectionManager() {}
  virtual bool PostBufferWithCachedAuth(
      PostBufferParams* params,
      ScopedServerStatusWatcher* watcher) OVERRIDE {
    if (send_error_) {
      return false;
    }

    ClientToServerResponse response;
    if (access_denied_) {
      response.set_error_code(sync_pb::SyncEnums::ACCESS_DENIED);
    }
    response.SerializeToString(&params->buffer_out);

    return true;
  }

  void set_send_error(bool send) {
    send_error_ = send;
  }

  void set_access_denied(bool denied) {
    access_denied_ = denied;
  }

 private:
  bool send_error_;    // When true, the post itself fails.
  bool access_denied_; // When true, the response carries ACCESS_DENIED.
};
+
// PostAndProcessHeaders should fail on a transport error, succeed on a clean
// response, and fail again when the response carries ACCESS_DENIED.
TEST_F(SyncerProtoUtilTest, PostAndProcessHeaders) {
  DummyConnectionManager dcm;
  ClientToServerMessage msg;
  msg.set_share("required");
  msg.set_message_contents(ClientToServerMessage::GET_UPDATES);
  ClientToServerResponse response;

  dcm.set_send_error(true);
  EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
      msg, &response));

  dcm.set_send_error(false);
  EXPECT_TRUE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
      msg, &response));

  dcm.set_access_denied(true);
  EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
      msg, &response));
}
+
// A THROTTLED error that names datatypes should throttle only those types via
// the context (the delegate is not needed and is passed as NULL).
TEST_F(SyncerProtoUtilTest, HandleThrottlingWithDatatypes) {
  MockSyncSessionContext context;
  SyncProtocolError error;
  error.error_type = browser_sync::THROTTLED;
  syncable::ModelTypeSet types;
  types.Put(syncable::BOOKMARKS);
  types.Put(syncable::PASSWORDS);
  error.error_data_types = types;

  base::TimeTicks ticks = base::TimeTicks::Now();

  EXPECT_CALL(context, SetUnthrottleTime(HasModelTypes(types), ticks));

  SyncerProtoUtil::HandleThrottleError(error, ticks, &context, NULL);
}
+
// A THROTTLED error with no datatypes should silence the whole client via the
// delegate (the context is not needed and is passed as NULL).
TEST_F(SyncerProtoUtilTest, HandleThrottlingNoDatatypes) {
  MockDelegate delegate;
  SyncProtocolError error;
  error.error_type = browser_sync::THROTTLED;

  base::TimeTicks ticks = base::TimeTicks::Now();

  EXPECT_CALL(delegate, OnSilencedUntil(ticks));

  SyncerProtoUtil::HandleThrottleError(error, ticks, NULL, &delegate);
}
+} // namespace browser_sync
diff --git a/sync/engine/syncer_types.cc b/sync/engine/syncer_types.cc
new file mode 100644
index 0000000..1899466
--- /dev/null
+++ b/sync/engine/syncer_types.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer_types.h"
+
+namespace browser_sync {
+
+SyncEngineEvent::SyncEngineEvent(EventCause cause) : what_happened(cause),
+ snapshot(NULL) {
+}
+
+SyncEngineEvent::~SyncEngineEvent() {}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer_types.h b/sync/engine/syncer_types.h
new file mode 100644
index 0000000..c34621e
--- /dev/null
+++ b/sync/engine/syncer_types.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_SYNCER_TYPES_H_
+#define SYNC_ENGINE_SYNCER_TYPES_H_
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/observer_list.h"
+#include "sync/syncable/model_type.h"
+
+namespace syncable {
+class Id;
+}
+
+// The intent of this is to keep all shared data types and enums for the syncer
+// in a single place without having dependencies between other files.
+namespace browser_sync {
+
+namespace sessions {
+struct SyncSessionSnapshot;
+}
+class Syncer;
+
+enum UpdateAttemptResponse {
+ // Update was applied or safely ignored.
+ SUCCESS,
+
+ // The conditions described by the following enum values are not mutually
+ // exclusive. The list has been ordered according to priority in the case of
+ // overlap, with highest priority first.
+ //
+  // For example, in the case where an item had both the IS_UNSYNCED and
+ // IS_UNAPPLIED_UPDATE flags set (CONFLICT_SIMPLE), and a SERVER_PARENT_ID
+ // that, if applied, would cause a directory loop (CONFLICT_HIERARCHY), and
+ // specifics that we can't decrypt right now (CONFLICT_ENCRYPTION), the
+ // UpdateApplicator would return CONFLICT_ENCRYPTION when attempting to
+ // process the item.
+ //
+ // We do not attempt to resolve CONFLICT_HIERARCHY or CONFLICT_ENCRYPTION
+ // items. We will leave these updates unapplied and wait for the server
+ // to send us newer updates that will resolve the conflict.
+
+ // We were unable to decrypt/encrypt this server data. As such, we can't make
+ // forward progress on this node, but because the passphrase may not arrive
+ // until later we don't want to get the syncer stuck. See UpdateApplicator
+ // for how this is handled.
+ CONFLICT_ENCRYPTION,
+
+ // These are some updates that, if applied, would violate the tree's
+ // invariants. Examples of this include the server adding children to locally
+ // deleted directories and directory moves that would create loops.
+ CONFLICT_HIERARCHY,
+
+ // This indicates that item was modified both remotely (IS_UNAPPLIED_UPDATE)
+ // and locally (IS_UNSYNCED). We use the ConflictResolver to decide which of
+ // the changes should take priority, or if we can possibly merge the data.
+ CONFLICT_SIMPLE
+};
+
+enum ServerUpdateProcessingResult {
+ // Success. Update applied and stored in SERVER_* fields or dropped if
+ // irrelevant.
+ SUCCESS_PROCESSED,
+
+ // Success. Update details stored in SERVER_* fields, but wasn't applied.
+ SUCCESS_STORED,
+
+ // Update is illegally inconsistent with earlier updates. e.g. A bookmark
+ // becoming a folder.
+ FAILED_INCONSISTENT,
+
+ // Update is illegal when considered alone. e.g. broken UTF-8 in the name.
+ FAILED_CORRUPT,
+
+ // Only used by VerifyUpdate. Indicates that an update is valid. As
+ // VerifyUpdate cannot return SUCCESS_STORED, we reuse the value.
+ SUCCESS_VALID = SUCCESS_STORED
+};
+
+// Different results from the verify phase will yield different methods of
+// processing in the ProcessUpdates phase. The SKIP result means the entry
+// doesn't go to the ProcessUpdates phase.
+enum VerifyResult {
+ VERIFY_FAIL,
+ VERIFY_SUCCESS,
+ VERIFY_UNDELETE,
+ VERIFY_SKIP,
+ VERIFY_UNDECIDED
+};
+
+enum VerifyCommitResult {
+ VERIFY_UNSYNCABLE,
+ VERIFY_OK,
+};
+
+struct SyncEngineEvent {
+ enum EventCause {
+ ////////////////////////////////////////////////////////////////
+ // Sent on entry of Syncer state machine
+ SYNC_CYCLE_BEGIN,
+
+ // SyncerCommand generated events.
+ STATUS_CHANGED,
+
+ // We have reached the SYNCER_END state in the main sync loop.
+ SYNC_CYCLE_ENDED,
+
+ ////////////////////////////////////////////////////////////////
+ // Generated in response to specific protocol actions or events.
+
+ // New token in updated_token.
+ UPDATED_TOKEN,
+
+ // This is sent after the Syncer (and SyncerThread) have initiated self
+ // halt due to no longer being permitted to communicate with the server.
+ // The listener should sever the sync / browser connections and delete sync
+  // data (i.e. as if the user clicked 'Stop Syncing' in the browser).
+ STOP_SYNCING_PERMANENTLY,
+
+ // These events are sent to indicate when we know the clearing of
+ // server data have failed or succeeded.
+ CLEAR_SERVER_DATA_SUCCEEDED,
+ CLEAR_SERVER_DATA_FAILED,
+
+  // This event is sent when we receive an actionable error. It is up to
+ // the listeners to figure out the action to take using the snapshot sent.
+ ACTIONABLE_ERROR,
+ };
+
+ explicit SyncEngineEvent(EventCause cause);
+ ~SyncEngineEvent();
+
+ EventCause what_happened;
+
+ // The last session used for syncing.
+ const sessions::SyncSessionSnapshot* snapshot;
+
+ // Update-Client-Auth returns a new token for sync use.
+ std::string updated_token;
+};
+
+class SyncEngineEventListener {
+ public:
+ // TODO(tim): Consider splitting this up to multiple callbacks, rather than
+  // have to do Event e(type); OnSyncEngineEvent(e); at all callsites.
+ virtual void OnSyncEngineEvent(const SyncEngineEvent& event) = 0;
+ protected:
+ virtual ~SyncEngineEventListener() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCER_TYPES_H_
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
new file mode 100644
index 0000000..9a66d98
--- /dev/null
+++ b/sync/engine/syncer_unittest.cc
@@ -0,0 +1,4523 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Syncer unit tests. Unfortunately a lot of these tests
+// are outdated and need to be reworked and updated.
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop.h"
+#include "base/string_number_conversions.h"
+#include "base/stringprintf.h"
+#include "base/time.h"
+#include "build/build_config.h"
+#include "sync/engine/get_commit_ids_command.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/engine/nigori_util.h"
+#include "sync/engine/process_updates_command.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/mock_connection_manager.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/test/fake_extensions_activity_monitor.h"
+#include "sync/util/cryptographer.h"
+#include "sync/util/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+
+using std::map;
+using std::multimap;
+using std::set;
+using std::string;
+
+namespace browser_sync {
+
+using syncable::BaseTransaction;
+using syncable::Blob;
+using syncable::CountEntriesWithName;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::GetFirstEntryWithName;
+using syncable::GetOnlyEntryWithName;
+using syncable::Id;
+using syncable::kEncryptedString;
+using syncable::MutableEntry;
+using syncable::ReadTransaction;
+using syncable::WriteTransaction;
+
+using syncable::BASE_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::GET_BY_CLIENT_TAG;
+using syncable::GET_BY_SERVER_TAG;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::NEXT_ID;
+using syncable::NON_UNIQUE_NAME;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::BASE_SERVER_SPECIFICS;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_NON_UNIQUE_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_VERSION;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_SERVER_TAG;
+using syncable::SPECIFICS;
+using syncable::SYNCING;
+using syncable::UNITTEST;
+
+using sessions::ConflictProgress;
+using sessions::ScopedSetSessionWriteTransaction;
+using sessions::StatusController;
+using sessions::SyncSessionContext;
+using sessions::SyncSession;
+
+class SyncerTest : public testing::Test,
+ public SyncSession::Delegate,
+ public ModelSafeWorkerRegistrar,
+ public SyncEngineEventListener {
+ protected:
+ SyncerTest() : syncer_(NULL), saw_syncer_event_(false) {}
+
+ // SyncSession::Delegate implementation.
+ virtual void OnSilencedUntil(const base::TimeTicks& silenced_until) OVERRIDE {
+ FAIL() << "Should not get silenced.";
+ }
+ virtual bool IsSyncingCurrentlySilenced() OVERRIDE {
+ return false;
+ }
+ virtual void OnReceivedLongPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ last_long_poll_interval_received_ = new_interval;
+ }
+ virtual void OnReceivedShortPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ last_short_poll_interval_received_ = new_interval;
+ }
+ virtual void OnReceivedSessionsCommitDelay(
+ const base::TimeDelta& new_delay) OVERRIDE {
+ last_sessions_commit_delay_seconds_ = new_delay;
+ }
+ virtual void OnShouldStopSyncingPermanently() OVERRIDE {
+ }
+ virtual void OnSyncProtocolError(
+ const sessions::SyncSessionSnapshot& snapshot) OVERRIDE {
+ }
+
+ // ModelSafeWorkerRegistrar implementation.
+ virtual void GetWorkers(std::vector<ModelSafeWorker*>* out) OVERRIDE {
+ out->push_back(worker_.get());
+ }
+
+ virtual void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) OVERRIDE {
+ // We're just testing the sync engine here, so we shunt everything to
+ // the SyncerThread. Datatypes which aren't enabled aren't in the map.
+ for (syncable::ModelTypeSet::Iterator it = enabled_datatypes_.First();
+ it.Good(); it.Inc()) {
+ (*out)[it.Get()] = GROUP_PASSIVE;
+ }
+ }
+
+ virtual void OnSyncEngineEvent(const SyncEngineEvent& event) OVERRIDE {
+ DVLOG(1) << "HandleSyncEngineEvent in unittest " << event.what_happened;
+ // we only test for entry-specific events, not status changed ones.
+ switch (event.what_happened) {
+ case SyncEngineEvent::SYNC_CYCLE_BEGIN: // Fall through.
+ case SyncEngineEvent::STATUS_CHANGED:
+ case SyncEngineEvent::SYNC_CYCLE_ENDED:
+ return;
+ default:
+ CHECK(false) << "Handling unknown error type in unit tests!!";
+ }
+ saw_syncer_event_ = true;
+ }
+
+ SyncSession* MakeSession() {
+ ModelSafeRoutingInfo info;
+ std::vector<ModelSafeWorker*> workers;
+ GetModelSafeRoutingInfo(&info);
+ GetWorkers(&workers);
+ syncable::ModelTypePayloadMap types =
+ syncable::ModelTypePayloadMapFromRoutingInfo(info, std::string());
+ return new SyncSession(context_.get(), this,
+ sessions::SyncSourceInfo(sync_pb::GetUpdatesCallerInfo::UNKNOWN, types),
+ info, workers);
+ }
+
+ bool SyncShareAsDelegate() {
+ session_.reset(MakeSession());
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ return session_->HasMoreToSync();
+ }
+
+ void LoopSyncShare() {
+ bool should_loop = false;
+ int loop_iterations = 0;
+ do {
+ ASSERT_LT(++loop_iterations, 100) << "infinite loop detected. please fix";
+ should_loop = SyncShareAsDelegate();
+ } while (should_loop);
+ }
+
+ virtual void SetUp() {
+ dir_maker_.SetUp();
+ mock_server_.reset(new MockConnectionManager(directory()));
+ EnableDatatype(syncable::BOOKMARKS);
+ EnableDatatype(syncable::NIGORI);
+ EnableDatatype(syncable::PREFERENCES);
+ EnableDatatype(syncable::NIGORI);
+ worker_ = new FakeModelWorker(GROUP_PASSIVE);
+ std::vector<SyncEngineEventListener*> listeners;
+ listeners.push_back(this);
+ context_.reset(
+ new SyncSessionContext(
+ mock_server_.get(), directory(), this,
+ &extensions_activity_monitor_, listeners, NULL));
+ ASSERT_FALSE(context_->resolver());
+ syncer_ = new Syncer();
+ session_.reset(MakeSession());
+
+ ReadTransaction trans(FROM_HERE, directory());
+ syncable::Directory::ChildHandles children;
+ directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
+ ASSERT_EQ(0u, children.size());
+ saw_syncer_event_ = false;
+ root_id_ = TestIdFactory::root();
+ parent_id_ = ids_.MakeServer("parent id");
+ child_id_ = ids_.MakeServer("child id");
+ }
+
+ virtual void TearDown() {
+ mock_server_.reset();
+ delete syncer_;
+ syncer_ = NULL;
+ dir_maker_.TearDown();
+ }
+ void WriteTestDataToEntry(WriteTransaction* trans, MutableEntry* entry) {
+ EXPECT_FALSE(entry->Get(IS_DIR));
+ EXPECT_FALSE(entry->Get(IS_DEL));
+ sync_pb::EntitySpecifics specifics;
+ specifics.mutable_bookmark()->set_url("http://demo/");
+ specifics.mutable_bookmark()->set_favicon("PNG");
+ entry->Put(syncable::SPECIFICS, specifics);
+ entry->Put(syncable::IS_UNSYNCED, true);
+ }
+ void VerifyTestDataInEntry(BaseTransaction* trans, Entry* entry) {
+ EXPECT_FALSE(entry->Get(IS_DIR));
+ EXPECT_FALSE(entry->Get(IS_DEL));
+ VerifyTestBookmarkDataInEntry(entry);
+ }
+ void VerifyTestBookmarkDataInEntry(Entry* entry) {
+ const sync_pb::EntitySpecifics& specifics = entry->Get(syncable::SPECIFICS);
+ EXPECT_TRUE(specifics.has_bookmark());
+ EXPECT_EQ("PNG", specifics.bookmark().favicon());
+ EXPECT_EQ("http://demo/", specifics.bookmark().url());
+ }
+
+ void SyncRepeatedlyToTriggerConflictResolution(SyncSession* session) {
+ // We should trigger after less than 6 syncs, but extra does no harm.
+ for (int i = 0 ; i < 6 ; ++i)
+ syncer_->SyncShare(session, SYNCER_BEGIN, SYNCER_END);
+ }
+ void SyncRepeatedlyToTriggerStuckSignal(SyncSession* session) {
+ // We should trigger after less than 10 syncs, but we want to avoid brittle
+ // tests.
+ for (int i = 0 ; i < 12 ; ++i)
+ syncer_->SyncShare(session, SYNCER_BEGIN, SYNCER_END);
+ }
+ sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
+ sync_pb::EntitySpecifics result;
+ AddDefaultFieldValue(syncable::BOOKMARKS, &result);
+ return result;
+ }
+
+ sync_pb::EntitySpecifics DefaultPreferencesSpecifics() {
+ sync_pb::EntitySpecifics result;
+ AddDefaultFieldValue(syncable::PREFERENCES, &result);
+ return result;
+ }
+ // Enumeration of alterations to entries for commit ordering tests.
+ enum EntryFeature {
+ LIST_END = 0, // Denotes the end of the list of features from below.
+ SYNCED, // Items are unsynced by default
+ DELETED,
+ OLD_MTIME,
+ MOVED_FROM_ROOT,
+ };
+
+ struct CommitOrderingTest {
+ // expected commit index.
+ int commit_index;
+ // Details about the item
+ syncable::Id id;
+ syncable::Id parent_id;
+ EntryFeature features[10];
+
+ static CommitOrderingTest MakeLastCommitItem() {
+ CommitOrderingTest last_commit_item;
+ last_commit_item.commit_index = -1;
+ last_commit_item.id = TestIdFactory::root();
+ return last_commit_item;
+ }
+ };
+
+ void RunCommitOrderingTest(CommitOrderingTest* test) {
+ map<int, syncable::Id> expected_positions;
+ { // Transaction scope.
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ while (!test->id.IsRoot()) {
+ if (test->commit_index >= 0) {
+ map<int, syncable::Id>::value_type entry(test->commit_index,
+ test->id);
+ bool double_position = !expected_positions.insert(entry).second;
+ ASSERT_FALSE(double_position) << "Two id's expected at one position";
+ }
+ string utf8_name = test->id.GetServerId();
+ string name(utf8_name.begin(), utf8_name.end());
+ MutableEntry entry(&trans, CREATE, test->parent_id, name);
+
+ entry.Put(syncable::ID, test->id);
+ if (test->id.ServerKnows()) {
+ entry.Put(BASE_VERSION, 5);
+ entry.Put(SERVER_VERSION, 5);
+ entry.Put(SERVER_PARENT_ID, test->parent_id);
+ }
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ // Set the time to 30 seconds in the future to reduce the chance of
+ // flaky tests.
+ const base::Time& now_plus_30s =
+ base::Time::Now() + base::TimeDelta::FromSeconds(30);
+ const base::Time& now_minus_2h =
+ base::Time::Now() - base::TimeDelta::FromHours(2);
+ entry.Put(syncable::MTIME, now_plus_30s);
+ for (size_t i = 0 ; i < arraysize(test->features) ; ++i) {
+ switch (test->features[i]) {
+ case LIST_END:
+ break;
+ case SYNCED:
+ entry.Put(syncable::IS_UNSYNCED, false);
+ break;
+ case DELETED:
+ entry.Put(syncable::IS_DEL, true);
+ break;
+ case OLD_MTIME:
+ entry.Put(MTIME, now_minus_2h);
+ break;
+ case MOVED_FROM_ROOT:
+ entry.Put(SERVER_PARENT_ID, trans.root_id());
+ break;
+ default:
+ FAIL() << "Bad value in CommitOrderingTest list";
+ }
+ }
+ test++;
+ }
+ }
+ LoopSyncShare();
+ ASSERT_TRUE(expected_positions.size() ==
+ mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ for (size_t i = 0; i < expected_positions.size(); ++i) {
+ EXPECT_EQ(1u, expected_positions.count(i));
+ EXPECT_TRUE(expected_positions[i] == mock_server_->committed_ids()[i]);
+ }
+ }
+
+ void DoTruncationTest(const vector<int64>& unsynced_handle_view,
+ const vector<syncable::Id>& expected_id_order) {
+ for (size_t limit = expected_id_order.size() + 2; limit > 0; --limit) {
+ StatusController* status = session_->mutable_status_controller();
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ ScopedSetSessionWriteTransaction set_trans(session_.get(), &wtrans);
+
+ ModelSafeRoutingInfo routes;
+ GetModelSafeRoutingInfo(&routes);
+ GetCommitIdsCommand command(limit);
+ std::set<int64> ready_unsynced_set;
+ command.FilterUnreadyEntries(&wtrans, syncable::ModelTypeSet(),
+ syncable::ModelTypeSet(), false,
+ unsynced_handle_view, &ready_unsynced_set);
+ command.BuildCommitIds(session_->write_transaction(), routes,
+ ready_unsynced_set);
+ syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
+ ready_unsynced_set.begin(), ready_unsynced_set.end());
+ status->set_unsynced_handles(ready_unsynced_vector);
+ vector<syncable::Id> output =
+ command.ordered_commit_set_->GetAllCommitIds();
+ size_t truncated_size = std::min(limit, expected_id_order.size());
+ ASSERT_EQ(truncated_size, output.size());
+ for (size_t i = 0; i < truncated_size; ++i) {
+ ASSERT_EQ(expected_id_order[i], output[i])
+ << "At index " << i << " with batch size limited to " << limit;
+ }
+ sessions::OrderedCommitSet::Projection proj;
+ proj = command.ordered_commit_set_->GetCommitIdProjection(GROUP_PASSIVE);
+ ASSERT_EQ(truncated_size, proj.size());
+ for (size_t i = 0; i < truncated_size; ++i) {
+ SCOPED_TRACE(::testing::Message("Projection mismatch with i = ") << i);
+ syncable::Id projected =
+ command.ordered_commit_set_->GetCommitIdAt(proj[i]);
+ ASSERT_EQ(expected_id_order[proj[i]], projected);
+ // Since this projection is the identity, the following holds.
+ ASSERT_EQ(expected_id_order[i], projected);
+ }
+ }
+ }
+
+ Directory* directory() {
+ return dir_maker_.directory();
+ }
+
+ int64 CreateUnsyncedDirectory(const string& entry_name,
+ const string& idstring) {
+ return CreateUnsyncedDirectory(entry_name,
+ syncable::Id::CreateFromServerId(idstring));
+ }
+
+ int64 CreateUnsyncedDirectory(const string& entry_name,
+ const syncable::Id& id) {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(),
+ entry_name);
+ EXPECT_TRUE(entry.good());
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ entry.Put(syncable::BASE_VERSION, id.ServerKnows() ? 1 : 0);
+ entry.Put(syncable::ID, id);
+ return entry.Get(META_HANDLE);
+ }
+
+ void EnableDatatype(syncable::ModelType model_type) {
+ enabled_datatypes_.Put(model_type);
+ mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
+ }
+
+ void DisableDatatype(syncable::ModelType model_type) {
+ enabled_datatypes_.Remove(model_type);
+ mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
+ }
+
+ template<typename FieldType, typename ValueType>
+ ValueType GetField(int64 metahandle, FieldType field,
+ ValueType default_value) {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry(&trans, GET_BY_HANDLE, metahandle);
+ EXPECT_TRUE(entry.good());
+ if (!entry.good()) {
+ return default_value;
+ }
+ EXPECT_EQ(metahandle, entry.Get(META_HANDLE));
+ return entry.Get(field);
+ }
+
+ // Helper getters that work without a transaction, to reduce boilerplate.
+ Id Get(int64 metahandle, syncable::IdField field) {
+ return GetField(metahandle, field, syncable::GetNullId());
+ }
+
+ string Get(int64 metahandle, syncable::StringField field) {
+ return GetField(metahandle, field, string());
+ }
+
+ int64 Get(int64 metahandle, syncable::Int64Field field) {
+ return GetField(metahandle, field, syncable::kInvalidMetaHandle);
+ }
+
+ int64 Get(int64 metahandle, syncable::BaseVersion field) {
+ const int64 kDefaultValue = -100;
+ return GetField(metahandle, field, kDefaultValue);
+ }
+
+ bool Get(int64 metahandle, syncable::IndexedBitField field) {
+ return GetField(metahandle, field, false);
+ }
+
+ bool Get(int64 metahandle, syncable::IsDelField field) {
+ return GetField(metahandle, field, false);
+ }
+
+ bool Get(int64 metahandle, syncable::BitField field) {
+ return GetField(metahandle, field, false);
+ }
+
+ Cryptographer* cryptographer(syncable::BaseTransaction* trans) {
+ return directory()->GetCryptographer(trans);
+ }
+
+ MessageLoop message_loop_;
+
+ // Some ids to aid tests. Only the root one's value is specific. The rest
+ // are named for test clarity.
+ // TODO(chron): Get rid of these inbuilt IDs. They only make it
+ // more confusing.
+ syncable::Id root_id_;
+ syncable::Id parent_id_;
+ syncable::Id child_id_;
+
+ TestIdFactory ids_;
+
+ TestDirectorySetterUpper dir_maker_;
+ FakeEncryptor encryptor_;
+ FakeExtensionsActivityMonitor extensions_activity_monitor_;
+ scoped_ptr<MockConnectionManager> mock_server_;
+
+ Syncer* syncer_;
+
+ scoped_ptr<SyncSession> session_;
+ scoped_ptr<SyncSessionContext> context_;
+ bool saw_syncer_event_;
+ base::TimeDelta last_short_poll_interval_received_;
+ base::TimeDelta last_long_poll_interval_received_;
+ base::TimeDelta last_sessions_commit_delay_seconds_;
+ scoped_refptr<ModelSafeWorker> worker_;
+
+ syncable::ModelTypeSet enabled_datatypes_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncerTest);
+};
+
+TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
+ {
+ Syncer::UnsyncedMetaHandles handles;
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ }
+ ASSERT_EQ(0u, handles.size());
+ }
+ // TODO(sync): When we can dynamically connect and disconnect the mock
+ // ServerConnectionManager test disconnected GetUnsyncedEntries here. It's a
+ // regression for a very old bug.
+}
+
+TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
+ syncable::Id root = ids_.root();
+ // Create two server entries.
+ mock_server_->AddUpdateDirectory(ids_.MakeServer("x"), root, "X", 10, 10);
+ mock_server_->AddUpdateDirectory(ids_.MakeServer("w"), root, "W", 10, 10);
+ SyncShareAsDelegate();
+
+ // Create some new client entries.
+ CreateUnsyncedDirectory("C", ids_.MakeLocal("c"));
+ CreateUnsyncedDirectory("B", ids_.MakeLocal("b"));
+ CreateUnsyncedDirectory("D", ids_.MakeLocal("d"));
+ CreateUnsyncedDirectory("E", ids_.MakeLocal("e"));
+ CreateUnsyncedDirectory("J", ids_.MakeLocal("j"));
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry_x(&wtrans, GET_BY_ID, ids_.MakeServer("x"));
+ MutableEntry entry_b(&wtrans, GET_BY_ID, ids_.MakeLocal("b"));
+ MutableEntry entry_c(&wtrans, GET_BY_ID, ids_.MakeLocal("c"));
+ MutableEntry entry_d(&wtrans, GET_BY_ID, ids_.MakeLocal("d"));
+ MutableEntry entry_e(&wtrans, GET_BY_ID, ids_.MakeLocal("e"));
+ MutableEntry entry_w(&wtrans, GET_BY_ID, ids_.MakeServer("w"));
+ MutableEntry entry_j(&wtrans, GET_BY_ID, ids_.MakeLocal("j"));
+ entry_x.Put(IS_UNSYNCED, true);
+ entry_b.Put(PARENT_ID, entry_x.Get(ID));
+ entry_d.Put(PARENT_ID, entry_b.Get(ID));
+ entry_c.Put(PARENT_ID, entry_x.Get(ID));
+ entry_c.PutPredecessor(entry_b.Get(ID));
+ entry_e.Put(PARENT_ID, entry_c.Get(ID));
+ entry_w.PutPredecessor(entry_x.Get(ID));
+ entry_w.Put(IS_UNSYNCED, true);
+ entry_w.Put(SERVER_VERSION, 20);
+ entry_w.Put(IS_UNAPPLIED_UPDATE, true); // Fake a conflict.
+ entry_j.PutPredecessor(entry_w.Get(ID));
+ }
+
+ // The arrangement is now: x (b (d) c (e)) w j
+  // Entry "w" is in conflict, making its successors unready to commit.
+ vector<int64> unsynced_handle_view;
+ vector<syncable::Id> expected_order;
+ {
+ ReadTransaction rtrans(FROM_HERE, directory());
+ SyncerUtil::GetUnsyncedEntries(&rtrans, &unsynced_handle_view);
+ }
+ // The expected order is "x", "b", "c", "d", "e", truncated appropriately.
+ expected_order.push_back(ids_.MakeServer("x"));
+ expected_order.push_back(ids_.MakeLocal("b"));
+ expected_order.push_back(ids_.MakeLocal("c"));
+ expected_order.push_back(ids_.MakeLocal("d"));
+ expected_order.push_back(ids_.MakeLocal("e"));
+ DoTruncationTest(unsynced_handle_view, expected_order);
+}
+
+TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
+ const syncable::ModelTypeSet throttled_types(syncable::BOOKMARKS);
+ sync_pb::EntitySpecifics bookmark_data;
+ AddDefaultFieldValue(syncable::BOOKMARKS, &bookmark_data);
+
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ SyncShareAsDelegate();
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(SPECIFICS, bookmark_data);
+ A.Put(NON_UNIQUE_NAME, "bookmark");
+ }
+
+ // Now set the throttled types.
+ context_->SetUnthrottleTime(
+ throttled_types,
+ base::TimeTicks::Now() + base::TimeDelta::FromSeconds(1200));
+ SyncShareAsDelegate();
+
+ {
+ // Nothing should have been committed as bookmarks is throttled.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entryA.good());
+ EXPECT_TRUE(entryA.Get(IS_UNSYNCED));
+ }
+
+ // Now unthrottle.
+ context_->SetUnthrottleTime(
+ throttled_types,
+ base::TimeTicks::Now() - base::TimeDelta::FromSeconds(1200));
+ SyncShareAsDelegate();
+ {
+ // It should have been committed.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entryA.good());
+ EXPECT_FALSE(entryA.Get(IS_UNSYNCED));
+ }
+}
+
+// We use a macro so we can preserve the error location.
+#define VERIFY_ENTRY(id, is_unapplied, is_unsynced, prev_initialized, \
+ parent_id, version, server_version, id_fac, rtrans) \
+ do { \
+ Entry entryA(rtrans, syncable::GET_BY_ID, id_fac.FromNumber(id)); \
+ ASSERT_TRUE(entryA.good()); \
+ /* We don't use EXPECT_EQ here because when the left side param is false,
+ gcc 4.6 warns about converting 'false' to pointer type for argument 1. */ \
+ EXPECT_TRUE(is_unsynced == entryA.Get(IS_UNSYNCED)); \
+ EXPECT_TRUE(is_unapplied == entryA.Get(IS_UNAPPLIED_UPDATE)); \
+ EXPECT_TRUE(prev_initialized == \
+ syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics( \
+ entryA.Get(BASE_SERVER_SPECIFICS)))); \
+ EXPECT_TRUE(parent_id == -1 || \
+ entryA.Get(PARENT_ID) == id_fac.FromNumber(parent_id)); \
+ EXPECT_EQ(version, entryA.Get(BASE_VERSION)); \
+ EXPECT_EQ(server_version, entryA.Get(SERVER_VERSION)); \
+ } while (0)
+
+TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
+ KeyParams key_params = {"localhost", "dummy", "foobar"};
+ KeyParams other_params = {"localhost", "dummy", "foobar2"};
+ sync_pb::EntitySpecifics bookmark, encrypted_bookmark;
+ bookmark.mutable_bookmark()->set_url("url");
+ bookmark.mutable_bookmark()->set_title("title");
+ AddDefaultFieldValue(syncable::BOOKMARKS, &encrypted_bookmark);
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "C", 10, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "D", 10, 10);
+ SyncShareAsDelegate();
+ // Server side change will put A in conflict.
+ mock_server_->AddUpdateDirectory(1, 0, "A", 20, 20);
+ {
+ // Mark bookmarks as encrypted and set the cryptographer to have pending
+ // keys.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ browser_sync::Cryptographer other_cryptographer(&encryptor_);
+ other_cryptographer.AddKey(other_params);
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ // Set up with an old passphrase, but have pending keys
+ cryptographer(&wtrans)->AddKey(key_params);
+ cryptographer(&wtrans)->Encrypt(bookmark,
+ encrypted_bookmark.mutable_encrypted());
+ cryptographer(&wtrans)->Update(*nigori);
+
+ // In conflict but properly encrypted.
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(SPECIFICS, encrypted_bookmark);
+ A.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Not in conflict and properly encrypted.
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNSYNCED, true);
+ B.Put(SPECIFICS, encrypted_bookmark);
+ B.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Unencrypted specifics.
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(IS_UNSYNCED, true);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Unencrypted non_unique_name.
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(IS_UNSYNCED, true);
+ D.Put(SPECIFICS, encrypted_bookmark);
+ D.Put(NON_UNIQUE_NAME, "not encrypted");
+ }
+ SyncShareAsDelegate();
+ {
+ // We remove any unready entries from the status controller's unsynced
+ // handles, so this should remain 0 even though the entries didn't commit.
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+    // Nothing should have committed due to bookmarks being encrypted and
+ // the cryptographer having pending keys. A would have been resolved
+ // as a simple conflict, but still be unsynced until the next sync cycle.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, true, false, 0, 20, 20, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, true, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
+
+ // Resolve the pending keys.
+ cryptographer(&rtrans)->DecryptPendingKeys(other_params);
+ }
+ SyncShareAsDelegate();
+ {
+    // 2 unsynced handles to reflect the items that committed successfully.
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
+ // All properly encrypted and non-conflicting items should commit. "A" was
+ // conflicting, but last sync cycle resolved it as simple conflict, so on
+ // this sync cycle it committed successfully.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ // Committed successfully.
+ VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
+ // Committed successfully.
+ VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
+ // Was not properly encrypted.
+ VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
+ // Was not properly encrypted.
+ VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
+ }
+ {
+ // Fix the remaining items.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(SPECIFICS, encrypted_bookmark);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(SPECIFICS, encrypted_bookmark);
+ D.Put(NON_UNIQUE_NAME, kEncryptedString);
+ }
+ SyncShareAsDelegate();
+ {
+ // We attempted to commit two items.
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
+ // None should be unsynced anymore.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 0, 11, 11, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 11, 11, ids_, &rtrans);
+ }
+}
+
+TEST_F(SyncerTest, EncryptionAwareConflicts) {
+ KeyParams key_params = {"localhost", "dummy", "foobar"};
+ browser_sync::Cryptographer other_cryptographer(&encryptor_);
+ other_cryptographer.AddKey(key_params);
+ sync_pb::EntitySpecifics bookmark, encrypted_bookmark, modified_bookmark;
+ bookmark.mutable_bookmark()->set_title("title");
+ other_cryptographer.Encrypt(bookmark,
+ encrypted_bookmark.mutable_encrypted());
+ AddDefaultFieldValue(syncable::BOOKMARKS, &encrypted_bookmark);
+ modified_bookmark.mutable_bookmark()->set_title("title2");
+ other_cryptographer.Encrypt(modified_bookmark,
+ modified_bookmark.mutable_encrypted());
+ sync_pb::EntitySpecifics pref, encrypted_pref, modified_pref;
+ pref.mutable_preference()->set_name("name");
+ AddDefaultFieldValue(syncable::PREFERENCES, &encrypted_pref);
+ other_cryptographer.Encrypt(pref,
+ encrypted_pref.mutable_encrypted());
+ modified_pref.mutable_preference()->set_name("name2");
+ other_cryptographer.Encrypt(modified_pref,
+ modified_pref.mutable_encrypted());
+ {
+ // Mark bookmarks and preferences as encrypted and set the cryptographer to
+ // have pending keys.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ nigori->set_encrypt_preferences(true);
+ cryptographer(&wtrans)->Update(*nigori);
+ EXPECT_TRUE(cryptographer(&wtrans)->has_pending_keys());
+ }
+
+ mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark);
+ mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark);
+ mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Initial state. Everything is normal.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
+ }
+
+ // Server side encryption will not be applied due to undecryptable data.
+ // At this point, BASE_SERVER_SPECIFICS should be filled for all four items.
+ mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 20, 20, true, 0,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 20, 20, false, 2,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 20, 20, false, 1,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 20, 20, false, 0,
+ encrypted_pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // All should be unapplied due to being undecryptable and have a valid
+ // BASE_SERVER_SPECIFICS.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, true, false, true, 0, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 20, ids_, &rtrans);
+ }
+
+ // Server side change that don't modify anything should not affect
+ // BASE_SERVER_SPECIFICS (such as name changes and mtime changes).
+ mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 30, 30, true, 0,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 30, 30, false, 2,
+ encrypted_bookmark);
+ // Item 3 doesn't change.
+ mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 30, 30, false, 0,
+ encrypted_pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Items 1, 2, and 4 should have newer server versions, 3 remains the same.
+ // All should remain unapplied due to be undecryptable.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, true, false, true, 0, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ // Positional changes, parent changes, and specifics changes should reset
+ // BASE_SERVER_SPECIFICS.
+ // Became unencrypted.
+ mock_server_->AddUpdateSpecifics(1, 0, "A", 40, 40, true, 0, bookmark);
+ // Reordered to after item 2.
+ mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 30, 30, false, 3,
+ encrypted_bookmark);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
+ // Items 1 is now unencrypted, so should have applied normally.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 40, 40, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, false, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ // Make local changes, which should remain unsynced for items 2, 3, 4.
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(SPECIFICS, modified_bookmark);
+ A.Put(NON_UNIQUE_NAME, kEncryptedString);
+ A.Put(IS_UNSYNCED, true);
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(SPECIFICS, modified_bookmark);
+ B.Put(NON_UNIQUE_NAME, kEncryptedString);
+ B.Put(IS_UNSYNCED, true);
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(SPECIFICS, modified_bookmark);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ C.Put(IS_UNSYNCED, true);
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(SPECIFICS, modified_pref);
+ D.Put(NON_UNIQUE_NAME, kEncryptedString);
+ D.Put(IS_UNSYNCED, true);
+ }
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Item 1 remains unsynced due to there being pending keys.
+ // Items 2, 3, 4 should remain unsynced since they were not up to date.
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, true, false, 0, 40, 40, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, true, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, true, false, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, true, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ {
+ ReadTransaction rtrans(FROM_HERE, directory());
+ // Resolve the pending keys.
+ cryptographer(&rtrans)->DecryptPendingKeys(key_params);
+ }
+ // First cycle resolves conflicts, second cycle commits changes.
+ SyncShareAsDelegate();
+ EXPECT_EQ(2, session_->status_controller().syncer_status().
+ num_server_overwrites);
+ EXPECT_EQ(1, session_->status_controller().syncer_status().
+ num_local_overwrites);
+ // We attempted to commit item 1.
+ EXPECT_EQ(1U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
+ SyncShareAsDelegate();
+ {
+ // Everything should be resolved now. The local changes should have
+ // overwritten the server changes for 2 and 4, while the server changes
+ // overwrote the local for entry 3.
+ // We attempted to commit two handles.
+ EXPECT_EQ(0, session_->status_controller().syncer_status().
+ num_server_overwrites);
+ EXPECT_EQ(0, session_->status_controller().syncer_status().
+ num_local_overwrites);
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
+ }
+}
+
+#undef VERIFY_ENTRY
+
+// Verifies that a conflict on the nigori (encryption metadata) node is
+// resolved by merging local and server data: the server's keybag wins (and
+// becomes pending), while locally-set fields (encrypt_everything, sync_tabs)
+// and the union of encrypted types are preserved and re-committed.
+TEST_F(SyncerTest, NigoriConflicts) {
+ KeyParams local_key_params = {"localhost", "dummy", "blargle"};
+ KeyParams other_key_params = {"localhost", "dummy", "foobar"};
+ browser_sync::Cryptographer other_cryptographer(&encryptor_);
+ other_cryptographer.AddKey(other_key_params);
+ syncable::ModelTypeSet encrypted_types(syncable::PASSWORDS, syncable::NIGORI);
+ sync_pb::EntitySpecifics initial_nigori_specifics;
+ initial_nigori_specifics.mutable_nigori();
+ mock_server_->SetNigori(1, 10, 10, initial_nigori_specifics);
+
+ // Data for testing encryption/decryption.
+ sync_pb::EntitySpecifics other_encrypted_specifics;
+ other_encrypted_specifics.mutable_bookmark()->set_title("title");
+ other_cryptographer.Encrypt(
+ other_encrypted_specifics,
+ other_encrypted_specifics.mutable_encrypted());
+ sync_pb::EntitySpecifics our_encrypted_specifics;
+ our_encrypted_specifics.mutable_bookmark()->set_title("title2");
+
+ // Receive the initial nigori node.
+ SyncShareAsDelegate();
+ encrypted_types = syncable::ModelTypeSet::All();
+ {
+ // Local changes with different passphrase, different types, and sync_tabs.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ cryptographer(&wtrans)->AddKey(local_key_params);
+ cryptographer(&wtrans)->Encrypt(
+ our_encrypted_specifics,
+ our_encrypted_specifics.mutable_encrypted());
+ cryptographer(&wtrans)->GetKeys(
+ nigori->mutable_encrypted());
+ cryptographer(&wtrans)->UpdateNigoriFromEncryptedTypes(nigori);
+ nigori->set_sync_tabs(true);
+ cryptographer(&wtrans)->set_encrypt_everything();
+ MutableEntry nigori_entry(&wtrans, GET_BY_SERVER_TAG,
+ syncable::ModelTypeToRootTag(syncable::NIGORI));
+ ASSERT_TRUE(nigori_entry.good());
+ nigori_entry.Put(SPECIFICS, specifics);
+ nigori_entry.Put(IS_UNSYNCED, true);
+ EXPECT_FALSE(cryptographer(&wtrans)->has_pending_keys());
+ EXPECT_TRUE(encrypted_types.Equals(
+ cryptographer(&wtrans)->GetEncryptedTypes()));
+ }
+ {
+ // Stage a conflicting server-side nigori built with the other passphrase.
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ nigori->set_encrypt_preferences(true);
+ nigori->set_encrypt_everything(false);
+ mock_server_->SetNigori(1, 20, 20, specifics);
+ }
+
+ // Will result in downloading the server nigori, which puts the local nigori
+ // in a state of conflict. This is resolved by merging the local and server
+ // data (with priority given to the server's encryption keys if they are
+ // undecryptable), which we then commit. The cryptographer should have pending
+ // keys and merge the set of encrypted types.
+ SyncShareAsDelegate(); // Resolve conflict in this cycle.
+ SyncShareAsDelegate(); // Commit local change in this cycle.
+ {
+ // Ensure the nigori data merged (encrypted types, sync_tabs).
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry nigori_entry(&wtrans, GET_BY_SERVER_TAG,
+ syncable::ModelTypeToRootTag(syncable::NIGORI));
+ ASSERT_TRUE(nigori_entry.good());
+ EXPECT_FALSE(nigori_entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(nigori_entry.Get(IS_UNSYNCED));
+ sync_pb::EntitySpecifics specifics = nigori_entry.Get(SPECIFICS);
+ EXPECT_TRUE(cryptographer(&wtrans)->has_pending_keys());
+ EXPECT_TRUE(encrypted_types.Equals(
+ cryptographer(&wtrans)->GetEncryptedTypes()));
+ EXPECT_TRUE(cryptographer(&wtrans)->encrypt_everything());
+ EXPECT_TRUE(specifics.nigori().sync_tabs());
+ // Supply the pending keys. Afterwards, we should be able to decrypt both
+ // our own encrypted data and data encrypted by the other cryptographer,
+ // but the key provided by the other cryptographer should be the default.
+ EXPECT_TRUE(cryptographer(&wtrans)->DecryptPendingKeys(other_key_params));
+ EXPECT_FALSE(cryptographer(&wtrans)->has_pending_keys());
+ sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
+ cryptographer(&wtrans)->GetKeys(nigori->mutable_encrypted());
+ cryptographer(&wtrans)->UpdateNigoriFromEncryptedTypes(nigori);
+ // Normally this would be written as part of SetPassphrase, but we do it
+ // manually for the test.
+ nigori_entry.Put(SPECIFICS, specifics);
+ nigori_entry.Put(IS_UNSYNCED, true);
+ }
+
+ SyncShareAsDelegate();
+ {
+ // Ensure everything is committed and stable now. The cryptographer
+ // should be able to decrypt both sets of keys, sync_tabs should be true,
+ // and the encrypted types should have been unioned.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry nigori_entry(&wtrans, GET_BY_SERVER_TAG,
+ syncable::ModelTypeToRootTag(syncable::NIGORI));
+ ASSERT_TRUE(nigori_entry.good());
+ EXPECT_FALSE(nigori_entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(nigori_entry.Get(IS_UNSYNCED));
+ EXPECT_TRUE(cryptographer(&wtrans)->CanDecrypt(
+ our_encrypted_specifics.encrypted()));
+ EXPECT_FALSE(cryptographer(&wtrans)->
+ CanDecryptUsingDefaultKey(our_encrypted_specifics.encrypted()));
+ EXPECT_TRUE(cryptographer(&wtrans)->CanDecrypt(
+ other_encrypted_specifics.encrypted()));
+ EXPECT_TRUE(cryptographer(&wtrans)->
+ CanDecryptUsingDefaultKey(other_encrypted_specifics.encrypted()));
+ EXPECT_TRUE(nigori_entry.Get(SPECIFICS).nigori().sync_tabs());
+ }
+}
+
+// Creates an unsynced parent/child pair locally and verifies a sync cycle
+// commits both (parent before child) and round-trips the child's test data.
+TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ "Pete");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, "Pete");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 1);
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+
+ const StatusController& status = session_->status_controller();
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ EXPECT_EQ(2u, status.unsynced_handles().size());
+ ASSERT_EQ(2u, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ {
+ ReadTransaction rt(FROM_HERE, directory());
+ Entry entry(&rt, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(entry.good());
+ VerifyTestDataInEntry(&rt, &entry);
+ }
+}
+
+// Purging a datatype while one of its entries is still unsynced must drop
+// that entry (never commit it) without disturbing commits of other types.
+TEST_F(SyncerTest, TestPurgeWhileUnsynced) {
+ // Similar to above, but throw a purge operation into the mix. Bug 49278.
+ syncable::Id pref_node_id = TestIdFactory::MakeServer("Tim");
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), "Pete");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, "Pete");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 1);
+ WriteTestDataToEntry(&wtrans, &child);
+
+ // This preference node is the one that will be purged below.
+ MutableEntry parent2(&wtrans, syncable::CREATE, wtrans.root_id(), "Tim");
+ ASSERT_TRUE(parent2.good());
+ parent2.Put(syncable::IS_UNSYNCED, true);
+ parent2.Put(syncable::IS_DIR, true);
+ parent2.Put(syncable::SPECIFICS, DefaultPreferencesSpecifics());
+ parent2.Put(syncable::BASE_VERSION, 1);
+ parent2.Put(syncable::ID, pref_node_id);
+ }
+
+ directory()->PurgeEntriesWithTypeIn(
+ syncable::ModelTypeSet(syncable::PREFERENCES));
+
+ const StatusController& status = session_->status_controller();
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ // Only the two bookmark nodes should have been committed.
+ EXPECT_EQ(2U, status.unsynced_handles().size());
+ ASSERT_EQ(2U, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ {
+ ReadTransaction rt(FROM_HERE, directory());
+ Entry entry(&rt, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(entry.good());
+ VerifyTestDataInEntry(&rt, &entry);
+ }
+ directory()->SaveChanges();
+ {
+ // The purged preference node must be gone after SaveChanges.
+ ReadTransaction rt(FROM_HERE, directory());
+ Entry entry(&rt, syncable::GET_BY_ID, pref_node_id);
+ ASSERT_FALSE(entry.good());
+ }
+}
+
+// Purging a datatype while one of its entries is an unapplied update must
+// remove the entry entirely after the next sync cycle and SaveChanges.
+TEST_F(SyncerTest, TestPurgeWhileUnapplied) {
+ // Similar to above, but for unapplied items. Bug 49278.
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), "Pete");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::ID, parent_id_);
+ }
+
+ directory()->PurgeEntriesWithTypeIn(
+ syncable::ModelTypeSet(syncable::BOOKMARKS));
+
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ directory()->SaveChanges();
+ {
+ ReadTransaction rt(FROM_HERE, directory());
+ Entry entry(&rt, syncable::GET_BY_ID, parent_id_);
+ ASSERT_FALSE(entry.good());
+ }
+}
+
+// Commit ordering: a new child (expected position 1) under a new parent
+// (expected position 0) must commit parent-first.
+TEST_F(SyncerTest, TestCommitListOrderingTwoItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-1001), ids_.FromNumber(-1000)},
+ {0, ids_.FromNumber(-1000), ids_.FromNumber(0)},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a three-deep chain of new items must commit top-down
+// (grandparent, parent, child) regardless of declaration order.
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Same three-deep chain as above, but with the commit batch capped at 2 so
+// the chain must be split across batches while preserving top-down order.
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTallLimitedSize) {
+ context_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a single server-known deleted item is committed.
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItem) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a deleted item the server never saw (local-only id,
+// expected position -1) must not be committed at all.
+TEST_F(SyncerTest, TestCommitListOrderingSingleUncommittedDeletedItem) {
+ CommitOrderingTest items[] = {
+ {-1, ids_.FromNumber(-1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a single recently-deleted server-known item still commits
+// under the delete-unroll code path.
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: an item deleted a long time ago (OLD_MTIME) is still
+// committed as a deletion.
+TEST_F(SyncerTest,
+ TestCommitListOrderingSingleLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: of two long-deleted items, only the parent is committed;
+// the child (expected position -1) is skipped.
+TEST_F(SyncerTest, TestCommitListOrderingTwoLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: three long-deleted siblings all commit even when the
+// commit batch size (2) forces multiple batches.
+TEST_F(SyncerTest, TestCommitListOrdering3LongDeletedItemsWithSizeLimit) {
+ context_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {1, ids_.FromNumber(1001), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {2, ids_.FromNumber(1002), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: recently-deleted parent commits, but its recently-deleted
+// child (expected position -1) does not.
+TEST_F(SyncerTest, TestCommitListOrderingTwoDeletedItemsWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a mixed tree of long-deleted, synced (never committed,
+// position -1), and recently-deleted items commits deletions in the
+// expected order while skipping the already-synced nodes.
+TEST_F(SyncerTest, TestCommitListOrderingComplexDeletionScenario) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Same mixed deletion tree as above, with a second recently-deleted sibling
+// added; both recent deletes must still be ordered after the old ones.
+TEST_F(SyncerTest,
+ TestCommitListOrderingComplexDeletionScenarioWith2RecentDeletes) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ {3, ids_.FromNumber(1005), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering: a deleted item that was moved to the root
+// (MOVED_FROM_ROOT) commits before its former parent's deletion.
+TEST_F(SyncerTest, TestCommitListOrderingDeleteMovedItems) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME,
+ MOVED_FROM_ROOT}},
+ CommitOrderingTest::MakeLastCommitItem(),
+ };
+ RunCommitOrderingTest(items);
+}
+
+// Commit ordering with nested hierarchies: live items commit top-down first,
+// and deletions (treated as moves here, since SERVER_PARENT_ID is unset)
+// come last in unspecified relative order.
+TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
+ // Backdated mtime so the deletions below count as "old" deletes.
+ const base::Time& now_minus_2h =
+ base::Time::Now() - base::TimeDelta::FromHours(2);
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ {
+ // A live three-level chain (100 -> 101 -> 102) to be committed.
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ "Bob");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::ID, ids_.FromNumber(100));
+ parent.Put(syncable::BASE_VERSION, 1);
+ MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(100),
+ "Bob");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child.Put(syncable::ID, ids_.FromNumber(101));
+ child.Put(syncable::BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(101),
+ "Bob");
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(syncable::ID, ids_.FromNumber(102));
+ grandchild.Put(syncable::IS_UNSYNCED, true);
+ grandchild.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ grandchild.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ // Create three deleted items whose deletions we expect to be sent to the
+ // server.
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ "Pete");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::IS_DEL, true);
+ parent.Put(syncable::ID, ids_.FromNumber(103));
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::MTIME, now_minus_2h);
+ MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(103),
+ "Pete");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child.Put(syncable::IS_DEL, true);
+ child.Put(syncable::ID, ids_.FromNumber(104));
+ child.Put(syncable::BASE_VERSION, 1);
+ child.Put(syncable::MTIME, now_minus_2h);
+ MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(104),
+ "Pete");
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(syncable::IS_UNSYNCED, true);
+ grandchild.Put(syncable::ID, ids_.FromNumber(105));
+ grandchild.Put(syncable::IS_DEL, true);
+ grandchild.Put(syncable::IS_DIR, false);
+ grandchild.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ grandchild.Put(syncable::BASE_VERSION, 1);
+ grandchild.Put(syncable::MTIME, now_minus_2h);
+ }
+ }
+
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
+ ASSERT_EQ(6u, mock_server_->committed_ids().size());
+ // This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
+ // It will treat these like moves.
+ vector<syncable::Id> commit_ids(mock_server_->committed_ids());
+ EXPECT_TRUE(ids_.FromNumber(100) == commit_ids[0]);
+ EXPECT_TRUE(ids_.FromNumber(101) == commit_ids[1]);
+ EXPECT_TRUE(ids_.FromNumber(102) == commit_ids[2]);
+ // We don't guarantee the delete orders in this test, only that they occur
+ // at the end.
+ std::sort(commit_ids.begin() + 3, commit_ids.end());
+ EXPECT_TRUE(ids_.FromNumber(103) == commit_ids[3]);
+ EXPECT_TRUE(ids_.FromNumber(104) == commit_ids[4]);
+ EXPECT_TRUE(ids_.FromNumber(105) == commit_ids[5]);
+}
+
+// Commit ordering when new (negative-id) items are interleaved with
+// server-known items across several transactions: parents still precede
+// their children in the committed id list.
+TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), "1");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, wtrans.root_id(), "2");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child.Put(syncable::ID, child_id_);
+ parent.Put(syncable::BASE_VERSION, 1);
+ child.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ // Known parent (102) with a brand-new child (-103).
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, "A");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::ID, ids_.FromNumber(102));
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, "B");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child.Put(syncable::ID, ids_.FromNumber(-103));
+ parent.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ // Brand-new parent (-104) with a server-known child (105).
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, child_id_, "A");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::ID, ids_.FromNumber(-104));
+ MutableEntry child(&wtrans, syncable::CREATE, child_id_, "B");
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child.Put(syncable::ID, ids_.FromNumber(105));
+ child.Put(syncable::BASE_VERSION, 1);
+ }
+
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
+ ASSERT_EQ(6u, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(ids_.FromNumber(102) == mock_server_->committed_ids()[2]);
+ EXPECT_TRUE(ids_.FromNumber(-103) == mock_server_->committed_ids()[3]);
+ EXPECT_TRUE(ids_.FromNumber(-104) == mock_server_->committed_ids()[4]);
+ EXPECT_TRUE(ids_.FromNumber(105) == mock_server_->committed_ids()[5]);
+}
+
+// Commit ordering: a parent with two children commits the parent first and
+// then both children, demonstrating siblings need no relative ordering.
+TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
+ syncable::Id child2_id = ids_.NewServerId();
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), "P");
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child1(&wtrans, syncable::CREATE, parent_id_, "1");
+ ASSERT_TRUE(child1.good());
+ child1.Put(syncable::IS_UNSYNCED, true);
+ child1.Put(syncable::ID, child_id_);
+ child1.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ MutableEntry child2(&wtrans, syncable::CREATE, parent_id_, "2");
+ ASSERT_TRUE(child2.good());
+ child2.Put(syncable::IS_UNSYNCED, true);
+ child2.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+ child2.Put(syncable::ID, child2_id);
+
+ parent.Put(syncable::BASE_VERSION, 1);
+ child1.Put(syncable::BASE_VERSION, 1);
+ child2.Put(syncable::BASE_VERSION, 1);
+ }
+
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
+ ASSERT_EQ(3u, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
+  // A locally-created folder (parent2, local ID) nested under a server-known
+  // folder must commit after its parent, and its ID must be reassigned to a
+  // server ID by the commit.
+  string parent1_name = "1";
+  string parent2_name = "A";
+  string child_name = "B";
+
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+                        parent1_name);
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    parent.Put(syncable::ID, parent_id_);
+    parent.Put(syncable::BASE_VERSION, 1);
+  }
+
+  syncable::Id parent2_id = ids_.NewLocalId();
+  syncable::Id child_id = ids_.NewServerId();
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent2(&wtrans, syncable::CREATE, parent_id_, parent2_name);
+    ASSERT_TRUE(parent2.good());
+    parent2.Put(syncable::IS_UNSYNCED, true);
+    parent2.Put(syncable::IS_DIR, true);
+    parent2.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    parent2.Put(syncable::ID, parent2_id);
+
+    MutableEntry child(&wtrans, syncable::CREATE, parent2_id, child_name);
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::IS_UNSYNCED, true);
+    child.Put(syncable::IS_DIR, true);
+    child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    child.Put(syncable::ID, child_id);
+    child.Put(syncable::BASE_VERSION, 1);
+  }
+
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+  EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
+  ASSERT_EQ(3u, mock_server_->committed_ids().size());
+  // If this test starts failing, be aware other sort orders could be valid.
+  EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+  EXPECT_TRUE(parent2_id == mock_server_->committed_ids()[1]);
+  EXPECT_TRUE(child_id == mock_server_->committed_ids()[2]);
+  {
+    ReadTransaction rtrans(FROM_HERE, directory());
+    // Check that things committed correctly.
+    Entry entry_1(&rtrans, syncable::GET_BY_ID, parent_id_);
+    EXPECT_EQ(entry_1.Get(NON_UNIQUE_NAME), parent1_name);
+    // Check that parent2 is a subfolder of parent1.
+    EXPECT_EQ(1, CountEntriesWithName(&rtrans,
+                                      parent_id_,
+                                      parent2_name));
+
+    // Parent2 was a local ID and thus should have changed on commit!
+    Entry pre_commit_entry_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
+    ASSERT_FALSE(pre_commit_entry_parent2.good());
+
+    // Look up the new ID.
+    Id parent2_committed_id =
+        GetOnlyEntryWithName(&rtrans, parent_id_, parent2_name);
+    EXPECT_TRUE(parent2_committed_id.ServerKnows());
+
+    // The child must have been re-parented to parent2's new server ID.
+    Entry child(&rtrans, syncable::GET_BY_ID, child_id);
+    EXPECT_EQ(parent2_committed_id, child.Get(syncable::PARENT_ID));
+  }
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
+  // Like TestCommitListOrderingAndNewParent, but BOTH parent2 and its child
+  // start with local IDs; both must be committed in parent-first order and
+  // both must receive server IDs on commit.
+  string parent_name = "1";
+  string parent2_name = "A";
+  string child_name = "B";
+
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&wtrans,
+                        syncable::CREATE,
+                        wtrans.root_id(),
+                        parent_name);
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    parent.Put(syncable::ID, parent_id_);
+    parent.Put(syncable::BASE_VERSION, 1);
+  }
+
+  // Remember the child's metahandle so we can find it after its ID changes.
+  int64 meta_handle_b;
+  const Id parent2_local_id = ids_.NewLocalId();
+  const Id child_local_id = ids_.NewLocalId();
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent2(&wtrans, syncable::CREATE, parent_id_, parent2_name);
+    ASSERT_TRUE(parent2.good());
+    parent2.Put(syncable::IS_UNSYNCED, true);
+    parent2.Put(syncable::IS_DIR, true);
+    parent2.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+
+    parent2.Put(syncable::ID, parent2_local_id);
+    MutableEntry child(&wtrans, syncable::CREATE, parent2_local_id, child_name);
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::IS_UNSYNCED, true);
+    child.Put(syncable::IS_DIR, true);
+    child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    child.Put(syncable::ID, child_local_id);
+    meta_handle_b = child.Get(syncable::META_HANDLE);
+  }
+
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+  EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
+  ASSERT_EQ(3u, mock_server_->committed_ids().size());
+  // If this test starts failing, be aware other sort orders could be valid.
+  EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
+  EXPECT_TRUE(parent2_local_id == mock_server_->committed_ids()[1]);
+  EXPECT_TRUE(child_local_id == mock_server_->committed_ids()[2]);
+  {
+    ReadTransaction rtrans(FROM_HERE, directory());
+
+    Entry parent(&rtrans, syncable::GET_BY_ID,
+                 GetOnlyEntryWithName(&rtrans, rtrans.root_id(), parent_name));
+    ASSERT_TRUE(parent.good());
+    EXPECT_TRUE(parent.Get(syncable::ID).ServerKnows());
+
+    Entry parent2(&rtrans, syncable::GET_BY_ID,
+                  GetOnlyEntryWithName(&rtrans, parent.Get(ID), parent2_name));
+    ASSERT_TRUE(parent2.good());
+    EXPECT_TRUE(parent2.Get(syncable::ID).ServerKnows());
+
+    // Id changed on commit, so this should fail.
+    Entry local_parent2_id_entry(&rtrans,
+                                 syncable::GET_BY_ID,
+                                 parent2_local_id);
+    ASSERT_FALSE(local_parent2_id_entry.good());
+
+    // The child (found by its stable metahandle) now has a server ID and is
+    // parented under parent2's new server ID.
+    Entry entry_b(&rtrans, syncable::GET_BY_HANDLE, meta_handle_b);
+    EXPECT_TRUE(entry_b.Get(syncable::ID).ServerKnows());
+    EXPECT_TRUE(parent2.Get(syncable::ID) == entry_b.Get(syncable::PARENT_ID));
+  }
+}
+
+TEST_F(SyncerTest, UpdateWithZeroLengthName) {
+  // No assertions: the success criterion is that the syncer processes
+  // zero-length names in updates without crashing.
+  // One illegal update
+  mock_server_->AddUpdateDirectory(1, 0, "", 1, 10);
+  // And one legal one that we're going to delete.
+  mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10);
+  SyncShareAsDelegate();
+  // Delete the legal one. The new update has a null name.
+  mock_server_->AddUpdateDirectory(2, 0, "", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  SyncShareAsDelegate();
+}
+
+TEST_F(SyncerTest, TestBasicUpdate) {
+  // A single server directory update should be applied cleanly: versions
+  // match, and no unsynced/unapplied/deleted flags remain set.
+  string id = "some_id";
+  string parent_id = "0";
+  string name = "in_root";
+  int64 version = 10;
+  int64 timestamp = 10;
+  mock_server_->AddUpdateDirectory(id, parent_id, name, version, timestamp);
+
+  SyncShareAsDelegate();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    Entry entry(&trans, GET_BY_ID,
+               syncable::Id::CreateFromServerId("some_id"));
+    ASSERT_TRUE(entry.good());
+    EXPECT_TRUE(entry.Get(IS_DIR));
+    EXPECT_TRUE(entry.Get(SERVER_VERSION) == version);
+    EXPECT_TRUE(entry.Get(BASE_VERSION) == version);
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+  }
+}
+
+TEST_F(SyncerTest, IllegalAndLegalUpdates) {
+  // Exercises a mix of applicable and conflicting updates across three sync
+  // cycles: duplicate names (legal), unknown parents (hierarchy conflict),
+  // circular moves (conflict), type flips and stale versions (dropped).
+  Id root = TestIdFactory::root();
+  // Should apply just fine.
+  mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10);
+
+  // Same name. But this SHOULD work.
+  mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10);
+
+  // Unknown parent: should never be applied. "-80" is a legal server ID,
+  // because any string sent by the server is a legal server ID in the sync
+  // protocol, but it's not the ID of any item known to the client.  This
+  // update should succeed validation, but be stuck in the unapplied state
+  // until an item with the server ID "-80" arrives.
+  mock_server_->AddUpdateDirectory(3, -80, "bad_parent", 10, 10);
+
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+  StatusController* status = session_->mutable_status_controller();
+
+  // Id 3 should be in conflict now.
+  EXPECT_EQ(1, status->TotalNumConflictingItems());
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+    ASSERT_TRUE(status->conflict_progress());
+    EXPECT_EQ(1, status->conflict_progress()->HierarchyConflictingItemsSize());
+  }
+
+  // These entries will be used in the second set of updates.
+  mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10);
+  mock_server_->AddUpdateDirectory(5, 0, "circular1", 10, 10);
+  mock_server_->AddUpdateDirectory(6, 5, "circular2", 10, 10);
+  mock_server_->AddUpdateDirectory(9, 3, "bad_parent_child", 10, 10);
+  mock_server_->AddUpdateDirectory(100, 9, "bad_parent_child2", 10, 10);
+  mock_server_->AddUpdateDirectory(10, 0, "dir_to_bookmark", 10, 10);
+
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+  // The three items with an unresolved parent should be unapplied (3, 9, 100).
+  // The name clash should also still be in conflict.
+  EXPECT_EQ(3, status->TotalNumConflictingItems());
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+    ASSERT_TRUE(status->conflict_progress());
+    EXPECT_EQ(3, status->conflict_progress()->HierarchyConflictingItemsSize());
+  }
+
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    // Even though it has the same name, it should work.
+    Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(name_clash.good());
+    EXPECT_FALSE(name_clash.Get(IS_UNAPPLIED_UPDATE))
+        << "Duplicate name SHOULD be OK.";
+
+    Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
+    ASSERT_TRUE(bad_parent.good());
+    EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE))
+        << "child of unknown parent should be in conflict";
+
+    Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
+    ASSERT_TRUE(bad_parent_child.good());
+    EXPECT_TRUE(bad_parent_child.Get(IS_UNAPPLIED_UPDATE))
+        << "grandchild of unknown parent should be in conflict";
+
+    Entry bad_parent_child2(&trans, GET_BY_ID, ids_.FromNumber(100));
+    ASSERT_TRUE(bad_parent_child2.good());
+    EXPECT_TRUE(bad_parent_child2.Get(IS_UNAPPLIED_UPDATE))
+        << "great-grandchild of unknown parent should be in conflict";
+  }
+
+  // Updating 1 should not affect item 2 of the same name.
+  mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20);
+
+  // Moving 5 under 6 will create a cycle: a conflict.
+  mock_server_->AddUpdateDirectory(5, 6, "circular3", 20, 20);
+
+  // Flip the is_dir bit: should fail verify & be dropped.
+  mock_server_->AddUpdateBookmark(10, 0, "dir_to_bookmark", 20, 20);
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+
+  // Version number older than last known: should fail verify & be dropped.
+  mock_server_->AddUpdateDirectory(4, 0, "old_version", 10, 10);
+  syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    // Item 10 keeps its original directory state/version; the type-flip
+    // update was dropped.
+    Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
+    ASSERT_TRUE(still_a_dir.good());
+    EXPECT_FALSE(still_a_dir.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(10u, still_a_dir.Get(BASE_VERSION));
+    EXPECT_EQ(10u, still_a_dir.Get(SERVER_VERSION));
+    EXPECT_TRUE(still_a_dir.Get(IS_DIR));
+
+    Entry rename(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(rename.good());
+    EXPECT_EQ(root, rename.Get(PARENT_ID));
+    EXPECT_EQ("new_name", rename.Get(NON_UNIQUE_NAME));
+    EXPECT_FALSE(rename.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(ids_.FromNumber(1) == rename.Get(ID));
+    EXPECT_EQ(20u, rename.Get(BASE_VERSION));
+
+    Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(name_clash.good());
+    EXPECT_EQ(root, name_clash.Get(PARENT_ID));
+    EXPECT_TRUE(ids_.FromNumber(2) == name_clash.Get(ID));
+    EXPECT_EQ(10u, name_clash.Get(BASE_VERSION));
+    EXPECT_EQ("in_root", name_clash.Get(NON_UNIQUE_NAME));
+
+    Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
+    ASSERT_TRUE(ignored_old_version.good());
+    EXPECT_TRUE(
+        ignored_old_version.Get(NON_UNIQUE_NAME) == "newer_version");
+    EXPECT_FALSE(ignored_old_version.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(20u, ignored_old_version.Get(BASE_VERSION));
+
+    Entry circular_parent_issue(&trans, GET_BY_ID, ids_.FromNumber(5));
+    ASSERT_TRUE(circular_parent_issue.good());
+    EXPECT_TRUE(circular_parent_issue.Get(IS_UNAPPLIED_UPDATE))
+        << "circular move should be in conflict";
+    EXPECT_TRUE(circular_parent_issue.Get(PARENT_ID) == root_id_);
+    EXPECT_TRUE(circular_parent_issue.Get(SERVER_PARENT_ID) ==
+        ids_.FromNumber(6));
+    EXPECT_EQ(10u, circular_parent_issue.Get(BASE_VERSION));
+
+    Entry circular_parent_target(&trans, GET_BY_ID, ids_.FromNumber(6));
+    ASSERT_TRUE(circular_parent_target.good());
+    EXPECT_FALSE(circular_parent_target.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(circular_parent_issue.Get(ID) ==
+        circular_parent_target.Get(PARENT_ID));
+    EXPECT_EQ(10u, circular_parent_target.Get(BASE_VERSION));
+  }
+
+  EXPECT_FALSE(saw_syncer_event_);
+  EXPECT_EQ(4, status->TotalNumConflictingItems());
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+    ASSERT_TRUE(status->conflict_progress());
+    EXPECT_EQ(4, status->conflict_progress()->HierarchyConflictingItemsSize());
+  }
+}
+
+TEST_F(SyncerTest, CommitTimeRename) {
+  // The mock server prefixes committed names with "renamed_"; both committed
+  // entries must pick up the new names, while an unrelated incoming directory
+  // update keeps its original name.
+  int64 metahandle_folder;
+  int64 metahandle_new_entry;
+
+  // Create a folder and an entry.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&trans, CREATE, root_id_, "Folder");
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    parent.Put(IS_UNSYNCED, true);
+    metahandle_folder = parent.Get(META_HANDLE);
+
+    MutableEntry entry(&trans, CREATE, parent.Get(ID), "new_entry");
+    ASSERT_TRUE(entry.good());
+    metahandle_new_entry = entry.Get(META_HANDLE);
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Mix in a directory creation too for later.
+  mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
+  mock_server_->SetCommitTimeRename("renamed_");
+  SyncShareAsDelegate();
+
+  // Verify it was correctly renamed.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry_folder(&trans, GET_BY_HANDLE, metahandle_folder);
+    ASSERT_TRUE(entry_folder.good());
+    EXPECT_EQ("renamed_Folder", entry_folder.Get(NON_UNIQUE_NAME));
+
+    Entry entry_new(&trans, GET_BY_HANDLE, metahandle_new_entry);
+    ASSERT_TRUE(entry_new.good());
+    EXPECT_EQ(entry_folder.Get(ID), entry_new.Get(PARENT_ID));
+    EXPECT_EQ("renamed_new_entry", entry_new.Get(NON_UNIQUE_NAME));
+
+    // And that the unrelated directory creation worked without a rename.
+    Entry new_dir(&trans, GET_BY_ID, ids_.FromNumber(2));
+    EXPECT_TRUE(new_dir.good());
+    EXPECT_EQ("dir_in_root", new_dir.Get(NON_UNIQUE_NAME));
+  }
+}
+
+
+TEST_F(SyncerTest, CommitTimeRenameI18N) {
+  // Same as CommitTimeRename, but the server-imposed prefix is a multi-byte
+  // UTF-8 string; the rename must survive round-tripping intact.
+  // This is utf-8 for the diacritized Internationalization.
+  const char* i18nString = "\xc3\x8e\xc3\xb1\x74\xc3\xa9\x72\xc3\xb1"
+      "\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1\xc3\xa5\x6c\xc3\xae"
+      "\xc2\x9e\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1";
+
+  int64 metahandle;
+  // Create a folder, expect a commit time rename.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&trans, CREATE, root_id_, "Folder");
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(SPECIFICS, DefaultBookmarkSpecifics());
+    parent.Put(IS_UNSYNCED, true);
+    metahandle = parent.Get(META_HANDLE);
+  }
+
+  mock_server_->SetCommitTimeRename(i18nString);
+  SyncShareAsDelegate();
+
+  // Verify it was correctly renamed.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    string expected_folder_name(i18nString);
+    expected_folder_name.append("Folder");
+
+
+    Entry entry_folder(&trans, GET_BY_HANDLE, metahandle);
+    ASSERT_TRUE(entry_folder.good());
+    EXPECT_EQ(expected_folder_name, entry_folder.Get(NON_UNIQUE_NAME));
+  }
+}
+
+// A commit with a lost response produces an update that has to be reunited with
+// its parent.
+TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
+  // Simulates a committed folder whose commit response was lost: the server
+  // later sends the same item back (matched via originator cache GUID +
+  // client ID).  The folder's local ID must be replaced by the server ID,
+  // and its child must be re-parented to the new ID.
+  // Create a folder in the root.
+  int64 metahandle_folder;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, CREATE, trans.root_id(), "new_folder");
+    ASSERT_TRUE(entry.good());
+    entry.Put(IS_DIR, true);
+    entry.Put(SPECIFICS, DefaultBookmarkSpecifics());
+    entry.Put(IS_UNSYNCED, true);
+    metahandle_folder = entry.Get(META_HANDLE);
+  }
+
+  // Verify it and pull the ID out of the folder.
+  syncable::Id folder_id;
+  int64 metahandle_entry;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_HANDLE, metahandle_folder);
+    ASSERT_TRUE(entry.good());
+    folder_id = entry.Get(ID);
+    ASSERT_TRUE(!folder_id.ServerKnows());
+  }
+
+  // Create an entry in the newly created folder.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, CREATE, folder_id, "new_entry");
+    ASSERT_TRUE(entry.good());
+    metahandle_entry = entry.Get(META_HANDLE);
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Verify it and pull the ID out of the entry.
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ(folder_id, entry.Get(PARENT_ID));
+    EXPECT_EQ("new_entry", entry.Get(NON_UNIQUE_NAME));
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value.
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_folder_id =
+      syncable::Id::CreateFromServerId("folder_server_id");
+
+  // The following update should cause the folder to both apply the update, as
+  // well as reassociate the id.
+  mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
+      "new_folder", new_version, timestamp);
+  mock_server_->SetLastUpdateOriginatorFields(
+      directory()->cache_guid(), folder_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Alright! Apply that update!
+  SyncShareAsDelegate();
+  {
+    // The folder's ID should have been updated.
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry folder(&trans, GET_BY_HANDLE, metahandle_folder);
+    ASSERT_TRUE(folder.good());
+    EXPECT_EQ("new_folder", folder.Get(NON_UNIQUE_NAME));
+    EXPECT_TRUE(new_version == folder.Get(BASE_VERSION));
+    EXPECT_TRUE(new_folder_id == folder.Get(ID));
+    EXPECT_TRUE(folder.Get(ID).ServerKnows());
+    EXPECT_EQ(trans.root_id(), folder.Get(PARENT_ID));
+
+    // Since it was updated, the old folder should not exist.
+    Entry old_dead_folder(&trans, GET_BY_ID, folder_id);
+    EXPECT_FALSE(old_dead_folder.good());
+
+    // The child's parent should have changed.
+    Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ("new_entry", entry.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(new_folder_id, entry.Get(PARENT_ID));
+    EXPECT_TRUE(!entry.Get(ID).ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+}
+
+// A commit with a lost response produces an update that has to be reunited with
+// its parent.
+TEST_F(SyncerTest, CommitReuniteUpdate) {
+  // Lost-commit-response scenario for a leaf entry (no children): the
+  // originator-matched server update must reassociate the local item to the
+  // server ID and apply the new version.
+  // Create an entry in the root.
+  int64 entry_metahandle;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, CREATE, trans.root_id(), "new_entry");
+    ASSERT_TRUE(entry.good());
+    entry_metahandle = entry.Get(META_HANDLE);
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Verify it and pull the ID out.
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
+    ASSERT_TRUE(entry.good());
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value.
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+  // Generate an update from the server with a relevant ID reassignment.
+  mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+      "new_entry", new_version, timestamp);
+  mock_server_->SetLastUpdateOriginatorFields(
+      directory()->cache_guid(), entry_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Alright! Apply that update!
+  SyncShareAsDelegate();
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
+    ASSERT_TRUE(entry.good());
+    EXPECT_TRUE(new_version == entry.Get(BASE_VERSION));
+    EXPECT_TRUE(new_entry_id == entry.Get(ID));
+    EXPECT_EQ("new_entry", entry.Get(NON_UNIQUE_NAME));
+  }
+}
+
+// A commit with a lost response must work even if the local entry was deleted
+// before the update is applied. We should not duplicate the local entry in
+// this case, but just create another one alongside. We may wish to examine
+// this behavior in the future as it can create hanging uploads that never
+// finish, that must be cleaned up on the server side after some time.
+TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
+  // Lost-commit-response scenario where the local entry is deleted before the
+  // reuniting update is applied: the update creates a fresh (non-deleted)
+  // entry alongside, and the old local entry stays deleted.
+  // Create a entry in the root.
+  int64 entry_metahandle;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, CREATE, trans.root_id(), "new_entry");
+    ASSERT_TRUE(entry.good());
+    entry_metahandle = entry.Get(META_HANDLE);
+    WriteTestDataToEntry(&trans, &entry);
+  }
+  // Verify it and pull the ID out.
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
+    ASSERT_TRUE(entry.good());
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value.
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+  // Generate an update from the server with a relevant ID reassignment.
+  mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+      "new_entry", new_version, timestamp);
+  mock_server_->SetLastUpdateOriginatorFields(
+      directory()->cache_guid(),
+      entry_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Purposefully delete the entry now before the update application finishes.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    Id new_entry_id = GetOnlyEntryWithName(
+        &trans, trans.root_id(), "new_entry");
+    MutableEntry entry(&trans, GET_BY_ID, new_entry_id);
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::IS_DEL, true);
+  }
+
+  // Just don't CHECK fail in sync, have the update split.
+  SyncShareAsDelegate();
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Id new_entry_id = GetOnlyEntryWithName(
+        &trans, trans.root_id(), "new_entry");
+    Entry entry(&trans, GET_BY_ID, new_entry_id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_DEL));
+
+    Entry old_entry(&trans, GET_BY_ID, entry_id);
+    ASSERT_TRUE(old_entry.good());
+    EXPECT_TRUE(old_entry.Get(IS_DEL));
+  }
+}
+
+// TODO(chron): Add more unsanitized name tests.
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
+  // Entries whose names contain characters needing sanitization ("A/A") and
+  // that are marked both unsynced and unapplied must resolve cleanly: after
+  // looping sync, both flags are cleared and the server version sticks.
+  // Mirrors ConflictMatchingEntryHandlesNormalNames with plain names.
+  mock_server_->AddUpdateDirectory(1, 0, "A/A", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "B/B", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  SyncShareAsDelegate();
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    A.Put(IS_UNSYNCED, true);
+    A.Put(IS_UNAPPLIED_UPDATE, true);
+    A.Put(SERVER_VERSION, 20);
+
+    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    B.Put(IS_UNAPPLIED_UPDATE, true);
+    B.Put(SERVER_VERSION, 20);
+  }
+  LoopSyncShare();
+  saw_syncer_event_ = false;
+  mock_server_->set_conflict_all_commits(false);
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    EXPECT_TRUE(A.Get(IS_UNSYNCED) == false);
+    EXPECT_TRUE(A.Get(IS_UNAPPLIED_UPDATE) == false);
+    EXPECT_TRUE(A.Get(SERVER_VERSION) == 20);
+
+    Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    EXPECT_TRUE(B.Get(IS_UNSYNCED) == false);
+    EXPECT_TRUE(B.Get(IS_UNAPPLIED_UPDATE) == false);
+    EXPECT_TRUE(B.Get(SERVER_VERSION) == 20);
+  }
+}
+
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesNormalNames) {
+  // Same scenario as ConflictMatchingEntryHandlesUnsanitizedNames but with
+  // names that need no sanitization: unsynced+unapplied entries must resolve
+  // to clean state with the server version applied.
+  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  SyncShareAsDelegate();
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    A.Put(IS_UNSYNCED, true);
+    A.Put(IS_UNAPPLIED_UPDATE, true);
+    A.Put(SERVER_VERSION, 20);
+
+    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    B.Put(IS_UNAPPLIED_UPDATE, true);
+    B.Put(SERVER_VERSION, 20);
+  }
+  LoopSyncShare();
+  saw_syncer_event_ = false;
+  mock_server_->set_conflict_all_commits(false);
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    EXPECT_TRUE(A.Get(IS_UNSYNCED) == false);
+    EXPECT_TRUE(A.Get(IS_UNAPPLIED_UPDATE) == false);
+    EXPECT_TRUE(A.Get(SERVER_VERSION) == 20);
+
+    Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    EXPECT_TRUE(B.Get(IS_UNSYNCED) == false);
+    EXPECT_TRUE(B.Get(IS_UNAPPLIED_UPDATE) == false);
+    EXPECT_TRUE(B.Get(SERVER_VERSION) == 20);
+  }
+}
+
+TEST_F(SyncerTest, ReverseFolderOrderingTest) {
+  // Updates arrive deepest-descendant-first (4, 3, 5, 2, 1); the syncer must
+  // still stitch the whole chain together so the deepest child is reachable.
+  mock_server_->AddUpdateDirectory(4, 3, "ggchild", 10, 10);
+  mock_server_->AddUpdateDirectory(3, 2, "gchild", 10, 10);
+  mock_server_->AddUpdateDirectory(5, 4, "gggchild", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 1, "child", 10, 10);
+  mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10);
+  LoopSyncShare();
+  ReadTransaction trans(FROM_HERE, directory());
+
+  Id child_id = GetOnlyEntryWithName(
+        &trans, ids_.FromNumber(4), "gggchild");
+  Entry child(&trans, GET_BY_ID, child_id);
+  ASSERT_TRUE(child.good());
+}
+
+// Fixture providing a mid-commit callback that creates a new folder (named
+// "bob") inside the already-existing "bob" folder, simulating local activity
+// that happens while a commit is in flight.
+class EntryCreatedInNewFolderTest : public SyncerTest {
+ public:
+  // Creates an unsynced directory entry under the root-level "bob" folder.
+  // Invoked by the mock server between commit request and response.
+  void CreateFolderInBob() {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry bob(&trans,
+                     syncable::GET_BY_ID,
+                     GetOnlyEntryWithName(&trans,
+                                          TestIdFactory::root(),
+                                          "bob"));
+    CHECK(bob.good());
+
+    MutableEntry entry2(&trans, syncable::CREATE, bob.Get(syncable::ID),
+                        "bob");
+    CHECK(entry2.good());
+    entry2.Put(syncable::IS_DIR, true);
+    entry2.Put(syncable::IS_UNSYNCED, true);
+    entry2.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+  }
+};
+
+TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
+  // An entry created (via the mid-commit callback) inside a folder that is
+  // being committed in the same cycle must end up correctly parented under
+  // the folder's final server ID.
+  directory()->set_store_birthday(mock_server_->store_birthday());
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, syncable::CREATE, trans.root_id(),
+                       "bob");
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::IS_DIR, true);
+    entry.Put(syncable::IS_UNSYNCED, true);
+    entry.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+  }
+
+  // CreateFolderInBob() runs while the commit request is outstanding.
+  mock_server_->SetMidCommitCallback(
+      base::Bind(&EntryCreatedInNewFolderTest::CreateFolderInBob,
+                 base::Unretained(this)));
+  syncer_->SyncShare(session_.get(), BUILD_COMMIT_REQUEST, SYNCER_END);
+  // Only the original "bob" folder was part of this commit.
+  EXPECT_EQ(1u, mock_server_->committed_ids().size());
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry parent_entry(&trans, syncable::GET_BY_ID,
+        GetOnlyEntryWithName(&trans, TestIdFactory::root(), "bob"));
+    ASSERT_TRUE(parent_entry.good());
+
+    Id child_id =
+        GetOnlyEntryWithName(&trans, parent_entry.Get(ID), "bob");
+    Entry child(&trans, syncable::GET_BY_ID, child_id);
+    ASSERT_TRUE(child.good());
+    EXPECT_EQ(parent_entry.Get(ID), child.Get(PARENT_ID));
+  }
+}
+
+TEST_F(SyncerTest, NegativeIDInUpdate) {
+  // No assertions: passes as long as the syncer does not CHECK-fail when a
+  // server update carries a negative numeric ID.
+  mock_server_->AddUpdateBookmark(-10, 0, "bad", 40, 40);
+  SyncShareAsDelegate();
+  // The negative id would make us CHECK!
+}
+
+TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
+  // Commit an item, then receive an external update for the same server ID
+  // while all further commits conflict; repeated sync cycles must not crash.
+  int64 metahandle_fred;
+  {
+    // Create an item.
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry fred_match(&trans, CREATE, trans.root_id(),
+                            "fred_match");
+    ASSERT_TRUE(fred_match.good());
+    metahandle_fred = fred_match.Get(META_HANDLE);
+    WriteTestDataToEntry(&trans, &fred_match);
+  }
+  // Commit it.
+  SyncShareAsDelegate();
+  EXPECT_EQ(1u, mock_server_->committed_ids().size());
+  mock_server_->set_conflict_all_commits(true);
+  syncable::Id fred_match_id;
+  {
+    // Now receive a change from outside.
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry fred_match(&trans, GET_BY_HANDLE, metahandle_fred);
+    ASSERT_TRUE(fred_match.good());
+    EXPECT_TRUE(fred_match.Get(ID).ServerKnows());
+    fred_match_id = fred_match.Get(ID);
+    mock_server_->AddUpdateBookmark(fred_match_id, trans.root_id(),
+        "fred_match", 40, 40);
+  }
+  // Run the syncer.
+  for (int i = 0 ; i < 30 ; ++i) {
+    SyncShareAsDelegate();
+  }
+}
+
+/**
+ * In the event that we have a double changed entry, that is changed on both
+ * the client and the server, the conflict resolver should just drop one of
+ * them and accept the other.
+ */
+
+TEST_F(SyncerTest, DoublyChangedWithResolver) {
+  // Item changed locally AND on the server: conflict resolution should keep
+  // the local (unsynced) version and leave exactly one child entry.
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&wtrans, syncable::CREATE, root_id_, "Folder");
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, parent_id_);
+    parent.Put(syncable::BASE_VERSION, 5);
+    parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    MutableEntry child(&wtrans, syncable::CREATE, parent_id_, "Pete.htm");
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::ID, child_id_);
+    child.Put(syncable::BASE_VERSION, 10);
+    WriteTestDataToEntry(&wtrans, &child);
+  }
+  // Server-side rename of the same child, with all commits conflicting.
+  mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Pete2.htm", 11, 10);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare();
+  syncable::Directory::ChildHandles children;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    directory()->GetChildHandlesById(&trans, parent_id_, &children);
+    // We expect the conflict resolver to preserve the local entry.
+    Entry child(&trans, syncable::GET_BY_ID, child_id_);
+    ASSERT_TRUE(child.good());
+    EXPECT_TRUE(child.Get(syncable::IS_UNSYNCED));
+    EXPECT_FALSE(child.Get(syncable::IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(child.Get(SPECIFICS).has_bookmark());
+    EXPECT_EQ("Pete.htm", child.Get(NON_UNIQUE_NAME));
+    VerifyTestBookmarkDataInEntry(&child);
+  }
+
+  // Only one entry, since we just overwrite one.
+  EXPECT_EQ(1u, children.size());
+  saw_syncer_event_ = false;
+}
+
+// We got this repro case when someone was editing bookmarks while sync was
+// occurring. The entry had changed out underneath the user.
+TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
+  // The server's echo of our own commit (AddUpdateFromLastCommit) must not
+  // clobber local fields such as MTIME when it is applied.
+  const base::Time& test_time = ProtoTimeToTime(123456);
+  int64 entry_metahandle;
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&wtrans, syncable::CREATE, root_id_, "Pete");
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(ID).ServerKnows());
+    entry.Put(syncable::IS_DIR, true);
+    entry.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    entry.Put(syncable::IS_UNSYNCED, true);
+    entry.Put(syncable::MTIME, test_time);
+    entry_metahandle = entry.Get(META_HANDLE);
+  }
+  SyncShareAsDelegate();
+  syncable::Id id;
+  int64 version;
+  int64 server_position_in_parent;
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, syncable::GET_BY_HANDLE, entry_metahandle);
+    ASSERT_TRUE(entry.good());
+    id = entry.Get(ID);
+    EXPECT_TRUE(id.ServerKnows());
+    version = entry.Get(BASE_VERSION);
+    server_position_in_parent = entry.Get(SERVER_POSITION_IN_PARENT);
+  }
+  // The echoed update must reflect exactly what was committed.
+  sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
+  EXPECT_EQ("Pete", update->name());
+  EXPECT_EQ(id.GetServerId(), update->id_string());
+  EXPECT_EQ(root_id_.GetServerId(), update->parent_id_string());
+  EXPECT_EQ(version, update->version());
+  EXPECT_EQ(server_position_in_parent, update->position_in_parent());
+  SyncShareAsDelegate();
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, syncable::GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_TRUE(entry.Get(MTIME) == test_time);
+  }
+}
+
+TEST_F(SyncerTest, ParentAndChildBothMatch) {
+  // Local parent+child already carry the same server IDs as the incoming
+  // updates; after syncing, nothing should be duplicated, unapplied, or
+  // unsynced.
+  const syncable::FullModelTypeSet all_types =
+      syncable::FullModelTypeSet::All();
+  syncable::Id parent_id = ids_.NewServerId();
+  syncable::Id child_id = ids_.NewServerId();
+
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&wtrans, CREATE, root_id_, "Folder");
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    parent.Put(ID, parent_id);
+    parent.Put(BASE_VERSION, 1);
+    parent.Put(SPECIFICS, DefaultBookmarkSpecifics());
+
+    MutableEntry child(&wtrans, CREATE, parent.Get(ID), "test.htm");
+    ASSERT_TRUE(child.good());
+    child.Put(ID, child_id);
+    child.Put(BASE_VERSION, 1);
+    child.Put(SPECIFICS, DefaultBookmarkSpecifics());
+    WriteTestDataToEntry(&wtrans, &child);
+  }
+  mock_server_->AddUpdateDirectory(parent_id, root_id_, "Folder", 10, 10);
+  mock_server_->AddUpdateBookmark(child_id, parent_id, "test.htm", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  SyncShareAsDelegate();
+  SyncShareAsDelegate();
+  SyncShareAsDelegate();
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Directory::ChildHandles children;
+    directory()->GetChildHandlesById(&trans, root_id_, &children);
+    EXPECT_EQ(1u, children.size());
+    directory()->GetChildHandlesById(&trans, parent_id, &children);
+    EXPECT_EQ(1u, children.size());
+    Directory::UnappliedUpdateMetaHandles unapplied;
+    directory()->GetUnappliedUpdateMetaHandles(&trans, all_types, &unapplied);
+    EXPECT_EQ(0u, unapplied.size());
+    syncable::Directory::UnsyncedMetaHandles unsynced;
+    directory()->GetUnsyncedMetaHandles(&trans, &unsynced);
+    EXPECT_EQ(0u, unsynced.size());
+    saw_syncer_event_ = false;
+  }
+}
+
+TEST_F(SyncerTest, CommittingNewDeleted) {
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, CREATE, trans.root_id(), "bob");
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_DEL, true);
+ }
+ SyncShareAsDelegate();
+ EXPECT_EQ(0u, mock_server_->committed_ids().size());
+}
+
+// Original problem synopsis:
+// Check failed: entry->Get(BASE_VERSION) <= entry->Get(SERVER_VERSION)
+// Client creates entry, client finishes committing entry. Between
+// commit and getting update back, we delete the entry.
+// We get the update for the entry, but the local one was modified
+// so we store the entry but don't apply it. IS_UNAPPLIED_UPDATE is set.
+// We commit deletion and get a new version number.
+// We apply unapplied updates again before we get the update about the deletion.
+// This means we have an unapplied update where server_version < base_version.
+TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
+ // This test is a little fake.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, CREATE, trans.root_id(), "bob");
+ entry.Put(ID, ids_.FromNumber(20));
+ entry.Put(BASE_VERSION, 1);
+ entry.Put(SERVER_VERSION, 1);
+ entry.Put(SERVER_PARENT_ID, ids_.FromNumber(9999)); // Bad parent.
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_UNAPPLIED_UPDATE, true);
+ entry.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ entry.Put(SERVER_SPECIFICS, DefaultBookmarkSpecifics());
+ entry.Put(IS_DEL, false);
+ }
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ EXPECT_EQ(1, session_->status_controller().TotalNumConflictingItems());
+ saw_syncer_event_ = false;
+}
+
+// Original problem synopsis:
+// Illegal parent
+// Unexpected error during sync if we:
+// make a new folder bob
+// wait for sync
+// make a new folder fred
+// move bob into fred
+// remove bob
+// remove fred
+// if no syncing occured midway, bob will have an illegal parent
+TEST_F(SyncerTest, DeletingEntryInFolder) {
+ // This test is a little fake.
+ int64 existing_metahandle;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, CREATE, trans.root_id(), "existing");
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ entry.Put(IS_UNSYNCED, true);
+ existing_metahandle = entry.Get(META_HANDLE);
+ }
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry newfolder(&trans, CREATE, trans.root_id(), "new");
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_DIR, true);
+ newfolder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ newfolder.Put(IS_UNSYNCED, true);
+
+ MutableEntry existing(&trans, GET_BY_HANDLE, existing_metahandle);
+ ASSERT_TRUE(existing.good());
+ existing.Put(PARENT_ID, newfolder.Get(ID));
+ existing.Put(IS_UNSYNCED, true);
+ EXPECT_TRUE(existing.Get(ID).ServerKnows());
+
+ newfolder.Put(IS_DEL, true);
+ existing.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ const StatusController& status(session_->status_controller());
+ EXPECT_EQ(0, status.TotalNumServerConflictingItems());
+}
+
+TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
+ int64 newfolder_metahandle;
+
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry newfolder(&trans, CREATE, ids_.FromNumber(1), "local");
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_UNSYNCED, true);
+ newfolder.Put(IS_DIR, true);
+ newfolder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ newfolder_metahandle = newfolder.Get(META_HANDLE);
+ }
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, APPLY_UPDATES);
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry(&trans, syncable::GET_BY_HANDLE, newfolder_metahandle);
+ ASSERT_TRUE(entry.good());
+ }
+}
+
+TEST_F(SyncerTest, FolderSwapUpdate) {
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ SyncShareAsDelegate();
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_TRUE("fred" == id1.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_TRUE("bob" == id2.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
+ }
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, NameCollidingFolderSwapWorksFine) {
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(4096, 0, "alice", 1, 10);
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_TRUE("bob" == id1.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_TRUE("fred" == id2.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_TRUE("alice" == id3.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
+ }
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(4096, 0, "bob", 2, 20);
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_TRUE("fred" == id1.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_TRUE("bob" == id2.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_TRUE("bob" == id3.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
+ }
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, CommitManyItemsInOneGo) {
+ uint32 max_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * max_batches;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = base::StringPrintf("%d", i);
+ string name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ e.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
+ }
+ uint32 num_loops = 0;
+ while (SyncShareAsDelegate()) {
+ num_loops++;
+ ASSERT_LT(num_loops, max_batches * 2);
+ }
+ EXPECT_GE(mock_server_->commit_messages().size(), max_batches);
+}
+
+TEST_F(SyncerTest, HugeConflict) {
+ int item_count = 300; // We should be able to do 300 or 3000 w/o issue.
+
+ syncable::Id parent_id = ids_.NewServerId();
+ syncable::Id last_id = parent_id;
+ vector<syncable::Id> tree_ids;
+
+ // Create a lot of updates for which the parent does not exist yet.
+ // Generate a huge deep tree which should all fail to apply at first.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (int i = 0; i < item_count ; i++) {
+ syncable::Id next_id = ids_.NewServerId();
+ tree_ids.push_back(next_id);
+ mock_server_->AddUpdateDirectory(next_id, last_id, "BOB", 2, 20);
+ last_id = next_id;
+ }
+ }
+ SyncShareAsDelegate();
+
+ // Check they're in the expected conflict state.
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ for (int i = 0; i < item_count; i++) {
+ Entry e(&trans, GET_BY_ID, tree_ids[i]);
+ // They should all exist but none should be applied.
+ ASSERT_TRUE(e.good());
+ EXPECT_TRUE(e.Get(IS_DEL));
+ EXPECT_TRUE(e.Get(IS_UNAPPLIED_UPDATE));
+ }
+ }
+
+ // Add the missing parent directory.
+ mock_server_->AddUpdateDirectory(parent_id, TestIdFactory::root(),
+ "BOB", 2, 20);
+ SyncShareAsDelegate();
+
+ // Now they should all be OK.
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ for (int i = 0; i < item_count; i++) {
+ Entry e(&trans, GET_BY_ID, tree_ids[i]);
+ ASSERT_TRUE(e.good());
+ EXPECT_FALSE(e.Get(IS_DEL));
+ EXPECT_FALSE(e.Get(IS_UNAPPLIED_UPDATE));
+ }
+ }
+}
+
+TEST_F(SyncerTest, DontCrashOnCaseChange) {
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry e(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(e.good());
+ e.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(1, 0, "BOB", 2, 20);
+ SyncShareAsDelegate(); // USED TO CAUSE AN ASSERT
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, UnsyncedItemAndUpdate) {
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ SyncShareAsDelegate();
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(2, 0, "bob", 2, 20);
+ SyncShareAsDelegate(); // USED TO CAUSE AN ASSERT
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
+ mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10);
+ SyncShareAsDelegate();
+ int64 local_folder_handle;
+ syncable::Id local_folder_id;
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry new_entry(&wtrans, CREATE, wtrans.root_id(), "Bar.htm");
+ ASSERT_TRUE(new_entry.good());
+ local_folder_id = new_entry.Get(ID);
+ local_folder_handle = new_entry.Get(META_HANDLE);
+ new_entry.Put(IS_UNSYNCED, true);
+ new_entry.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(old.good());
+ WriteTestDataToEntry(&wtrans, &old);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+ {
+ // Update #20 should have been dropped in favor of the local version.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
+ ASSERT_TRUE(server.good());
+ ASSERT_TRUE(local.good());
+ EXPECT_TRUE(local.Get(META_HANDLE) != server.Get(META_HANDLE));
+ EXPECT_FALSE(server.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(local.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(server.Get(IS_UNSYNCED));
+ EXPECT_TRUE(local.Get(IS_UNSYNCED));
+ EXPECT_EQ("Foo.htm", server.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("Bar.htm", local.Get(NON_UNIQUE_NAME));
+ }
+ // Allow local changes to commit.
+ mock_server_->set_conflict_all_commits(false);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+
+ // Now add a server change to make the two names equal. There should
+ // be no conflict with that, since names are not unique.
+ mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 30, 30);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
+ ASSERT_TRUE(server.good());
+ ASSERT_TRUE(local.good());
+ EXPECT_TRUE(local.Get(META_HANDLE) != server.Get(META_HANDLE));
+ EXPECT_FALSE(server.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(local.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(server.Get(IS_UNSYNCED));
+ EXPECT_FALSE(local.Get(IS_UNSYNCED));
+ EXPECT_EQ("Bar.htm", server.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("Bar.htm", local.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("http://google.com", // Default from AddUpdateBookmark.
+ server.Get(SPECIFICS).bookmark().url());
+ }
+}
+
+// Same as NewEntryAnddServerEntrySharePath, but using the old-style protocol.
+TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath_OldBookmarksProto) {
+ mock_server_->set_use_legacy_bookmarks_protocol(true);
+ mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10);
+ SyncShareAsDelegate();
+ int64 local_folder_handle;
+ syncable::Id local_folder_id;
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry new_entry(&wtrans, CREATE, wtrans.root_id(), "Bar.htm");
+ ASSERT_TRUE(new_entry.good());
+ local_folder_id = new_entry.Get(ID);
+ local_folder_handle = new_entry.Get(META_HANDLE);
+ new_entry.Put(IS_UNSYNCED, true);
+ new_entry.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(old.good());
+ WriteTestDataToEntry(&wtrans, &old);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+ {
+ // Update #20 should have been dropped in favor of the local version.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
+ ASSERT_TRUE(server.good());
+ ASSERT_TRUE(local.good());
+ EXPECT_TRUE(local.Get(META_HANDLE) != server.Get(META_HANDLE));
+ EXPECT_FALSE(server.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(local.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(server.Get(IS_UNSYNCED));
+ EXPECT_TRUE(local.Get(IS_UNSYNCED));
+ EXPECT_EQ("Foo.htm", server.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("Bar.htm", local.Get(NON_UNIQUE_NAME));
+ }
+ // Allow local changes to commit.
+ mock_server_->set_conflict_all_commits(false);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+
+ // Now add a server change to make the two names equal. There should
+ // be no conflict with that, since names are not unique.
+ mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 30, 30);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry server(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry local(&wtrans, GET_BY_HANDLE, local_folder_handle);
+ ASSERT_TRUE(server.good());
+ ASSERT_TRUE(local.good());
+ EXPECT_TRUE(local.Get(META_HANDLE) != server.Get(META_HANDLE));
+ EXPECT_FALSE(server.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(local.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(server.Get(IS_UNSYNCED));
+ EXPECT_FALSE(local.Get(IS_UNSYNCED));
+ EXPECT_EQ("Bar.htm", server.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("Bar.htm", local.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ("http://google.com", // Default from AddUpdateBookmark.
+ server.Get(SPECIFICS).bookmark().url());
+ }
+}
+
+// Circular links should be resolved by the server.
+TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
+ // we don't currently resolve this. This test ensures we don't.
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ ASSERT_TRUE(A.Put(PARENT_ID, ids_.FromNumber(2)));
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, "B"));
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "A", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_TRUE(A.Get(NON_UNIQUE_NAME) == "B");
+ EXPECT_TRUE(B.Get(NON_UNIQUE_NAME) == "B");
+ }
+}
+
+TEST_F(SyncerTest, SwapEntryNames) {
+ // Simple transaction test.
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNSYNCED, true);
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, "C"));
+ ASSERT_TRUE(B.Put(NON_UNIQUE_NAME, "A"));
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, "B"));
+ }
+ SyncShareAsDelegate();
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, DualDeletionWithNewItemNameClash) {
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateBookmark(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ WriteTestDataToEntry(&trans, &B);
+ B.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, "A", 11, 11);
+ mock_server_->SetLastUpdateDeleted();
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_FALSE(B.Get(IS_UNSYNCED));
+ EXPECT_FALSE(B.Get(IS_UNAPPLIED_UPDATE));
+ }
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
+ int64 bob_metahandle;
+
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob_metahandle = bob.Get(META_HANDLE);
+ WriteTestDataToEntry(&trans, &bob);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry bob(&trans, GET_BY_HANDLE, bob_metahandle);
+ ASSERT_TRUE(bob.good());
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(ID).ServerKnows());
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_DEL));
+ }
+ saw_syncer_event_ = false;
+}
+
+// This test is to reproduce a check failure. Sometimes we would get a bad ID
+// back when creating an entry.
+TEST_F(SyncerTest, DuplicateIDReturn) {
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry folder(&trans, CREATE, trans.root_id(), "bob");
+ ASSERT_TRUE(folder.good());
+ folder.Put(IS_UNSYNCED, true);
+ folder.Put(IS_DIR, true);
+ folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ MutableEntry folder2(&trans, CREATE, trans.root_id(), "fred");
+ ASSERT_TRUE(folder2.good());
+ folder2.Put(IS_UNSYNCED, false);
+ folder2.Put(IS_DIR, true);
+ folder2.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ folder2.Put(BASE_VERSION, 3);
+ folder2.Put(ID, syncable::Id::CreateFromServerId("mock_server:10000"));
+ }
+ mock_server_->set_next_new_id(10000);
+ EXPECT_EQ(1u, directory()->unsynced_entity_count());
+ // we get back a bad id in here (should never happen).
+ SyncShareAsDelegate();
+ EXPECT_EQ(1u, directory()->unsynced_entity_count());
+ SyncShareAsDelegate(); // another bad id in here.
+ EXPECT_EQ(0u, directory()->unsynced_entity_count());
+ saw_syncer_event_ = false;
+}
+
+TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ // This is valid, because the parent could have gone away a long time ago.
+ bob.Put(PARENT_ID, ids_.FromNumber(54));
+ bob.Put(IS_DEL, true);
+ bob.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 1, 10);
+ SyncShareAsDelegate();
+ SyncShareAsDelegate();
+}
+
+TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+
+ MutableEntry local_deleted(&trans, CREATE, trans.root_id(), "name");
+ local_deleted.Put(ID, ids_.FromNumber(1));
+ local_deleted.Put(BASE_VERSION, 1);
+ local_deleted.Put(IS_DEL, true);
+ local_deleted.Put(IS_DIR, false);
+ local_deleted.Put(IS_UNSYNCED, true);
+ local_deleted.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
+
+ mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
+
+ // We don't care about actually committing, just the resolution.
+ mock_server_->set_conflict_all_commits(true);
+ SyncShareAsDelegate();
+
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_TRUE(local_deleted.Get(BASE_VERSION) == 10);
+ EXPECT_TRUE(local_deleted.Get(IS_UNAPPLIED_UPDATE) == false);
+ EXPECT_TRUE(local_deleted.Get(IS_UNSYNCED) == true);
+ EXPECT_TRUE(local_deleted.Get(IS_DEL) == true);
+ EXPECT_TRUE(local_deleted.Get(IS_DIR) == false);
+ }
+}
+
+// See what happens if the IS_DIR bit gets flipped. This can cause us
+// all kinds of disasters.
+TEST_F(SyncerTest, UpdateFlipsTheFolderBit) {
+ // Local object: a deleted directory (container), revision 1, unsynced.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+
+ MutableEntry local_deleted(&trans, CREATE, trans.root_id(), "name");
+ local_deleted.Put(ID, ids_.FromNumber(1));
+ local_deleted.Put(BASE_VERSION, 1);
+ local_deleted.Put(IS_DEL, true);
+ local_deleted.Put(IS_DIR, true);
+ local_deleted.Put(IS_UNSYNCED, true);
+ local_deleted.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
+
+ // Server update: entry-type object (not a container), revision 10.
+ mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
+
+ // Don't attempt to commit.
+ mock_server_->set_conflict_all_commits(true);
+
+ // The syncer should not attempt to apply the invalid update.
+ SyncShareAsDelegate();
+
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_TRUE(local_deleted.Get(BASE_VERSION) == 1);
+ EXPECT_TRUE(local_deleted.Get(IS_UNAPPLIED_UPDATE) == false);
+ EXPECT_TRUE(local_deleted.Get(IS_UNSYNCED) == true);
+ EXPECT_TRUE(local_deleted.Get(IS_DEL) == true);
+ EXPECT_TRUE(local_deleted.Get(IS_DIR) == true);
+ }
+}
+
+// Bug Synopsis:
+// Merge conflict resolution will merge a new local entry with another entry
+// that needs updates, resulting in CHECK.
+TEST_F(SyncerTest, MergingExistingItems) {
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, CREATE, trans.root_id(), "Copy of base");
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
+ SyncRepeatedlyToTriggerConflictResolution(session_.get());
+}
+
+// In this test a long changelog contains a child at the start of the changelog
+// and a parent at the end. While these updates are in progress the client would
+// appear stuck.
+TEST_F(SyncerTest, LongChangelistWithApplicationConflict) {
+ const int depth = 400;
+ syncable::Id folder_id = ids_.FromNumber(1);
+
+ // First we an item in a folder in the root. However the folder won't come
+ // till much later.
+ syncable::Id stuck_entry_id = TestIdFactory::FromNumber(99999);
+ mock_server_->AddUpdateDirectory(stuck_entry_id,
+ folder_id, "stuck", 1, 1);
+ mock_server_->SetChangesRemaining(depth - 1);
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+
+ // Buffer up a very long series of downloads.
+ // We should never be stuck (conflict resolution shouldn't
+ // kick in so long as we're making forward progress).
+ for (int i = 0; i < depth; i++) {
+ mock_server_->NextUpdateBatch();
+ mock_server_->SetNewTimestamp(i + 1);
+ mock_server_->SetChangesRemaining(depth - i);
+ }
+
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+
+ // Ensure our folder hasn't somehow applied.
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry child(&trans, GET_BY_ID, stuck_entry_id);
+ EXPECT_TRUE(child.good());
+ EXPECT_TRUE(child.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(child.Get(IS_DEL));
+ EXPECT_FALSE(child.Get(IS_UNSYNCED));
+ }
+
+ // And finally the folder.
+ mock_server_->AddUpdateDirectory(folder_id,
+ TestIdFactory::root(), "folder", 1, 1);
+ mock_server_->SetChangesRemaining(0);
+ LoopSyncShare();
+ LoopSyncShare();
+ // Check that everything is as expected after the commit.
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry(&trans, GET_BY_ID, folder_id);
+ ASSERT_TRUE(entry.good());
+ Entry child(&trans, GET_BY_ID, stuck_entry_id);
+ EXPECT_EQ(entry.Get(ID), child.Get(PARENT_ID));
+ EXPECT_EQ("stuck", child.Get(NON_UNIQUE_NAME));
+ EXPECT_TRUE(child.good());
+ }
+}
+
+TEST_F(SyncerTest, DontMergeTwoExistingItems) {
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
+ mock_server_->AddUpdateBookmark(2, 0, "base2", 10, 10);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Put(NON_UNIQUE_NAME, "Copy of base"));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
+ SyncRepeatedlyToTriggerConflictResolution(session_.get());
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_FALSE(entry1.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry1.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry1.Get(IS_DEL));
+ Entry entry2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ EXPECT_FALSE(entry2.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(entry2.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry2.Get(IS_DEL));
+ EXPECT_EQ(entry1.Get(NON_UNIQUE_NAME), entry2.Get(NON_UNIQUE_NAME));
+ }
+}
+
+TEST_F(SyncerTest, TestUndeleteUpdate) {
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 1, 2);
+ SyncShareAsDelegate();
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 2, 3);
+ mock_server_->SetLastUpdateDeleted();
+ SyncShareAsDelegate();
+
+ int64 metahandle;
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ metahandle = entry.Get(META_HANDLE);
+ }
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4);
+ mock_server_->SetLastUpdateDeleted();
+ SyncShareAsDelegate();
+ // This used to be rejected as it's an undeletion. Now, it results in moving
+ // the delete path aside.
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 3, 5);
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ EXPECT_TRUE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_NE(entry.Get(META_HANDLE), metahandle);
+ }
+}
+
+TEST_F(SyncerTest, TestMoveSanitizedNamedFolder) {
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
+ mock_server_->AddUpdateDirectory(2, 0, ":::", 1, 2);
+ SyncShareAsDelegate();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Put(PARENT_ID, ids_.FromNumber(1)));
+ EXPECT_TRUE(entry.Put(IS_UNSYNCED, true));
+ }
+ SyncShareAsDelegate();
+ // We use the same sync ts as before so our times match up.
+ mock_server_->AddUpdateDirectory(2, 1, ":::", 2, 2);
+ SyncShareAsDelegate();
+}
+
+// Don't crash when this occurs.
+TEST_F(SyncerTest, UpdateWhereParentIsNotAFolder) {
+ mock_server_->AddUpdateBookmark(1, 0, "B", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "BookmarkParent", 10, 10);
+ // Used to cause a CHECK
+ SyncShareAsDelegate();
+ {
+ ReadTransaction rtrans(FROM_HERE, directory());
+ Entry good_entry(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(good_entry.good());
+ EXPECT_FALSE(good_entry.Get(IS_UNAPPLIED_UPDATE));
+ Entry bad_parent(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(bad_parent.good());
+ EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE));
+ }
+}
+
+const char kRootId[] = "0";
+
+TEST_F(SyncerTest, DirectoryUpdateTest) {
+ Id in_root_id = ids_.NewServerId();
+ Id in_in_root_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(in_root_id, TestIdFactory::root(),
+ "in_root_name", 2, 2);
+ mock_server_->AddUpdateDirectory(in_in_root_id, in_root_id,
+ "in_in_root_name", 3, 3);
+ SyncShareAsDelegate();
+ {
+ ReadTransaction trans(FROM_HERE, directory());
+ Entry in_root(&trans, GET_BY_ID, in_root_id);
+ ASSERT_TRUE(in_root.good());
+ EXPECT_EQ("in_root_name", in_root.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(TestIdFactory::root(), in_root.Get(PARENT_ID));
+
+ Entry in_in_root(&trans, GET_BY_ID, in_in_root_id);
+ ASSERT_TRUE(in_in_root.good());
+ EXPECT_EQ("in_in_root_name", in_in_root.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(in_root_id, in_in_root.Get(PARENT_ID));
+ }
+}
+
+// Commits two locally-created nested directories and verifies that the
+// server assigns them new permanent IDs while their metahandles (and the
+// parent/child relationship) are preserved.
+TEST_F(SyncerTest, DirectoryCommitTest) {
+  syncable::Id in_root_id, in_dir_id;
+  int64 foo_metahandle;
+  int64 bar_metahandle;
+
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry parent(&wtrans, syncable::CREATE, root_id_, "foo");
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    in_root_id = parent.Get(syncable::ID);
+    foo_metahandle = parent.Get(META_HANDLE);
+
+    MutableEntry child(&wtrans, syncable::CREATE, parent.Get(ID), "bar");
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::IS_UNSYNCED, true);
+    child.Put(syncable::IS_DIR, true);
+    child.Put(syncable::SPECIFICS, DefaultBookmarkSpecifics());
+    bar_metahandle = child.Get(META_HANDLE);
+    // Record the child's pre-commit (local) ID so we can verify below
+    // that the commit replaced it.  Capturing parent.Get(syncable::ID)
+    // here, as the code previously did, made the EXPECT_NE on bar_entry
+    // vacuously true.
+    in_dir_id = child.Get(syncable::ID);
+  }
+  SyncShareAsDelegate();
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    // The old local ID must no longer resolve to any entry.
+    Entry fail_by_old_id_entry(&trans, GET_BY_ID, in_root_id);
+    ASSERT_FALSE(fail_by_old_id_entry.good());
+
+    // Both entries survive under the same metahandles with new IDs.
+    Entry foo_entry(&trans, GET_BY_HANDLE, foo_metahandle);
+    ASSERT_TRUE(foo_entry.good());
+    EXPECT_EQ("foo", foo_entry.Get(NON_UNIQUE_NAME));
+    EXPECT_NE(foo_entry.Get(syncable::ID), in_root_id);
+
+    Entry bar_entry(&trans, GET_BY_HANDLE, bar_metahandle);
+    ASSERT_TRUE(bar_entry.good());
+    EXPECT_EQ("bar", bar_entry.Get(NON_UNIQUE_NAME));
+    EXPECT_NE(bar_entry.Get(syncable::ID), in_dir_id);
+    EXPECT_EQ(foo_entry.Get(syncable::ID), bar_entry.Get(PARENT_ID));
+  }
+}
+
+// Verifies that poll-interval and sessions-commit-delay values delivered
+// through the ClientCommand protobuf are picked up by the delegate, and
+// that a later command overwrites the earlier values.
+TEST_F(SyncerTest, TestClientCommand) {
+  using sync_pb::ClientCommand;
+
+  // First command: establish initial intervals, then sync once.
+  ClientCommand* command = mock_server_->GetNextClientCommand();
+  command->set_set_sync_poll_interval(8);
+  command->set_set_sync_long_poll_interval(800);
+  command->set_sessions_commit_delay_seconds(3141);
+  mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
+  SyncShareAsDelegate();
+
+  EXPECT_EQ(TimeDelta::FromSeconds(8),
+            last_short_poll_interval_received_);
+  EXPECT_EQ(TimeDelta::FromSeconds(800),
+            last_long_poll_interval_received_);
+  EXPECT_EQ(TimeDelta::FromSeconds(3141),
+            last_sessions_commit_delay_seconds_);
+
+  // Second command: fresh values must replace the previous ones.
+  command = mock_server_->GetNextClientCommand();
+  command->set_set_sync_poll_interval(180);
+  command->set_set_sync_long_poll_interval(190);
+  command->set_sessions_commit_delay_seconds(2718);
+  mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
+  SyncShareAsDelegate();
+
+  EXPECT_EQ(TimeDelta::FromSeconds(180),
+            last_short_poll_interval_received_);
+  EXPECT_EQ(TimeDelta::FromSeconds(190),
+            last_long_poll_interval_received_);
+  EXPECT_EQ(TimeDelta::FromSeconds(2718),
+            last_sessions_commit_delay_seconds_);
+}
+
+// A reparented item's commit must carry the old parent ID, while a newly
+// created item's commit must carry none.
+TEST_F(SyncerTest, EnsureWeSendUpOldParent) {
+  syncable::Id folder_one_id = ids_.FromNumber(1);
+  syncable::Id folder_two_id = ids_.FromNumber(2);
+
+  mock_server_->AddUpdateDirectory(folder_one_id, TestIdFactory::root(),
+                                   "folder_one", 1, 1);
+  mock_server_->AddUpdateDirectory(folder_two_id, TestIdFactory::root(),
+                                   "folder_two", 1, 1);
+  SyncShareAsDelegate();
+  {
+    // A moved entry should send an "old parent."
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, GET_BY_ID, folder_one_id);
+    ASSERT_TRUE(entry.good());
+    entry.Put(PARENT_ID, folder_two_id);
+    entry.Put(IS_UNSYNCED, true);
+    // A new entry should send no "old parent."
+    MutableEntry create(&trans, CREATE, trans.root_id(), "new_folder");
+    create.Put(IS_UNSYNCED, true);
+    create.Put(SPECIFICS, DefaultBookmarkSpecifics());
+  }
+  SyncShareAsDelegate();
+  const sync_pb::CommitMessage& commit = mock_server_->last_sent_commit();
+  ASSERT_EQ(2, commit.entries_size());
+  // Entry 0 is the move: new parent is "2", old parent is the root ("0").
+  EXPECT_TRUE(commit.entries(0).parent_id_string() == "2");
+  EXPECT_TRUE(commit.entries(0).old_parent_id() == "0");
+  // Entry 1 is the fresh creation: no old parent at all.
+  EXPECT_FALSE(commit.entries(1).has_old_parent_id());
+}
+
+// Version fields must round-trip values near the top of the int64 range.
+TEST_F(SyncerTest, Test64BitVersionSupport) {
+  int64 really_big_int = std::numeric_limits<int64>::max() - 12;
+  const string name("ringo's dang orang ran rings around my o-ring");
+  int64 item_metahandle;
+
+  // Try writing a near-max int64 to the version fields of a meta entry.
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(), name);
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::BASE_VERSION, really_big_int);
+    entry.Put(syncable::SERVER_VERSION, really_big_int);
+    entry.Put(syncable::ID, ids_.NewServerId());
+    item_metahandle = entry.Get(META_HANDLE);
+  }
+  // Now read it back out and make sure the value survived intact.
+  ReadTransaction rtrans(FROM_HERE, directory());
+  Entry entry(&rtrans, syncable::GET_BY_HANDLE, item_metahandle);
+  ASSERT_TRUE(entry.good());
+  EXPECT_TRUE(really_big_int == entry.Get(syncable::BASE_VERSION));
+}
+
+// Walks an item through delete -> server-confirmed delete -> server
+// undelete, checking IS_DEL/SERVER_IS_DEL at each stage.  Commits are
+// forced to conflict so only downloaded updates advance server state.
+TEST_F(SyncerTest, TestSimpleUndelete) {
+  Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
+  mock_server_->set_conflict_all_commits(true);
+  // Let there be an entry from the server.
+  mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
+  SyncShareAsDelegate();
+  // Check it out and delete it.
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&wtrans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+    // Delete it locally.
+    entry.Put(IS_DEL, true);
+  }
+  SyncShareAsDelegate();
+  // Confirm we see IS_DEL and not SERVER_IS_DEL.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_TRUE(entry.Get(IS_DEL));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+  }
+  // Extra sync cycle with nothing new queued; expected to be a no-op.
+  SyncShareAsDelegate();
+  // Update from server confirming deletion.
+  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 11);
+  mock_server_->SetLastUpdateDeleted();
+  SyncShareAsDelegate();
+  // IS_DEL AND SERVER_IS_DEL now both true.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_TRUE(entry.Get(IS_DEL));
+    EXPECT_TRUE(entry.Get(SERVER_IS_DEL));
+  }
+  // Undelete from server.
+  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
+  SyncShareAsDelegate();
+  // IS_DEL and SERVER_IS_DEL now both false.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+  }
+}
+
+// Like TestSimpleUndelete, but the server never sends the update that
+// confirms the local deletion before it undeletes the item; the item
+// must still end up live on both sides.
+TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
+  Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
+  // Let there be a entry, from the server.
+  mock_server_->set_conflict_all_commits(true);
+  mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
+  SyncShareAsDelegate();
+  // Check it out and delete it.
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&wtrans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+    // Delete it locally.
+    entry.Put(IS_DEL, true);
+  }
+  SyncShareAsDelegate();
+  // Confirm we see IS_DEL and not SERVER_IS_DEL.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_TRUE(entry.Get(IS_DEL));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+  }
+  SyncShareAsDelegate();
+  // Say we do not get an update from server confirming deletion. Undelete
+  // from server
+  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
+  SyncShareAsDelegate();
+  // IS_DEL and SERVER_IS_DEL now both false.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_ID, id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+  }
+}
+
+// Regression-style smoke test: two same-named server bookmarks under the
+// root (a name clash), followed by a rename of the second.  The only
+// requirement is that the syncer processes both cycles without crashing.
+TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
+  Id id1 = ids_.MakeServer("first"), id2 = ids_.MakeServer("second");
+  Id root = TestIdFactory::root();
+  // Duplicate! expect path clashing!
+  mock_server_->set_conflict_all_commits(true);
+  mock_server_->AddUpdateBookmark(id1, root, "foo", 1, 10);
+  mock_server_->AddUpdateBookmark(id2, root, "foo", 1, 10);
+  SyncShareAsDelegate();
+  mock_server_->AddUpdateBookmark(id2, root, "foo2", 2, 20);
+  SyncShareAsDelegate();  // Now just don't explode.
+}
+
+// A server-created item carrying a client tag is applied locally and
+// stays addressable by that tag across later updates (here, a rename).
+TEST_F(SyncerTest, ClientTagServerCreatedUpdatesWork) {
+  mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10);
+  mock_server_->SetLastUpdateClientTag("permfolder");
+
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_DEL));
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+    EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem1");
+  }
+
+  // A second update with the same tag and a higher version renames it.
+  mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+  mock_server_->SetLastUpdateClientTag("permfolder");
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_DEL));
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+    EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem_renamed");
+  }
+}
+
+// An update that reuses an existing item's server ID but carries a
+// different client tag must be rejected; the original item is untouched.
+TEST_F(SyncerTest, ClientTagIllegalUpdateIgnored) {
+  mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10);
+  mock_server_->SetLastUpdateClientTag("permfolder");
+
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+    EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem1");
+    EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
+  }
+
+  // Same server ID (1) but a different tag: illegal.
+  mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+  mock_server_->SetLastUpdateClientTag("wrongtag");
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    // This update is rejected because it has the same ID, but a
+    // different tag than one that is already on the client.
+    // The client has a ServerKnows ID, which cannot be overwritten.
+    Entry rejected_update(&trans, GET_BY_CLIENT_TAG, "wrongtag");
+    EXPECT_FALSE(rejected_update.good());
+
+    // The original item keeps its name: the rename never applied.
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem1");
+  }
+}
+
+// An uncommitted local item and a server update share a client tag: the
+// two are reunited under the server ID while the local metahandle and
+// the locally-edited specifics (conflict-resolution winner) survive.
+TEST_F(SyncerTest, ClientTagUncommittedTagMatchesUpdate) {
+  int64 original_metahandle = 0;
+
+  // Distinct local vs. server bookmark payloads so we can tell which
+  // side won the conflict resolution.
+  sync_pb::EntitySpecifics local_bookmark(DefaultBookmarkSpecifics());
+  local_bookmark.mutable_bookmark()->set_url("http://foo/localsite");
+  sync_pb::EntitySpecifics server_bookmark(DefaultBookmarkSpecifics());
+  server_bookmark.mutable_bookmark()->set_url("http://bar/serversite");
+
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+    ASSERT_TRUE(perm_folder.good());
+    perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
+    perm_folder.Put(SPECIFICS, local_bookmark);
+    perm_folder.Put(IS_UNSYNCED, true);
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(ID).ServerKnows());
+    original_metahandle = perm_folder.Get(META_HANDLE);
+  }
+
+  // Server update with the same client tag but different specifics.
+  mock_server_->AddUpdateBookmark(1, 0, "permitem_renamed", 10, 100);
+  mock_server_->SetLastUpdateClientTag("clientperm");
+  mock_server_->GetMutableLastUpdate()->mutable_specifics()->
+      CopyFrom(server_bookmark);
+  mock_server_->set_conflict_all_commits(true);
+
+  SyncShareAsDelegate();
+  // This should cause client tag reunion, preserving the metahandle.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_DEL));
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(10, perm_folder.Get(BASE_VERSION));
+    // Entry should have been given the new ID while preserving the
+    // metahandle; client should have won the conflict resolution.
+    EXPECT_EQ(original_metahandle, perm_folder.Get(META_HANDLE));
+    EXPECT_EQ("clientperm", perm_folder.Get(UNIQUE_CLIENT_TAG));
+    EXPECT_EQ("clientname", perm_folder.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(local_bookmark.SerializeAsString(),
+              perm_folder.Get(SPECIFICS).SerializeAsString());
+    EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
+  }
+
+  // Allow commits to succeed and sync again.
+  mock_server_->set_conflict_all_commits(false);
+  SyncShareAsDelegate();
+
+  // The resolved entry ought to commit cleanly.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+    ASSERT_TRUE(perm_folder.good());
+    EXPECT_FALSE(perm_folder.Get(IS_DEL));
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_TRUE(10 < perm_folder.Get(BASE_VERSION));
+    // Entry should have been given the new ID while preserving the
+    // metahandle; client should have won the conflict resolution.
+    EXPECT_EQ(original_metahandle, perm_folder.Get(META_HANDLE));
+    EXPECT_EQ("clientperm", perm_folder.Get(UNIQUE_CLIENT_TAG));
+    EXPECT_EQ("clientname", perm_folder.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(local_bookmark.SerializeAsString(),
+              perm_folder.Get(SPECIFICS).SerializeAsString());
+    EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
+  }
+}
+
+// A locally-created-then-deleted tagged item collides with a server
+// update bearing the same tag: the item takes on the server ID and base
+// version while its local deletion remains pending commit.
+TEST_F(SyncerTest, ClientTagConflictWithDeletedLocalEntry) {
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+    ASSERT_TRUE(perm_folder.good());
+    ASSERT_FALSE(perm_folder.Get(ID).ServerKnows());
+    perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
+    perm_folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+    perm_folder.Put(IS_UNSYNCED, true);
+    // Deleted before it was ever committed.
+    perm_folder.Put(IS_DEL, true);
+  }
+
+  mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+  mock_server_->SetLastUpdateClientTag("clientperm");
+  mock_server_->set_conflict_all_commits(true);
+
+  SyncShareAsDelegate();
+  // This should cause client tag overwrite.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+    ASSERT_TRUE(perm_folder.good());
+    ASSERT_TRUE(perm_folder.Get(ID).ServerKnows());
+    EXPECT_TRUE(perm_folder.Get(IS_DEL));
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(perm_folder.Get(IS_UNSYNCED));
+    EXPECT_EQ(perm_folder.Get(BASE_VERSION), 10);
+    EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "clientperm");
+  }
+}
+
+// When two entries (one local, one from an update) share a client tag
+// but have different server IDs, the entry with the numerically lesser
+// ID wins and its metahandle is preserved.
+TEST_F(SyncerTest, ClientTagUpdateClashesWithLocalEntry) {
+  // This test is written assuming that ID comparison
+  // will work out in a particular way.
+  EXPECT_TRUE(ids_.FromNumber(1) < ids_.FromNumber(2));
+  EXPECT_TRUE(ids_.FromNumber(3) < ids_.FromNumber(4));
+
+  // Seed two tagged bookmarks from the server.
+  mock_server_->AddUpdateBookmark(1, 0, "One", 10, 100);
+  mock_server_->SetLastUpdateClientTag("tag1");
+  mock_server_->AddUpdateBookmark(4, 0, "Four", 11, 110);
+  mock_server_->SetLastUpdateClientTag("tag2");
+
+  mock_server_->set_conflict_all_commits(true);
+
+  SyncShareAsDelegate();
+  int64 tag1_metahandle = syncable::kInvalidMetaHandle;
+  int64 tag2_metahandle = syncable::kInvalidMetaHandle;
+  // This should cause client tag overwrite.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry tag1(&trans, GET_BY_CLIENT_TAG, "tag1");
+    ASSERT_TRUE(tag1.good());
+    ASSERT_TRUE(tag1.Get(ID).ServerKnows());
+    ASSERT_TRUE(ids_.FromNumber(1) == tag1.Get(ID));
+    EXPECT_FALSE(tag1.Get(IS_DEL));
+    EXPECT_FALSE(tag1.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag1.Get(IS_UNSYNCED));
+    EXPECT_EQ("One", tag1.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(10, tag1.Get(BASE_VERSION));
+    EXPECT_EQ("tag1", tag1.Get(UNIQUE_CLIENT_TAG));
+    tag1_metahandle = tag1.Get(META_HANDLE);
+
+    Entry tag2(&trans, GET_BY_CLIENT_TAG, "tag2");
+    ASSERT_TRUE(tag2.good());
+    ASSERT_TRUE(tag2.Get(ID).ServerKnows());
+    ASSERT_TRUE(ids_.FromNumber(4) == tag2.Get(ID));
+    EXPECT_FALSE(tag2.Get(IS_DEL));
+    EXPECT_FALSE(tag2.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag2.Get(IS_UNSYNCED));
+    EXPECT_EQ("Four", tag2.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(11, tag2.Get(BASE_VERSION));
+    EXPECT_EQ("tag2", tag2.Get(UNIQUE_CLIENT_TAG));
+    tag2_metahandle = tag2.Get(META_HANDLE);
+
+    // Exactly the two tagged items live under the root.
+    syncable::Directory::ChildHandles children;
+    directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
+    ASSERT_EQ(2U, children.size());
+  }
+
+  // New updates reuse the tags under different IDs: 2 loses to 1,
+  // but 3 beats 4.
+  mock_server_->AddUpdateBookmark(2, 0, "Two", 12, 120);
+  mock_server_->SetLastUpdateClientTag("tag1");
+  mock_server_->AddUpdateBookmark(3, 0, "Three", 13, 130);
+  mock_server_->SetLastUpdateClientTag("tag2");
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry tag1(&trans, GET_BY_CLIENT_TAG, "tag1");
+    ASSERT_TRUE(tag1.good());
+    ASSERT_TRUE(tag1.Get(ID).ServerKnows());
+    ASSERT_TRUE(ids_.FromNumber(1) == tag1.Get(ID))
+        << "ID 1 should be kept, since it was less than ID 2.";
+    EXPECT_FALSE(tag1.Get(IS_DEL));
+    EXPECT_FALSE(tag1.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag1.Get(IS_UNSYNCED));
+    EXPECT_EQ(10, tag1.Get(BASE_VERSION));
+    EXPECT_EQ("tag1", tag1.Get(UNIQUE_CLIENT_TAG));
+    EXPECT_EQ("One", tag1.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(tag1_metahandle, tag1.Get(META_HANDLE));
+
+    Entry tag2(&trans, GET_BY_CLIENT_TAG, "tag2");
+    ASSERT_TRUE(tag2.good());
+    ASSERT_TRUE(tag2.Get(ID).ServerKnows());
+    ASSERT_TRUE(ids_.FromNumber(3) == tag2.Get(ID))
+        << "ID 3 should be kept, since it was less than ID 4.";
+    EXPECT_FALSE(tag2.Get(IS_DEL));
+    EXPECT_FALSE(tag2.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag2.Get(IS_UNSYNCED));
+    EXPECT_EQ("Three", tag2.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(13, tag2.Get(BASE_VERSION));
+    EXPECT_EQ("tag2", tag2.Get(UNIQUE_CLIENT_TAG));
+    EXPECT_EQ(tag2_metahandle, tag2.Get(META_HANDLE));
+
+    // Still exactly two children: losers were folded in, not added.
+    syncable::Directory::ChildHandles children;
+    directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
+    ASSERT_EQ(2U, children.size());
+  }
+}
+
+// Several updates within a single batch share a client tag; for each tag
+// the update with the numerically least ID must win, regardless of its
+// position in the batch.
+TEST_F(SyncerTest, ClientTagClashWithinBatchOfUpdates) {
+  // This test is written assuming that ID comparison
+  // will work out in a particular way.
+  EXPECT_TRUE(ids_.FromNumber(1) < ids_.FromNumber(4));
+  EXPECT_TRUE(ids_.FromNumber(201) < ids_.FromNumber(205));
+
+  // "tag a": the winner (ID 1) arrives first in the batch.
+  mock_server_->AddUpdateBookmark(1, 0, "One A", 1, 10);
+  mock_server_->SetLastUpdateClientTag("tag a");  // Least ID: winner.
+  mock_server_->AddUpdateBookmark(2, 0, "Two A", 11, 110);
+  mock_server_->SetLastUpdateClientTag("tag a");
+  mock_server_->AddUpdateBookmark(3, 0, "Three A", 12, 120);
+  mock_server_->SetLastUpdateClientTag("tag a");
+  mock_server_->AddUpdateBookmark(4, 0, "Four A", 13, 130);
+  mock_server_->SetLastUpdateClientTag("tag a");
+
+  // "tag b": the winner (ID 101) arrives in the middle.
+  mock_server_->AddUpdateBookmark(105, 0, "One B", 14, 140);
+  mock_server_->SetLastUpdateClientTag("tag b");
+  mock_server_->AddUpdateBookmark(102, 0, "Two B", 15, 150);
+  mock_server_->SetLastUpdateClientTag("tag b");
+  mock_server_->AddUpdateBookmark(101, 0, "Three B", 16, 160);
+  mock_server_->SetLastUpdateClientTag("tag b");  // Least ID: winner.
+  mock_server_->AddUpdateBookmark(104, 0, "Four B", 17, 170);
+  mock_server_->SetLastUpdateClientTag("tag b");
+
+  // "tag c": the winner (ID 201) arrives last.
+  mock_server_->AddUpdateBookmark(205, 0, "One C", 18, 180);
+  mock_server_->SetLastUpdateClientTag("tag c");
+  mock_server_->AddUpdateBookmark(202, 0, "Two C", 19, 190);
+  mock_server_->SetLastUpdateClientTag("tag c");
+  mock_server_->AddUpdateBookmark(204, 0, "Three C", 20, 200);
+  mock_server_->SetLastUpdateClientTag("tag c");
+  mock_server_->AddUpdateBookmark(201, 0, "Four C", 21, 210);
+  mock_server_->SetLastUpdateClientTag("tag c");  // Least ID: winner.
+
+  mock_server_->set_conflict_all_commits(true);
+
+  SyncShareAsDelegate();
+  // This should cause client tag overwrite.
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    Entry tag_a(&trans, GET_BY_CLIENT_TAG, "tag a");
+    ASSERT_TRUE(tag_a.good());
+    EXPECT_TRUE(tag_a.Get(ID).ServerKnows());
+    EXPECT_EQ(ids_.FromNumber(1), tag_a.Get(ID));
+    EXPECT_FALSE(tag_a.Get(IS_DEL));
+    EXPECT_FALSE(tag_a.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag_a.Get(IS_UNSYNCED));
+    EXPECT_EQ("One A", tag_a.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(1, tag_a.Get(BASE_VERSION));
+    EXPECT_EQ("tag a", tag_a.Get(UNIQUE_CLIENT_TAG));
+
+    Entry tag_b(&trans, GET_BY_CLIENT_TAG, "tag b");
+    ASSERT_TRUE(tag_b.good());
+    EXPECT_TRUE(tag_b.Get(ID).ServerKnows());
+    EXPECT_EQ(ids_.FromNumber(101), tag_b.Get(ID));
+    EXPECT_FALSE(tag_b.Get(IS_DEL));
+    EXPECT_FALSE(tag_b.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag_b.Get(IS_UNSYNCED));
+    EXPECT_EQ("Three B", tag_b.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(16, tag_b.Get(BASE_VERSION));
+    EXPECT_EQ("tag b", tag_b.Get(UNIQUE_CLIENT_TAG));
+
+    Entry tag_c(&trans, GET_BY_CLIENT_TAG, "tag c");
+    ASSERT_TRUE(tag_c.good());
+    EXPECT_TRUE(tag_c.Get(ID).ServerKnows());
+    EXPECT_EQ(ids_.FromNumber(201), tag_c.Get(ID));
+    EXPECT_FALSE(tag_c.Get(IS_DEL));
+    EXPECT_FALSE(tag_c.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(tag_c.Get(IS_UNSYNCED));
+    EXPECT_EQ("Four C", tag_c.Get(NON_UNIQUE_NAME));
+    EXPECT_EQ(21, tag_c.Get(BASE_VERSION));
+    EXPECT_EQ("tag c", tag_c.Get(UNIQUE_CLIENT_TAG));
+
+    // One surviving entry per tag.
+    syncable::Directory::ChildHandles children;
+    directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
+    ASSERT_EQ(3U, children.size());
+  }
+}
+
+// Items downloaded with unique server tags become addressable via
+// GET_BY_SERVER_TAG, while an untagged local item whose *name* happens
+// to match a tag value is left completely alone.
+TEST_F(SyncerTest, UniqueServerTagUpdates) {
+  // As a hurdle, introduce an item whose name is the same as the tag value
+  // we'll use later.
+  int64 hurdle_handle = CreateUnsyncedDirectory("bob", "id_bob");
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+    ASSERT_TRUE(hurdle.good());
+    ASSERT_FALSE(hurdle.Get(IS_DEL));
+    ASSERT_TRUE(hurdle.Get(UNIQUE_SERVER_TAG).empty());
+    ASSERT_EQ("bob", hurdle.Get(NON_UNIQUE_NAME));
+
+    // Nothing is server-tagged yet, so tag lookups must fail.
+    Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
+    EXPECT_FALSE(tag_alpha.good());
+    Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
+    EXPECT_FALSE(tag_bob.good());
+  }
+
+  // Now download some tagged items as updates.
+  mock_server_->AddUpdateDirectory(1, 0, "update1", 1, 10);
+  mock_server_->SetLastUpdateServerTag("alpha");
+  mock_server_->AddUpdateDirectory(2, 0, "update2", 2, 20);
+  mock_server_->SetLastUpdateServerTag("bob");
+  SyncShareAsDelegate();
+
+  {
+    ReadTransaction trans(FROM_HERE, directory());
+
+    // The new items are applied as fresh entries and are reachable
+    // through their tag values.
+    Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
+    ASSERT_TRUE(tag_alpha.good());
+    ASSERT_FALSE(tag_alpha.Get(IS_DEL));
+    ASSERT_EQ("alpha", tag_alpha.Get(UNIQUE_SERVER_TAG));
+    ASSERT_EQ("update1", tag_alpha.Get(NON_UNIQUE_NAME));
+    Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
+    ASSERT_TRUE(tag_bob.good());
+    ASSERT_FALSE(tag_bob.Get(IS_DEL));
+    ASSERT_EQ("bob", tag_bob.Get(UNIQUE_SERVER_TAG));
+    ASSERT_EQ("update2", tag_bob.Get(NON_UNIQUE_NAME));
+    // The pre-existing untagged item is unchanged.
+    Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+    ASSERT_TRUE(hurdle.good());
+    ASSERT_FALSE(hurdle.Get(IS_DEL));
+    ASSERT_TRUE(hurdle.Get(UNIQUE_SERVER_TAG).empty());
+    ASSERT_EQ("bob", hurdle.Get(NON_UNIQUE_NAME));
+  }
+}
+
+// Toggles datatypes on and off between sync cycles; each cycle should
+// issue exactly one GetUpdates request, and the mock server's handler
+// verifies the request's type filter matches the enabled set.
+TEST_F(SyncerTest, GetUpdatesSetsRequestedTypes) {
+  // The expectations of this test happen in the MockConnectionManager's
+  // GetUpdates handler. EnableDatatype sets the expectation value from our
+  // set of enabled/disabled datatypes.
+  EnableDatatype(syncable::BOOKMARKS);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+
+  EnableDatatype(syncable::AUTOFILL);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+
+  EnableDatatype(syncable::PREFERENCES);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+
+  DisableDatatype(syncable::BOOKMARKS);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+
+  DisableDatatype(syncable::AUTOFILL);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+
+  // Disable and enable within the same cycle.
+  DisableDatatype(syncable::PREFERENCES);
+  EnableDatatype(syncable::AUTOFILL);
+  SyncShareAsDelegate();
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+}
+
+// Test what happens if a client deletes, then recreates, an object very
+// quickly. It is possible that the deletion gets sent as a commit, and
+// the undelete happens during the commit request. The principle here
+// is that with a single committing client, conflicts should never
+// be encountered, and a client encountering its past actions during
+// getupdates should never feed back to override later actions.
+//
+// In cases of ordering A-F below, the outcome should be the same.
+// Exercised by UndeleteDuringCommit:
+// A. Delete - commit - undelete - commitresponse.
+// B. Delete - commit - undelete - commitresponse - getupdates.
+// Exercised by UndeleteBeforeCommit:
+// C. Delete - undelete - commit - commitresponse.
+// D. Delete - undelete - commit - commitresponse - getupdates.
+// Exercised by UndeleteAfterCommit:
+// E. Delete - commit - commitresponse - undelete - commit
+// - commitresponse.
+// F. Delete - commit - commitresponse - undelete - commit -
+// - commitresponse - getupdates.
+// Fixture for the undeletion scenarios (cases A-F described above): it
+// manages a single client-tagged item and provides helpers to mutate it
+// and assert its full sync state.
+class SyncerUndeletionTest : public SyncerTest {
+ public:
+  SyncerUndeletionTest()
+      : client_tag_("foobar"),
+        metahandle_(syncable::kInvalidMetaHandle) {
+  }
+
+  // Creates an unsynced, client-tagged bookmark item under the root and
+  // remembers its metahandle for the Expect* helpers.
+  void Create() {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+    ASSERT_TRUE(perm_folder.good());
+    perm_folder.Put(UNIQUE_CLIENT_TAG, client_tag_);
+    perm_folder.Put(IS_UNSYNCED, true);
+    perm_folder.Put(SYNCING, false);
+    perm_folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+    EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(perm_folder.Get(ID).ServerKnows());
+    metahandle_ = perm_folder.Get(META_HANDLE);
+  }
+
+  // Locally deletes the tagged item and marks it for commit.
+  void Delete() {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ(metahandle_, entry.Get(META_HANDLE));
+    entry.Put(IS_DEL, true);
+    entry.Put(IS_UNSYNCED, true);
+    entry.Put(SYNCING, false);
+  }
+
+  // Reverses a local deletion and marks the item for commit.
+  void Undelete() {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ(metahandle_, entry.Get(META_HANDLE));
+    EXPECT_TRUE(entry.Get(IS_DEL));
+    entry.Put(IS_DEL, false);
+    entry.Put(IS_UNSYNCED, true);
+    entry.Put(SYNCING, false);
+  }
+
+  // Returns the tagged item's metahandle, or kInvalidMetaHandle (after
+  // flagging a test failure) if the item cannot be found.
+  int64 GetMetahandleOfTag() {
+    ReadTransaction trans(FROM_HERE, directory());
+    Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+    EXPECT_TRUE(entry.good());
+    if (!entry.good()) {
+      return syncable::kInvalidMetaHandle;
+    }
+    return entry.Get(META_HANDLE);
+  }
+
+  // Item exists locally, never committed (BASE_VERSION <= 0), unsynced.
+  void ExpectUnsyncedCreation() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_FALSE(Get(metahandle_, IS_DEL));
+    EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));  // Never been committed.
+    EXPECT_GE(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+  }
+
+  // Item undeleted locally while the server still considers it deleted;
+  // the ID is server-assigned and BASE_VERSION has been reset to 0.
+  void ExpectUnsyncedUndeletion() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_FALSE(Get(metahandle_, IS_DEL));
+    EXPECT_TRUE(Get(metahandle_, SERVER_IS_DEL));
+    EXPECT_EQ(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(Get(metahandle_, ID).ServerKnows());
+  }
+
+  // Item is live on both sides with a pending local edit to commit.
+  void ExpectUnsyncedEdit() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_FALSE(Get(metahandle_, IS_DEL));
+    EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+    EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+    EXPECT_TRUE(Get(metahandle_, ID).ServerKnows());
+  }
+
+  // Item deleted locally but not yet on the server; deletion pending.
+  void ExpectUnsyncedDeletion() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_TRUE(Get(metahandle_, IS_DEL));
+    EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+    EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+    EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_LT(0, Get(metahandle_, SERVER_VERSION));
+  }
+
+  // Item committed and live on both sides; nothing pending, and base and
+  // server versions agree.
+  void ExpectSyncedAndCreated() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_FALSE(Get(metahandle_, IS_DEL));
+    EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+    EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_EQ(Get(metahandle_, BASE_VERSION), Get(metahandle_, SERVER_VERSION));
+    EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+  }
+
+  // Item deleted on both sides with nothing pending.
+  void ExpectSyncedAndDeleted() {
+    EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+    EXPECT_TRUE(Get(metahandle_, IS_DEL));
+    EXPECT_TRUE(Get(metahandle_, SERVER_IS_DEL));
+    EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+    EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+    EXPECT_GE(0, Get(metahandle_, BASE_VERSION));
+    EXPECT_GE(0, Get(metahandle_, SERVER_VERSION));
+  }
+
+ protected:
+  const std::string client_tag_;  // Tag identifying the single test item.
+  int64 metahandle_;  // Metahandle of the tagged item; set by Create().
+};
+
+// Cases A/B: the item is undeleted while its deletion commit is in
+// flight (via the mid-commit callback); the undeletion must win, both
+// before and after the server's tombstone update arrives.
+TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
+  Create();
+  ExpectUnsyncedCreation();
+  SyncShareAsDelegate();
+
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  ExpectSyncedAndCreated();
+
+  // Delete, begin committing the delete, then undelete while committing.
+  Delete();
+  ExpectUnsyncedDeletion();
+  mock_server_->SetMidCommitCallback(
+      base::Bind(&SyncerUndeletionTest::Undelete, base::Unretained(this)));
+  SyncShareAsDelegate();
+
+  // The item ought to exist as an unsynced undeletion (meaning,
+  // we think that the next commit ought to be a recreation commit).
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  ExpectUnsyncedUndeletion();
+
+  // Now, encounter a GetUpdates corresponding to the deletion from
+  // the server. The undeletion should prevail again and be committed.
+  // None of this should trigger any conflict detection -- it is perfectly
+  // normal to receive updates from our own commits.
+  mock_server_->SetMidCommitCallback(base::Closure());
+  mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+  SyncShareAsDelegate();
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  ExpectSyncedAndCreated();
+}
+
+// Cases C/D: delete then undelete before any commit happens; the net
+// result commits as an ordinary edit, since the server never saw the
+// deletion.
+TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
+  Create();
+  ExpectUnsyncedCreation();
+  SyncShareAsDelegate();
+
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  ExpectSyncedAndCreated();
+
+  // Delete and undelete, then sync to pick up the result.
+  Delete();
+  ExpectUnsyncedDeletion();
+  Undelete();
+  ExpectUnsyncedEdit();  // Edit, not undelete: server thinks it exists.
+  SyncShareAsDelegate();
+
+  // The item ought to have committed successfully.
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  ExpectSyncedAndCreated();
+  // Two successful commits so far: creation, then the edit.
+  EXPECT_EQ(2, Get(metahandle_, BASE_VERSION));
+
+  // Now, encounter a GetUpdates corresponding to the just-committed
+  // update.
+  mock_server_->AddUpdateFromLastCommit();
+  SyncShareAsDelegate();
+  EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+  EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+  ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete and commit.
+ Delete();
+ ExpectUnsyncedDeletion();
+ SyncShareAsDelegate();
+
+ // The item ought to have committed successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Before the GetUpdates, the item is locally undeleted.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete and commit.
+ Delete();
+ ExpectUnsyncedDeletion();
+ SyncShareAsDelegate();
+
+ // The item ought to have committed successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. Should be consistent.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // After the GetUpdates, the item is locally undeleted.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+// Test processing of undeletion GetUpdateses.
+TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Add a delete from the server.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Some other client deletes the item.
+ mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ SyncShareAsDelegate();
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Undelete it locally.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Some other client deletes the item before we get a chance
+ // to GetUpdates our original request.
+ mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ SyncShareAsDelegate();
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Undelete it locally.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Get the updates of our just-committed entry.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // We delete the item.
+ Delete();
+ ExpectUnsyncedDeletion();
+ SyncShareAsDelegate();
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Some other client undeletes the item.
+ mock_server_->AddUpdateBookmark(Get(metahandle_, ID),
+ Get(metahandle_, PARENT_ID),
+ "Thadeusz", 100, 1000);
+ mock_server_->SetLastUpdateClientTag(client_tag_);
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+ EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
+}
+
+TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
+ Create();
+ ExpectUnsyncedCreation();
+ SyncShareAsDelegate();
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Get the updates of our just-committed entry.
+ mock_server_->AddUpdateFromLastCommit();
+ SyncShareAsDelegate();
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // We delete the item.
+ Delete();
+ ExpectUnsyncedDeletion();
+ SyncShareAsDelegate();
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Some other client undeletes before we see the update from our
+ // commit.
+ mock_server_->AddUpdateBookmark(Get(metahandle_, ID),
+ Get(metahandle_, PARENT_ID),
+ "Thadeusz", 100, 1000);
+ mock_server_->SetLastUpdateClientTag(client_tag_);
+ SyncShareAsDelegate();
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+ EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
+}
+
+// A group of tests exercising the syncer's handling of sibling ordering, as
+// represented in the sync protocol.
+class SyncerPositionUpdateTest : public SyncerTest {
+ public:
+ SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
+
+ protected:
+ void ExpectLocalItemsInServerOrder() {
+ if (position_map_.empty())
+ return;
+
+ ReadTransaction trans(FROM_HERE, directory());
+
+ Id prev_id;
+ DCHECK(prev_id.IsRoot());
+ PosMap::iterator next = position_map_.begin();
+ for (PosMap::iterator i = next++; i != position_map_.end(); ++i) {
+ Id id = i->second;
+ Entry entry_with_id(&trans, GET_BY_ID, id);
+ EXPECT_TRUE(entry_with_id.good());
+ EXPECT_EQ(prev_id, entry_with_id.Get(PREV_ID));
+ EXPECT_EQ(i->first, entry_with_id.Get(SERVER_POSITION_IN_PARENT));
+ if (next == position_map_.end()) {
+ EXPECT_EQ(Id(), entry_with_id.Get(NEXT_ID));
+ } else {
+ EXPECT_EQ(next->second, entry_with_id.Get(NEXT_ID));
+ next++;
+ }
+ prev_id = id;
+ }
+ }
+
+ void AddRootItemWithPosition(int64 position) {
+ string id = string("ServerId") + base::Int64ToString(next_update_id_++);
+ string name = "my name is my id -- " + id;
+ int revision = next_revision_++;
+ mock_server_->AddUpdateDirectory(id, kRootId, name, revision, revision);
+ mock_server_->SetLastUpdatePosition(position);
+ position_map_.insert(
+ PosMap::value_type(position, Id::CreateFromServerId(id)));
+ }
+ private:
+ typedef multimap<int64, Id> PosMap;
+ PosMap position_map_;
+ int next_update_id_;
+ int next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(SyncerPositionUpdateTest);
+};
+
+TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
+ // Add a bunch of items in increasing order, starting with just positive
+ // position values.
+ AddRootItemWithPosition(100);
+ AddRootItemWithPosition(199);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(400);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, InOrderNegative) {
+ // Test negative position values, but in increasing order.
+ AddRootItemWithPosition(-400);
+ AddRootItemWithPosition(-201);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(100);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
+ // Test when items are sent in the reverse order.
+ AddRootItemWithPosition(400);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(100);
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(-201);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(-400);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
+ // Mix it all up, interleaving position values, and try multiple batches of
+ // updates.
+ AddRootItemWithPosition(400);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(-400);
+ AddRootItemWithPosition(100);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(-201);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+
+ AddRootItemWithPosition(-144);
+
+ SyncShareAsDelegate();
+ ExpectLocalItemsInServerOrder();
+}
+
+class SyncerPositionTiebreakingTest : public SyncerTest {
+ public:
+ SyncerPositionTiebreakingTest()
+ : low_id_(Id::CreateFromServerId("A")),
+ mid_id_(Id::CreateFromServerId("M")),
+ high_id_(Id::CreateFromServerId("Z")),
+ next_revision_(1) {
+ DCHECK(low_id_ < mid_id_);
+ DCHECK(mid_id_ < high_id_);
+ DCHECK(low_id_ < high_id_);
+ }
+
+ // Adds the item by its Id, using a constant value for the position
+ // so that the syncer has to resolve the order some other way.
+ void Add(const Id& id) {
+ int revision = next_revision_++;
+ mock_server_->AddUpdateDirectory(id.GetServerId(), kRootId,
+ id.GetServerId(), revision, revision);
+ // The update position doesn't vary.
+ mock_server_->SetLastUpdatePosition(90210);
+ }
+
+ void ExpectLocalOrderIsByServerId() {
+ ReadTransaction trans(FROM_HERE, directory());
+ Id null_id;
+ Entry low(&trans, GET_BY_ID, low_id_);
+ Entry mid(&trans, GET_BY_ID, mid_id_);
+ Entry high(&trans, GET_BY_ID, high_id_);
+ EXPECT_TRUE(low.good());
+ EXPECT_TRUE(mid.good());
+ EXPECT_TRUE(high.good());
+ EXPECT_TRUE(low.Get(PREV_ID) == null_id);
+ EXPECT_TRUE(mid.Get(PREV_ID) == low_id_);
+ EXPECT_TRUE(high.Get(PREV_ID) == mid_id_);
+ EXPECT_TRUE(high.Get(NEXT_ID) == null_id);
+ EXPECT_TRUE(mid.Get(NEXT_ID) == high_id_);
+ EXPECT_TRUE(low.Get(NEXT_ID) == mid_id_);
+ }
+
+ protected:
+ // When there's a tiebreak on the numeric position, it's supposed to be
+ // broken by string comparison of the ids. These ids are in increasing
+ // order.
+ const Id low_id_;
+ const Id mid_id_;
+ const Id high_id_;
+
+ private:
+ int next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(SyncerPositionTiebreakingTest);
+};
+
+TEST_F(SyncerPositionTiebreakingTest, LowMidHigh) {
+ Add(low_id_);
+ Add(mid_id_);
+ Add(high_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, LowHighMid) {
+ Add(low_id_);
+ Add(high_id_);
+ Add(mid_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, HighMidLow) {
+ Add(high_id_);
+ Add(mid_id_);
+ Add(low_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, HighLowMid) {
+ Add(high_id_);
+ Add(low_id_);
+ Add(mid_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, MidHighLow) {
+ Add(mid_id_);
+ Add(high_id_);
+ Add(low_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
+ Add(mid_id_);
+ Add(low_id_);
+ Add(high_id_);
+ SyncShareAsDelegate();
+ ExpectLocalOrderIsByServerId();
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer_util.cc b/sync/engine/syncer_util.cc
new file mode 100644
index 0000000..f003bcd
--- /dev/null
+++ b/sync/engine/syncer_util.cc
@@ -0,0 +1,778 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncer_util.h"
+
+#include <algorithm>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/location.h"
+#include "base/metrics/histogram.h"
+#include "sync/engine/conflict_resolver.h"
+#include "sync/engine/nigori_util.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_changes_version.h"
+#include "sync/util/cryptographer.h"
+#include "sync/util/time.h"
+
+using syncable::BASE_VERSION;
+using syncable::Blob;
+using syncable::CHANGES_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::CTIME;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::GetModelTypeFromSpecifics;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::Id;
+using syncable::IsRealDataType;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::MutableEntry;
+using syncable::NEXT_ID;
+using syncable::NON_UNIQUE_NAME;
+using syncable::BASE_SERVER_SPECIFICS;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::ReadTransaction;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NON_UNIQUE_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_VERSION;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_SERVER_TAG;
+using syncable::SPECIFICS;
+using syncable::SYNCER;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+// Returns the number of unsynced entries.
+// static
+int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
+ std::vector<int64> *handles) {
+ trans->directory()->GetUnsyncedMetaHandles(trans, handles);
+ DVLOG_IF(1, !handles->empty()) << "Have " << handles->size()
+ << " unsynced items.";
+ return handles->size();
+}
+
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id,
+ syncable::Directory::ChildHandles* children) {
+ syncable::Id old_id = entry->Get(ID);
+ if (!entry->Put(ID, new_id)) {
+ Entry old_entry(trans, GET_BY_ID, new_id);
+ CHECK(old_entry.good());
+ LOG(FATAL) << "Attempt to change ID to " << new_id
+ << " conflicts with existing entry.\n\n"
+ << *entry << "\n\n" << old_entry;
+ }
+ if (entry->Get(IS_DIR)) {
+ // Get all child entries of the old id.
+ trans->directory()->GetChildHandlesById(trans, old_id, children);
+ Directory::ChildHandles::iterator i = children->begin();
+ while (i != children->end()) {
+ MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+ CHECK(child_entry.good());
+ // Use the unchecked setter here to avoid touching the child's NEXT_ID
+ // and PREV_ID fields (which Put(PARENT_ID) would normally do to
+ // maintain linked-list invariants). In this case, NEXT_ID and PREV_ID
+ // among the children will be valid after the loop, since we update all
+ // the children at once.
+ child_entry.PutParentIdPropertyOnly(new_id);
+ }
+ }
+ // Update Id references on the previous and next nodes in the sibling
+ // order. Do this by reinserting into the linked list; the first
+ // step in PutPredecessor is to Unlink from the existing order, which
+ // will overwrite the stale Id value from the adjacent nodes.
+ if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
+ entry->Get(PREV_ID) == old_id) {
+ // We just need a shallow update to |entry|'s fields since it is already
+ // self looped.
+ entry->Put(NEXT_ID, new_id);
+ entry->Put(PREV_ID, new_id);
+ } else {
+ entry->PutPredecessor(entry->Get(PREV_ID));
+ }
+}
+
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id) {
+ syncable::Directory::ChildHandles children;
+ ChangeEntryIDAndUpdateChildren(trans, entry, new_id, &children);
+}
+
+// static
+syncable::Id SyncerUtil::FindLocalIdToUpdate(
+ syncable::BaseTransaction* trans,
+ const SyncEntity& update) {
+ // Expected entry points of this function:
+ // SyncEntity has NOT been applied to SERVER fields.
+ // SyncEntity has NOT been applied to LOCAL fields.
+ // DB has not yet been modified, no entries created for this update.
+
+ const std::string& client_id = trans->directory()->cache_guid();
+
+ if (update.has_client_defined_unique_tag() &&
+ !update.client_defined_unique_tag().empty()) {
+ // When a server sends down a client tag, the following cases can occur:
+ // 1) Client has entry for tag already, ID is server style, matches
+ // 2) Client has entry for tag already, ID is server, doesn't match.
+ // 3) Client has entry for tag already, ID is local, (never matches)
+ // 4) Client has no entry for tag
+
+ // Case 1, we don't have to do anything since the update will
+ // work just fine. Update will end up in the proper entry, via ID lookup.
+ // Case 2 - Happens very rarely due to lax enforcement of client tags
+ // on the server, if two clients commit the same tag at the same time.
+ // When this happens, we pick the lexically-least ID and ignore all other
+ // items.
+ // Case 3 - We need to replace the local ID with the server ID so that
+ // this update gets targeted at the correct local entry; we expect conflict
+ // resolution to occur.
+ // Case 4 - Perfect. Same as case 1.
+
+ syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
+ update.client_defined_unique_tag());
+
+ // The SyncAPI equivalent of this function will return !good if IS_DEL.
+ // The syncable version will return good even if IS_DEL.
+ // TODO(chron): Unit test the case with IS_DEL and make sure.
+ if (local_entry.good()) {
+ if (local_entry.Get(ID).ServerKnows()) {
+ if (local_entry.Get(ID) != update.id()) {
+ // Case 2.
+ LOG(WARNING) << "Duplicated client tag.";
+ if (local_entry.Get(ID) < update.id()) {
+ // Signal an error; drop this update on the floor. Note that
+ // we don't server delete the item, because we don't allow it to
+ // exist locally at all. So the item will remain orphaned on
+ // the server, and we won't pay attention to it.
+ return syncable::GetNullId();
+ }
+ }
+ // Target this change to the existing local entry; later,
+ // we'll change the ID of the local entry to update.id()
+ // if needed.
+ return local_entry.Get(ID);
+ } else {
+ // Case 3: We have a local entry with the same client tag.
+ // We should change the ID of the local entry to the server entry.
+      // This will result in a server ID with base version == 0, but that's
+ // a legal state for an item with a client tag. By changing the ID,
+ // update will now be applied to local_entry.
+ DCHECK(0 == local_entry.Get(BASE_VERSION) ||
+ CHANGES_VERSION == local_entry.Get(BASE_VERSION));
+ return local_entry.Get(ID);
+ }
+ }
+ } else if (update.has_originator_cache_guid() &&
+ update.originator_cache_guid() == client_id) {
+ // If a commit succeeds, but the response does not come back fast enough
+ // then the syncer might assume that it was never committed.
+ // The server will track the client that sent up the original commit and
+ // return this in a get updates response. When this matches a local
+ // uncommitted item, we must mutate our local item and version to pick up
+ // the committed version of the same item whose commit response was lost.
+ // There is however still a race condition if the server has not
+ // completed the commit by the time the syncer tries to get updates
+ // again. To mitigate this, we need to have the server time out in
+ // a reasonable span, our commit batches have to be small enough
+ // to process within our HTTP response "assumed alive" time.
+
+ // We need to check if we have an entry that didn't get its server
+ // id updated correctly. The server sends down a client ID
+    // and a local (negative) id. If we have an entry by that
+ // description, we should update the ID and version to the
+ // server side ones to avoid multiple copies of the same thing.
+
+ syncable::Id client_item_id = syncable::Id::CreateFromClientString(
+ update.originator_client_item_id());
+ DCHECK(!client_item_id.ServerKnows());
+ syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);
+
+ // If it exists, then our local client lost a commit response. Use
+ // the local entry.
+ if (local_entry.good() && !local_entry.Get(IS_DEL)) {
+ int64 old_version = local_entry.Get(BASE_VERSION);
+ int64 new_version = update.version();
+ DCHECK_LE(old_version, 0);
+ DCHECK_GT(new_version, 0);
+ // Otherwise setting the base version could cause a consistency failure.
+ // An entry should never be version 0 and SYNCED.
+ DCHECK(local_entry.Get(IS_UNSYNCED));
+
+ // Just a quick sanity check.
+ DCHECK(!local_entry.Get(ID).ServerKnows());
+
+ DVLOG(1) << "Reuniting lost commit response IDs. server id: "
+ << update.id() << " local id: " << local_entry.Get(ID)
+ << " new version: " << new_version;
+
+ return local_entry.Get(ID);
+ }
+ }
+ // Fallback: target an entry having the server ID, creating one if needed.
+ return update.id();
+}
+
+// static
+UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ ConflictResolver* resolver,
+ Cryptographer* cryptographer) {
+ CHECK(entry->good());
+ if (!entry->Get(IS_UNAPPLIED_UPDATE))
+ return SUCCESS; // No work to do.
+ syncable::Id id = entry->Get(ID);
+ const sync_pb::EntitySpecifics& specifics = entry->Get(SERVER_SPECIFICS);
+
+ // We intercept updates to the Nigori node, update the Cryptographer and
+ // encrypt any unsynced changes here because there is no Nigori
+ // ChangeProcessor. We never put the nigori node in a state of
+ // conflict_encryption.
+ //
+ // We always update the cryptographer with the server's nigori node,
+ // even if we have a locally modified nigori node (we manually merge nigori
+ // data in the conflict resolver in that case). This handles the case where
+ // two clients both set a different passphrase. The second client to attempt
+ // to commit will go into a state of having pending keys, unioned the set of
+ // encrypted types, and eventually re-encrypt everything with the passphrase
+ // of the first client and commit the set of merged encryption keys. Until the
+ // second client provides the pending passphrase, the cryptographer will
+ // preserve the encryption keys based on the local passphrase, while the
+ // nigori node will preserve the server encryption keys.
+ //
+ // If non-encryption changes are made to the nigori node, they will be
+ // lost as part of conflict resolution. This is intended, as we place a higher
+ // priority on preserving the server's passphrase change to preserving local
+ // non-encryption changes. Next time the non-encryption changes are made to
+ // the nigori node (e.g. on restart), they will commit without issue.
+ if (specifics.has_nigori()) {
+ const sync_pb::NigoriSpecifics& nigori = specifics.nigori();
+ cryptographer->Update(nigori);
+
+ // Make sure any unsynced changes are properly encrypted as necessary.
+ // We only perform this if the cryptographer is ready. If not, these are
+ // re-encrypted at SetPassphrase time (via ReEncryptEverything). This logic
+ // covers the case where the nigori updated marked new datatypes for
+ // encryption, but didn't change the passphrase.
+ if (cryptographer->is_ready()) {
+ // Note that we don't bother to encrypt any data for which IS_UNSYNCED
+ // == false here. The machine that turned on encryption should know about
+ // and re-encrypt all synced data. It's possible it could get interrupted
+ // during this process, but we currently reencrypt everything at startup
+ // as well, so as soon as a client is restarted with this datatype marked
+ // for encryption, all the data should be updated as necessary.
+
+ // If this fails, something is wrong with the cryptographer, but there's
+ // nothing we can do about it here.
+ DVLOG(1) << "Received new nigori, encrypting unsynced changes.";
+ syncable::ProcessUnsyncedChangesForEncryption(trans, cryptographer);
+ }
+ }
+
+ // Only apply updates that we can decrypt. If we can't decrypt the update, it
+ // is likely because the passphrase has not arrived yet. Because the
+ // passphrase may not arrive within this GetUpdates, we can't just return
+ // conflict, else we try to perform normal conflict resolution prematurely or
+ // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
+ // treated as an unresolvable conflict. See the description in syncer_types.h.
+  // This prevents any unsynced changes from committing and postpones conflict
+ // resolution until all data can be decrypted.
+ if (specifics.has_encrypted() &&
+ !cryptographer->CanDecrypt(specifics.encrypted())) {
+ // We can't decrypt this node yet.
+ DVLOG(1) << "Received an undecryptable "
+ << syncable::ModelTypeToString(entry->GetServerModelType())
+ << " update, returning encryption_conflict.";
+ return CONFLICT_ENCRYPTION;
+ } else if (specifics.has_password() &&
+ entry->Get(UNIQUE_SERVER_TAG).empty()) {
+ // Passwords use their own legacy encryption scheme.
+ const sync_pb::PasswordSpecifics& password = specifics.password();
+ if (!cryptographer->CanDecrypt(password.encrypted())) {
+ DVLOG(1) << "Received an undecryptable password update, returning "
+ << "encryption_conflict.";
+ return CONFLICT_ENCRYPTION;
+ }
+ }
+
+ if (!entry->Get(SERVER_IS_DEL)) {
+ syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
+ Entry parent(trans, GET_BY_ID, new_parent);
+ // A note on non-directory parents:
+ // We catch most unfixable tree invariant errors at update receipt time,
+ // however we deal with this case here because we may receive the child
+ // first then the illegal parent. Instead of dealing with it twice in
+ // different ways we deal with it once here to reduce the amount of code and
+ // potential errors.
+ if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
+ return CONFLICT_HIERARCHY;
+ }
+ if (entry->Get(PARENT_ID) != new_parent) {
+ if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
+ DVLOG(1) << "Not updating item " << id
+ << ", illegal new parent (would cause loop).";
+ return CONFLICT_HIERARCHY;
+ }
+ }
+ } else if (entry->Get(IS_DIR)) {
+ Directory::ChildHandles handles;
+ trans->directory()->GetChildHandlesById(trans, id, &handles);
+ if (!handles.empty()) {
+ // If we have still-existing children, then we need to deal with
+ // them before we can process this change.
+ DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
+ return CONFLICT_HIERARCHY;
+ }
+ }
+
+ if (entry->Get(IS_UNSYNCED)) {
+ DVLOG(1) << "Skipping update, returning conflict for: " << id
+ << " ; it's unsynced.";
+ return CONFLICT_SIMPLE;
+ }
+
+ if (specifics.has_encrypted()) {
+ DVLOG(2) << "Received a decryptable "
+ << syncable::ModelTypeToString(entry->GetServerModelType())
+ << " update, applying normally.";
+ } else {
+ DVLOG(2) << "Received an unencrypted "
+ << syncable::ModelTypeToString(entry->GetServerModelType())
+ << " update, applying normally.";
+ }
+
+ SyncerUtil::UpdateLocalDataFromServerData(trans, entry);
+
+ return SUCCESS;
+}
+
+namespace {
+// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
+// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
+// protocol.
+void UpdateBookmarkSpecifics(const std::string& singleton_tag,
+ const std::string& url,
+ const std::string& favicon_bytes,
+ MutableEntry* local_entry) {
+ // In the new-style protocol, the server no longer sends bookmark info for
+ // the "google_chrome" folder. Mimic that here.
+ if (singleton_tag == "google_chrome")
+ return;
+ sync_pb::EntitySpecifics pb;
+ sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
+ if (!url.empty())
+ bookmark->set_url(url);
+ if (!favicon_bytes.empty())
+ bookmark->set_favicon(favicon_bytes);
+ local_entry->Put(SERVER_SPECIFICS, pb);
+}
+
+} // namespace
+
+// Pass in name and checksum because of UTF8 conversion.
+// NOTE(review): the "checksum" mention above is stale; only |name| is passed.
+// Copies the server-side state carried by |update| into |target|'s SERVER_*
+// fields, marking the entry IS_UNAPPLIED_UPDATE when the update is newer than
+// the local BASE_VERSION.
+// static
+void SyncerUtil::UpdateServerFieldsFromUpdate(
+ MutableEntry* target,
+ const SyncEntity& update,
+ const std::string& name) {
+ if (update.deleted()) {
+ if (target->Get(SERVER_IS_DEL)) {
+ // If we already think the item is server-deleted, we're done.
+ // Skipping these cases prevents our committed deletions from coming
+ // back and overriding subsequent undeletions. For non-deleted items,
+ // the version number check has a similar effect.
+ return;
+ }
+ // The server returns very lightweight replies for deletions, so we don't
+ // clobber a bunch of fields on delete.
+ target->Put(SERVER_IS_DEL, true);
+ if (!target->Get(UNIQUE_CLIENT_TAG).empty()) {
+ // Items identified by the client unique tag are undeletable; when
+ // they're deleted, they go back to version 0.
+ target->Put(SERVER_VERSION, 0);
+ } else {
+ // Otherwise, fake a server version by bumping the local number.
+ target->Put(SERVER_VERSION,
+ std::max(target->Get(SERVER_VERSION),
+ target->Get(BASE_VERSION)) + 1);
+ }
+ target->Put(IS_UNAPPLIED_UPDATE, true);
+ return;
+ }
+
+ // Non-deletion path: copy every server field carried by the update.
+ DCHECK(target->Get(ID) == update.id())
+ << "ID Changing not supported here";
+ target->Put(SERVER_PARENT_ID, update.parent_id());
+ target->Put(SERVER_NON_UNIQUE_NAME, name);
+ target->Put(SERVER_VERSION, update.version());
+ target->Put(SERVER_CTIME, ProtoTimeToTime(update.ctime()));
+ target->Put(SERVER_MTIME, ProtoTimeToTime(update.mtime()));
+ target->Put(SERVER_IS_DIR, update.IsFolder());
+ if (update.has_server_defined_unique_tag()) {
+ const std::string& tag = update.server_defined_unique_tag();
+ target->Put(UNIQUE_SERVER_TAG, tag);
+ }
+ if (update.has_client_defined_unique_tag()) {
+ const std::string& tag = update.client_defined_unique_tag();
+ target->Put(UNIQUE_CLIENT_TAG, tag);
+ }
+ // Store the datatype-specific part as a protobuf.
+ if (update.has_specifics()) {
+ DCHECK(update.GetModelType() != syncable::UNSPECIFIED)
+ << "Storing unrecognized datatype in sync database.";
+ target->Put(SERVER_SPECIFICS, update.specifics());
+ } else if (update.has_bookmarkdata()) {
+ // Legacy protocol response for bookmark data.
+ const SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
+ UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
+ bookmark.bookmark_url(),
+ bookmark.bookmark_favicon(),
+ target);
+ }
+ if (update.has_position_in_parent())
+ target->Put(SERVER_POSITION_IN_PARENT, update.position_in_parent());
+
+ // update.deleted() is known false here; deletions returned early above.
+ target->Put(SERVER_IS_DEL, update.deleted());
+ // We only mark the entry as unapplied if its version is greater than the
+ // local data. If we're processing the update that corresponds to one of our
+ // commit we don't apply it as time differences may occur.
+ if (update.version() > target->Get(BASE_VERSION)) {
+ target->Put(IS_UNAPPLIED_UPDATE, true);
+ }
+}
+
+// Ensures an Entry with |id| exists: when the lookup fails, constructing a
+// fresh update item is itself the side effect that creates the entry.
+// static
+void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
+                                const syncable::Id& id) {
+  syncable::MutableEntry existing(trans, GET_BY_ID, id);
+  if (existing.good())
+    return;
+  syncable::MutableEntry created(trans, syncable::CREATE_NEW_UPDATE_ITEM, id);
+}
+
+// Detaches the server-side state of |entry| into a brand-new entry that keeps
+// the original server id, while |entry| continues as a purely local item
+// under a freshly generated local id with BASE_VERSION 0.
+// static
+void SyncerUtil::SplitServerInformationIntoNewEntry(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry) {
+ // Remember the original (server-known) id before re-identifying |entry|.
+ syncable::Id id = entry->Get(ID);
+ ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
+ entry->Put(BASE_VERSION, 0);
+
+ // The new entry takes over the old id and all SERVER_* fields, which are
+ // then cleared from the local entry.
+ MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
+ CopyServerFields(entry, &new_entry);
+ ClearServerData(entry);
+
+ DVLOG(1) << "Splitting server information, local entry: " << *entry
+ << " server entry: " << new_entry;
+}
+
+// This function is called on an entry when we can update the user-facing data
+// from the server data. Copies SERVER_* fields into their user-facing
+// counterparts and clears IS_UNAPPLIED_UPDATE. |entry| must be an unapplied
+// update that is not locally unsynced (see DCHECKs below).
+// static
+void SyncerUtil::UpdateLocalDataFromServerData(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry) {
+ DCHECK(!entry->Get(IS_UNSYNCED));
+ DCHECK(entry->Get(IS_UNAPPLIED_UPDATE));
+
+ DVLOG(2) << "Updating entry : " << *entry;
+ // Start by setting the properties that determine the model_type.
+ entry->Put(SPECIFICS, entry->Get(SERVER_SPECIFICS));
+ // Clear the previous server specifics now that we're applying successfully.
+ entry->Put(BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
+ entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
+ // This strange dance around the IS_DEL flag avoids problems when setting
+ // the name.
+ // TODO(chron): Is this still an issue? Unit test this codepath.
+ if (entry->Get(SERVER_IS_DEL)) {
+ entry->Put(IS_DEL, true);
+ } else {
+ // Undeleted path: restore name/parent, then reposition among siblings
+ // according to the server-provided position.
+ entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
+ entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
+ CHECK(entry->Put(IS_DEL, false));
+ Id new_predecessor =
+ entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
+ CHECK(entry->PutPredecessor(new_predecessor))
+ << " Illegal predecessor after converting from server position.";
+ }
+
+ entry->Put(CTIME, entry->Get(SERVER_CTIME));
+ entry->Put(MTIME, entry->Get(SERVER_MTIME));
+ entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
+ // Final IS_DEL write makes the flag agree with the server value after the
+ // dance above.
+ entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
+ entry->Put(IS_UNAPPLIED_UPDATE, false);
+}
+
+// Decides whether an unsynced entry is actually eligible for commit.
+// Returns VERIFY_UNSYNCABLE for entries that must not be sent to the server
+// (the root, permanent items, never-committed deletions), VERIFY_OK otherwise.
+// static
+VerifyCommitResult SyncerUtil::ValidateCommitEntry(
+    syncable::Entry* entry) {
+  const syncable::Id entry_id = entry->Get(ID);
+  if (entry_id == entry->Get(PARENT_ID)) {
+    CHECK(entry_id.IsRoot()) << "Non-root item is self parenting." << *entry;
+    // If the root becomes unsynced it can cause us problems.
+    LOG(ERROR) << "Root item became unsynced " << *entry;
+    return VERIFY_UNSYNCABLE;
+  }
+  if (entry->IsRoot()) {
+    LOG(ERROR) << "Permanent item became unsynced " << *entry;
+    return VERIFY_UNSYNCABLE;
+  }
+  if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
+    // Deletions of items the server never saw can simply be dropped.
+    return VERIFY_UNSYNCABLE;
+  }
+  return VERIFY_OK;
+}
+
+// Appends |item|'s id, then the ids of its predecessors (walked near-to-far
+// via PREV_ID) that pass |inclusion_filter|, to |commit_ids|. Each appended
+// handle is recorded in |inserted_items|; the walk stops at the first
+// predecessor that fails the filter or was already inserted. Returns false if
+// |item| itself was already inserted.
+// static
+bool SyncerUtil::AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids) {
+
+ if (!inserted_items->insert(item->Get(META_HANDLE)).second)
+ return false;
+ commit_ids->push_back(item->Get(ID));
+ if (item->Get(IS_DEL))
+ return true; // Deleted items have no predecessors.
+
+ Id prev_id = item->Get(PREV_ID);
+ while (!prev_id.IsRoot()) {
+ Entry prev(trans, GET_BY_ID, prev_id);
+ CHECK(prev.good()) << "Bad id when walking predecessors.";
+ if (!prev.Get(inclusion_filter))
+ break;
+ if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
+ break;
+ commit_ids->push_back(prev_id);
+ prev_id = prev.Get(PREV_ID);
+ }
+ return true;
+}
+
+// Exactly like AddItemThenPredecessors, but the appended ids end up ordered
+// predecessors-first (far to near) with |item| last.
+// static
+void SyncerUtil::AddPredecessorsThenItem(
+    syncable::BaseTransaction* trans,
+    syncable::Entry* item,
+    syncable::IndexedBitField inclusion_filter,
+    syncable::MetahandleSet* inserted_items,
+    std::vector<syncable::Id>* commit_ids) {
+  // Remember where the item-first chain begins, then flip that suffix so the
+  // final ordering is predecessors-then-item.
+  const size_t first_added = commit_ids->size();
+  if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
+                               commit_ids)) {
+    return;
+  }
+  std::reverse(commit_ids->begin() + first_added, commit_ids->end());
+}
+
+// Clears IS_UNSYNCED on deleted entries whose deletion is already implied by
+// a successfully committed deleted ancestor listed in |deleted_folders|.
+// static
+void SyncerUtil::MarkDeletedChildrenSynced(
+ syncable::Directory* dir,
+ std::set<syncable::Id>* deleted_folders) {
+ // There's two options here.
+ // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
+ // of the deleted folders.
+ // 2. Take each folder and do a tree walk of all entries underneath it.
+ // #2 has a lower big O cost, but writing code to limit the time spent inside
+ // the transaction during each step is simpler with 1. Changing this decision
+ // may be sensible if this code shows up in profiling.
+ if (deleted_folders->empty())
+ return;
+ Directory::UnsyncedMetaHandles handles;
+ {
+ // Snapshot the unsynced handles under a short-lived read transaction.
+ ReadTransaction trans(FROM_HERE, dir);
+ dir->GetUnsyncedMetaHandles(&trans, &handles);
+ }
+ if (handles.empty())
+ return;
+ Directory::UnsyncedMetaHandles::iterator it;
+ for (it = handles.begin() ; it != handles.end() ; ++it) {
+ // Single transaction / entry we deal with.
+ WriteTransaction trans(FROM_HERE, SYNCER, dir);
+ MutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
+ continue;
+ // Walk up the (deleted) ancestor chain looking for a committed folder.
+ syncable::Id id = entry.Get(PARENT_ID);
+ while (id != trans.root_id()) {
+ if (deleted_folders->find(id) != deleted_folders->end()) {
+ // We've synced the deletion of this deleted entries parent.
+ entry.Put(IS_UNSYNCED, false);
+ break;
+ }
+ Entry parent(&trans, GET_BY_ID, id);
+ if (!parent.good() || !parent.Get(IS_DEL))
+ break;
+ id = parent.Get(PARENT_ID);
+ }
+ }
+}
+
+// Verifies an update for which no local entry was found. Returns
+// VERIFY_UNDECIDED when a local entry exists after all (not actually new),
+// VERIFY_SKIP for deletions of unknown items, and VERIFY_SUCCESS otherwise.
+// static
+VerifyResult SyncerUtil::VerifyNewEntry(
+    const SyncEntity& update,
+    syncable::Entry* target,
+    const bool deleted) {
+  // An existing local entry means this isn't a "new" update; let the other
+  // verification paths decide.
+  if (target->good())
+    return VERIFY_UNDECIDED;
+  // Deleting something we've never seen is a no-op we can safely skip.
+  return deleted ? VERIFY_SKIP : VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; check here for updates that break
+// consistency rules. Returns VERIFY_SUCCESS when the update may be applied,
+// VERIFY_SKIP when it should be ignored, and VERIFY_FAIL when the update
+// contradicts previously seen server or committed state.
+// static
+VerifyResult SyncerUtil::VerifyUpdateConsistency(
+ syncable::WriteTransaction* trans,
+ const SyncEntity& update,
+ syncable::MutableEntry* target,
+ const bool deleted,
+ const bool is_directory,
+ syncable::ModelType model_type) {
+
+ CHECK(target->good());
+
+ // If the update is a delete, we don't really need to worry at this stage.
+ if (deleted)
+ return VERIFY_SUCCESS;
+
+ if (model_type == syncable::UNSPECIFIED) {
+ // This update is to an item of a datatype we don't recognize. The server
+ // shouldn't have sent it to us. Throw it on the ground.
+ return VERIFY_SKIP;
+ }
+
+ if (target->Get(SERVER_VERSION) > 0) {
+ // Then we've had an update for this entry before.
+ // The update must agree with the previously seen server-side shape
+ // (directory-ness and model type) unless the local item is deleted.
+ if (is_directory != target->Get(SERVER_IS_DIR) ||
+ model_type != target->GetServerModelType()) {
+ if (target->Get(IS_DEL)) { // If we've deleted the item, we don't care.
+ return VERIFY_SKIP;
+ } else {
+ LOG(ERROR) << "Server update doesn't agree with previous updates. ";
+ LOG(ERROR) << " Entry: " << *target;
+ LOG(ERROR) << " Update: "
+ << SyncerProtoUtil::SyncEntityDebugString(update);
+ return VERIFY_FAIL;
+ }
+ }
+
+ if (!deleted && (target->Get(ID) == update.id()) &&
+ (target->Get(SERVER_IS_DEL) ||
+ (!target->Get(IS_UNSYNCED) && target->Get(IS_DEL) &&
+ target->Get(BASE_VERSION) > 0))) {
+ // An undelete. The latter case in the above condition is for
+ // when the server does not give us an update following the
+ // commit of a delete, before undeleting.
+ // Undeletion is common for items that reuse the client-unique tag.
+ VerifyResult result =
+ SyncerUtil::VerifyUndelete(trans, update, target);
+ if (VERIFY_UNDECIDED != result)
+ return result;
+ }
+ }
+ if (target->Get(BASE_VERSION) > 0) {
+ // We've committed this update in the past.
+ if (is_directory != target->Get(IS_DIR) ||
+ model_type != target->GetModelType()) {
+ LOG(ERROR) << "Server update doesn't agree with committed item. ";
+ LOG(ERROR) << " Entry: " << *target;
+ LOG(ERROR) << " Update: "
+ << SyncerProtoUtil::SyncEntityDebugString(update);
+ return VERIFY_FAIL;
+ }
+ if (target->Get(ID) == update.id()) {
+ // Stale update: we already hold a newer server version for this id.
+ if (target->Get(SERVER_VERSION) > update.version()) {
+ LOG(WARNING) << "We've already seen a more recent version.";
+ LOG(WARNING) << " Entry: " << *target;
+ LOG(WARNING) << " Update: "
+ << SyncerProtoUtil::SyncEntityDebugString(update);
+ return VERIFY_SKIP;
+ }
+ }
+ }
+ return VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; verify an update that seems to be
+// expressing an 'undelete'. Returns VERIFY_SUCCESS when it has handled the
+// situation, VERIFY_UNDECIDED when the caller should continue verification.
+// static
+VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
+ const SyncEntity& update,
+ syncable::MutableEntry* target) {
+ // TODO(nick): We hit this path for items deleted items that the server
+ // tells us to re-create; only deleted items with positive base versions
+ // will hit this path. However, it's not clear how such an undeletion
+ // would actually succeed on the server; in the protocol, a base
+ // version of 0 is required to undelete an object. This codepath
+ // should be deprecated in favor of client-tag style undeletion
+ // (where items go to version 0 when they're deleted), or else
+ // removed entirely (if this type of undeletion is indeed impossible).
+ CHECK(target->good());
+ DVLOG(1) << "Server update is attempting undelete. " << *target
+ << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
+ // Move the old one aside and start over. It's too tricky to get the old one
+ // back into a state that would pass CheckTreeInvariants().
+ if (target->Get(IS_DEL)) {
+ DCHECK(target->Get(UNIQUE_CLIENT_TAG).empty())
+ << "Doing move-aside undeletion on client-tagged item.";
+ // Re-identify the local tombstone under a fresh local id so the incoming
+ // update can be treated as a brand-new server entry.
+ target->Put(ID, trans->directory()->NextId());
+ target->Put(UNIQUE_CLIENT_TAG, "");
+ target->Put(BASE_VERSION, CHANGES_VERSION);
+ target->Put(SERVER_VERSION, 0);
+ return VERIFY_SUCCESS;
+ }
+ if (update.version() < target->Get(SERVER_VERSION)) {
+ LOG(WARNING) << "Update older than current server version for "
+ << *target << " Update:"
+ << SyncerProtoUtil::SyncEntityDebugString(update);
+ return VERIFY_SUCCESS; // Expected in new sync protocol.
+ }
+ return VERIFY_UNDECIDED;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/syncer_util.h b/sync/engine/syncer_util.h
new file mode 100644
index 0000000..026a237
--- /dev/null
+++ b/sync/engine/syncer_util.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utility functions manipulating syncable::Entries, intended for use by the
+// syncer.
+
+#ifndef SYNC_ENGINE_SYNCER_UTIL_H_
+#define SYNC_ENGINE_SYNCER_UTIL_H_
+#pragma once
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+
+class Cryptographer;
+class SyncEntity;
+
+// Stateless collection of helpers used by the syncer to manipulate
+// syncable::Entries during update application and commit building.
+class SyncerUtil {
+ public:
+ // Re-identifies |entry| as |new_id|, rewriting the parent ids of
+ // |children| to match. See also the two-argument overload below.
+ static void ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id,
+ syncable::Directory::ChildHandles* children);
+
+ // Returns the number of unsynced entries.
+ static int GetUnsyncedEntries(syncable::BaseTransaction* trans,
+ std::vector<int64> *handles);
+
+ // Convenience overload that looks up the children itself.
+ static void ChangeEntryIDAndUpdateChildren(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id);
+
+ // If the server sent down a client-tagged entry, or an entry whose
+ // commit response was lost, it is necessary to update a local entry
+ // with an ID that doesn't match the ID of the update. Here, we
+ // find the ID of such an entry, if it exists. This function may
+ // determine that |server_entry| should be dropped; if so, it returns
+ // the null ID -- callers must handle this case. When update application
+ // should proceed normally with a new local entry, this function will
+ // return server_entry.id(); the caller must create an entry with that
+ // ID. This function does not alter the database.
+ static syncable::Id FindLocalIdToUpdate(
+ syncable::BaseTransaction* trans,
+ const SyncEntity& server_entry);
+
+ // Attempts to apply the server-side state of |entry| locally, returning a
+ // status that may indicate success or one of several conflict kinds.
+ static UpdateAttemptResponse AttemptToUpdateEntry(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ ConflictResolver* resolver,
+ Cryptographer* cryptographer);
+
+ // Pass in name to avoid redundant UTF8 conversion.
+ static void UpdateServerFieldsFromUpdate(
+ syncable::MutableEntry* local_entry,
+ const SyncEntity& server_entry,
+ const std::string& name);
+
+ // Creates a new Entry iff no Entry exists with the given id.
+ static void CreateNewEntry(syncable::WriteTransaction *trans,
+ const syncable::Id& id);
+
+ // Moves |entry|'s server-side state into a new entry that keeps the old
+ // server id, leaving |entry| as a purely local item under a fresh id.
+ static void SplitServerInformationIntoNewEntry(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+
+ // This function is called on an entry when we can update the user-facing data
+ // from the server data.
+ static void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+
+ // Checks whether an unsynced entry is actually eligible for commit.
+ static VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry);
+
+ // Verifies an update for which no local entry was found.
+ static VerifyResult VerifyNewEntry(const SyncEntity& update,
+ syncable::Entry* target,
+ const bool deleted);
+
+ // Assumes we have an existing entry; check here for updates that break
+ // consistency rules.
+ static VerifyResult VerifyUpdateConsistency(syncable::WriteTransaction* trans,
+ const SyncEntity& update,
+ syncable::MutableEntry* target,
+ const bool deleted,
+ const bool is_directory,
+ syncable::ModelType model_type);
+
+ // Assumes we have an existing entry; verify an update that seems to be
+ // expressing an 'undelete'
+ static VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+ const SyncEntity& update,
+ syncable::MutableEntry* target);
+
+ // Append |item|, followed by a chain of its predecessors selected by
+ // |inclusion_filter|, to the |commit_ids| vector and tag them as included by
+ // storing in the set |inserted_items|. |inclusion_filter| (typically one of
+ // IS_UNAPPLIED_UPDATE or IS_UNSYNCED) selects which type of predecessors to
+ // include. Returns true if |item| was added, and false if it was already in
+ // the list.
+ //
+ // Use AddPredecessorsThenItem instead of this method if you want the
+ // item to be the last, rather than first, item appended.
+ static bool AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids);
+
+ // Exactly like AddItemThenPredecessors, except items are appended in the
+ // reverse (and generally more useful) order: a chain of predecessors from
+ // far to near, and finally the item.
+ static void AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids);
+
+ // Clears IS_UNSYNCED on deleted entries whose deletion is implied by a
+ // committed deleted ancestor in |deleted_folders|.
+ static void MarkDeletedChildrenSynced(
+ syncable::Directory* dir,
+ std::set<syncable::Id>* deleted_folders);
+
+ private:
+ // Pure-static utility class; never instantiated.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SyncerUtil);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCER_UTIL_H_
diff --git a/sync/engine/syncproto.h b/sync/engine/syncproto.h
new file mode 100644
index 0000000..101585f
--- /dev/null
+++ b/sync/engine/syncproto.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Wrappers to help us work with ids and protobuffers.
+
+#ifndef SYNC_ENGINE_SYNCPROTO_H_
+#define SYNC_ENGINE_SYNCPROTO_H_
+#pragma once
+
+#include "sync/protocol/sync.pb.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+
+// Mixin that layers syncable::Id-typed accessors over a protobuf message
+// |Base| exposing id_string()/set_id_string(). Adds no data members, so a
+// Base instance may be cast directly to its wrapper (see comment below).
+template<class Base>
+class IdWrapper : public Base {
+ public:
+ IdWrapper() {}
+ explicit IdWrapper(const Base& other) : Base(other) {
+ }
+ // Converts the wire-format id string into a syncable::Id.
+ syncable::Id id() const {
+ return syncable::Id::CreateFromServerId(Base::id_string());
+ }
+ void set_id(const syncable::Id& id) {
+ Base::set_id_string(id.GetServerId());
+ }
+};
+
+// These wrapper classes contain no data, so their super classes can be cast to
+// them directly.
+class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
+ public:
+ SyncEntity() {}
+ explicit SyncEntity(const sync_pb::SyncEntity& other)
+ : IdWrapper<sync_pb::SyncEntity>(other) {
+ }
+
+ // syncable::Id-typed accessors over the proto's parent id string fields.
+ void set_parent_id(const syncable::Id& id) {
+ set_parent_id_string(id.GetServerId());
+ }
+ syncable::Id parent_id() const {
+ return syncable::Id::CreateFromServerId(parent_id_string());
+ }
+ void set_old_parent_id(const syncable::Id& id) {
+ // Qualified call: invokes the base-class accessor, not this wrapper's
+ // own shadowing set_old_parent_id.
+ IdWrapper<sync_pb::SyncEntity>::set_old_parent_id(
+ id.GetServerId());
+ }
+ syncable::Id old_parent_id() const {
+ // Qualified call to the base accessor for the same shadowing reason.
+ return syncable::Id::CreateFromServerId(
+ sync_pb::SyncEntity::old_parent_id());
+ }
+ // Binary predicate helper to determine whether an Entity represents a folder
+ // or non-folder object. Use this instead of checking these properties
+ // directly, because the addition of bookmarks to the protobuf schema
+ // makes the check slightly more tricky.
+ bool IsFolder() const {
+ return ((has_folder() && folder()) ||
+ (has_bookmarkdata() && bookmarkdata().bookmark_folder()));
+ }
+
+ syncable::ModelType GetModelType() const {
+ return syncable::GetModelType(*this);
+ }
+};
+
+// Adds syncable::Id accessors to the commit entry response proto; holds no
+// data of its own, so the base proto can be cast to this type directly.
+class CommitResponse_EntryResponse
+ : public IdWrapper<sync_pb::CommitResponse_EntryResponse> {
+};
+
+class ClientToServerMessage : public sync_pb::ClientToServerMessage {
+ public:
+ ClientToServerMessage() {
+ // Writing the field's default value back makes has_protocol_version()
+ // true, so the version is always serialized on the wire.
+ set_protocol_version(protocol_version());
+ }
+};
+
+// These protocol types need no extra behavior; plain aliases suffice.
+typedef sync_pb::CommitMessage CommitMessage;
+typedef sync_pb::ClientToServerResponse ClientToServerResponse;
+typedef sync_pb::CommitResponse CommitResponse;
+typedef sync_pb::GetUpdatesResponse GetUpdatesResponse;
+typedef sync_pb::GetUpdatesMessage GetUpdatesMessage;
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_SYNCPROTO_H_
diff --git a/sync/engine/syncproto_unittest.cc b/sync/engine/syncproto_unittest.cc
new file mode 100644
index 0000000..2c0598b
--- /dev/null
+++ b/sync/engine/syncproto_unittest.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/syncproto.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+// Fixture for syncproto.h wrapper tests; no shared state needed.
+class SyncProtoTest : public testing::Test {
+};
+
+// The ClientToServerMessage constructor must force protocol_version to be
+// present so it is always serialized.
+TEST_F(SyncProtoTest, ProtocolVersionPresent) {
+ ClientToServerMessage csm;
+ EXPECT_TRUE(csm.has_protocol_version());
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/update_applicator.cc b/sync/engine/update_applicator.cc
new file mode 100644
index 0000000..5f510f3
--- /dev/null
+++ b/sync/engine/update_applicator.cc
@@ -0,0 +1,190 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/update_applicator.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/sessions/session_state.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+// Constructs an applicator over the unapplied-update handles in
+// [begin, end), restricted to |group_filter|; |routes| is copied so the
+// routing decision stays stable for the applicator's lifetime.
+UpdateApplicator::UpdateApplicator(ConflictResolver* resolver,
+ Cryptographer* cryptographer,
+ const UpdateIterator& begin,
+ const UpdateIterator& end,
+ const ModelSafeRoutingInfo& routes,
+ ModelSafeGroup group_filter)
+ : resolver_(resolver),
+ cryptographer_(cryptographer),
+ begin_(begin),
+ end_(end),
+ pointer_(begin),
+ group_filter_(group_filter),
+ progress_(false),
+ routing_info_(routes),
+ application_results_(end - begin) {
+ size_t item_count = end - begin;
+ DVLOG(1) << "UpdateApplicator created for " << item_count << " items.";
+}
+
+UpdateApplicator::~UpdateApplicator() {
+}
+
+// Returns true if there's more to do. Attempts to apply the update at
+// |pointer_|; successfully applied items are removed from the working range
+// while conflicting ones are kept for a later retry pass, which repeats as
+// long as some pass made progress.
+bool UpdateApplicator::AttemptOneApplication(
+ syncable::WriteTransaction* trans) {
+ // If there are no updates left to consider, we're done.
+ if (end_ == begin_)
+ return false;
+ if (pointer_ == end_) {
+ // End of a pass: retry the remaining (conflicting) items only if the
+ // previous pass applied at least one update.
+ if (!progress_)
+ return false;
+
+ DVLOG(1) << "UpdateApplicator doing additional pass.";
+ pointer_ = begin_;
+ progress_ = false;
+
+ // Clear the tracked failures to avoid double-counting.
+ application_results_.ClearConflicts();
+ }
+
+ syncable::Entry read_only(trans, syncable::GET_BY_HANDLE, *pointer_);
+ if (SkipUpdate(read_only)) {
+ Advance();
+ return true;
+ }
+
+ syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *pointer_);
+ UpdateAttemptResponse updateResponse = SyncerUtil::AttemptToUpdateEntry(
+ trans, &entry, resolver_, cryptographer_);
+ switch (updateResponse) {
+ case SUCCESS:
+ // Applied: drop the item from the range and note forward progress.
+ Advance();
+ progress_ = true;
+ application_results_.AddSuccess(entry.Get(syncable::ID));
+ break;
+ case CONFLICT_SIMPLE:
+ // Conflicts keep their slot (pointer_ moves past) for the retry pass.
+ pointer_++;
+ application_results_.AddSimpleConflict(entry.Get(syncable::ID));
+ break;
+ case CONFLICT_ENCRYPTION:
+ pointer_++;
+ application_results_.AddEncryptionConflict(entry.Get(syncable::ID));
+ break;
+ case CONFLICT_HIERARCHY:
+ pointer_++;
+ application_results_.AddHierarchyConflict(entry.Get(syncable::ID));
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ DVLOG(1) << "Apply Status for " << entry.Get(syncable::META_HANDLE)
+ << " is " << updateResponse;
+
+ return true;
+}
+
+// Removes the current item by swap-with-last: shrinks the range by one and
+// copies the former last handle into the current slot, so |pointer_| now
+// refers to a not-yet-processed item (order is not preserved).
+void UpdateApplicator::Advance() {
+ --end_;
+ *pointer_ = *end_;
+}
+
+// Returns true if |entry| should not be applied by this applicator, either
+// because it was mis-routed to the wrong group (NOTREACHED) or because its
+// type is passive and not currently enabled in the routing info.
+bool UpdateApplicator::SkipUpdate(const syncable::Entry& entry) {
+  const syncable::ModelType type = entry.GetServerModelType();
+  const ModelSafeGroup group = GetGroupForModelType(type, routing_info_);
+  // The set of updates passed to the UpdateApplicator should already
+  // be group-filtered.
+  if (group != group_filter_) {
+    NOTREACHED();
+    return true;
+  }
+  const bool type_is_routed = routing_info_.count(type) != 0;
+  if (group == GROUP_PASSIVE && !type_is_routed &&
+      type != syncable::UNSPECIFIED &&
+      type != syncable::TOP_LEVEL_FOLDER) {
+    DVLOG(1) << "Skipping update application, type not permitted.";
+    return true;
+  }
+  return false;
+}
+
+// True once the working range is empty and no simple conflicts remain.
+bool UpdateApplicator::AllUpdatesApplied() const {
+  return begin_ == end_ && application_results_.no_conflicts();
+}
+
+// Flushes the accumulated application results into the session's conflict
+// and update progress trackers. Must only be called once application has
+// finished (see DCHECK below).
+void UpdateApplicator::SaveProgressIntoSessionState(
+ sessions::ConflictProgress* conflict_progress,
+ sessions::UpdateProgress* update_progress) {
+ DCHECK(begin_ == end_ || ((pointer_ == end_) && !progress_))
+ << "SaveProgress called before updates exhausted.";
+
+ application_results_.SaveProgress(conflict_progress, update_progress);
+}
+
+// Pre-sizes the success list for the expected number of results to avoid
+// reallocation during application.
+UpdateApplicator::ResultTracker::ResultTracker(size_t num_results) {
+ successful_ids_.reserve(num_results);
+}
+
+UpdateApplicator::ResultTracker::~ResultTracker() {
+}
+
+// The four recorders below each append |id| to the per-outcome list that
+// SaveProgress later replays into the session state.
+void UpdateApplicator::ResultTracker::AddSimpleConflict(syncable::Id id) {
+ conflicting_ids_.push_back(id);
+}
+
+void UpdateApplicator::ResultTracker::AddEncryptionConflict(syncable::Id id) {
+ encryption_conflict_ids_.push_back(id);
+}
+
+void UpdateApplicator::ResultTracker::AddHierarchyConflict(syncable::Id id) {
+ hierarchy_conflict_ids_.push_back(id);
+}
+
+void UpdateApplicator::ResultTracker::AddSuccess(syncable::Id id) {
+ successful_ids_.push_back(id);
+}
+
+// Replays every recorded outcome into the session trackers: conflicts are
+// registered per kind, and successes both record an applied update and clear
+// any stale simple-conflict entry for the same id.
+void UpdateApplicator::ResultTracker::SaveProgress(
+ sessions::ConflictProgress* conflict_progress,
+ sessions::UpdateProgress* update_progress) {
+ vector<syncable::Id>::const_iterator i;
+ for (i = conflicting_ids_.begin(); i != conflicting_ids_.end(); ++i) {
+ conflict_progress->AddSimpleConflictingItemById(*i);
+ update_progress->AddAppliedUpdate(CONFLICT_SIMPLE, *i);
+ }
+ for (i = encryption_conflict_ids_.begin();
+ i != encryption_conflict_ids_.end(); ++i) {
+ conflict_progress->AddEncryptionConflictingItemById(*i);
+ update_progress->AddAppliedUpdate(CONFLICT_ENCRYPTION, *i);
+ }
+ for (i = hierarchy_conflict_ids_.begin();
+ i != hierarchy_conflict_ids_.end(); ++i) {
+ conflict_progress->AddHierarchyConflictingItemById(*i);
+ update_progress->AddAppliedUpdate(CONFLICT_HIERARCHY, *i);
+ }
+ for (i = successful_ids_.begin(); i != successful_ids_.end(); ++i) {
+ conflict_progress->EraseSimpleConflictingItemById(*i);
+ update_progress->AddAppliedUpdate(SUCCESS, *i);
+ }
+}
+
+// Drops all conflict records (successes are kept); called between retry
+// passes so conflicts are not double-counted.
+void UpdateApplicator::ResultTracker::ClearConflicts() {
+ conflicting_ids_.clear();
+ encryption_conflict_ids_.clear();
+ hierarchy_conflict_ids_.clear();
+}
+
+// Only simple conflicts are consulted here; encryption and hierarchy
+// conflict lists are intentionally ignored (see header comment).
+bool UpdateApplicator::ResultTracker::no_conflicts() const {
+ return conflicting_ids_.empty();
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/update_applicator.h b/sync/engine/update_applicator.h
new file mode 100644
index 0000000..5b3ee18
--- /dev/null
+++ b/sync/engine/update_applicator.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// An UpdateApplicator is used to iterate over a number of unapplied updates,
+// applying them to the client using the given syncer session.
+//
+// UpdateApplicator might resemble an iterator, but it actually keeps retrying
+// failed updates until no remaining updates can be successfully applied.
+
+#ifndef SYNC_ENGINE_UPDATE_APPLICATOR_H_
+#define SYNC_ENGINE_UPDATE_APPLICATOR_H_
+#pragma once
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+namespace sessions {
+class ConflictProgress;
+class UpdateProgress;
+}
+
+class ConflictResolver;
+class Cryptographer;
+
+class UpdateApplicator {
+ public:
+ // Iterator over unapplied-update metahandles in the directory index.
+ typedef syncable::Directory::UnappliedUpdateMetaHandles::iterator
+ UpdateIterator;
+
+ UpdateApplicator(ConflictResolver* resolver,
+ Cryptographer* cryptographer,
+ const UpdateIterator& begin,
+ const UpdateIterator& end,
+ const ModelSafeRoutingInfo& routes,
+ ModelSafeGroup group_filter);
+ ~UpdateApplicator();
+
+ // returns true if there's more we can do.
+ bool AttemptOneApplication(syncable::WriteTransaction* trans);
+ // return true if we've applied all updates.
+ bool AllUpdatesApplied() const;
+
+ // This class does not automatically save its progress into the
+ // SyncSession -- to get that to happen, call this method after update
+ // application is finished (i.e., when AttemptOneAllocation stops returning
+ // true).
+ void SaveProgressIntoSessionState(
+ sessions::ConflictProgress* conflict_progress,
+ sessions::UpdateProgress* update_progress);
+
+ private:
+ // Track the status of all applications.
+ class ResultTracker {
+ public:
+ explicit ResultTracker(size_t num_results);
+ virtual ~ResultTracker();
+ // Record one application outcome by entry id.
+ void AddSimpleConflict(syncable::Id);
+ void AddEncryptionConflict(syncable::Id);
+ void AddHierarchyConflict(syncable::Id);
+ void AddSuccess(syncable::Id);
+ // Flush all recorded outcomes into the session trackers.
+ void SaveProgress(sessions::ConflictProgress* conflict_progress,
+ sessions::UpdateProgress* update_progress);
+ // Drop conflict records between retry passes; successes are kept.
+ void ClearConflicts();
+
+ // Returns true iff conflicting_ids_ is empty. Does not check
+ // encryption_conflict_ids_.
+ bool no_conflicts() const;
+ private:
+ std::vector<syncable::Id> conflicting_ids_;
+ std::vector<syncable::Id> successful_ids_;
+ std::vector<syncable::Id> encryption_conflict_ids_;
+ std::vector<syncable::Id> hierarchy_conflict_ids_;
+ };
+
+ // If true, AttemptOneApplication will skip over |entry| and return true.
+ bool SkipUpdate(const syncable::Entry& entry);
+
+ // Adjusts the UpdateIterator members to move ahead by one update.
+ void Advance();
+
+ // Used to resolve conflicts when trying to apply updates.
+ ConflictResolver* const resolver_;
+
+ // Used to decrypt sensitive sync nodes.
+ Cryptographer* cryptographer_;
+
+ // Working range [begin_, end_); pointer_ is the next item to try.
+ UpdateIterator const begin_;
+ UpdateIterator end_;
+ UpdateIterator pointer_;
+ // Only updates belonging to this model-safe group are applied.
+ ModelSafeGroup group_filter_;
+ // Whether the current pass has applied at least one update.
+ bool progress_;
+
+ const ModelSafeRoutingInfo routing_info_;
+
+ // Track the result of the attempts to update applications.
+ ResultTracker application_results_;
+
+ DISALLOW_COPY_AND_ASSIGN(UpdateApplicator);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_UPDATE_APPLICATOR_H_
diff --git a/sync/engine/verify_updates_command.cc b/sync/engine/verify_updates_command.cc
new file mode 100644
index 0000000..ab555a2
--- /dev/null
+++ b/sync/engine/verify_updates_command.cc
@@ -0,0 +1,139 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/verify_updates_command.h"
+
+#include <string>
+
+#include "base/location.h"
+#include "sync/engine/syncer.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncer_util.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+using syncable::WriteTransaction;
+
+using syncable::GET_BY_ID;
+using syncable::SYNCER;
+
+VerifyUpdatesCommand::VerifyUpdatesCommand() {}
+VerifyUpdatesCommand::~VerifyUpdatesCommand() {}
+
+std::set<ModelSafeGroup> VerifyUpdatesCommand::GetGroupsToChange(
+ const sessions::SyncSession& session) const {
+ std::set<ModelSafeGroup> groups_with_updates;
+
+ const GetUpdatesResponse& updates =
+ session.status_controller().updates_response().get_updates();
+ for (int i = 0; i < updates.entries().size(); i++) {
+ groups_with_updates.insert(
+ GetGroupForModelType(syncable::GetModelType(updates.entries(i)),
+ session.routing_info()));
+ }
+
+ return groups_with_updates;
+}
+
+SyncerError VerifyUpdatesCommand::ModelChangingExecuteImpl(
+    sessions::SyncSession* session) {
+  DVLOG(1) << "Beginning Update Verification";
+  syncable::Directory* dir = session->context()->directory();
+  WriteTransaction trans(FROM_HERE, SYNCER, dir);
+  sessions::StatusController* status = session->mutable_status_controller();
+  const GetUpdatesResponse& updates = status->updates_response().get_updates();
+  int update_count = updates.entries().size();
+
+  DVLOG(1) << update_count << " entries to verify";
+  for (int i = 0; i < update_count; i++) {
+    const SyncEntity& update =  // Downcast: SyncEntity only adds methods.
+        static_cast<const SyncEntity&>(updates.entries(i));
+    ModelSafeGroup g = GetGroupForModelType(update.GetModelType(),
+                                            session->routing_info());
+    if (g != status->group_restriction())
+      continue;
+
+    VerifyUpdateResult result = VerifyUpdate(&trans, update,
+                                             session->routing_info());
+    status->mutable_update_progress()->AddVerifyResult(result.value, update);
+    status->increment_num_updates_downloaded_by(1);
+    if (update.deleted())
+      status->increment_num_tombstone_updates_downloaded_by(1);
+  }
+
+  return SYNCER_OK;
+}
+
+namespace {
+// In the event that IDs match, but tags differ AttemptReuniteClient tag
+// will have refused to unify the update.
+// We should not attempt to apply it at all since it violates consistency
+// rules.
+VerifyResult VerifyTagConsistency(const SyncEntity& entry,
+ const syncable::MutableEntry& same_id) {
+ if (entry.has_client_defined_unique_tag() &&
+ entry.client_defined_unique_tag() !=
+ same_id.Get(syncable::UNIQUE_CLIENT_TAG)) {
+ return VERIFY_FAIL;
+ }
+ return VERIFY_UNDECIDED;
+}
+} // namespace
+
+VerifyUpdatesCommand::VerifyUpdateResult VerifyUpdatesCommand::VerifyUpdate(
+ syncable::WriteTransaction* trans, const SyncEntity& entry,
+ const ModelSafeRoutingInfo& routes) {
+ syncable::Id id = entry.id();
+ VerifyUpdateResult result = {VERIFY_FAIL, GROUP_PASSIVE};
+
+ const bool deleted = entry.has_deleted() && entry.deleted();
+ const bool is_directory = entry.IsFolder();
+ const syncable::ModelType model_type = entry.GetModelType();
+
+ if (!id.ServerKnows()) {
+ LOG(ERROR) << "Illegal negative id in received updates";
+ return result;
+ }
+ {
+ const std::string name = SyncerProtoUtil::NameFromSyncEntity(entry);
+ if (name.empty() && !deleted) {
+ LOG(ERROR) << "Zero length name in non-deleted update";
+ return result;
+ }
+ }
+
+ syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+ result.value = SyncerUtil::VerifyNewEntry(entry, &same_id, deleted);
+
+ syncable::ModelType placement_type = !deleted ? entry.GetModelType()
+ : same_id.good() ? same_id.GetModelType() : syncable::UNSPECIFIED;
+ result.placement = GetGroupForModelType(placement_type, routes);
+
+ if (VERIFY_UNDECIDED == result.value) {
+ result.value = VerifyTagConsistency(entry, same_id);
+ }
+
+ if (VERIFY_UNDECIDED == result.value) {
+ if (deleted)
+ result.value = VERIFY_SUCCESS;
+ }
+
+ // If we have an existing entry, we check here for updates that break
+ // consistency rules.
+ if (VERIFY_UNDECIDED == result.value) {
+ result.value = SyncerUtil::VerifyUpdateConsistency(trans, entry, &same_id,
+ deleted, is_directory, model_type);
+ }
+
+ if (VERIFY_UNDECIDED == result.value)
+ result.value = VERIFY_SUCCESS; // No news is good news.
+
+ return result; // This might be VERIFY_SUCCESS as well
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/verify_updates_command.h b/sync/engine/verify_updates_command.h
new file mode 100644
index 0000000..37a0b1f
--- /dev/null
+++ b/sync/engine/verify_updates_command.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+#define SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "sync/engine/model_changing_syncer_command.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncproto.h"
+
+namespace syncable {
+class WriteTransaction;
+}
+
+namespace browser_sync {
+
+// Verifies the response from a GetUpdates request. All invalid updates will be
+// noted in the SyncSession after this command is executed.
+class VerifyUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ VerifyUpdatesCommand();
+ virtual ~VerifyUpdatesCommand();
+
+ protected:
+ // ModelChangingSyncerCommand implementation.
+ virtual std::set<ModelSafeGroup> GetGroupsToChange(
+ const sessions::SyncSession& session) const OVERRIDE;
+ virtual SyncerError ModelChangingExecuteImpl(
+ sessions::SyncSession* session) OVERRIDE;
+
+ private:
+ struct VerifyUpdateResult {
+ VerifyResult value;
+ ModelSafeGroup placement;
+ };
+ VerifyUpdateResult VerifyUpdate(syncable::WriteTransaction* trans,
+ const SyncEntity& entry,
+ const ModelSafeRoutingInfo& routes);
+ DISALLOW_COPY_AND_ASSIGN(VerifyUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
diff --git a/sync/engine/verify_updates_command_unittest.cc b/sync/engine/verify_updates_command_unittest.cc
new file mode 100644
index 0000000..20a6cf6
--- /dev/null
+++ b/sync/engine/verify_updates_command_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/location.h"
+#include "sync/engine/verify_updates_command.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/syncer_command_test.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::StatusController;
+using std::string;
+using syncable::Entry;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::ReadTransaction;
+using syncable::UNITTEST;
+using syncable::WriteTransaction;
+
+class VerifyUpdatesCommandTest : public SyncerCommandTest {
+ public:
+  virtual void SetUp() OVERRIDE {
+    workers()->clear();
+    mutable_routing_info()->clear();
+    workers()->push_back(make_scoped_refptr(new FakeModelWorker(GROUP_DB)));
+    workers()->push_back(make_scoped_refptr(new FakeModelWorker(GROUP_UI)));
+    (*mutable_routing_info())[syncable::PREFERENCES] = GROUP_UI;
+    (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_UI;
+    (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
+    SyncerCommandTest::SetUp();
+  }
+
+  // Creates a local entry with server id |item_id| under |parent_id|.
+  void CreateLocalItem(const std::string& item_id,
+                       const std::string& parent_id,
+                       const syncable::ModelType& type) {
+    WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+    MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
+                       Id::CreateFromServerId(item_id));
+    ASSERT_TRUE(entry.good());
+
+    entry.Put(syncable::BASE_VERSION, 1);
+    entry.Put(syncable::SERVER_VERSION, 1);
+    entry.Put(syncable::NON_UNIQUE_NAME, item_id);
+    entry.Put(syncable::PARENT_ID, Id::CreateFromServerId(parent_id));
+    sync_pb::EntitySpecifics default_specifics;
+    AddDefaultFieldValue(type, &default_specifics);
+    entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
+  }
+
+  // Appends an update entity with server id |id| to |updates|.
+  void AddUpdate(GetUpdatesResponse* updates,
+                 const std::string& id, const std::string& parent,
+                 const syncable::ModelType& type) {
+    sync_pb::SyncEntity* e = updates->add_entries();
+    e->set_id_string(id);  // Was hard-coded "b1", which ignored |id|.
+    e->set_parent_id_string(parent);
+    e->set_non_unique_name(id);
+    e->set_name(id);
+    AddDefaultFieldValue(type, e->mutable_specifics());
+  }
+
+  VerifyUpdatesCommand command_;  // Command under test.
+};
+
+TEST_F(VerifyUpdatesCommandTest, AllVerified) {
+  string root = syncable::GetNullId().GetServerId();
+
+  CreateLocalItem("b1", root, syncable::BOOKMARKS);
+  CreateLocalItem("b2", root, syncable::BOOKMARKS);
+  CreateLocalItem("p1", root, syncable::PREFERENCES);
+  CreateLocalItem("a1", root, syncable::AUTOFILL);
+
+  ExpectNoGroupsToChange(command_);
+
+  GetUpdatesResponse* updates = session()->mutable_status_controller()->
+      mutable_updates_response()->mutable_get_updates();
+  AddUpdate(updates, "b1", root, syncable::BOOKMARKS);
+  AddUpdate(updates, "b2", root, syncable::BOOKMARKS);
+  AddUpdate(updates, "p1", root, syncable::PREFERENCES);
+  AddUpdate(updates, "a1", root, syncable::AUTOFILL);
+
+  ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
+
+  command_.ExecuteImpl(session());
+
+  StatusController* status = session()->mutable_status_controller();
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_UI);
+    ASSERT_TRUE(status->update_progress());
+    EXPECT_EQ(3, status->update_progress()->VerifiedUpdatesSize());
+  }
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_DB);
+    ASSERT_TRUE(status->update_progress());
+    EXPECT_EQ(1, status->update_progress()->VerifiedUpdatesSize());
+  }
+  {
+    sessions::ScopedModelSafeGroupRestriction r(status, GROUP_PASSIVE);
+    EXPECT_FALSE(status->update_progress());
+  }
+}
+
+}  // namespace browser_sync
diff --git a/sync/js/DEPS b/sync/js/DEPS
new file mode 100644
index 0000000..82d1d29
--- /dev/null
+++ b/sync/js/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sync/util",
+]
diff --git a/sync/js/README.js b/sync/js/README.js
new file mode 100644
index 0000000..0fbfa66
--- /dev/null
+++ b/sync/js/README.js
@@ -0,0 +1,48 @@
+Overview of chrome://sync-internals
+-----------------------------------
+
+This note explains how chrome://sync-internals (also known as
+about:sync) interacts with the sync service/backend.
+
+Basically, chrome://sync-internals sends messages to the sync backend
+and the sync backend sends the reply asynchronously. The sync backend
+also asynchronously raises events which chrome://sync-internals
+listens to.
+
+A message and its reply each have a name and a list of arguments,
+which is basically a wrapper around an immutable ListValue.
+
+An event has a name and a details object, which is represented by a
+JsEventDetails (js_event_details.h) object, which is basically a
+wrapper around an immutable DictionaryValue.
+
+Message/event flow
+------------------
+
+chrome://sync-internals is represented by SyncInternalsUI
+(chrome/browser/ui/webui/sync_internals_ui.h). SyncInternalsUI
+interacts with the sync service via a JsController (js_controller.h)
+object, which has a ProcessJsMessage() method that just delegates to
+an underlying JsBackend instance (js_backend.h). The SyncInternalsUI
+object also registers itself (as a JsEventHandler
+[js_event_handler.h]) to the JsController object, and any events
+raised by the JsBackend are propagated to the JsController and then to
+the registered JsEventHandlers.
+
+ProcessJsMessage() takes a WeakHandle (weak_handle.h) to a
+JsReplyHandler (js_reply_handler.h), which the backend uses to send
+replies safely across threads. SyncInternalsUI implements
+JsReplyHandler, so it simply passes itself as the reply handler when
+it calls ProcessJsMessage() on the JsController.
+
+The following objects live on the UI thread:
+
+- SyncInternalsUI (implements JsEventHandler, JsReplyHandler)
+- SyncJsController (implements JsController, JsEventHandler)
+
+The following objects live on the sync thread:
+
+- SyncManager::SyncInternal (implements JsBackend)
+
+Of course, none of these objects need to know where the other objects
+live, since they interact via WeakHandles.
diff --git a/sync/js/js_arg_list.cc b/sync/js/js_arg_list.cc
new file mode 100644
index 0000000..d8ab8e2
--- /dev/null
+++ b/sync/js/js_arg_list.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/js_arg_list.h"
+
+#include "base/json/json_writer.h"
+
+namespace browser_sync {
+
+JsArgList::JsArgList() {}
+
+JsArgList::JsArgList(ListValue* args) : args_(args) {}
+
+JsArgList::~JsArgList() {}
+
+const ListValue& JsArgList::Get() const {
+ return args_.Get();
+}
+
+std::string JsArgList::ToString() const {
+ std::string str;
+ base::JSONWriter::Write(&Get(), false, &str);
+ return str;
+}
+
+} // namespace browser_sync
diff --git a/sync/js/js_arg_list.h b/sync/js/js_arg_list.h
new file mode 100644
index 0000000..aab49d9
--- /dev/null
+++ b/sync/js/js_arg_list.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_ARG_LIST_H_
+#define SYNC_JS_JS_ARG_LIST_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+#include "base/values.h"
+#include "sync/util/immutable.h"
+
+namespace browser_sync {
+
+// A thin wrapper around Immutable<ListValue>. Used for passing
+// around argument lists to different threads.
+class JsArgList {
+ public:
+ // Uses an empty argument list.
+ JsArgList();
+
+ // Takes over the data in |args|, leaving |args| empty.
+ explicit JsArgList(ListValue* args);
+
+ ~JsArgList();
+
+ const ListValue& Get() const;
+
+ std::string ToString() const;
+
+ // Copy constructor and assignment operator welcome.
+
+ private:
+ typedef Immutable<ListValue, HasSwapMemFnByPtr<ListValue> >
+ ImmutableListValue;
+ ImmutableListValue args_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_ARG_LIST_H_
diff --git a/sync/js/js_arg_list_unittest.cc b/sync/js/js_arg_list_unittest.cc
new file mode 100644
index 0000000..3f00e4b
--- /dev/null
+++ b/sync/js/js_arg_list_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/js_arg_list.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+class JsArgListTest : public testing::Test {};
+
+TEST_F(JsArgListTest, EmptyList) {
+ JsArgList arg_list;
+ EXPECT_TRUE(arg_list.Get().empty());
+ EXPECT_EQ("[]", arg_list.ToString());
+}
+
+TEST_F(JsArgListTest, FromList) {
+ scoped_ptr<ListValue> list(new ListValue());
+ list->Append(Value::CreateBooleanValue(false));
+ list->Append(Value::CreateIntegerValue(5));
+ DictionaryValue* dict = new DictionaryValue();
+ list->Append(dict);
+ dict->SetString("foo", "bar");
+ dict->Set("baz", new ListValue());
+
+ scoped_ptr<ListValue> list_copy(list->DeepCopy());
+
+ JsArgList arg_list(list.get());
+
+ // |arg_list| should take over |list|'s data.
+ EXPECT_TRUE(list->empty());
+ EXPECT_TRUE(arg_list.Get().Equals(list_copy.get()));
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/js/js_backend.h b/sync/js/js_backend.h
new file mode 100644
index 0000000..3c7f89a
--- /dev/null
+++ b/sync/js/js_backend.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_BACKEND_H_
+#define SYNC_JS_JS_BACKEND_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+namespace browser_sync {
+
+class JsArgList;
+class JsEventHandler;
+class JsReplyHandler;
+template <typename T> class WeakHandle;
+
+// Interface representing the backend of chrome://sync-internals. A
+// JsBackend can handle messages and can emit events to a
+// JsEventHandler.
+class JsBackend {
+ public:
+ // Starts emitting events to the given handler, if initialized.
+ virtual void SetJsEventHandler(
+ const WeakHandle<JsEventHandler>& event_handler) = 0;
+
+ // Processes the given message and replies via the given handler, if
+ // initialized.
+ virtual void ProcessJsMessage(
+ const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler) = 0;
+
+ protected:
+ virtual ~JsBackend() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_BACKEND_H_
diff --git a/sync/js/js_controller.h b/sync/js/js_controller.h
new file mode 100644
index 0000000..d0deb20
--- /dev/null
+++ b/sync/js/js_controller.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_CONTROLLER_H_
+#define SYNC_JS_JS_CONTROLLER_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+namespace browser_sync {
+
+class JsArgList;
+class JsEventHandler;
+class JsReplyHandler;
+template <typename T> class WeakHandle;
+
+// An interface for objects that JsEventHandlers directly interact
+// with. JsEventHandlers can add themselves to receive events and
+// also send messages which will eventually reach the backend.
+class JsController {
+ public:
+ // Adds an event handler which will start receiving JS events (not
+ // immediately, so this can be called in the handler's constructor).
+ // Multiple event handlers are supported, but each event handler
+ // must be added at most once.
+ //
+ // Ideally, we'd take WeakPtrs, but we need the raw pointer values
+ // to be able to look them up for removal.
+ virtual void AddJsEventHandler(JsEventHandler* event_handler) = 0;
+
+ // Removes the given event handler if it has been added. It will
+ // immediately stop receiving any JS events.
+ virtual void RemoveJsEventHandler(JsEventHandler* event_handler) = 0;
+
+ // Processes a JS message. The reply (if any) will be sent to
+ // |reply_handler| if it is initialized.
+ virtual void ProcessJsMessage(
+ const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler) = 0;
+
+ protected:
+ virtual ~JsController() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_CONTROLLER_H_
diff --git a/sync/js/js_event_details.cc b/sync/js/js_event_details.cc
new file mode 100644
index 0000000..693ba4c
--- /dev/null
+++ b/sync/js/js_event_details.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/js_event_details.h"
+
+#include "base/json/json_writer.h"
+
+namespace browser_sync {
+
+JsEventDetails::JsEventDetails() {}
+
+JsEventDetails::JsEventDetails(DictionaryValue* details) : details_(details) {}
+
+JsEventDetails::~JsEventDetails() {}
+
+const DictionaryValue& JsEventDetails::Get() const {
+ return details_.Get();
+}
+
+std::string JsEventDetails::ToString() const {
+ std::string str;
+ base::JSONWriter::Write(&Get(), false, &str);
+ return str;
+}
+
+} // namespace browser_sync
diff --git a/sync/js/js_event_details.h b/sync/js/js_event_details.h
new file mode 100644
index 0000000..df59851
--- /dev/null
+++ b/sync/js/js_event_details.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_EVENT_DETAILS_H_
+#define SYNC_JS_JS_EVENT_DETAILS_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+#include "base/values.h"
+#include "sync/util/immutable.h"
+
+namespace browser_sync {
+
+// A thin wrapper around Immutable<DictionaryValue>. Used for passing
+// around event details to different threads.
+class JsEventDetails {
+ public:
+ // Uses an empty dictionary.
+ JsEventDetails();
+
+ // Takes over the data in |details|, leaving |details| empty.
+ explicit JsEventDetails(DictionaryValue* details);
+
+ ~JsEventDetails();
+
+ const DictionaryValue& Get() const;
+
+ std::string ToString() const;
+
+ // Copy constructor and assignment operator welcome.
+
+ private:
+ typedef Immutable<DictionaryValue, HasSwapMemFnByPtr<DictionaryValue> >
+ ImmutableDictionaryValue;
+
+ ImmutableDictionaryValue details_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_EVENT_DETAILS_H_
diff --git a/sync/js/js_event_details_unittest.cc b/sync/js/js_event_details_unittest.cc
new file mode 100644
index 0000000..01cea19
--- /dev/null
+++ b/sync/js/js_event_details_unittest.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/js_event_details.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+class JsEventDetailsTest : public testing::Test {};
+
+TEST_F(JsEventDetailsTest, EmptyList) {
+ JsEventDetails details;
+ EXPECT_TRUE(details.Get().empty());
+ EXPECT_EQ("{}", details.ToString());
+}
+
+TEST_F(JsEventDetailsTest, FromDictionary) {
+ DictionaryValue dict;
+ dict.SetString("foo", "bar");
+ dict.Set("baz", new ListValue());
+
+ scoped_ptr<DictionaryValue> dict_copy(dict.DeepCopy());
+
+ JsEventDetails details(&dict);
+
+ // |details| should take over |dict|'s data.
+ EXPECT_TRUE(dict.empty());
+ EXPECT_TRUE(details.Get().Equals(dict_copy.get()));
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/js/js_event_handler.h b/sync/js/js_event_handler.h
new file mode 100644
index 0000000..ce15903
--- /dev/null
+++ b/sync/js/js_event_handler.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_EVENT_HANDLER_H_
+#define SYNC_JS_JS_EVENT_HANDLER_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+namespace browser_sync {
+
+class JsEventDetails;
+
+// An interface for objects that handle Javascript events (e.g.,
+// WebUIs).
+class JsEventHandler {
+ public:
+ virtual void HandleJsEvent(
+ const std::string& name, const JsEventDetails& details) = 0;
+
+ protected:
+ virtual ~JsEventHandler() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_EVENT_HANDLER_H_
diff --git a/sync/js/js_reply_handler.h b/sync/js/js_reply_handler.h
new file mode 100644
index 0000000..3b10309
--- /dev/null
+++ b/sync/js/js_reply_handler.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_REPLY_HANDLER_H_
+#define SYNC_JS_JS_REPLY_HANDLER_H_
+#pragma once
+
+// See README.js for design comments.
+
+#include <string>
+
+namespace browser_sync {
+
+class JsArgList;
+
+// An interface for objects that handle Javascript message replies
+// (e.g., WebUIs).
+class JsReplyHandler {
+ public:
+ virtual void HandleJsReply(
+ const std::string& name, const JsArgList& args) = 0;
+
+ protected:
+ virtual ~JsReplyHandler() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_REPLY_HANDLER_H_
diff --git a/sync/js/js_test_util.cc b/sync/js/js_test_util.cc
new file mode 100644
index 0000000..257a947
--- /dev/null
+++ b/sync/js/js_test_util.cc
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/js_test_util.h"
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "sync/js/js_arg_list.h"
+#include "sync/js/js_event_details.h"
+
+namespace browser_sync {
+
+void PrintTo(const JsArgList& args, ::std::ostream* os) {
+ *os << args.ToString();
+}
+
+void PrintTo(const JsEventDetails& details, ::std::ostream* os) {
+ *os << details.ToString();
+}
+
+namespace {
+
+// Matcher implementation for HasArgs().
+class HasArgsMatcher
+ : public ::testing::MatcherInterface<const JsArgList&> {
+ public:
+ explicit HasArgsMatcher(const JsArgList& expected_args)
+ : expected_args_(expected_args) {}
+
+ virtual ~HasArgsMatcher() {}
+
+ virtual bool MatchAndExplain(
+ const JsArgList& args,
+ ::testing::MatchResultListener* listener) const {
+ // No need to annotate listener since we already define PrintTo().
+ return args.Get().Equals(&expected_args_.Get());
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "has args " << expected_args_.ToString();
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't have args " << expected_args_.ToString();
+ }
+
+ private:
+ const JsArgList expected_args_;
+
+ DISALLOW_COPY_AND_ASSIGN(HasArgsMatcher);
+};
+
+// Matcher implementation for HasDetails().
+class HasDetailsMatcher
+ : public ::testing::MatcherInterface<const JsEventDetails&> {
+ public:
+ explicit HasDetailsMatcher(const JsEventDetails& expected_details)
+ : expected_details_(expected_details) {}
+
+ virtual ~HasDetailsMatcher() {}
+
+ virtual bool MatchAndExplain(
+ const JsEventDetails& details,
+ ::testing::MatchResultListener* listener) const {
+ // No need to annotate listener since we already define PrintTo().
+ return details.Get().Equals(&expected_details_.Get());
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "has details " << expected_details_.ToString();
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't have details " << expected_details_.ToString();
+ }
+
+ private:
+ const JsEventDetails expected_details_;
+
+ DISALLOW_COPY_AND_ASSIGN(HasDetailsMatcher);
+};
+
+} // namespace
+
+::testing::Matcher<const JsArgList&> HasArgs(const JsArgList& expected_args) {
+ return ::testing::MakeMatcher(new HasArgsMatcher(expected_args));
+}
+
+::testing::Matcher<const JsArgList&> HasArgsAsList(
+ const ListValue& expected_args) {
+ scoped_ptr<ListValue> expected_args_copy(expected_args.DeepCopy());
+ return HasArgs(JsArgList(expected_args_copy.get()));
+}
+
+::testing::Matcher<const JsEventDetails&> HasDetails(
+ const JsEventDetails& expected_details) {
+ return ::testing::MakeMatcher(new HasDetailsMatcher(expected_details));
+}
+
+::testing::Matcher<const JsEventDetails&> HasDetailsAsDictionary(
+ const DictionaryValue& expected_details) {
+ scoped_ptr<DictionaryValue> expected_details_copy(
+ expected_details.DeepCopy());
+ return HasDetails(JsEventDetails(expected_details_copy.get()));
+}
+
+MockJsBackend::MockJsBackend() {}
+
+MockJsBackend::~MockJsBackend() {}
+
+WeakHandle<JsBackend> MockJsBackend::AsWeakHandle() {
+ return MakeWeakHandle(AsWeakPtr());
+}
+
+MockJsController::MockJsController() {}
+
+MockJsController::~MockJsController() {}
+
+MockJsEventHandler::MockJsEventHandler() {}
+
+WeakHandle<JsEventHandler> MockJsEventHandler::AsWeakHandle() {
+ return MakeWeakHandle(AsWeakPtr());
+}
+
+MockJsEventHandler::~MockJsEventHandler() {}
+
+MockJsReplyHandler::MockJsReplyHandler() {}
+
+MockJsReplyHandler::~MockJsReplyHandler() {}
+
+WeakHandle<JsReplyHandler> MockJsReplyHandler::AsWeakHandle() {
+ return MakeWeakHandle(AsWeakPtr());
+}
+
+} // namespace browser_sync
+
diff --git a/sync/js/js_test_util.h b/sync/js/js_test_util.h
new file mode 100644
index 0000000..ef6bec7
--- /dev/null
+++ b/sync/js/js_test_util.h
@@ -0,0 +1,109 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_JS_TEST_UTIL_H_
+#define SYNC_JS_JS_TEST_UTIL_H_
+#pragma once
+
+#include <ostream>
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "sync/js/js_backend.h"
+#include "sync/js/js_controller.h"
+#include "sync/js/js_event_handler.h"
+#include "sync/js/js_reply_handler.h"
+#include "sync/util/weak_handle.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+class DictionaryValue;
+class ListValue;
+}
+
+namespace browser_sync {
+
+class JsArgList;
+class JsEventDetails;
+
+// Defined for googletest. Equivalent to "*os << args.ToString()".
+void PrintTo(const JsArgList& args, ::std::ostream* os);
+void PrintTo(const JsEventDetails& details, ::std::ostream* os);
+
+// A gmock matcher for JsArgList. Use like:
+//
+// EXPECT_CALL(mock, HandleJsReply("foo", HasArgs(expected_args)));
+::testing::Matcher<const JsArgList&> HasArgs(const JsArgList& expected_args);
+
+// Like HasArgs() but takes a ListValue instead.
+::testing::Matcher<const JsArgList&> HasArgsAsList(
+ const base::ListValue& expected_args);
+
+// A gmock matcher for JsEventDetails. Use like:
+//
+// EXPECT_CALL(mock, HandleJsEvent("foo", HasArgs(expected_details)));
+::testing::Matcher<const JsEventDetails&> HasDetails(
+ const JsEventDetails& expected_details);
+
+// Like HasDetails() but takes a DictionaryValue instead.
+::testing::Matcher<const JsEventDetails&> HasDetailsAsDictionary(
+ const base::DictionaryValue& expected_details);
+
+// Mocks.
+
+class MockJsBackend : public JsBackend,
+ public base::SupportsWeakPtr<MockJsBackend> {
+ public:
+ MockJsBackend();
+ virtual ~MockJsBackend();
+
+ WeakHandle<JsBackend> AsWeakHandle();
+
+ MOCK_METHOD1(SetJsEventHandler, void(const WeakHandle<JsEventHandler>&));
+ MOCK_METHOD3(ProcessJsMessage, void(const ::std::string&, const JsArgList&,
+ const WeakHandle<JsReplyHandler>&));
+};
+
+class MockJsController : public JsController,
+ public base::SupportsWeakPtr<MockJsController> {
+ public:
+ MockJsController();
+ virtual ~MockJsController();
+
+ MOCK_METHOD1(AddJsEventHandler, void(JsEventHandler*));
+ MOCK_METHOD1(RemoveJsEventHandler, void(JsEventHandler*));
+ MOCK_METHOD3(ProcessJsMessage,
+ void(const ::std::string&, const JsArgList&,
+ const WeakHandle<JsReplyHandler>&));
+};
+
+class MockJsEventHandler
+ : public JsEventHandler,
+ public base::SupportsWeakPtr<MockJsEventHandler> {
+ public:
+ MockJsEventHandler();
+ virtual ~MockJsEventHandler();
+
+ WeakHandle<JsEventHandler> AsWeakHandle();
+
+ MOCK_METHOD2(HandleJsEvent,
+ void(const ::std::string&, const JsEventDetails&));
+};
+
+class MockJsReplyHandler
+ : public JsReplyHandler,
+ public base::SupportsWeakPtr<MockJsReplyHandler> {
+ public:
+ MockJsReplyHandler();
+ virtual ~MockJsReplyHandler();
+
+ WeakHandle<JsReplyHandler> AsWeakHandle();
+
+ MOCK_METHOD2(HandleJsReply,
+ void(const ::std::string&, const JsArgList&));
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_JS_TEST_UTIL_H_
diff --git a/sync/js/sync_js_controller.cc b/sync/js/sync_js_controller.cc
new file mode 100644
index 0000000..0448486
--- /dev/null
+++ b/sync/js/sync_js_controller.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/sync_js_controller.h"
+
+#include "base/location.h"
+#include "sync/js/js_backend.h"
+#include "sync/js/js_event_details.h"
+
+namespace browser_sync {
+
+SyncJsController::PendingJsMessage::PendingJsMessage(
+ const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler)
+ : name(name), args(args), reply_handler(reply_handler) {}
+
+SyncJsController::PendingJsMessage::~PendingJsMessage() {}
+
+SyncJsController::SyncJsController() {}
+
+SyncJsController::~SyncJsController() {
+ AttachJsBackend(WeakHandle<JsBackend>());
+}
+
+void SyncJsController::AddJsEventHandler(JsEventHandler* event_handler) {
+ js_event_handlers_.AddObserver(event_handler);
+ UpdateBackendEventHandler();
+}
+
+void SyncJsController::RemoveJsEventHandler(JsEventHandler* event_handler) {
+ js_event_handlers_.RemoveObserver(event_handler);
+ UpdateBackendEventHandler();
+}
+
+void SyncJsController::AttachJsBackend(
+ const WeakHandle<JsBackend>& js_backend) {
+ js_backend_ = js_backend;
+ UpdateBackendEventHandler();
+
+ if (js_backend_.IsInitialized()) {
+ // Process any queued messages.
+ for (PendingJsMessageList::const_iterator it =
+ pending_js_messages_.begin();
+ it != pending_js_messages_.end(); ++it) {
+ js_backend_.Call(FROM_HERE, &JsBackend::ProcessJsMessage,
+ it->name, it->args, it->reply_handler);
+ }
+ }
+}
+
+void SyncJsController::ProcessJsMessage(
+ const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler) {
+ if (js_backend_.IsInitialized()) {
+ js_backend_.Call(FROM_HERE, &JsBackend::ProcessJsMessage,
+ name, args, reply_handler);
+ } else {
+ pending_js_messages_.push_back(
+ PendingJsMessage(name, args, reply_handler));
+ }
+}
+
+void SyncJsController::HandleJsEvent(const std::string& name,
+ const JsEventDetails& details) {
+ FOR_EACH_OBSERVER(JsEventHandler, js_event_handlers_,
+ HandleJsEvent(name, details));
+}
+
+void SyncJsController::UpdateBackendEventHandler() {
+ if (js_backend_.IsInitialized()) {
+ // To avoid making the backend send useless events, we clear the
+ // event handler we pass to it if we don't have any event
+ // handlers.
+ WeakHandle<JsEventHandler> backend_event_handler =
+ (js_event_handlers_.size() > 0) ?
+ MakeWeakHandle(AsWeakPtr()) : WeakHandle<SyncJsController>();
+ js_backend_.Call(FROM_HERE, &JsBackend::SetJsEventHandler,
+ backend_event_handler);
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/js/sync_js_controller.h b/sync/js/sync_js_controller.h
new file mode 100644
index 0000000..6e8f100
--- /dev/null
+++ b/sync/js/sync_js_controller.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_JS_SYNC_JS_CONTROLLER_H_
+#define SYNC_JS_SYNC_JS_CONTROLLER_H_
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "base/observer_list.h"
+#include "sync/js/js_arg_list.h"
+#include "sync/js/js_controller.h"
+#include "sync/js/js_event_handler.h"
+#include "sync/util/weak_handle.h"
+
+namespace browser_sync {
+
+class JsBackend;
+
+// A class that mediates between the sync JsEventHandlers and the sync
+// JsBackend.
+class SyncJsController
+ : public JsController, public JsEventHandler,
+ public base::SupportsWeakPtr<SyncJsController> {
+ public:
+ SyncJsController();
+
+ virtual ~SyncJsController();
+
+ // Sets the backend to route all messages to (if initialized).
+ // Sends any queued-up messages if |backend| is initialized.
+ void AttachJsBackend(const WeakHandle<JsBackend>& js_backend);
+
+ // JsController implementation.
+ virtual void AddJsEventHandler(JsEventHandler* event_handler) OVERRIDE;
+ virtual void RemoveJsEventHandler(JsEventHandler* event_handler) OVERRIDE;
+ // Queues up any messages that are sent when there is no attached
+ // initialized backend.
+ virtual void ProcessJsMessage(
+ const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler) OVERRIDE;
+
+ // JsEventHandler implementation.
+ virtual void HandleJsEvent(const std::string& name,
+ const JsEventDetails& details) OVERRIDE;
+
+ private:
+ // A struct used to hold the arguments to ProcessJsMessage() for
+ // future invocation.
+ struct PendingJsMessage {
+ std::string name;
+ JsArgList args;
+ WeakHandle<JsReplyHandler> reply_handler;
+
+ PendingJsMessage(const std::string& name, const JsArgList& args,
+ const WeakHandle<JsReplyHandler>& reply_handler);
+
+ ~PendingJsMessage();
+ };
+
+ typedef std::vector<PendingJsMessage> PendingJsMessageList;
+
+ // Sets |js_backend_|'s event handler depending on how many
+ // underlying event handlers we have.
+ void UpdateBackendEventHandler();
+
+ WeakHandle<JsBackend> js_backend_;
+ ObserverList<JsEventHandler> js_event_handlers_;
+ PendingJsMessageList pending_js_messages_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncJsController);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_JS_SYNC_JS_CONTROLLER_H_
diff --git a/sync/js/sync_js_controller_unittest.cc b/sync/js/sync_js_controller_unittest.cc
new file mode 100644
index 0000000..10a982d
--- /dev/null
+++ b/sync/js/sync_js_controller_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/js/sync_js_controller.h"
+
+#include "base/message_loop.h"
+#include "base/values.h"
+#include "sync/js/js_arg_list.h"
+#include "sync/js/js_event_details.h"
+#include "sync/js/js_test_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Mock;
+using ::testing::StrictMock;
+
+class SyncJsControllerTest : public testing::Test {
+ protected:
+ void PumpLoop() {
+ message_loop_.RunAllPending();
+ }
+
+ private:
+ MessageLoop message_loop_;
+};
+
+TEST_F(SyncJsControllerTest, Messages) {
+ InSequence dummy;
+ // |mock_backend| needs to outlive |sync_js_controller|.
+ StrictMock<MockJsBackend> mock_backend;
+ SyncJsController sync_js_controller;
+
+ ListValue arg_list1, arg_list2;
+ arg_list1.Append(Value::CreateBooleanValue(false));
+ arg_list2.Append(Value::CreateIntegerValue(5));
+ JsArgList args1(&arg_list1), args2(&arg_list2);
+
+ // TODO(akalin): Write matchers for WeakHandle and use them here
+ // instead of _.
+ EXPECT_CALL(mock_backend, SetJsEventHandler(_));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _));
+
+ sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
+ sync_js_controller.ProcessJsMessage("test1", args2,
+ WeakHandle<JsReplyHandler>());
+ sync_js_controller.ProcessJsMessage("test2", args1,
+ WeakHandle<JsReplyHandler>());
+ PumpLoop();
+
+ // Let destructor of |sync_js_controller| call RemoveBackend().
+}
+
+TEST_F(SyncJsControllerTest, QueuedMessages) {
+ // |mock_backend| needs to outlive |sync_js_controller|.
+ StrictMock<MockJsBackend> mock_backend;
+ SyncJsController sync_js_controller;
+
+ ListValue arg_list1, arg_list2;
+ arg_list1.Append(Value::CreateBooleanValue(false));
+ arg_list2.Append(Value::CreateIntegerValue(5));
+ JsArgList args1(&arg_list1), args2(&arg_list2);
+
+ // Should queue messages.
+ sync_js_controller.ProcessJsMessage("test1", args2,
+ WeakHandle<JsReplyHandler>());
+ sync_js_controller.ProcessJsMessage("test2", args1,
+ WeakHandle<JsReplyHandler>());
+
+ Mock::VerifyAndClearExpectations(&mock_backend);
+
+ // TODO(akalin): Write matchers for WeakHandle and use them here
+ // instead of _.
+ EXPECT_CALL(mock_backend, SetJsEventHandler(_));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test1", HasArgs(args2), _));
+ EXPECT_CALL(mock_backend, ProcessJsMessage("test2", HasArgs(args1), _));
+
+ // Should call the queued messages.
+ sync_js_controller.AttachJsBackend(mock_backend.AsWeakHandle());
+ PumpLoop();
+
+ // Should do nothing.
+ sync_js_controller.AttachJsBackend(WeakHandle<JsBackend>());
+ PumpLoop();
+
+ // Should also do nothing.
+ sync_js_controller.AttachJsBackend(WeakHandle<JsBackend>());
+ PumpLoop();
+}
+
+TEST_F(SyncJsControllerTest, Events) {
+ InSequence dummy;
+ SyncJsController sync_js_controller;
+
+ DictionaryValue details_dict1, details_dict2;
+ details_dict1.SetString("foo", "bar");
+ details_dict2.SetInteger("baz", 5);
+ JsEventDetails details1(&details_dict1), details2(&details_dict2);
+
+ StrictMock<MockJsEventHandler> event_handler1, event_handler2;
+ EXPECT_CALL(event_handler1, HandleJsEvent("event", HasDetails(details1)));
+ EXPECT_CALL(event_handler2, HandleJsEvent("event", HasDetails(details1)));
+ EXPECT_CALL(event_handler1,
+ HandleJsEvent("anotherevent", HasDetails(details2)));
+ EXPECT_CALL(event_handler2,
+ HandleJsEvent("anotherevent", HasDetails(details2)));
+
+ sync_js_controller.AddJsEventHandler(&event_handler1);
+ sync_js_controller.AddJsEventHandler(&event_handler2);
+ sync_js_controller.HandleJsEvent("event", details1);
+ sync_js_controller.HandleJsEvent("anotherevent", details2);
+ sync_js_controller.RemoveJsEventHandler(&event_handler1);
+ sync_js_controller.RemoveJsEventHandler(&event_handler2);
+ sync_js_controller.HandleJsEvent("droppedevent", details2);
+
+ PumpLoop();
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/protocol/DEPS b/sync/protocol/DEPS
new file mode 100644
index 0000000..a21ff1a
--- /dev/null
+++ b/sync/protocol/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sync/syncable/model_type.h",
+]
diff --git a/sync/protocol/proto_enum_conversions.cc b/sync/protocol/proto_enum_conversions.cc
new file mode 100644
index 0000000..47a0016
--- /dev/null
+++ b/sync/protocol/proto_enum_conversions.cc
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with the .proto files in this directory.
+
+#include "sync/protocol/proto_enum_conversions.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace browser_sync {
+
+#define ASSERT_ENUM_BOUNDS(enum_parent, enum_type, enum_min, enum_max) \
+ COMPILE_ASSERT(enum_parent::enum_type##_MIN == enum_parent::enum_min, \
+ enum_type##_MIN_not_##enum_min); \
+ COMPILE_ASSERT(enum_parent::enum_type##_MAX == enum_parent::enum_max, \
+ enum_type##_MAX_not_##enum_max);
+
+#define ENUM_CASE(enum_parent, enum_value) \
+ case enum_parent::enum_value: return #enum_value
+
+const char* GetBrowserTypeString(
+ sync_pb::SessionWindow::BrowserType browser_type) {
+ ASSERT_ENUM_BOUNDS(sync_pb::SessionWindow, BrowserType,
+ TYPE_TABBED, TYPE_POPUP);
+ switch (browser_type) {
+ ENUM_CASE(sync_pb::SessionWindow, TYPE_TABBED);
+ ENUM_CASE(sync_pb::SessionWindow, TYPE_POPUP);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetPageTransitionString(
+ sync_pb::TabNavigation::PageTransition page_transition) {
+ ASSERT_ENUM_BOUNDS(sync_pb::TabNavigation, PageTransition,
+ LINK, CHAIN_END);
+ switch (page_transition) {
+ ENUM_CASE(sync_pb::TabNavigation, LINK);
+ ENUM_CASE(sync_pb::TabNavigation, TYPED);
+ ENUM_CASE(sync_pb::TabNavigation, AUTO_BOOKMARK);
+ ENUM_CASE(sync_pb::TabNavigation, AUTO_SUBFRAME);
+ ENUM_CASE(sync_pb::TabNavigation, MANUAL_SUBFRAME);
+ ENUM_CASE(sync_pb::TabNavigation, GENERATED);
+ ENUM_CASE(sync_pb::TabNavigation, START_PAGE);
+ ENUM_CASE(sync_pb::TabNavigation, FORM_SUBMIT);
+ ENUM_CASE(sync_pb::TabNavigation, RELOAD);
+ ENUM_CASE(sync_pb::TabNavigation, KEYWORD);
+ ENUM_CASE(sync_pb::TabNavigation, KEYWORD_GENERATED);
+ ENUM_CASE(sync_pb::TabNavigation, CHAIN_START);
+ ENUM_CASE(sync_pb::TabNavigation, CHAIN_END);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetPageTransitionQualifierString(
+ sync_pb::TabNavigation::PageTransitionQualifier
+ page_transition_qualifier) {
+ ASSERT_ENUM_BOUNDS(sync_pb::TabNavigation, PageTransitionQualifier,
+ CLIENT_REDIRECT, SERVER_REDIRECT);
+ switch (page_transition_qualifier) {
+ ENUM_CASE(sync_pb::TabNavigation, CLIENT_REDIRECT);
+ ENUM_CASE(sync_pb::TabNavigation, SERVER_REDIRECT);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetUpdatesSourceString(
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source) {
+ ASSERT_ENUM_BOUNDS(sync_pb::GetUpdatesCallerInfo, GetUpdatesSource,
+ UNKNOWN, DATATYPE_REFRESH);
+ switch (updates_source) {
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, UNKNOWN);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, FIRST_UPDATE);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, LOCAL);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NOTIFICATION);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, PERIODIC);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, SYNC_CYCLE_CONTINUATION);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, CLEAR_PRIVATE_DATA);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NEWLY_SUPPORTED_DATATYPE);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, MIGRATION);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, NEW_CLIENT);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, RECONFIGURATION);
+ ENUM_CASE(sync_pb::GetUpdatesCallerInfo, DATATYPE_REFRESH);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetDeviceTypeString(
+ sync_pb::SessionHeader::DeviceType device_type) {
+ ASSERT_ENUM_BOUNDS(sync_pb::SessionHeader, DeviceType, TYPE_WIN, TYPE_TABLET);
+ switch (device_type) {
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_WIN);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_MAC);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_LINUX);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_CROS);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_OTHER);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_PHONE);
+ ENUM_CASE(sync_pb::SessionHeader, TYPE_TABLET);
+ }
+ NOTREACHED();
+ return "";
+}
+
+#undef ASSERT_ENUM_BOUNDS
+#undef ENUM_CASE
+
+}  // namespace browser_sync
diff --git a/sync/protocol/proto_enum_conversions.h b/sync/protocol/proto_enum_conversions.h
new file mode 100644
index 0000000..fb8d44b
--- /dev/null
+++ b/sync/protocol/proto_enum_conversions.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
+#define SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
+#pragma once
+
+// Keep this file in sync with the .proto files in this directory.
+
+#include "sync/protocol/session_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+
+// Utility functions to get the string equivalent for some sync proto
+// enums.
+
+namespace browser_sync {
+
+// The returned strings (which don't have to be freed) are in ASCII.
+// The result of passing in an invalid enum value is undefined.
+
+const char* GetBrowserTypeString(
+ sync_pb::SessionWindow::BrowserType browser_type);
+
+const char* GetPageTransitionString(
+ sync_pb::TabNavigation::PageTransition page_transition);
+
+const char* GetPageTransitionQualifierString(
+ sync_pb::TabNavigation::PageTransitionQualifier
+ page_transition_qualifier);
+
+const char* GetUpdatesSourceString(
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source);
+
+const char* GetDeviceTypeString(
+ sync_pb::SessionHeader::DeviceType device_type);
+
+} // namespace browser_sync
+
+#endif // SYNC_PROTOCOL_PROTO_ENUM_CONVERSIONS_H_
diff --git a/sync/protocol/proto_enum_conversions_unittest.cc b/sync/protocol/proto_enum_conversions_unittest.cc
new file mode 100644
index 0000000..2445a30
--- /dev/null
+++ b/sync/protocol/proto_enum_conversions_unittest.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with the .proto files in this directory.
+
+#include "sync/protocol/proto_enum_conversions.h"
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+class ProtoEnumConversionsTest : public testing::Test {
+};
+
+template <class T>
+void TestEnumStringFunction(const char* (*enum_string_fn)(T),
+ int enum_min, int enum_max) {
+ for (int i = enum_min; i <= enum_max; ++i) {
+ const std::string& str = enum_string_fn(static_cast<T>(i));
+ EXPECT_FALSE(str.empty());
+ }
+}
+
+TEST_F(ProtoEnumConversionsTest, GetBrowserTypeString) {
+ TestEnumStringFunction(
+ GetBrowserTypeString,
+ sync_pb::SessionWindow::BrowserType_MIN,
+ sync_pb::SessionWindow::BrowserType_MAX);
+}
+
+TEST_F(ProtoEnumConversionsTest, GetPageTransitionString) {
+ // We have a gap, so we need to do two ranges.
+ TestEnumStringFunction(
+ GetPageTransitionString,
+ sync_pb::TabNavigation::PageTransition_MIN,
+ sync_pb::TabNavigation::KEYWORD_GENERATED);
+ TestEnumStringFunction(
+ GetPageTransitionString,
+ sync_pb::TabNavigation::CHAIN_START,
+ sync_pb::TabNavigation::PageTransition_MAX);
+}
+
+TEST_F(ProtoEnumConversionsTest, GetPageTransitionQualifierString) {
+ TestEnumStringFunction(
+ GetPageTransitionQualifierString,
+ sync_pb::TabNavigation::PageTransitionQualifier_MIN,
+ sync_pb::TabNavigation::PageTransitionQualifier_MAX);
+}
+
+TEST_F(ProtoEnumConversionsTest, GetUpdatesSourceString) {
+ TestEnumStringFunction(
+ GetUpdatesSourceString,
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource_MIN,
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource_MAX);
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/protocol/proto_value_conversions.cc b/sync/protocol/proto_value_conversions.cc
new file mode 100644
index 0000000..009c030
--- /dev/null
+++ b/sync/protocol/proto_value_conversions.cc
@@ -0,0 +1,413 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with the .proto files in this directory.
+
+#include "sync/protocol/proto_value_conversions.h"
+
+#include "base/base64.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/string_number_conversions.h"
+#include "base/values.h"
+#include "sync/protocol/app_notification_specifics.pb.h"
+#include "sync/protocol/app_setting_specifics.pb.h"
+#include "sync/protocol/app_specifics.pb.h"
+#include "sync/protocol/autofill_specifics.pb.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/encryption.pb.h"
+#include "sync/protocol/extension_setting_specifics.pb.h"
+#include "sync/protocol/extension_specifics.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/proto_enum_conversions.h"
+#include "sync/protocol/search_engine_specifics.pb.h"
+#include "sync/protocol/session_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/protocol/theme_specifics.pb.h"
+#include "sync/protocol/typed_url_specifics.pb.h"
+
+namespace browser_sync {
+
+namespace {
+
+// Basic Type -> Value functions.
+
+StringValue* MakeInt64Value(int64 x) {
+ return Value::CreateStringValue(base::Int64ToString(x));
+}
+
+// TODO(akalin): Perhaps make JSONWriter support BinaryValue and use
+// that instead of a StringValue.
+StringValue* MakeBytesValue(const std::string& bytes) {
+ std::string bytes_base64;
+ if (!base::Base64Encode(bytes, &bytes_base64)) {
+ NOTREACHED();
+ }
+ return Value::CreateStringValue(bytes_base64);
+}
+
+// T is the enum type.
+template <class T>
+StringValue* MakeEnumValue(T t, const char* (*converter_fn)(T)) {
+ return Value::CreateStringValue(converter_fn(t));
+}
+
+// T is the field type, F is either RepeatedField or RepeatedPtrField,
+// and V is a subclass of Value.
+template <class T, class F, class V>
+ListValue* MakeRepeatedValue(const F& fields, V* (*converter_fn)(T)) {
+ ListValue* list = new ListValue();
+ for (typename F::const_iterator it = fields.begin(); it != fields.end();
+ ++it) {
+ list->Append(converter_fn(*it));
+ }
+ return list;
+}
+
+} // namespace
+
+// Helper macros to reduce the amount of boilerplate.
+
+#define SET(field, fn) value->Set(#field, fn(proto.field()))
+#define SET_REP(field, fn) \
+ value->Set(#field, MakeRepeatedValue(proto.field(), fn))
+#define SET_ENUM(field, fn) \
+ value->Set(#field, MakeEnumValue(proto.field(), fn))
+
+#define SET_BOOL(field) SET(field, Value::CreateBooleanValue)
+#define SET_BYTES(field) SET(field, MakeBytesValue)
+#define SET_INT32(field) SET(field, MakeInt64Value)
+#define SET_INT32_REP(field) SET_REP(field, MakeInt64Value)
+#define SET_INT64(field) SET(field, MakeInt64Value)
+#define SET_INT64_REP(field) SET_REP(field, MakeInt64Value)
+#define SET_STR(field) SET(field, Value::CreateStringValue)
+#define SET_STR_REP(field) \
+ value->Set(#field, \
+ MakeRepeatedValue<const std::string&, \
+ google::protobuf::RepeatedPtrField< \
+ std::string >, \
+ StringValue>(proto.field(), \
+ Value::CreateStringValue))
+
+#define SET_FIELD(field, fn) \
+ do { \
+ if (specifics.has_##field()) { \
+ value->Set(#field, fn(specifics.field())); \
+ } \
+ } while (0)
+
+// If you add another macro, don't forget to add an #undef at the end
+// of this file, too.
+
+DictionaryValue* EncryptedDataToValue(const sync_pb::EncryptedData& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(key_name);
+ // TODO(akalin): Shouldn't blob be of type bytes instead of string?
+ SET_BYTES(blob);
+ return value;
+}
+
+DictionaryValue* AppSettingsToValue(
+ const sync_pb::AppNotificationSettings& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_BOOL(initial_setup_done);
+ SET_BOOL(disabled);
+ SET_STR(oauth_client_id);
+ return value;
+}
+
+DictionaryValue* SessionHeaderToValue(
+ const sync_pb::SessionHeader& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_REP(window, SessionWindowToValue);
+ SET_STR(client_name);
+ SET_ENUM(device_type, GetDeviceTypeString);
+ return value;
+}
+
+DictionaryValue* SessionTabToValue(
+ const sync_pb::SessionTab& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_INT32(tab_id);
+ SET_INT32(window_id);
+ SET_INT32(tab_visual_index);
+ SET_INT32(current_navigation_index);
+ SET_BOOL(pinned);
+ SET_STR(extension_app_id);
+ SET_REP(navigation, TabNavigationToValue);
+ return value;
+}
+
+DictionaryValue* SessionWindowToValue(
+ const sync_pb::SessionWindow& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_INT32(window_id);
+ SET_INT32(selected_tab_index);
+ SET_INT32_REP(tab);
+ SET_ENUM(browser_type, GetBrowserTypeString);
+ return value;
+}
+
+DictionaryValue* TabNavigationToValue(
+ const sync_pb::TabNavigation& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_INT32(index);
+ SET_STR(virtual_url);
+ SET_STR(referrer);
+ SET_STR(title);
+ SET_STR(state);
+ SET_ENUM(page_transition, GetPageTransitionString);
+ SET_ENUM(navigation_qualifier, GetPageTransitionQualifierString);
+ return value;
+}
+
+DictionaryValue* PasswordSpecificsDataToValue(
+ const sync_pb::PasswordSpecificsData& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_INT32(scheme);
+ SET_STR(signon_realm);
+ SET_STR(origin);
+ SET_STR(action);
+ SET_STR(username_element);
+ SET_STR(username_value);
+ SET_STR(password_element);
+ value->SetString("password_value", "<redacted>");
+ SET_BOOL(ssl_valid);
+ SET_BOOL(preferred);
+ SET_INT64(date_created);
+ SET_BOOL(blacklisted);
+ return value;
+}
+
+DictionaryValue* DeviceInformationToValue(
+ const sync_pb::DeviceInformation& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(cache_guid);
+ SET_STR(name);
+ SET_STR(platform);
+ SET_STR(chrome_version);
+ return value;
+}
+
+DictionaryValue* AppNotificationToValue(
+ const sync_pb::AppNotification& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(guid);
+ SET_STR(app_id);
+ SET_INT64(creation_timestamp_ms);
+ SET_STR(title);
+ SET_STR(body_text);
+ SET_STR(link_url);
+ SET_STR(link_text);
+ return value;
+}
+
+DictionaryValue* AppSettingSpecificsToValue(
+ const sync_pb::AppSettingSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET(extension_setting, ExtensionSettingSpecificsToValue);
+ return value;
+}
+
+DictionaryValue* AppSpecificsToValue(
+ const sync_pb::AppSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET(extension, ExtensionSpecificsToValue);
+ SET(notification_settings, AppSettingsToValue);
+ SET_STR(app_launch_ordinal);
+ SET_STR(page_ordinal);
+
+ return value;
+}
+
+DictionaryValue* AutofillSpecificsToValue(
+ const sync_pb::AutofillSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(name);
+ SET_STR(value);
+ SET_INT64_REP(usage_timestamp);
+ SET(profile, AutofillProfileSpecificsToValue);
+ return value;
+}
+
+DictionaryValue* AutofillProfileSpecificsToValue(
+ const sync_pb::AutofillProfileSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(label);
+ SET_STR(guid);
+
+ SET_STR_REP(name_first);
+ SET_STR_REP(name_middle);
+ SET_STR_REP(name_last);
+ SET_STR_REP(email_address);
+ SET_STR(company_name);
+
+ SET_STR(address_home_line1);
+ SET_STR(address_home_line2);
+ SET_STR(address_home_city);
+ SET_STR(address_home_state);
+ SET_STR(address_home_zip);
+ SET_STR(address_home_country);
+
+ SET_STR_REP(phone_home_whole_number);
+ return value;
+}
+
+DictionaryValue* BookmarkSpecificsToValue(
+ const sync_pb::BookmarkSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(url);
+ SET_BYTES(favicon);
+ SET_STR(title);
+ return value;
+}
+
+DictionaryValue* ExtensionSettingSpecificsToValue(
+ const sync_pb::ExtensionSettingSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(extension_id);
+ SET_STR(key);
+ SET_STR(value);
+ return value;
+}
+
+DictionaryValue* ExtensionSpecificsToValue(
+ const sync_pb::ExtensionSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(id);
+ SET_STR(version);
+ SET_STR(update_url);
+ SET_BOOL(enabled);
+ SET_BOOL(incognito_enabled);
+ SET_STR(name);
+ return value;
+}
+
+DictionaryValue* NigoriSpecificsToValue(
+ const sync_pb::NigoriSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET(encrypted, EncryptedDataToValue);
+ SET_BOOL(using_explicit_passphrase);
+ SET_BOOL(encrypt_bookmarks);
+ SET_BOOL(encrypt_preferences);
+ SET_BOOL(encrypt_autofill_profile);
+ SET_BOOL(encrypt_autofill);
+ SET_BOOL(encrypt_themes);
+ SET_BOOL(encrypt_typed_urls);
+ SET_BOOL(encrypt_extension_settings);
+ SET_BOOL(encrypt_extensions);
+ SET_BOOL(encrypt_sessions);
+ SET_BOOL(encrypt_app_settings);
+ SET_BOOL(encrypt_apps);
+ SET_BOOL(encrypt_search_engines);
+ SET_BOOL(sync_tabs);
+ SET_BOOL(encrypt_everything);
+ SET_REP(device_information, DeviceInformationToValue);
+ return value;
+}
+
+DictionaryValue* PasswordSpecificsToValue(
+ const sync_pb::PasswordSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET(encrypted, EncryptedDataToValue);
+ return value;
+}
+
+DictionaryValue* PreferenceSpecificsToValue(
+ const sync_pb::PreferenceSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(name);
+ SET_STR(value);
+ return value;
+}
+
+DictionaryValue* SearchEngineSpecificsToValue(
+ const sync_pb::SearchEngineSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(short_name);
+ SET_STR(keyword);
+ SET_STR(favicon_url);
+ SET_STR(url);
+ SET_BOOL(safe_for_autoreplace);
+ SET_STR(originating_url);
+ SET_INT64(date_created);
+ SET_STR(input_encodings);
+ SET_BOOL(show_in_default_list);
+ SET_STR(suggestions_url);
+ SET_INT32(prepopulate_id);
+ SET_BOOL(autogenerate_keyword);
+ SET_STR(instant_url);
+ SET_INT64(last_modified);
+ SET_STR(sync_guid);
+ return value;
+}
+
+DictionaryValue* SessionSpecificsToValue(
+ const sync_pb::SessionSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(session_tag);
+ SET(header, SessionHeaderToValue);
+ SET(tab, SessionTabToValue);
+ return value;
+}
+
+DictionaryValue* ThemeSpecificsToValue(
+ const sync_pb::ThemeSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_BOOL(use_custom_theme);
+ SET_BOOL(use_system_theme_by_default);
+ SET_STR(custom_theme_name);
+ SET_STR(custom_theme_id);
+ SET_STR(custom_theme_update_url);
+ return value;
+}
+
+DictionaryValue* TypedUrlSpecificsToValue(
+ const sync_pb::TypedUrlSpecifics& proto) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_STR(url);
+ SET_STR(title);
+ SET_BOOL(hidden);
+ SET_INT64_REP(visits);
+ SET_INT32_REP(visit_transitions);
+ return value;
+}
+
+DictionaryValue* EntitySpecificsToValue(
+ const sync_pb::EntitySpecifics& specifics) {
+ DictionaryValue* value = new DictionaryValue();
+ SET_FIELD(app, AppSpecificsToValue);
+ SET_FIELD(app_notification, AppNotificationToValue);
+ SET_FIELD(app_setting, AppSettingSpecificsToValue);
+ SET_FIELD(autofill, AutofillSpecificsToValue);
+ SET_FIELD(autofill_profile, AutofillProfileSpecificsToValue);
+ SET_FIELD(bookmark, BookmarkSpecificsToValue);
+ SET_FIELD(extension, ExtensionSpecificsToValue);
+ SET_FIELD(extension_setting, ExtensionSettingSpecificsToValue);
+ SET_FIELD(nigori, NigoriSpecificsToValue);
+ SET_FIELD(password, PasswordSpecificsToValue);
+ SET_FIELD(preference, PreferenceSpecificsToValue);
+ SET_FIELD(search_engine, SearchEngineSpecificsToValue);
+ SET_FIELD(session, SessionSpecificsToValue);
+ SET_FIELD(theme, ThemeSpecificsToValue);
+ SET_FIELD(typed_url, TypedUrlSpecificsToValue);
+ return value;
+}
+
+#undef SET
+#undef SET_REP
+
+#undef SET_BOOL
+#undef SET_BYTES
+#undef SET_INT32
+#undef SET_INT64
+#undef SET_INT64_REP
+#undef SET_STR
+#undef SET_STR_REP
+
+#undef SET_FIELD
+
+} // namespace browser_sync
diff --git a/sync/protocol/proto_value_conversions.h b/sync/protocol/proto_value_conversions.h
new file mode 100644
index 0000000..79bf1b1
--- /dev/null
+++ b/sync/protocol/proto_value_conversions.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with the .proto files in this directory.
+
+#ifndef SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
+#define SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
+#pragma once
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace sync_pb {
+class AppNotification;
+class AppNotificationSettings;
+class AppSettingSpecifics;
+class AppSpecifics;
+class AutofillProfileSpecifics;
+class AutofillSpecifics;
+class BookmarkSpecifics;
+class DeviceInformation;
+class EncryptedData;
+class EntitySpecifics;
+class ExtensionSettingSpecifics;
+class ExtensionSpecifics;
+class NigoriSpecifics;
+class PasswordSpecifics;
+class PasswordSpecificsData;
+class PreferenceSpecifics;
+class SearchEngineSpecifics;
+class SessionHeader;
+class SessionSpecifics;
+class SessionTab;
+class SessionWindow;
+class TabNavigation;
+class ThemeSpecifics;
+class TypedUrlSpecifics;
+} // namespace sync_pb
+
+// Utility functions to convert sync protocol buffers to dictionaries.
+// Each protocol field is mapped to a key of the same name. Repeated
+// fields are mapped to array values and sub-messages are mapped to
+// sub-dictionary values.
+//
+// TODO(akalin): Add has_* information.
+//
+// TODO(akalin): Improve enum support.
+
+namespace browser_sync {
+
+// Ownership of all returned DictionaryValues are transferred to the
+// caller.
+
+// TODO(akalin): Perhaps extend this to decrypt?
+base::DictionaryValue* EncryptedDataToValue(
+ const sync_pb::EncryptedData& encrypted_data);
+
+// Sub-protocol of AppSpecifics.
+base::DictionaryValue* AppSettingsToValue(
+ const sync_pb::AppNotificationSettings& app_notification_settings);
+
+// Sub-protocols of SessionSpecifics.
+
+base::DictionaryValue* SessionHeaderToValue(
+ const sync_pb::SessionHeader& session_header);
+
+base::DictionaryValue* SessionTabToValue(
+ const sync_pb::SessionTab& session_tab);
+
+base::DictionaryValue* SessionWindowToValue(
+ const sync_pb::SessionWindow& session_window);
+
+base::DictionaryValue* TabNavigationToValue(
+ const sync_pb::TabNavigation& tab_navigation);
+
+// Sub-protocol of PasswordSpecifics.
+
+base::DictionaryValue* PasswordSpecificsDataToValue(
+ const sync_pb::PasswordSpecificsData& password_specifics_data);
+
+// Sub-protocol of NigoriSpecifics.
+
+base::DictionaryValue* DeviceInformationToValue(
+ const sync_pb::DeviceInformation& device_information);
+
+// Main *SpecificsToValue functions.
+
+base::DictionaryValue* AppNotificationToValue(
+ const sync_pb::AppNotification& app_notification_specifics);
+
+base::DictionaryValue* AppSettingSpecificsToValue(
+ const sync_pb::AppSettingSpecifics& app_setting_specifics);
+
+base::DictionaryValue* AppSpecificsToValue(
+ const sync_pb::AppSpecifics& app_specifics);
+
+base::DictionaryValue* AutofillSpecificsToValue(
+ const sync_pb::AutofillSpecifics& autofill_specifics);
+
+base::DictionaryValue* AutofillProfileSpecificsToValue(
+ const sync_pb::AutofillProfileSpecifics& autofill_profile_specifics);
+
+base::DictionaryValue* BookmarkSpecificsToValue(
+ const sync_pb::BookmarkSpecifics& bookmark_specifics);
+
+base::DictionaryValue* ExtensionSettingSpecificsToValue(
+ const sync_pb::ExtensionSettingSpecifics& extension_setting_specifics);
+
+base::DictionaryValue* ExtensionSpecificsToValue(
+ const sync_pb::ExtensionSpecifics& extension_specifics);
+
+base::DictionaryValue* NigoriSpecificsToValue(
+ const sync_pb::NigoriSpecifics& nigori_specifics);
+
+base::DictionaryValue* PasswordSpecificsToValue(
+ const sync_pb::PasswordSpecifics& password_specifics);
+
+// Converts a PreferenceSpecifics protobuf; ownership of the returned value
+// is transferred to the caller.  (Parameter was misnamed "password_specifics"
+// -- a copy/paste from the PasswordSpecifics declaration above; declaration
+// parameter names are documentation only, so this rename is caller-safe.)
+base::DictionaryValue* PreferenceSpecificsToValue(
+    const sync_pb::PreferenceSpecifics& preference_specifics);
+
+base::DictionaryValue* SearchEngineSpecificsToValue(
+ const sync_pb::SearchEngineSpecifics& search_engine_specifics);
+
+base::DictionaryValue* SessionSpecificsToValue(
+ const sync_pb::SessionSpecifics& session_specifics);
+
+base::DictionaryValue* ThemeSpecificsToValue(
+ const sync_pb::ThemeSpecifics& theme_specifics);
+
+base::DictionaryValue* TypedUrlSpecificsToValue(
+ const sync_pb::TypedUrlSpecifics& typed_url_specifics);
+
+// Any present extensions are mapped to sub-dictionary values with the
+// key equal to the extension name.
+base::DictionaryValue* EntitySpecificsToValue(
+ const sync_pb::EntitySpecifics& specifics);
+
+} // namespace browser_sync
+
+#endif // SYNC_PROTOCOL_PROTO_VALUE_CONVERSIONS_H_
diff --git a/sync/protocol/proto_value_conversions_unittest.cc b/sync/protocol/proto_value_conversions_unittest.cc
new file mode 100644
index 0000000..3d96378
--- /dev/null
+++ b/sync/protocol/proto_value_conversions_unittest.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with the .proto files in this directory.
+
+#include "sync/protocol/proto_value_conversions.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+#include "sync/protocol/app_notification_specifics.pb.h"
+#include "sync/protocol/app_setting_specifics.pb.h"
+#include "sync/protocol/app_specifics.pb.h"
+#include "sync/protocol/autofill_specifics.pb.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/encryption.pb.h"
+#include "sync/protocol/extension_setting_specifics.pb.h"
+#include "sync/protocol/extension_specifics.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/search_engine_specifics.pb.h"
+#include "sync/protocol/session_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/protocol/theme_specifics.pb.h"
+#include "sync/protocol/typed_url_specifics.pb.h"
+#include "sync/syncable/model_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+// Shared harness for the *ToValue conversion tests.  TestSpecificsToValue
+// runs a conversion function on a default-constructed proto and
+// sanity-checks that the resulting dictionary is non-empty.
+class ProtoValueConversionsTest : public testing::Test {
+ protected:
+  // |specifics_to_value| is one of the free conversion functions declared in
+  // proto_value_conversions.h; the returned dictionary is owned (and freed)
+  // here via scoped_ptr.
+  template <class T>
+  void TestSpecificsToValue(
+      DictionaryValue* (*specifics_to_value)(const T&)) {
+    const T& specifics(T::default_instance());
+    scoped_ptr<DictionaryValue> value(specifics_to_value(specifics));
+    // We can't do much but make sure that the returned value has
+    // something in it.
+    EXPECT_FALSE(value->empty());
+  }
+};
+
+// Guard test: fails when the set of model types changes, forcing whoever
+// adds/removes a type to also update the conversion tests in this file.
+TEST_F(ProtoValueConversionsTest, ProtoChangeCheck) {
+  // If this number changes, that means we added or removed a data
+  // type. Don't forget to add a unit test for {New
+  // type}SpecificsToValue below.
+  EXPECT_EQ(17, syncable::MODEL_TYPE_COUNT);
+
+  // We'd also like to check if we changed any field in our messages.
+  // However, that's hard to do: sizeof could work, but it's
+  // platform-dependent. default_instance().ByteSize() won't change
+  // for most changes, since most of our fields are optional. So we
+  // just settle for comments in the proto files.
+}
+
+// Smoke tests for EncryptedData and the SessionSpecifics sub-protocols;
+// each just verifies the conversion produces a non-empty dictionary.
+TEST_F(ProtoValueConversionsTest, EncryptedDataToValue) {
+  TestSpecificsToValue(EncryptedDataToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, SessionHeaderToValue) {
+  TestSpecificsToValue(SessionHeaderToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, SessionTabToValue) {
+  TestSpecificsToValue(SessionTabToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, SessionWindowToValue) {
+  TestSpecificsToValue(SessionWindowToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, TabNavigationToValue) {
+  TestSpecificsToValue(TabNavigationToValue);
+}
+
+// Verifies that PasswordSpecificsDataToValue never emits the real password:
+// the "password_value" key must come back as the literal "<redacted>".
+TEST_F(ProtoValueConversionsTest, PasswordSpecificsData) {
+  sync_pb::PasswordSpecificsData specifics;
+  specifics.set_password_value("secret");
+  scoped_ptr<DictionaryValue> value(PasswordSpecificsDataToValue(specifics));
+  EXPECT_FALSE(value->empty());
+  std::string password_value;
+  EXPECT_TRUE(value->GetString("password_value", &password_value));
+  EXPECT_EQ("<redacted>", password_value);
+}
+
+TEST_F(ProtoValueConversionsTest, AppNotificationToValue) {
+  TestSpecificsToValue(AppNotificationToValue);
+}
+
+// Checks AppSettingsToValue copies both fields of AppNotificationSettings
+// (a sub-protocol of AppSpecifics) under their proto field names.
+TEST_F(ProtoValueConversionsTest, AppSettingSpecificsToValue) {
+  sync_pb::AppNotificationSettings specifics;
+  specifics.set_disabled(true);
+  specifics.set_oauth_client_id("some_id_value");
+  scoped_ptr<DictionaryValue> value(AppSettingsToValue(specifics));
+  EXPECT_FALSE(value->empty());
+  bool disabled_value = false;
+  std::string oauth_client_id_value;
+  EXPECT_TRUE(value->GetBoolean("disabled", &disabled_value));
+  // EXPECT_TRUE is the idiomatic gtest check for a bool;
+  // EXPECT_EQ(true, ...) compares via int conversion and can trigger
+  // compiler warnings.
+  EXPECT_TRUE(disabled_value);
+  EXPECT_TRUE(value->GetString("oauth_client_id", &oauth_client_id_value));
+  EXPECT_EQ("some_id_value", oauth_client_id_value);
+}
+
+// One smoke test per remaining model type (see ProtoChangeCheck above);
+// each delegates to the TestSpecificsToValue harness.
+TEST_F(ProtoValueConversionsTest, AppSpecificsToValue) {
+  TestSpecificsToValue(AppSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, AutofillSpecificsToValue) {
+  TestSpecificsToValue(AutofillSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, AutofillProfileSpecificsToValue) {
+  TestSpecificsToValue(AutofillProfileSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, BookmarkSpecificsToValue) {
+  TestSpecificsToValue(BookmarkSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, ExtensionSettingSpecificsToValue) {
+  TestSpecificsToValue(ExtensionSettingSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, ExtensionSpecificsToValue) {
+  TestSpecificsToValue(ExtensionSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, NigoriSpecificsToValue) {
+  TestSpecificsToValue(NigoriSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, PasswordSpecificsToValue) {
+  TestSpecificsToValue(PasswordSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, PreferenceSpecificsToValue) {
+  TestSpecificsToValue(PreferenceSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, SearchEngineSpecificsToValue) {
+  TestSpecificsToValue(SearchEngineSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, SessionSpecificsToValue) {
+  TestSpecificsToValue(SessionSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, ThemeSpecificsToValue) {
+  TestSpecificsToValue(ThemeSpecificsToValue);
+}
+
+TEST_F(ProtoValueConversionsTest, TypedUrlSpecificsToValue) {
+  TestSpecificsToValue(TypedUrlSpecificsToValue);
+}
+
+// TODO(akalin): Figure out how to better test EntitySpecificsToValue.
+
+// Populates every extension field, then checks the output dictionary has
+// exactly one entry per real model type (COUNT minus FIRST_REAL, i.e. the
+// reserved/top-level types carry no extension of their own).
+TEST_F(ProtoValueConversionsTest, EntitySpecificsToValue) {
+  sync_pb::EntitySpecifics specifics;
+  // Touch the extensions to make sure it shows up in the generated
+  // value.
+#define SET_FIELD(key) (void)specifics.mutable_##key()
+
+  SET_FIELD(app);
+  SET_FIELD(app_notification);
+  SET_FIELD(app_setting);
+  SET_FIELD(autofill);
+  SET_FIELD(autofill_profile);
+  SET_FIELD(bookmark);
+  SET_FIELD(extension);
+  SET_FIELD(extension_setting);
+  SET_FIELD(nigori);
+  SET_FIELD(password);
+  SET_FIELD(preference);
+  SET_FIELD(search_engine);
+  SET_FIELD(session);
+  SET_FIELD(theme);
+  SET_FIELD(typed_url);
+
+#undef SET_FIELD
+
+  scoped_ptr<DictionaryValue> value(EntitySpecificsToValue(specifics));
+  EXPECT_EQ(syncable::MODEL_TYPE_COUNT - syncable::FIRST_REAL_MODEL_TYPE,
+            static_cast<int>(value->size()));
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/protocol/service_constants.h b/sync/protocol/service_constants.h
new file mode 100644
index 0000000..83a65b1
--- /dev/null
+++ b/sync/protocol/service_constants.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Product-specific constants.
+
+#ifndef SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
+#define SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
+#pragma once
+
+// These fixed service names are used to obtain auth cookies for the
+// corresponding services. It might be interesting to make these updateable
+// as well as have the ability to add new ones.
+#define SYNC_SERVICE_NAME "chromiumsync"
+
+#define DEFAULT_SIGNIN_DOMAIN "gmail.com"
+
+#define PRODUCT_NAME_STRING_NARROW "Chromium Browser Sync"
+
+#define PRODUCT_NAME_STRING PRODUCT_NAME_STRING_NARROW
+#define PRODUCT_NAME_STRING_WIDE L##PRODUCT_NAME_STRING
+
+#endif // SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
diff --git a/sync/protocol/sync_protocol_error.cc b/sync/protocol/sync_protocol_error.cc
new file mode 100644
index 0000000..544d98a
--- /dev/null
+++ b/sync/protocol/sync_protocol_error.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/protocol/sync_protocol_error.h"
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/values.h"
+
+namespace browser_sync {
+// Stringifies one enum case inside a switch.  The case returns directly,
+// so the trailing "break" in the original definition was unreachable and
+// has been dropped.
+#define ENUM_CASE(x) case x: return #x;
+
+// Maps a SyncProtocolErrorType to its symbolic name.  Covers every
+// enumerator explicitly so the compiler flags any new, unhandled value;
+// falls through to NOTREACHED() plus "" for corrupt input.
+const char* GetSyncErrorTypeString(SyncProtocolErrorType type) {
+  switch (type) {
+    case SYNC_SUCCESS: return "SYNC_SUCCESS";
+    case NOT_MY_BIRTHDAY: return "NOT_MY_BIRTHDAY";
+    case THROTTLED: return "THROTTLED";
+    case CLEAR_PENDING: return "CLEAR_PENDING";
+    case TRANSIENT_ERROR: return "TRANSIENT_ERROR";
+    case NON_RETRIABLE_ERROR: return "NON_RETRIABLE_ERROR";
+    case MIGRATION_DONE: return "MIGRATION_DONE";
+    case INVALID_CREDENTIAL: return "INVALID_CREDENTIAL";
+    case UNKNOWN_ERROR: return "UNKNOWN_ERROR";
+  }
+  NOTREACHED();
+  return "";
+}
+
+// Maps a ClientAction to its symbolic name via the ENUM_CASE macro above;
+// returns "" (after NOTREACHED) for values outside the enum.
+const char* GetClientActionString(ClientAction action) {
+  switch (action) {
+    ENUM_CASE(UPGRADE_CLIENT);
+    ENUM_CASE(CLEAR_USER_DATA_AND_RESYNC);
+    ENUM_CASE(ENABLE_SYNC_ON_ACCOUNT);
+    ENUM_CASE(STOP_AND_RESTART_SYNC);
+    ENUM_CASE(DISABLE_SYNC_ON_CLIENT);
+    ENUM_CASE(UNKNOWN_ACTION);
+  }
+  NOTREACHED();
+  return "";
+}
+
+// Default to the "unknown" sentinels; string members start empty.
+SyncProtocolError::SyncProtocolError()
+    : error_type(UNKNOWN_ERROR),
+      action(UNKNOWN_ACTION) {
+}
+
+SyncProtocolError::~SyncProtocolError() {
+}
+
+// Serializes the error for debugging/about:sync display; the returned
+// dictionary is owned by the caller.
+// NOTE(review): key casing is inconsistent ("ErrorType"/"ErrorDescription"
+// vs "url"/"action").  Left as-is since consumers may key off these exact
+// strings -- confirm before normalizing.
+DictionaryValue* SyncProtocolError::ToValue() const {
+  DictionaryValue* value = new DictionaryValue();
+  value->SetString("ErrorType",
+                   GetSyncErrorTypeString(error_type));
+  value->SetString("ErrorDescription", error_description);
+  value->SetString("url", url);
+  value->SetString("action", GetClientActionString(action));
+  return value;
+}
+
+} // namespace browser_sync
+
diff --git a/sync/protocol/sync_protocol_error.h b/sync/protocol/sync_protocol_error.h
new file mode 100644
index 0000000..9ae5317
--- /dev/null
+++ b/sync/protocol/sync_protocol_error.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
+#define SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
+#pragma once
+
+#include <string>
+
+#include "base/values.h"
+#include "sync/syncable/model_type.h"
+
+namespace browser_sync{
+
+enum SyncProtocolErrorType {
+ // Success case.
+ SYNC_SUCCESS,
+
+ // Birthday does not match that of the server.
+ NOT_MY_BIRTHDAY,
+
+ // Server is busy. Try later.
+ THROTTLED,
+
+ // Clear user data is being currently executed by the server.
+ CLEAR_PENDING,
+
+ // Server cannot service the request now.
+ TRANSIENT_ERROR,
+
+ // Server does not wish the client to retry any more until the action has
+ // been taken.
+ NON_RETRIABLE_ERROR,
+
+ // Indicates the datatypes have been migrated and the client should resync
+ // them to get the latest progress markers.
+ MIGRATION_DONE,
+
+ // Invalid Credential.
+ INVALID_CREDENTIAL,
+
+ // The default value.
+ UNKNOWN_ERROR
+};
+
+enum ClientAction {
+ // Upgrade the client to latest version.
+ UPGRADE_CLIENT,
+
+ // Clear user data and setup sync again.
+ CLEAR_USER_DATA_AND_RESYNC,
+
+ // Set the bit on the account to enable sync.
+ ENABLE_SYNC_ON_ACCOUNT,
+
+ // Stop sync and restart sync.
+ STOP_AND_RESTART_SYNC,
+
+ // Wipe this client of any sync data.
+ DISABLE_SYNC_ON_CLIENT,
+
+ // The default. No action.
+ UNKNOWN_ACTION
+};
+
+// Plain data holder describing an error reported by the sync server,
+// plus the action the client should take in response.
+struct SyncProtocolError {
+  SyncProtocolErrorType error_type;   // What went wrong (UNKNOWN_ERROR default).
+  std::string error_description;      // Human-readable detail from the server.
+  std::string url;                    // Associated URL, if any.
+  ClientAction action;                // What the client should do about it.
+  syncable::ModelTypeSet error_data_types;  // Types affected by the error.
+  SyncProtocolError();
+  ~SyncProtocolError();
+  // Caller owns the returned dictionary (see sync_protocol_error.cc).
+  DictionaryValue* ToValue() const;
+};
+
+const char* GetSyncErrorTypeString(SyncProtocolErrorType type);
+const char* GetClientActionString(ClientAction action);
+} // namespace browser_sync
+#endif // SYNC_PROTOCOL_SYNC_PROTOCOL_ERROR_H_
+
diff --git a/sync/sessions/DEPS b/sync/sessions/DEPS
new file mode 100644
index 0000000..a320b0d
--- /dev/null
+++ b/sync/sessions/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "+sync/engine",
+ "+sync/protocol",
+ "+sync/syncable",
+ "+sync/test",
+ "+sync/util",
+]
diff --git a/sync/sessions/debug_info_getter.h b/sync/sessions/debug_info_getter.h
new file mode 100644
index 0000000..257a853
--- /dev/null
+++ b/sync/sessions/debug_info_getter.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
+#define SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
+// Added for consistency: every other header introduced by this change
+// pairs its include guard with #pragma once.
+#pragma once
+
+#include "sync/protocol/sync.pb.h"
+
+namespace browser_sync {
+namespace sessions {
+
+// This is the interface that needs to be implemented by the event listener
+// to communicate the debug info data to the syncer.
+class DebugInfoGetter {
+ public:
+  // Gets the client debug info and clears the state so the same data is not
+  // sent again.
+  virtual void GetAndClearDebugInfo(sync_pb::DebugInfo* debug_info) = 0;
+  virtual ~DebugInfoGetter() {}
+};
+
+}  // namespace sessions
+}  // namespace browser_sync
+
+#endif  // SYNC_SESSIONS_DEBUG_INFO_GETTER_H_
+
diff --git a/sync/sessions/ordered_commit_set.cc b/sync/sessions/ordered_commit_set.cc
new file mode 100644
index 0000000..51a354e
--- /dev/null
+++ b/sync/sessions/ordered_commit_set.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/ordered_commit_set.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace browser_sync {
+namespace sessions {
+
+// Copies |routes| so the set can map model types to ModelSafeGroups for
+// the lifetime of this object (see AddCommitItem).
+OrderedCommitSet::OrderedCommitSet(
+    const browser_sync::ModelSafeRoutingInfo& routes)
+    : routes_(routes) {
+}
+
+OrderedCommitSet::~OrderedCommitSet() {}
+
+// Appends one item, keeping all four parallel containers and the
+// per-group projection in step.  Each metahandle is recorded at most
+// once; repeated adds are silently ignored.
+void OrderedCommitSet::AddCommitItem(const int64 metahandle,
+                                     const syncable::Id& commit_id,
+                                     syncable::ModelType type) {
+  if (HaveCommitItem(metahandle))
+    return;
+  inserted_metahandles_.insert(metahandle);
+  metahandle_order_.push_back(metahandle);
+  commit_ids_.push_back(commit_id);
+  types_.push_back(type);
+  // The new item's index feeds the projection for its ModelSafeGroup.
+  const size_t new_index = commit_ids_.size() - 1;
+  projections_[GetGroupForModelType(type, routes_)].push_back(new_index);
+}
+
+// Appends every item of |other| in order; AddCommitItem drops any
+// metahandles already present in this set.
+void OrderedCommitSet::Append(const OrderedCommitSet& other) {
+  for (int i = 0; i < other.Size(); ++i) {
+    CommitItem item = other.GetCommitItemAt(i);
+    AddCommitItem(item.meta, item.id, item.group);
+  }
+}
+
+// Same as Append, but inserts |other|'s items in reverse order.
+void OrderedCommitSet::AppendReverse(const OrderedCommitSet& other) {
+  for (int i = other.Size() - 1; i >= 0; i--) {
+    CommitItem item = other.GetCommitItemAt(i);
+    AddCommitItem(item.meta, item.id, item.group);
+  }
+}
+
+// Shrinks the set to at most |max_size| items, dropping the most recently
+// added ones and repairing every parallel container (metahandle set,
+// projections, ids, order, types).  No-op when already small enough.
+void OrderedCommitSet::Truncate(size_t max_size) {
+  if (max_size < metahandle_order_.size()) {
+    // Un-register the dropped metahandles so HaveCommitItem stays accurate.
+    for (size_t i = max_size; i < metahandle_order_.size(); ++i) {
+      inserted_metahandles_.erase(metahandle_order_[i]);
+    }
+
+    // Some projections may refer to indices that are getting chopped.
+    // Since projections are in increasing order, it's easy to fix. Except
+    // that you can't erase(..) using a reverse_iterator, so we use binary
+    // search to find the chop point.
+    Projections::iterator it = projections_.begin();
+    for (; it != projections_.end(); ++it) {
+      // For each projection, chop off any indices larger than or equal to
+      // max_size by looking for max_size using binary search.
+      Projection& p = it->second;
+      Projection::iterator element = std::lower_bound(p.begin(), p.end(),
+        max_size);
+      if (element != p.end())
+        p.erase(element, p.end());
+    }
+    commit_ids_.resize(max_size);
+    metahandle_order_.resize(max_size);
+    types_.resize(max_size);
+  }
+}
+
+// Returns the (metahandle, id, type) triple at |position|; used by
+// Append/AppendReverse to copy items between sets.
+OrderedCommitSet::CommitItem OrderedCommitSet::GetCommitItemAt(
+    const int position) const {
+  // DCHECK_LT logs both operands on failure, unlike DCHECK(a < b).
+  DCHECK_LT(position, Size());
+  CommitItem return_item = {metahandle_order_[position],
+    commit_ids_[position],
+    types_[position]};
+  return return_item;
+}
+
+// Returns true iff at least one item in this set has type BOOKMARKS.
+// Scans only the projection for the group BOOKMARKS routes to, since other
+// groups cannot contain bookmark items.
+bool OrderedCommitSet::HasBookmarkCommitId() const {
+  ModelSafeRoutingInfo::const_iterator group
+      = routes_.find(syncable::BOOKMARKS);
+  if (group == routes_.end())
+    return false;  // Bookmarks aren't routed at all.
+  Projections::const_iterator proj = projections_.find(group->second);
+  if (proj == projections_.end())
+    return false;  // Nothing committed for that group yet.
+  DCHECK_LE(proj->second.size(), types_.size());
+  // The group may hold other types too (e.g. preferences share GROUP_UI),
+  // so check each projected item's actual type.
+  for (size_t i = 0; i < proj->second.size(); i++) {
+    if (types_[proj->second[i]] == syncable::BOOKMARKS)
+      return true;
+  }
+  return false;
+}
+
+// Memberwise copy of every field, equivalent to the implicit assignment.
+// NOTE(review): returns void (not OrderedCommitSet&) to match the header
+// declaration; chaining a = b = c is intentionally unsupported.
+void OrderedCommitSet::operator=(const OrderedCommitSet& other) {
+  inserted_metahandles_ = other.inserted_metahandles_;
+  commit_ids_ = other.commit_ids_;
+  metahandle_order_ = other.metahandle_order_;
+  projections_ = other.projections_;
+  types_ = other.types_;
+  routes_ = other.routes_;
+}
+
+} // namespace sessions
+} // namespace browser_sync
+
diff --git a/sync/sessions/ordered_commit_set.h b/sync/sessions/ordered_commit_set.h
new file mode 100644
index 0000000..8551c07
--- /dev/null
+++ b/sync/sessions/ordered_commit_set.h
@@ -0,0 +1,119 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
+#define SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
+#pragma once
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "sync/engine/model_safe_worker.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+namespace sessions {
+
+// TODO(ncarter): This code is more generic than just Commit and can
+// be reused elsewhere (e.g. ChangeReorderBuffer do similar things). Merge
+// all these implementations.
+// An ordered collection of items slated for commit, with a per-
+// ModelSafeGroup "projection" that lets a commit response be processed one
+// group at a time.  Backed by parallel vectors indexed by insertion order.
+class OrderedCommitSet {
+ public:
+  // A list of indices into the full list of commit ids such that:
+  // 1 - each element is an index belonging to a particular ModelSafeGroup.
+  // 2 - the vector is in sorted (smallest to largest) order.
+  // 3 - each element is a valid index for GetCommitItemAt.
+  // See GetCommitIdProjection for usage.
+  typedef std::vector<size_t> Projection;
+
+  // TODO(chron): Reserve space according to batch size?
+  explicit OrderedCommitSet(const browser_sync::ModelSafeRoutingInfo& routes);
+  ~OrderedCommitSet();
+
+  // True if |metahandle| has already been added to this set.
+  bool HaveCommitItem(const int64 metahandle) const {
+    return inserted_metahandles_.count(metahandle) > 0;
+  }
+
+  // Adds one item; duplicates (by metahandle) are ignored.  See .cc.
+  void AddCommitItem(const int64 metahandle, const syncable::Id& commit_id,
+                     syncable::ModelType type);
+
+  const std::vector<syncable::Id>& GetAllCommitIds() const {
+    return commit_ids_;
+  }
+
+  // Return the Id at index |position| in this OrderedCommitSet. Note that
+  // the index uniquely identifies the same logical item in each of:
+  // 1) this OrderedCommitSet
+  // 2) the CommitRequest sent to the server
+  // 3) the list of EntryResponse objects in the CommitResponse.
+  // These together allow re-association of the pre-commit Id with the
+  // actual committed entry.
+  const syncable::Id& GetCommitIdAt(const size_t position) const {
+    return commit_ids_[position];
+  }
+
+  // Same as above, but for ModelType of the item.
+  syncable::ModelType GetModelTypeAt(const size_t position) const {
+    return types_[position];
+  }
+
+  // Get the projection of commit ids onto the space of commit ids
+  // belonging to |group|. This is useful when you need to process a commit
+  // response one ModelSafeGroup at a time. See GetCommitIdAt for how the
+  // indices contained in the returned Projection can be used.
+  const Projection& GetCommitIdProjection(browser_sync::ModelSafeGroup group) {
+    return projections_[group];
+  }
+
+  // Number of items in the set.  (size_t narrowed to int; commit batches
+  // are small, so overflow is not a practical concern.)
+  int Size() const {
+    return commit_ids_.size();
+  }
+
+  // Returns true iff any of the commit ids added to this set have model type
+  // BOOKMARKS.
+  bool HasBookmarkCommitId() const;
+
+  void Append(const OrderedCommitSet& other);
+  void AppendReverse(const OrderedCommitSet& other);
+  void Truncate(size_t max_size);
+
+  // Memberwise copy; deliberately returns void (no chaining).
+  void operator=(const OrderedCommitSet& other);
+ private:
+  // A set of CommitIdProjections associated with particular ModelSafeGroups.
+  typedef std::map<browser_sync::ModelSafeGroup, Projection> Projections;
+
+  // Helper container for return value of GetCommitItemAt.
+  struct CommitItem {
+    int64 meta;
+    syncable::Id id;
+    syncable::ModelType group;
+  };
+
+  CommitItem GetCommitItemAt(const int position) const;
+
+  // These lists are different views of the same items; e.g they are
+  // isomorphic.
+  std::set<int64> inserted_metahandles_;
+  std::vector<syncable::Id> commit_ids_;
+  std::vector<int64> metahandle_order_;
+  Projections projections_;
+
+  // We need this because of operations like AppendReverse that take ids from
+  // one OrderedCommitSet and insert into another -- we need to know the
+  // group for each ID so that the insertion can update the appropriate
+  // projection. We could store it in commit_ids_, but sometimes we want
+  // to just return the vector of Ids, so this is more straightforward
+  // and shouldn't take up too much extra space since commit lists are small.
+  std::vector<syncable::ModelType> types_;
+
+  browser_sync::ModelSafeRoutingInfo routes_;
+};
+
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_SESSIONS_ORDERED_COMMIT_SET_H_
+
diff --git a/sync/sessions/ordered_commit_set_unittest.cc b/sync/sessions/ordered_commit_set_unittest.cc
new file mode 100644
index 0000000..fee37bf
--- /dev/null
+++ b/sync/sessions/ordered_commit_set_unittest.cc
@@ -0,0 +1,120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/ordered_commit_set.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::vector;
+
+// Fixture providing a fixed routing map (BOOKMARKS/PREFERENCES -> GROUP_UI,
+// AUTOFILL -> GROUP_DB, TOP_LEVEL_FOLDER -> GROUP_PASSIVE) and an id
+// factory for building commit sets.
+class OrderedCommitSetTest : public testing::Test {
+ public:
+  OrderedCommitSetTest() {
+    routes_[syncable::BOOKMARKS] = browser_sync::GROUP_UI;
+    routes_[syncable::PREFERENCES] = browser_sync::GROUP_UI;
+    routes_[syncable::AUTOFILL] = browser_sync::GROUP_DB;
+    routes_[syncable::TOP_LEVEL_FOLDER] = browser_sync::GROUP_PASSIVE;
+  }
+ protected:
+  browser_sync::TestIdFactory ids_;
+  browser_sync::ModelSafeRoutingInfo routes_;
+};
+
+namespace browser_sync {
+namespace sessions {
+
+// Builds two sets, merges them with AppendReverse (which also drops a
+// duplicate), then repeatedly verifies ids and per-group projections while
+// truncating the merged set down to 7, 4, and 1 items.
+TEST_F(OrderedCommitSetTest, Projections) {
+  vector<syncable::Id> expected;
+  for (int i = 0; i < 8; i++)
+    expected.push_back(ids_.NewLocalId());
+
+  OrderedCommitSet commit_set1(routes_), commit_set2(routes_);
+  commit_set1.AddCommitItem(0, expected[0], syncable::BOOKMARKS);
+  commit_set1.AddCommitItem(1, expected[1], syncable::BOOKMARKS);
+  commit_set1.AddCommitItem(2, expected[2], syncable::PREFERENCES);
+  // Duplicates should be dropped.
+  commit_set1.AddCommitItem(2, expected[2], syncable::PREFERENCES);
+  commit_set1.AddCommitItem(3, expected[3], syncable::TOP_LEVEL_FOLDER);
+  commit_set1.AddCommitItem(4, expected[4], syncable::TOP_LEVEL_FOLDER);
+  commit_set2.AddCommitItem(7, expected[7], syncable::AUTOFILL);
+  commit_set2.AddCommitItem(6, expected[6], syncable::AUTOFILL);
+  commit_set2.AddCommitItem(5, expected[5], syncable::AUTOFILL);
+  // Add something in set1 to set2, which should get dropped by AppendReverse.
+  commit_set2.AddCommitItem(0, expected[0], syncable::BOOKMARKS);
+  commit_set1.AppendReverse(commit_set2);
+
+  // First, we should verify the projections are correct. Second, we want to
+  // do the same verification after truncating by 1. Next, try truncating
+  // the set to a size of 4, so that the DB projection is wiped out and
+  // PASSIVE has one element removed. Finally, truncate to 1 so only UI is
+  // remaining.
+  int j = 0;
+  do {
+    SCOPED_TRACE(::testing::Message("Iteration j = ") << j);
+    vector<syncable::Id> all_ids = commit_set1.GetAllCommitIds();
+    EXPECT_EQ(expected.size(), all_ids.size());
+    for (size_t i = 0; i < expected.size(); i++) {
+      SCOPED_TRACE(::testing::Message("CommitSet mismatch at iteration i = ")
+                   << i);
+      EXPECT_TRUE(expected[i] == all_ids[i]);
+      EXPECT_TRUE(expected[i] == commit_set1.GetCommitIdAt(i));
+    }
+
+    // Every item must appear in exactly one group's projection.
+    OrderedCommitSet::Projection p1, p2, p3;
+    p1 = commit_set1.GetCommitIdProjection(GROUP_UI);
+    p2 = commit_set1.GetCommitIdProjection(GROUP_PASSIVE);
+    p3 = commit_set1.GetCommitIdProjection(GROUP_DB);
+    EXPECT_TRUE(p1.size() + p2.size() + p3.size() == expected.size()) << "Sum"
+        << "of sizes of projections should equal full expected size!";
+
+    for (size_t i = 0; i < p1.size(); i++) {
+      SCOPED_TRACE(::testing::Message("UI projection mismatch at i = ") << i);
+      EXPECT_TRUE(expected[p1[i]] == commit_set1.GetCommitIdAt(p1[i]))
+          << "expected[p1[i]] = " << expected[p1[i]]
+          << ", commit_set1[p1[i]] = " << commit_set1.GetCommitIdAt(p1[i]);
+    }
+    for (size_t i = 0; i < p2.size(); i++) {
+      SCOPED_TRACE(::testing::Message("PASSIVE projection mismatch at i = ")
+                   << i);
+      EXPECT_TRUE(expected[p2[i]] == commit_set1.GetCommitIdAt(p2[i]))
+          << "expected[p2[i]] = " << expected[p2[i]]
+          << ", commit_set1[p2[i]] = " << commit_set1.GetCommitIdAt(p2[i]);
+    }
+    for (size_t i = 0; i < p3.size(); i++) {
+      SCOPED_TRACE(::testing::Message("DB projection mismatch at i = ") << i);
+      EXPECT_TRUE(expected[p3[i]] == commit_set1.GetCommitIdAt(p3[i]))
+          << "expected[p3[i]] = " << expected[p3[i]]
+          << ", commit_set1[p3[i]] = " << commit_set1.GetCommitIdAt(p3[i]);
+    }
+
+    int cut_to_size = 7 - 3 * j++;
+    if (cut_to_size < 0)
+      break;
+
+    expected.resize(cut_to_size);
+    commit_set1.Truncate(cut_to_size);
+  } while (true);
+}
+
+// HasBookmarkCommitId must be false until a BOOKMARKS item is added, and
+// must become false again once Truncate drops that item.
+TEST_F(OrderedCommitSetTest, HasBookmarkCommitId) {
+  OrderedCommitSet commit_set(routes_);
+
+  commit_set.AddCommitItem(0, ids_.NewLocalId(), syncable::AUTOFILL);
+  commit_set.AddCommitItem(1, ids_.NewLocalId(), syncable::TOP_LEVEL_FOLDER);
+  EXPECT_FALSE(commit_set.HasBookmarkCommitId());
+
+  // PREFERENCES shares GROUP_UI with BOOKMARKS but must not count.
+  commit_set.AddCommitItem(2, ids_.NewLocalId(), syncable::PREFERENCES);
+  commit_set.AddCommitItem(3, ids_.NewLocalId(), syncable::PREFERENCES);
+  EXPECT_FALSE(commit_set.HasBookmarkCommitId());
+
+  commit_set.AddCommitItem(4, ids_.NewLocalId(), syncable::BOOKMARKS);
+  EXPECT_TRUE(commit_set.HasBookmarkCommitId());
+
+  commit_set.Truncate(4);
+  EXPECT_FALSE(commit_set.HasBookmarkCommitId());
+}
+
+} // namespace sessions
+} // namespace browser_sync
+
diff --git a/sync/sessions/session_state.cc b/sync/sessions/session_state.cc
new file mode 100644
index 0000000..a6655f9
--- /dev/null
+++ b/sync/sessions/session_state.cc
@@ -0,0 +1,324 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/session_state.h"
+
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base64.h"
+#include "base/json/json_writer.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+#include "sync/protocol/proto_enum_conversions.h"
+
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+namespace sessions {
+
+SyncSourceInfo::SyncSourceInfo()
+ : updates_source(sync_pb::GetUpdatesCallerInfo::UNKNOWN) {}
+
+SyncSourceInfo::SyncSourceInfo(
+ const syncable::ModelTypePayloadMap& t)
+ : updates_source(sync_pb::GetUpdatesCallerInfo::UNKNOWN), types(t) {}
+
+SyncSourceInfo::SyncSourceInfo(
+ const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& u,
+ const syncable::ModelTypePayloadMap& t)
+ : updates_source(u), types(t) {}
+
+SyncSourceInfo::~SyncSourceInfo() {}
+
+DictionaryValue* SyncSourceInfo::ToValue() const {
+ DictionaryValue* value = new DictionaryValue();
+ value->SetString("updatesSource",
+ GetUpdatesSourceString(updates_source));
+ value->Set("types", syncable::ModelTypePayloadMapToValue(types));
+ return value;
+}
+
+SyncerStatus::SyncerStatus()
+ : invalid_store(false),
+ num_successful_commits(0),
+ num_successful_bookmark_commits(0),
+ num_updates_downloaded_total(0),
+ num_tombstone_updates_downloaded_total(0),
+ num_local_overwrites(0),
+ num_server_overwrites(0) {
+}
+
+SyncerStatus::~SyncerStatus() {
+}
+
+DictionaryValue* SyncerStatus::ToValue() const {
+ DictionaryValue* value = new DictionaryValue();
+ value->SetBoolean("invalidStore", invalid_store);
+ value->SetInteger("numSuccessfulCommits", num_successful_commits);
+ value->SetInteger("numSuccessfulBookmarkCommits",
+ num_successful_bookmark_commits);
+ value->SetInteger("numUpdatesDownloadedTotal",
+ num_updates_downloaded_total);
+ value->SetInteger("numTombstoneUpdatesDownloadedTotal",
+ num_tombstone_updates_downloaded_total);
+ value->SetInteger("numLocalOverwrites", num_local_overwrites);
+ value->SetInteger("numServerOverwrites", num_server_overwrites);
+ return value;
+}
+
+DictionaryValue* DownloadProgressMarkersToValue(
+    const std::string
+        (&download_progress_markers)[syncable::MODEL_TYPE_COUNT]) {
+  DictionaryValue* value = new DictionaryValue();  // Caller takes ownership.
+  for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+       i < syncable::MODEL_TYPE_COUNT; ++i) {
+    // TODO(akalin): Unpack the value into a protobuf.
+    std::string base64_marker;  // Markers may hold binary data, hence base64.
+    bool encoded =
+        base::Base64Encode(download_progress_markers[i], &base64_marker);
+    DCHECK(encoded);  // Encoding failure is not expected here.
+    value->SetString(
+        syncable::ModelTypeToString(syncable::ModelTypeFromInt(i)),
+        base64_marker);
+  }
+  return value;
+}
+
+ErrorCounters::ErrorCounters()
+ : last_download_updates_result(UNSET),
+ last_post_commit_result(UNSET),
+ last_process_commit_response_result(UNSET) {
+}
+
+SyncSessionSnapshot::SyncSessionSnapshot(
+    const SyncerStatus& syncer_status,
+    const ErrorCounters& errors,
+    int64 num_server_changes_remaining,
+    bool is_share_usable,
+    syncable::ModelTypeSet initial_sync_ended,
+    const std::string
+        (&download_progress_markers)[syncable::MODEL_TYPE_COUNT],
+    bool more_to_sync,
+    bool is_silenced,
+    int64 unsynced_count,
+    int num_encryption_conflicts,
+    int num_hierarchy_conflicts,
+    int num_simple_conflicts,
+    int num_server_conflicts,
+    bool did_commit_items,
+    const SyncSourceInfo& source,
+    size_t num_entries,
+    base::Time sync_start_time,
+    bool retry_scheduled)
+    : syncer_status(syncer_status),
+      errors(errors),
+      num_server_changes_remaining(num_server_changes_remaining),
+      is_share_usable(is_share_usable),
+      initial_sync_ended(initial_sync_ended),
+      download_progress_markers(),  // Const array member; filled in the body below.
+      has_more_to_sync(more_to_sync),
+      is_silenced(is_silenced),
+      unsynced_count(unsynced_count),
+      num_encryption_conflicts(num_encryption_conflicts),
+      num_hierarchy_conflicts(num_hierarchy_conflicts),
+      num_simple_conflicts(num_simple_conflicts),
+      num_server_conflicts(num_server_conflicts),
+      did_commit_items(did_commit_items),
+      source(source),
+      num_entries(num_entries),
+      sync_start_time(sync_start_time),
+      retry_scheduled(retry_scheduled) {
+  for (int i = syncable::FIRST_REAL_MODEL_TYPE;  // Copy markers into the const array.
+       i < syncable::MODEL_TYPE_COUNT; ++i) {
+    const_cast<std::string&>(this->download_progress_markers[i]).assign(  // Safe: still constructing.
+        download_progress_markers[i]);
+  }
+}
+
+SyncSessionSnapshot::~SyncSessionSnapshot() {}
+
+DictionaryValue* SyncSessionSnapshot::ToValue() const {
+ DictionaryValue* value = new DictionaryValue();
+ value->Set("syncerStatus", syncer_status.ToValue());
+ // We don't care too much if we lose precision here.
+ value->SetInteger("numServerChangesRemaining",
+ static_cast<int>(num_server_changes_remaining));
+ value->SetBoolean("isShareUsable", is_share_usable);
+ value->Set("initialSyncEnded",
+ syncable::ModelTypeSetToValue(initial_sync_ended));
+ value->Set("downloadProgressMarkers",
+ DownloadProgressMarkersToValue(download_progress_markers));
+ value->SetBoolean("hasMoreToSync", has_more_to_sync);
+ value->SetBoolean("isSilenced", is_silenced);
+ // We don't care too much if we lose precision here, also.
+ value->SetInteger("unsyncedCount",
+ static_cast<int>(unsynced_count));
+ value->SetInteger("numEncryptionConflicts",
+ num_encryption_conflicts);
+ value->SetInteger("numHierarchyConflicts",
+ num_hierarchy_conflicts);
+ value->SetInteger("numSimpleConflicts",
+ num_simple_conflicts);
+ value->SetInteger("numServerConflicts",
+ num_server_conflicts);
+ value->SetBoolean("didCommitItems", did_commit_items);
+ value->SetInteger("numEntries", num_entries);
+ value->Set("source", source.ToValue());
+ return value;
+}
+
+std::string SyncSessionSnapshot::ToString() const {
+  scoped_ptr<DictionaryValue> value(ToValue());  // Takes ownership of ToValue()'s result.
+  std::string json;
+  base::JSONWriter::Write(value.get(), true, &json);  // true => pretty-printed output.
+  return json;
+}
+
+ConflictProgress::ConflictProgress(bool* dirty_flag)
+ : num_server_conflicting_items(0), num_hierarchy_conflicting_items(0),
+ num_encryption_conflicting_items(0), dirty_(dirty_flag) {
+}
+
+ConflictProgress::~ConflictProgress() {
+}
+
+bool ConflictProgress::HasSimpleConflictItem(const syncable::Id& id) const {
+ return simple_conflicting_item_ids_.count(id) > 0;
+}
+
+std::set<syncable::Id>::const_iterator
+ConflictProgress::SimpleConflictingItemsBegin() const {
+ return simple_conflicting_item_ids_.begin();
+}
+std::set<syncable::Id>::const_iterator
+ConflictProgress::SimpleConflictingItemsEnd() const {
+ return simple_conflicting_item_ids_.end();
+}
+
+void ConflictProgress::AddSimpleConflictingItemById(
+    const syncable::Id& the_id) {
+  std::pair<std::set<syncable::Id>::iterator, bool> ret =
+      simple_conflicting_item_ids_.insert(the_id);
+  if (ret.second)  // Only a newly-inserted id marks the session state dirty.
+    *dirty_ = true;
+}
+
+void ConflictProgress::EraseSimpleConflictingItemById(
+ const syncable::Id& the_id) {
+ int items_erased = simple_conflicting_item_ids_.erase(the_id);
+ if (items_erased != 0)
+ *dirty_ = true;
+}
+
+void ConflictProgress::AddEncryptionConflictingItemById(
+ const syncable::Id& the_id) {
+ std::pair<std::set<syncable::Id>::iterator, bool> ret =
+ unresolvable_conflicting_item_ids_.insert(the_id);
+ if (ret.second) {
+ num_encryption_conflicting_items++;
+ *dirty_ = true;
+ }
+}
+
+void ConflictProgress::AddHierarchyConflictingItemById(
+ const syncable::Id& the_id) {
+ std::pair<std::set<syncable::Id>::iterator, bool> ret =
+ unresolvable_conflicting_item_ids_.insert(the_id);
+ if (ret.second) {
+ num_hierarchy_conflicting_items++;
+ *dirty_ = true;
+ }
+}
+
+void ConflictProgress::AddServerConflictingItemById(
+ const syncable::Id& the_id) {
+ std::pair<std::set<syncable::Id>::iterator, bool> ret =
+ unresolvable_conflicting_item_ids_.insert(the_id);
+ if (ret.second) {
+ num_server_conflicting_items++;
+ *dirty_ = true;
+ }
+}
+
+UpdateProgress::UpdateProgress() {}
+
+UpdateProgress::~UpdateProgress() {}
+
+void UpdateProgress::AddVerifyResult(const VerifyResult& verify_result,
+ const sync_pb::SyncEntity& entity) {
+ verified_updates_.push_back(std::make_pair(verify_result, entity));
+}
+
+void UpdateProgress::AddAppliedUpdate(const UpdateAttemptResponse& response,
+ const syncable::Id& id) {
+ applied_updates_.push_back(std::make_pair(response, id));
+}
+
+std::vector<AppliedUpdate>::iterator UpdateProgress::AppliedUpdatesBegin() {
+ return applied_updates_.begin();
+}
+
+std::vector<VerifiedUpdate>::const_iterator
+UpdateProgress::VerifiedUpdatesBegin() const {
+ return verified_updates_.begin();
+}
+
+std::vector<AppliedUpdate>::const_iterator
+UpdateProgress::AppliedUpdatesEnd() const {
+ return applied_updates_.end();
+}
+
+std::vector<VerifiedUpdate>::const_iterator
+UpdateProgress::VerifiedUpdatesEnd() const {
+ return verified_updates_.end();
+}
+
+int UpdateProgress::SuccessfullyAppliedUpdateCount() const {
+  int count = 0;
+  for (std::vector<AppliedUpdate>::const_iterator it =
+           applied_updates_.begin();
+       it != applied_updates_.end();
+       ++it) {
+    if (it->first == SUCCESS)  // An item applied twice is counted twice here.
+      count++;
+  }
+  return count;
+}
+
+// Returns true if at least one update application failed due to a conflict
+// during this sync cycle.
+bool UpdateProgress::HasConflictingUpdates() const {
+ std::vector<AppliedUpdate>::const_iterator it;
+ for (it = applied_updates_.begin(); it != applied_updates_.end(); ++it) {
+ if (it->first != SUCCESS) {
+ return true;
+ }
+ }
+ return false;
+}
+
+AllModelTypeState::AllModelTypeState(bool* dirty_flag)
+ : unsynced_handles(dirty_flag),
+ syncer_status(dirty_flag),
+ error(dirty_flag),
+ num_server_changes_remaining(dirty_flag, 0),
+ commit_set(ModelSafeRoutingInfo()) {
+}
+
+AllModelTypeState::~AllModelTypeState() {}
+
+PerModelSafeGroupState::PerModelSafeGroupState(bool* dirty_flag)
+ : conflict_progress(dirty_flag) {
+}
+
+PerModelSafeGroupState::~PerModelSafeGroupState() {
+}
+
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/session_state.h b/sync/sessions/session_state.h
new file mode 100644
index 0000000..676fe5b
--- /dev/null
+++ b/sync/sessions/session_state.h
@@ -0,0 +1,329 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The 'sessions' namespace comprises all the pieces of state that are
+// combined to form a SyncSession instance. In that way, it can be thought of
+// as an extension of the SyncSession type itself. Session scoping gives
+// context to things like "conflict progress", "update progress", etc., and the
+// separation this file provides allows clients to only include the parts they
+// need rather than the entire session stack.
+
+#ifndef SYNC_SESSIONS_SESSION_STATE_H_
+#define SYNC_SESSIONS_SESSION_STATE_H_
+#pragma once
+
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/sync_protocol_error.h"
+#include "sync/sessions/ordered_commit_set.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/model_type_payload_map.h"
+#include "sync/syncable/syncable.h"
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace browser_sync {
+namespace sessions {
+
+class UpdateProgress;
+
+// A container for the source of a sync session. This includes the update
+// source, the datatypes triggering the sync session, and possible session
+// specific payloads which should be sent to the server.
+struct SyncSourceInfo {
+ SyncSourceInfo();
+ explicit SyncSourceInfo(const syncable::ModelTypePayloadMap& t);
+ SyncSourceInfo(
+ const sync_pb::GetUpdatesCallerInfo::GetUpdatesSource& u,
+ const syncable::ModelTypePayloadMap& t);
+ ~SyncSourceInfo();
+
+ // Caller takes ownership of the returned dictionary.
+ base::DictionaryValue* ToValue() const;
+
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source;
+ syncable::ModelTypePayloadMap types;
+};
+
+// Data pertaining to the status of an active Syncer object.
+struct SyncerStatus {
+ SyncerStatus();
+ ~SyncerStatus();
+
+ // Caller takes ownership of the returned dictionary.
+ base::DictionaryValue* ToValue() const;
+
+  // True when we get an INVALID_STORE error from the server.
+ bool invalid_store;
+ int num_successful_commits;
+ // This is needed for monitoring extensions activity.
+ int num_successful_bookmark_commits;
+
+ // Download event counters.
+ int num_updates_downloaded_total;
+ int num_tombstone_updates_downloaded_total;
+
+ // If the syncer encountered a MIGRATION_DONE code, these are the types that
+ // the client must now "migrate", by purging and re-downloading all updates.
+ syncable::ModelTypeSet types_needing_local_migration;
+
+ // Overwrites due to conflict resolution counters.
+ int num_local_overwrites;
+ int num_server_overwrites;
+};
+
+// Counters for various errors that can occur repeatedly during a sync session.
+// TODO(lipalani) : Rename this structure to Error.
+struct ErrorCounters {
+ ErrorCounters();
+
+ // Any protocol errors that we received during this sync session.
+ SyncProtocolError sync_protocol_error;
+
+ // Records the most recent results of PostCommit and GetUpdates commands.
+ SyncerError last_download_updates_result;
+ SyncerError last_post_commit_result;
+ SyncerError last_process_commit_response_result;
+};
+
+// Caller takes ownership of the returned dictionary.
+base::DictionaryValue* DownloadProgressMarkersToValue(
+ const std::string
+ (&download_progress_markers)[syncable::MODEL_TYPE_COUNT]);
+
+// An immutable snapshot of state from a SyncSession. Convenient to use as
+// part of notifications as it is inherently thread-safe.
+struct SyncSessionSnapshot {
+ SyncSessionSnapshot(
+ const SyncerStatus& syncer_status,
+ const ErrorCounters& errors,
+ int64 num_server_changes_remaining,
+ bool is_share_usable,
+ syncable::ModelTypeSet initial_sync_ended,
+ const std::string
+ (&download_progress_markers)[syncable::MODEL_TYPE_COUNT],
+ bool more_to_sync,
+ bool is_silenced,
+ int64 unsynced_count,
+ int num_encryption_conflicts,
+ int num_hierarchy_conflicts,
+ int num_simple_conflicts,
+ int num_server_conflicts,
+ bool did_commit_items,
+ const SyncSourceInfo& source,
+ size_t num_entries,
+ base::Time sync_start_time,
+ bool retry_scheduled);
+ ~SyncSessionSnapshot();
+
+ // Caller takes ownership of the returned dictionary.
+ base::DictionaryValue* ToValue() const;
+
+ std::string ToString() const;
+
+ const SyncerStatus syncer_status;
+ const ErrorCounters errors;
+ const int64 num_server_changes_remaining;
+ const bool is_share_usable;
+ const syncable::ModelTypeSet initial_sync_ended;
+ const std::string download_progress_markers[syncable::MODEL_TYPE_COUNT];
+ const bool has_more_to_sync;
+ const bool is_silenced;
+ const int64 unsynced_count;
+ const int num_encryption_conflicts;
+ const int num_hierarchy_conflicts;
+ const int num_simple_conflicts;
+ const int num_server_conflicts;
+ const bool did_commit_items;
+ const SyncSourceInfo source;
+ const size_t num_entries;
+ base::Time sync_start_time;
+ const bool retry_scheduled;
+};
+
+// Tracks progress of conflicts and their resolutions.
+class ConflictProgress {
+ public:
+ explicit ConflictProgress(bool* dirty_flag);
+ ~ConflictProgress();
+
+ bool HasSimpleConflictItem(const syncable::Id &id) const;
+
+ // Various mutators for tracking commit conflicts.
+ void AddSimpleConflictingItemById(const syncable::Id& the_id);
+ void EraseSimpleConflictingItemById(const syncable::Id& the_id);
+ std::set<syncable::Id>::const_iterator SimpleConflictingItemsBegin() const;
+ std::set<syncable::Id>::const_iterator SimpleConflictingItemsEnd() const;
+ int SimpleConflictingItemsSize() const {
+ return simple_conflicting_item_ids_.size();
+ }
+
+ // Mutators for unresolvable conflicting items (see description below).
+ void AddEncryptionConflictingItemById(const syncable::Id& the_id);
+ int EncryptionConflictingItemsSize() const {
+ return num_encryption_conflicting_items;
+ }
+
+ void AddHierarchyConflictingItemById(const syncable::Id& id);
+ int HierarchyConflictingItemsSize() const {
+ return num_hierarchy_conflicting_items;
+ }
+
+ void AddServerConflictingItemById(const syncable::Id& id);
+ int ServerConflictingItemsSize() const {
+ return num_server_conflicting_items;
+ }
+
+ private:
+ // Conflicts that occur when local and server changes collide and can be
+ // resolved locally.
+ std::set<syncable::Id> simple_conflicting_item_ids_;
+
+ // Unresolvable conflicts are not processed by the conflict resolver. We wait
+ // and hope the server will provide us with an update that resolves these
+ // conflicts.
+ std::set<syncable::Id> unresolvable_conflicting_item_ids_;
+
+ size_t num_server_conflicting_items;
+ size_t num_hierarchy_conflicting_items;
+ size_t num_encryption_conflicting_items;
+
+ // Whether a conflicting item was added or removed since
+ // the last call to reset_progress_changed(), if any. In practice this
+ // points to StatusController::is_dirty_.
+ bool* dirty_;
+};
+
+typedef std::pair<VerifyResult, sync_pb::SyncEntity> VerifiedUpdate;
+typedef std::pair<UpdateAttemptResponse, syncable::Id> AppliedUpdate;
+
+// Tracks update application and verification.
+class UpdateProgress {
+ public:
+ UpdateProgress();
+ ~UpdateProgress();
+
+ void AddVerifyResult(const VerifyResult& verify_result,
+ const sync_pb::SyncEntity& entity);
+
+ // Log a successful or failing update attempt.
+ void AddAppliedUpdate(const UpdateAttemptResponse& response,
+ const syncable::Id& id);
+
+ // Various iterators.
+ std::vector<AppliedUpdate>::iterator AppliedUpdatesBegin();
+ std::vector<VerifiedUpdate>::const_iterator VerifiedUpdatesBegin() const;
+ std::vector<AppliedUpdate>::const_iterator AppliedUpdatesEnd() const;
+ std::vector<VerifiedUpdate>::const_iterator VerifiedUpdatesEnd() const;
+
+ // Returns the number of update application attempts. This includes both
+ // failures and successes.
+ int AppliedUpdatesSize() const { return applied_updates_.size(); }
+ int VerifiedUpdatesSize() const { return verified_updates_.size(); }
+ bool HasVerifiedUpdates() const { return !verified_updates_.empty(); }
+ bool HasAppliedUpdates() const { return !applied_updates_.empty(); }
+ void ClearVerifiedUpdates() { verified_updates_.clear(); }
+
+  // Count the number of successful update applications that have happened this
+ // cycle. Note that if an item is successfully applied twice, it will be
+ // double counted here.
+ int SuccessfullyAppliedUpdateCount() const;
+
+ // Returns true if at least one update application failed due to a conflict
+ // during this sync cycle.
+ bool HasConflictingUpdates() const;
+
+ private:
+ // Container for updates that passed verification.
+ std::vector<VerifiedUpdate> verified_updates_;
+
+ // Stores the result of the various ApplyUpdate attempts we've made.
+ // May contain duplicate entries.
+ std::vector<AppliedUpdate> applied_updates_;
+};
+
+struct SyncCycleControlParameters {
+ SyncCycleControlParameters() : conflicts_resolved(false),
+ items_committed(false),
+ debug_info_sent(false) {}
+ // Set to true by ResolveConflictsCommand if any forward progress was made.
+ bool conflicts_resolved;
+
+ // Set to true by PostCommitMessageCommand if any commits were successful.
+ bool items_committed;
+
+ // True indicates debug info has been sent once this session.
+ bool debug_info_sent;
+};
+
+// DirtyOnWrite wraps a value such that any write operation will update a
+// specified dirty bit, which can be used to determine if a notification should
+// be sent due to state change.
+template <typename T>
+class DirtyOnWrite {
+ public:
+  explicit DirtyOnWrite(bool* dirty) : dirty_(dirty) {}
+  DirtyOnWrite(bool* dirty, const T& t) : t_(t), dirty_(dirty) {}
+  T* mutate() {  // Every mutable access sets the dirty bit, even if unchanged.
+    *dirty_ = true;
+    return &t_;
+  }
+  const T& value() const { return t_; }  // Read-only access; dirty bit untouched.
+ private:
+  T t_;
+  bool* dirty_;  // Not owned by this wrapper.
+};
+
+// The next 3 structures declare how all the state involved in running a sync
+// cycle is divided between global scope (applies to all model types),
+// ModelSafeGroup scope (applies to all data types in a group), and single
+// model type scope. Within this breakdown, each struct declares which bits
+// of state are dirty-on-write and should incur dirty bit updates if changed.
+
+// Grouping of all state that applies to all model types. Note that some
+// components of the global grouping can internally implement finer grained
+// scope control (such as OrderedCommitSet), but the top level entity is still
+// a singleton with respect to model types.
+struct AllModelTypeState {
+ explicit AllModelTypeState(bool* dirty_flag);
+ ~AllModelTypeState();
+
+ // Commits for all model types are bundled together into a single message.
+ ClientToServerMessage commit_message;
+ ClientToServerResponse commit_response;
+ // We GetUpdates for some combination of types at once.
+ // requested_update_types stores the set of types which were requested.
+ syncable::ModelTypeSet updates_request_types;
+ ClientToServerResponse updates_response;
+ // Used to build the shared commit message.
+ DirtyOnWrite<std::vector<int64> > unsynced_handles;
+ DirtyOnWrite<SyncerStatus> syncer_status;
+ DirtyOnWrite<ErrorCounters> error;
+ SyncCycleControlParameters control_params;
+ DirtyOnWrite<int64> num_server_changes_remaining;
+ OrderedCommitSet commit_set;
+};
+
+// Grouping of all state that applies to a single ModelSafeGroup.
+struct PerModelSafeGroupState {
+ explicit PerModelSafeGroupState(bool* dirty_flag);
+ ~PerModelSafeGroupState();
+
+ UpdateProgress update_progress;
+ ConflictProgress conflict_progress;
+};
+
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_SESSIONS_SESSION_STATE_H_
diff --git a/sync/sessions/session_state_unittest.cc b/sync/sessions/session_state_unittest.cc
new file mode 100644
index 0000000..b18964a
--- /dev/null
+++ b/sync/sessions/session_state_unittest.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/session_state.h"
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/test/values_test_util.h"
+#include "base/time.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace sessions {
+namespace {
+
+using base::ExpectDictBooleanValue;
+using base::ExpectDictDictionaryValue;
+using base::ExpectDictIntegerValue;
+using base::ExpectDictListValue;
+using base::ExpectDictStringValue;
+
+class SessionStateTest : public testing::Test {};
+
+TEST_F(SessionStateTest, SyncSourceInfoToValue) {
+ sync_pb::GetUpdatesCallerInfo::GetUpdatesSource updates_source =
+ sync_pb::GetUpdatesCallerInfo::PERIODIC;
+ syncable::ModelTypePayloadMap types;
+ types[syncable::PREFERENCES] = "preferencespayload";
+ types[syncable::EXTENSIONS] = "";
+ scoped_ptr<DictionaryValue> expected_types_value(
+ syncable::ModelTypePayloadMapToValue(types));
+
+ SyncSourceInfo source_info(updates_source, types);
+
+ scoped_ptr<DictionaryValue> value(source_info.ToValue());
+ EXPECT_EQ(2u, value->size());
+ ExpectDictStringValue("PERIODIC", *value, "updatesSource");
+ ExpectDictDictionaryValue(*expected_types_value, *value, "types");
+}
+
+TEST_F(SessionStateTest, SyncerStatusToValue) {
+ SyncerStatus status;
+ status.invalid_store = true;
+ status.num_successful_commits = 5;
+ status.num_successful_bookmark_commits = 10;
+ status.num_updates_downloaded_total = 100;
+ status.num_tombstone_updates_downloaded_total = 200;
+ status.num_local_overwrites = 15;
+ status.num_server_overwrites = 18;
+
+ scoped_ptr<DictionaryValue> value(status.ToValue());
+ EXPECT_EQ(7u, value->size());
+ ExpectDictBooleanValue(status.invalid_store, *value, "invalidStore");
+ ExpectDictIntegerValue(status.num_successful_commits,
+ *value, "numSuccessfulCommits");
+ ExpectDictIntegerValue(status.num_successful_bookmark_commits,
+ *value, "numSuccessfulBookmarkCommits");
+ ExpectDictIntegerValue(status.num_updates_downloaded_total,
+ *value, "numUpdatesDownloadedTotal");
+ ExpectDictIntegerValue(status.num_tombstone_updates_downloaded_total,
+ *value, "numTombstoneUpdatesDownloadedTotal");
+ ExpectDictIntegerValue(status.num_local_overwrites,
+ *value, "numLocalOverwrites");
+ ExpectDictIntegerValue(status.num_server_overwrites,
+ *value, "numServerOverwrites");
+}
+
+TEST_F(SessionStateTest, DownloadProgressMarkersToValue) {
+ std::string download_progress_markers[syncable::MODEL_TYPE_COUNT];
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i < syncable::MODEL_TYPE_COUNT; ++i) {
+ std::string marker(i, i);
+ download_progress_markers[i] = marker;
+ }
+
+ scoped_ptr<DictionaryValue> value(
+ DownloadProgressMarkersToValue(download_progress_markers));
+ EXPECT_EQ(syncable::MODEL_TYPE_COUNT - syncable::FIRST_REAL_MODEL_TYPE,
+ static_cast<int>(value->size()));
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i < syncable::MODEL_TYPE_COUNT; ++i) {
+ syncable::ModelType model_type = syncable::ModelTypeFromInt(i);
+ std::string marker(i, i);
+ std::string expected_value;
+ EXPECT_TRUE(base::Base64Encode(marker, &expected_value));
+ ExpectDictStringValue(expected_value,
+ *value, syncable::ModelTypeToString(model_type));
+ }
+}
+
+TEST_F(SessionStateTest, SyncSessionSnapshotToValue) {
+ SyncerStatus syncer_status;
+ syncer_status.num_successful_commits = 500;
+ scoped_ptr<DictionaryValue> expected_syncer_status_value(
+ syncer_status.ToValue());
+
+ ErrorCounters errors;
+
+ const int kNumServerChangesRemaining = 105;
+ const bool kIsShareUsable = true;
+
+ const syncable::ModelTypeSet initial_sync_ended(
+ syncable::BOOKMARKS, syncable::PREFERENCES);
+ scoped_ptr<ListValue> expected_initial_sync_ended_value(
+ syncable::ModelTypeSetToValue(initial_sync_ended));
+
+ std::string download_progress_markers[syncable::MODEL_TYPE_COUNT];
+ download_progress_markers[syncable::BOOKMARKS] = "test";
+ download_progress_markers[syncable::APPS] = "apps";
+ scoped_ptr<DictionaryValue> expected_download_progress_markers_value(
+ DownloadProgressMarkersToValue(download_progress_markers));
+
+ const bool kHasMoreToSync = false;
+ const bool kIsSilenced = true;
+ const int kUnsyncedCount = 1053;
+ const int kNumEncryptionConflicts = 1054;
+ const int kNumHierarchyConflicts = 1055;
+ const int kNumSimpleConflicts = 1056;
+ const int kNumServerConflicts = 1057;
+ const bool kDidCommitItems = true;
+
+ SyncSourceInfo source;
+ scoped_ptr<DictionaryValue> expected_source_value(source.ToValue());
+
+ SyncSessionSnapshot snapshot(syncer_status,
+ errors,
+ kNumServerChangesRemaining,
+ kIsShareUsable,
+ initial_sync_ended,
+ download_progress_markers,
+ kHasMoreToSync,
+ kIsSilenced,
+ kUnsyncedCount,
+ kNumEncryptionConflicts,
+ kNumHierarchyConflicts,
+ kNumSimpleConflicts,
+ kNumServerConflicts,
+ kDidCommitItems,
+ source,
+ 0,
+ base::Time::Now(),
+ false);
+ scoped_ptr<DictionaryValue> value(snapshot.ToValue());
+ EXPECT_EQ(15u, value->size());
+ ExpectDictDictionaryValue(*expected_syncer_status_value, *value,
+ "syncerStatus");
+ ExpectDictIntegerValue(kNumServerChangesRemaining, *value,
+ "numServerChangesRemaining");
+ ExpectDictBooleanValue(kIsShareUsable, *value, "isShareUsable");
+ ExpectDictListValue(*expected_initial_sync_ended_value, *value,
+ "initialSyncEnded");
+ ExpectDictDictionaryValue(*expected_download_progress_markers_value,
+ *value, "downloadProgressMarkers");
+ ExpectDictBooleanValue(kHasMoreToSync, *value, "hasMoreToSync");
+ ExpectDictBooleanValue(kIsSilenced, *value, "isSilenced");
+ ExpectDictIntegerValue(kUnsyncedCount, *value, "unsyncedCount");
+ ExpectDictIntegerValue(kNumEncryptionConflicts, *value,
+ "numEncryptionConflicts");
+ ExpectDictIntegerValue(kNumHierarchyConflicts, *value,
+ "numHierarchyConflicts");
+ ExpectDictIntegerValue(kNumSimpleConflicts, *value,
+ "numSimpleConflicts");
+ ExpectDictIntegerValue(kNumServerConflicts, *value,
+ "numServerConflicts");
+ ExpectDictBooleanValue(kDidCommitItems, *value,
+ "didCommitItems");
+ ExpectDictDictionaryValue(*expected_source_value, *value, "source");
+}
+
+} // namespace
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/status_controller.cc b/sync/sessions/status_controller.cc
new file mode 100644
index 0000000..2ae1a96
--- /dev/null
+++ b/sync/sessions/status_controller.cc
@@ -0,0 +1,310 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/status_controller.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "sync/protocol/sync_protocol_error.h"
+#include "sync/syncable/model_type.h"
+
+namespace browser_sync {
+namespace sessions {
+
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+
+StatusController::StatusController(const ModelSafeRoutingInfo& routes)
+ : shared_(&is_dirty_),
+ per_model_group_deleter_(&per_model_group_),
+ is_dirty_(false),
+ group_restriction_in_effect_(false),
+ group_restriction_(GROUP_PASSIVE),
+ routing_info_(routes) {
+}
+
+StatusController::~StatusController() {}
+
+bool StatusController::TestAndClearIsDirty() {
+ bool is_dirty = is_dirty_;
+ is_dirty_ = false;
+ return is_dirty;
+}
+
+const UpdateProgress* StatusController::update_progress() const {
+ const PerModelSafeGroupState* state =
+ GetModelSafeGroupState(true, group_restriction_);
+ return state ? &state->update_progress : NULL;
+}
+
+UpdateProgress* StatusController::mutable_update_progress() {
+ return &GetOrCreateModelSafeGroupState(
+ true, group_restriction_)->update_progress;
+}
+
+const ConflictProgress* StatusController::conflict_progress() const {
+ const PerModelSafeGroupState* state =
+ GetModelSafeGroupState(true, group_restriction_);
+ return state ? &state->conflict_progress : NULL;
+}
+
+ConflictProgress* StatusController::mutable_conflict_progress() {
+ return &GetOrCreateModelSafeGroupState(
+ true, group_restriction_)->conflict_progress;
+}
+
+const ConflictProgress* StatusController::GetUnrestrictedConflictProgress(
+ ModelSafeGroup group) const {
+ const PerModelSafeGroupState* state =
+ GetModelSafeGroupState(false, group);
+ return state ? &state->conflict_progress : NULL;
+}
+
+ConflictProgress*
+ StatusController::GetUnrestrictedMutableConflictProgressForTest(
+ ModelSafeGroup group) {
+ return &GetOrCreateModelSafeGroupState(false, group)->conflict_progress;
+}
+
+const UpdateProgress* StatusController::GetUnrestrictedUpdateProgress(
+ ModelSafeGroup group) const {
+ const PerModelSafeGroupState* state =
+ GetModelSafeGroupState(false, group);
+ return state ? &state->update_progress : NULL;
+}
+
+UpdateProgress*
+ StatusController::GetUnrestrictedMutableUpdateProgressForTest(
+ ModelSafeGroup group) {
+ return &GetOrCreateModelSafeGroupState(false, group)->update_progress;
+}
+
+const PerModelSafeGroupState* StatusController::GetModelSafeGroupState(
+ bool restrict, ModelSafeGroup group) const {
+ DCHECK_EQ(restrict, group_restriction_in_effect_);
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.find(group);
+ return (it == per_model_group_.end()) ? NULL : it->second;
+}
+
+PerModelSafeGroupState* StatusController::GetOrCreateModelSafeGroupState(
+ bool restrict, ModelSafeGroup group) {
+ DCHECK_EQ(restrict, group_restriction_in_effect_);
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::iterator it =
+ per_model_group_.find(group);
+ if (it == per_model_group_.end()) {
+ PerModelSafeGroupState* state = new PerModelSafeGroupState(&is_dirty_);
+ it = per_model_group_.insert(std::make_pair(group, state)).first;
+ }
+ return it->second;
+}
+
+void StatusController::increment_num_updates_downloaded_by(int value) {
+ shared_.syncer_status.mutate()->num_updates_downloaded_total += value;
+}
+
+void StatusController::set_types_needing_local_migration(
+ syncable::ModelTypeSet types) {
+ shared_.syncer_status.mutate()->types_needing_local_migration = types;
+}
+
+void StatusController::increment_num_tombstone_updates_downloaded_by(
+ int value) {
+ shared_.syncer_status.mutate()->num_tombstone_updates_downloaded_total +=
+ value;
+}
+
+void StatusController::set_num_server_changes_remaining(
+ int64 changes_remaining) {
+ if (shared_.num_server_changes_remaining.value() != changes_remaining)
+ *(shared_.num_server_changes_remaining.mutate()) = changes_remaining;
+}
+
+void StatusController::set_invalid_store(bool invalid_store) {
+ if (shared_.syncer_status.value().invalid_store != invalid_store)
+ shared_.syncer_status.mutate()->invalid_store = invalid_store;
+}
+
+void StatusController::UpdateStartTime() {
+ sync_start_time_ = base::Time::Now();
+}
+
+void StatusController::set_num_successful_bookmark_commits(int value) {
+ if (shared_.syncer_status.value().num_successful_bookmark_commits != value)
+ shared_.syncer_status.mutate()->num_successful_bookmark_commits = value;
+}
+
+void StatusController::set_unsynced_handles(
+ const std::vector<int64>& unsynced_handles) {
+ if (!operator==(unsynced_handles, shared_.unsynced_handles.value())) {
+ *(shared_.unsynced_handles.mutate()) = unsynced_handles;
+ }
+}
+
+void StatusController::increment_num_successful_bookmark_commits() {
+ set_num_successful_bookmark_commits(
+ shared_.syncer_status.value().num_successful_bookmark_commits + 1);
+}
+
+void StatusController::increment_num_successful_commits() {
+ shared_.syncer_status.mutate()->num_successful_commits++;
+}
+
+void StatusController::increment_num_local_overwrites() {
+ shared_.syncer_status.mutate()->num_local_overwrites++;
+}
+
+void StatusController::increment_num_server_overwrites() {
+ shared_.syncer_status.mutate()->num_server_overwrites++;
+}
+
+void StatusController::set_sync_protocol_error(
+ const SyncProtocolError& error) {
+ shared_.error.mutate()->sync_protocol_error = error;
+}
+
+void StatusController::set_last_download_updates_result(
+ const SyncerError result) {
+ shared_.error.mutate()->last_download_updates_result = result;
+}
+
+void StatusController::set_last_post_commit_result(const SyncerError result) {
+ shared_.error.mutate()->last_post_commit_result = result;
+}
+
+void StatusController::set_last_process_commit_response_result(
+ const SyncerError result) {
+ shared_.error.mutate()->last_process_commit_response_result = result;
+}
+
+void StatusController::set_commit_set(const OrderedCommitSet& commit_set) {
+ DCHECK(!group_restriction_in_effect_);
+ shared_.commit_set = commit_set;
+}
+
+void StatusController::update_conflicts_resolved(bool resolved) {
+ shared_.control_params.conflicts_resolved |= resolved;
+}
+void StatusController::reset_conflicts_resolved() {
+ shared_.control_params.conflicts_resolved = false;
+}
+void StatusController::set_items_committed() {
+ shared_.control_params.items_committed = true;
+}
+
+// Returns the number of updates received from the sync server.
+int64 StatusController::CountUpdates() const {
+ const ClientToServerResponse& updates = shared_.updates_response;
+ if (updates.has_get_updates()) {
+ return updates.get_updates().entries().size();
+ } else {
+ return 0;
+ }
+}
+
+bool StatusController::CurrentCommitIdProjectionHasIndex(size_t index) {
+ OrderedCommitSet::Projection proj =
+ shared_.commit_set.GetCommitIdProjection(group_restriction_);
+ return std::binary_search(proj.begin(), proj.end(), index);
+}
+
+bool StatusController::HasConflictingUpdates() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "HasConflictingUpdates applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ for (; it != per_model_group_.end(); ++it) {
+ if (it->second->update_progress.HasConflictingUpdates())
+ return true;
+ }
+ return false;
+}
+
+int StatusController::TotalNumEncryptionConflictingItems() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "TotalNumEncryptionConflictingItems applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ int sum = 0;
+ for (; it != per_model_group_.end(); ++it) {
+ sum += it->second->conflict_progress.EncryptionConflictingItemsSize();
+ }
+ return sum;
+}
+
+int StatusController::TotalNumHierarchyConflictingItems() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "TotalNumHierarchyConflictingItems applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ int sum = 0;
+ for (; it != per_model_group_.end(); ++it) {
+ sum += it->second->conflict_progress.HierarchyConflictingItemsSize();
+ }
+ return sum;
+}
+
+int StatusController::TotalNumSimpleConflictingItems() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "TotalNumSimpleConflictingItems applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ int sum = 0;
+ for (; it != per_model_group_.end(); ++it) {
+ sum += it->second->conflict_progress.SimpleConflictingItemsSize();
+ }
+ return sum;
+}
+
+int StatusController::TotalNumServerConflictingItems() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "TotalNumServerConflictingItems applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ int sum = 0;
+ for (; it != per_model_group_.end(); ++it) {
+ sum += it->second->conflict_progress.ServerConflictingItemsSize();
+ }
+ return sum;
+}
+
+int StatusController::TotalNumConflictingItems() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "TotalNumConflictingItems applies to all ModelSafeGroups";
+ std::map<ModelSafeGroup, PerModelSafeGroupState*>::const_iterator it =
+ per_model_group_.begin();
+ int sum = 0;
+ for (; it != per_model_group_.end(); ++it) {
+ sum += it->second->conflict_progress.SimpleConflictingItemsSize();
+ sum += it->second->conflict_progress.EncryptionConflictingItemsSize();
+ sum += it->second->conflict_progress.HierarchyConflictingItemsSize();
+ sum += it->second->conflict_progress.ServerConflictingItemsSize();
+ }
+ return sum;
+}
+
+bool StatusController::ServerSaysNothingMoreToDownload() const {
+ if (!download_updates_succeeded())
+ return false;
+
+ if (!updates_response().get_updates().has_changes_remaining()) {
+ NOTREACHED(); // Server should always send changes remaining.
+ return false; // Avoid looping forever.
+ }
+ // Changes remaining is an estimate, but if it's estimated to be
+ // zero, that's firm and we don't have to ask again.
+ return updates_response().get_updates().changes_remaining() == 0;
+}
+
+void StatusController::set_debug_info_sent() {
+ shared_.control_params.debug_info_sent = true;
+}
+
+bool StatusController::debug_info_sent() const {
+ return shared_.control_params.debug_info_sent;
+}
+
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/status_controller.h b/sync/sessions/status_controller.h
new file mode 100644
index 0000000..6a2491c
--- /dev/null
+++ b/sync/sessions/status_controller.h
@@ -0,0 +1,297 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// StatusController handles all counter and status related number crunching and
+// state tracking on behalf of a SyncSession. It 'controls' the model data
+// defined in session_state.h. The most important feature of StatusController
+// is the ScopedModelSafetyRestriction. When one of these is active, the
+// underlying data set exposed via accessors is swapped out to the appropriate
+// set for the restricted ModelSafeGroup behind the scenes. For example, if
+// GROUP_UI is set, then accessors such as conflict_progress() and commit_ids()
+// are implicitly restricted to returning only data pertaining to GROUP_UI.
+// You can see which parts of status fall into this "restricted" category, or
+// the global "shared" category for all model types, by looking at the struct
+// declarations in session_state.h. If these accessors are invoked without a
+// restriction in place, this is a violation and will cause debug assertions
+// to surface improper use of the API in development. Likewise for
+// invocation of "shared" accessors when a restriction is in place; for
+// safety's sake, an assertion will fire.
+//
+// NOTE: There is no concurrent access protection provided by this class. It
+// assumes one single thread is accessing this class for each unique
+// ModelSafeGroup, and also only one single thread (in practice, the
+// SyncerThread) responsible for all "shared" access when no restriction is in
+// place. Thus, every bit of data is to be accessed mutually exclusively with
+// respect to threads.
+//
+// StatusController can also track if changes occur to certain parts of state
+// so that various parts of the sync engine can avoid broadcasting
+// notifications if no changes occurred.
+
+#ifndef SYNC_SESSIONS_STATUS_CONTROLLER_H_
+#define SYNC_SESSIONS_STATUS_CONTROLLER_H_
+#pragma once
+
+#include <map>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "base/time.h"
+#include "sync/sessions/ordered_commit_set.h"
+#include "sync/sessions/session_state.h"
+
+namespace browser_sync {
+namespace sessions {
+
+class StatusController {
+ public:
+ explicit StatusController(const ModelSafeRoutingInfo& routes);
+ ~StatusController();
+
+ // Returns true if some portion of the session state has changed (is dirty)
+ // since it was created or was last reset.
+ bool TestAndClearIsDirty();
+
+ // Progress counters. All const methods may return NULL if the
+ // progress structure doesn't exist, but all non-const methods
+ // auto-create.
+ const ConflictProgress* conflict_progress() const;
+ ConflictProgress* mutable_conflict_progress();
+ const UpdateProgress* update_progress() const;
+ UpdateProgress* mutable_update_progress();
+ const ConflictProgress* GetUnrestrictedConflictProgress(
+ ModelSafeGroup group) const;
+ ConflictProgress* GetUnrestrictedMutableConflictProgressForTest(
+ ModelSafeGroup group);
+ const UpdateProgress* GetUnrestrictedUpdateProgress(
+ ModelSafeGroup group) const;
+ UpdateProgress* GetUnrestrictedMutableUpdateProgressForTest(
+ ModelSafeGroup group);
+
+ // ClientToServer messages.
+ const ClientToServerMessage& commit_message() {
+ return shared_.commit_message;
+ }
+ ClientToServerMessage* mutable_commit_message() {
+ return &shared_.commit_message;
+ }
+ const ClientToServerResponse& commit_response() const {
+ return shared_.commit_response;
+ }
+ ClientToServerResponse* mutable_commit_response() {
+ return &shared_.commit_response;
+ }
+ const syncable::ModelTypeSet updates_request_types() const {
+ return shared_.updates_request_types;
+ }
+ void set_updates_request_types(syncable::ModelTypeSet value) {
+ shared_.updates_request_types = value;
+ }
+ const ClientToServerResponse& updates_response() const {
+ return shared_.updates_response;
+ }
+ ClientToServerResponse* mutable_updates_response() {
+ return &shared_.updates_response;
+ }
+
+ // Errors and SyncerStatus.
+ const ErrorCounters& error() const {
+ return shared_.error.value();
+ }
+ const SyncerStatus& syncer_status() const {
+ return shared_.syncer_status.value();
+ }
+
+ // Changelog related state.
+ int64 num_server_changes_remaining() const {
+ return shared_.num_server_changes_remaining.value();
+ }
+
+ // Commit path data.
+ const std::vector<syncable::Id>& commit_ids() const {
+ DCHECK(!group_restriction_in_effect_) << "Group restriction in effect!";
+ return shared_.commit_set.GetAllCommitIds();
+ }
+ const OrderedCommitSet::Projection& commit_id_projection() {
+ DCHECK(group_restriction_in_effect_)
+ << "No group restriction for projection.";
+ return shared_.commit_set.GetCommitIdProjection(group_restriction_);
+ }
+ const syncable::Id& GetCommitIdAt(size_t index) {
+ DCHECK(CurrentCommitIdProjectionHasIndex(index));
+ return shared_.commit_set.GetCommitIdAt(index);
+ }
+ syncable::ModelType GetCommitModelTypeAt(size_t index) {
+ DCHECK(CurrentCommitIdProjectionHasIndex(index));
+ return shared_.commit_set.GetModelTypeAt(index);
+ }
+ syncable::ModelType GetUnrestrictedCommitModelTypeAt(size_t index) const {
+ DCHECK(!group_restriction_in_effect_) << "Group restriction in effect!";
+ return shared_.commit_set.GetModelTypeAt(index);
+ }
+ const std::vector<int64>& unsynced_handles() const {
+ DCHECK(!group_restriction_in_effect_)
+ << "unsynced_handles is unrestricted.";
+ return shared_.unsynced_handles.value();
+ }
+
+ // Control parameters for sync cycles.
+ bool conflicts_resolved() const {
+ return shared_.control_params.conflicts_resolved;
+ }
+ bool did_commit_items() const {
+ return shared_.control_params.items_committed;
+ }
+
+ // If a GetUpdates for any data type resulted in downloading an update that
+ // is in conflict, this method returns true.
+ // Note: this includes unresolvable conflicts.
+ bool HasConflictingUpdates() const;
+
+  // Aggregate sums of various types of conflict counters across all
+ // ConflictProgress objects (one for each ModelSafeGroup currently in-use).
+ int TotalNumEncryptionConflictingItems() const;
+ int TotalNumHierarchyConflictingItems() const;
+ int TotalNumServerConflictingItems() const;
+ int TotalNumSimpleConflictingItems() const;
+
+ // Aggregate sum of SimpleConflictingItemSize() and other
+ // ${Type}ConflictingItemSize() methods over all ConflictProgress objects (one
+ // for each ModelSafeGroup currently in-use).
+ int TotalNumConflictingItems() const;
+
+ // Returns the number of updates received from the sync server.
+ int64 CountUpdates() const;
+
+ // Returns true iff any of the commit ids added during this session are
+ // bookmark related, and the bookmark group restriction is in effect.
+ bool HasBookmarkCommitActivity() const {
+ return ActiveGroupRestrictionIncludesModel(syncable::BOOKMARKS) &&
+ shared_.commit_set.HasBookmarkCommitId();
+ }
+
+ // Returns true if the last download_updates_command received a valid
+ // server response.
+ bool download_updates_succeeded() const {
+ return updates_response().has_get_updates();
+ }
+
+ // Returns true if the last updates response indicated that we were fully
+ // up to date. This is subtle: if it's false, it could either mean that
+ // the server said there WAS more to download, or it could mean that we
+ // were unable to reach the server. If we didn't request every enabled
+ // datatype, then we can't say for sure that there's nothing left to
+ // download: in that case, this also returns false.
+ bool ServerSaysNothingMoreToDownload() const;
+
+ ModelSafeGroup group_restriction() const {
+ return group_restriction_;
+ }
+
+ base::Time sync_start_time() const {
+ // The time at which we sent the first GetUpdates command for this sync.
+ return sync_start_time_;
+ }
+
+ // Check whether a particular model is included by the active group
+ // restriction.
+ bool ActiveGroupRestrictionIncludesModel(syncable::ModelType model) const {
+ if (!group_restriction_in_effect_)
+ return true;
+ ModelSafeRoutingInfo::const_iterator it = routing_info_.find(model);
+ if (it == routing_info_.end())
+ return false;
+ return group_restriction() == it->second;
+ }
+
+ // A toolbelt full of methods for updating counters and flags.
+ void set_num_server_changes_remaining(int64 changes_remaining);
+ void set_invalid_store(bool invalid_store);
+ void set_num_successful_bookmark_commits(int value);
+ void increment_num_successful_commits();
+ void increment_num_successful_bookmark_commits();
+ void increment_num_updates_downloaded_by(int value);
+ void increment_num_tombstone_updates_downloaded_by(int value);
+ void set_types_needing_local_migration(syncable::ModelTypeSet types);
+ void set_unsynced_handles(const std::vector<int64>& unsynced_handles);
+ void increment_num_local_overwrites();
+ void increment_num_server_overwrites();
+ void set_sync_protocol_error(const SyncProtocolError& error);
+ void set_last_download_updates_result(const SyncerError result);
+ void set_last_post_commit_result(const SyncerError result);
+ void set_last_process_commit_response_result(const SyncerError result);
+
+ void set_commit_set(const OrderedCommitSet& commit_set);
+ void update_conflicts_resolved(bool resolved);
+ void reset_conflicts_resolved();
+ void set_items_committed();
+
+ void UpdateStartTime();
+
+ void set_debug_info_sent();
+
+ bool debug_info_sent() const;
+
+ private:
+ friend class ScopedModelSafeGroupRestriction;
+
+ // Returns true iff the commit id projection for |group_restriction_|
+ // references position |index| into the full set of commit ids in play.
+ bool CurrentCommitIdProjectionHasIndex(size_t index);
+
+ // Returns the state, if it exists, or NULL otherwise.
+ const PerModelSafeGroupState* GetModelSafeGroupState(
+ bool restrict, ModelSafeGroup group) const;
+
+ // Helper to lazily create objects for per-ModelSafeGroup state.
+ PerModelSafeGroupState* GetOrCreateModelSafeGroupState(
+ bool restrict, ModelSafeGroup group);
+
+ AllModelTypeState shared_;
+ std::map<ModelSafeGroup, PerModelSafeGroupState*> per_model_group_;
+
+ STLValueDeleter<std::map<ModelSafeGroup, PerModelSafeGroupState*> >
+ per_model_group_deleter_;
+
+ // Set to true if any DirtyOnWrite pieces of state we maintain are changed.
+ // Reset to false by TestAndClearIsDirty.
+ bool is_dirty_;
+
+ // Used to fail read/write operations on state that don't obey the current
+ // active ModelSafeWorker contract.
+ bool group_restriction_in_effect_;
+ ModelSafeGroup group_restriction_;
+
+ const ModelSafeRoutingInfo routing_info_;
+
+ base::Time sync_start_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(StatusController);
+};
+
+// A utility to restrict access to only those parts of the given
+// StatusController that pertain to the specified ModelSafeGroup.
+class ScopedModelSafeGroupRestriction {
+ public:
+ ScopedModelSafeGroupRestriction(StatusController* to_restrict,
+ ModelSafeGroup restriction)
+ : status_(to_restrict) {
+ DCHECK(!status_->group_restriction_in_effect_);
+ status_->group_restriction_ = restriction;
+ status_->group_restriction_in_effect_ = true;
+ }
+ ~ScopedModelSafeGroupRestriction() {
+ DCHECK(status_->group_restriction_in_effect_);
+ status_->group_restriction_in_effect_ = false;
+ }
+ private:
+ StatusController* status_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedModelSafeGroupRestriction);
+};
+
+}  // namespace sessions
+}  // namespace browser_sync
+
+#endif // SYNC_SESSIONS_STATUS_CONTROLLER_H_
diff --git a/sync/sessions/status_controller_unittest.cc b/sync/sessions/status_controller_unittest.cc
new file mode 100644
index 0000000..59d3919
--- /dev/null
+++ b/sync/sessions/status_controller_unittest.cc
@@ -0,0 +1,198 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/sync_session.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace sessions {
+
+class StatusControllerTest : public testing::Test {
+ public:
+ virtual void SetUp() {
+ routes_[syncable::BOOKMARKS] = GROUP_UI;
+ }
+ protected:
+ ModelSafeRoutingInfo routes_;
+};
+
+TEST_F(StatusControllerTest, GetsDirty) {
+ StatusController status(routes_);
+ status.set_num_server_changes_remaining(30);
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+
+ status.set_invalid_store(true);
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+ status.set_invalid_store(false);
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+
+ status.increment_num_successful_commits();
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+ status.increment_num_successful_commits();
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_UI);
+ status.mutable_conflict_progress()->
+ AddSimpleConflictingItemById(syncable::Id());
+ }
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+
+ std::vector<int64> v;
+ v.push_back(1);
+ status.set_unsynced_handles(v);
+ EXPECT_TRUE(status.TestAndClearIsDirty());
+ std::vector<int64> v2;
+ v2.push_back(1);
+ status.set_unsynced_handles(v2);
+ EXPECT_FALSE(status.TestAndClearIsDirty()); // Test for deep comparison.
+}
+
+TEST_F(StatusControllerTest, StaysClean) {
+ StatusController status(routes_);
+ status.update_conflicts_resolved(true);
+ EXPECT_FALSE(status.TestAndClearIsDirty());
+
+ status.set_items_committed();
+ EXPECT_FALSE(status.TestAndClearIsDirty());
+
+ OrderedCommitSet commits(routes_);
+ commits.AddCommitItem(0, syncable::Id(), syncable::BOOKMARKS);
+ status.set_commit_set(commits);
+ EXPECT_FALSE(status.TestAndClearIsDirty());
+}
+
+// This test is useful, as simple as it sounds, due to the copy-paste prone
+// nature of status_controller.cc (we have had bugs in the past where a set_foo
+// method was actually setting |bar_| instead!).
+TEST_F(StatusControllerTest, ReadYourWrites) {
+ StatusController status(routes_);
+ status.set_num_server_changes_remaining(13);
+ EXPECT_EQ(13, status.num_server_changes_remaining());
+
+ EXPECT_FALSE(status.syncer_status().invalid_store);
+ status.set_invalid_store(true);
+ EXPECT_TRUE(status.syncer_status().invalid_store);
+
+ EXPECT_FALSE(status.conflicts_resolved());
+ status.update_conflicts_resolved(true);
+ EXPECT_TRUE(status.conflicts_resolved());
+
+ status.set_last_download_updates_result(SYNCER_OK);
+ EXPECT_EQ(SYNCER_OK, status.error().last_download_updates_result);
+
+ status.set_last_post_commit_result(SYNC_AUTH_ERROR);
+ EXPECT_EQ(SYNC_AUTH_ERROR, status.error().last_post_commit_result);
+
+ status.set_last_process_commit_response_result(SYNC_SERVER_ERROR);
+ EXPECT_EQ(SYNC_SERVER_ERROR,
+ status.error().last_process_commit_response_result);
+
+ for (int i = 0; i < 14; i++)
+ status.increment_num_successful_commits();
+ EXPECT_EQ(14, status.syncer_status().num_successful_commits);
+
+ std::vector<int64> v;
+ v.push_back(16);
+ status.set_unsynced_handles(v);
+ EXPECT_EQ(16, v[0]);
+}
+
+TEST_F(StatusControllerTest, HasConflictingUpdates) {
+ StatusController status(routes_);
+ EXPECT_FALSE(status.HasConflictingUpdates());
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_UI);
+ EXPECT_FALSE(status.update_progress());
+ status.mutable_update_progress()->AddAppliedUpdate(SUCCESS,
+ syncable::Id());
+ status.mutable_update_progress()->AddAppliedUpdate(CONFLICT_SIMPLE,
+ syncable::Id());
+ EXPECT_TRUE(status.update_progress()->HasConflictingUpdates());
+ }
+
+ EXPECT_TRUE(status.HasConflictingUpdates());
+
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_PASSIVE);
+ EXPECT_FALSE(status.update_progress());
+ }
+}
+
+TEST_F(StatusControllerTest, HasConflictingUpdates_NonBlockingUpdates) {
+ StatusController status(routes_);
+ EXPECT_FALSE(status.HasConflictingUpdates());
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_UI);
+ EXPECT_FALSE(status.update_progress());
+ status.mutable_update_progress()->AddAppliedUpdate(SUCCESS,
+ syncable::Id());
+ status.mutable_update_progress()->AddAppliedUpdate(CONFLICT_HIERARCHY,
+ syncable::Id());
+ EXPECT_TRUE(status.update_progress()->HasConflictingUpdates());
+ }
+
+ EXPECT_TRUE(status.HasConflictingUpdates());
+}
+
+TEST_F(StatusControllerTest, CountUpdates) {
+ StatusController status(routes_);
+ EXPECT_EQ(0, status.CountUpdates());
+ ClientToServerResponse* response(status.mutable_updates_response());
+ sync_pb::SyncEntity* entity1 = response->mutable_get_updates()->add_entries();
+ sync_pb::SyncEntity* entity2 = response->mutable_get_updates()->add_entries();
+ ASSERT_TRUE(entity1 != NULL && entity2 != NULL);
+ EXPECT_EQ(2, status.CountUpdates());
+}
+
+// Test TotalNumConflictingItems
+TEST_F(StatusControllerTest, TotalNumConflictingItems) {
+ StatusController status(routes_);
+ TestIdFactory f;
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_UI);
+ EXPECT_FALSE(status.conflict_progress());
+ status.mutable_conflict_progress()->
+ AddSimpleConflictingItemById(f.NewLocalId());
+ status.mutable_conflict_progress()->
+ AddSimpleConflictingItemById(f.NewLocalId());
+ EXPECT_EQ(2, status.conflict_progress()->SimpleConflictingItemsSize());
+ }
+ EXPECT_EQ(2, status.TotalNumConflictingItems());
+ {
+ ScopedModelSafeGroupRestriction r(&status, GROUP_DB);
+ EXPECT_FALSE(status.conflict_progress());
+ status.mutable_conflict_progress()->
+ AddSimpleConflictingItemById(f.NewLocalId());
+ status.mutable_conflict_progress()->
+ AddSimpleConflictingItemById(f.NewLocalId());
+ EXPECT_EQ(2, status.conflict_progress()->SimpleConflictingItemsSize());
+ }
+ EXPECT_EQ(4, status.TotalNumConflictingItems());
+}
+
+// Basic test that non group-restricted state accessors don't cause violations.
+TEST_F(StatusControllerTest, Unrestricted) {
+ StatusController status(routes_);
+ const UpdateProgress* progress =
+ status.GetUnrestrictedUpdateProgress(GROUP_UI);
+ EXPECT_FALSE(progress);
+ status.mutable_commit_message();
+ status.commit_response();
+ status.mutable_commit_response();
+ status.updates_response();
+ status.mutable_updates_response();
+ status.error();
+ status.syncer_status();
+ status.num_server_changes_remaining();
+ status.commit_ids();
+ status.HasBookmarkCommitActivity();
+ status.download_updates_succeeded();
+ status.ServerSaysNothingMoreToDownload();
+ status.group_restriction();
+}
+
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/sync_session.cc b/sync/sessions/sync_session.cc
new file mode 100644
index 0000000..2a93b44
--- /dev/null
+++ b/sync/sessions/sync_session.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/sync_session.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace browser_sync {
+namespace sessions {
+
+namespace {
+
+std::set<ModelSafeGroup> ComputeEnabledGroups(
+ const ModelSafeRoutingInfo& routing_info,
+ const std::vector<ModelSafeWorker*>& workers) {
+ std::set<ModelSafeGroup> enabled_groups;
+ // Project the list of enabled types (i.e., types in the routing
+ // info) to a list of enabled groups.
+ for (ModelSafeRoutingInfo::const_iterator it = routing_info.begin();
+ it != routing_info.end(); ++it) {
+ enabled_groups.insert(it->second);
+ }
+ // GROUP_PASSIVE is always enabled, since that's the group that
+ // top-level folders map to.
+ enabled_groups.insert(GROUP_PASSIVE);
+ if (DCHECK_IS_ON()) {
+ // We sometimes create dummy SyncSession objects (see
+ // SyncScheduler::InitialSnapshot) so don't check in that case.
+ if (!routing_info.empty() || !workers.empty()) {
+ std::set<ModelSafeGroup> groups_with_workers;
+ for (std::vector<ModelSafeWorker*>::const_iterator it = workers.begin();
+ it != workers.end(); ++it) {
+ groups_with_workers.insert((*it)->GetModelSafeGroup());
+ }
+ // All enabled groups should have a corresponding worker.
+ DCHECK(std::includes(
+ groups_with_workers.begin(), groups_with_workers.end(),
+ enabled_groups.begin(), enabled_groups.end()));
+ }
+ }
+ return enabled_groups;
+}
+
+} // namespace
+
+SyncSession::SyncSession(SyncSessionContext* context, Delegate* delegate,
+ const SyncSourceInfo& source,
+ const ModelSafeRoutingInfo& routing_info,
+ const std::vector<ModelSafeWorker*>& workers)
+ : context_(context),
+ source_(source),
+ write_transaction_(NULL),
+ delegate_(delegate),
+ workers_(workers),
+ routing_info_(routing_info),
+ enabled_groups_(ComputeEnabledGroups(routing_info_, workers_)) {
+ status_controller_.reset(new StatusController(routing_info_));
+ std::sort(workers_.begin(), workers_.end());
+}
+
+SyncSession::~SyncSession() {}
+
+void SyncSession::Coalesce(const SyncSession& session) {
+ if (context_ != session.context() || delegate_ != session.delegate_) {
+ NOTREACHED();
+ return;
+ }
+
+ // When we coalesce sessions, the sync update source gets overwritten with the
+ // most recent, while the type/payload map gets merged.
+ CoalescePayloads(&source_.types, session.source_.types);
+ source_.updates_source = session.source_.updates_source;
+
+ std::vector<ModelSafeWorker*> temp;
+ std::set_union(workers_.begin(), workers_.end(),
+ session.workers_.begin(), session.workers_.end(),
+ std::back_inserter(temp));
+ workers_.swap(temp);
+
+ // We have to update the model safe routing info to the union. In case the
+ // same key is present in both pick the one from session.
+ for (ModelSafeRoutingInfo::const_iterator it =
+ session.routing_info_.begin();
+ it != session.routing_info_.end();
+ ++it) {
+ routing_info_[it->first] = it->second;
+ }
+
+ // Now update enabled groups.
+ enabled_groups_ = ComputeEnabledGroups(routing_info_, workers_);
+}
+
+void SyncSession::RebaseRoutingInfoWithLatest(const SyncSession& session) {
+ ModelSafeRoutingInfo temp_routing_info;
+
+ // Take the intersection and also set the routing info(it->second) from the
+ // passed in session.
+ for (ModelSafeRoutingInfo::const_iterator it =
+ session.routing_info_.begin(); it != session.routing_info_.end();
+ ++it) {
+ if (routing_info_.find(it->first) != routing_info_.end()) {
+ temp_routing_info[it->first] = it->second;
+ }
+ }
+
+ // Now swap it.
+ routing_info_.swap(temp_routing_info);
+
+ // Now update the payload map.
+ PurgeStalePayload(&source_.types, session.routing_info_);
+
+ // Now update the workers.
+ std::vector<ModelSafeWorker*> temp;
+ std::set_intersection(workers_.begin(), workers_.end(),
+ session.workers_.begin(), session.workers_.end(),
+ std::back_inserter(temp));
+ workers_.swap(temp);
+
+ // Now update enabled groups.
+ enabled_groups_ = ComputeEnabledGroups(routing_info_, workers_);
+}
+
+void SyncSession::PrepareForAnotherSyncCycle() {
+ source_.updates_source =
+ sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION;
+ status_controller_.reset(new StatusController(routing_info_));
+}
+
+SyncSessionSnapshot SyncSession::TakeSnapshot() const {
+ syncable::Directory* dir = context_->directory();
+
+ bool is_share_useable = true;
+ syncable::ModelTypeSet initial_sync_ended;
+ std::string download_progress_markers[syncable::MODEL_TYPE_COUNT];
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i < syncable::MODEL_TYPE_COUNT; ++i) {
+ syncable::ModelType type(syncable::ModelTypeFromInt(i));
+ if (routing_info_.count(type) != 0) {
+ if (dir->initial_sync_ended_for_type(type))
+ initial_sync_ended.Put(type);
+ else
+ is_share_useable = false;
+ }
+ dir->GetDownloadProgressAsString(type, &download_progress_markers[i]);
+ }
+
+ return SyncSessionSnapshot(
+ status_controller_->syncer_status(),
+ status_controller_->error(),
+ status_controller_->num_server_changes_remaining(),
+ is_share_useable,
+ initial_sync_ended,
+ download_progress_markers,
+ HasMoreToSync(),
+ delegate_->IsSyncingCurrentlySilenced(),
+ status_controller_->unsynced_handles().size(),
+ status_controller_->TotalNumEncryptionConflictingItems(),
+ status_controller_->TotalNumHierarchyConflictingItems(),
+ status_controller_->TotalNumSimpleConflictingItems(),
+ status_controller_->TotalNumServerConflictingItems(),
+ status_controller_->did_commit_items(),
+ source_,
+ dir->GetEntriesCount(),
+ status_controller_->sync_start_time(),
+ !Succeeded());
+}
+
+void SyncSession::SendEventNotification(SyncEngineEvent::EventCause cause) {
+ SyncEngineEvent event(cause);
+ const SyncSessionSnapshot& snapshot = TakeSnapshot();
+ event.snapshot = &snapshot;
+
+ DVLOG(1) << "Sending event with snapshot: " << snapshot.ToString();
+ context()->NotifyListeners(event);
+}
+
+bool SyncSession::HasMoreToSync() const {
+ const StatusController* status = status_controller_.get();
+ return ((status->commit_ids().size() < status->unsynced_handles().size()) &&
+ status->syncer_status().num_successful_commits > 0) ||
+ status->conflicts_resolved();
+ // Or, we have conflicting updates, but we're making progress on
+ // resolving them...
+}
+
+const std::set<ModelSafeGroup>& SyncSession::GetEnabledGroups() const {
+ return enabled_groups_;
+}
+
+std::set<ModelSafeGroup> SyncSession::GetEnabledGroupsWithConflicts() const {
+ const std::set<ModelSafeGroup>& enabled_groups = GetEnabledGroups();
+ std::set<ModelSafeGroup> enabled_groups_with_conflicts;
+ for (std::set<ModelSafeGroup>::const_iterator it =
+ enabled_groups.begin(); it != enabled_groups.end(); ++it) {
+ const sessions::ConflictProgress* conflict_progress =
+ status_controller_->GetUnrestrictedConflictProgress(*it);
+ if (conflict_progress &&
+ (conflict_progress->SimpleConflictingItemsBegin() !=
+ conflict_progress->SimpleConflictingItemsEnd())) {
+ enabled_groups_with_conflicts.insert(*it);
+ }
+ }
+ return enabled_groups_with_conflicts;
+}
+
+std::set<ModelSafeGroup>
+ SyncSession::GetEnabledGroupsWithVerifiedUpdates() const {
+ const std::set<ModelSafeGroup>& enabled_groups = GetEnabledGroups();
+ std::set<ModelSafeGroup> enabled_groups_with_verified_updates;
+ for (std::set<ModelSafeGroup>::const_iterator it =
+ enabled_groups.begin(); it != enabled_groups.end(); ++it) {
+ const UpdateProgress* update_progress =
+ status_controller_->GetUnrestrictedUpdateProgress(*it);
+ if (update_progress &&
+ (update_progress->VerifiedUpdatesBegin() !=
+ update_progress->VerifiedUpdatesEnd())) {
+ enabled_groups_with_verified_updates.insert(*it);
+ }
+ }
+
+ return enabled_groups_with_verified_updates;
+}
+
+namespace {
+// Return true if the command in question was attempted and did not complete
+// successfully.
+//
+bool IsError(SyncerError error) {
+ return error != UNSET
+ && error != SYNCER_OK
+ && error != SERVER_RETURN_MIGRATION_DONE;
+}
+} // namespace
+
+bool SyncSession::Succeeded() const {
+ const bool download_updates_error =
+ IsError(status_controller_->error().last_download_updates_result);
+ const bool post_commit_error =
+ IsError(status_controller_->error().last_post_commit_result);
+ const bool process_commit_response_error =
+ IsError(status_controller_->error().last_process_commit_response_result);
+ return !download_updates_error
+ && !post_commit_error
+ && !process_commit_response_error;
+}
+
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/sync_session.h b/sync/sessions/sync_session.h
new file mode 100644
index 0000000..1dfe78c
--- /dev/null
+++ b/sync/sessions/sync_session.h
@@ -0,0 +1,240 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class representing an attempt to synchronize the local syncable data
+// store with a sync server. A SyncSession instance is passed as a stateful
+// bundle to and from various SyncerCommands with the goal of converging the
+// client view of data with that of the server. The commands twiddle with
+// session status in response to events and hiccups along the way, set and
+// query session progress with regards to conflict resolution and applying
+// server updates, and access the SyncSessionContext for the current session
+// via SyncSession instances.
+
+#ifndef SYNC_SESSIONS_SYNC_SESSION_H_
+#define SYNC_SESSIONS_SYNC_SESSION_H_
+#pragma once
+
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/sessions/ordered_commit_set.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/model_type.h"
+#include "sync/util/extensions_activity_monitor.h"
+
+namespace syncable {
+class WriteTransaction;
+}
+
+namespace browser_sync {
+class ModelSafeWorker;
+
+namespace sessions {
+
+class SyncSession {
+ public:
+ // The Delegate services events that occur during the session requiring an
+ // explicit (and session-global) action, as opposed to events that are simply
+ // recorded in per-session state.
+ class Delegate {
+ public:
+ // The client was throttled and should cease-and-desist syncing activity
+ // until the specified time.
+ virtual void OnSilencedUntil(const base::TimeTicks& silenced_until) = 0;
+
+ // Silenced intervals can be out of phase with individual sessions, so the
+ // delegate is the only thing that can give an authoritative answer for
+ // "is syncing silenced right now". This shouldn't be necessary very often
+ // as the delegate ensures no session is started if syncing is silenced.
+ // ** Note ** This will return true if silencing commenced during this
+ // session and the interval has not yet elapsed, but the contract here is
+ // solely based on absolute time values. So, this cannot be used to infer
+ // that any given session _instance_ is silenced. An example of reasonable
+ // use is for UI reporting.
+ virtual bool IsSyncingCurrentlySilenced() = 0;
+
+ // The client has been instructed to change its short poll interval.
+ virtual void OnReceivedShortPollIntervalUpdate(
+ const base::TimeDelta& new_interval) = 0;
+
+ // The client has been instructed to change its long poll interval.
+ virtual void OnReceivedLongPollIntervalUpdate(
+ const base::TimeDelta& new_interval) = 0;
+
+ // The client has been instructed to change its sessions commit
+ // delay.
+ virtual void OnReceivedSessionsCommitDelay(
+ const base::TimeDelta& new_delay) = 0;
+
+ // The client needs to cease and desist syncing at once. This occurs when
+ // the Syncer detects that the backend store has fundamentally changed or
+ // is a different instance altogether (e.g. swapping from a test instance
+ // to production, or a global stop syncing operation has wiped the store).
+ // TODO(lipalani) : Replace this function with the one below. This function
+ // stops the current sync cycle and purges the client. In the new model
+ // the former would be done by the |SyncProtocolError| and
+ // the latter(which is an action) would be done in ProfileSyncService
+ // along with the rest of the actions.
+ virtual void OnShouldStopSyncingPermanently() = 0;
+
+ // Called for the syncer to respond to the error sent by the server.
+ virtual void OnSyncProtocolError(
+ const sessions::SyncSessionSnapshot& snapshot) = 0;
+
+ protected:
+ virtual ~Delegate() {}
+ };
+
+ SyncSession(SyncSessionContext* context,
+ Delegate* delegate,
+ const SyncSourceInfo& source,
+ const ModelSafeRoutingInfo& routing_info,
+ const std::vector<ModelSafeWorker*>& workers);
+ ~SyncSession();
+
+ // Builds a thread-safe and read-only copy of the current session state.
+ SyncSessionSnapshot TakeSnapshot() const;
+
+ // Builds and sends a snapshot to the session context's listeners.
+ void SendEventNotification(SyncEngineEvent::EventCause cause);
+
+ // Returns true if this session contains data that should go through the sync
+ // engine again.
+ bool HasMoreToSync() const;
+
+ // Returns true if we did not detect any errors in this session.
+ //
+ // There are many errors that could prevent a sync cycle from succeeding.
+ // These include invalid local state, inability to contact the server,
+ // inability to authenticate with the server, and server errors. What they
+ // have in common is that the we either need to take some action and then
+ // retry the sync cycle or, in the case of transient errors, retry after some
+ // backoff timer has expired. Most importantly, the SyncScheduler should not
+ // assume that the original action that triggered the sync cycle (i.e., a nudge
+ // or a notification) has been properly serviced.
+ //
+ // This function also returns false if SyncShare has not been called on this
+ // session yet, or if ResetTransientState() has been called on this session
+ // since the last call to SyncShare.
+ bool Succeeded() const;
+
+ // Collects all state pertaining to how and why |s| originated and unions it
+ // with corresponding state in |this|, leaving |s| unchanged. Allows |this|
+ // to take on the responsibilities |s| had (e.g. certain data types) in the
+ // next SyncShare operation using |this|, rather than needing two separate
+ // sessions.
+ void Coalesce(const SyncSession& session);
+
+ // Compares the routing_info_, workers and payload map with the passed in
+ // session. Purges types from the above 3 which are not in session. Useful
+ // to update the sync session when the user has disabled some types from
+ // syncing.
+ void RebaseRoutingInfoWithLatest(const SyncSession& session);
+
+ // Should be called any time |this| is being re-used in a new call to
+ // SyncShare (e.g., HasMoreToSync returned true).
+ void PrepareForAnotherSyncCycle();
+
+ // TODO(akalin): Split this into context() and mutable_context().
+ SyncSessionContext* context() const { return context_; }
+ Delegate* delegate() const { return delegate_; }
+ syncable::WriteTransaction* write_transaction() { return write_transaction_; }
+ const StatusController& status_controller() const {
+ return *status_controller_.get();
+ }
+ StatusController* mutable_status_controller() {
+ return status_controller_.get();
+ }
+
+ const ExtensionsActivityMonitor::Records& extensions_activity() const {
+ return extensions_activity_;
+ }
+ ExtensionsActivityMonitor::Records* mutable_extensions_activity() {
+ return &extensions_activity_;
+ }
+
+ const std::vector<ModelSafeWorker*>& workers() const { return workers_; }
+ const ModelSafeRoutingInfo& routing_info() const { return routing_info_; }
+ const SyncSourceInfo& source() const { return source_; }
+
+ // Returns the set of groups which have enabled types.
+ const std::set<ModelSafeGroup>& GetEnabledGroups() const;
+
+ // Returns the set of enabled groups that have conflicts.
+ std::set<ModelSafeGroup> GetEnabledGroupsWithConflicts() const;
+
+ // Returns the set of enabled groups that have verified updates.
+ std::set<ModelSafeGroup> GetEnabledGroupsWithVerifiedUpdates() const;
+
+ private:
+ // Extend the encapsulation boundary to utilities for internal member
+ // assignments. This way, the scope of these actions is explicit, they can't
+ // be overridden, and assigning is always accompanied by unassigning.
+ friend class ScopedSetSessionWriteTransaction;
+
+ // The context for this session, guaranteed to outlive |this|.
+ SyncSessionContext* const context_;
+
+ // The source for initiating this sync session.
+ SyncSourceInfo source_;
+
+ // Information about extensions activity since the last successful commit.
+ ExtensionsActivityMonitor::Records extensions_activity_;
+
+ // Used to allow various steps to share a transaction. Can be NULL.
+ syncable::WriteTransaction* write_transaction_;
+
+ // The delegate for this session, must never be NULL.
+ Delegate* const delegate_;
+
+ // Our controller for various status and error counters.
+ scoped_ptr<StatusController> status_controller_;
+
+ // The set of active ModelSafeWorkers for the duration of this session.
+ // This can change if this session is Coalesce()'d with another.
+ std::vector<ModelSafeWorker*> workers_;
+
+ // The routing info for the duration of this session, dictating which
+ // datatypes should be synced and which workers should be used when working
+ // on those datatypes.
+ ModelSafeRoutingInfo routing_info_;
+
+ // The set of groups with enabled types. Computed from
+ // |routing_info_|.
+ std::set<ModelSafeGroup> enabled_groups_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncSession);
+};
+
+// Installs a WriteTransaction to a given session and later clears it when the
+// utility falls out of scope. Transactions are not nestable, so it is an error
+// to try and use one of these if the session already has a transaction.
+class ScopedSetSessionWriteTransaction {
+ public:
+ ScopedSetSessionWriteTransaction(SyncSession* session,
+ syncable::WriteTransaction* trans)
+ : session_(session) {
+ DCHECK(!session_->write_transaction_);
+ session_->write_transaction_ = trans;
+ }
+ ~ScopedSetSessionWriteTransaction() { session_->write_transaction_ = NULL; }
+
+ private:
+ SyncSession* session_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetSessionWriteTransaction);
+};
+
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_SESSIONS_SYNC_SESSION_H_
diff --git a/sync/sessions/sync_session_context.cc b/sync/sessions/sync_session_context.cc
new file mode 100644
index 0000000..2001e7f
--- /dev/null
+++ b/sync/sessions/sync_session_context.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/sync_session_context.h"
+
+#include "sync/sessions/debug_info_getter.h"
+#include "sync/sessions/session_state.h"
+#include "sync/util/extensions_activity_monitor.h"
+
+namespace browser_sync {
+namespace sessions {
+
+SyncSessionContext::SyncSessionContext(
+ ServerConnectionManager* connection_manager,
+ syncable::Directory* directory,
+ ModelSafeWorkerRegistrar* model_safe_worker_registrar,
+ ExtensionsActivityMonitor* extensions_activity_monitor,
+ const std::vector<SyncEngineEventListener*>& listeners,
+ DebugInfoGetter* debug_info_getter)
+ : resolver_(NULL),
+ connection_manager_(connection_manager),
+ directory_(directory),
+ registrar_(model_safe_worker_registrar),
+ extensions_activity_monitor_(extensions_activity_monitor),
+ notifications_enabled_(false),
+ max_commit_batch_size_(kDefaultMaxCommitBatchSize),
+ debug_info_getter_(debug_info_getter) {
+ std::vector<SyncEngineEventListener*>::const_iterator it;
+ for (it = listeners.begin(); it != listeners.end(); ++it)
+ listeners_.AddObserver(*it);
+}
+
+SyncSessionContext::SyncSessionContext()
+ : connection_manager_(NULL),
+ directory_(NULL),
+ registrar_(NULL),
+ extensions_activity_monitor_(NULL),
+ debug_info_getter_(NULL) {
+}
+
+SyncSessionContext::~SyncSessionContext() {
+}
+
+void SyncSessionContext::SetUnthrottleTime(syncable::ModelTypeSet types,
+ const base::TimeTicks& time) {
+ for (syncable::ModelTypeSet::Iterator it = types.First();
+ it.Good(); it.Inc()) {
+ unthrottle_times_[it.Get()] = time;
+ }
+}
+
+void SyncSessionContext::PruneUnthrottledTypes(const base::TimeTicks& time) {
+ UnthrottleTimes::iterator it = unthrottle_times_.begin();
+ while (it != unthrottle_times_.end()) {
+ if (it->second <= time) {
+ // Delete and increment the iterator.
+ UnthrottleTimes::iterator iterator_to_delete = it;
+ ++it;
+ unthrottle_times_.erase(iterator_to_delete);
+ } else {
+ // Just increment the iterator.
+ ++it;
+ }
+ }
+}
+
+// TODO(lipalani): Call this function and fill the return values in snapshot
+// so it could be shown in the about:sync page.
+syncable::ModelTypeSet SyncSessionContext::GetThrottledTypes() const {
+ syncable::ModelTypeSet types;
+ for (UnthrottleTimes::const_iterator it = unthrottle_times_.begin();
+ it != unthrottle_times_.end();
+ ++it) {
+ types.Put(it->first);
+ }
+ return types;
+}
+
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/sync_session_context.h b/sync/sessions/sync_session_context.h
new file mode 100644
index 0000000..94129ce
--- /dev/null
+++ b/sync/sessions/sync_session_context.h
@@ -0,0 +1,208 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SyncSessionContext encapsulates the contextual information and engine
+// components specific to a SyncSession. A context is accessible via
+// a SyncSession so that session SyncerCommands and parts of the engine have
+// a convenient way to access other parts. In this way it can be thought of as
+// the surrounding environment for the SyncSession. The components of this
+// environment are either valid or not valid for the entire context lifetime,
+// or they are valid for explicitly scoped periods of time by using Scoped
+// installation utilities found below. This means that the context assumes no
+// ownership whatsoever of any object that was not created by the context
+// itself.
+//
+// It can only be used from the SyncerThread.
+
+#ifndef SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
+#define SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/sessions/debug_info_getter.h"
+
+namespace syncable {
+class Directory;
+}
+
+namespace browser_sync {
+
+class ConflictResolver;
+class ExtensionsActivityMonitor;
+class ModelSafeWorkerRegistrar;
+class ServerConnectionManager;
+
+// Default number of items a client can commit in a single message.
+static const int kDefaultMaxCommitBatchSize = 25;
+
+namespace sessions {
+class ScopedSessionContextConflictResolver;
+struct SyncSessionSnapshot;
+class TestScopedSessionEventListener;
+
+class SyncSessionContext {
+ public:
+ SyncSessionContext(ServerConnectionManager* connection_manager,
+ syncable::Directory* directory,
+ ModelSafeWorkerRegistrar* model_safe_worker_registrar,
+ ExtensionsActivityMonitor* extensions_activity_monitor,
+ const std::vector<SyncEngineEventListener*>& listeners,
+ DebugInfoGetter* debug_info_getter);
+
+ // Empty constructor for unit tests.
+ SyncSessionContext();
+ virtual ~SyncSessionContext();
+
+ ConflictResolver* resolver() { return resolver_; }
+ ServerConnectionManager* connection_manager() {
+ return connection_manager_;
+ }
+ syncable::Directory* directory() {
+ return directory_;
+ }
+
+ ModelSafeWorkerRegistrar* registrar() {
+ return registrar_;
+ }
+ ExtensionsActivityMonitor* extensions_monitor() {
+ return extensions_activity_monitor_;
+ }
+
+ DebugInfoGetter* debug_info_getter() {
+ return debug_info_getter_;
+ }
+
+ // Talk notification status.
+ void set_notifications_enabled(bool enabled) {
+ notifications_enabled_ = enabled;
+ }
+ bool notifications_enabled() { return notifications_enabled_; }
+
+ // Account name, set once a directory has been opened.
+ void set_account_name(const std::string name) {
+ DCHECK(account_name_.empty());
+ account_name_ = name;
+ }
+ const std::string& account_name() const { return account_name_; }
+
+ void set_max_commit_batch_size(int batch_size) {
+ max_commit_batch_size_ = batch_size;
+ }
+ int32 max_commit_batch_size() const { return max_commit_batch_size_; }
+
+ const ModelSafeRoutingInfo& previous_session_routing_info() const {
+ return previous_session_routing_info_;
+ }
+
+ void set_previous_session_routing_info(const ModelSafeRoutingInfo& info) {
+ previous_session_routing_info_ = info;
+ }
+
+ void NotifyListeners(const SyncEngineEvent& event) {
+ FOR_EACH_OBSERVER(SyncEngineEventListener, listeners_,
+ OnSyncEngineEvent(event));
+ }
+
+ // This is virtual for unit tests.
+ virtual void SetUnthrottleTime(syncable::ModelTypeSet types,
+ const base::TimeTicks& time);
+
+ // This prunes the |unthrottle_times_| map based on the |time| passed in. This
+ // is called by syncer at the SYNCER_BEGIN stage.
+ void PruneUnthrottledTypes(const base::TimeTicks& time);
+
+ // This returns the list of currently throttled types. Unless the server
+ // returns new throttled types this will remain constant throughout the sync
+ syncable::ModelTypeSet GetThrottledTypes() const;
+
+ private:
+ typedef std::map<syncable::ModelType, base::TimeTicks> UnthrottleTimes;
+
+ FRIEND_TEST_ALL_PREFIXES(SyncSessionContextTest, AddUnthrottleTimeTest);
+ FRIEND_TEST_ALL_PREFIXES(SyncSessionContextTest,
+ GetCurrentlyThrottledTypesTest);
+
+ // Rather than force clients to set and null-out various context members, we
+ // extend our encapsulation boundary to scoped helpers that take care of this
+ // once they are allocated. See definitions of these below.
+ friend class ScopedSessionContextConflictResolver;
+ friend class TestScopedSessionEventListener;
+
+ // This is installed by Syncer objects when needed and may be NULL.
+ ConflictResolver* resolver_;
+
+ ObserverList<SyncEngineEventListener> listeners_;
+
+ ServerConnectionManager* const connection_manager_;
+ syncable::Directory* const directory_;
+
+ // A registrar of workers capable of processing work closures on a thread
+ // that is guaranteed to be safe for model modifications.
+ ModelSafeWorkerRegistrar* registrar_;
+
+ // We use this to stuff extensions activity into CommitMessages so the server
+ // can correlate commit traffic with extension-related bookmark mutations.
+ ExtensionsActivityMonitor* extensions_activity_monitor_;
+
+ // Kept up to date with talk events to determine whether notifications are
+ // enabled. True only if the notification channel is authorized and open.
+ bool notifications_enabled_;
+
+ // The name of the account being synced.
+ std::string account_name_;
+
+ // The server limits the number of items a client can commit in one batch.
+ int max_commit_batch_size_;
+
+ // Some routing info history to help us clean up types that get disabled
+ // by the user.
+ ModelSafeRoutingInfo previous_session_routing_info_;
+
+ // Cache of last session snapshot information.
+ scoped_ptr<sessions::SyncSessionSnapshot> previous_session_snapshot_;
+
+ // We use this to get debug info to send to the server for debugging
+ // client behavior on server side.
+ DebugInfoGetter* const debug_info_getter_;
+
+ // This is a map from throttled data types to the time at which they can be
+ // unthrottled.
+ UnthrottleTimes unthrottle_times_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncSessionContext);
+};
+
+// Installs a ConflictResolver to a given session context for the lifetime of
+// the ScopedSessionContextConflictResolver. There should never be more than
+// one ConflictResolver in the system, so it is an error to use this if the
+// context already has a resolver.
+class ScopedSessionContextConflictResolver {
+ public:
+ // Note: |context| and |resolver| should outlive |this|.
+ ScopedSessionContextConflictResolver(SyncSessionContext* context,
+ ConflictResolver* resolver)
+ : context_(context), resolver_(resolver) {
+ DCHECK(NULL == context->resolver_);
+ context->resolver_ = resolver;
+ }
+ ~ScopedSessionContextConflictResolver() {
+ context_->resolver_ = NULL;
+ }
+ private:
+ SyncSessionContext* context_;
+ ConflictResolver* resolver_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedSessionContextConflictResolver);
+};
+
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_SESSIONS_SYNC_SESSION_CONTEXT_H_
diff --git a/sync/sessions/sync_session_context_unittest.cc b/sync/sessions/sync_session_context_unittest.cc
new file mode 100644
index 0000000..0e1fbf3
--- /dev/null
+++ b/sync/sessions/sync_session_context_unittest.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/sync_session_context.h"
+
+#include "sync/syncable/model_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace sessions {
+TEST(SyncSessionContextTest, AddUnthrottleTimeTest) {
+ const syncable::ModelTypeSet types(
+ syncable::BOOKMARKS, syncable::PASSWORDS);
+
+ SyncSessionContext context;
+ base::TimeTicks now = base::TimeTicks::Now();
+ context.SetUnthrottleTime(types, now);
+
+ EXPECT_EQ(context.unthrottle_times_.size(), 2U);
+ EXPECT_EQ(context.unthrottle_times_[syncable::BOOKMARKS], now);
+ EXPECT_EQ(context.unthrottle_times_[syncable::PASSWORDS], now);
+}
+
+TEST(SyncSessionContextTest, GetCurrentlyThrottledTypesTest) {
+ const syncable::ModelTypeSet types(
+ syncable::BOOKMARKS, syncable::PASSWORDS);
+
+ SyncSessionContext context;
+ base::TimeTicks now = base::TimeTicks::Now();
+
+ // Now update the throttled types with time set to 10 seconds earlier from
+ // now.
+ context.SetUnthrottleTime(types, now - base::TimeDelta::FromSeconds(10));
+ context.PruneUnthrottledTypes(base::TimeTicks::Now());
+ EXPECT_TRUE(context.GetThrottledTypes().Empty());
+
+ // Now update the throttled types with time set to 20 minutes from now.
+ context.SetUnthrottleTime(types, now + base::TimeDelta::FromSeconds(1200));
+ context.PruneUnthrottledTypes(base::TimeTicks::Now());
+ EXPECT_TRUE(context.GetThrottledTypes().Equals(types));
+}
+} // namespace sessions
+} // namespace browser_sync
+
diff --git a/sync/sessions/sync_session_unittest.cc b/sync/sessions/sync_session_unittest.cc
new file mode 100644
index 0000000..1f87018
--- /dev/null
+++ b/sync/sessions/sync_session_unittest.cc
@@ -0,0 +1,596 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/sync_session.h"
+
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop.h"
+#include "sync/engine/conflict_resolver.h"
+#include "sync/engine/syncer_types.h"
+#include "sync/sessions/session_state.h"
+#include "sync/sessions/status_controller.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/fake_extensions_activity_monitor.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+namespace sessions {
+namespace {
+
+class SyncSessionTest : public testing::Test,
+ public SyncSession::Delegate,
+ public ModelSafeWorkerRegistrar {
+ public:
+ SyncSessionTest() : controller_invocations_allowed_(false) {}
+
+ SyncSession* MakeSession() {
+ std::vector<ModelSafeWorker*> workers;
+ GetWorkers(&workers);
+ return new SyncSession(context_.get(), this, SyncSourceInfo(),
+ routes_, workers);
+ }
+
+ virtual void SetUp() {
+ context_.reset(
+ new SyncSessionContext(
+ NULL, NULL, this, &extensions_activity_monitor_,
+ std::vector<SyncEngineEventListener*>(), NULL));
+ routes_.clear();
+ routes_[syncable::BOOKMARKS] = GROUP_UI;
+ routes_[syncable::AUTOFILL] = GROUP_DB;
+ scoped_refptr<ModelSafeWorker> passive_worker(
+ new FakeModelWorker(GROUP_PASSIVE));
+ scoped_refptr<ModelSafeWorker> ui_worker(
+ new FakeModelWorker(GROUP_UI));
+ scoped_refptr<ModelSafeWorker> db_worker(
+ new FakeModelWorker(GROUP_DB));
+ workers_.clear();
+ workers_.push_back(passive_worker);
+ workers_.push_back(ui_worker);
+ workers_.push_back(db_worker);
+ session_.reset(MakeSession());
+ }
+ virtual void TearDown() {
+ session_.reset();
+ context_.reset();
+ }
+
+ virtual void OnSilencedUntil(const base::TimeTicks& silenced_until) OVERRIDE {
+ FailControllerInvocationIfDisabled("OnSilencedUntil");
+ }
+ virtual bool IsSyncingCurrentlySilenced() OVERRIDE {
+ FailControllerInvocationIfDisabled("IsSyncingCurrentlySilenced");
+ return false;
+ }
+ virtual void OnReceivedLongPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ FailControllerInvocationIfDisabled("OnReceivedLongPollIntervalUpdate");
+ }
+ virtual void OnReceivedShortPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ FailControllerInvocationIfDisabled("OnReceivedShortPollIntervalUpdate");
+ }
+ virtual void OnReceivedSessionsCommitDelay(
+ const base::TimeDelta& new_delay) OVERRIDE {
+ FailControllerInvocationIfDisabled("OnReceivedSessionsCommitDelay");
+ }
+ virtual void OnShouldStopSyncingPermanently() OVERRIDE {
+ FailControllerInvocationIfDisabled("OnShouldStopSyncingPermanently");
+ }
+ virtual void OnSyncProtocolError(
+ const sessions::SyncSessionSnapshot& snapshot) {
+ FailControllerInvocationIfDisabled("SyncProtocolError");
+ }
+
+ // ModelSafeWorkerRegistrar implementation.
+ virtual void GetWorkers(std::vector<ModelSafeWorker*>* out) OVERRIDE {
+ out->clear();
+ for (std::vector<scoped_refptr<ModelSafeWorker> >::const_iterator it =
+ workers_.begin(); it != workers_.end(); ++it) {
+ out->push_back(it->get());
+ }
+ }
+ virtual void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) OVERRIDE {
+ *out = routes_;
+ }
+
+ StatusController* status() { return session_->mutable_status_controller(); }
+ protected:
+ void FailControllerInvocationIfDisabled(const std::string& msg) {
+ if (!controller_invocations_allowed_)
+ FAIL() << msg;
+ }
+
+ syncable::ModelTypeSet ParamsMeaningAllEnabledTypes() {
+ syncable::ModelTypeSet request_params(
+ syncable::BOOKMARKS, syncable::AUTOFILL);
+ return request_params;
+ }
+
+ syncable::ModelTypeSet ParamsMeaningJustOneEnabledType() {
+ return syncable::ModelTypeSet(syncable::AUTOFILL);
+ }
+
+ MessageLoop message_loop_;
+ bool controller_invocations_allowed_;
+ scoped_ptr<SyncSession> session_;
+ scoped_ptr<SyncSessionContext> context_;
+ std::vector<scoped_refptr<ModelSafeWorker> > workers_;
+ ModelSafeRoutingInfo routes_;
+ FakeExtensionsActivityMonitor extensions_activity_monitor_;
+};
+
+TEST_F(SyncSessionTest, EnabledGroupsEmpty) {
+ routes_.clear();
+ workers_.clear();
+ scoped_ptr<SyncSession> session(MakeSession());
+ std::set<ModelSafeGroup> expected_enabled_groups;
+ expected_enabled_groups.insert(GROUP_PASSIVE);
+ EXPECT_EQ(expected_enabled_groups, session->GetEnabledGroups());
+}
+
+TEST_F(SyncSessionTest, EnabledGroups) {
+ scoped_ptr<SyncSession> session(MakeSession());
+ std::set<ModelSafeGroup> expected_enabled_groups;
+ expected_enabled_groups.insert(GROUP_PASSIVE);
+ expected_enabled_groups.insert(GROUP_UI);
+ expected_enabled_groups.insert(GROUP_DB);
+ EXPECT_EQ(expected_enabled_groups, session->GetEnabledGroups());
+}
+
+TEST_F(SyncSessionTest, EnabledGroupsWithConflictsEmpty) {
+ scoped_ptr<SyncSession> session(MakeSession());
+ // Auto-create conflict progress. This shouldn't put that group in
+ // conflict.
+ session->mutable_status_controller()->
+ GetUnrestrictedMutableConflictProgressForTest(GROUP_PASSIVE);
+ EXPECT_TRUE(session->GetEnabledGroupsWithConflicts().empty());
+}
+
+TEST_F(SyncSessionTest, EnabledGroupsWithConflicts) {
+ scoped_ptr<SyncSession> session(MakeSession());
+ // Put GROUP_UI in conflict.
+ session->mutable_status_controller()->
+ GetUnrestrictedMutableConflictProgressForTest(GROUP_UI)->
+ AddSimpleConflictingItemById(syncable::Id());
+ std::set<ModelSafeGroup> expected_enabled_groups_with_conflicts;
+ expected_enabled_groups_with_conflicts.insert(GROUP_UI);
+ EXPECT_EQ(expected_enabled_groups_with_conflicts,
+ session->GetEnabledGroupsWithConflicts());
+}
+
+TEST_F(SyncSessionTest, ScopedContextHelpers) {
+ ConflictResolver resolver;
+ EXPECT_FALSE(context_->resolver());
+ {
+ ScopedSessionContextConflictResolver s_resolver(context_.get(), &resolver);
+ EXPECT_EQ(&resolver, context_->resolver());
+ }
+ EXPECT_FALSE(context_->resolver());
+}
+
+TEST_F(SyncSessionTest, SetWriteTransaction) {
+ TestDirectorySetterUpper dir_maker;
+ dir_maker.SetUp();
+ syncable::Directory* directory = dir_maker.directory();
+
+ scoped_ptr<SyncSession> session(MakeSession());
+ EXPECT_TRUE(NULL == session->write_transaction());
+ {
+ WriteTransaction trans(FROM_HERE, syncable::UNITTEST, directory);
+ sessions::ScopedSetSessionWriteTransaction set_trans(session.get(), &trans);
+ EXPECT_TRUE(&trans == session->write_transaction());
+ }
+}
+
+TEST_F(SyncSessionTest, MoreToSyncIfUnsyncedGreaterThanCommitted) {
+ // If any forward progress was made during the session, and the number of
+ // unsynced handles still exceeds the number of commit ids we added, there is
+ // more to sync. For example, this occurs if we had more commit ids
+ // than could fit in a single commit batch.
+ EXPECT_FALSE(session_->HasMoreToSync());
+ OrderedCommitSet commit_set(routes_);
+ commit_set.AddCommitItem(0, syncable::Id(), syncable::BOOKMARKS);
+ status()->set_commit_set(commit_set);
+ EXPECT_FALSE(session_->HasMoreToSync());
+
+ std::vector<int64> unsynced_handles;
+ unsynced_handles.push_back(1);
+ unsynced_handles.push_back(2);
+ status()->set_unsynced_handles(unsynced_handles);
+ EXPECT_FALSE(session_->HasMoreToSync());
+ status()->increment_num_successful_commits();
+ EXPECT_TRUE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfDownloadFailed) {
+ status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
+
+ // When DownloadUpdatesCommand fails, these should be false.
+ EXPECT_FALSE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_FALSE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfGotChangesRemaining) {
+ status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
+
+ // When the server returns changes_remaining, that means there's
+ // more to download.
+ status()->mutable_updates_response()->mutable_get_updates()
+ ->set_changes_remaining(1000L);
+ EXPECT_FALSE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_TRUE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfGotNoChangesRemaining) {
+ status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
+
+ // When the server returns a timestamp, that means we're up to date.
+ status()->mutable_updates_response()->mutable_get_updates()
+ ->set_changes_remaining(0);
+ EXPECT_TRUE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_TRUE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfGotNoChangesRemainingForSubset) {
+ status()->set_updates_request_types(ParamsMeaningJustOneEnabledType());
+
+ // When the server returns a timestamp, that means we're up to date for that
+ // type. But there may still be more to download if there are other
+ // datatypes that we didn't request on this go-round.
+ status()->mutable_updates_response()->mutable_get_updates()
+ ->set_changes_remaining(0);
+
+ EXPECT_TRUE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_TRUE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfGotChangesRemainingAndEntries) {
+ status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
+ // The actual entry count should not factor into the HasMoreToSync
+ // determination.
+ status()->mutable_updates_response()->mutable_get_updates()->add_entries();
+ status()->mutable_updates_response()->mutable_get_updates()
+      ->set_changes_remaining(1000000L);
+ EXPECT_FALSE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_TRUE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToDownloadIfGotNoChangesRemainingAndEntries) {
+ status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
+ // The actual entry count should not factor into the HasMoreToSync
+ // determination.
+ status()->mutable_updates_response()->mutable_get_updates()->add_entries();
+ status()->mutable_updates_response()->mutable_get_updates()
+ ->set_changes_remaining(0);
+ EXPECT_TRUE(status()->ServerSaysNothingMoreToDownload());
+ EXPECT_TRUE(status()->download_updates_succeeded());
+
+ // Download updates has its own loop in the syncer; it shouldn't factor
+ // into HasMoreToSync.
+ EXPECT_FALSE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, MoreToSyncIfConflictsResolved) {
+ // Conflict resolution happens after get updates and commit,
+ // so we need to loop back and get updates / commit again now
+ // that we have made forward progress.
+ status()->update_conflicts_resolved(true);
+ EXPECT_TRUE(session_->HasMoreToSync());
+}
+
+TEST_F(SyncSessionTest, ResetTransientState) {
+ status()->update_conflicts_resolved(true);
+ status()->increment_num_successful_commits();
+ EXPECT_TRUE(session_->HasMoreToSync());
+ session_->PrepareForAnotherSyncCycle();
+ EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION,
+ session_->source().updates_source);
+ EXPECT_FALSE(status()->conflicts_resolved());
+ EXPECT_FALSE(session_->HasMoreToSync());
+ EXPECT_FALSE(status()->TestAndClearIsDirty());
+}
+
+TEST_F(SyncSessionTest, Coalesce) {
+ std::vector<ModelSafeWorker*> workers_one, workers_two;
+ ModelSafeRoutingInfo routes_one, routes_two;
+ syncable::ModelTypePayloadMap one_type =
+ syncable::ModelTypePayloadMapFromEnumSet(
+ ParamsMeaningJustOneEnabledType(),
+ std::string());
+ syncable::ModelTypePayloadMap all_types =
+ syncable::ModelTypePayloadMapFromEnumSet(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ SyncSourceInfo source_one(sync_pb::GetUpdatesCallerInfo::PERIODIC, one_type);
+ SyncSourceInfo source_two(sync_pb::GetUpdatesCallerInfo::LOCAL, all_types);
+
+ scoped_refptr<ModelSafeWorker> passive_worker(
+ new FakeModelWorker(GROUP_PASSIVE));
+ scoped_refptr<ModelSafeWorker> db_worker(new FakeModelWorker(GROUP_DB));
+ scoped_refptr<ModelSafeWorker> ui_worker(new FakeModelWorker(GROUP_UI));
+ workers_one.push_back(passive_worker);
+ workers_one.push_back(db_worker);
+ workers_two.push_back(passive_worker);
+ workers_two.push_back(db_worker);
+ workers_two.push_back(ui_worker);
+ routes_one[syncable::AUTOFILL] = GROUP_DB;
+ routes_two[syncable::AUTOFILL] = GROUP_DB;
+ routes_two[syncable::BOOKMARKS] = GROUP_UI;
+ SyncSession one(context_.get(), this, source_one, routes_one, workers_one);
+ SyncSession two(context_.get(), this, source_two, routes_two, workers_two);
+
+ std::set<ModelSafeGroup> expected_enabled_groups_one;
+ expected_enabled_groups_one.insert(GROUP_PASSIVE);
+ expected_enabled_groups_one.insert(GROUP_DB);
+
+ std::set<ModelSafeGroup> expected_enabled_groups_two;
+ expected_enabled_groups_two.insert(GROUP_PASSIVE);
+ expected_enabled_groups_two.insert(GROUP_DB);
+ expected_enabled_groups_two.insert(GROUP_UI);
+
+ EXPECT_EQ(expected_enabled_groups_one, one.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups_two, two.GetEnabledGroups());
+
+ one.Coalesce(two);
+
+ EXPECT_EQ(expected_enabled_groups_two, one.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups_two, two.GetEnabledGroups());
+
+ EXPECT_EQ(two.source().updates_source, one.source().updates_source);
+ EXPECT_EQ(all_types, one.source().types);
+ std::vector<ModelSafeWorker*>::const_iterator it_db =
+ std::find(one.workers().begin(), one.workers().end(), db_worker);
+ std::vector<ModelSafeWorker*>::const_iterator it_ui =
+ std::find(one.workers().begin(), one.workers().end(), ui_worker);
+ EXPECT_NE(it_db, one.workers().end());
+ EXPECT_NE(it_ui, one.workers().end());
+ EXPECT_EQ(routes_two, one.routing_info());
+}
+
+TEST_F(SyncSessionTest, RebaseRoutingInfoWithLatestRemoveOneType) {
+ std::vector<ModelSafeWorker*> workers_one, workers_two;
+ ModelSafeRoutingInfo routes_one, routes_two;
+ syncable::ModelTypePayloadMap one_type =
+ syncable::ModelTypePayloadMapFromEnumSet(
+ ParamsMeaningJustOneEnabledType(),
+ std::string());
+ syncable::ModelTypePayloadMap all_types =
+ syncable::ModelTypePayloadMapFromEnumSet(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ SyncSourceInfo source_one(sync_pb::GetUpdatesCallerInfo::PERIODIC, one_type);
+ SyncSourceInfo source_two(sync_pb::GetUpdatesCallerInfo::LOCAL, all_types);
+
+ scoped_refptr<ModelSafeWorker> passive_worker(
+ new FakeModelWorker(GROUP_PASSIVE));
+ scoped_refptr<ModelSafeWorker> db_worker(new FakeModelWorker(GROUP_DB));
+ scoped_refptr<ModelSafeWorker> ui_worker(new FakeModelWorker(GROUP_UI));
+ workers_one.push_back(passive_worker);
+ workers_one.push_back(db_worker);
+ workers_two.push_back(passive_worker);
+ workers_two.push_back(db_worker);
+ workers_two.push_back(ui_worker);
+ routes_one[syncable::AUTOFILL] = GROUP_DB;
+ routes_two[syncable::AUTOFILL] = GROUP_DB;
+ routes_two[syncable::BOOKMARKS] = GROUP_UI;
+ SyncSession one(context_.get(), this, source_one, routes_one, workers_one);
+ SyncSession two(context_.get(), this, source_two, routes_two, workers_two);
+
+ std::set<ModelSafeGroup> expected_enabled_groups_one;
+ expected_enabled_groups_one.insert(GROUP_PASSIVE);
+ expected_enabled_groups_one.insert(GROUP_DB);
+
+ std::set<ModelSafeGroup> expected_enabled_groups_two;
+ expected_enabled_groups_two.insert(GROUP_PASSIVE);
+ expected_enabled_groups_two.insert(GROUP_DB);
+ expected_enabled_groups_two.insert(GROUP_UI);
+
+ EXPECT_EQ(expected_enabled_groups_one, one.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups_two, two.GetEnabledGroups());
+
+ two.RebaseRoutingInfoWithLatest(one);
+
+ EXPECT_EQ(expected_enabled_groups_one, one.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups_one, two.GetEnabledGroups());
+
+ // Make sure the source has not been touched.
+ EXPECT_EQ(two.source().updates_source,
+ sync_pb::GetUpdatesCallerInfo::LOCAL);
+
+ // Make sure the payload is reduced to one.
+ EXPECT_EQ(one_type, two.source().types);
+
+  // Make sure the workers are updated.
+ std::vector<ModelSafeWorker*>::const_iterator it_db =
+ std::find(two.workers().begin(), two.workers().end(), db_worker);
+ std::vector<ModelSafeWorker*>::const_iterator it_ui =
+ std::find(two.workers().begin(), two.workers().end(), ui_worker);
+ EXPECT_NE(it_db, two.workers().end());
+ EXPECT_EQ(it_ui, two.workers().end());
+ EXPECT_EQ(two.workers().size(), 2U);
+
+ // Make sure the model safe routing info is reduced to one type.
+ ModelSafeRoutingInfo::const_iterator it =
+ two.routing_info().find(syncable::AUTOFILL);
+ // Note that attempting to use EXPECT_NE would fail for an Android build due
+ // to seeming incompatibility with gtest and stlport.
+ EXPECT_TRUE(it != two.routing_info().end());
+ EXPECT_EQ(it->second, GROUP_DB);
+ EXPECT_EQ(two.routing_info().size(), 1U);
+}
+
+TEST_F(SyncSessionTest, RebaseRoutingInfoWithLatestWithSameType) {
+ std::vector<ModelSafeWorker*> workers_first, workers_second;
+ ModelSafeRoutingInfo routes_first, routes_second;
+ syncable::ModelTypePayloadMap all_types =
+ syncable::ModelTypePayloadMapFromEnumSet(
+ ParamsMeaningAllEnabledTypes(),
+ std::string());
+ SyncSourceInfo source_first(sync_pb::GetUpdatesCallerInfo::PERIODIC,
+ all_types);
+ SyncSourceInfo source_second(sync_pb::GetUpdatesCallerInfo::LOCAL,
+ all_types);
+
+ scoped_refptr<ModelSafeWorker> passive_worker(
+ new FakeModelWorker(GROUP_PASSIVE));
+ scoped_refptr<FakeModelWorker> db_worker(new FakeModelWorker(GROUP_DB));
+ scoped_refptr<FakeModelWorker> ui_worker(new FakeModelWorker(GROUP_UI));
+ workers_first.push_back(passive_worker);
+ workers_first.push_back(db_worker);
+ workers_first.push_back(ui_worker);
+ workers_second.push_back(passive_worker);
+ workers_second.push_back(db_worker);
+ workers_second.push_back(ui_worker);
+ routes_first[syncable::AUTOFILL] = GROUP_DB;
+ routes_first[syncable::BOOKMARKS] = GROUP_UI;
+ routes_second[syncable::AUTOFILL] = GROUP_DB;
+ routes_second[syncable::BOOKMARKS] = GROUP_UI;
+ SyncSession first(context_.get(), this, source_first, routes_first,
+ workers_first);
+ SyncSession second(context_.get(), this, source_second, routes_second,
+ workers_second);
+
+ std::set<ModelSafeGroup> expected_enabled_groups;
+ expected_enabled_groups.insert(GROUP_PASSIVE);
+ expected_enabled_groups.insert(GROUP_DB);
+ expected_enabled_groups.insert(GROUP_UI);
+
+ EXPECT_EQ(expected_enabled_groups, first.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups, second.GetEnabledGroups());
+
+ second.RebaseRoutingInfoWithLatest(first);
+
+ EXPECT_EQ(expected_enabled_groups, first.GetEnabledGroups());
+ EXPECT_EQ(expected_enabled_groups, second.GetEnabledGroups());
+
+ // Make sure the source has not been touched.
+ EXPECT_EQ(second.source().updates_source,
+ sync_pb::GetUpdatesCallerInfo::LOCAL);
+
+ // Make sure our payload is still the same.
+ EXPECT_EQ(all_types, second.source().types);
+
+ // Make sure the workers are still the same.
+ std::vector<ModelSafeWorker*>::const_iterator it_passive =
+ std::find(second.workers().begin(), second.workers().end(),
+ passive_worker);
+ std::vector<ModelSafeWorker*>::const_iterator it_db =
+ std::find(second.workers().begin(), second.workers().end(), db_worker);
+ std::vector<ModelSafeWorker*>::const_iterator it_ui =
+ std::find(second.workers().begin(), second.workers().end(), ui_worker);
+ EXPECT_NE(it_passive, second.workers().end());
+ EXPECT_NE(it_db, second.workers().end());
+ EXPECT_NE(it_ui, second.workers().end());
+ EXPECT_EQ(second.workers().size(), 3U);
+
+  // Make sure the model safe routing info still contains both types.
+ ModelSafeRoutingInfo::const_iterator it1 =
+ second.routing_info().find(syncable::AUTOFILL);
+ ModelSafeRoutingInfo::const_iterator it2 =
+ second.routing_info().find(syncable::BOOKMARKS);
+
+ // Note that attempting to use EXPECT_NE would fail for an Android build due
+ // to seeming incompatibility with gtest and stlport.
+ EXPECT_TRUE(it1 != second.routing_info().end());
+ EXPECT_EQ(it1->second, GROUP_DB);
+
+ // Note that attempting to use EXPECT_NE would fail for an Android build due
+ // to seeming incompatibility with gtest and stlport.
+ EXPECT_TRUE(it2 != second.routing_info().end());
+ EXPECT_EQ(it2->second, GROUP_UI);
+ EXPECT_EQ(second.routing_info().size(), 2U);
+}
+
+
+TEST_F(SyncSessionTest, MakeTypePayloadMapFromBitSet) {
+ syncable::ModelTypeSet types;
+ std::string payload = "test";
+ syncable::ModelTypePayloadMap types_with_payloads =
+ syncable::ModelTypePayloadMapFromEnumSet(types, payload);
+ EXPECT_TRUE(types_with_payloads.empty());
+
+ types.Put(syncable::BOOKMARKS);
+ types.Put(syncable::PASSWORDS);
+ types.Put(syncable::AUTOFILL);
+ payload = "test2";
+ types_with_payloads =
+ syncable::ModelTypePayloadMapFromEnumSet(types, payload);
+
+ ASSERT_EQ(3U, types_with_payloads.size());
+ EXPECT_EQ(types_with_payloads[syncable::BOOKMARKS], payload);
+ EXPECT_EQ(types_with_payloads[syncable::PASSWORDS], payload);
+ EXPECT_EQ(types_with_payloads[syncable::AUTOFILL], payload);
+}
+
+TEST_F(SyncSessionTest, MakeTypePayloadMapFromRoutingInfo) {
+ std::string payload = "test";
+ syncable::ModelTypePayloadMap types_with_payloads
+ = syncable::ModelTypePayloadMapFromRoutingInfo(routes_, payload);
+ ASSERT_EQ(routes_.size(), types_with_payloads.size());
+ for (ModelSafeRoutingInfo::iterator iter = routes_.begin();
+ iter != routes_.end();
+ ++iter) {
+ EXPECT_EQ(payload, types_with_payloads[iter->first]);
+ }
+}
+
+TEST_F(SyncSessionTest, CoalescePayloads) {
+ syncable::ModelTypePayloadMap original;
+ std::string empty_payload;
+ std::string payload1 = "payload1";
+ std::string payload2 = "payload2";
+ std::string payload3 = "payload3";
+ original[syncable::BOOKMARKS] = empty_payload;
+ original[syncable::PASSWORDS] = payload1;
+ original[syncable::AUTOFILL] = payload2;
+ original[syncable::THEMES] = payload3;
+
+ syncable::ModelTypePayloadMap update;
+ update[syncable::BOOKMARKS] = empty_payload; // Same.
+ update[syncable::PASSWORDS] = empty_payload; // Overwrite with empty.
+ update[syncable::AUTOFILL] = payload1; // Overwrite with non-empty.
+ update[syncable::SESSIONS] = payload2; // New.
+ // Themes untouched.
+
+ CoalescePayloads(&original, update);
+ ASSERT_EQ(5U, original.size());
+ EXPECT_EQ(empty_payload, original[syncable::BOOKMARKS]);
+ EXPECT_EQ(payload1, original[syncable::PASSWORDS]);
+ EXPECT_EQ(payload1, original[syncable::AUTOFILL]);
+ EXPECT_EQ(payload2, original[syncable::SESSIONS]);
+ EXPECT_EQ(payload3, original[syncable::THEMES]);
+}
+
+} // namespace
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/test_util.cc b/sync/sessions/test_util.cc
new file mode 100644
index 0000000..7e9b54b
--- /dev/null
+++ b/sync/sessions/test_util.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/sessions/test_util.h"
+
+namespace browser_sync {
+namespace sessions {
+namespace test_util {
+
+void SimulateHasMoreToSync(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end) {
+ session->mutable_status_controller()->update_conflicts_resolved(true);
+ ASSERT_TRUE(session->HasMoreToSync());
+}
+
+void SimulateDownloadUpdatesFailed(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end) {
+ session->mutable_status_controller()->set_last_download_updates_result(
+ SERVER_RETURN_TRANSIENT_ERROR);
+}
+
+void SimulateCommitFailed(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end) {
+ session->mutable_status_controller()->set_last_post_commit_result(
+ SERVER_RETURN_TRANSIENT_ERROR);
+}
+
+void SimulateSuccess(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end) {
+ if (session->HasMoreToSync()) {
+ ADD_FAILURE() << "Shouldn't have more to sync";
+ }
+ ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
+ ASSERT_EQ(0U, session->status_controller().unsynced_handles().size());
+}
+
+void SimulateThrottledImpl(sessions::SyncSession* session,
+ const base::TimeDelta& delta) {
+ session->delegate()->OnSilencedUntil(base::TimeTicks::Now() + delta);
+}
+
+void SimulatePollIntervalUpdateImpl(sessions::SyncSession* session,
+ const base::TimeDelta& new_poll) {
+ session->delegate()->OnReceivedLongPollIntervalUpdate(new_poll);
+}
+
+void SimulateSessionsCommitDelayUpdateImpl(sessions::SyncSession* session,
+ const base::TimeDelta& new_delay) {
+ session->delegate()->OnReceivedSessionsCommitDelay(new_delay);
+}
+
+} // namespace test_util
+} // namespace sessions
+} // namespace browser_sync
diff --git a/sync/sessions/test_util.h b/sync/sessions/test_util.h
new file mode 100644
index 0000000..b8ecf8f
--- /dev/null
+++ b/sync/sessions/test_util.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utils to simulate various outcomes of a sync session.
+#ifndef SYNC_SESSIONS_TEST_UTIL_H_
+#define SYNC_SESSIONS_TEST_UTIL_H_
+#pragma once
+
+#include "sync/engine/syncer.h"
+#include "sync/sessions/sync_session.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace sessions {
+namespace test_util {
+
+void SimulateHasMoreToSync(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end);
+void SimulateDownloadUpdatesFailed(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end);
+void SimulateCommitFailed(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end);
+void SimulateSuccess(sessions::SyncSession* session,
+ SyncerStep begin, SyncerStep end);
+void SimulateThrottledImpl(sessions::SyncSession* session,
+ const base::TimeDelta& delta);
+void SimulatePollIntervalUpdateImpl(sessions::SyncSession* session,
+ const base::TimeDelta& new_poll);
+void SimulateSessionsCommitDelayUpdateImpl(sessions::SyncSession* session,
+ const base::TimeDelta& new_delay);
+
+ACTION_P(SimulateThrottled, throttle) {
+ SimulateThrottledImpl(arg0, throttle);
+}
+
+ACTION_P(SimulatePollIntervalUpdate, poll) {
+ SimulatePollIntervalUpdateImpl(arg0, poll);
+}
+
+ACTION_P(SimulateSessionsCommitDelayUpdate, poll) {
+ SimulateSessionsCommitDelayUpdateImpl(arg0, poll);
+}
+
+} // namespace test_util
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_SESSIONS_TEST_UTIL_H_
diff --git a/sync/sync.gyp b/sync/sync.gyp
new file mode 100644
index 0000000..07e6b04
--- /dev/null
+++ b/sync/sync.gyp
@@ -0,0 +1,351 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'targets': [
+ # The core sync library.
+ #
+ # TODO(akalin): Rename this to something like 'sync_core' and
+ # reserve the 'sync' name for the overarching library that clients
+ # should depend on.
+ {
+ 'target_name': 'sync',
+ 'type': 'static_library',
+ 'variables': { 'enable_wexit_time_destructors': 1, },
+ 'include_dirs': [
+ '..',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../build/temp_gyp/googleurl.gyp:googleurl',
+ '../crypto/crypto.gyp:crypto',
+ '../net/net.gyp:net',
+ '../sql/sql.gyp:sql',
+ 'protocol/sync_proto.gyp:sync_proto',
+ ],
+ 'export_dependent_settings': [
+ '../base/base.gyp:base',
+ '../build/temp_gyp/googleurl.gyp:googleurl',
+ '../crypto/crypto.gyp:crypto',
+ '../net/net.gyp:net',
+ '../sql/sql.gyp:sql',
+ 'protocol/sync_proto.gyp:sync_proto',
+ ],
+ 'sources': [
+ 'engine/apply_updates_command.cc',
+ 'engine/apply_updates_command.h',
+ 'engine/build_commit_command.cc',
+ 'engine/build_commit_command.h',
+ 'engine/cleanup_disabled_types_command.cc',
+ 'engine/cleanup_disabled_types_command.h',
+ 'engine/clear_data_command.cc',
+ 'engine/clear_data_command.h',
+ 'engine/conflict_resolver.cc',
+ 'engine/conflict_resolver.h',
+ 'engine/download_updates_command.cc',
+ 'engine/download_updates_command.h',
+ 'engine/get_commit_ids_command.cc',
+ 'engine/get_commit_ids_command.h',
+ 'engine/model_changing_syncer_command.cc',
+ 'engine/model_changing_syncer_command.h',
+ 'engine/model_safe_worker.cc',
+ 'engine/model_safe_worker.h',
+ 'engine/passive_model_worker.cc',
+ 'engine/passive_model_worker.h',
+ 'engine/net/server_connection_manager.cc',
+ 'engine/net/server_connection_manager.h',
+ 'engine/net/url_translator.cc',
+ 'engine/net/url_translator.h',
+ 'engine/nigori_util.cc',
+ 'engine/nigori_util.h',
+ 'engine/nudge_source.cc',
+ 'engine/nudge_source.h',
+ 'engine/polling_constants.cc',
+ 'engine/polling_constants.h',
+ 'engine/post_commit_message_command.cc',
+ 'engine/post_commit_message_command.h',
+ 'engine/process_commit_response_command.cc',
+ 'engine/process_commit_response_command.h',
+ 'engine/process_updates_command.cc',
+ 'engine/process_updates_command.h',
+ 'engine/resolve_conflicts_command.cc',
+ 'engine/resolve_conflicts_command.h',
+ 'engine/store_timestamps_command.cc',
+ 'engine/store_timestamps_command.h',
+ 'engine/syncer.cc',
+ 'engine/syncer.h',
+ 'engine/syncer_command.cc',
+ 'engine/syncer_command.h',
+ 'engine/syncer_proto_util.cc',
+ 'engine/syncer_proto_util.h',
+ 'engine/sync_scheduler.cc',
+ 'engine/sync_scheduler.h',
+ 'engine/syncer_types.cc',
+ 'engine/syncer_types.h',
+ 'engine/syncer_util.cc',
+ 'engine/syncer_util.h',
+ 'engine/syncproto.h',
+ 'engine/update_applicator.cc',
+ 'engine/update_applicator.h',
+ 'engine/verify_updates_command.cc',
+ 'engine/verify_updates_command.h',
+ 'js/js_arg_list.cc',
+ 'js/js_arg_list.h',
+ 'js/js_backend.h',
+ 'js/js_controller.h',
+ 'js/js_event_details.cc',
+ 'js/js_event_details.h',
+ 'js/js_event_handler.h',
+ 'js/js_reply_handler.h',
+ 'js/sync_js_controller.cc',
+ 'js/sync_js_controller.h',
+ 'protocol/proto_enum_conversions.cc',
+ 'protocol/proto_enum_conversions.h',
+ 'protocol/proto_value_conversions.cc',
+ 'protocol/proto_value_conversions.h',
+ 'protocol/service_constants.h',
+ 'protocol/sync_protocol_error.cc',
+ 'protocol/sync_protocol_error.h',
+ 'sessions/debug_info_getter.h',
+ 'sessions/ordered_commit_set.cc',
+ 'sessions/ordered_commit_set.h',
+ 'sessions/session_state.cc',
+ 'sessions/session_state.h',
+ 'sessions/status_controller.cc',
+ 'sessions/status_controller.h',
+ 'sessions/sync_session.cc',
+ 'sessions/sync_session.h',
+ 'sessions/sync_session_context.cc',
+ 'sessions/sync_session_context.h',
+ 'syncable/blob.h',
+ 'syncable/directory_backing_store.cc',
+ 'syncable/directory_backing_store.h',
+ 'syncable/directory_change_delegate.h',
+ 'syncable/dir_open_result.h',
+ 'syncable/in_memory_directory_backing_store.cc',
+ 'syncable/in_memory_directory_backing_store.h',
+ 'syncable/model_type.cc',
+ 'syncable/model_type.h',
+ 'syncable/model_type_payload_map.cc',
+ 'syncable/model_type_payload_map.h',
+ 'syncable/on_disk_directory_backing_store.cc',
+ 'syncable/on_disk_directory_backing_store.h',
+ 'syncable/syncable.cc',
+ 'syncable/syncable_changes_version.h',
+ 'syncable/syncable_columns.h',
+ 'syncable/syncable_enum_conversions.cc',
+ 'syncable/syncable_enum_conversions.h',
+ 'syncable/syncable.h',
+ 'syncable/syncable_id.cc',
+ 'syncable/syncable_id.h',
+ 'syncable/syncable-inl.h',
+ 'syncable/transaction_observer.h',
+ 'util/cryptographer.cc',
+ 'util/cryptographer.h',
+
+ # TODO(akalin): Figure out a better place to put
+ # data_encryption_win*; it's also used by autofill.
+ 'util/data_encryption_win.cc',
+ 'util/data_encryption_win.h',
+
+ 'util/data_type_histogram.h',
+ 'util/encryptor.h',
+ 'util/enum_set.h',
+ 'util/extensions_activity_monitor.cc',
+ 'util/extensions_activity_monitor.h',
+ 'util/get_session_name.cc',
+ 'util/get_session_name.h',
+ 'util/get_session_name_mac.mm',
+ 'util/get_session_name_mac.h',
+ 'util/get_session_name_win.cc',
+ 'util/get_session_name_win.h',
+ 'util/immutable.h',
+ 'util/logging.cc',
+ 'util/logging.h',
+ 'util/nigori.cc',
+ 'util/nigori.h',
+ 'util/report_unrecoverable_error_function.h',
+ 'util/syncer_error.cc',
+ 'util/syncer_error.h',
+ 'util/time.cc',
+ 'util/time.h',
+ 'util/unrecoverable_error_handler.h',
+ 'util/unrecoverable_error_info.h',
+ 'util/unrecoverable_error_info.cc',
+ 'util/weak_handle.cc',
+ 'util/weak_handle.h',
+ ],
+ },
+
+ # Test support files for the 'sync' target.
+ {
+ 'target_name': 'test_support_sync',
+ 'type': 'static_library',
+ 'variables': { 'enable_wexit_time_destructors': 1, },
+ 'include_dirs': [
+ '..',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ 'sync',
+ ],
+ 'export_dependent_settings': [
+ '../base/base.gyp:base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ 'sync',
+ ],
+ 'sources': [
+ 'js/js_test_util.cc',
+ 'js/js_test_util.h',
+ 'sessions/test_util.cc',
+ 'sessions/test_util.h',
+ 'syncable/model_type_test_util.cc',
+ 'syncable/model_type_test_util.h',
+ 'syncable/syncable_mock.cc',
+ 'syncable/syncable_mock.h',
+ 'test/fake_encryptor.cc',
+ 'test/fake_encryptor.h',
+ 'test/fake_extensions_activity_monitor.cc',
+ 'test/fake_extensions_activity_monitor.h',
+ 'test/null_directory_change_delegate.cc',
+ 'test/null_directory_change_delegate.h',
+ 'test/null_transaction_observer.cc',
+ 'test/null_transaction_observer.h',
+ 'test/engine/test_directory_setter_upper.cc',
+ 'test/engine/test_directory_setter_upper.h',
+ 'test/engine/fake_model_safe_worker_registrar.cc',
+ 'test/engine/fake_model_safe_worker_registrar.h',
+ 'test/engine/fake_model_worker.cc',
+ 'test/engine/fake_model_worker.h',
+ 'test/engine/mock_connection_manager.cc',
+ 'test/engine/mock_connection_manager.h',
+ 'test/engine/syncer_command_test.cc',
+ 'test/engine/syncer_command_test.h',
+ 'test/engine/test_id_factory.h',
+ 'test/engine/test_syncable_utils.cc',
+ 'test/engine/test_syncable_utils.h',
+ 'test/sessions/test_scoped_session_event_listener.h',
+ 'test/test_directory_backing_store.cc',
+ 'test/test_directory_backing_store.h',
+ 'util/test_unrecoverable_error_handler.cc',
+ 'util/test_unrecoverable_error_handler.h',
+ ],
+ },
+
+ # Unit tests for the 'sync' target. This cannot be a static
+ # library because the unit test files have to be compiled directly
+ # into the executable, so we push the target files to the
+ # depending executable target via direct_dependent_settings.
+ {
+ 'target_name': 'sync_tests',
+ 'type': 'none',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ 'sync',
+ 'test_support_sync',
+ ],
+ 'export_dependent_settings': [
+ '../base/base.gyp:base',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ 'sync',
+ 'test_support_sync',
+ ],
+ 'direct_dependent_settings': {
+ 'variables': { 'enable_wexit_time_destructors': 1, },
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'engine/apply_updates_command_unittest.cc',
+ 'engine/build_commit_command_unittest.cc',
+ 'engine/clear_data_command_unittest.cc',
+ 'engine/cleanup_disabled_types_command_unittest.cc',
+ 'engine/download_updates_command_unittest.cc',
+ 'engine/model_changing_syncer_command_unittest.cc',
+ 'engine/model_safe_worker_unittest.cc',
+ 'engine/nigori_util_unittest.cc',
+ 'engine/process_commit_response_command_unittest.cc',
+ 'engine/process_updates_command_unittest.cc',
+ 'engine/resolve_conflicts_command_unittest.cc',
+ 'engine/syncer_proto_util_unittest.cc',
+ 'engine/sync_scheduler_unittest.cc',
+ 'engine/sync_scheduler_whitebox_unittest.cc',
+ 'engine/syncer_unittest.cc',
+ 'engine/syncproto_unittest.cc',
+ 'engine/verify_updates_command_unittest.cc',
+ 'js/js_arg_list_unittest.cc',
+ 'js/js_event_details_unittest.cc',
+ 'js/sync_js_controller_unittest.cc',
+ 'protocol/proto_enum_conversions_unittest.cc',
+ 'protocol/proto_value_conversions_unittest.cc',
+ 'sessions/ordered_commit_set_unittest.cc',
+ 'sessions/session_state_unittest.cc',
+ 'sessions/status_controller_unittest.cc',
+ 'sessions/sync_session_context_unittest.cc',
+ 'sessions/sync_session_unittest.cc',
+ 'syncable/directory_backing_store_unittest.cc',
+ 'syncable/model_type_payload_map_unittest.cc',
+ 'syncable/model_type_unittest.cc',
+ 'syncable/syncable_enum_conversions_unittest.cc',
+ 'syncable/syncable_id_unittest.cc',
+ 'syncable/syncable_unittest.cc',
+ 'util/cryptographer_unittest.cc',
+ 'util/data_encryption_win_unittest.cc',
+ 'util/data_type_histogram_unittest.cc',
+ 'util/enum_set_unittest.cc',
+ 'util/get_session_name_unittest.cc',
+ 'util/immutable_unittest.cc',
+ 'util/nigori_unittest.cc',
+ 'util/protobuf_unittest.cc',
+ 'util/weak_handle_unittest.cc',
+ ],
+ },
+ },
+
+ # The unit test executable for sync tests. Currently this isn't
+ # automatically run, as there is already a sync_unit_tests
+ # executable in chrome.gyp; this is just to make sure that all the
+ # link-time dependencies for the files in the targets above
+ # resolve.
+ #
+ # TODO(akalin): Rename this to sync_unit_tests once we've moved
+ # everything from chrome.gyp.
+ #
+ # TODO(akalin): Make base.gyp have a test_main target that
+ # includes run_all_unittests.cc and the possible tcmalloc
+ # dependency and use that everywhere.
+ {
+ 'target_name': 'sync_unit_tests_canary',
+ 'type': 'executable',
+ 'sources': [
+ '../base/test/run_all_unittests.cc',
+ ],
+ 'dependencies': [
+ 'sync_tests',
+ ],
+
+ # TODO(akalin): This is needed because histogram.cc uses
+ # leak_annotations.h, which pulls this in. Make 'base'
+ # propagate this dependency.
+ 'conditions': [
+ ['OS=="linux" and linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/sync/syncable/DEPS b/sync/syncable/DEPS
new file mode 100644
index 0000000..cb86572
--- /dev/null
+++ b/sync/syncable/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ "+net/base/escape.h",
+ "+sql",
+ "+sync/protocol",
+ "+sync/test",
+ "+sync/util",
+
+ # this file is weird.
+ "+sync/engine/syncproto.h",
+
+ # maybe this file should live in syncable?
+ "+sync/engine/model_safe_worker.h",
+]
diff --git a/sync/syncable/blob.h b/sync/syncable/blob.h
new file mode 100644
index 0000000..7266763
--- /dev/null
+++ b/sync/syncable/blob.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_BLOB_H_
+#define SYNC_SYNCABLE_BLOB_H_
+#pragma once
+
+#include <vector>
+
+#include "base/basictypes.h" // For uint8.
+
+namespace syncable {
+
+typedef std::vector<uint8> Blob;
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_BLOB_H_
diff --git a/sync/syncable/dir_open_result.h b/sync/syncable/dir_open_result.h
new file mode 100644
index 0000000..6138a26
--- /dev/null
+++ b/sync/syncable/dir_open_result.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+#define SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+#pragma once
+
+namespace syncable {
+enum DirOpenResult { OPENED, // success.
+ FAILED_NEWER_VERSION, // DB version is too new.
+ FAILED_MAKE_REPOSITORY, // Couldn't create subdir.
+ FAILED_OPEN_DATABASE, // sqlite_open() failed.
+ FAILED_DISK_FULL, // The disk is full.
+ FAILED_DATABASE_CORRUPT, // Something is wrong with the DB
+ FAILED_LOGICAL_CORRUPTION, // Invalid database contents
+};
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
new file mode 100644
index 0000000..c7395c3
--- /dev/null
+++ b/sync/syncable/directory_backing_store.cc
@@ -0,0 +1,1078 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/directory_backing_store.h"
+
+#include "build/build_config.h"
+
+#include <limits>
+
+#include "base/base64.h"
+#include "base/file_util.h"
+#include "base/hash_tables.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/stl_util.h"
+#include "base/string_number_conversions.h"
+#include "base/stringprintf.h"
+#include "base/time.h"
+#include "sql/connection.h"
+#include "sql/statement.h"
+#include "sql/transaction.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/service_constants.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/syncable/syncable-inl.h"
+#include "sync/syncable/syncable_columns.h"
+#include "sync/util/time.h"
+
+using std::string;
+
+namespace syncable {
+
+// This just has to be big enough to hold an UPDATE or INSERT statement that
+// modifies all the columns in the entry table.
+static const string::size_type kUpdateStatementBufferSize = 2048;
+
+// Increment this version whenever updating DB tables.
+extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
+const int32 kCurrentDBVersion = 78;
+
+// Iterate over the fields of |entry| and bind each to |statement| for
+// updating. Returns the number of args bound.
+void BindFields(const EntryKernel& entry,
+ sql::Statement* statement) {
+ int index = 0;
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
+ }
+ for ( ; i < TIME_FIELDS_END; ++i) {
+ statement->BindInt64(index++,
+ browser_sync::TimeToProtoTime(
+ entry.ref(static_cast<TimeField>(i))));
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
+ }
+ std::string temp;
+ for ( ; i < PROTO_FIELDS_END; ++i) {
+ entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
+ statement->BindBlob(index++, temp.data(), temp.length());
+ }
+}
+
+// The caller owns the returned EntryKernel*. Assumes the statement currently
+// points to a valid row in the metas table.
+EntryKernel* UnpackEntry(sql::Statement* statement) {
+ EntryKernel* kernel = new EntryKernel();
+ DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
+ }
+ for ( ; i < TIME_FIELDS_END; ++i) {
+ kernel->put(static_cast<TimeField>(i),
+ browser_sync::ProtoTimeToTime(statement->ColumnInt64(i)));
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ kernel->mutable_ref(static_cast<IdField>(i)).s_ =
+ statement->ColumnString(i);
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ kernel->put(static_cast<StringField>(i),
+ statement->ColumnString(i));
+ }
+ for ( ; i < PROTO_FIELDS_END; ++i) {
+ kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
+ statement->ColumnBlob(i), statement->ColumnByteLength(i));
+ }
+ return kernel;
+}
+
+namespace {
+
+string ComposeCreateTableColumnSpecs() {
+ const ColumnSpec* begin = g_metas_columns;
+ const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ char separator = '(';
+ for (const ColumnSpec* column = begin; column != end; ++column) {
+ query.push_back(separator);
+ separator = ',';
+ query.append(column->name);
+ query.push_back(' ');
+ query.append(column->spec);
+ }
+ query.push_back(')');
+ return query;
+}
+
+void AppendColumnList(std::string* output) {
+ const char* joiner = " ";
+ // Be explicit in SELECT order to match up with UnpackEntry.
+ for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
+ output->append(joiner);
+ output->append(ColumnName(i));
+ joiner = ", ";
+ }
+}
+
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// DirectoryBackingStore implementation.
+
+DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
+ : db_(new sql::Connection()),
+ dir_name_(dir_name),
+ needs_column_refresh_(false) {
+}
+
+DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
+ sql::Connection* db)
+ : db_(db),
+ dir_name_(dir_name),
+ needs_column_refresh_(false) {
+}
+
+DirectoryBackingStore::~DirectoryBackingStore() {
+}
+
+bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
+ if (handles.empty())
+ return true;
+
+ sql::Statement statement(db_->GetCachedStatement(
+ SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
+
+ for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
+ ++i) {
+ statement.BindInt64(0, *i);
+ if (!statement.Run())
+ return false;
+ statement.Reset();
+ }
+ return true;
+}
+
+bool DirectoryBackingStore::SaveChanges(
+ const Directory::SaveChangesSnapshot& snapshot) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(db_->is_open());
+
+ // Back out early if there is nothing to write.
+ bool save_info =
+ (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
+ if (snapshot.dirty_metas.size() < 1 && !save_info)
+ return true;
+
+ sql::Transaction transaction(db_.get());
+ if (!transaction.Begin())
+ return false;
+
+ for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+ i != snapshot.dirty_metas.end(); ++i) {
+ DCHECK(i->is_dirty());
+ if (!SaveEntryToDB(*i))
+ return false;
+ }
+
+ if (!DeleteEntries(snapshot.metahandles_to_purge))
+ return false;
+
+ if (save_info) {
+ const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
+ sql::Statement s1(db_->GetCachedStatement(
+ SQL_FROM_HERE,
+ "UPDATE share_info "
+ "SET store_birthday = ?, "
+ "next_id = ?, "
+ "notification_state = ?"));
+ s1.BindString(0, info.store_birthday);
+ s1.BindInt64(1, info.next_id);
+ s1.BindBlob(2, info.notification_state.data(),
+ info.notification_state.size());
+
+ if (!s1.Run())
+ return false;
+ DCHECK_EQ(db_->GetLastChangeCount(), 1);
+
+ sql::Statement s2(db_->GetCachedStatement(
+ SQL_FROM_HERE,
+ "INSERT OR REPLACE "
+ "INTO models (model_id, progress_marker, initial_sync_ended) "
+ "VALUES (?, ?, ?)"));
+
+ for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
+ // We persist not ModelType but rather a protobuf-derived ID.
+ string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
+ string progress_marker;
+ info.download_progress[i].SerializeToString(&progress_marker);
+ s2.BindBlob(0, model_id.data(), model_id.length());
+ s2.BindBlob(1, progress_marker.data(), progress_marker.length());
+ s2.BindBool(2, info.initial_sync_ended.Has(ModelTypeFromInt(i)));
+ if (!s2.Run())
+ return false;
+ DCHECK_EQ(db_->GetLastChangeCount(), 1);
+ s2.Reset();
+ }
+ }
+
+ return transaction.Commit();
+}
+
+bool DirectoryBackingStore::InitializeTables() {
+ sql::Transaction transaction(db_.get());
+ if (!transaction.Begin())
+ return false;
+
+ int version_on_disk = GetVersion();
+
+ // Upgrade from version 67. Version 67 was widely distributed as the original
+ // Bookmark Sync release. Version 68 removed unique naming.
+ if (version_on_disk == 67) {
+ if (MigrateVersion67To68())
+ version_on_disk = 68;
+ }
+ // Version 69 introduced additional datatypes.
+ if (version_on_disk == 68) {
+ if (MigrateVersion68To69())
+ version_on_disk = 69;
+ }
+
+ if (version_on_disk == 69) {
+ if (MigrateVersion69To70())
+ version_on_disk = 70;
+ }
+
+ // Version 71 changed the sync progress information to be per-datatype.
+ if (version_on_disk == 70) {
+ if (MigrateVersion70To71())
+ version_on_disk = 71;
+ }
+
+ // Version 72 removed extended attributes, a legacy way to do extensible
+ // key/value information, stored in their own table.
+ if (version_on_disk == 71) {
+ if (MigrateVersion71To72())
+ version_on_disk = 72;
+ }
+
+ // Version 73 added a field for notification state.
+ if (version_on_disk == 72) {
+ if (MigrateVersion72To73())
+ version_on_disk = 73;
+ }
+
+ // Version 74 added state for the autofill migration.
+ if (version_on_disk == 73) {
+ if (MigrateVersion73To74())
+ version_on_disk = 74;
+ }
+
+ // Version 75 migrated from int64-based timestamps to per-datatype tokens.
+ if (version_on_disk == 74) {
+ if (MigrateVersion74To75())
+ version_on_disk = 75;
+ }
+
+ // Version 76 removed all (5) autofill migration related columns.
+ if (version_on_disk == 75) {
+ if (MigrateVersion75To76())
+ version_on_disk = 76;
+ }
+
+ // Version 77 standardized all time fields to ms since the Unix
+ // epoch.
+ if (version_on_disk == 76) {
+ if (MigrateVersion76To77())
+ version_on_disk = 77;
+ }
+
+ // Version 78 added the column base_server_specifics to the metas table.
+ if (version_on_disk == 77) {
+ if (MigrateVersion77To78())
+ version_on_disk = 78;
+ }
+
+ // If one of the migrations requested it, drop columns that aren't current.
+ // It's only safe to do this after migrating all the way to the current
+ // version.
+ if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
+ if (!RefreshColumns())
+ version_on_disk = 0;
+ }
+
+ // A final, alternative catch-all migration to simply re-sync everything.
+ //
+ // TODO(rlarocque): It's wrong to recreate the database here unless the higher
+ // layers were expecting us to do so. See crbug.com/103824. We must leave
+ // this code as is for now because this is the code that ends up creating the
+ // database in the first time sync case, where the higher layers are expecting
+ // us to create a fresh database. The solution to this should be to implement
+ // crbug.com/105018.
+ if (version_on_disk != kCurrentDBVersion) {
+ if (version_on_disk > kCurrentDBVersion)
+ return FAILED_NEWER_VERSION;
+
+ // Fallback (re-sync everything) migration path.
+ DVLOG(1) << "Old/null sync database, version " << version_on_disk;
+ // Delete the existing database (if any), and create a fresh one.
+ DropAllTables();
+ if (!CreateTables())
+ return false;
+ }
+
+ sql::Statement s(db_->GetUniqueStatement(
+ "SELECT db_create_version, db_create_time FROM share_info"));
+ if (!s.Step())
+ return false;
+ string db_create_version = s.ColumnString(0);
+ int db_create_time = s.ColumnInt(1);
+ DVLOG(1) << "DB created at " << db_create_time << " by version " <<
+ db_create_version;
+
+ return transaction.Commit();
+}
+
+// This function drops unused columns by creating a new table that contains only
+// the currently used columns then copying all rows from the old tables into
+// this new one. The tables are then rearranged so the new replaces the old.
+bool DirectoryBackingStore::RefreshColumns() {
+ DCHECK(needs_column_refresh_);
+
+ // Create a new table named temp_metas.
+ SafeDropTable("temp_metas");
+ if (!CreateMetasTable(true))
+ return false;
+
+ // Populate temp_metas from metas.
+ //
+ // At this point, the metas table may contain columns belonging to obsolete
+ // schema versions. This statement explicitly lists only the columns that
+ // belong to the current schema version, so the obsolete columns will be
+ // effectively dropped once we rename temp_metas over top of metas.
+ std::string query = "INSERT INTO temp_metas (";
+ AppendColumnList(&query);
+ query.append(") SELECT ");
+ AppendColumnList(&query);
+ query.append(" FROM metas");
+ if (!db_->Execute(query.c_str()))
+ return false;
+
+ // Drop metas.
+ SafeDropTable("metas");
+
+ // Rename temp_metas -> metas.
+ if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
+ return false;
+
+ // Repeat the process for share_info.
+ SafeDropTable("temp_share_info");
+ if (!CreateShareInfoTable(true))
+ return false;
+
+ if (!db_->Execute(
+ "INSERT INTO temp_share_info (id, name, store_birthday, "
+ "db_create_version, db_create_time, next_id, cache_guid,"
+ "notification_state) "
+ "SELECT id, name, store_birthday, db_create_version, "
+ "db_create_time, next_id, cache_guid, notification_state "
+ "FROM share_info"))
+ return false;
+
+ SafeDropTable("share_info");
+ if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
+ return false;
+
+ needs_column_refresh_ = false;
+ return true;
+}
+
+bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
+ string select;
+ select.reserve(kUpdateStatementBufferSize);
+ select.append("SELECT ");
+ AppendColumnList(&select);
+ select.append(" FROM metas ");
+
+ sql::Statement s(db_->GetUniqueStatement(select.c_str()));
+
+ while (s.Step()) {
+ EntryKernel *kernel = UnpackEntry(&s);
+ entry_bucket->insert(kernel);
+ }
+ return s.Succeeded();
+}
+
+bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
+ {
+ sql::Statement s(
+ db_->GetUniqueStatement(
+ "SELECT store_birthday, next_id, cache_guid, notification_state "
+ "FROM share_info"));
+ if (!s.Step())
+ return false;
+
+ info->kernel_info.store_birthday = s.ColumnString(0);
+ info->kernel_info.next_id = s.ColumnInt64(1);
+ info->cache_guid = s.ColumnString(2);
+ s.ColumnBlobAsString(3, &(info->kernel_info.notification_state));
+
+ // Verify there was only one row returned.
+ DCHECK(!s.Step());
+ DCHECK(s.Succeeded());
+ }
+
+ {
+ sql::Statement s(
+ db_->GetUniqueStatement(
+ "SELECT model_id, progress_marker, initial_sync_ended "
+ "FROM models"));
+
+ while (s.Step()) {
+ ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
+ s.ColumnByteLength(0));
+ if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
+ info->kernel_info.download_progress[type].ParseFromArray(
+ s.ColumnBlob(1), s.ColumnByteLength(1));
+ if (s.ColumnBool(2))
+ info->kernel_info.initial_sync_ended.Put(type);
+ }
+ }
+ if (!s.Succeeded())
+ return false;
+ }
+ {
+ sql::Statement s(
+ db_->GetUniqueStatement(
+ "SELECT MAX(metahandle) FROM metas"));
+ if (!s.Step())
+ return false;
+
+ info->max_metahandle = s.ColumnInt64(0);
+
+ // Verify only one row was returned.
+ DCHECK(!s.Step());
+ DCHECK(s.Succeeded());
+ }
+ return true;
+}
+
+bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
+ // This statement is constructed at runtime, so we can't use
+ // GetCachedStatement() to let the Connection cache it. We will construct
+ // and cache it ourselves the first time this function is called.
+ if (!save_entry_statement_.is_valid()) {
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ query.append("INSERT OR REPLACE INTO metas ");
+ string values;
+ values.reserve(kUpdateStatementBufferSize);
+ values.append("VALUES ");
+ const char* separator = "( ";
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) {
+ query.append(separator);
+ values.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ values.append("?");
+ }
+ query.append(" ) ");
+ values.append(" )");
+ query.append(values);
+
+ save_entry_statement_.Assign(
+ db_->GetUniqueStatement(query.c_str()));
+ } else {
+ save_entry_statement_.Reset();
+ }
+
+ BindFields(entry, &save_entry_statement_);
+ return save_entry_statement_.Run();
+}
+
+bool DirectoryBackingStore::DropDeletedEntries() {
+ return db_->Execute("DELETE FROM metas "
+ "WHERE is_del > 0 "
+ "AND is_unsynced < 1 "
+ "AND is_unapplied_update < 1");
+}
+
+bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
+ string query = "DROP TABLE IF EXISTS ";
+ query.append(table_name);
+ return db_->Execute(query.c_str());
+}
+
+void DirectoryBackingStore::DropAllTables() {
+ SafeDropTable("metas");
+ SafeDropTable("temp_metas");
+ SafeDropTable("share_info");
+ SafeDropTable("temp_share_info");
+ SafeDropTable("share_version");
+ SafeDropTable("extended_attributes");
+ SafeDropTable("models");
+ SafeDropTable("temp_models");
+ needs_column_refresh_ = false;
+}
+
+// static
+ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
+ const void* data, int size) {
+ sync_pb::EntitySpecifics specifics;
+ if (!specifics.ParseFromArray(data, size))
+ return syncable::UNSPECIFIED;
+ return syncable::GetModelTypeFromSpecifics(specifics);
+}
+
+// static
+string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
+ sync_pb::EntitySpecifics specifics;
+ syncable::AddDefaultFieldValue(model_type, &specifics);
+ return specifics.SerializeAsString();
+}
+
+// static
+std::string DirectoryBackingStore::GenerateCacheGUID() {
+ // Generate a GUID with 128 bits of randomness.
+ const int kGuidBytes = 128 / 8;
+ std::string guid;
+ base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
+ return guid;
+}
+
+bool DirectoryBackingStore::MigrateToSpecifics(
+ const char* old_columns,
+ const char* specifics_column,
+ void (*handler_function)(sql::Statement* old_value_query,
+ int old_value_column,
+ sync_pb::EntitySpecifics* mutable_new_value)) {
+ std::string query_sql = base::StringPrintf(
+ "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
+ std::string update_sql = base::StringPrintf(
+ "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
+
+ sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
+ sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));
+
+ while (query.Step()) {
+ int64 metahandle = query.ColumnInt64(0);
+ std::string new_value_bytes;
+ query.ColumnBlobAsString(1, &new_value_bytes);
+ sync_pb::EntitySpecifics new_value;
+ new_value.ParseFromString(new_value_bytes);
+ handler_function(&query, 2, &new_value);
+ new_value.SerializeToString(&new_value_bytes);
+
+ update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
+ update.BindInt64(1, metahandle);
+ if (!update.Run())
+ return false;
+ update.Reset();
+ }
+ return query.Succeeded();
+}
+
+bool DirectoryBackingStore::SetVersion(int version) {
+ sql::Statement s(db_->GetCachedStatement(
+ SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
+ s.BindInt(0, version);
+
+ return s.Run();
+}
+
+int DirectoryBackingStore::GetVersion() {
+ if (!db_->DoesTableExist("share_version"))
+ return 0;
+
+ sql::Statement statement(db_->GetUniqueStatement(
+ "SELECT data FROM share_version"));
+ if (statement.Step()) {
+ return statement.ColumnInt(0);
+ } else {
+ return 0;
+ }
+}
+
+bool DirectoryBackingStore::MigrateVersion67To68() {
+  // Version 68 dropped three columns from 'metas':
+  //   string NAME
+  //   string UNSANITIZED_NAME
+  //   string SERVER_NAME
+  // There is no data to carry over; flagging a column refresh is enough to
+  // have the obsolete columns discarded.
+  needs_column_refresh_ = true;
+  SetVersion(68);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion69To70() {
+  // Version 70 added "unique_client_tag" and renamed "singleton_tag" to
+  // "unique_server_tag".  The rename is performed by adding the new column,
+  // copying the data over, and letting the column refresh drop the old
+  // singleton_tag column.
+  if (!db_->Execute(
+          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
+    return false;
+  if (!db_->Execute(
+          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
+    return false;
+  needs_column_refresh_ = true;
+
+  if (!db_->Execute(
+          "UPDATE metas SET unique_server_tag = singleton_tag"))
+    return false;
+
+  // Bump the schema version only after the migration statements succeed.
+  // Previously SetVersion(70) ran first, which could stamp a version-70
+  // marker on a database whose schema change failed partway through; the
+  // other fallible migrations in this file all set the version last.
+  SetVersion(70);
+  return true;
+}
+
+namespace {
+
+// Callback passed to MigrateToSpecifics for the v68->v69 migration. See
+// MigrateVersion68To69().
+void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
+                                 int old_value_column,
+                                 sync_pb::EntitySpecifics* mutable_new_value) {
+  // Extract data from the four consecutive columns we expect, starting at
+  // |old_value_column|: is_bookmark_object, url, favicon, is_dir.
+  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
+  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
+  std::string old_favicon;
+  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
+  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
+
+  if (old_is_bookmark_object) {
+    sync_pb::BookmarkSpecifics* bookmark_data =
+        mutable_new_value->mutable_bookmark();
+    // Only non-directory bookmarks carry a URL and favicon.
+    if (!old_is_dir) {
+      bookmark_data->set_url(old_url);
+      bookmark_data->set_favicon(old_favicon);
+    }
+  }
+}
+
+}  // namespace
+
+bool DirectoryBackingStore::MigrateVersion68To69() {
+  // In Version 68, there were columns on table 'metas':
+  //   string BOOKMARK_URL
+  //   string SERVER_BOOKMARK_URL
+  //   blob BOOKMARK_FAVICON
+  //   blob SERVER_BOOKMARK_FAVICON
+  // In version 69, these columns went away in favor of storing
+  // a serialized EntitySpecifics protobuf in the columns:
+  //   protobuf blob SPECIFICS
+  //   protobuf blob SERVER_SPECIFICS
+  // For bookmarks, EntitySpecifics is extended as per
+  // bookmark_specifics.proto. This migration converts bookmarks from the
+  // former scheme to the latter scheme.
+
+  // First, add the two new columns to the schema.
+  if (!db_->Execute(
+          "ALTER TABLE metas ADD COLUMN specifics blob"))
+    return false;
+  if (!db_->Execute(
+          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
+    return false;
+
+  // Next, fold data from the old columns into the new protobuf columns.
+  // The local and server variants of each bookmark column are converted by
+  // the same callback.
+  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
+                           "bookmark_favicon, is_dir"),
+                          "specifics",
+                          &EncodeBookmarkURLAndFavicon)) {
+    return false;
+  }
+  if (!MigrateToSpecifics(("server_is_bookmark_object, "
+                           "server_bookmark_url, "
+                           "server_bookmark_favicon, "
+                           "server_is_dir"),
+                          "server_specifics",
+                          &EncodeBookmarkURLAndFavicon)) {
+    return false;
+  }
+
+  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
+  // ModelType: it shouldn't have BookmarkSpecifics.
+  if (!db_->Execute(
+      "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
+      "singleton_tag IN ('google_chrome')"))
+    return false;
+
+  SetVersion(69);
+  needs_column_refresh_ = true;  // Trigger deletion of old columns.
+  return true;
+}
+
+// In version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
+// were removed from the share_info table. They were replaced by
+// the 'models' table, which has these values on a per-datatype basis.
+bool DirectoryBackingStore::MigrateVersion70To71() {
+  if (!CreateV71ModelsTable())
+    return false;
+
+  // Move data from the old share_info columns to the new models table.
+  {
+    sql::Statement fetch(db_->GetUniqueStatement(
+        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
+    if (!fetch.Step())
+      return false;
+
+    int64 last_sync_timestamp = fetch.ColumnInt64(0);
+    bool initial_sync_ended = fetch.ColumnBool(1);
+
+    // Verify there were no additional rows returned.
+    DCHECK(!fetch.Step());
+    DCHECK(fetch.Succeeded());
+
+    sql::Statement update(db_->GetUniqueStatement(
+        "INSERT INTO models (model_id, "
+        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
+    // The legacy per-share values are recorded under the BOOKMARKS model id.
+    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
+    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
+    update.BindInt64(1, last_sync_timestamp);
+    update.BindBool(2, initial_sync_ended);
+
+    if (!update.Run())
+      return false;
+  }
+
+  // Drop the columns from the old share_info table via a temp table:
+  // copy the surviving columns into temp_share_info, drop the original,
+  // and rename the temp table into place (SQLite cannot drop columns).
+  const bool kCreateAsTempShareInfo = true;
+
+  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
+    return false;
+  if (!db_->Execute(
+      "INSERT INTO temp_share_info (id, name, store_birthday, "
+      "db_create_version, db_create_time, next_id, cache_guid) "
+      "SELECT id, name, store_birthday, db_create_version, "
+      "db_create_time, next_id, cache_guid FROM share_info"))
+    return false;
+  SafeDropTable("share_info");
+  if (!db_->Execute(
+      "ALTER TABLE temp_share_info RENAME TO share_info"))
+    return false;
+  SetVersion(71);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion71To72() {
+  // Version 72 dropped the 'extended_attributes' table; its contents were
+  // unused, so nothing needs to be preserved.
+  SafeDropTable("extended_attributes");
+  SetVersion(72);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion72To73() {
+  // Version 73 added a single 'notification_state' column to 'share_info'.
+  if (!db_->Execute(
+          "ALTER TABLE share_info ADD COLUMN notification_state BLOB")) {
+    return false;
+  }
+  SetVersion(73);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion73To74() {
+  // Version 74 added five autofill-migration bookkeeping columns to the
+  // 'share_info' table, each an INT defaulting to 0.  Apply the ALTER
+  // statements in order, bailing out on the first failure.
+  const char* kAddColumnStatements[] = {
+    "ALTER TABLE share_info ADD COLUMN "
+        "autofill_migration_state INT default 0",
+    "ALTER TABLE share_info ADD COLUMN "
+        "bookmarks_added_during_autofill_migration "
+        "INT default 0",
+    "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
+        "INT default 0",
+    "ALTER TABLE share_info ADD COLUMN "
+        "autofill_entries_added_during_migration "
+        "INT default 0",
+    "ALTER TABLE share_info ADD COLUMN "
+        "autofill_profiles_added_during_migration "
+        "INT default 0"
+  };
+  const size_t kStatementCount =
+      sizeof(kAddColumnStatements) / sizeof(kAddColumnStatements[0]);
+  for (size_t i = 0; i < kStatementCount; ++i) {
+    if (!db_->Execute(kAddColumnStatements[i]))
+      return false;
+  }
+
+  SetVersion(74);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion74To75() {
+  // In version 74, there was a table 'models':
+  //   blob model_id (entity specifics, primary key)
+  //   int last_download_timestamp
+  //   boolean initial_sync_ended
+  // In version 75, we deprecated the integer-valued last_download_timestamp,
+  // using instead a protobuf-valued progress_marker field:
+  //   blob progress_marker
+  // The progress_marker values are initialized from the value of
+  // last_download_timestamp, thereby preserving the download state.
+
+  // Move aside the old table and create a new empty one at the current schema.
+  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
+    return false;
+  if (!CreateModelsTable())
+    return false;
+
+  sql::Statement query(db_->GetUniqueStatement(
+      "SELECT model_id, last_download_timestamp, initial_sync_ended "
+      "FROM temp_models"));
+
+  sql::Statement update(db_->GetUniqueStatement(
+      "INSERT INTO models (model_id, "
+      "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));
+
+  while (query.Step()) {
+    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
+                                            query.ColumnByteLength(0));
+    // Rows whose model_id does not decode to a known type are silently
+    // dropped; only recognized types are carried into the new table.
+    if (type != UNSPECIFIED) {
+      // Set the |timestamp_token_for_migration| on a new
+      // DataTypeProgressMarker, using the old value of last_download_timestamp.
+      // The server will turn this into a real token on our behalf the next
+      // time we check for updates.
+      sync_pb::DataTypeProgressMarker progress_marker;
+      progress_marker.set_data_type_id(
+          GetSpecificsFieldNumberFromModelType(type));
+      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
+      std::string progress_blob;
+      progress_marker.SerializeToString(&progress_blob);
+
+      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
+      update.BindBlob(1, progress_blob.data(), progress_blob.length());
+      update.BindBool(2, query.ColumnBool(2));
+      if (!update.Run())
+        return false;
+      update.Reset();
+    }
+  }
+  if (!query.Succeeded())
+    return false;
+
+  // Drop the old table.
+  SafeDropTable("temp_models");
+
+  SetVersion(75);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion75To76() {
+  // Version 76 dropped five autofill-migration columns from 'share_info':
+  //   autofill_migration_state
+  //   bookmarks_added_during_autofill_migration
+  //   autofill_migration_time
+  //   autofill_entries_added_during_migration
+  //   autofill_profiles_added_during_migration
+  // No data needs to be carried over; a column refresh drops them.
+  needs_column_refresh_ = true;
+  SetVersion(76);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion76To77() {
+  // This change changes the format of stored timestamps to ms since
+  // the Unix epoch.
+#if defined(OS_WIN)
+// On Windows, we used to store timestamps in FILETIME format (100s of
+// ns since Jan 1, 1601). Magic numbers taken from
+// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
+// .
+// Expands to a SQL assignment, e.g. "mtime = mtime / 10000 - 11644473600000".
+#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
+#else
+// On other platforms, we used to store timestamps in time_t format (s
+// since the Unix epoch).
+#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
+#endif
+  // Convert all four timestamp columns of 'metas' in a single UPDATE.
+  sql::Statement update_timestamps(db_->GetUniqueStatement(
+      "UPDATE metas SET "
+      TO_UNIX_TIME_MS(mtime) ", "
+      TO_UNIX_TIME_MS(server_mtime) ", "
+      TO_UNIX_TIME_MS(ctime) ", "
+      TO_UNIX_TIME_MS(server_ctime)));
+#undef TO_UNIX_TIME_MS
+  if (!update_timestamps.Run())
+    return false;
+  SetVersion(77);
+  return true;
+}
+
+bool DirectoryBackingStore::MigrateVersion77To78() {
+  // Version 78 added a 'base_server_specifics' BLOB column to 'metas'.
+  if (!db_->Execute(
+          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB"))
+    return false;
+  SetVersion(78);
+  return true;
+}
+
+// Builds the full current-version schema for a brand-new database and
+// seeds it with the initial rows (version stamp, share metadata, and the
+// root entry).  Returns false on the first failed statement.
+bool DirectoryBackingStore::CreateTables() {
+  DVLOG(1) << "First run, creating tables";
+  // Create two little tables share_version and share_info
+  if (!db_->Execute(
+          "CREATE TABLE share_version ("
+          "id VARCHAR(128) primary key, data INT)")) {
+    return false;
+  }
+
+  {
+    // Stamp the database with the current schema version.
+    sql::Statement s(db_->GetUniqueStatement(
+        "INSERT INTO share_version VALUES(?, ?)"));
+    s.BindString(0, dir_name_);
+    s.BindInt(1, kCurrentDBVersion);
+
+    if (!s.Run())
+      return false;
+  }
+
+  const bool kCreateAsTempShareInfo = false;
+  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
+    return false;
+  }
+
+  {
+    // Seed share_info with a single row for this share.  next_id starts
+    // at -2 (local ids count downward from there).
+    sql::Statement s(db_->GetUniqueStatement(
+        "INSERT INTO share_info VALUES"
+        "(?, "  // id
+        "?, "   // name
+        "?, "   // store_birthday
+        "?, "   // db_create_version
+        "?, "   // db_create_time
+        "-2, "  // next_id
+        "?, "   // cache_guid
+        "?);"))ved;  // notification_state
+    s.BindString(0, dir_name_);                // id
+    s.BindString(1, dir_name_);                // name
+    s.BindString(2, "");                       // store_birthday
+    // TODO(akalin): Remove this unused db_create_version field. (Or
+    // actually use it for something.) http://crbug.com/118356
+    s.BindString(3, "Unknown");                // db_create_version
+    // NOTE(review): the int32 cast truncates time_t after 2038 -- confirm
+    // whether this field is ever read before widening it.
+    s.BindInt(4, static_cast<int32>(time(0)));  // db_create_time
+    s.BindString(5, GenerateCacheGUID());      // cache_guid
+    s.BindBlob(6, NULL, 0);                    // notification_state
+
+    if (!s.Run())
+      return false;
+  }
+
+  if (!CreateModelsTable())
+    return false;
+
+  // Create the big metas table.
+  if (!CreateMetasTable(false))
+    return false;
+
+  {
+    // Insert the entry for the root into the metas table.
+    const int64 now = browser_sync::TimeToProtoTime(base::Time::Now());
+    sql::Statement s(db_->GetUniqueStatement(
+        "INSERT INTO metas "
+        "( id, metahandle, is_dir, ctime, mtime) "
+        "VALUES ( \"r\", 1, 1, ?, ?)"));
+    s.BindInt64(0, now);
+    s.BindInt64(1, now);
+
+    if (!s.Run())
+      return false;
+  }
+
+  return true;
+}
+
+bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
+  // Builds either 'metas' or 'temp_metas' using the shared column spec.
+  std::string create_query("CREATE TABLE ");
+  create_query.append(is_temporary ? "temp_metas" : "metas");
+  create_query.append(ComposeCreateTableColumnSpecs());
+  return db_->Execute(create_query.c_str());
+}
+
+bool DirectoryBackingStore::CreateV71ModelsTable() {
+  // Old schema for the Models table, in effect for versions 71 through 74.
+  // initial_sync_ended gets set once the server returns zero updates,
+  // letting us detect the end of the initial sync for the type.
+  const char kCreateSql[] =
+      "CREATE TABLE models ("
+      "model_id BLOB primary key, "
+      "last_download_timestamp INT, "
+      "initial_sync_ended BOOLEAN default 0)";
+  return db_->Execute(kCreateSql);
+}
+
+bool DirectoryBackingStore::CreateModelsTable() {
+  // Current schema for the Models table, from version 75 onward.  If you
+  // change this, double-check its use in the v74-v75 migration.
+  // initial_sync_ended gets set once the server returns zero updates,
+  // letting us detect the end of the initial sync for the type.
+  const char kCreateSql[] =
+      "CREATE TABLE models ("
+      "model_id BLOB primary key, "
+      "progress_marker BLOB, "
+      "initial_sync_ended BOOLEAN default 0)";
+  return db_->Execute(kCreateSql);
+}
+
+bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
+  // Current schema for the ShareInfo table, from version 76 onward.
+  // Builds either 'share_info' or 'temp_share_info'.
+  std::string create_query("CREATE TABLE ");
+  create_query.append(is_temporary ? "temp_share_info" : "share_info");
+  create_query.append(" ("
+                      "id TEXT primary key, "
+                      "name TEXT, "
+                      "store_birthday TEXT, "
+                      "db_create_version TEXT, "
+                      "db_create_time INT, "
+                      "next_id INT default -2, "
+                      "cache_guid TEXT "  // Trailing space kept verbatim.
+                      ", notification_state BLOB"
+                      ")");
+  return db_->Execute(create_query.c_str());
+}
+
+bool DirectoryBackingStore::CreateShareInfoTableVersion71(
+    bool is_temporary) {
+  // Schema for the ShareInfo table used from versions 71 to 72.
+  // Builds either 'share_info' or 'temp_share_info'.
+  std::string create_query("CREATE TABLE ");
+  create_query.append(is_temporary ? "temp_share_info" : "share_info");
+  create_query.append(" ("
+                      "id TEXT primary key, "
+                      "name TEXT, "
+                      "store_birthday TEXT, "
+                      "db_create_version TEXT, "
+                      "db_create_time INT, "
+                      "next_id INT default -2, "
+                      "cache_guid TEXT )");
+  return db_->Execute(create_query.c_str());
+}
+
+} // namespace syncable
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
new file mode 100644
index 0000000..4c9bd846
--- /dev/null
+++ b/sync/syncable/directory_backing_store.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+#define SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+#pragma once
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "sql/connection.h"
+#include "sql/statement.h"
+#include "sync/syncable/dir_open_result.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace sync_pb {
+class EntitySpecifics;
+}
+
+namespace syncable {
+
+struct ColumnSpec;
+typedef Directory::MetahandlesIndex MetahandlesIndex;
+
+// Interface that provides persistence for a syncable::Directory object. You can
+// load all the persisted data to prime a syncable::Directory on startup by
+// invoking Load. The only other thing you (or more correctly, a Directory) can
+// do here is save any changes that have occurred since calling Load, which can
+// be done periodically as often as desired.
+//
+// The DirectoryBackingStore will own an sqlite lock on its database for most of
+// its lifetime. You must not have two DirectoryBackingStore objects accessing
+// the database simultaneously. Because the lock exists at the database level,
+// not even two separate browser instances would be able to acquire it
+// simultaneously.
+//
+// This class is abstract so that we can extend it in interesting ways for use
+// in tests. The concrete class used in non-test scenarios is
+// OnDiskDirectoryBackingStore.
+class DirectoryBackingStore : public base::NonThreadSafe {
+ public:
+  explicit DirectoryBackingStore(const std::string& dir_name);
+  virtual ~DirectoryBackingStore();
+
+  // Loads and drops all currently persisted meta entries into |entry_bucket|
+  // and loads appropriate persisted kernel info into |info_bucket|.
+  //
+  // This function can perform some cleanup tasks behind the scenes. It will
+  // clean up unused entries from the database and migrate to the latest
+  // database version. The caller can safely ignore these details.
+  //
+  // NOTE: On success (return value of OPENED), the buckets are populated with
+  // newly allocated items, meaning ownership is bestowed upon the caller.
+  virtual DirOpenResult Load(MetahandlesIndex* entry_bucket,
+                             Directory::KernelLoadInfo* kernel_load_info) = 0;
+
+  // Updates the on-disk store with the input |snapshot| as a database
+  // transaction. Does NOT open any syncable transactions as this would cause
+  // opening transactions elsewhere to block on synchronous I/O.
+  // DO NOT CALL THIS FROM MORE THAN ONE THREAD EVER. Also, whichever thread
+  // calls SaveChanges *must* be the thread that owns/destroys |this|.
+  virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot);
+
+ protected:
+  // For test classes.
+  DirectoryBackingStore(const std::string& dir_name,
+                        sql::Connection* connection);
+
+  // General Directory initialization and load helpers.
+  bool InitializeTables();
+  bool CreateTables();
+
+  // Create 'share_info' or 'temp_share_info' depending on value of
+  // is_temporary. Returns true on success.
+  bool CreateShareInfoTable(bool is_temporary);
+
+  // Same as above, but at the schema used from versions 71 to 72.
+  bool CreateShareInfoTableVersion71(bool is_temporary);
+  // Create 'metas' or 'temp_metas' depending on value of is_temporary.
+  bool CreateMetasTable(bool is_temporary);
+  bool CreateModelsTable();
+  bool CreateV71ModelsTable();
+
+  // We don't need to load any synced and applied deleted entries, we can
+  // in fact just purge them forever on startup.
+  bool DropDeletedEntries();
+  // Drops a table if it exists, harmless if the table did not already exist.
+  bool SafeDropTable(const char* table_name);
+
+  // Load helpers for entries and attributes.
+  bool LoadEntries(MetahandlesIndex* entry_bucket);
+  bool LoadInfo(Directory::KernelLoadInfo* info);
+
+  // Save/update helpers for entries. Return false if sqlite commit fails.
+  bool SaveEntryToDB(const EntryKernel& entry);
+  bool SaveNewEntryToDB(const EntryKernel& entry);
+  bool UpdateEntryToDB(const EntryKernel& entry);
+
+  // Shared implementation of Load() used by subclasses.
+  DirOpenResult DoLoad(MetahandlesIndex* entry_bucket,
+                       Directory::KernelLoadInfo* kernel_load_info);
+
+  // Close save_dbhandle_. Broken out for testing.
+  void EndSave();
+
+  // Removes each entry whose metahandle is in |handles| from the database.
+  // Does synchronous I/O. Returns false on error.
+  bool DeleteEntries(const MetahandleSet& handles);
+
+  // Drop all tables in preparation for reinitialization.
+  void DropAllTables();
+
+  // Serialization helpers for syncable::ModelType. These convert between
+  // the ModelType enum and the values we persist in the database to identify
+  // a model. We persist a default instance of the specifics protobuf as the
+  // ID, rather than the enum value.
+  static ModelType ModelIdToModelTypeEnum(const void* data, int length);
+  static std::string ModelTypeEnumToModelId(ModelType model_type);
+
+  // Returns a base64-encoded string of 128 random bits.
+  static std::string GenerateCacheGUID();
+
+  // Runs an integrity check on the current database. If the
+  // integrity check fails, false is returned and error is populated
+  // with an error message.
+  // NOTE(review): takes a raw sqlite3* while the rest of the class uses
+  // sql::Connection, and <sqlite3.h> is not included here -- confirm the
+  // declaration is picked up transitively.
+  bool CheckIntegrity(sqlite3* handle, std::string* error) const;
+
+  // Migration utilities.
+  bool RefreshColumns();
+  bool SetVersion(int version);
+  int GetVersion();
+
+  // Folds legacy columns into a specifics protobuf column; see the .cc
+  // file for the full contract.
+  bool MigrateToSpecifics(const char* old_columns,
+                          const char* specifics_column,
+                          void(*handler_function) (
+                              sql::Statement* old_value_query,
+                              int old_value_column,
+                              sync_pb::EntitySpecifics* mutable_new_value));
+
+  // Individual version migrations.
+  bool MigrateVersion67To68();
+  bool MigrateVersion68To69();
+  bool MigrateVersion69To70();
+  bool MigrateVersion70To71();
+  bool MigrateVersion71To72();
+  bool MigrateVersion72To73();
+  bool MigrateVersion73To74();
+  bool MigrateVersion74To75();
+  bool MigrateVersion75To76();
+  bool MigrateVersion76To77();
+  bool MigrateVersion77To78();
+
+  // Database connection; owned by this object.
+  scoped_ptr<sql::Connection> db_;
+  // Prepared statement reused when saving entries.
+  sql::Statement save_entry_statement_;
+  // Name of the share this store persists; used as the share_info key.
+  std::string dir_name_;
+
+  // Set to true if migration left some old columns around that need to be
+  // discarded.
+  bool needs_column_refresh_;
+
+  DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
new file mode 100644
index 0000000..5161817
--- /dev/null
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -0,0 +1,2162 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include <string>
+
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/scoped_temp_dir.h"
+#include "base/stl_util.h"
+#include "base/string_number_conversions.h"
+#include "sync/syncable/directory_backing_store.h"
+#include "sync/syncable/on_disk_directory_backing_store.h"
+#include "sync/syncable/syncable-inl.h"
+#include "sync/syncable/syncable.h"
+#include "sync/test/test_directory_backing_store.h"
+#include "sync/util/time.h"
+#include "sql/connection.h"
+#include "sql/statement.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "testing/gtest/include/gtest/gtest-param-test.h"
+
+namespace syncable {
+
+extern const int32 kCurrentDBVersion;
+
+// Fixture for the database migration tests.  Parameterized by an int --
+// presumably the starting schema version; confirm against the
+// INSTANTIATE_TEST_CASE_P site.  Each SetUpVersionNNDatabase() helper
+// populates a connection with a canned database at that version.
+class MigrationTest : public testing::TestWithParam<int> {
+ public:
+  virtual void SetUp() {
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  }
+
+ protected:
+  std::string GetUsername() {
+    return "nick@chromium.org";
+  }
+
+  FilePath GetDatabasePath() {
+    return temp_dir_.path().Append(Directory::kSyncDatabaseFilename);
+  }
+
+  // Runs Load() for its side effects only; the loaded entries are freed
+  // by the STLElementDeleter when this returns.
+  static bool LoadAndIgnoreReturnedData(DirectoryBackingStore *dbs) {
+    MetahandlesIndex metas;
+    STLElementDeleter<MetahandlesIndex> index_deleter(&metas);
+    Directory::KernelLoadInfo kernel_load_info;
+    return dbs->Load(&metas, &kernel_load_info) == OPENED;
+  }
+
+  void SetUpVersion67Database(sql::Connection* connection);
+  void SetUpVersion68Database(sql::Connection* connection);
+  void SetUpVersion69Database(sql::Connection* connection);
+  void SetUpVersion70Database(sql::Connection* connection);
+  void SetUpVersion71Database(sql::Connection* connection);
+  void SetUpVersion72Database(sql::Connection* connection);
+  void SetUpVersion73Database(sql::Connection* connection);
+  void SetUpVersion74Database(sql::Connection* connection);
+  void SetUpVersion75Database(sql::Connection* connection);
+  void SetUpVersion76Database(sql::Connection* connection);
+  void SetUpVersion77Database(sql::Connection* connection);
+
+  // Builds the newest canned database, loads it (which applies any
+  // pending migrations), and asserts it ends up at kCurrentDBVersion
+  // with no column refresh pending.
+  void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
+    SetUpVersion77Database(connection);  // Prepopulates data.
+    scoped_ptr<TestDirectoryBackingStore> dbs(
+        new TestDirectoryBackingStore(GetUsername(), connection));
+
+    ASSERT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
+    ASSERT_FALSE(dbs->needs_column_refresh_);
+    ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
+  }
+
+ private:
+  ScopedTempDir temp_dir_;
+};
+
+// Fixture for non-parameterized DirectoryBackingStore tests; inherits the
+// temp-directory setup and helpers from MigrationTest.
+class DirectoryBackingStoreTest : public MigrationTest {};
+
+#if defined(OS_WIN)
+
+// On Windows, we used to store timestamps in FILETIME format.
+// Note: there is no _3 value on any platform; metahandle 3 is not used by
+// the canned test databases.
+#define LEGACY_META_PROTO_TIMES_1 129079956640320000LL
+#define LEGACY_META_PROTO_TIMES_2 128976886618480000LL
+#define LEGACY_META_PROTO_TIMES_4 129002163642690000LL
+#define LEGACY_META_PROTO_TIMES_5 129001555500000000LL
+#define LEGACY_META_PROTO_TIMES_6 129053976170000000LL
+#define LEGACY_META_PROTO_TIMES_7 128976864758480000LL
+#define LEGACY_META_PROTO_TIMES_8 128976864758480000LL
+#define LEGACY_META_PROTO_TIMES_9 128976864758480000LL
+#define LEGACY_META_PROTO_TIMES_10 128976864758480000LL
+#define LEGACY_META_PROTO_TIMES_11 129079956948440000LL
+#define LEGACY_META_PROTO_TIMES_12 129079957513650000LL
+#define LEGACY_META_PROTO_TIMES_13 129079957985300000LL
+#define LEGACY_META_PROTO_TIMES_14 129079958383000000LL
+
+// String forms of the values above, for splicing into SQL text.
+#define LEGACY_META_PROTO_TIMES_STR_1 "129079956640320000"
+#define LEGACY_META_PROTO_TIMES_STR_2 "128976886618480000"
+#define LEGACY_META_PROTO_TIMES_STR_4 "129002163642690000"
+#define LEGACY_META_PROTO_TIMES_STR_5 "129001555500000000"
+#define LEGACY_META_PROTO_TIMES_STR_6 "129053976170000000"
+#define LEGACY_META_PROTO_TIMES_STR_7 "128976864758480000"
+#define LEGACY_META_PROTO_TIMES_STR_8 "128976864758480000"
+#define LEGACY_META_PROTO_TIMES_STR_9 "128976864758480000"
+#define LEGACY_META_PROTO_TIMES_STR_10 "128976864758480000"
+#define LEGACY_META_PROTO_TIMES_STR_11 "129079956948440000"
+#define LEGACY_META_PROTO_TIMES_STR_12 "129079957513650000"
+#define LEGACY_META_PROTO_TIMES_STR_13 "129079957985300000"
+#define LEGACY_META_PROTO_TIMES_STR_14 "129079958383000000"
+
+// Generated via:
+//
+// ruby -ane '$F[1].sub!("LEGACY_", ""); $F[2] = Integer($F[2].sub!("LL", "")) / 10000 - 11644473600000; print "#{$F[0]} #{$F[1]} #{$F[2]}LL\n"'
+//
+// Magic numbers taken from
+// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime .
+
+// Now we store them in Java format (ms since the Unix epoch).
+#define META_PROTO_TIMES_1 1263522064032LL
+#define META_PROTO_TIMES_2 1253215061848LL
+#define META_PROTO_TIMES_4 1255742764269LL
+#define META_PROTO_TIMES_5 1255681950000LL
+#define META_PROTO_TIMES_6 1260924017000LL
+#define META_PROTO_TIMES_7 1253212875848LL
+#define META_PROTO_TIMES_8 1253212875848LL
+#define META_PROTO_TIMES_9 1253212875848LL
+#define META_PROTO_TIMES_10 1253212875848LL
+#define META_PROTO_TIMES_11 1263522094844LL
+#define META_PROTO_TIMES_12 1263522151365LL
+#define META_PROTO_TIMES_13 1263522198530LL
+#define META_PROTO_TIMES_14 1263522238300LL
+
+#define META_PROTO_TIMES_STR_1 "1263522064032"
+#define META_PROTO_TIMES_STR_2 "1253215061848"
+#define META_PROTO_TIMES_STR_4 "1255742764269"
+#define META_PROTO_TIMES_STR_5 "1255681950000"
+#define META_PROTO_TIMES_STR_6 "1260924017000"
+#define META_PROTO_TIMES_STR_7 "1253212875848"
+#define META_PROTO_TIMES_STR_8 "1253212875848"
+#define META_PROTO_TIMES_STR_9 "1253212875848"
+#define META_PROTO_TIMES_STR_10 "1253212875848"
+#define META_PROTO_TIMES_STR_11 "1263522094844"
+#define META_PROTO_TIMES_STR_12 "1263522151365"
+#define META_PROTO_TIMES_STR_13 "1263522198530"
+#define META_PROTO_TIMES_STR_14 "1263522238300"
+
+#else
+
+// On other platforms, we used to store timestamps in time_t format (s
+// since the Unix epoch).
+#define LEGACY_META_PROTO_TIMES_1 1263522064LL
+#define LEGACY_META_PROTO_TIMES_2 1253215061LL
+#define LEGACY_META_PROTO_TIMES_4 1255742764LL
+#define LEGACY_META_PROTO_TIMES_5 1255681950LL
+#define LEGACY_META_PROTO_TIMES_6 1260924017LL
+#define LEGACY_META_PROTO_TIMES_7 1253212875LL
+#define LEGACY_META_PROTO_TIMES_8 1253212875LL
+#define LEGACY_META_PROTO_TIMES_9 1253212875LL
+#define LEGACY_META_PROTO_TIMES_10 1253212875LL
+#define LEGACY_META_PROTO_TIMES_11 1263522094LL
+#define LEGACY_META_PROTO_TIMES_12 1263522151LL
+#define LEGACY_META_PROTO_TIMES_13 1263522198LL
+#define LEGACY_META_PROTO_TIMES_14 1263522238LL
+
+#define LEGACY_META_PROTO_TIMES_STR_1 "1263522064"
+#define LEGACY_META_PROTO_TIMES_STR_2 "1253215061"
+#define LEGACY_META_PROTO_TIMES_STR_4 "1255742764"
+#define LEGACY_META_PROTO_TIMES_STR_5 "1255681950"
+#define LEGACY_META_PROTO_TIMES_STR_6 "1260924017"
+#define LEGACY_META_PROTO_TIMES_STR_7 "1253212875"
+#define LEGACY_META_PROTO_TIMES_STR_8 "1253212875"
+#define LEGACY_META_PROTO_TIMES_STR_9 "1253212875"
+#define LEGACY_META_PROTO_TIMES_STR_10 "1253212875"
+#define LEGACY_META_PROTO_TIMES_STR_11 "1263522094"
+#define LEGACY_META_PROTO_TIMES_STR_12 "1263522151"
+#define LEGACY_META_PROTO_TIMES_STR_13 "1263522198"
+#define LEGACY_META_PROTO_TIMES_STR_14 "1263522238"
+
+// Now we store them in Java format (ms since the Unix epoch).
+#define META_PROTO_TIMES_1 1263522064000LL
+#define META_PROTO_TIMES_2 1253215061000LL
+#define META_PROTO_TIMES_4 1255742764000LL
+#define META_PROTO_TIMES_5 1255681950000LL
+#define META_PROTO_TIMES_6 1260924017000LL
+#define META_PROTO_TIMES_7 1253212875000LL
+#define META_PROTO_TIMES_8 1253212875000LL
+#define META_PROTO_TIMES_9 1253212875000LL
+#define META_PROTO_TIMES_10 1253212875000LL
+#define META_PROTO_TIMES_11 1263522094000LL
+#define META_PROTO_TIMES_12 1263522151000LL
+#define META_PROTO_TIMES_13 1263522198000LL
+#define META_PROTO_TIMES_14 1263522238000LL
+
+#define META_PROTO_TIMES_STR_1 "1263522064000"
+#define META_PROTO_TIMES_STR_2 "1253215061000"
+#define META_PROTO_TIMES_STR_4 "1255742764000"
+#define META_PROTO_TIMES_STR_5 "1255681950000"
+#define META_PROTO_TIMES_STR_6 "1260924017000"
+#define META_PROTO_TIMES_STR_7 "1253212875000"
+#define META_PROTO_TIMES_STR_8 "1253212875000"
+#define META_PROTO_TIMES_STR_9 "1253212875000"
+#define META_PROTO_TIMES_STR_10 "1253212875000"
+#define META_PROTO_TIMES_STR_11 "1263522094000"
+#define META_PROTO_TIMES_STR_12 "1263522151000"
+#define META_PROTO_TIMES_STR_13 "1263522198000"
+#define META_PROTO_TIMES_STR_14 "1263522238000"
+
+#endif
+
+// Helper macros for the database dumps in the SetUpVersion*Database
+// functions.  The *_VALS forms expand to the four comma-separated
+// timestamp fields (mtime, server_mtime, ctime, server_ctime).
+#define LEGACY_META_PROTO_TIMES(x) LEGACY_META_PROTO_TIMES_##x
+#define LEGACY_META_PROTO_TIMES_STR(x) LEGACY_META_PROTO_TIMES_STR_##x
+#define LEGACY_PROTO_TIME_VALS(x) \
+    LEGACY_META_PROTO_TIMES_STR(x) "," \
+    LEGACY_META_PROTO_TIMES_STR(x) "," \
+    LEGACY_META_PROTO_TIMES_STR(x) "," \
+    LEGACY_META_PROTO_TIMES_STR(x)
+#define META_PROTO_TIMES(x) META_PROTO_TIMES_##x
+#define META_PROTO_TIMES_STR(x) META_PROTO_TIMES_STR_##x
+#define META_PROTO_TIMES_VALS(x) \
+    META_PROTO_TIMES_STR(x) "," \
+    META_PROTO_TIMES_STR(x) "," \
+    META_PROTO_TIMES_STR(x) "," \
+    META_PROTO_TIMES_STR(x)
+
+namespace {
+
+// Helper functions for testing.
+
+// Controls whether the expected-times helpers below include entries for
+// the deleted items (metahandles 2, 4 and 5).
+enum ShouldIncludeDeletedItems {
+  INCLUDE_DELETED_ITEMS,
+  DONT_INCLUDE_DELETED_ITEMS
+};
+
+// Returns a map from metahandle -> expected legacy time (in proto
+// format).
+std::map<int64, int64> GetExpectedLegacyMetaProtoTimes(
+    enum ShouldIncludeDeletedItems include_deleted) {
+  std::map<int64, int64> times;
+  times[1] = LEGACY_META_PROTO_TIMES(1);
+  // Metahandles 2, 4 and 5 are only present when the caller expects the
+  // deleted items to still be around.
+  if (include_deleted == INCLUDE_DELETED_ITEMS) {
+    times[2] = LEGACY_META_PROTO_TIMES(2);
+    times[4] = LEGACY_META_PROTO_TIMES(4);
+    times[5] = LEGACY_META_PROTO_TIMES(5);
+  }
+  times[6] = LEGACY_META_PROTO_TIMES(6);
+  times[7] = LEGACY_META_PROTO_TIMES(7);
+  times[8] = LEGACY_META_PROTO_TIMES(8);
+  times[9] = LEGACY_META_PROTO_TIMES(9);
+  times[10] = LEGACY_META_PROTO_TIMES(10);
+  times[11] = LEGACY_META_PROTO_TIMES(11);
+  times[12] = LEGACY_META_PROTO_TIMES(12);
+  times[13] = LEGACY_META_PROTO_TIMES(13);
+  times[14] = LEGACY_META_PROTO_TIMES(14);
+  return times;
+}
+
+// Returns a map from metahandle -> expected time (in proto format).
+std::map<int64, int64> GetExpectedMetaProtoTimes(
+ enum ShouldIncludeDeletedItems include_deleted) {
+ std::map<int64, int64> expected_meta_proto_times;
+ expected_meta_proto_times[1] = META_PROTO_TIMES(1);
+ if (include_deleted == INCLUDE_DELETED_ITEMS) {
+ expected_meta_proto_times[2] = META_PROTO_TIMES(2);
+ expected_meta_proto_times[4] = META_PROTO_TIMES(4);
+ expected_meta_proto_times[5] = META_PROTO_TIMES(5);
+ }
+ expected_meta_proto_times[6] = META_PROTO_TIMES(6);
+ expected_meta_proto_times[7] = META_PROTO_TIMES(7);
+ expected_meta_proto_times[8] = META_PROTO_TIMES(8);
+ expected_meta_proto_times[9] = META_PROTO_TIMES(9);
+ expected_meta_proto_times[10] = META_PROTO_TIMES(10);
+ expected_meta_proto_times[11] = META_PROTO_TIMES(11);
+ expected_meta_proto_times[12] = META_PROTO_TIMES(12);
+ expected_meta_proto_times[13] = META_PROTO_TIMES(13);
+ expected_meta_proto_times[14] = META_PROTO_TIMES(14);
+ return expected_meta_proto_times;
+}
+
+// Returns a map from metahandle -> expected time (as a Time object).
+std::map<int64, base::Time> GetExpectedMetaTimes() {
+ std::map<int64, base::Time> expected_meta_times;
+ const std::map<int64, int64>& expected_meta_proto_times =
+ GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS);
+ for (std::map<int64, int64>::const_iterator it =
+ expected_meta_proto_times.begin();
+ it != expected_meta_proto_times.end(); ++it) {
+ expected_meta_times[it->first] =
+ browser_sync::ProtoTimeToTime(it->second);
+ }
+ return expected_meta_times;
+}
+
+// Extracts a map from metahandle -> time (in proto format) from the
+// given database.
+std::map<int64, int64> GetMetaProtoTimes(sql::Connection *db) {
+ sql::Statement s(db->GetCachedStatement(
+ SQL_FROM_HERE,
+ "SELECT metahandle, mtime, server_mtime, ctime, server_ctime "
+ "FROM metas"));
+ EXPECT_EQ(5, s.ColumnCount());
+ std::map<int64, int64> meta_times;
+ while (s.Step()) {
+ int64 metahandle = s.ColumnInt64(0);
+ int64 mtime = s.ColumnInt64(1);
+ int64 server_mtime = s.ColumnInt64(2);
+ int64 ctime = s.ColumnInt64(3);
+ int64 server_ctime = s.ColumnInt64(4);
+ EXPECT_EQ(mtime, server_mtime);
+ EXPECT_EQ(mtime, ctime);
+ EXPECT_EQ(mtime, server_ctime);
+ meta_times[metahandle] = mtime;
+ }
+ EXPECT_TRUE(s.Succeeded());
+ return meta_times;
+}
+
+::testing::AssertionResult AssertTimesMatch(const char* t1_expr,
+ const char* t2_expr,
+ const base::Time& t1,
+ const base::Time& t2) {
+ if (t1 == t2)
+ return ::testing::AssertionSuccess();
+
+ return ::testing::AssertionFailure()
+ << t1_expr << " and " << t2_expr
+ << " (internal values: " << t1.ToInternalValue()
+ << " and " << t2.ToInternalValue()
+ << ") (proto time: " << browser_sync::TimeToProtoTime(t1)
+ << " and " << browser_sync::TimeToProtoTime(t2)
+ << ") do not match";
+}
+
// Expect that all four time fields (CTIME, SERVER_CTIME, MTIME,
// SERVER_MTIME) of the given entry kernel equal the given time.
// Uses AssertTimesMatch so a mismatch reports the offending value in
// both internal and proto form.
void ExpectTime(const EntryKernel& entry_kernel,
                const base::Time& expected_time) {
  EXPECT_PRED_FORMAT2(AssertTimesMatch,
                      expected_time, entry_kernel.ref(CTIME));
  EXPECT_PRED_FORMAT2(AssertTimesMatch,
                      expected_time, entry_kernel.ref(SERVER_CTIME));
  EXPECT_PRED_FORMAT2(AssertTimesMatch,
                      expected_time, entry_kernel.ref(MTIME));
  EXPECT_PRED_FORMAT2(AssertTimesMatch,
                      expected_time, entry_kernel.ref(SERVER_MTIME));
}
+
+// Expect that all the entries in |index| have times matching those in
+// the given map (from metahandle to expect time).
+void ExpectTimes(const MetahandlesIndex& index,
+ const std::map<int64, base::Time>& expected_times) {
+ for (MetahandlesIndex::const_iterator it = index.begin();
+ it != index.end(); ++it) {
+ int64 meta_handle = (*it)->ref(META_HANDLE);
+ SCOPED_TRACE(meta_handle);
+ std::map<int64, base::Time>::const_iterator it2 =
+ expected_times.find(meta_handle);
+ if (it2 == expected_times.end()) {
+ ADD_FAILURE() << "Could not find expected time for " << meta_handle;
+ continue;
+ }
+ ExpectTime(**it, it2->second);
+ }
+}
+
+} // namespace
+
// Populates |connection| with a canned version 67 sync database dump
// (schema + 13 "metas" rows + share_info/share_version rows).
// The LEGACY_PROTO_TIME_VALS(x) macro splices the four time-column
// values (mtime, server_mtime, ctime, server_ctime) for metahandle |x|
// into each INSERT.
void MigrationTest::SetUpVersion67Database(sql::Connection* connection) {
  // This is a version 67 database dump whose contents were backformed from
  // the contents of the version 68 database dump (the v68 migration was
  // actually written first).
  ASSERT_TRUE(connection->is_open());
  ASSERT_TRUE(connection->BeginTransaction());
  ASSERT_TRUE(connection->Execute(
      "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
      "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
      // v67 metas schema: still has the name/unsanitized_name/server_name
      // and *_is_bookmark_object columns that later versions drop.
      "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
      "base_version bigint default -1,server_version bigint default 0,"
      "mtime bigint default 0,server_mtime bigint default 0,"
      "ctime bigint default 0,server_ctime bigint default 0,"
      "server_position_in_parent bigint default 0,"
      "local_external_id bigint default 0,id varchar(255) default 'r',"
      "parent_id varchar(255) default 'r',"
      "server_parent_id varchar(255) default 'r',"
      "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
      "is_unsynced bit default 0,is_unapplied_update bit default 0,"
      "is_del bit default 0,is_dir bit default 0,"
      "is_bookmark_object bit default 0,server_is_dir bit default 0,"
      "server_is_del bit default 0,server_is_bookmark_object bit default 0,"
      "name varchar(255), " /* COLLATE PATHNAME, */
      "unsanitized_name varchar(255)," /* COLLATE PATHNAME, */
      "non_unique_name varchar,"
      "server_name varchar(255)," /* COLLATE PATHNAME */
      "server_non_unique_name varchar,"
      "bookmark_url varchar,server_bookmark_url varchar,"
      "singleton_tag varchar,bookmark_favicon blob,"
      "server_bookmark_favicon blob);"
      "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
      "NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
      ",-2097152,"
      "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
      "'Deleted Item',NULL,'Deleted Item','Deleted Item','Deleted Item',"
      "'http://www.google.com/','http://www.google.com/2',NULL,'AASGASGA',"
      "'ASADGADGADG');"
      "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
      ",-3145728,"
      "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
      "'Welcome to Chromium',NULL,'Welcome to Chromium',"
      "'Welcome to Chromium','Welcome to Chromium',"
      "'http://www.google.com/chrome/intl/en/welcome.html',"
      "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
      "NULL);"
      "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
      ",1048576,"
      "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
      "'Google',NULL,'Google','Google','Google','http://www.google.com/',"
      "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
      "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
      ",-4194304,"
      "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
      "'The Internet',NULL,'The Internet','The Internet',"
      "'The Internet',NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
      ","
      "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
      "'Google Chrome',NULL,'Google Chrome','Google Chrome',"
      "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL);"
      "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
      ",1048576,"
      "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
      "NULL,'Bookmarks','Bookmarks','Bookmarks',NULL,NULL,"
      "'google_chrome_bookmarks',NULL,NULL);"
      "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
      ","
      "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
      "1,'Bookmark Bar',NULL,'Bookmark Bar','Bookmark Bar','Bookmark Bar',"
      "NULL,NULL,'bookmark_bar',NULL,NULL);"
      "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
      ",2097152,"
      "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
      "'Other Bookmarks',NULL,'Other Bookmarks','Other Bookmarks',"
      "'Other Bookmarks',NULL,NULL,'other_bookmarks',"
      "NULL,NULL);"
      "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
      ",-1048576,"
      "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
      "'Home (The Chromium Projects)',NULL,'Home (The Chromium Projects)',"
      "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
      "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
      "'AGATWA','AFAGVASF');"
      "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
      ",0,9,"
      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
      "'Extra Bookmarks',NULL,'Extra Bookmarks','Extra Bookmarks',"
      "'Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
      ",-917504,"
      "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
      "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'http://www.icann.com/','http://www.icann.com/',NULL,"
      "'PNGAXF0AAFF','DAAFASF');"
      "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
      ",1048576,"
      "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
      "'The WebKit Open Source Project',NULL,"
      "'The WebKit Open Source Project','The WebKit Open Source Project',"
      "'The WebKit Open Source Project','http://webkit.org/',"
      "'http://webkit.org/x',NULL,'PNGX','PNG2Y');"
      "CREATE TABLE share_info (id VARCHAR(128) primary key, "
      "last_sync_timestamp INT, name VARCHAR(128), "
      "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
      "db_create_version VARCHAR(128), db_create_time int, "
      "next_id bigint default -2, cache_guid VARCHAR(32));"
      "INSERT INTO share_info VALUES('nick@chromium.org',694,"
      "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
      "'Unknown',1263522064,-65542,"
      "'9010788312004066376x-6609234393368420856x');"
      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
      // NOTE(review): this v67 dump stores share_version data=68,
      // presumably carried over from the v68 dump it was backformed
      // from -- confirm the migration tests do not key off this value.
      "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
  ASSERT_TRUE(connection->CommitTransaction());
}
+
// Populates |connection| with a canned version 68 sync database dump.
// Relative to v67, the metas schema here has dropped the
// name/unsanitized_name/server_name columns in favor of
// non_unique_name/server_non_unique_name.
void MigrationTest::SetUpVersion68Database(sql::Connection* connection) {
  // This sets up an actual version 68 database dump. The IDs were
  // canonicalized to be less huge, and the favicons were overwritten
  // with random junk so that they didn't contain any unprintable
  // characters. A few server URLs were tweaked so that they'd be
  // different from the local URLs. Lastly, the custom collation on
  // the server_non_unique_name column was removed.
  ASSERT_TRUE(connection->is_open());
  ASSERT_TRUE(connection->BeginTransaction());
  ASSERT_TRUE(connection->Execute(
      "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
      "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
      "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
      "base_version bigint default -1,server_version bigint default 0,"
      "mtime bigint default 0,server_mtime bigint default 0,"
      "ctime bigint default 0,server_ctime bigint default 0,"
      "server_position_in_parent bigint default 0,"
      "local_external_id bigint default 0,id varchar(255) default 'r',"
      "parent_id varchar(255) default 'r',"
      "server_parent_id varchar(255) default 'r',"
      "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
      "is_unsynced bit default 0,is_unapplied_update bit default 0,"
      "is_del bit default 0,is_dir bit default 0,"
      "is_bookmark_object bit default 0,server_is_dir bit default 0,"
      "server_is_del bit default 0,"
      "server_is_bookmark_object bit default 0,"
      "non_unique_name varchar,server_non_unique_name varchar(255),"
      "bookmark_url varchar,server_bookmark_url varchar,"
      "singleton_tag varchar,bookmark_favicon blob,"
      "server_bookmark_favicon blob);"
      "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
      "NULL,NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
      ",-2097152,"
      "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
      "'Deleted Item','Deleted Item','http://www.google.com/',"
      "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG');"
      "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
      ",-3145728,"
      "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
      "'Welcome to Chromium','Welcome to Chromium',"
      "'http://www.google.com/chrome/intl/en/welcome.html',"
      "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
      "NULL);"
      "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
      ",1048576,"
      "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
      "'Google','Google','http://www.google.com/',"
      "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
      "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
      ",-4194304,"
      "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
      "'The Internet','The Internet',NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
      ","
      "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
      "'Google Chrome','Google Chrome',NULL,NULL,'google_chrome',NULL,"
      "NULL);"
      "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
      ",1048576,"
      "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
      "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL);"
      "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
      ","
      "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
      "1,'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,"
      "NULL);"
      "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
      ",2097152,"
      "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
      "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
      "NULL,NULL);"
      "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
      ",-1048576,"
      "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
      "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
      "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
      "'AGATWA','AFAGVASF');"
      "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
      ",0,9,"
      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
      "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
      ",-917504,"
      "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
      "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'http://www.icann.com/','http://www.icann.com/',NULL,"
      "'PNGAXF0AAFF','DAAFASF');"
      "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
      ",1048576,"
      "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
      "'The WebKit Open Source Project','The WebKit Open Source Project',"
      "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y');"
      "CREATE TABLE share_info (id VARCHAR(128) primary key, "
      "last_sync_timestamp INT, name VARCHAR(128), "
      "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
      "db_create_version VARCHAR(128), db_create_time int, "
      "next_id bigint default -2, cache_guid VARCHAR(32));"
      "INSERT INTO share_info VALUES('nick@chromium.org',694,"
      "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
      "'Unknown',1263522064,-65542,"
      "'9010788312004066376x-6609234393368420856x');"
      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
      "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
  ASSERT_TRUE(connection->CommitTransaction());
}
+
// Populates |connection| with a canned version 69 sync database dump.
// Relative to v68, the metas schema here adds the specifics and
// server_specifics blob columns (the X'...' hex literals in the
// INSERTs below are serialized specifics protos).
void MigrationTest::SetUpVersion69Database(sql::Connection* connection) {
  ASSERT_TRUE(connection->is_open());
  ASSERT_TRUE(connection->BeginTransaction());
  ASSERT_TRUE(connection->Execute(
      "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
      "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
      "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
      "base_version bigint default -1,server_version bigint default 0,"
      "mtime bigint default 0,server_mtime bigint default 0,"
      "ctime bigint default 0,server_ctime bigint default 0,"
      "server_position_in_parent bigint default 0,"
      "local_external_id bigint default 0,id varchar(255) default 'r',"
      "parent_id varchar(255) default 'r',"
      "server_parent_id varchar(255) default 'r',"
      "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
      "is_unsynced bit default 0,is_unapplied_update bit default 0,"
      "is_del bit default 0,is_dir bit default 0,"
      "is_bookmark_object bit default 0,server_is_dir bit default 0,"
      "server_is_del bit default 0,"
      "server_is_bookmark_object bit default 0,"
      "non_unique_name varchar,server_non_unique_name varchar(255),"
      "bookmark_url varchar,server_bookmark_url varchar,"
      "singleton_tag varchar,bookmark_favicon blob,"
      "server_bookmark_favicon blob, specifics blob, "
      "server_specifics blob);"
      "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,NULL,NULL,NULL,NULL,"
      "NULL,NULL,X'',X'');"
      "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
      ",-2097152,"
      "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
      "'Deleted Item','Deleted Item','http://www.google.com/',"
      "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG',"
      "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841415"
      "34741534741',X'C28810260A17687474703A2F2F7777772E676F6F676C652E636F"
      "6D2F32120B4153414447414447414447');"
      "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
      ",-3145728,"
      "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
      "'Welcome to Chromium','Welcome to Chromium',"
      "'http://www.google.com/chrome/intl/en/welcome.html',"
      "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,NULL,"
      "X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
      "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A3168"
      "7474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F6"
      "56E2F77656C636F6D652E68746D6C1200');"
      "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
      ",1048576,7,"
      "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
      "'Google','Google','http://www.google.com/',"
      "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG',X'C28810220A166"
      "87474703A2F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'"
      "C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464"
      "447415347');"
      "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
      ",-4194304,6"
      ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,'The Internet',"
      "'The Internet',NULL,NULL,NULL,NULL,NULL,X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
      ",1048576,0,"
      "'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,'Google Chrome',"
      "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
      ",1048576,0,"
      "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
      "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL,"
      "X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
      ",1048576,1,"
      "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,1,"
      "'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,NULL,"
      "X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
      ",2097152,2,"
      "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
      "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
      "NULL,NULL,X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
      ",-1048576,"
      "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
      "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
      "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
      "'AGATWA','AFAGVASF',X'C28810220A18687474703A2F2F6465762E6368726F6D6"
      "9756D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F646576"
      "2E6368726F6D69756D2E6F72672F6F7468657212084146414756415346');"
      "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
      ",0,9,"
      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
      "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL,"
      "X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
      ",-917504,"
      "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
      "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'http://www.icann.com/','http://www.icann.com/',NULL,'PNGAXF0AAFF',"
      "'DAAFASF',X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
      "20B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963"
      "616E6E2E636F6D2F120744414146415346');"
      "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
      ",1048576,11,"
      "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
      "'The WebKit Open Source Project','The WebKit Open Source Project',"
      "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y',"
      "X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',X'C2"
      "88101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259');"
      "CREATE TABLE share_info (id VARCHAR(128) primary key, "
      "last_sync_timestamp INT, name VARCHAR(128), "
      "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
      "db_create_version VARCHAR(128), db_create_time int, "
      "next_id bigint default -2, cache_guid VARCHAR(32));"
      "INSERT INTO share_info VALUES('nick@chromium.org',694,"
      "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
      "'Unknown',1263522064,-65542,"
      "'9010788312004066376x-6609234393368420856x');"
      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
      "INSERT INTO share_version VALUES('nick@chromium.org',69);"
      ));
  ASSERT_TRUE(connection->CommitTransaction());
}
+
// Populates |connection| with a canned version 70 sync database dump.
// Relative to v69, the metas schema here adds unique_server_tag and
// unique_client_tag and no longer has the bookmark_url/favicon,
// singleton_tag, or *_is_bookmark_object columns.
void MigrationTest::SetUpVersion70Database(sql::Connection* connection) {
  ASSERT_TRUE(connection->is_open());
  ASSERT_TRUE(connection->BeginTransaction());
  ASSERT_TRUE(connection->Execute(
      "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
      "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
      "CREATE TABLE share_info (id VARCHAR(128) primary key, "
      "last_sync_timestamp INT, name VARCHAR(128), "
      "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
      "db_create_version VARCHAR(128), db_create_time int, "
      "next_id bigint default -2, cache_guid VARCHAR(32));"
      "INSERT INTO share_info VALUES('nick@chromium.org',694,"
      "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
      "'Unknown',1263522064,-65542,"
      "'9010788312004066376x-6609234393368420856x');"
      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
      "INSERT INTO share_version VALUES('nick@chromium.org',70);"
      "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
      "base_version bigint default -1,server_version bigint default 0,"
      "mtime bigint default 0,server_mtime bigint default 0,"
      "ctime bigint default 0,server_ctime bigint default 0,"
      "server_position_in_parent bigint default 0,"
      "local_external_id bigint default 0,id varchar(255) default 'r',"
      "parent_id varchar(255) default 'r',"
      "server_parent_id varchar(255) default 'r',"
      "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
      "is_unsynced bit default 0,is_unapplied_update bit default 0,"
      "is_del bit default 0,is_dir bit default 0,"
      "server_is_dir bit default 0,server_is_del bit default 0,"
      "non_unique_name varchar,server_non_unique_name varchar(255),"
      "unique_server_tag varchar,unique_client_tag varchar,"
      "specifics blob,server_specifics blob);"
      "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
      "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2) ","
      "-2097152,4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,"
      "1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A"
      "2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X'C2881026"
      "0A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B415341444741"
      "4447414447');"
      "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
      ",-3145728,"
      "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
      "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A"
      "31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E74"
      "6C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F"
      "2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
      "6C636F6D652E68746D6C1200');"
      "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
      ",1048576,7,"
      "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
      "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C6"
      "52E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F77777"
      "72E676F6F676C652E636F6D2F12084147464447415347');"
      "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
      ",-4194304,"
      "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
      "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
      ",1048576,0,"
      "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome',"
      "'Google Chrome','google_chrome',NULL,NULL,NULL);"
      "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
      ",1048576,0,"
      "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
      "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',"
      "X'C2881000');"
      "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
      ",1048576,"
      "1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,"
      "'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',"
      "X'C2881000');"
      "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
      ","
      "2097152,2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
      "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
      "X'C2881000',X'C2881000');"
      "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
      ",-1048576,"
      "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
      "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
      "NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F"
      "72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636872"
      "6F6D69756D2E6F72672F6F7468657212084146414756415346');"
      "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
      ",0,9,"
      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
      "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
      "X'C2881000');"
      "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
      ",-917504,"
      "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
      "NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F"
      "120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E69"
      "63616E6E2E636F6D2F120744414146415346');"
      "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
      ",1048576,"
      "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
      "'The WebKit Open Source Project','The WebKit Open Source Project',"
      "NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
      "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F78120550"
      "4E473259');"
      ));
  ASSERT_TRUE(connection->CommitTransaction());
}
+
+void MigrationTest::SetUpVersion71Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
+ "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',71);"
+ "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
+ "base_version bigint default -1,server_version bigint default 0,"
+ "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
+ "default 0,server_ctime bigint default 0,server_position_in_parent "
+ "bigint default 0,local_external_id bigint default 0,id varchar(255) "
+ "default 'r',parent_id varchar(255) default 'r',server_parent_id "
+ "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
+ "varchar(255) default 'r',is_unsynced bit default 0,"
+ "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
+ "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
+ "non_unique_name varchar,server_non_unique_name varchar(255),"
+ "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
+ "server_specifics blob);"
+ "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+ ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
+ "NULL,NULL,X'',X'');"
+ "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+ ",-2097152,4,"
+ "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
+ "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
+ "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
+ "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
+ "47');"
+ "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+ ",-3145728,3,"
+ "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
+ "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
+ "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
+ "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
+ "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
+ "652E68746D6C1200');"
+ "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+ ",1048576,7,"
+ "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
+ "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
+ "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
+ "76F6F676C652E636F6D2F12084147464447415347');"
+ "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+ ",-4194304,6,"
+ "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
+ "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+ ",1048576,0,"
+ "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
+ ",'google_chrome',NULL,NULL,NULL);"
+ "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+ ",1048576,0,"
+ "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
+ "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+ ",1048576,1,"
+ "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
+ "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+ ",2097152,2,"
+ "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
+ "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
+ "X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+ ",-1048576,8,"
+ "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
+ "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
+ "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
+ "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
+ "D2E6F72672F6F7468657212084146414756415346');"
+ "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+ ",0,9,"
+ "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
+ "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
+ "X'C2881000');"
+ "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+ ",-917504,10,"
+ "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
+ "'ICANN | Internet Corporation for Assigned Names and Numbers',"
+ "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
+ "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
+ "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
+ "E636F6D2F120744414146415346');"
+ "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+ ",1048576,11,"
+ "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
+ "'The WebKit Open Source Project','The WebKit Open Source Project',"
+ "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
+ "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
+ "473259');"
+ "CREATE TABLE models (model_id BLOB primary key, "
+ "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
+ "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
+ "next_id INT default -2, cache_guid TEXT);"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+ "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
+ "'9010788312004066376x-6609234393368420856x');"));
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
+// Populates |connection| with a canonical version-72 sync directory
+// database: the share_version, metas, models, and share_info tables plus
+// a fixed set of bookmark fixture rows, all inside a single transaction.
+// Uses gtest ASSERTs, so any SQL failure aborts the calling test.
+void MigrationTest::SetUpVersion72Database(sql::Connection* connection) {
+  ASSERT_TRUE(connection->is_open());
+  ASSERT_TRUE(connection->BeginTransaction());
+  ASSERT_TRUE(connection->Execute(
+      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+      "INSERT INTO 'share_version' VALUES('nick@chromium.org',72);"
+      // 'metas' schema as of v72; each INSERT below splices its legacy
+      // time columns in via the LEGACY_PROTO_TIME_VALS(id) macro, and the
+      // X'...' blobs are hex-encoded serialized specifics.
+      "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
+      "base_version bigint default -1,server_version bigint default 0,"
+      "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
+      "default 0,server_ctime bigint default 0,server_position_in_parent "
+      "bigint default 0,local_external_id bigint default 0,id varchar(255) "
+      "default 'r',parent_id varchar(255) default 'r',server_parent_id "
+      "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
+      "varchar(255) default 'r',is_unsynced bit default 0,"
+      "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
+      "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
+      "non_unique_name varchar,server_non_unique_name varchar(255),"
+      "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
+      "server_specifics blob);"
+      "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
+      "NULL,NULL,X'',X'');"
+      "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+      ",-2097152,4,"
+      "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
+      "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
+      "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
+      "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
+      "47');"
+      "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+      ",-3145728,3,"
+      "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
+      "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
+      "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
+      "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
+      "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
+      "652E68746D6C1200');"
+      "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+      ",1048576,7,"
+      "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
+      "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
+      "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
+      "76F6F676C652E636F6D2F12084147464447415347');"
+      "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+      ",-4194304,6,"
+      "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
+      "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+      ",1048576,0,"
+      "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
+      ",'google_chrome',NULL,NULL,NULL);"
+      "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+      ",1048576,0,"
+      "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
+      "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+      ",1048576,1,"
+      "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
+      "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+      ",2097152,2,"
+      "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
+      "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
+      "X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+      ",-1048576,8,"
+      "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
+      "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
+      "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
+      "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
+      "D2E6F72672F6F7468657212084146414756415346');"
+      "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+      ",0,9,"
+      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
+      "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
+      "X'C2881000');"
+      "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+      ",-917504,10,"
+      "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
+      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
+      "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
+      "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
+      "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
+      "E636F6D2F120744414146415346');"
+      "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+      ",1048576,11,"
+      "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
+      "'The WebKit Open Source Project','The WebKit Open Source Project',"
+      "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
+      "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
+      "473259');"
+      // v72 'models' still tracks last_download_timestamp (replaced by a
+      // progress_marker blob in the v75 fixture below).
+      "CREATE TABLE models (model_id BLOB primary key, "
+      "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
+      "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
+      // v72 'share_info' has no notification_state column yet (it first
+      // appears in the v73 fixture).
+      "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
+      "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
+      "next_id INT default -2, cache_guid TEXT);"
+      "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+      "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
+      "'9010788312004066376x-6609234393368420856x');"));
+  ASSERT_TRUE(connection->CommitTransaction());
+}
+
+// Populates |connection| with a canonical version-73 sync directory
+// database.  Same fixture data as the v72 setup; the schema difference is
+// that 'share_info' gains a notification_state BLOB column (and its row
+// stores X'C2881000' there).  Runs inside a single transaction and uses
+// gtest ASSERTs, so any SQL failure aborts the calling test.
+void MigrationTest::SetUpVersion73Database(sql::Connection* connection) {
+  ASSERT_TRUE(connection->is_open());
+  ASSERT_TRUE(connection->BeginTransaction());
+  ASSERT_TRUE(connection->Execute(
+      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+      "INSERT INTO 'share_version' VALUES('nick@chromium.org',73);"
+      // 'metas' schema unchanged from v72; timestamps are spliced in via
+      // the LEGACY_PROTO_TIME_VALS(id) macro in each INSERT below.
+      "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
+      "base_version bigint default -1,server_version bigint default 0,"
+      "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
+      "default 0,server_ctime bigint default 0,server_position_in_parent "
+      "bigint default 0,local_external_id bigint default 0,id varchar(255) "
+      "default 'r',parent_id varchar(255) default 'r',server_parent_id "
+      "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
+      "varchar(255) default 'r',is_unsynced bit default 0,"
+      "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
+      "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
+      "non_unique_name varchar,server_non_unique_name varchar(255),"
+      "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
+      "server_specifics blob);"
+      "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
+      "NULL,NULL,X'',X'');"
+      "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+      ",-2097152,4,"
+      "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
+      "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
+      "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
+      "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
+      "47');"
+      "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+      ",-3145728,3,"
+      "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
+      "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
+      "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
+      "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
+      "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
+      "652E68746D6C1200');"
+      "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+      ",1048576,7,"
+      "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
+      "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
+      "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
+      "76F6F676C652E636F6D2F12084147464447415347');"
+      "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+      ",-4194304,6,"
+      "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
+      "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+      ",1048576,0,"
+      "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
+      ",'google_chrome',NULL,NULL,NULL);"
+      "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+      ",1048576,0,"
+      "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
+      "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+      ",1048576,1,"
+      "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
+      "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+      ",2097152,2,"
+      "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
+      "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
+      "X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+      ",-1048576,8,"
+      "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
+      "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
+      "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
+      "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
+      "D2E6F72672F6F7468657212084146414756415346');"
+      "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+      ",0,9,"
+      "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
+      "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
+      "X'C2881000');"
+      "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+      ",-917504,10,"
+      "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
+      "'ICANN | Internet Corporation for Assigned Names and Numbers',"
+      "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
+      "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
+      "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
+      "E636F6D2F120744414146415346');"
+      "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+      ",1048576,11,"
+      "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
+      "'The WebKit Open Source Project','The WebKit Open Source Project',"
+      "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
+      "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
+      "473259');"
+      "CREATE TABLE models (model_id BLOB primary key, "
+      "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
+      "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
+      // v73: 'share_info' now carries a notification_state BLOB column.
+      "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
+      "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
+      "next_id INT default -2, cache_guid TEXT, "
+      "notification_state BLOB);"
+      "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+      "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
+      "'9010788312004066376x-6609234393368420856x',X'C2881000');"));
+  ASSERT_TRUE(connection->CommitTransaction());
+}
+
+// Populates |connection| with a canonical version-74 sync directory
+// database.  Relative to the v73 fixture, 'share_info' gains the autofill
+// migration bookkeeping columns (autofill_migration_state,
+// bookmarks_added_during_autofill_migration, autofill_migration_time,
+// autofill_entries_added_during_migration,
+// autofill_profiles_added_during_migration).  Same bookmark fixture rows,
+// committed in one transaction; gtest ASSERTs abort the test on failure.
+void MigrationTest::SetUpVersion74Database(sql::Connection* connection) {
+  ASSERT_TRUE(connection->is_open());
+  ASSERT_TRUE(connection->BeginTransaction());
+  ASSERT_TRUE(connection->Execute(
+      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+      "INSERT INTO 'share_version' VALUES('nick@chromium.org',74);"
+      "CREATE TABLE models (model_id BLOB primary key, last_download_timestamp"
+      " INT, initial_sync_ended BOOLEAN default 0);"
+      "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
+      // v74 'share_info': five new autofill-migration columns, all with a
+      // default of 0; the fixture row stores NULL notification_state.
+      "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
+      "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
+      "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
+      "tion_state INT default 0, bookmarks_added_during_autofill_migration"
+      " INT default 0, autofill_migration_time INT default 0, autofill_ent"
+      "ries_added_during_migration INT default 0, autofill_profiles_added_"
+      "during_migration INT default 0);"
+      "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org'"
+      ",'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542"
+      ",'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
+      // 'metas' schema unchanged from v73; legacy time columns come from
+      // the LEGACY_PROTO_TIME_VALS(id) macro in each INSERT below.
+      "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,bas"
+      "e_version bigint default -1,server_version bigint default 0,mtime b"
+      "igint default 0,server_mtime bigint default 0,ctime bigint default "
+      "0,server_ctime bigint default 0,server_position_in_parent bigint de"
+      "fault 0,local_external_id bigint default 0,id varchar(255) default "
+      "'r',parent_id varchar(255) default 'r',server_parent_id varchar(255"
+      ") default 'r',prev_id varchar(255) default 'r',next_id varchar(255)"
+      " default 'r',is_unsynced bit default 0,is_unapplied_update bit defa"
+      "ult 0,is_del bit default 0,is_dir bit default 0,server_is_dir bit d"
+      "efault 0,server_is_del bit default 0,non_unique_name varchar,server"
+      "_non_unique_name varchar(255),unique_server_tag varchar,unique_clie"
+      "nt_tag varchar,specifics blob,server_specifics blob);"
+      "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'"
+      "');"
+      "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+      ",-2097152,4,'s_ID_2','s_ID"
+      "_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted "
+      "Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
+      "36F6D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E67"
+      "6F6F676C652E636F6D2F32120B4153414447414447414447');"
+      "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+      ",-3145728,3,'s_ID_4','s_ID"
+      "_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','W"
+      "elcome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E6"
+      "76F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E"
+      "68746D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636"
+      "F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
+      "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+      ",1048576,7,'s_ID_5','s_ID_"
+      "9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NU"
+      "LL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841"
+      "47415347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
+      "36F6D2F12084147464447415347');"
+      "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+      ",-4194304,6,'s_ID_6','s_ID"
+      "_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL"
+      ",NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+      ",1048576,0,'s_ID_7','r','r"
+      "','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrom"
+      "e',NULL,NULL,NULL);"
+      "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+      ",1048576,0,'s_ID_8','s_ID_"
+      "7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chr"
+      "ome_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+      ",1048576,1,'s_ID_9','s_ID_"
+      "8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar'"
+      ",'bookmark_bar',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+      ",2097152,2,'s_ID_10','s_I"
+      "D_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Boo"
+      "kmarks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+      ",-1048576,8,'s_ID_11','s_"
+      "ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projec"
+      "ts)','Home (The Chromium Projects)',NULL,NULL,X'C28810220A186874747"
+      "03A2F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810"
+      "290A1D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120"
+      "84146414756415346');"
+      "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+      ",0,9,'s_ID_12','s_ID_6','"
+      "s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bo"
+      "okmarks',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+      ",-917504,10,'s_ID_13','s_"
+      "ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Co"
+      "rporation for Assigned Names and Numbers','ICANN | Internet Corpora"
+      "tion for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474"
+      "703A2F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C"
+      "28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F12074441414641"
+      "5346');"
+      "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+      ",1048576,11,'s_ID_14','s_"
+      "ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Pr"
+      "oject','The WebKit Open Source Project',NULL,NULL,X'C288101A0A12687"
+      "474703A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474"
+      "703A2F2F7765626B69742E6F72672F781205504E473259');"
+      ));
+  ASSERT_TRUE(connection->CommitTransaction());
+}
+
+// Populates |connection| with a canonical version-75 sync directory
+// database.  Relative to the v74 fixture, the 'models' table replaces
+// last_download_timestamp with a progress_marker BLOB; 'share_info' keeps
+// the autofill migration columns.  Same bookmark fixture rows, committed
+// in one transaction; gtest ASSERTs abort the test on failure.
+void MigrationTest::SetUpVersion75Database(sql::Connection* connection) {
+  ASSERT_TRUE(connection->is_open());
+  ASSERT_TRUE(connection->BeginTransaction());
+  ASSERT_TRUE(connection->Execute(
+      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+      "INSERT INTO 'share_version' VALUES('nick@chromium.org',75);"
+      "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
+      "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
+      "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
+      "tion_state INT default 0,bookmarks_added_during_autofill_migration "
+      "INT default 0, autofill_migration_time INT default 0, autofill_entr"
+      "ies_added_during_migration INT default 0, autofill_profiles_added_d"
+      "uring_migration INT default 0);"
+      "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org"
+      "','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-655"
+      "42,'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
+      // v75: 'models' now stores a serialized progress_marker blob instead
+      // of the v74 last_download_timestamp INT.
+      "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, "
+      "initial_sync_ended BOOLEAN default 0);"
+      "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+      // 'metas' schema unchanged; legacy time columns come from the
+      // LEGACY_PROTO_TIME_VALS(id) macro in each INSERT below.
+      "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,ba"
+      "se_version bigint default -1,server_version bigint default 0,mtime"
+      " bigint default 0,server_mtime bigint default 0,ctime bigint defau"
+      "lt 0,server_ctime bigint default 0,server_position_in_parent bigin"
+      "t default 0,local_external_id bigint default 0,id varchar(255) def"
+      "ault 'r',parent_id varchar(255) default 'r',server_parent_id varch"
+      "ar(255) default 'r',prev_id varchar(255) default 'r',next_id varch"
+      "ar(255) default 'r',is_unsynced bit default 0,is_unapplied_update "
+      "bit default 0,is_del bit default 0,is_dir bit default 0,server_is_"
+      "dir bit default 0,server_is_del bit default 0,non_unique_name varc"
+      "har,server_non_unique_name varchar(255),unique_server_tag varchar,"
+      "unique_client_tag varchar,specifics blob,server_specifics blob);"
+      "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NUL"
+      "L,X'',X'');"
+      "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+      ",-2097152,4,'s_ID_"
+      "2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Ite"
+      "m','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772"
+      "E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A176874"
+      "74703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474"
+      "14447');"
+      "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+      ",-3145728,3,'s_ID_"
+      "4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to "
+      "Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A316874747"
+      "03A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F65"
+      "6E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7"
+      "777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
+      "6C636F6D652E68746D6C1200');"
+      "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+      ",1048576,7,'s_ID_5"
+      "','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Goo"
+      "gle',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C65"
+      "2E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F777"
+      "7772E676F6F676C652E636F6D2F12084147464447415347');"
+      "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+      ",-4194304,6,'s_ID_"
+      "6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The In"
+      "ternet',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+      ",1048576,0,'s_ID_7"
+      "','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','"
+      "google_chrome',NULL,NULL,NULL);"
+      "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+      ",1048576,0,'s_ID_8"
+      "','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks'"
+      ",'google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+      ",1048576,1,'s_ID_9"
+      "','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','B"
+      "ookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+      ",2097152,2,'s_ID_"
+      "10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks"
+      "','Other Bookmarks','other_bookmarks',NULL,X'C2881000',X'C28810"
+      "00');"
+      "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+      ",-1048576,8,'s_ID"
+      "_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chr"
+      "omium Projects)','Home (The Chromium Projects)',NULL,NULL,X'C28"
+      "810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F120641"
+      "4741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
+      "D2E6F72672F6F7468657212084146414756415346');"
+      "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+      ",0,9,'s_ID_12','s"
+      "_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmark"
+      "s','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+      ",-917504,10,'s_ID"
+      "_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN |"
+      " Internet Corporation for Assigned Names and Numbers','ICANN | "
+      "Internet Corporation for Assigned Names and Numbers',NULL,NULL,"
+      "X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
+      "E474158463041414646',X'C28810200A15687474703A2F2F7777772E696361"
+      "6E6E2E636F6D2F120744414146415346');"
+      "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+      ",1048576,11,'s_ID"
+      "_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Op"
+      "en Source Project','The WebKit Open Source Project',NULL,NULL,X"
+      "'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',"
+      "X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473"
+      "259');"
+      ));
+  ASSERT_TRUE(connection->CommitTransaction());
+}
+
+// Populates |connection| with a canonical version-76 sync directory
+// database.  Relative to the v75 fixture, 'share_info' drops the five
+// autofill migration columns again (keeping notification_state), while
+// 'models' keeps the progress_marker BLOB.  Same bookmark fixture rows,
+// committed in one transaction; gtest ASSERTs abort the test on failure.
+void MigrationTest::SetUpVersion76Database(sql::Connection* connection) {
+  ASSERT_TRUE(connection->is_open());
+  ASSERT_TRUE(connection->BeginTransaction());
+  ASSERT_TRUE(connection->Execute(
+      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+      "INSERT INTO 'share_version' VALUES('nick@chromium.org',76);"
+      "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
+      "itial_sync_ended BOOLEAN default 0);"
+      "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+      // 'metas' schema unchanged; legacy time columns come from the
+      // LEGACY_PROTO_TIME_VALS(id) macro in each INSERT below.
+      "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
+      "_version bigint default -1,server_version bigint default 0,mtime big"
+      "int default 0,server_mtime bigint default 0,ctime bigint default 0,s"
+      "erver_ctime bigint default 0,server_position_in_parent bigint defaul"
+      "t 0,local_external_id bigint default 0,id varchar(255) default 'r',p"
+      "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+      "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+      "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+      "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+      "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+      "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+      "har,specifics blob,server_specifics blob);"
+      "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
+      ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'')"
+      ";"
+      "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
+      ",-2097152,4,'s_ID_2','s_ID_9"
+      "','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted Ite"
+      "m',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6"
+      "D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E676F6F6"
+      "76C652E636F6D2F32120B4153414447414447414447');"
+      "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
+      ",-3145728,3,'s_ID_4','s_ID_9"
+      "','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','Welc"
+      "ome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E676F6"
+      "F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746"
+      "D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6"
+      "368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
+      "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
+      ",1048576,7,'s_ID_5','s_ID_9'"
+      ",'s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NULL,"
+      "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F1208414741"
+      "5347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D"
+      "2F12084147464447415347');"
+      "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
+      ",-4194304,6,'s_ID_6','s_ID_9"
+      "','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL,NU"
+      "LL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
+      ",1048576,0,'s_ID_7','r','r',"
+      "'r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrome',"
+      "NULL,NULL,NULL);"
+      "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
+      ",1048576,0,'s_ID_8','s_ID_7'"
+      ",'s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chrome"
+      "_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
+      ",1048576,1,'s_ID_9','s_ID_8'"
+      ",'s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar','b"
+      "ookmark_bar',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
+      ",2097152,2,'s_ID_10','s_ID_"
+      "8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Bookma"
+      "rks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
+      ",-1048576,8,'s_ID_11','s_ID"
+      "_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projects)"
+      "','Home (The Chromium Projects)',NULL,NULL,X'C28810220A18687474703A2"
+      "F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810290A1"
+      "D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120841464"
+      "14756415346');"
+      "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
+      ",0,9,'s_ID_12','s_ID_6','s_"
+      "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
+      "arks',NULL,NULL,X'C2881000',X'C2881000');"
+      "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
+      ",-917504,10,'s_ID_13','s_ID"
+      "_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Corpo"
+      "ration for Assigned Names and Numbers','ICANN | Internet Corporation"
+      " for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474703A2"
+      "F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C288102"
+      "00A15687474703A2F2F7777772E6963616E6E2E636F6D2F120744414146415346');"
+      "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
+      ",1048576,11,'s_ID_14','s_ID"
+      "_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Proje"
+      "ct','The WebKit Open Source Project',NULL,NULL,X'C288101A0A126874747"
+      "03A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474703A2"
+      "F2F7765626B69742E6F72672F781205504E473259');"
+      // v76 'share_info': back to notification_state only — no autofill
+      // migration columns.
+      "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
+      "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
+      "ult -2, cache_guid TEXT , notification_state BLOB);"
+      "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+      "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
+      "9010788312004066376x-6609234393368420856x',NULL);"
+      ));
+  ASSERT_TRUE(connection->CommitTransaction());
+}
+
+void MigrationTest::SetUpVersion77Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',77);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
+ "itial_sync_ended BOOLEAN default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0,server_po"
+ "sition_in_parent bigint default 0,local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob);"
+ "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1)
+ ",'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
+ "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
+ META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
+ "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
+ "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
+ "14447414447414447');"
+ "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
+ META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
+ "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
+ ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
+ "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
+ "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
+ "E2F77656C636F6D652E68746D6C1200');"
+ "INSERT INTO 'metas' VALUES(5,677,677,1048576,7," META_PROTO_TIMES_VALS(5)
+ ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','"
+ "Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E"
+ "636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E67"
+ "6F6F676C652E636F6D2F12084147464447415347');"
+ "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
+ ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
+ ");"
+ "INSERT INTO 'metas' VALUES(7,663,663,1048576,0," META_PROTO_TIMES_VALS(7)
+ ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Goo"
+ "gle Chrome','google_chrome',NULL,NULL,NULL);"
+ "INSERT INTO 'metas' VALUES(8,664,664,1048576,0," META_PROTO_TIMES_VALS(8)
+ ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmar"
+ "ks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(9,665,665,1048576,1," META_PROTO_TIMES_VALS(9)
+ ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar'"
+ ",'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
+ "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
+ "LL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
+ ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
+ "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
+ "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
+ "8726F6D69756D2E6F72672F6F7468657212084146414756415346');"
+ "INSERT INTO 'metas' VALUES(12,685,685,0,9," META_PROTO_TIMES_VALS(12)
+ ",'s_ID_12','s_ID_6','s_"
+ "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
+ "arks',NULL,NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
+ "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
+ "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
+ "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
+ "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
+ "E6963616E6E2E636F6D2F120744414146415346');"
+ "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
+ ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
+ "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
+ "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
+ "81205504E473259');"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
+ "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
+ "ult -2, cache_guid TEXT , notification_state BLOB);"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+ "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
+ "9010788312004066376x-6609234393368420856x',NULL);"
+ ));
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+
+ SetUpVersion67Database(&connection);
+
+ // Columns existing before version 67.
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "name"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "unsanitized_name"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "server_name"));
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion67To68());
+ ASSERT_EQ(68, dbs->GetVersion());
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion68To69) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion68Database(&connection);
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion68To69());
+ ASSERT_EQ(69, dbs->GetVersion());
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
+ sql::Statement s(connection.GetUniqueStatement("SELECT non_unique_name,"
+ "is_del, is_dir, id, specifics, server_specifics FROM metas "
+ "WHERE metahandle = 2"));
+ ASSERT_TRUE(s.Step());
+ ASSERT_EQ("Deleted Item", s.ColumnString(0));
+ ASSERT_TRUE(s.ColumnBool(1));
+ ASSERT_FALSE(s.ColumnBool(2));
+ ASSERT_EQ("s_ID_2", s.ColumnString(3));
+ sync_pb::EntitySpecifics specifics;
+ specifics.ParseFromArray(s.ColumnBlob(4), s.ColumnByteLength(4));
+ ASSERT_TRUE(specifics.has_bookmark());
+ ASSERT_EQ("http://www.google.com/", specifics.bookmark().url());
+ ASSERT_EQ("AASGASGA", specifics.bookmark().favicon());
+ specifics.ParseFromArray(s.ColumnBlob(5), s.ColumnByteLength(5));
+ ASSERT_TRUE(specifics.has_bookmark());
+ ASSERT_EQ("http://www.google.com/2", specifics.bookmark().url());
+ ASSERT_EQ("ASADGADGADG", specifics.bookmark().favicon());
+ ASSERT_FALSE(s.Step());
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion69To70) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion69Database(&connection);
+
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "singleton_tag"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_client_tag"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion69To70());
+ ASSERT_EQ(70, dbs->GetVersion());
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+ }
+
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
+ sql::Statement s(connection.GetUniqueStatement("SELECT id"
+ " FROM metas WHERE unique_server_tag = 'google_chrome'"));
+ ASSERT_TRUE(s.Step());
+ EXPECT_EQ("s_ID_7", s.ColumnString(0));
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion70To71) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion70Database(&connection);
+
+ ASSERT_TRUE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
+ ASSERT_TRUE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
+ ASSERT_FALSE(connection.DoesTableExist("models"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion70To71());
+ ASSERT_EQ(71, dbs->GetVersion());
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_FALSE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
+ ASSERT_FALSE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
+ ASSERT_TRUE(connection.DoesTableExist("models"));
+ ASSERT_TRUE(connection.DoesColumnExist("models", "initial_sync_ended"));
+ ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
+ ASSERT_TRUE(connection.DoesColumnExist("models", "model_id"));
+
+ sql::Statement s(connection.GetUniqueStatement("SELECT model_id, "
+ "initial_sync_ended, last_download_timestamp FROM models"));
+ ASSERT_TRUE(s.Step());
+ std::string model_id = s.ColumnString(0);
+ EXPECT_EQ("C2881000", base::HexEncode(model_id.data(), model_id.size()))
+ << "Model ID is expected to be the empty BookmarkSpecifics proto.";
+ EXPECT_TRUE(s.ColumnBool(1));
+ EXPECT_EQ(694, s.ColumnInt64(2));
+ ASSERT_FALSE(s.Step());
+}
+
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion71To72) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion71Database(&connection);
+
+ ASSERT_TRUE(connection.DoesTableExist("extended_attributes"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion71To72());
+ ASSERT_EQ(72, dbs->GetVersion());
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion72To73) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion72Database(&connection);
+
+ ASSERT_FALSE(connection.DoesColumnExist("share_info", "notification_state"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion72To73());
+ ASSERT_EQ(73, dbs->GetVersion());
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion73To74) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion73Database(&connection);
+
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info", "autofill_migration_state"));
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info",
+ "bookmarks_added_during_autofill_migration"));
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info", "autofill_migration_time"));
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info",
+ "autofill_entries_added_during_migration"));
+
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info",
+ "autofill_profiles_added_during_migration"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion73To74());
+ ASSERT_EQ(74, dbs->GetVersion());
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info", "autofill_migration_state"));
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info",
+ "bookmarks_added_during_autofill_migration"));
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info", "autofill_migration_time"));
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info",
+ "autofill_entries_added_during_migration"));
+
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info",
+ "autofill_profiles_added_during_migration"));
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion74To75) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion74Database(&connection);
+
+ ASSERT_FALSE(connection.DoesColumnExist("models", "progress_marker"));
+ ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion74To75());
+ ASSERT_EQ(75, dbs->GetVersion());
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
+ ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion75To76) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion75Database(&connection);
+
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info", "autofill_migration_state"));
+ ASSERT_TRUE(connection.DoesColumnExist("share_info",
+ "bookmarks_added_during_autofill_migration"));
+ ASSERT_TRUE(
+ connection.DoesColumnExist("share_info", "autofill_migration_time"));
+ ASSERT_TRUE(connection.DoesColumnExist("share_info",
+ "autofill_entries_added_during_migration"));
+ ASSERT_TRUE(connection.DoesColumnExist("share_info",
+ "autofill_profiles_added_during_migration"));
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion75To76());
+ ASSERT_EQ(76, dbs->GetVersion());
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+ // Cannot actual refresh columns due to version 76 not containing all
+ // necessary columns.
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion76To77) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion76Database(&connection);
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+
+ EXPECT_EQ(GetExpectedLegacyMetaProtoTimes(INCLUDE_DELETED_ITEMS),
+ GetMetaProtoTimes(dbs->db_.get()));
+ // Since the proto times are expected to be in a legacy format, they may not
+ // be compatible with ProtoTimeToTime, so we don't call ExpectTimes().
+
+ ASSERT_TRUE(dbs->MigrateVersion76To77());
+ ASSERT_EQ(77, dbs->GetVersion());
+
+ EXPECT_EQ(GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS),
+ GetMetaProtoTimes(dbs->db_.get()));
+ // Cannot actually load entries due to version 77 not having all required
+ // columns.
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+}
+
+TEST_F(DirectoryBackingStoreTest, MigrateVersion77To78) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion77Database(&connection);
+
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "BASE_SERVER_SPECIFICS"));
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion77To78());
+ ASSERT_EQ(78, dbs->GetVersion());
+
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ }
+
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
+}
+
+TEST_P(MigrationTest, ToCurrentVersion) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ switch (GetParam()) {
+ case 67:
+ SetUpVersion67Database(&connection);
+ break;
+ case 68:
+ SetUpVersion68Database(&connection);
+ break;
+ case 69:
+ SetUpVersion69Database(&connection);
+ break;
+ case 70:
+ SetUpVersion70Database(&connection);
+ break;
+ case 71:
+ SetUpVersion71Database(&connection);
+ break;
+ case 72:
+ SetUpVersion72Database(&connection);
+ break;
+ case 73:
+ SetUpVersion73Database(&connection);
+ break;
+ case 74:
+ SetUpVersion74Database(&connection);
+ break;
+ case 75:
+ SetUpVersion75Database(&connection);
+ break;
+ case 76:
+ SetUpVersion76Database(&connection);
+ break;
+ case 77:
+ SetUpVersion77Database(&connection);
+ break;
+ default:
+ // If you see this error, it may mean that you've increased the
+ // database version number but you haven't finished adding unit tests
+ // for the database migration code. You need to need to supply a
+ // SetUpVersionXXDatabase function with a dump of the test database
+ // at the old schema. Here's one way to do that:
+ // 1. Start on a clean tree (with none of your pending schema changes).
+ // 2. Set a breakpoint in this function and run the unit test.
+ // 3. Allow this test to run to completion (step out of the call),
+ // without allowing ~MigrationTest to execute.
+ // 4. Examine this->temp_dir_ to determine the location of the
+ // test database (it is currently of the version you need).
+ // 5. Dump this using the sqlite3 command line tool:
+ // > .output foo_dump.sql
+ // > .dump
+ // 6. Replace the timestamp columns with META_PROTO_TIMES(x) (or
+ // LEGACY_META_PROTO_TIMES(x) if before Version 77).
+ FAIL() << "Need to supply database dump for version " << GetParam();
+ }
+
+ syncable::Directory::KernelLoadInfo dir_info;
+ MetahandlesIndex index;
+ STLElementDeleter<MetahandlesIndex> index_deleter(&index);
+
+ {
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_EQ(OPENED, dbs->Load(&index, &dir_info));
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
+ }
+
+ // Columns deleted in Version 67.
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "name"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "unsanitized_name"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "server_name"));
+
+ // Columns added in Version 68.
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
+
+ // Columns deleted in Version 68.
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "is_bookmark_object"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas",
+ "server_is_bookmark_object"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_favicon"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_url"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "server_bookmark_url"));
+
+ // Renamed a column in Version 70
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "singleton_tag"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
+
+ // Removed extended attributes in Version 72.
+ ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
+
+ // Columns added in Version 73.
+ ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
+
+ // Column replaced in version 75.
+ ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
+ ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
+
+ // Columns removed in version 76.
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info", "autofill_migration_state"));
+ ASSERT_FALSE(connection.DoesColumnExist("share_info",
+ "bookmarks_added_during_autofill_migration"));
+ ASSERT_FALSE(
+ connection.DoesColumnExist("share_info", "autofill_migration_time"));
+ ASSERT_FALSE(connection.DoesColumnExist("share_info",
+ "autofill_entries_added_during_migration"));
+ ASSERT_FALSE(connection.DoesColumnExist("share_info",
+ "autofill_profiles_added_during_migration"));
+
+ // Column added in version 78.
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
+
+ // Check download_progress state (v75 migration)
+ ASSERT_EQ(694,
+ dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
+ .timestamp_token_for_migration());
+ ASSERT_FALSE(
+ dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
+ .has_token());
+ ASSERT_EQ(32904,
+ dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
+ .data_type_id());
+ ASSERT_FALSE(
+ dir_info.kernel_info.download_progress[syncable::THEMES]
+ .has_timestamp_token_for_migration());
+ ASSERT_TRUE(
+ dir_info.kernel_info.download_progress[syncable::THEMES]
+ .has_token());
+ ASSERT_TRUE(
+ dir_info.kernel_info.download_progress[syncable::THEMES]
+ .token().empty());
+ ASSERT_EQ(41210,
+ dir_info.kernel_info.download_progress[syncable::THEMES]
+ .data_type_id());
+
+ // Check metas
+ EXPECT_EQ(GetExpectedMetaProtoTimes(DONT_INCLUDE_DELETED_ITEMS),
+ GetMetaProtoTimes(&connection));
+ ExpectTimes(index, GetExpectedMetaTimes());
+
+ MetahandlesIndex::iterator it = index.begin();
+ ASSERT_TRUE(it != index.end());
+ ASSERT_EQ(1, (*it)->ref(META_HANDLE));
+ EXPECT_TRUE((*it)->ref(ID).IsRoot());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(6, (*it)->ref(META_HANDLE));
+ EXPECT_TRUE((*it)->ref(IS_DIR));
+ EXPECT_TRUE((*it)->ref(SERVER_IS_DIR));
+ EXPECT_FALSE(
+ (*it)->ref(SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE(
+ (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE(
+ (*it)->ref(SPECIFICS).bookmark().has_favicon());
+ EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(7, (*it)->ref(META_HANDLE));
+ EXPECT_EQ("google_chrome", (*it)->ref(UNIQUE_SERVER_TAG));
+ EXPECT_FALSE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(8, (*it)->ref(META_HANDLE));
+ EXPECT_EQ("google_chrome_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
+ EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(9, (*it)->ref(META_HANDLE));
+ EXPECT_EQ("bookmark_bar", (*it)->ref(UNIQUE_SERVER_TAG));
+ EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(10, (*it)->ref(META_HANDLE));
+ EXPECT_FALSE((*it)->ref(IS_DEL));
+ EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+ EXPECT_FALSE((*it)->ref(SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE(
+ (*it)->ref(SPECIFICS).bookmark().has_favicon());
+ EXPECT_FALSE(
+ (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
+ EXPECT_EQ("other_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
+ EXPECT_EQ("Other Bookmarks", (*it)->ref(NON_UNIQUE_NAME));
+ EXPECT_EQ("Other Bookmarks", (*it)->ref(SERVER_NON_UNIQUE_NAME));
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(11, (*it)->ref(META_HANDLE));
+ EXPECT_FALSE((*it)->ref(IS_DEL));
+ EXPECT_FALSE((*it)->ref(IS_DIR));
+ EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+ EXPECT_EQ("http://dev.chromium.org/",
+ (*it)->ref(SPECIFICS).bookmark().url());
+ EXPECT_EQ("AGATWA",
+ (*it)->ref(SPECIFICS).bookmark().favicon());
+ EXPECT_EQ("http://dev.chromium.org/other",
+ (*it)->ref(SERVER_SPECIFICS).bookmark().url());
+ EXPECT_EQ("AFAGVASF",
+ (*it)->ref(SERVER_SPECIFICS).bookmark().favicon());
+ EXPECT_EQ("", (*it)->ref(UNIQUE_SERVER_TAG));
+ EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(NON_UNIQUE_NAME));
+ EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(SERVER_NON_UNIQUE_NAME));
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(12, (*it)->ref(META_HANDLE));
+ EXPECT_FALSE((*it)->ref(IS_DEL));
+ EXPECT_TRUE((*it)->ref(IS_DIR));
+ EXPECT_EQ("Extra Bookmarks", (*it)->ref(NON_UNIQUE_NAME));
+ EXPECT_EQ("Extra Bookmarks", (*it)->ref(SERVER_NON_UNIQUE_NAME));
+ EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
+ EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
+ EXPECT_FALSE(
+ (*it)->ref(SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE(
+ (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
+ EXPECT_FALSE(
+ (*it)->ref(SPECIFICS).bookmark().has_favicon());
+ EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(13, (*it)->ref(META_HANDLE));
+
+ ASSERT_TRUE(++it != index.end());
+ ASSERT_EQ(14, (*it)->ref(META_HANDLE));
+
+ ASSERT_TRUE(++it == index.end());
+}
+
+INSTANTIATE_TEST_CASE_P(DirectoryBackingStore, MigrationTest,
+ testing::Range(67, kCurrentDBVersion));
+
+TEST_F(DirectoryBackingStoreTest, ModelTypeIds) {
+ for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
+ std::string model_id =
+ TestDirectoryBackingStore::ModelTypeEnumToModelId(ModelTypeFromInt(i));
+ EXPECT_EQ(i,
+ TestDirectoryBackingStore::ModelIdToModelTypeEnum(model_id.data(),
+ model_id.size()));
+ }
+}
+
+// TODO(109668): This had to be disabled because the latest API will
+// intentionally crash when a database is this badly corrupted.
+TEST_F(DirectoryBackingStoreTest, DISABLED_Corruption) {
+ {
+ scoped_ptr<OnDiskDirectoryBackingStore> dbs(
+ new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
+ EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
+ }
+ std::string bad_data("BAD DATA");
+ EXPECT_TRUE(file_util::WriteFile(GetDatabasePath(), bad_data.data(),
+ bad_data.size()));
+ {
+ scoped_ptr<OnDiskDirectoryBackingStore> dbs(
+ new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
+
+ EXPECT_FALSE(LoadAndIgnoreReturnedData(dbs.get()));
+ }
+}
+
+TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+
+ SetUpCurrentDatabaseAndCheckVersion(&connection);
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ MetahandlesIndex index;
+ Directory::KernelLoadInfo kernel_load_info;
+ STLElementDeleter<MetahandlesIndex> index_deleter(&index);
+
+ dbs->Load(&index, &kernel_load_info);
+ size_t initial_size = index.size();
+ ASSERT_LT(0U, initial_size) << "Test requires entries to delete.";
+ int64 first_to_die = (*index.begin())->ref(META_HANDLE);
+ MetahandleSet to_delete;
+ to_delete.insert(first_to_die);
+ EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+ STLDeleteElements(&index);
+ dbs->LoadEntries(&index);
+
+ EXPECT_EQ(initial_size - 1, index.size());
+ bool delete_failed = false;
+ for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+ ++it) {
+ if ((*it)->ref(META_HANDLE) == first_to_die) {
+ delete_failed = true;
+ break;
+ }
+ }
+ EXPECT_FALSE(delete_failed);
+
+ to_delete.clear();
+ for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+ ++it) {
+ to_delete.insert((*it)->ref(META_HANDLE));
+ }
+
+ EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+ STLDeleteElements(&index);
+ dbs->LoadEntries(&index);
+ EXPECT_EQ(0U, index.size());
+}
+
+TEST_F(DirectoryBackingStoreTest, GenerateCacheGUID) {
+ const std::string& guid1 = TestDirectoryBackingStore::GenerateCacheGUID();
+ const std::string& guid2 = TestDirectoryBackingStore::GenerateCacheGUID();
+ EXPECT_EQ(24U, guid1.size());
+ EXPECT_EQ(24U, guid2.size());
+ // In theory this test can fail, but it won't before the universe
+ // dies of heat death.
+ EXPECT_NE(guid1, guid2);
+}
+
+} // namespace syncable
diff --git a/sync/syncable/directory_change_delegate.h b/sync/syncable/directory_change_delegate.h
new file mode 100644
index 0000000..e3b0f45
--- /dev/null
+++ b/sync/syncable/directory_change_delegate.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
+#define SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
+#pragma once
+
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace syncable {
+
+// This is an interface for listening to directory change events, triggered by
+// the releasing of the syncable transaction. The delegate performs work to
+// 1. Calculate changes, depending on the source of the transaction
+// (HandleCalculateChangesChangeEventFromSyncer/Syncapi).
+// 2. Perform final work while the transaction is held
+// (HandleTransactionEndingChangeEvent).
+// 3. Perform any work that should be done after the transaction is released.
+// (HandleTransactionCompleteChangeEvent).
+//
+// Note that these methods may be called on *any* thread.
+class DirectoryChangeDelegate {
+ public:
+ virtual void HandleCalculateChangesChangeEventFromSyncApi(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) = 0;
+ virtual void HandleCalculateChangesChangeEventFromSyncer(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) = 0;
+ // Must return the set of all ModelTypes that were modified in the
+ // transaction.
+ virtual ModelTypeSet HandleTransactionEndingChangeEvent(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) = 0;
+ virtual void HandleTransactionCompleteChangeEvent(
+ ModelTypeSet models_with_changes) = 0;
+ protected:
+ virtual ~DirectoryChangeDelegate() {}
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
diff --git a/sync/syncable/in_memory_directory_backing_store.cc b/sync/syncable/in_memory_directory_backing_store.cc
new file mode 100644
index 0000000..30d3e19
--- /dev/null
+++ b/sync/syncable/in_memory_directory_backing_store.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/in_memory_directory_backing_store.h"
+
+namespace syncable {
+
+InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
+ const std::string& dir_name) : DirectoryBackingStore(dir_name) {
+}
+
+DirOpenResult InMemoryDirectoryBackingStore::Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) {
+ if (!db_->is_open()) {
+ if (!db_->OpenInMemory())
+ return FAILED_OPEN_DATABASE;
+ }
+
+ if (!InitializeTables())
+ return FAILED_OPEN_DATABASE;
+
+ if (!LoadEntries(entry_bucket))
+ return FAILED_DATABASE_CORRUPT;
+ if (!LoadInfo(kernel_load_info))
+ return FAILED_DATABASE_CORRUPT;
+
+ return OPENED;
+}
+
+} // namespace syncable
diff --git a/sync/syncable/in_memory_directory_backing_store.h b/sync/syncable/in_memory_directory_backing_store.h
new file mode 100644
index 0000000..15f3171
--- /dev/null
+++ b/sync/syncable/in_memory_directory_backing_store.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
+#define SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
+#pragma once
+
+#include "sync/syncable/directory_backing_store.h"
+
+namespace syncable {
+
+// This implementation of DirectoryBackingStore is used in tests that do not
+// require us to write to a file. An in-memory sqlite database is much faster
+// than an on-disk database, so this can result in significant speedups in our
+// unit tests.
+//
+// An InMemoryDirectoryBackingStore cannot load data from existing databases.
+// When an InMemoryDirectoryBackingStore is destroyed, all data stored in this
+// database is lost. If these limitations are a problem for you, consider using
+// TestDirectoryBackingStore.
+class InMemoryDirectoryBackingStore : public DirectoryBackingStore {
+ public:
+ explicit InMemoryDirectoryBackingStore(const std::string& dir_name);
+ virtual DirOpenResult Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
diff --git a/sync/syncable/model_type.cc b/sync/syncable/model_type.cc
new file mode 100644
index 0000000..3b53a90
--- /dev/null
+++ b/sync/syncable/model_type.cc
@@ -0,0 +1,542 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_type.h"
+
+#include "base/string_split.h"
+#include "base/values.h"
+#include "sync/engine/syncproto.h"
+#include "sync/protocol/app_notification_specifics.pb.h"
+#include "sync/protocol/app_setting_specifics.pb.h"
+#include "sync/protocol/app_specifics.pb.h"
+#include "sync/protocol/autofill_specifics.pb.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/protocol/extension_setting_specifics.pb.h"
+#include "sync/protocol/extension_specifics.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "sync/protocol/preference_specifics.pb.h"
+#include "sync/protocol/search_engine_specifics.pb.h"
+#include "sync/protocol/session_specifics.pb.h"
+#include "sync/protocol/sync.pb.h"
+#include "sync/protocol/theme_specifics.pb.h"
+#include "sync/protocol/typed_url_specifics.pb.h"
+
+namespace syncable {
+
+void AddDefaultFieldValue(syncable::ModelType datatype,
+ sync_pb::EntitySpecifics* specifics) {
+ switch (datatype) {
+ case BOOKMARKS:
+ specifics->mutable_bookmark();
+ break;
+ case PASSWORDS:
+ specifics->mutable_password();
+ break;
+ case PREFERENCES:
+ specifics->mutable_preference();
+ break;
+ case AUTOFILL:
+ specifics->mutable_autofill();
+ break;
+ case AUTOFILL_PROFILE:
+ specifics->mutable_autofill_profile();
+ break;
+ case THEMES:
+ specifics->mutable_theme();
+ break;
+ case TYPED_URLS:
+ specifics->mutable_typed_url();
+ break;
+ case EXTENSIONS:
+ specifics->mutable_extension();
+ break;
+ case NIGORI:
+ specifics->mutable_nigori();
+ break;
+ case SEARCH_ENGINES:
+ specifics->mutable_search_engine();
+ break;
+ case SESSIONS:
+ specifics->mutable_session();
+ break;
+ case APPS:
+ specifics->mutable_app();
+ break;
+ case APP_SETTINGS:
+ specifics->mutable_app_setting();
+ break;
+ case EXTENSION_SETTINGS:
+ specifics->mutable_extension_setting();
+ break;
+ case APP_NOTIFICATIONS:
+ specifics->mutable_app_notification();
+ break;
+ default:
+ NOTREACHED() << "No known extension for model type.";
+ }
+}
+
+ModelType GetModelTypeFromSpecificsFieldNumber(int field_number) {
+ for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
+ ModelType model_type = ModelTypeFromInt(i);
+ if (GetSpecificsFieldNumberFromModelType(model_type) == field_number)
+ return model_type;
+ }
+ NOTREACHED();
+ return UNSPECIFIED;
+}
+
+int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
+ switch (model_type) {
+ case BOOKMARKS:
+ return sync_pb::EntitySpecifics::kBookmarkFieldNumber;
+ break;
+ case PASSWORDS:
+ return sync_pb::EntitySpecifics::kPasswordFieldNumber;
+ break;
+ case PREFERENCES:
+ return sync_pb::EntitySpecifics::kPreferenceFieldNumber;
+ break;
+ case AUTOFILL:
+ return sync_pb::EntitySpecifics::kAutofillFieldNumber;
+ break;
+ case AUTOFILL_PROFILE:
+ return sync_pb::EntitySpecifics::kAutofillProfileFieldNumber;
+ break;
+ case THEMES:
+ return sync_pb::EntitySpecifics::kThemeFieldNumber;
+ break;
+ case TYPED_URLS:
+ return sync_pb::EntitySpecifics::kTypedUrlFieldNumber;
+ break;
+ case EXTENSIONS:
+ return sync_pb::EntitySpecifics::kExtensionFieldNumber;
+ break;
+ case NIGORI:
+ return sync_pb::EntitySpecifics::kNigoriFieldNumber;
+ break;
+ case SEARCH_ENGINES:
+ return sync_pb::EntitySpecifics::kSearchEngineFieldNumber;
+ break;
+ case SESSIONS:
+ return sync_pb::EntitySpecifics::kSessionFieldNumber;
+ break;
+ case APPS:
+ return sync_pb::EntitySpecifics::kAppFieldNumber;
+ break;
+ case APP_SETTINGS:
+ return sync_pb::EntitySpecifics::kAppSettingFieldNumber;
+ break;
+ case EXTENSION_SETTINGS:
+ return sync_pb::EntitySpecifics::kExtensionSettingFieldNumber;
+ break;
+ case APP_NOTIFICATIONS:
+ return sync_pb::EntitySpecifics::kAppNotificationFieldNumber;
+ break;
+ default:
+ NOTREACHED() << "No known extension for model type.";
+ return 0;
+ }
+ NOTREACHED() << "Needed for linux_keep_shadow_stacks because of "
+ << "http://gcc.gnu.org/bugzilla/show_bug.cgi?id=20681";
+ return 0;
+}
+
+// Note: keep this consistent with GetModelType in syncable.cc!
+ModelType GetModelType(const sync_pb::SyncEntity& sync_pb_entity) {
+ const browser_sync::SyncEntity& sync_entity =
+ static_cast<const browser_sync::SyncEntity&>(sync_pb_entity);
+ DCHECK(!sync_entity.id().IsRoot()); // Root shouldn't ever go over the wire.
+
+ if (sync_entity.deleted())
+ return UNSPECIFIED;
+
+ // Backwards compatibility with old (pre-specifics) protocol.
+ if (sync_entity.has_bookmarkdata())
+ return BOOKMARKS;
+
+ ModelType specifics_type = GetModelTypeFromSpecifics(sync_entity.specifics());
+ if (specifics_type != UNSPECIFIED)
+ return specifics_type;
+
+ // Loose check for server-created top-level folders that aren't
+ // bound to a particular model type.
+ if (!sync_entity.server_defined_unique_tag().empty() &&
+ sync_entity.IsFolder()) {
+ return TOP_LEVEL_FOLDER;
+ }
+
+ // This is an item of a datatype we can't understand. Maybe it's
+ // from the future? Either we mis-encoded the object, or the
+ // server sent us entries it shouldn't have.
+ NOTREACHED() << "Unknown datatype in sync proto.";
+ return UNSPECIFIED;
+}
+
+ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
+ if (specifics.has_bookmark())
+ return BOOKMARKS;
+
+ if (specifics.has_password())
+ return PASSWORDS;
+
+ if (specifics.has_preference())
+ return PREFERENCES;
+
+ if (specifics.has_autofill())
+ return AUTOFILL;
+
+ if (specifics.has_autofill_profile())
+ return AUTOFILL_PROFILE;
+
+ if (specifics.has_theme())
+ return THEMES;
+
+ if (specifics.has_typed_url())
+ return TYPED_URLS;
+
+ if (specifics.has_extension())
+ return EXTENSIONS;
+
+ if (specifics.has_nigori())
+ return NIGORI;
+
+ if (specifics.has_app())
+ return APPS;
+
+ if (specifics.has_search_engine())
+ return SEARCH_ENGINES;
+
+ if (specifics.has_session())
+ return SESSIONS;
+
+ if (specifics.has_app_setting())
+ return APP_SETTINGS;
+
+ if (specifics.has_extension_setting())
+ return EXTENSION_SETTINGS;
+
+ if (specifics.has_app_notification())
+ return APP_NOTIFICATIONS;
+
+ return UNSPECIFIED;
+}
+
+bool ShouldMaintainPosition(ModelType model_type) {
+ return model_type == BOOKMARKS;
+}
+
+const char* ModelTypeToString(ModelType model_type) {
+ // This is used in serialization routines as well as for displaying debug
+ // information. Do not attempt to change these string values unless you know
+ // what you're doing.
+ switch (model_type) {
+ case TOP_LEVEL_FOLDER:
+ return "Top Level Folder";
+ case UNSPECIFIED:
+ return "Unspecified";
+ case BOOKMARKS:
+ return "Bookmarks";
+ case PREFERENCES:
+ return "Preferences";
+ case PASSWORDS:
+ return "Passwords";
+ case AUTOFILL:
+ return "Autofill";
+ case THEMES:
+ return "Themes";
+ case TYPED_URLS:
+ return "Typed URLs";
+ case EXTENSIONS:
+ return "Extensions";
+ case NIGORI:
+ return "Encryption keys";
+ case SEARCH_ENGINES:
+ return "Search Engines";
+ case SESSIONS:
+ return "Sessions";
+ case APPS:
+ return "Apps";
+ case AUTOFILL_PROFILE:
+ return "Autofill Profiles";
+ case APP_SETTINGS:
+ return "App settings";
+ case EXTENSION_SETTINGS:
+ return "Extension settings";
+ case APP_NOTIFICATIONS:
+ return "App Notifications";
+ default:
+ break;
+ }
+ NOTREACHED() << "No known extension for model type.";
+ return "INVALID";
+}
+
+StringValue* ModelTypeToValue(ModelType model_type) {
+ if (model_type >= syncable::FIRST_REAL_MODEL_TYPE) {
+ return Value::CreateStringValue(ModelTypeToString(model_type));
+ } else if (model_type == syncable::TOP_LEVEL_FOLDER) {
+ return Value::CreateStringValue("Top-level folder");
+ } else if (model_type == syncable::UNSPECIFIED) {
+ return Value::CreateStringValue("Unspecified");
+ }
+ NOTREACHED();
+ return Value::CreateStringValue("");
+}
+
+ModelType ModelTypeFromValue(const Value& value) {
+ if (value.IsType(Value::TYPE_STRING)) {
+ std::string result;
+ CHECK(value.GetAsString(&result));
+ return ModelTypeFromString(result);
+ } else if (value.IsType(Value::TYPE_INTEGER)) {
+ int result;
+ CHECK(value.GetAsInteger(&result));
+ return ModelTypeFromInt(result);
+ } else {
+ NOTREACHED() << "Unsupported value type: " << value.GetType();
+ return UNSPECIFIED;
+ }
+}
+
+ModelType ModelTypeFromString(const std::string& model_type_string) {
+ if (model_type_string == "Bookmarks")
+ return BOOKMARKS;
+ else if (model_type_string == "Preferences")
+ return PREFERENCES;
+ else if (model_type_string == "Passwords")
+ return PASSWORDS;
+ else if (model_type_string == "Autofill")
+ return AUTOFILL;
+ else if (model_type_string == "Autofill Profiles")
+ return AUTOFILL_PROFILE;
+ else if (model_type_string == "Themes")
+ return THEMES;
+ else if (model_type_string == "Typed URLs")
+ return TYPED_URLS;
+ else if (model_type_string == "Extensions")
+ return EXTENSIONS;
+ else if (model_type_string == "Encryption keys")
+ return NIGORI;
+ else if (model_type_string == "Search Engines")
+ return SEARCH_ENGINES;
+ else if (model_type_string == "Sessions")
+ return SESSIONS;
+ else if (model_type_string == "Apps")
+ return APPS;
+ else if (model_type_string == "App settings")
+ return APP_SETTINGS;
+ else if (model_type_string == "Extension settings")
+ return EXTENSION_SETTINGS;
+ else if (model_type_string == "App Notifications")
+ return APP_NOTIFICATIONS;
+ else
+ NOTREACHED() << "No known model type corresponding to "
+ << model_type_string << ".";
+ return UNSPECIFIED;
+}
+
+std::string ModelTypeSetToString(ModelTypeSet model_types) {
+ std::string result;
+ for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
+ if (!result.empty()) {
+ result += ", ";
+ }
+ result += ModelTypeToString(it.Get());
+ }
+ return result;
+}
+
+base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types) {
+ ListValue* value = new ListValue();
+ for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
+ value->Append(
+ Value::CreateStringValue(ModelTypeToString(it.Get())));
+ }
+ return value;
+}
+
+ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value) {
+ ModelTypeSet result;
+ for (ListValue::const_iterator i = value.begin(); i != value.end(); ++i) {
+ result.Put(ModelTypeFromValue(**i));
+ }
+ return result;
+}
+
+// TODO(zea): remove all hardcoded tags in model associators and have them use
+// this instead.
+std::string ModelTypeToRootTag(ModelType type) {
+ switch (type) {
+ case BOOKMARKS:
+ return "google_chrome_bookmarks";
+ case PREFERENCES:
+ return "google_chrome_preferences";
+ case PASSWORDS:
+ return "google_chrome_passwords";
+ case AUTOFILL:
+ return "google_chrome_autofill";
+ case THEMES:
+ return "google_chrome_themes";
+ case TYPED_URLS:
+ return "google_chrome_typed_urls";
+ case EXTENSIONS:
+ return "google_chrome_extensions";
+ case NIGORI:
+ return "google_chrome_nigori";
+ case SEARCH_ENGINES:
+ return "google_chrome_search_engines";
+ case SESSIONS:
+ return "google_chrome_sessions";
+ case APPS:
+ return "google_chrome_apps";
+ case AUTOFILL_PROFILE:
+ return "google_chrome_autofill_profiles";
+ case APP_SETTINGS:
+ return "google_chrome_app_settings";
+ case EXTENSION_SETTINGS:
+ return "google_chrome_extension_settings";
+ case APP_NOTIFICATIONS:
+ return "google_chrome_app_notifications";
+ default:
+ break;
+ }
+ NOTREACHED() << "No known extension for model type.";
+ return "INVALID";
+}
+
+// TODO(akalin): Figure out a better way to do these mappings.
+
+namespace {
+const char kBookmarkNotificationType[] = "BOOKMARK";
+const char kPreferenceNotificationType[] = "PREFERENCE";
+const char kPasswordNotificationType[] = "PASSWORD";
+const char kAutofillNotificationType[] = "AUTOFILL";
+const char kThemeNotificationType[] = "THEME";
+const char kTypedUrlNotificationType[] = "TYPED_URL";
+const char kExtensionNotificationType[] = "EXTENSION";
+const char kExtensionSettingNotificationType[] = "EXTENSION_SETTING";
+const char kNigoriNotificationType[] = "NIGORI";
+const char kAppSettingNotificationType[] = "APP_SETTING";
+const char kAppNotificationType[] = "APP";
+const char kSearchEngineNotificationType[] = "SEARCH_ENGINE";
+const char kSessionNotificationType[] = "SESSION";
+const char kAutofillProfileNotificationType[] = "AUTOFILL_PROFILE";
+const char kAppNotificationNotificationType[] = "APP_NOTIFICATION";
+} // namespace
+
+bool RealModelTypeToNotificationType(ModelType model_type,
+                                     std::string* notification_type) {
+  switch (model_type) {
+    case BOOKMARKS:
+      *notification_type = kBookmarkNotificationType;
+      return true;
+    case PREFERENCES:
+      *notification_type = kPreferenceNotificationType;
+      return true;
+    case PASSWORDS:
+      *notification_type = kPasswordNotificationType;
+      return true;
+    case AUTOFILL:
+      *notification_type = kAutofillNotificationType;
+      return true;
+    case THEMES:
+      *notification_type = kThemeNotificationType;
+      return true;
+    case TYPED_URLS:
+      *notification_type = kTypedUrlNotificationType;
+      return true;
+    case EXTENSIONS:
+      *notification_type = kExtensionNotificationType;
+      return true;
+    case NIGORI:
+      *notification_type = kNigoriNotificationType;
+      return true;
+    case APP_SETTINGS:
+      // Must match NotificationTypeToRealModelType (was kAppNotificationType).
+      *notification_type = kAppSettingNotificationType;
+      return true;
+    case APPS:
+      *notification_type = kAppNotificationType;
+      return true;
+    case SEARCH_ENGINES:
+      *notification_type = kSearchEngineNotificationType;
+      return true;
+    case SESSIONS:
+      *notification_type = kSessionNotificationType;
+      return true;
+    case AUTOFILL_PROFILE:
+      *notification_type = kAutofillProfileNotificationType;
+      return true;
+    case EXTENSION_SETTINGS:
+      *notification_type = kExtensionSettingNotificationType;
+      return true;
+    case APP_NOTIFICATIONS:
+      *notification_type = kAppNotificationNotificationType;
+      return true;
+    default:
+      break;
+  }
+  notification_type->clear();
+  return false;
+}
+
+bool NotificationTypeToRealModelType(const std::string& notification_type,
+ ModelType* model_type) {
+ if (notification_type == kBookmarkNotificationType) {
+ *model_type = BOOKMARKS;
+ return true;
+ } else if (notification_type == kPreferenceNotificationType) {
+ *model_type = PREFERENCES;
+ return true;
+ } else if (notification_type == kPasswordNotificationType) {
+ *model_type = PASSWORDS;
+ return true;
+ } else if (notification_type == kAutofillNotificationType) {
+ *model_type = AUTOFILL;
+ return true;
+ } else if (notification_type == kThemeNotificationType) {
+ *model_type = THEMES;
+ return true;
+ } else if (notification_type == kTypedUrlNotificationType) {
+ *model_type = TYPED_URLS;
+ return true;
+ } else if (notification_type == kExtensionNotificationType) {
+ *model_type = EXTENSIONS;
+ return true;
+ } else if (notification_type == kNigoriNotificationType) {
+ *model_type = NIGORI;
+ return true;
+ } else if (notification_type == kAppNotificationType) {
+ *model_type = APPS;
+ return true;
+ } else if (notification_type == kSearchEngineNotificationType) {
+ *model_type = SEARCH_ENGINES;
+ return true;
+ } else if (notification_type == kSessionNotificationType) {
+ *model_type = SESSIONS;
+ return true;
+ } else if (notification_type == kAutofillProfileNotificationType) {
+ *model_type = AUTOFILL_PROFILE;
+ return true;
+ } else if (notification_type == kAppSettingNotificationType) {
+ *model_type = APP_SETTINGS;
+ return true;
+ } else if (notification_type == kExtensionSettingNotificationType) {
+ *model_type = EXTENSION_SETTINGS;
+ return true;
+ } else if (notification_type == kAppNotificationNotificationType) {
+ *model_type = APP_NOTIFICATIONS;
+ return true;
+ } else {
+ *model_type = UNSPECIFIED;
+ return false;
+ }
+}
+
+bool IsRealDataType(ModelType model_type) {
+ return model_type >= FIRST_REAL_MODEL_TYPE && model_type < MODEL_TYPE_COUNT;
+}
+
+} // namespace syncable
diff --git a/sync/syncable/model_type.h b/sync/syncable/model_type.h
new file mode 100644
index 0000000..133af9c
--- /dev/null
+++ b/sync/syncable/model_type.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Enumerate the various item subtypes that are supported by sync.
+// Each sync object is expected to have an immutable object type.
+// An object's type is inferred from the type of data it holds.
+
+#ifndef SYNC_SYNCABLE_MODEL_TYPE_H_
+#define SYNC_SYNCABLE_MODEL_TYPE_H_
+#pragma once
+
+#include <set>
+#include <string>
+
+#include "base/logging.h"
+#include "base/time.h"
+#include "sync/util/enum_set.h"
+
+namespace base {
+class ListValue;
+class StringValue;
+class Value;
+}
+
+namespace sync_pb {
+class EntitySpecifics;
+class SyncEntity;
+}
+
+namespace syncable {
+
+enum ModelType {
+ // Object type unknown. Objects may transition through
+ // the unknown state during their initial creation, before
+ // their properties are set. After deletion, object types
+ // are generally preserved.
+ UNSPECIFIED,
+ // A permanent folder whose children may be of mixed
+ // datatypes (e.g. the "Google Chrome" folder).
+ TOP_LEVEL_FOLDER,
+
+ // ------------------------------------ Start of "real" model types.
+ // The model types declared before here are somewhat special, as they
+ // they do not correspond to any browser data model. The remaining types
+ // are bona fide model types; all have a related browser data model and
+ // can be represented in the protocol using a specific Message type in the
+ // EntitySpecifics protocol buffer.
+ //
+ // A bookmark folder or a bookmark URL object.
+ BOOKMARKS,
+ FIRST_REAL_MODEL_TYPE = BOOKMARKS, // Declared 2nd, for debugger prettiness.
+
+ // A preference folder or a preference object.
+ PREFERENCES,
+ // A password folder or password object.
+ PASSWORDS,
+ // An AutofillProfile Object
+ AUTOFILL_PROFILE,
+ // An autofill folder or an autofill object.
+ AUTOFILL,
+
+ // A themes folder or a themes object.
+ THEMES,
+ // A typed_url folder or a typed_url object.
+ TYPED_URLS,
+ // An extension folder or an extension object.
+ EXTENSIONS,
+ // An object representing a set of Nigori keys.
+ NIGORI,
+ // An object representing a custom search engine.
+ SEARCH_ENGINES,
+ // An object representing a browser session.
+ SESSIONS,
+ // An app folder or an app object.
+ APPS,
+ // An app setting from the extension settings API.
+ APP_SETTINGS,
+ // An extension setting from the extension settings API.
+ EXTENSION_SETTINGS,
+ // App notifications.
+ APP_NOTIFICATIONS,
+ LAST_REAL_MODEL_TYPE = APP_NOTIFICATIONS,
+
+ // If you are adding a new sync datatype that is exposed to the user via the
+ // sync preferences UI, be sure to update the list in
+ // chrome/browser/sync/user_selectable_sync_type.h so that the UMA histograms
+ // for sync include your new type.
+
+ MODEL_TYPE_COUNT,
+};
+
+typedef browser_sync::EnumSet<
+ ModelType, FIRST_REAL_MODEL_TYPE, LAST_REAL_MODEL_TYPE> ModelTypeSet;
+typedef browser_sync::EnumSet<
+ ModelType, UNSPECIFIED, LAST_REAL_MODEL_TYPE> FullModelTypeSet;
+
+inline ModelType ModelTypeFromInt(int i) {
+ DCHECK_GE(i, 0);
+ DCHECK_LT(i, MODEL_TYPE_COUNT);
+ return static_cast<ModelType>(i);
+}
+
+void AddDefaultFieldValue(syncable::ModelType datatype,
+ sync_pb::EntitySpecifics* specifics);
+
+// Extract the model type of a SyncEntity protocol buffer. ModelType is a
+// local concept: the enum is not in the protocol. The SyncEntity's ModelType
+// is inferred from the presence of particular datatype field in the
+// entity specifics.
+ModelType GetModelType(const sync_pb::SyncEntity& sync_entity);
+
+// Extract the model type from an EntitySpecifics field. Note that there
+// are some ModelTypes (like TOP_LEVEL_FOLDER) that can't be inferred this way;
+// prefer using GetModelType where possible.
+ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics);
+
+// If this returns false, we shouldn't bother maintaining a position
+// value (sibling ordering) for this item.
+bool ShouldMaintainPosition(ModelType model_type);
+
+// Determine a model type from the field number of its associated
+// EntitySpecifics field.
+ModelType GetModelTypeFromSpecificsFieldNumber(int field_number);
+
+// Return the field number of the EntitySpecifics field associated with
+// a model type.
+int GetSpecificsFieldNumberFromModelType(ModelType model_type);
+
+// TODO(sync): The functions below badly need some cleanup.
+
+// Returns a pointer to a string with application lifetime that represents
+// the name of |model_type|.
+const char* ModelTypeToString(ModelType model_type);
+
+// Handles all model types, and not just real ones.
+//
+// Caller takes ownership of returned value.
+base::StringValue* ModelTypeToValue(ModelType model_type);
+
+// Converts a Value into a ModelType - complement to ModelTypeToValue().
+ModelType ModelTypeFromValue(const base::Value& value);
+
+// Returns the ModelType corresponding to the name |model_type_string|.
+ModelType ModelTypeFromString(const std::string& model_type_string);
+
+std::string ModelTypeSetToString(ModelTypeSet model_types);
+
+// Caller takes ownership of returned list.
+base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types);
+
+ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value);
+
+// Returns a string corresponding to the syncable tag for this datatype.
+std::string ModelTypeToRootTag(ModelType type);
+
+// Convert a real model type to a notification type (used for
+// subscribing to server-issued notifications). Returns true iff
+// |model_type| was a real model type and |notification_type| was
+// filled in.
+bool RealModelTypeToNotificationType(ModelType model_type,
+ std::string* notification_type);
+
+// Converts a notification type to a real model type. Returns true
+// iff |notification_type| was the notification type of a real model
+// type and |model_type| was filled in.
+bool NotificationTypeToRealModelType(const std::string& notification_type,
+ ModelType* model_type);
+
+// Returns true if |model_type| is a real datatype
+bool IsRealDataType(ModelType model_type);
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_MODEL_TYPE_H_
diff --git a/sync/syncable/model_type_payload_map.cc b/sync/syncable/model_type_payload_map.cc
new file mode 100644
index 0000000..dde1cd8
--- /dev/null
+++ b/sync/syncable/model_type_payload_map.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_type_payload_map.h"
+
+#include <vector>
+
+#include "base/json/json_writer.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+
+using browser_sync::ModelSafeRoutingInfo;
+namespace syncable {
+
+ModelTypePayloadMap ModelTypePayloadMapFromEnumSet(
+ syncable::ModelTypeSet types,
+ const std::string& payload) {
+ ModelTypePayloadMap types_with_payloads;
+ for (syncable::ModelTypeSet::Iterator it = types.First();
+ it.Good(); it.Inc()) {
+ types_with_payloads[it.Get()] = payload;
+ }
+ return types_with_payloads;
+}
+
+ModelTypeSet ModelTypePayloadMapToEnumSet(
+ const ModelTypePayloadMap& payload_map) {
+ ModelTypeSet types;
+ for (ModelTypePayloadMap::const_iterator it = payload_map.begin();
+ it != payload_map.end(); ++it) {
+ types.Put(it->first);
+ }
+ return types;
+}
+
+ModelTypePayloadMap ModelTypePayloadMapFromRoutingInfo(
+ const browser_sync::ModelSafeRoutingInfo& routes,
+ const std::string& payload) {
+ ModelTypePayloadMap types_with_payloads;
+ for (browser_sync::ModelSafeRoutingInfo::const_iterator i = routes.begin();
+ i != routes.end(); ++i) {
+ types_with_payloads[i->first] = payload;
+ }
+ return types_with_payloads;
+}
+
+std::string ModelTypePayloadMapToString(
+ const ModelTypePayloadMap& type_payloads) {
+ scoped_ptr<DictionaryValue> value(
+ ModelTypePayloadMapToValue(type_payloads));
+ std::string json;
+ base::JSONWriter::Write(value.get(), false, &json);
+ return json;
+}
+
+DictionaryValue* ModelTypePayloadMapToValue(
+ const ModelTypePayloadMap& type_payloads) {
+ DictionaryValue* value = new DictionaryValue();
+ for (ModelTypePayloadMap::const_iterator it = type_payloads.begin();
+ it != type_payloads.end(); ++it) {
+ value->SetString(syncable::ModelTypeToString(it->first), it->second);
+ }
+ return value;
+}
+
+void CoalescePayloads(ModelTypePayloadMap* original,
+ const ModelTypePayloadMap& update) {
+ for (ModelTypePayloadMap::const_iterator i = update.begin();
+ i != update.end(); ++i) {
+ if (original->count(i->first) == 0) {
+ // If this datatype isn't already in our map, add it with
+ // whatever payload it has.
+ (*original)[i->first] = i->second;
+ } else if (i->second.length() > 0) {
+ // If this datatype is already in our map, we only overwrite the
+ // payload if the new one is non-empty.
+ (*original)[i->first] = i->second;
+ }
+ }
+}
+
+void PurgeStalePayload(ModelTypePayloadMap* original,
+ const ModelSafeRoutingInfo& routing_info) {
+ std::vector<ModelTypePayloadMap::iterator> iterators_to_delete;
+ for (ModelTypePayloadMap::iterator i = original->begin();
+ i != original->end(); ++i) {
+ if (routing_info.end() == routing_info.find(i->first)) {
+ iterators_to_delete.push_back(i);
+ }
+ }
+
+ for (std::vector<ModelTypePayloadMap::iterator>::iterator
+ it = iterators_to_delete.begin(); it != iterators_to_delete.end();
+ ++it) {
+ original->erase(*it);
+ }
+}
+
+} // namespace syncable
diff --git a/sync/syncable/model_type_payload_map.h b/sync/syncable/model_type_payload_map.h
new file mode 100644
index 0000000..f46f743
--- /dev/null
+++ b/sync/syncable/model_type_payload_map.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Definition of ModelTypePayloadMap and various utility functions.
+
+#ifndef SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
+#define SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "sync/engine/model_safe_worker.h"
+#include "sync/syncable/model_type.h"
+
+namespace base {
+class DictionaryValue;
+}
+
+namespace syncable {
+
+// A container that contains a set of datatypes with possible string
+// payloads.
+typedef std::map<ModelType, std::string> ModelTypePayloadMap;
+
+// Helper functions for building ModelTypePayloadMaps.
+
+// Make a TypePayloadMap from all the types in a ModelTypeSet using a
+// default payload.
+ModelTypePayloadMap ModelTypePayloadMapFromEnumSet(
+ ModelTypeSet model_types, const std::string& payload);
+
+ModelTypeSet ModelTypePayloadMapToEnumSet(
+ const ModelTypePayloadMap& payload_map);
+
+// Make a TypePayloadMap for all the enabled types in a
+// ModelSafeRoutingInfo using a default payload.
+ModelTypePayloadMap ModelTypePayloadMapFromRoutingInfo(
+ const browser_sync::ModelSafeRoutingInfo& routes,
+ const std::string& payload);
+
+std::string ModelTypePayloadMapToString(
+ const ModelTypePayloadMap& model_type_payloads);
+
+// Caller takes ownership of the returned dictionary.
+base::DictionaryValue* ModelTypePayloadMapToValue(
+ const ModelTypePayloadMap& model_type_payloads);
+
+// Coalesce |update| into |original|, overwriting only when |update| has
+// a non-empty payload.
+void CoalescePayloads(ModelTypePayloadMap* original,
+ const ModelTypePayloadMap& update);
+
+void PurgeStalePayload(ModelTypePayloadMap* original,
+ const browser_sync::ModelSafeRoutingInfo& routing_info);
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
diff --git a/sync/syncable/model_type_payload_map_unittest.cc b/sync/syncable/model_type_payload_map_unittest.cc
new file mode 100644
index 0000000..27e36a1
--- /dev/null
+++ b/sync/syncable/model_type_payload_map_unittest.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_type_payload_map.h"
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/values_test_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncable {
+namespace {
+
+using base::ExpectDictStringValue;
+
+class ModelTypePayloadMapTest : public testing::Test {};
+
+TEST_F(ModelTypePayloadMapTest, TypePayloadMapToSet) {
+ ModelTypePayloadMap payloads;
+ payloads[BOOKMARKS] = "bookmarkpayload";
+ payloads[APPS] = "";
+
+ const ModelTypeSet types(BOOKMARKS, APPS);
+ EXPECT_TRUE(ModelTypePayloadMapToEnumSet(payloads).Equals(types));
+}
+
+TEST_F(ModelTypePayloadMapTest, TypePayloadMapToValue) {
+ ModelTypePayloadMap payloads;
+ payloads[BOOKMARKS] = "bookmarkpayload";
+ payloads[APPS] = "";
+
+ scoped_ptr<DictionaryValue> value(ModelTypePayloadMapToValue(payloads));
+ EXPECT_EQ(2u, value->size());
+ ExpectDictStringValue("bookmarkpayload", *value, "Bookmarks");
+ ExpectDictStringValue("", *value, "Apps");
+ EXPECT_FALSE(value->HasKey("Preferences"));
+}
+
+} // namespace
+} // namespace syncable
diff --git a/sync/syncable/model_type_test_util.cc b/sync/syncable/model_type_test_util.cc
new file mode 100644
index 0000000..a3b6742
--- /dev/null
+++ b/sync/syncable/model_type_test_util.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_type_test_util.h"
+
+namespace syncable {
+
+void PrintTo(ModelTypeSet model_types, ::std::ostream* os) {
+ *os << ModelTypeSetToString(model_types);
+}
+
+namespace {
+
+// Matcher implementation for HasModelTypes().
+class HasModelTypesMatcher
+ : public ::testing::MatcherInterface<ModelTypeSet> {
+ public:
+ explicit HasModelTypesMatcher(ModelTypeSet expected_types)
+ : expected_types_(expected_types) {}
+
+ virtual ~HasModelTypesMatcher() {}
+
+ virtual bool MatchAndExplain(
+ ModelTypeSet model_types,
+ ::testing::MatchResultListener* listener) const {
+ // No need to annotate listener since we already define PrintTo().
+ return model_types.Equals(expected_types_);
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "has model types " << ModelTypeSetToString(expected_types_);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't have model types "
+ << ModelTypeSetToString(expected_types_);
+ }
+
+ private:
+ const ModelTypeSet expected_types_;
+
+ DISALLOW_COPY_AND_ASSIGN(HasModelTypesMatcher);
+};
+
+} // namespace
+
+::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types) {
+ return ::testing::MakeMatcher(new HasModelTypesMatcher(expected_types));
+}
+
+} // namespace syncable
diff --git a/sync/syncable/model_type_test_util.h b/sync/syncable/model_type_test_util.h
new file mode 100644
index 0000000..a852a07
--- /dev/null
+++ b/sync/syncable/model_type_test_util.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
+#define SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
+#pragma once
+
+#include <ostream>
+
+#include "sync/syncable/model_type.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace syncable {
+
+// Defined for googletest. Forwards to ModelTypeSetToString().
+void PrintTo(ModelTypeSet model_types, ::std::ostream* os);
+
+// A gmock matcher for ModelTypeSet. Use like:
+//
+// EXPECT_CALL(mock, ProcessModelTypes(HasModelTypes(expected_types)));
+::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types);
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
diff --git a/sync/syncable/model_type_unittest.cc b/sync/syncable/model_type_unittest.cc
new file mode 100644
index 0000000..a8925b6
--- /dev/null
+++ b/sync/syncable/model_type_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/model_type.h"
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/values_test_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncable {
+namespace {
+
+class ModelTypeTest : public testing::Test {};
+
+TEST_F(ModelTypeTest, ModelTypeToValue) {
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i < syncable::MODEL_TYPE_COUNT; ++i) {
+ ModelType model_type = ModelTypeFromInt(i);
+ base::ExpectStringValue(ModelTypeToString(model_type),
+ ModelTypeToValue(model_type));
+ }
+ base::ExpectStringValue("Top-level folder",
+ ModelTypeToValue(TOP_LEVEL_FOLDER));
+ base::ExpectStringValue("Unspecified",
+ ModelTypeToValue(UNSPECIFIED));
+}
+
+TEST_F(ModelTypeTest, ModelTypeFromValue) {
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i < syncable::MODEL_TYPE_COUNT; ++i) {
+ ModelType model_type = ModelTypeFromInt(i);
+ scoped_ptr<StringValue> value(ModelTypeToValue(model_type));
+ EXPECT_EQ(model_type, ModelTypeFromValue(*value));
+ }
+}
+
+TEST_F(ModelTypeTest, ModelTypeSetToValue) {
+ const ModelTypeSet model_types(syncable::BOOKMARKS, syncable::APPS);
+
+ scoped_ptr<ListValue> value(ModelTypeSetToValue(model_types));
+ EXPECT_EQ(2u, value->GetSize());
+ std::string types[2];
+ EXPECT_TRUE(value->GetString(0, &types[0]));
+ EXPECT_TRUE(value->GetString(1, &types[1]));
+ EXPECT_EQ("Bookmarks", types[0]);
+ EXPECT_EQ("Apps", types[1]);
+}
+
+TEST_F(ModelTypeTest, ModelTypeSetFromValue) {
+ // Try empty set first.
+ ModelTypeSet model_types;
+ scoped_ptr<ListValue> value(ModelTypeSetToValue(model_types));
+ EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
+
+ // Now try with a few random types.
+ model_types.Put(BOOKMARKS);
+ model_types.Put(APPS);
+ value.reset(ModelTypeSetToValue(model_types));
+ EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
+}
+
+TEST_F(ModelTypeTest, IsRealDataType) {
+ EXPECT_FALSE(IsRealDataType(UNSPECIFIED));
+ EXPECT_FALSE(IsRealDataType(MODEL_TYPE_COUNT));
+ EXPECT_FALSE(IsRealDataType(TOP_LEVEL_FOLDER));
+ EXPECT_TRUE(IsRealDataType(FIRST_REAL_MODEL_TYPE));
+ EXPECT_TRUE(IsRealDataType(BOOKMARKS));
+ EXPECT_TRUE(IsRealDataType(APPS));
+}
+
+} // namespace
+} // namespace syncable
diff --git a/sync/syncable/on_disk_directory_backing_store.cc b/sync/syncable/on_disk_directory_backing_store.cc
new file mode 100644
index 0000000..22ffb77
--- /dev/null
+++ b/sync/syncable/on_disk_directory_backing_store.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/on_disk_directory_backing_store.h"
+
+#include "base/logging.h"
+
+namespace syncable {
+
+OnDiskDirectoryBackingStore::OnDiskDirectoryBackingStore(
+ const std::string& dir_name, const FilePath& backing_filepath)
+ : DirectoryBackingStore(dir_name),
+ backing_filepath_(backing_filepath) {
+ db_->set_exclusive_locking();
+ db_->set_page_size(4096);
+}
+
+DirOpenResult OnDiskDirectoryBackingStore::Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) {
+ DCHECK(CalledOnValidThread());
+ if (!db_->is_open()) {
+ if (!db_->Open(backing_filepath_))
+ return FAILED_OPEN_DATABASE;
+ }
+
+ if (!InitializeTables())
+ return FAILED_OPEN_DATABASE;
+
+ if (!DropDeletedEntries())
+ return FAILED_DATABASE_CORRUPT;
+ if (!LoadEntries(entry_bucket))
+ return FAILED_DATABASE_CORRUPT;
+ if (!LoadInfo(kernel_load_info))
+ return FAILED_DATABASE_CORRUPT;
+
+ return OPENED;
+}
+
+} // namespace syncable
diff --git a/sync/syncable/on_disk_directory_backing_store.h b/sync/syncable/on_disk_directory_backing_store.h
new file mode 100644
index 0000000..9c114a5
--- /dev/null
+++ b/sync/syncable/on_disk_directory_backing_store.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
+#define SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
+#pragma once
+
+#include "base/file_path.h"
+#include "sync/syncable/directory_backing_store.h"
+
+namespace syncable {
+
+// This is the concrete class that provides a useful implementation of
+// DirectoryBackingStore.
+class OnDiskDirectoryBackingStore : public DirectoryBackingStore {
+ public:
+ OnDiskDirectoryBackingStore(const std::string& dir_name,
+ const FilePath& backing_filepath);
+ virtual DirOpenResult Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
+
+ private:
+ FilePath backing_filepath_;
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
diff --git a/sync/syncable/syncable-inl.h b/sync/syncable/syncable-inl.h
new file mode 100644
index 0000000..4e5119f
--- /dev/null
+++ b/sync/syncable/syncable-inl.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_INL_H_
+#define SYNC_SYNCABLE_SYNCABLE_INL_H_
+#pragma once
+
+namespace syncable {
+
+template <typename FieldType, FieldType field_index>
+class LessField {
+ public:
+ inline bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ return a->ref(field_index) < b->ref(field_index);
+ }
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_SYNCABLE_INL_H_
diff --git a/sync/syncable/syncable.cc b/sync/syncable/syncable.cc
new file mode 100644
index 0000000..e34aa77
--- /dev/null
+++ b/sync/syncable/syncable.cc
@@ -0,0 +1,2405 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable.h"
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <iomanip>
+#include <iterator>
+#include <limits>
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/compiler_specific.h"
+#include "base/debug/trace_event.h"
+#include "base/file_util.h"
+#include "base/hash_tables.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/perftimer.h"
+#include "base/stl_util.h"
+#include "base/string_number_conversions.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "base/utf_string_conversions.h"
+#include "base/values.h"
+#include "sync/protocol/proto_value_conversions.h"
+#include "sync/protocol/service_constants.h"
+#include "sync/syncable/directory_backing_store.h"
+#include "sync/syncable/directory_change_delegate.h"
+#include "sync/syncable/in_memory_directory_backing_store.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/on_disk_directory_backing_store.h"
+#include "sync/syncable/syncable-inl.h"
+#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable_columns.h"
+#include "sync/syncable/syncable_enum_conversions.h"
+#include "sync/syncable/transaction_observer.h"
+#include "sync/util/logging.h"
+#include "sync/util/cryptographer.h"
+#include "net/base/escape.h"
+
+namespace {
+
+enum InvariantCheckLevel {
+ OFF = 0,
+ VERIFY_IN_MEMORY = 1,
+ FULL_DB_VERIFICATION = 2
+};
+
+const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
+
+// Max number of milliseconds to spend checking syncable entry invariants
+const int kInvariantCheckMaxMs = 50;
+
+// This function checks to see if the given list of Metahandles has any nodes
+// whose PREV_ID, PARENT_ID or NEXT_ID values refer to ID values that do not
+// actually exist. Returns true on success.
+//
+// This function is "Unsafe" because it does not attempt to acquire any locks
+// that may be protecting this list that gets passed in. The caller is
+// responsible for ensuring that no one modifies this list while the function is
+// running.
+bool VerifyReferenceIntegrityUnsafe(const syncable::MetahandlesIndex &index) {
+ TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
+ using namespace syncable;
+ typedef base::hash_set<std::string> IdsSet;
+
+ IdsSet ids_set;
+ bool is_ok = true;
+
+ for (MetahandlesIndex::const_iterator it = index.begin();
+ it != index.end(); ++it) {
+ EntryKernel* entry = *it;
+ bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
+ is_ok = is_ok && !is_duplicate_id;
+ }
+
+ IdsSet::iterator end = ids_set.end();
+ for (MetahandlesIndex::const_iterator it = index.begin();
+ it != index.end(); ++it) {
+ EntryKernel* entry = *it;
+ bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
+ bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
+ bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
+ is_ok = is_ok && prev_exists && parent_exists && next_exists;
+ }
+ return is_ok;
+}
+
+} // namespace
+
+using std::string;
+using browser_sync::Encryptor;
+using browser_sync::ReportUnrecoverableErrorFunction;
+using browser_sync::UnrecoverableErrorHandler;
+
+namespace syncable {
+
+namespace {
+
+// Function to handle runtime failures on syncable code. Rather than crashing,
+// if the |condition| is false the following will happen:
+// 1. Sets unrecoverable error on transaction.
+// 2. Returns false.
+bool SyncAssert(bool condition,
+ const tracked_objects::Location& location,
+ const char* msg,
+ BaseTransaction* trans) {
+ if (!condition) {
+ trans->OnUnrecoverableError(location, msg);
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+#define ENUM_CASE(x) case x: return #x; break
+
+std::string WriterTagToString(WriterTag writer_tag) {
+ switch (writer_tag) {
+ ENUM_CASE(INVALID);
+ ENUM_CASE(SYNCER);
+ ENUM_CASE(AUTHWATCHER);
+ ENUM_CASE(UNITTEST);
+ ENUM_CASE(VACUUM_AFTER_SAVE);
+ ENUM_CASE(PURGE_ENTRIES);
+ ENUM_CASE(SYNCAPI);
+ };
+ NOTREACHED();
+ return "";
+}
+
+#undef ENUM_CASE
+
+WriteTransactionInfo::WriteTransactionInfo(
+ int64 id,
+ tracked_objects::Location location,
+ WriterTag writer,
+ ImmutableEntryKernelMutationMap mutations)
+ : id(id),
+ location_string(location.ToString()),
+ writer(writer),
+ mutations(mutations) {}
+
+WriteTransactionInfo::WriteTransactionInfo()
+ : id(-1), writer(INVALID) {}
+
+WriteTransactionInfo::~WriteTransactionInfo() {}
+
+base::DictionaryValue* WriteTransactionInfo::ToValue(
+ size_t max_mutations_size) const {
+ DictionaryValue* dict = new DictionaryValue();
+ dict->SetString("id", base::Int64ToString(id));
+ dict->SetString("location", location_string);
+ dict->SetString("writer", WriterTagToString(writer));
+ Value* mutations_value = NULL;
+ const size_t mutations_size = mutations.Get().size();
+ if (mutations_size <= max_mutations_size) {
+ mutations_value = EntryKernelMutationMapToValue(mutations.Get());
+ } else {
+ mutations_value =
+ Value::CreateStringValue(
+ base::Uint64ToString(static_cast<uint64>(mutations_size)) +
+ " mutations");
+ }
+ dict->Set("mutations", mutations_value);
+ return dict;
+}
+
+DictionaryValue* EntryKernelMutationToValue(
+ const EntryKernelMutation& mutation) {
+ DictionaryValue* dict = new DictionaryValue();
+ dict->Set("original", mutation.original.ToValue());
+ dict->Set("mutated", mutation.mutated.ToValue());
+ return dict;
+}
+
+ListValue* EntryKernelMutationMapToValue(
+ const EntryKernelMutationMap& mutations) {
+ ListValue* list = new ListValue();
+ for (EntryKernelMutationMap::const_iterator it = mutations.begin();
+ it != mutations.end(); ++it) {
+ list->Append(EntryKernelMutationToValue(it->second));
+ }
+ return list;
+}
+
+namespace {
+
+// A ScopedIndexUpdater temporarily removes an entry from an index,
+// and restores it to the index when the scope exits. This simplifies
+// the common pattern where items need to be removed from an index
+// before updating the field.
+//
+// This class is parameterized on the Indexer traits type, which
+// must define a Comparator and a static bool ShouldInclude
+// function for testing whether the item ought to be included
+// in the index.
+template<typename Indexer>
+class ScopedIndexUpdater {
+ public:
+ ScopedIndexUpdater(const ScopedKernelLock& proof_of_lock,
+ EntryKernel* entry,
+ typename Index<Indexer>::Set* index)
+ : entry_(entry),
+ index_(index) {
+ // First call to ShouldInclude happens before the field is updated.
+ if (Indexer::ShouldInclude(entry_)) {
+      // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactoring
+ // this class into a function.
+ CHECK(index_->erase(entry_));
+ }
+ }
+
+ ~ScopedIndexUpdater() {
+ // Second call to ShouldInclude happens after the field is updated.
+ if (Indexer::ShouldInclude(entry_)) {
+      // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactoring
+ // this class into a function.
+ CHECK(index_->insert(entry_).second);
+ }
+ }
+ private:
+ // The entry that was temporarily removed from the index.
+ EntryKernel* entry_;
+ // The index which we are updating.
+ typename Index<Indexer>::Set* const index_;
+};
+
+// Helper function to add an item to the index, if it ought to be added.
+template<typename Indexer>
+void InitializeIndexEntry(EntryKernel* entry,
+ typename Index<Indexer>::Set* index) {
+ if (Indexer::ShouldInclude(entry)) {
+ index->insert(entry);
+ }
+}
+
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////
+// Comparator and filter functions for the indices.
+
+// static
+bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
+ return !a->ref(UNIQUE_CLIENT_TAG).empty();
+}
+
+bool ParentIdAndHandleIndexer::Comparator::operator() (
+ const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
+ if (cmp != 0)
+ return cmp < 0;
+
+ int64 a_position = a->ref(SERVER_POSITION_IN_PARENT);
+ int64 b_position = b->ref(SERVER_POSITION_IN_PARENT);
+ if (a_position != b_position)
+ return a_position < b_position;
+
+ cmp = a->ref(ID).compare(b->ref(ID));
+ return cmp < 0;
+}
+
+// static
+bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
+ // This index excludes deleted items and the root item. The root
+ // item is excluded so that it doesn't show up as a child of itself.
+ return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// EntryKernel
+
+EntryKernel::EntryKernel() : dirty_(false) {
+ // Everything else should already be default-initialized.
+ for (int i = INT64_FIELDS_BEGIN; i < INT64_FIELDS_END; ++i) {
+ int64_fields[i] = 0;
+ }
+}
+
+EntryKernel::~EntryKernel() {}
+
+syncable::ModelType EntryKernel::GetServerModelType() const {
+ ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS));
+ if (specifics_type != UNSPECIFIED)
+ return specifics_type;
+ if (ref(ID).IsRoot())
+ return TOP_LEVEL_FOLDER;
+ // Loose check for server-created top-level folders that aren't
+ // bound to a particular model type.
+ if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
+ return TOP_LEVEL_FOLDER;
+
+ return UNSPECIFIED;
+}
+
+bool EntryKernel::ContainsString(const std::string& lowercase_query) const {
+ // TODO(lipalani) - figure out what to do if the node is encrypted.
+ const sync_pb::EntitySpecifics& specifics = ref(SPECIFICS);
+ std::string temp;
+ // The protobuf serialized string contains the original strings. So
+ // we will just serialize it and search it.
+ specifics.SerializeToString(&temp);
+
+ // Now convert to lower case.
+ StringToLowerASCII(&temp);
+
+ if (temp.find(lowercase_query) != std::string::npos)
+ return true;
+
+ // Now go through all the string fields to see if the value is there.
+ for (int i = STRING_FIELDS_BEGIN; i < STRING_FIELDS_END; ++i) {
+ if (StringToLowerASCII(ref(static_cast<StringField>(i))).find(
+ lowercase_query) != std::string::npos)
+ return true;
+ }
+
+ for (int i = ID_FIELDS_BEGIN; i < ID_FIELDS_END; ++i) {
+ const Id& id = ref(static_cast<IdField>(i));
+ if (id.ContainsStringCaseInsensitive(lowercase_query)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+namespace {
+
+// Utility function to loop through a set of enum values and add the
+// field keys/values in the kernel to the given dictionary.
+//
+// V should be convertible to Value.
+template <class T, class U, class V>
+void SetFieldValues(const EntryKernel& kernel,
+ DictionaryValue* dictionary_value,
+ const char* (*enum_key_fn)(T),
+ V* (*enum_value_fn)(U),
+ int field_key_min, int field_key_max) {
+ DCHECK_LE(field_key_min, field_key_max);
+ for (int i = field_key_min; i <= field_key_max; ++i) {
+ T field = static_cast<T>(i);
+ const std::string& key = enum_key_fn(field);
+ V* value = enum_value_fn(kernel.ref(field));
+ dictionary_value->Set(key, value);
+ }
+}
+
+// Helper functions for SetFieldValues().
+
+StringValue* Int64ToValue(int64 i) {
+ return Value::CreateStringValue(base::Int64ToString(i));
+}
+
+StringValue* TimeToValue(const base::Time& t) {
+ return Value::CreateStringValue(browser_sync::GetTimeDebugString(t));
+}
+
+StringValue* IdToValue(const Id& id) {
+ return id.ToValue();
+}
+
+} // namespace
+
+DictionaryValue* EntryKernel::ToValue() const {
+ DictionaryValue* kernel_info = new DictionaryValue();
+ kernel_info->SetBoolean("isDirty", is_dirty());
+ kernel_info->Set("serverModelType", ModelTypeToValue(GetServerModelType()));
+
+ // Int64 fields.
+ SetFieldValues(*this, kernel_info,
+ &GetMetahandleFieldString, &Int64ToValue,
+ INT64_FIELDS_BEGIN, META_HANDLE);
+ SetFieldValues(*this, kernel_info,
+ &GetBaseVersionString, &Int64ToValue,
+ META_HANDLE + 1, BASE_VERSION);
+ SetFieldValues(*this, kernel_info,
+ &GetInt64FieldString, &Int64ToValue,
+ BASE_VERSION + 1, INT64_FIELDS_END - 1);
+
+ // Time fields.
+ SetFieldValues(*this, kernel_info,
+ &GetTimeFieldString, &TimeToValue,
+ TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
+
+ // ID fields.
+ SetFieldValues(*this, kernel_info,
+ &GetIdFieldString, &IdToValue,
+ ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
+
+ // Bit fields.
+ SetFieldValues(*this, kernel_info,
+ &GetIndexedBitFieldString, &Value::CreateBooleanValue,
+ BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
+ SetFieldValues(*this, kernel_info,
+ &GetIsDelFieldString, &Value::CreateBooleanValue,
+ INDEXED_BIT_FIELDS_END, IS_DEL);
+ SetFieldValues(*this, kernel_info,
+ &GetBitFieldString, &Value::CreateBooleanValue,
+ IS_DEL + 1, BIT_FIELDS_END - 1);
+
+ // String fields.
+ {
+ // Pick out the function overload we want.
+ StringValue* (*string_to_value)(const std::string&) =
+ &Value::CreateStringValue;
+ SetFieldValues(*this, kernel_info,
+ &GetStringFieldString, string_to_value,
+ STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
+ }
+
+ // Proto fields.
+ SetFieldValues(*this, kernel_info,
+ &GetProtoFieldString, &browser_sync::EntitySpecificsToValue,
+ PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
+
+ // Bit temps.
+ SetFieldValues(*this, kernel_info,
+ &GetBitTempString, &Value::CreateBooleanValue,
+ BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
+
+ return kernel_info;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Directory
+
+// static
+const FilePath::CharType Directory::kSyncDatabaseFilename[] =
+ FILE_PATH_LITERAL("SyncData.sqlite3");
+
+void Directory::InitKernelForTest(
+ const std::string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer) {
+ DCHECK(!kernel_);
+ kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer);
+}
+
+Directory::PersistedKernelInfo::PersistedKernelInfo()
+ : next_id(0) {
+ for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
+ reset_download_progress(ModelTypeFromInt(i));
+ }
+}
+
+Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
+
+void Directory::PersistedKernelInfo::reset_download_progress(
+ ModelType model_type) {
+ download_progress[model_type].set_data_type_id(
+ GetSpecificsFieldNumberFromModelType(model_type));
+ // An empty-string token indicates no prior knowledge.
+ download_progress[model_type].set_token(std::string());
+}
+
+Directory::SaveChangesSnapshot::SaveChangesSnapshot()
+ : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
+}
+
+Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
+
+Directory::Kernel::Kernel(
+ const std::string& name,
+ const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer)
+ : refcount(1),
+ next_write_transaction_id(0),
+ name(name),
+ metahandles_index(new Directory::MetahandlesIndex),
+ ids_index(new Directory::IdsIndex),
+ parent_id_child_index(new Directory::ParentIdChildIndex),
+ client_tag_index(new Directory::ClientTagIndex),
+ unsynced_metahandles(new MetahandleSet),
+ dirty_metahandles(new MetahandleSet),
+ metahandles_to_purge(new MetahandleSet),
+ info_status(Directory::KERNEL_SHARE_INFO_VALID),
+ persisted_info(info.kernel_info),
+ cache_guid(info.cache_guid),
+ next_metahandle(info.max_metahandle + 1),
+ delegate(delegate),
+ transaction_observer(transaction_observer) {
+ DCHECK(delegate);
+ DCHECK(transaction_observer.IsInitialized());
+}
+
+void Directory::Kernel::AddRef() {
+ base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
+}
+
+void Directory::Kernel::Release() {
+ if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
+ delete this;
+}
+
+Directory::Kernel::~Kernel() {
+ CHECK_EQ(0, refcount);
+ delete unsynced_metahandles;
+ delete dirty_metahandles;
+ delete metahandles_to_purge;
+ delete parent_id_child_index;
+ delete client_tag_index;
+ delete ids_index;
+ STLDeleteElements(metahandles_index);
+ delete metahandles_index;
+}
+
+Directory::Directory(
+ Encryptor* encryptor,
+ UnrecoverableErrorHandler* unrecoverable_error_handler,
+ ReportUnrecoverableErrorFunction report_unrecoverable_error_function)
+ : cryptographer_(encryptor),
+ kernel_(NULL),
+ store_(NULL),
+ unrecoverable_error_handler_(unrecoverable_error_handler),
+ report_unrecoverable_error_function_(
+ report_unrecoverable_error_function),
+ unrecoverable_error_set_(false) {
+}
+
+Directory::~Directory() {
+ Close();
+}
+
+DirOpenResult Directory::Open(
+ const FilePath& file_path, const string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer) {
+ TRACE_EVENT0("sync", "SyncDatabaseOpen");
+
+ FilePath db_path(file_path);
+ file_util::AbsolutePath(&db_path);
+ DirectoryBackingStore* store = new OnDiskDirectoryBackingStore(name, db_path);
+
+ const DirOpenResult result =
+ OpenImpl(store, name, delegate, transaction_observer);
+
+ if (OPENED != result)
+ Close();
+ return result;
+}
+
+DirOpenResult Directory::OpenInMemoryForTest(
+ const string& name, DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer) {
+
+ DirectoryBackingStore* store = new InMemoryDirectoryBackingStore(name);
+
+ const DirOpenResult result =
+ OpenImpl(store, name, delegate, transaction_observer);
+ if (OPENED != result)
+ Close();
+ return result;
+}
+
+void Directory::InitializeIndices() {
+ MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+ for (; it != kernel_->metahandles_index->end(); ++it) {
+ EntryKernel* entry = *it;
+ InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
+ kernel_->parent_id_child_index);
+ InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
+ InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
+ const int64 metahandle = entry->ref(META_HANDLE);
+ if (entry->ref(IS_UNSYNCED))
+ kernel_->unsynced_metahandles->insert(metahandle);
+ if (entry->ref(IS_UNAPPLIED_UPDATE)) {
+ const ModelType type = entry->GetServerModelType();
+ kernel_->unapplied_update_metahandles[type].insert(metahandle);
+ }
+ DCHECK(!entry->is_dirty());
+ }
+}
+
+DirOpenResult Directory::OpenImpl(
+ DirectoryBackingStore* store,
+ const string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer) {
+ DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
+ store_ = store;
+
+ KernelLoadInfo info;
+  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
+ // swap these later.
+ MetahandlesIndex metas_bucket;
+ DirOpenResult result = store_->Load(&metas_bucket, &info);
+ if (OPENED != result)
+ return result;
+
+ if (!VerifyReferenceIntegrityUnsafe(metas_bucket))
+ return FAILED_LOGICAL_CORRUPTION;
+
+ kernel_ = new Kernel(name, info, delegate, transaction_observer);
+ kernel_->metahandles_index->swap(metas_bucket);
+ InitializeIndices();
+ return OPENED;
+}
+
+void Directory::Close() {
+ if (store_)
+ delete store_;
+ store_ = NULL;
+ if (kernel_) {
+ bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
+ DCHECK(del) << "Kernel should only have a single ref";
+ if (del)
+ delete kernel_;
+ kernel_ = NULL;
+ }
+}
+
+void Directory::OnUnrecoverableError(const BaseTransaction* trans,
+ const tracked_objects::Location& location,
+ const std::string & message) {
+ DCHECK(trans != NULL);
+ unrecoverable_error_set_ = true;
+ unrecoverable_error_handler_->OnUnrecoverableError(location,
+ message);
+}
+
+
+EntryKernel* Directory::GetEntryById(const Id& id) {
+ ScopedKernelLock lock(this);
+ return GetEntryById(id, &lock);
+}
+
+EntryKernel* Directory::GetEntryById(const Id& id,
+ ScopedKernelLock* const lock) {
+ DCHECK(kernel_);
+ // Find it in the in memory ID index.
+ kernel_->needle.put(ID, id);
+ IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
+ if (id_found != kernel_->ids_index->end()) {
+ return *id_found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
+ ScopedKernelLock lock(this);
+ DCHECK(kernel_);
+ // Find it in the ClientTagIndex.
+ kernel_->needle.put(UNIQUE_CLIENT_TAG, tag);
+ ClientTagIndex::iterator found = kernel_->client_tag_index->find(
+ &kernel_->needle);
+ if (found != kernel_->client_tag_index->end()) {
+ return *found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
+ ScopedKernelLock lock(this);
+ DCHECK(kernel_);
+ // We don't currently keep a separate index for the tags. Since tags
+ // only exist for server created items that are the first items
+ // to be created in a store, they should have small metahandles.
+ // So, we just iterate over the items in sorted metahandle order,
+ // looking for a match.
+ MetahandlesIndex& set = *kernel_->metahandles_index;
+ for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
+ if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) {
+ return *i;
+ }
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
+ ScopedKernelLock lock(this);
+ return GetEntryByHandle(metahandle, &lock);
+}
+
+EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
+ ScopedKernelLock* lock) {
+ // Look up in memory
+ kernel_->needle.put(META_HANDLE, metahandle);
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ if (found != kernel_->metahandles_index->end()) {
+ // Found it in memory. Easy.
+ return *found;
+ }
+ return NULL;
+}
+
+bool Directory::GetChildHandlesById(
+ BaseTransaction* trans, const Id& parent_id,
+ Directory::ChildHandles* result) {
+ if (!SyncAssert(this == trans->directory(), FROM_HERE,
+ "Directories don't match", trans))
+ return false;
+ result->clear();
+
+ ScopedKernelLock lock(this);
+ AppendChildHandles(lock, parent_id, result);
+ return true;
+}
+
+bool Directory::GetChildHandlesByHandle(
+ BaseTransaction* trans, int64 handle,
+ Directory::ChildHandles* result) {
+ if (!SyncAssert(this == trans->directory(), FROM_HERE,
+ "Directories don't match", trans))
+ return false;
+
+ result->clear();
+
+ ScopedKernelLock lock(this);
+ EntryKernel* kernel = GetEntryByHandle(handle, &lock);
+ if (!kernel)
+ return true;
+
+ AppendChildHandles(lock, kernel->ref(ID), result);
+ return true;
+}
+
+EntryKernel* Directory::GetRootEntry() {
+ return GetEntryById(Id());
+}
+
+bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
+ ScopedKernelLock lock(this);
+ return InsertEntry(trans, entry, &lock);
+}
+
+bool Directory::InsertEntry(WriteTransaction* trans,
+ EntryKernel* entry,
+ ScopedKernelLock* lock) {
+ DCHECK(NULL != lock);
+ if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
+ return false;
+
+ static const char error[] = "Entry already in memory index.";
+ if (!SyncAssert(kernel_->metahandles_index->insert(entry).second,
+ FROM_HERE,
+ error,
+ trans))
+ return false;
+
+ if (!entry->ref(IS_DEL)) {
+ if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
+ FROM_HERE,
+ error,
+ trans)) {
+ return false;
+ }
+ }
+ if (!SyncAssert(kernel_->ids_index->insert(entry).second,
+ FROM_HERE,
+ error,
+ trans))
+ return false;
+
+ // Should NEVER be created with a client tag.
+ if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
+ "Client should be empty", trans))
+ return false;
+
+ return true;
+}
+
+bool Directory::ReindexId(WriteTransaction* trans,
+ EntryKernel* const entry,
+ const Id& new_id) {
+ ScopedKernelLock lock(this);
+ if (NULL != GetEntryById(new_id, &lock))
+ return false;
+
+ {
+ // Update the indices that depend on the ID field.
+ ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
+ ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
+ kernel_->parent_id_child_index);
+ entry->put(ID, new_id);
+ }
+ return true;
+}
+
+bool Directory::ReindexParentId(WriteTransaction* trans,
+ EntryKernel* const entry,
+ const Id& new_parent_id) {
+ ScopedKernelLock lock(this);
+
+ {
+ // Update the indices that depend on the PARENT_ID field.
+ ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
+ kernel_->parent_id_child_index);
+ entry->put(PARENT_ID, new_parent_id);
+ }
+ return true;
+}
+
+bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
+ DCHECK(trans != NULL);
+ return unrecoverable_error_set_;
+}
+
+void Directory::ClearDirtyMetahandles() {
+ kernel_->transaction_mutex.AssertAcquired();
+ kernel_->dirty_metahandles->clear();
+}
+
+bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
+ const EntryKernel* const entry) const {
+ bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
+ !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
+ !entry->ref(IS_UNSYNCED);
+
+ if (safe) {
+ int64 handle = entry->ref(META_HANDLE);
+ const ModelType type = entry->GetServerModelType();
+ if (!SyncAssert(kernel_->dirty_metahandles->count(handle) == 0U,
+ FROM_HERE,
+ "Dirty metahandles should be empty", trans))
+ return false;
+ // TODO(tim): Bug 49278.
+ if (!SyncAssert(!kernel_->unsynced_metahandles->count(handle),
+ FROM_HERE,
+ "Unsynced handles should be empty",
+ trans))
+ return false;
+ if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
+ FROM_HERE,
+ "Unapplied metahandles should be empty",
+ trans))
+ return false;
+ }
+
+ return safe;
+}
+
+// Copies everything that needs persisting (dirty entries, purged handles,
+// kernel info) into |snapshot| and optimistically clears the in-memory dirty
+// state.  On a failed save, HandleSaveChangesFailure() restores that state.
+void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
+  ReadTransaction trans(FROM_HERE, this);
+  ScopedKernelLock lock(this);
+
+  // If there is an unrecoverable error then just bail out.
+  if (unrecoverable_error_set(&trans))
+    return;
+
+  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
+  // clear dirty flags.
+  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
+       i != kernel_->dirty_metahandles->end(); ++i) {
+    EntryKernel* entry = GetEntryByHandle(*i, &lock);
+    if (!entry)
+      continue;
+    // Skip over false positives; it happens relatively infrequently.
+    if (!entry->is_dirty())
+      continue;
+    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
+    DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
+    // We don't bother removing from the index here as we blow the entire thing
+    // in a moment, and it unnecessarily complicates iteration.
+    entry->clear_dirty(NULL);
+  }
+  ClearDirtyMetahandles();
+
+  // Set purged handles.
+  DCHECK(snapshot->metahandles_to_purge.empty());
+  snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
+
+  // Fill kernel_info_status and kernel_info.
+  snapshot->kernel_info = kernel_->persisted_info;
+  // To avoid duplicates when the process crashes, we record the next_id to be
+  // greater magnitude than could possibly be reached before the next save
+  // changes.  In other words, it's effectively impossible for the user to
+  // generate 65536 new bookmarks in 3 seconds.
+  snapshot->kernel_info.next_id -= 65536;
+  snapshot->kernel_info_status = kernel_->info_status;
+  // This one we reset on failure.
+  kernel_->info_status = KERNEL_SHARE_INFO_VALID;
+}
+
+// Flushes all dirty state to the backing store.  Serialized on
+// |save_changes_mutex| so at most one save is in flight at a time.
+bool Directory::SaveChanges() {
+  DCHECK(store_);
+
+  base::AutoLock scoped_lock(kernel_->save_changes_mutex);
+
+  // Capture a consistent snapshot of everything that needs writing.
+  SaveChangesSnapshot snapshot;
+  TakeSnapshotForSaveChanges(&snapshot);
+
+  if (!store_->SaveChanges(snapshot)) {
+    // Restore dirty bits so a later SaveChanges retries this data.
+    HandleSaveChangesFailure(snapshot);
+    return false;
+  }
+
+  // On success, evict entries that no longer need to stay in memory.
+  return VacuumAfterSaveChanges(snapshot);
+}
+
+// After a successful save, frees in-memory entries from |snapshot| that are
+// deleted and fully synced.  Returns false if an invariant check fails or an
+// unrecoverable error is raised during the transaction.
+bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
+  if (snapshot.dirty_metas.empty())
+    return true;
+
+  // Need a write transaction as we are about to permanently purge entries.
+  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
+  ScopedKernelLock lock(this);
+  // Now drop everything we can out of memory.
+  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+       i != snapshot.dirty_metas.end(); ++i) {
+    // |needle| is a scratch kernel used purely as a lookup key by handle.
+    kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+    MetahandlesIndex::iterator found =
+        kernel_->metahandles_index->find(&kernel_->needle);
+    EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
+                          NULL : *found);
+    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
+      // We now drop deleted metahandles that are up to date on both the client
+      // and the server.
+      size_t num_erased = 0;
+      num_erased = kernel_->ids_index->erase(entry);
+      DCHECK_EQ(1u, num_erased);
+      num_erased = kernel_->metahandles_index->erase(entry);
+      DCHECK_EQ(1u, num_erased);
+
+      // Might not be in it
+      num_erased = kernel_->client_tag_index->erase(entry);
+      DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+      // A deleted entry must already be absent from the parent/child index.
+      if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
+                      FROM_HERE,
+                      "Deleted entry still present",
+                      (&trans)))
+        return false;
+      delete entry;
+    }
+    if (trans.unrecoverable_error_set())
+      return false;
+  }
+  return true;
+}
+
+// Permanently removes all entries whose local or server type is in |types|,
+// records their handles for purging from the store, and resets the types'
+// download/initial-sync bookkeeping.
+void Directory::PurgeEntriesWithTypeIn(ModelTypeSet types) {
+  if (types.Empty())
+    return;
+
+  {
+    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
+    {
+      ScopedKernelLock lock(this);
+      MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+      while (it != kernel_->metahandles_index->end()) {
+        const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
+        const sync_pb::EntitySpecifics& server_specifics =
+            (*it)->ref(SERVER_SPECIFICS);
+        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
+        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
+
+        // Note the dance around incrementing |it|, since we sometimes erase().
+        if ((IsRealDataType(local_type) && types.Has(local_type)) ||
+            (IsRealDataType(server_type) && types.Has(server_type))) {
+          // NOTE(review): an UnlinkEntryFromOrder failure aborts the whole
+          // purge mid-way, leaving earlier erasures applied.
+          if (!UnlinkEntryFromOrder(*it, NULL, &lock))
+            return;
+
+          int64 handle = (*it)->ref(META_HANDLE);
+          kernel_->metahandles_to_purge->insert(handle);
+
+          // Remove the entry from every index/set it may belong to; the
+          // DCHECKs cross-check membership against the entry's own bits.
+          size_t num_erased = 0;
+          EntryKernel* entry = *it;
+          num_erased = kernel_->ids_index->erase(entry);
+          DCHECK_EQ(1u, num_erased);
+          num_erased = kernel_->client_tag_index->erase(entry);
+          DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+          num_erased = kernel_->unsynced_metahandles->erase(handle);
+          DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
+          num_erased =
+              kernel_->unapplied_update_metahandles[server_type].erase(handle);
+          DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
+          num_erased = kernel_->parent_id_child_index->erase(entry);
+          DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
+          kernel_->metahandles_index->erase(it++);
+          delete entry;
+        } else {
+          ++it;
+        }
+      }
+
+      // Ensure meta tracking for these data types reflects the deleted state.
+      for (syncable::ModelTypeSet::Iterator it = types.First();
+           it.Good(); it.Inc()) {
+        set_initial_sync_ended_for_type_unsafe(it.Get(), false);
+        kernel_->persisted_info.reset_download_progress(it.Get());
+      }
+    }
+  }
+}
+
+// Undoes the optimistic state-clearing done by TakeSnapshotForSaveChanges()
+// after a failed store write, so the data is retried on the next save.
+void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
+  ScopedKernelLock lock(this);
+  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+
+  // Because we optimistically cleared the dirty bit on the real entries when
+  // taking the snapshot, we must restore it on failure.  Not doing this could
+  // cause lost data, if no other changes are made to the in-memory entries
+  // that would cause the dirty bit to get set again.  Setting the bit ensures
+  // that SaveChanges will at least try again later.
+  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+       i != snapshot.dirty_metas.end(); ++i) {
+    kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+    MetahandlesIndex::iterator found =
+        kernel_->metahandles_index->find(&kernel_->needle);
+    if (found != kernel_->metahandles_index->end()) {
+      (*found)->mark_dirty(kernel_->dirty_metahandles);
+    }
+  }
+
+  // Re-queue handles whose purge was not persisted.
+  kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
+                                        snapshot.metahandles_to_purge.end());
+}
+
+// Copies the persisted download progress marker for |model_type| into
+// |value_out|.
+void Directory::GetDownloadProgress(
+    ModelType model_type,
+    sync_pb::DataTypeProgressMarker* value_out) const {
+  ScopedKernelLock lock(this);
+  // Note: no "return" here -- returning a void expression from a void
+  // function was misleading.
+  value_out->CopyFrom(
+      kernel_->persisted_info.download_progress[model_type]);
+}
+
+// Serializes the download progress marker for |model_type| into |value_out|.
+void Directory::GetDownloadProgressAsString(
+    ModelType model_type,
+    std::string* value_out) const {
+  ScopedKernelLock lock(this);
+  kernel_->persisted_info.download_progress[model_type].SerializeToString(
+      value_out);
+}
+
+// Returns the number of entries resident in memory, or 0 if the index has
+// not been created.
+size_t Directory::GetEntriesCount() const {
+  ScopedKernelLock lock(this);
+  if (!kernel_->metahandles_index)
+    return 0;
+  return kernel_->metahandles_index->size();
+}
+
+// Stores a new download progress marker for |model_type| and marks the
+// kernel info dirty so it gets persisted.
+void Directory::SetDownloadProgress(
+    ModelType model_type,
+    const sync_pb::DataTypeProgressMarker& new_progress) {
+  ScopedKernelLock lock(this);
+  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
+  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+}
+
+// Returns whether the initial sync for |type| has completed.
+bool Directory::initial_sync_ended_for_type(ModelType type) const {
+  ScopedKernelLock lock(this);
+  return kernel_->persisted_info.initial_sync_ended.Has(type);
+}
+
+// Assigns *data_to_set to *kernel_data, marking the kernel info dirty only
+// if the value actually changed.
+template <class T> void Directory::TestAndSet(
+    T* kernel_data, const T* data_to_set) {
+  if (*kernel_data != *data_to_set) {
+    *kernel_data = *data_to_set;
+    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+  }
+}
+
+// Locking wrapper around set_initial_sync_ended_for_type_unsafe().
+void Directory::set_initial_sync_ended_for_type(ModelType type, bool x) {
+  ScopedKernelLock lock(this);
+  set_initial_sync_ended_for_type_unsafe(type, x);
+}
+
+// Updates the initial-sync-ended bit for |type| and marks the persisted
+// info dirty iff the bit actually changed.  Caller must hold the kernel
+// lock ("unsafe" = no locking here).
+void Directory::set_initial_sync_ended_for_type_unsafe(ModelType type,
+                                                       bool x) {
+  const bool currently_set =
+      kernel_->persisted_info.initial_sync_ended.Has(type);
+  if (currently_set == x)
+    return;  // No change; nothing to persist.
+  if (x)
+    kernel_->persisted_info.initial_sync_ended.Put(type);
+  else
+    kernel_->persisted_info.initial_sync_ended.Remove(type);
+  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+}
+
+// Stores the notification state, marking the kernel info dirty on change.
+// Caller must hold the kernel lock ("unsafe" = no locking here).
+void Directory::SetNotificationStateUnsafe(
+    const std::string& notification_state) {
+  if (notification_state == kernel_->persisted_info.notification_state)
+    return;
+  kernel_->persisted_info.notification_state = notification_state;
+  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+}
+
+// Returns a copy of the persisted store birthday.
+string Directory::store_birthday() const {
+  ScopedKernelLock lock(this);
+  return kernel_->persisted_info.store_birthday;
+}
+
+// Sets the store birthday, marking the kernel info dirty only on change.
+void Directory::set_store_birthday(const string& store_birthday) {
+  ScopedKernelLock lock(this);
+  if (kernel_->persisted_info.store_birthday == store_birthday)
+    return;
+  kernel_->persisted_info.store_birthday = store_birthday;
+  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+}
+
+// Returns a copy of the persisted notification state.
+std::string Directory::GetNotificationState() const {
+  ScopedKernelLock lock(this);
+  return kernel_->persisted_info.notification_state;
+}
+
+// Locking wrapper around SetNotificationStateUnsafe().
+void Directory::SetNotificationState(const std::string& notification_state) {
+  ScopedKernelLock lock(this);
+  SetNotificationStateUnsafe(notification_state);
+}
+
+// Returns this client's cache GUID.
+string Directory::cache_guid() const {
+  // No need to lock since nothing ever writes to it after load.
+  return kernel_->cache_guid;
+}
+
+// Returns the directory-owned cryptographer.  |trans| proves the caller
+// holds a transaction on this directory.
+browser_sync::Cryptographer* Directory::GetCryptographer(
+    const BaseTransaction* trans) {
+  DCHECK_EQ(this, trans->directory());
+  return &cryptographer_;
+}
+
+// Fills |result| with the metahandle of every in-memory entry, replacing
+// any previous contents.
+void Directory::GetAllMetaHandles(BaseTransaction* trans,
+                                  MetahandleSet* result) {
+  result->clear();
+  ScopedKernelLock lock(this);
+  MetahandlesIndex::iterator i;
+  for (i = kernel_->metahandles_index->begin();
+       i != kernel_->metahandles_index->end();
+       ++i) {
+    result->insert((*i)->ref(META_HANDLE));
+  }
+}
+
+// Fills |result| with pointers to every in-memory entry kernel, replacing
+// any previous contents.  Pointers remain owned by the directory.
+void Directory::GetAllEntryKernels(BaseTransaction* trans,
+                                   std::vector<const EntryKernel*>* result) {
+  result->clear();
+  ScopedKernelLock lock(this);
+  result->insert(result->end(),
+                 kernel_->metahandles_index->begin(),
+                 kernel_->metahandles_index->end());
+}
+
+// Replaces the contents of |result| with the handles of all unsynced
+// entries.
+void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
+                                       UnsyncedMetaHandles* result) {
+  ScopedKernelLock lock(this);
+  result->assign(kernel_->unsynced_metahandles->begin(),
+                 kernel_->unsynced_metahandles->end());
+}
+
+// Returns the number of entries awaiting commit to the server.
+int64 Directory::unsynced_entity_count() const {
+  ScopedKernelLock lock(this);
+  return kernel_->unsynced_metahandles->size();
+}
+
+// Returns the set of server model types that have at least one update
+// waiting to be applied.
+FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
+    BaseTransaction* trans) const {
+  syncable::FullModelTypeSet server_types;
+  ScopedKernelLock lock(this);
+  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
+    const ModelType type = ModelTypeFromInt(i);
+    if (!kernel_->unapplied_update_metahandles[type].empty()) {
+      server_types.Put(type);
+    }
+  }
+  return server_types;
+}
+
+// Replaces the contents of |result| with the handles of all unapplied
+// updates whose server type is in |server_types|.
+void Directory::GetUnappliedUpdateMetaHandles(
+    BaseTransaction* trans,
+    FullModelTypeSet server_types,
+    UnappliedUpdateMetaHandles* result) {
+  result->clear();
+  ScopedKernelLock lock(this);
+  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
+    const ModelType type = ModelTypeFromInt(i);
+    if (server_types.Has(type)) {
+      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
+                kernel_->unapplied_update_metahandles[type].end(),
+                back_inserter(*result));
+    }
+  }
+}
+
+
+// Predicate interface used by CheckTreeInvariants to decide which ids are
+// worth examining.
+class IdFilter {
+ public:
+  virtual ~IdFilter() { }
+  virtual bool ShouldConsider(const Id& id) const = 0;
+};
+
+
+// IdFilter that accepts every id (used for full-database verification).
+class FullScanFilter : public IdFilter {
+ public:
+  virtual bool ShouldConsider(const Id& id) const {
+    return true;
+  }
+};
+
+// IdFilter that accepts only ids in |ids_|.  Callers must sort |ids_|
+// before use, since lookup is a binary search.
+class SomeIdsFilter : public IdFilter {
+ public:
+  virtual bool ShouldConsider(const Id& id) const {
+    return std::binary_search(ids_.begin(), ids_.end(), id);
+  }
+  std::vector<Id> ids_;
+};
+
+// Verifies tree invariants over just the entries touched by |mutations|.
+bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+                                    const EntryKernelMutationMap& mutations) {
+  MetahandleSet mutated_handles;
+  SomeIdsFilter id_filter;
+  id_filter.ids_.reserve(mutations.size());
+  for (EntryKernelMutationMap::const_iterator i = mutations.begin(),
+           end = mutations.end(); i != end; ++i) {
+    id_filter.ids_.push_back(i->second.mutated.ref(ID));
+    mutated_handles.insert(i->first);
+  }
+  // SomeIdsFilter requires sorted ids for its binary search.
+  std::sort(id_filter.ids_.begin(), id_filter.ids_.end());
+  return CheckTreeInvariants(trans, mutated_handles, id_filter);
+}
+
+// Verifies tree invariants over every entry, either unconditionally
+// (|full_scan|) or restricted to ids present in the directory.
+bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+                                    bool full_scan) {
+  // TODO(timsteele): This is called every time a WriteTransaction finishes.
+  // The performance hit is substantial given that we now examine every single
+  // syncable entry.  Need to redesign this.
+  MetahandleSet handles;
+  GetAllMetaHandles(trans, &handles);
+  if (full_scan) {
+    FullScanFilter fullfilter;
+    if (!CheckTreeInvariants(trans, handles, fullfilter))
+      return false;
+  } else {
+    SomeIdsFilter filter;
+    MetahandleSet::iterator i;
+    for (i = handles.begin() ; i != handles.end() ; ++i) {
+      Entry e(trans, GET_BY_HANDLE, *i);
+      if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
+        return false;
+      filter.ids_.push_back(e.Get(ID));
+    }
+    // SomeIdsFilter requires sorted ids for its binary search.
+    std::sort(filter.ids_.begin(), filter.ids_.end());
+    if (!CheckTreeInvariants(trans, handles, filter))
+      return false;
+  }
+  return true;
+}
+
+// Core invariant checker: for each handle in |handles|, verifies structural
+// and version invariants (root shape, parent chain validity, version/flag
+// consistency).  Returns false on the first failed SyncAssert.  Bails out
+// early -- reporting success -- if the check exceeds kInvariantCheckMaxMs.
+bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+                                    const MetahandleSet& handles,
+                                    const IdFilter& idfilter) {
+  const int64 max_ms = kInvariantCheckMaxMs;
+  PerfTimer check_timer;
+  MetahandleSet::const_iterator i;
+  int entries_done = 0;
+  for (i = handles.begin() ; i != handles.end() ; ++i) {
+    int64 metahandle = *i;
+    Entry e(trans, GET_BY_HANDLE, metahandle);
+    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
+      return false;
+    syncable::Id id = e.Get(ID);
+    syncable::Id parentid = e.Get(PARENT_ID);
+
+    // The root entry must be a directory, its own parent, and synced.
+    if (id.IsRoot()) {
+      if (!SyncAssert(e.Get(IS_DIR), FROM_HERE,
+                      "Entry should be a directory",
+                      trans))
+        return false;
+      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
+                      "Entry should be root",
+                      trans))
+        return false;
+      // NOTE(review): "sycned" in this message is a pre-existing typo in
+      // the string literal; left untouched here.
+      if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE,
+                      "Entry should be sycned",
+                      trans))
+        return false;
+      ++entries_done;
+      continue;
+    }
+
+    // Live (non-deleted) entries must have a valid, acyclic parent chain of
+    // non-deleted directories.  |safety_count| bounds the walk so a cycle
+    // cannot hang the check.
+    if (!e.Get(IS_DEL)) {
+      if (!SyncAssert(id != parentid, FROM_HERE,
+                      "Id should be different from parent id.",
+                      trans))
+        return false;
+      if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE,
+                      "Non unique name should not be empty.",
+                      trans))
+        return false;
+      int safety_count = handles.size() + 1;
+      while (!parentid.IsRoot()) {
+        if (!idfilter.ShouldConsider(parentid))
+          break;
+        Entry parent(trans, GET_BY_ID, parentid);
+        if (!SyncAssert(parent.good(), FROM_HERE,
+                        "Parent entry is not valid.",
+                        trans))
+          return false;
+        if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE,
+                        "Parent should be a directory",
+                        trans))
+          return false;
+        if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE,
+                        "Parent should not have been marked for deletion.",
+                        trans))
+          return false;
+        if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)),
+                        FROM_HERE,
+                        "Parent should be in the index.",
+                        trans))
+          return false;
+        parentid = parent.Get(PARENT_ID);
+        if (!SyncAssert(--safety_count > 0, FROM_HERE,
+                        "Count should be greater than zero.",
+                        trans))
+          return false;
+      }
+    }
+    // Version/flag consistency checks.
+    int64 base_version = e.Get(BASE_VERSION);
+    int64 server_version = e.Get(SERVER_VERSION);
+    bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
+    if (CHANGES_VERSION == base_version || 0 == base_version) {
+      if (e.Get(IS_UNAPPLIED_UPDATE)) {
+        // Must be a new item, or a de-duplicated unique client tag
+        // that was created both locally and remotely.
+        if (!using_unique_client_tag) {
+          if (!SyncAssert(e.Get(IS_DEL), FROM_HERE,
+                          "The entry should not have been deleted.",
+                          trans))
+            return false;
+        }
+        // It came from the server, so it must have a server ID.
+        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
+                        "The id should be from a server.",
+                        trans))
+          return false;
+      } else {
+        if (e.Get(IS_DIR)) {
+          // TODO(chron): Implement this mode if clients ever need it.
+          // For now, you can't combine a client tag and a directory.
+          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
+                          "Directory cannot have a client tag.",
+                          trans))
+            return false;
+        }
+        // Should be an uncomitted item, or a successfully deleted one.
+        if (!e.Get(IS_DEL)) {
+          if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE,
+                          "The item should be unsynced.",
+                          trans))
+            return false;
+        }
+        // If the next check failed, it would imply that an item exists
+        // on the server, isn't waiting for application locally, but either
+        // is an unsynced create or a successful delete in the local copy.
+        // Either way, that's a mismatch.
+        if (!SyncAssert(0 == server_version, FROM_HERE,
+                        "Server version should be zero.",
+                        trans))
+          return false;
+        // Items that aren't using the unique client tag should have a zero
+        // base version only if they have a local ID.  Items with unique client
+        // tags are allowed to use the zero base version for undeletion and
+        // de-duplication; the unique client tag trumps the server ID.
+        if (!using_unique_client_tag) {
+          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
+                          "Should be a client only id.",
+                          trans))
+            return false;
+        }
+      }
+    } else {
+      if (!SyncAssert(id.ServerKnows(),
+                      FROM_HERE,
+                      "Should be a server id.",
+                      trans))
+        return false;
+    }
+    ++entries_done;
+    // Time-box the whole check; a timeout is treated as success.
+    int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
+    if (elapsed_ms > max_ms) {
+      DVLOG(1) << "Cutting Invariant check short after " << elapsed_ms
+               << "ms. Processed " << entries_done << "/" << handles.size()
+               << " entries";
+      return true;
+    }
+
+  }
+  return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// ScopedKernelLock
+
+// Acquires the directory kernel mutex for the lifetime of this object.
+ScopedKernelLock::ScopedKernelLock(const Directory* dir)
+  : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Transactions
+
+// Acquires the directory-wide transaction mutex, tracing contention.
+void BaseTransaction::Lock() {
+  TRACE_EVENT2("sync_lock_contention", "AcquireLock",
+               "src_file", from_here_.file_name(),
+               "src_func", from_here_.function_name());
+
+  dirkernel_->transaction_mutex.Acquire();
+}
+
+// Releases the directory-wide transaction mutex.
+void BaseTransaction::Unlock() {
+  dirkernel_->transaction_mutex.Release();
+}
+
+// Records an unrecoverable error on this transaction.  The directory's
+// OnUnrecoverableError handler is deferred to the transaction destructor;
+// only the (lighter) ReportUnrecoverableError is invoked immediately.
+void BaseTransaction::OnUnrecoverableError(
+    const tracked_objects::Location& location,
+    const std::string& message) {
+  unrecoverable_error_set_ = true;
+  unrecoverable_error_location_ = location;
+  unrecoverable_error_msg_ = message;
+
+  // Note: We dont call the Directory's OnUnrecoverableError method right
+  // away.  Instead we wait to unwind the stack and in the destructor of the
+  // transaction we would call the OnUnrecoverableError method.
+
+  directory()->ReportUnrecoverableError();
+}
+
+// Whether an unrecoverable error was recorded during this transaction.
+bool BaseTransaction::unrecoverable_error_set() const {
+  return unrecoverable_error_set_;
+}
+
+// Forwards a recorded unrecoverable error to the directory, if any.  Called
+// from transaction destructors after the stack has unwound.
+void BaseTransaction::HandleUnrecoverableErrorIfSet() {
+  if (unrecoverable_error_set_) {
+    directory()->OnUnrecoverableError(this,
+                                      unrecoverable_error_location_,
+                                      unrecoverable_error_msg_);
+  }
+}
+
+// Starts a trace event for the transaction's lifetime; the matching end
+// event is emitted in the destructor.  Does NOT acquire the transaction
+// mutex -- subclasses call Lock() themselves.
+BaseTransaction::BaseTransaction(const tracked_objects::Location& from_here,
+                                 const char* name,
+                                 WriterTag writer,
+                                 Directory* directory)
+    : from_here_(from_here), name_(name), writer_(writer),
+      directory_(directory), dirkernel_(directory->kernel_),
+      unrecoverable_error_set_(false) {
+  // TODO(lipalani): Don't issue a good transaction if the directory has
+  // unrecoverable error set. And the callers have to check trans.good before
+  // proceeding.
+  TRACE_EVENT_BEGIN2("sync", name_,
+                     "src_file", from_here_.file_name(),
+                     "src_func", from_here_.function_name());
+}
+
+// Closes the trace event opened in the constructor.
+BaseTransaction::~BaseTransaction() {
+  TRACE_EVENT_END0("sync", name_);
+}
+
+// Read transactions take the same directory-wide lock as writes; the
+// INVALID writer tag marks them as non-writing.
+ReadTransaction::ReadTransaction(const tracked_objects::Location& location,
+                                 Directory* directory)
+    : BaseTransaction(location, "ReadTransaction", INVALID, directory) {
+  Lock();
+}
+
+// Reports any deferred unrecoverable error, then releases the lock.
+ReadTransaction::~ReadTransaction() {
+  HandleUnrecoverableErrorIfSet();
+  Unlock();
+}
+
+// Acquires the transaction lock for the duration of the write transaction.
+WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
+                                   WriterTag writer, Directory* directory)
+    : BaseTransaction(location, "WriteTransaction", writer, directory) {
+  Lock();
+}
+
+// Records a pre-mutation copy of |entry| so the transaction can later
+// compute what changed.  The first recording for a handle wins; later
+// calls for the same handle are no-ops.  NULL |entry| is ignored.
+void WriteTransaction::SaveOriginal(const EntryKernel* entry) {
+  if (!entry) {
+    return;
+  }
+  // Insert only if it's not already there.
+  const int64 handle = entry->ref(META_HANDLE);
+  EntryKernelMutationMap::iterator it = mutations_.lower_bound(handle);
+  if (it == mutations_.end() || it->first != handle) {
+    EntryKernelMutation mutation;
+    mutation.original = *entry;
+    ignore_result(mutations_.insert(it, std::make_pair(handle, mutation)));
+  }
+}
+
+// Fills in the |mutated| half of each recorded mutation and drops entries
+// that were not actually changed.  Returns an immutable view of the
+// resulting map.  Caller must hold the transaction mutex.
+ImmutableEntryKernelMutationMap WriteTransaction::RecordMutations() {
+  dirkernel_->transaction_mutex.AssertAcquired();
+  for (syncable::EntryKernelMutationMap::iterator it = mutations_.begin();
+       it != mutations_.end();) {
+    EntryKernel* kernel = directory()->GetEntryByHandle(it->first);
+    if (!kernel) {
+      // The entry vanished from memory; drop its record.  (The previous
+      // code hit "continue" without advancing |it|, which spun forever in
+      // release builds where NOTREACHED() is a no-op.)
+      NOTREACHED();
+      mutations_.erase(it++);
+      continue;
+    }
+    if (kernel->is_dirty()) {
+      it->second.mutated = *kernel;
+      ++it;
+    } else {
+      DCHECK(!it->second.original.is_dirty());
+      // Not actually mutated, so erase from |mutations_|.
+      mutations_.erase(it++);
+    }
+  }
+  return ImmutableEntryKernelMutationMap(&mutations_);
+}
+
+// Fires change notifications for |mutations| (change/ending events while
+// still holding the transaction mutex, completion event after releasing
+// it), then unlocks.  With no mutations, this just unlocks.
+void WriteTransaction::UnlockAndNotify(
+    const ImmutableEntryKernelMutationMap& mutations) {
+  // Work while transaction mutex is held.
+  ModelTypeSet models_with_changes;
+  bool has_mutations = !mutations.Get().empty();
+  if (has_mutations) {
+    models_with_changes = NotifyTransactionChangingAndEnding(mutations);
+  }
+  Unlock();
+
+  // Work after mutex is relased.
+  if (has_mutations) {
+    NotifyTransactionComplete(models_with_changes);
+  }
+}
+
+// Dispatches calculate-changes and transaction-ending events to the
+// directory's change delegate and transaction observers, routed by whether
+// the writer was the sync API or the syncer.  Returns the model types the
+// delegate reports as changed.  Must be called with the mutex held.
+ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
+    const ImmutableEntryKernelMutationMap& mutations) {
+  dirkernel_->transaction_mutex.AssertAcquired();
+  DCHECK(!mutations.Get().empty());
+
+  WriteTransactionInfo write_transaction_info(
+      dirkernel_->next_write_transaction_id, from_here_, writer_, mutations);
+  ++dirkernel_->next_write_transaction_id;
+
+  ImmutableWriteTransactionInfo immutable_write_transaction_info(
+      &write_transaction_info);
+  DirectoryChangeDelegate* const delegate = dirkernel_->delegate;
+  if (writer_ == syncable::SYNCAPI) {
+    delegate->HandleCalculateChangesChangeEventFromSyncApi(
+        immutable_write_transaction_info, this);
+  } else {
+    delegate->HandleCalculateChangesChangeEventFromSyncer(
+        immutable_write_transaction_info, this);
+  }
+
+  ModelTypeSet models_with_changes =
+      delegate->HandleTransactionEndingChangeEvent(
+          immutable_write_transaction_info, this);
+
+  dirkernel_->transaction_observer.Call(FROM_HERE,
+      &TransactionObserver::OnTransactionWrite,
+      immutable_write_transaction_info, models_with_changes);
+
+  return models_with_changes;
+}
+
+// Notifies the delegate that the transaction is fully complete.  Called
+// after the transaction mutex is released.
+void WriteTransaction::NotifyTransactionComplete(
+    ModelTypeSet models_with_changes) {
+  dirkernel_->delegate->HandleTransactionCompleteChangeEvent(
+      models_with_changes);
+}
+
+// Finalizes the write: records mutations, optionally verifies tree
+// invariants, then either aborts on unrecoverable error or notifies
+// observers and unlocks.
+WriteTransaction::~WriteTransaction() {
+  const ImmutableEntryKernelMutationMap& mutations = RecordMutations();
+
+  if (!unrecoverable_error_set_) {
+    if (OFF != kInvariantCheckLevel) {
+      const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
+      if (full_scan)
+        directory()->CheckTreeInvariants(this, full_scan);
+      else
+        directory()->CheckTreeInvariants(this, mutations.Get());
+    }
+  }
+
+  // |CheckTreeInvariants| could have thrown an unrecoverable error.
+  if (unrecoverable_error_set_) {
+    HandleUnrecoverableErrorIfSet();
+    Unlock();
+    return;
+  }
+
+  UnlockAndNotify(mutations);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Entry
+
+// Looks up an entry by sync id; kernel_ is NULL (good() == false) on miss.
+Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
+    : basetrans_(trans) {
+  kernel_ = trans->directory()->GetEntryById(id);
+}
+
+// Looks up an entry by unique client tag; kernel_ is NULL on miss.
+Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
+    : basetrans_(trans) {
+  kernel_ = trans->directory()->GetEntryByClientTag(tag);
+}
+
+// Looks up an entry by unique server tag; kernel_ is NULL on miss.
+Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
+    : basetrans_(trans) {
+  kernel_ = trans->directory()->GetEntryByServerTag(tag);
+}
+
+// Looks up an entry by metahandle; kernel_ is NULL on miss.
+Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
+    : basetrans_(trans) {
+  kernel_ = trans->directory()->GetEntryByHandle(metahandle);
+}
+
+// Returns the directory that owns this entry's transaction.
+Directory* Entry::dir() const {
+  return basetrans_->directory();
+}
+
+// Delegates to the directory's server-position computation for this kernel.
+Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
+  return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
+}
+
+// Serializes this entry to a DictionaryValue for debugging/about pages.
+// Caller takes ownership of the returned value.  Bad entries contain only
+// the "good" flag.
+DictionaryValue* Entry::ToValue() const {
+  DictionaryValue* entry_info = new DictionaryValue();
+  entry_info->SetBoolean("good", good());
+  if (good()) {
+    entry_info->Set("kernel", kernel_->ToValue());
+    entry_info->Set("modelType",
+                    ModelTypeToValue(GetModelType()));
+    entry_info->SetBoolean("existsOnClientBecauseNameIsNonEmpty",
+                           ExistsOnClientBecauseNameIsNonEmpty());
+    entry_info->SetBoolean("isRoot", IsRoot());
+  }
+  return entry_info;
+}
+
+// Returns a reference to the given string field.  Entry must be good().
+const string& Entry::Get(StringField field) const {
+  DCHECK(kernel_);
+  return kernel_->ref(field);
+}
+
+// Returns the server-side model type, or UNSPECIFIED for uncommitted
+// locally-created items (asserted via the DCHECKs below).
+syncable::ModelType Entry::GetServerModelType() const {
+  ModelType specifics_type = kernel_->GetServerModelType();
+  if (specifics_type != UNSPECIFIED)
+    return specifics_type;
+
+  // Otherwise, we don't have a server type yet.  That should only happen
+  // if the item is an uncommitted locally created item.
+  // It's possible we'll need to relax these checks in the future; they're
+  // just here for now as a safety measure.
+  DCHECK(Get(IS_UNSYNCED));
+  DCHECK_EQ(Get(SERVER_VERSION), 0);
+  DCHECK(Get(SERVER_IS_DEL));
+  // Note: can't enforce !Get(ID).ServerKnows() here because that could
+  // actually happen if we hit AttemptReuniteLostCommitResponses.
+  return UNSPECIFIED;
+}
+
+// Returns the local model type derived from SPECIFICS, treating the root
+// and server-created tagged directories as TOP_LEVEL_FOLDER.
+syncable::ModelType Entry::GetModelType() const {
+  ModelType specifics_type = GetModelTypeFromSpecifics(Get(SPECIFICS));
+  if (specifics_type != UNSPECIFIED)
+    return specifics_type;
+  if (IsRoot())
+    return TOP_LEVEL_FOLDER;
+  // Loose check for server-created top-level folders that aren't
+  // bound to a particular model type.
+  if (!Get(UNIQUE_SERVER_TAG).empty() && Get(IS_DIR))
+    return TOP_LEVEL_FOLDER;
+
+  return UNSPECIFIED;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// MutableEntry
+
+// Creates a brand-new local entry named |name| under |parent_id|.  Check
+// good() afterwards: Init() leaves kernel_ NULL if insertion fails.
+MutableEntry::MutableEntry(WriteTransaction* trans, Create,
+                           const Id& parent_id, const string& name)
+    : Entry(trans),
+      write_transaction_(trans) {
+  Init(trans, parent_id, name);
+}
+
+
+// Builds and registers the kernel for a newly created entry.  On insertion
+// failure, kernel_ stays NULL and the scoped_ptr frees the partial kernel.
+void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
+                        const string& name) {
+  scoped_ptr<EntryKernel> kernel(new EntryKernel);
+  kernel_ = NULL;
+
+  kernel->put(ID, trans->directory_->NextId());
+  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
+  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
+  kernel->put(PARENT_ID, parent_id);
+  kernel->put(NON_UNIQUE_NAME, name);
+  const base::Time& now = base::Time::Now();
+  kernel->put(CTIME, now);
+  kernel->put(MTIME, now);
+  // We match the database defaults here
+  kernel->put(BASE_VERSION, CHANGES_VERSION);
+  if (!trans->directory()->InsertEntry(trans, kernel.get())) {
+    return;  // We failed inserting, nothing more to do.
+  }
+  // Because this entry is new, it was originally deleted.
+  // Toggle IS_DEL around SaveOriginal so the recorded "original" state is
+  // deleted, making this transaction's mutation read as a creation.
+  kernel->put(IS_DEL, true);
+  trans->SaveOriginal(kernel.get());
+  kernel->put(IS_DEL, false);
+
+  // Now swap the pointers.
+  kernel_ = kernel.release();
+}
+
+// Creates a placeholder entry for a server update with the given server
+// |id|.  Fails (kernel_ stays NULL) if an entry with that id already
+// exists or insertion fails; check good().
+MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
+                           const Id& id)
+    : Entry(trans), write_transaction_(trans) {
+  Entry same_id(trans, GET_BY_ID, id);
+  kernel_ = NULL;
+  if (same_id.good()) {
+    return;  // already have an item with this ID.
+  }
+  scoped_ptr<EntryKernel> kernel(new EntryKernel());
+
+  kernel->put(ID, id);
+  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
+  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
+  kernel->put(IS_DEL, true);
+  // We match the database defaults here
+  kernel->put(BASE_VERSION, CHANGES_VERSION);
+  if (!trans->directory()->InsertEntry(trans, kernel.get())) {
+    return;  // Failed inserting.
+  }
+  trans->SaveOriginal(kernel.get());
+
+  kernel_ = kernel.release();
+}
+
+// Mutable lookup by sync id; records the pre-mutation state for the
+// transaction.  SaveOriginal ignores a NULL kernel on miss.
+MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
+    : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
+  trans->SaveOriginal(kernel_);
+}
+
+// Mutable lookup by metahandle; records the pre-mutation state.
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
+                           int64 metahandle)
+    : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
+  trans->SaveOriginal(kernel_);
+}
+
+// Mutable lookup by unique client tag; records the pre-mutation state.
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
+                           const std::string& tag)
+    : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
+  trans->SaveOriginal(kernel_);
+}
+
+// Mutable lookup by unique server tag; records the pre-mutation state.
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
+                           const string& tag)
+    : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
+  trans->SaveOriginal(kernel_);
+}
+
+// Sets the deleted bit.  Deleting unlinks the entry from sibling order;
+// undeleting re-inserts it at the head of its parent's order.  Returns
+// false only when unlinking fails during deletion.
+bool MutableEntry::PutIsDel(bool is_del) {
+  DCHECK(kernel_);
+  if (is_del == kernel_->ref(IS_DEL)) {
+    return true;
+  }
+  if (is_del) {
+    if (!UnlinkFromOrder()) {
+      return false;
+    }
+  }
+
+  {
+    ScopedKernelLock lock(dir());
+    // Some indices don't include deleted items and must be updated
+    // upon a value change.
+    ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
+        dir()->kernel_->parent_id_child_index);
+
+    kernel_->put(IS_DEL, is_del);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+
+  // Braces added: the old brace-less "if (!is_del)" wrapping a comment and
+  // a nested if was a dangling-if hazard waiting for the next edit.
+  if (!is_del) {
+    // Restores position to the 0th index.
+    if (!PutPredecessor(Id())) {
+      // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
+      NOTREACHED();
+    }
+  }
+
+  return true;
+}
+
+// Sets an int64 field, marking the entry dirty on change.  The
+// SERVER_POSITION_IN_PARENT field participates in the parent/child index,
+// so its update is wrapped in a ScopedIndexUpdater.
+bool MutableEntry::Put(Int64Field field, const int64& value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    ScopedKernelLock lock(dir());
+    if (SERVER_POSITION_IN_PARENT == field) {
+      ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
+          dir()->kernel_->parent_id_child_index);
+      kernel_->put(field, value);
+    } else {
+      kernel_->put(field, value);
+    }
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Sets a time field, marking the entry dirty on change.
+bool MutableEntry::Put(TimeField field, const base::Time& value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    kernel_->put(field, value);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Sets an id field.  ID changes go through the directory's reindexing (and
+// can fail); PARENT_ID changes also repair sibling order by re-inserting
+// the entry at position 0.
+bool MutableEntry::Put(IdField field, const Id& value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    if (ID == field) {
+      if (!dir()->ReindexId(write_transaction(), kernel_, value))
+        return false;
+    } else if (PARENT_ID == field) {
+      PutParentIdPropertyOnly(value);  // Makes sibling order inconsistent.
+      // Fixes up the sibling order inconsistency.
+      if (!PutPredecessor(Id())) {
+        // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
+        NOTREACHED();
+      }
+    } else {
+      kernel_->put(field, value);
+    }
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Changes PARENT_ID and reindexes, WITHOUT fixing sibling order; callers
+// must follow up with PutPredecessor (see Put(IdField)).
+void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
+  dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
+  kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+}
+
+// Sets a base-version field, marking the entry dirty on change.
+bool MutableEntry::Put(BaseVersion field, int64 value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    kernel_->put(field, value);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Sets a string field; all string writes funnel through PutImpl.
+bool MutableEntry::Put(StringField field, const string& value) {
+  return PutImpl(field, value);
+}
+
+// Sets a specifics proto field, marking the entry dirty on change.  When
+// SERVER_SPECIFICS changes on an unapplied update, the entry's handle is
+// moved between per-type unapplied-update sets, since the server model
+// type may have changed with the specifics.
+bool MutableEntry::Put(ProtoField field,
+                       const sync_pb::EntitySpecifics& value) {
+  DCHECK(kernel_);
+  // TODO(ncarter): This is unfortunately heavyweight.  Can we do
+  // better?
+  if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) {
+    const bool update_unapplied_updates_index =
+        (field == SERVER_SPECIFICS) && kernel_->ref(IS_UNAPPLIED_UPDATE);
+    if (update_unapplied_updates_index) {
+      // Remove ourselves from unapplied_update_metahandles with our
+      // old server type.
+      const syncable::ModelType old_server_type =
+          kernel_->GetServerModelType();
+      const int64 metahandle = kernel_->ref(META_HANDLE);
+      size_t erase_count =
+          dir()->kernel_->unapplied_update_metahandles[old_server_type]
+          .erase(metahandle);
+      DCHECK_EQ(erase_count, 1u);
+    }
+
+    kernel_->put(field, value);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+
+    if (update_unapplied_updates_index) {
+      // Add ourselves back into unapplied_update_metahandles with our
+      // new server type.
+      const syncable::ModelType new_server_type =
+          kernel_->GetServerModelType();
+      const int64 metahandle = kernel_->ref(META_HANDLE);
+      dir()->kernel_->unapplied_update_metahandles[new_server_type]
+          .insert(metahandle);
+    }
+  }
+  return true;
+}
+
+// Sets a persisted boolean flag, using GetDirtyIndexHelper() so subclasses
+// can redirect which dirty index gets updated.  Always succeeds.
+bool MutableEntry::Put(BitField field, bool value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    kernel_->put(field, value);
+    kernel_->mark_dirty(GetDirtyIndexHelper());
+  }
+  return true;
+}
+
+// Returns the dirty-metahandle index used by Put(BitField).  Virtual-like
+// hook point for tests/subclasses (overridable per the header).
+MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
+  return dir()->kernel_->dirty_metahandles;
+}
+
+// Sets UNIQUE_CLIENT_TAG, enforcing uniqueness via the client-tag index.
+// Returns false (and changes nothing) if a different entry already holds
+// |new_tag|.  The kernel lock guards the index lookup and update.
+bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
+  // There is no SERVER_UNIQUE_CLIENT_TAG. This field is similar to ID.
+  string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
+  if (old_tag == new_tag) {
+    return true;
+  }
+
+  ScopedKernelLock lock(dir());
+  if (!new_tag.empty()) {
+    // Make sure your new value is not in there already.
+    // Probe the index with a copy of this kernel carrying the new tag.
+    EntryKernel lookup_kernel_ = *kernel_;
+    lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
+    bool new_tag_conflicts =
+        (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
+    if (new_tag_conflicts) {
+      return false;
+    }
+  }
+
+  {
+    // ScopedIndexUpdater removes the kernel from the index on construction
+    // and re-adds it (with the new tag) on destruction.
+    ScopedIndexUpdater<ClientTagIndexer> index_updater(lock, kernel_,
+        dir()->kernel_->client_tag_index);
+    kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Shared implementation for string-field puts.  UNIQUE_CLIENT_TAG is routed
+// through the uniqueness-checked path; all other string fields are simple
+// compare-and-set operations that always succeed.
+bool MutableEntry::PutImpl(StringField field, const string& value) {
+  DCHECK(kernel_);
+  if (field == UNIQUE_CLIENT_TAG) {
+    return PutUniqueClientTag(value);
+  }
+
+  if (kernel_->ref(field) != value) {
+    kernel_->put(field, value);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Sets IS_UNSYNCED or IS_UNAPPLIED_UPDATE and keeps the corresponding
+// metahandle index in sync: inserting on a false->true transition and erasing
+// on true->false.  Returns false if the index update fails its SyncAssert
+// (i.e. the index disagreed with the bit's previous value).
+bool MutableEntry::Put(IndexedBitField field, bool value) {
+  DCHECK(kernel_);
+  if (kernel_->ref(field) != value) {
+    MetahandleSet* index;
+    if (IS_UNSYNCED == field) {
+      index = dir()->kernel_->unsynced_metahandles;
+    } else {
+      // Use kernel_->GetServerModelType() instead of
+      // GetServerModelType() as we may trigger some DCHECKs in the
+      // latter.
+      index =
+          &dir()->kernel_->unapplied_update_metahandles[
+              kernel_->GetServerModelType()];
+    }
+
+    ScopedKernelLock lock(dir());
+    if (value) {
+      if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
+                      FROM_HERE,
+                      "Could not insert",
+                      write_transaction())) {
+        return false;
+      }
+    } else {
+      if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
+                      FROM_HERE,
+                      "Entry not successfully erased",
+                      write_transaction())) {
+        return false;
+      }
+    }
+    kernel_->put(field, value);
+    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Detaches this entry from its sibling linked list (see
+// Directory::UnlinkEntryFromOrder), taking the kernel lock first.
+bool MutableEntry::UnlinkFromOrder() {
+  ScopedKernelLock lock(dir());
+  return dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
+}
+
+// Removes |entry| from its sibling linked list by pointing its previous and
+// next siblings at each other, then self-loops the entry (PREV_ID == NEXT_ID
+// == ID) to mark it as unlinked.  |trans| may be NULL (no originals are
+// saved in that case); |lock| must be held by the caller.  Returns false if
+// an inconsistency is detected via SyncAssert.
+bool Directory::UnlinkEntryFromOrder(EntryKernel* entry,
+                                     WriteTransaction* trans,
+                                     ScopedKernelLock* lock) {
+  if (!SyncAssert(!trans || this == trans->directory(),
+                  FROM_HERE,
+                  "Transaction not pointing to the right directory",
+                  trans))
+    return false;
+  Id old_previous = entry->ref(PREV_ID);
+  Id old_next = entry->ref(NEXT_ID);
+
+  // Self-loop the entry first; this is the "unlinked" marker state.
+  entry->put(NEXT_ID, entry->ref(ID));
+  entry->put(PREV_ID, entry->ref(ID));
+  entry->mark_dirty(kernel_->dirty_metahandles);
+
+  if (!old_previous.IsRoot()) {
+    if (old_previous == old_next) {
+      // Note previous == next doesn't imply previous == next == Get(ID). We
+      // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
+      // and deleted before receiving the server ID in the commit response.
+      if (!SyncAssert(
+              (old_next == entry->ref(ID)) || !old_next.ServerKnows(),
+              FROM_HERE,
+              "Encountered inconsistent entry while deleting",
+              trans)) {
+        return false;
+      }
+      return true;  // Done if we were already self-looped (hence unlinked).
+    }
+    EntryKernel* previous_entry = GetEntryById(old_previous, lock);
+    ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
+    // TODO(tim): Multiple asserts here for bug 101039 investigation.
+    if (type == AUTOFILL) {
+      if (!SyncAssert(previous_entry != NULL,
+                      FROM_HERE,
+                      "Could not find previous autofill entry",
+                      trans)) {
+        return false;
+      }
+    } else {
+      if (!SyncAssert(previous_entry != NULL,
+                      FROM_HERE,
+                      "Could not find previous entry",
+                      trans)) {
+        return false;
+      }
+    }
+    if (trans)
+      trans->SaveOriginal(previous_entry);
+    previous_entry->put(NEXT_ID, old_next);
+    previous_entry->mark_dirty(kernel_->dirty_metahandles);
+  }
+
+  if (!old_next.IsRoot()) {
+    EntryKernel* next_entry = GetEntryById(old_next, lock);
+    if (!SyncAssert(next_entry != NULL,
+                    FROM_HERE,
+                    "Could not find next entry",
+                    trans)) {
+      return false;
+    }
+    if (trans)
+      trans->SaveOriginal(next_entry);
+    next_entry->put(PREV_ID, old_previous);
+    next_entry->mark_dirty(kernel_->dirty_metahandles);
+  }
+  return true;
+}
+
+// Places this entry immediately after |predecessor_id| in its parent's
+// sibling linked list, first unlinking it from its current position.  A root
+// (IsRoot) predecessor means "first child".  Deleted entries stay unlinked.
+// Returns false if the predecessor/successor can't be found or is in a
+// different parent.
+bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
+  if (!UnlinkFromOrder())
+    return false;
+
+  if (Get(IS_DEL)) {
+    DCHECK(predecessor_id.IsNull());
+    return true;
+  }
+
+  // TODO(ncarter): It should be possible to not maintain position for
+  // non-bookmark items.  However, we'd need to robustly handle all possible
+  // permutations of setting IS_DEL and the SPECIFICS to identify the
+  // object type; or else, we'd need to add a ModelType to the
+  // MutableEntry's Create ctor.
+  //   if (!ShouldMaintainPosition()) {
+  //     return false;
+  //   }
+
+  // This is classic insert-into-doubly-linked-list from CS 101 and your last
+  // job interview.  An "IsRoot" Id signifies the head or tail.
+  Id successor_id;
+  if (!predecessor_id.IsRoot()) {
+    MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
+    if (!predecessor.good()) {
+      LOG(ERROR) << "Predecessor is not good : "
+                 << predecessor_id.GetServerId();
+      return false;
+    }
+    if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
+      return false;
+    successor_id = predecessor.Get(NEXT_ID);
+    predecessor.Put(NEXT_ID, Get(ID));
+  } else {
+    // Inserting at the head: the current first child becomes our successor.
+    syncable::Directory* dir = trans()->directory();
+    if (!dir->GetFirstChildId(trans(), Get(PARENT_ID), &successor_id)) {
+      return false;
+    }
+  }
+  if (!successor_id.IsRoot()) {
+    MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
+    if (!successor.good()) {
+      LOG(ERROR) << "Successor is not good: "
+                 << successor_id.GetServerId();
+      return false;
+    }
+    if (successor.Get(PARENT_ID) != Get(PARENT_ID))
+      return false;
+    successor.Put(PREV_ID, Get(ID));
+  }
+  DCHECK(predecessor_id != Get(ID));
+  DCHECK(successor_id != Get(ID));
+  Put(PREV_ID, predecessor_id);
+  Put(NEXT_ID, successor_id);
+  return true;
+}
+
+// Sets an in-memory-only temp bit.  Temps are never persisted, so no dirty
+// marking is needed.  Always succeeds.
+bool MutableEntry::Put(BitTemp field, bool value) {
+  DCHECK(kernel_);
+  kernel_->put(field, value);
+  return true;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// High-level functions
+
+// Allocates the next monotonically-increasing metahandle under the kernel
+// lock.
+int64 Directory::NextMetahandle() {
+  ScopedKernelLock lock(this);
+  int64 metahandle = (kernel_->next_metahandle)++;
+  return metahandle;
+}
+
+// Always returns a client ID that is the string representation of a negative
+// number.  Decrements the persisted counter and marks the share info dirty so
+// the new counter value is saved.
+Id Directory::NextId() {
+  int64 result;
+  {
+    ScopedKernelLock lock(this);
+    result = (kernel_->persisted_info.next_id)--;
+    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
+  }
+  DCHECK_LT(result, 0);
+  return Id::CreateFromClientString(base::Int64ToString(result));
+}
+
+// Returns true if |id| has at least one linked (non-self-looped) child.
+// |trans| is unused here; it documents that a transaction is held.
+bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
+  ScopedKernelLock lock(this);
+  return (GetPossibleFirstChild(lock, id) != NULL);
+}
+
+// Finds the first child of |parent_id| in linked-list order.  Sets
+// |*first_child_id| to the root Id when there are no children (returning
+// true), and returns false only if the list is broken (a PREV_ID lookup
+// fails).
+bool Directory::GetFirstChildId(BaseTransaction* trans,
+                                const Id& parent_id,
+                                Id* first_child_id) {
+  ScopedKernelLock lock(this);
+  EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
+  if (!entry) {
+    *first_child_id = Id();
+    return true;
+  }
+
+  // Walk to the front of the list; the server position ordering
+  // is commonly identical to the linked-list ordering, but pending
+  // unsynced or unapplied items may diverge.
+  while (!entry->ref(PREV_ID).IsRoot()) {
+    entry = GetEntryById(entry->ref(PREV_ID), &lock);
+    if (!entry) {
+      *first_child_id = Id();
+      return false;
+    }
+  }
+  *first_child_id = entry->ref(ID);
+  return true;
+}
+
+// Test-only mirror of GetFirstChildId: finds the last child of |parent_id|
+// in linked-list order by walking NEXT_ID to the tail.  Root Id + true when
+// childless; false on a broken list.
+bool Directory::GetLastChildIdForTest(
+    BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
+  ScopedKernelLock lock(this);
+  EntryKernel* entry = GetPossibleLastChildForTest(lock, parent_id);
+  if (!entry) {
+    *last_child_id = Id();
+    return true;
+  }
+
+  // Walk to the back of the list; the server position ordering
+  // is commonly identical to the linked-list ordering, but pending
+  // unsynced or unapplied items may diverge.
+  while (!entry->ref(NEXT_ID).IsRoot()) {
+    entry = GetEntryById(entry->ref(NEXT_ID), &lock);
+    if (!entry) {
+      *last_child_id = Id();
+      return false;
+    }
+  }
+
+  *last_child_id = entry->ref(ID);
+  return true;
+}
+
+// Returns the Id of a suitable local predecessor for |entry| under
+// |parent_id|, derived from SERVER_POSITION_IN_PARENT.  Scans backwards from
+// the server-ordered insertion point, skipping candidates whose server fields
+// can't be trusted.  Returns the root Id when |entry| should be first.
+Id Directory::ComputePrevIdFromServerPosition(
+    const EntryKernel* entry,
+    const syncable::Id& parent_id) {
+  ScopedKernelLock lock(this);
+
+  // Find the natural insertion point in the parent_id_child_index, and
+  // work back from there, filtering out ineligible candidates.
+  ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock,
+      parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID));
+  ParentIdChildIndex::iterator first_sibling =
+      GetParentChildIndexLowerBound(lock, parent_id);
+
+  while (sibling != first_sibling) {
+    --sibling;
+    EntryKernel* candidate = *sibling;
+
+    // The item itself should never be in the range under consideration.
+    DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
+
+    // Ignore unapplied updates -- they might not even be server-siblings.
+    if (candidate->ref(IS_UNAPPLIED_UPDATE))
+      continue;
+
+    // We can't trust the SERVER_ fields of unsynced items, but they are
+    // potentially legitimate local predecessors.  In the case where
+    // |update_item| and an unsynced item wind up in the same insertion
+    // position, we need to choose how to order them.  The following check puts
+    // the unapplied update first; removing it would put the unsynced item(s)
+    // first.
+    if (candidate->ref(IS_UNSYNCED))
+      continue;
+
+    // Skip over self-looped items, which are not valid predecessors.  This
+    // shouldn't happen in practice, but is worth defending against.
+    if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
+        !candidate->ref(PREV_ID).IsRoot()) {
+      NOTREACHED();
+      continue;
+    }
+    return candidate->ref(ID);
+  }
+  // This item will be the first in the sibling order.
+  return Id();
+}
+
+// Returns true if |entry_id| may be reparented under |new_parent_id|: the
+// entry must not be the root, and must not be an ancestor of the new parent
+// (which would create a cycle).  Walks the ancestor chain up to the root.
+bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
+                      const Id& new_parent_id) {
+  if (entry_id.IsRoot())
+    return false;
+  // we have to ensure that the entry is not an ancestor of the new parent.
+  Id ancestor_id = new_parent_id;
+  while (!ancestor_id.IsRoot()) {
+    if (entry_id == ancestor_id)
+      return false;
+    Entry new_parent(trans, GET_BY_ID, ancestor_id);
+    if (!SyncAssert(new_parent.good(),
+                    FROM_HERE,
+                    "Invalid new parent",
+                    trans))
+      return false;
+    ancestor_id = new_parent.Get(PARENT_ID);
+  }
+  return true;
+}
+
+// This function sets only the flags needed to get this entry to sync:
+// IS_UNSYNCED on, the SYNCING temp bit off.  Returns false if setting
+// IS_UNSYNCED fails.
+bool MarkForSyncing(syncable::MutableEntry* e) {
+  DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
+  DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
+  if (!(e->Put(IS_UNSYNCED, true)))
+    return false;
+  e->Put(SYNCING, false);
+  return true;
+}
+
+// Debug-prints every kernel field of |entry|.  Relies on the field enums
+// forming one contiguous range (BEGIN_FIELDS .. BIT_TEMPS_END), so a single
+// index |i| can walk through each typed sub-range in declaration order.
+std::ostream& operator<<(std::ostream& os, const Entry& entry) {
+  int i;
+  EntryKernel* const kernel = entry.kernel_;
+  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+    os << g_metas_columns[i].name << ": "
+       << kernel->ref(static_cast<Int64Field>(i)) << ", ";
+  }
+  for ( ; i < TIME_FIELDS_END; ++i) {
+    os << g_metas_columns[i].name << ": "
+       << browser_sync::GetTimeDebugString(
+           kernel->ref(static_cast<TimeField>(i))) << ", ";
+  }
+  for ( ; i < ID_FIELDS_END; ++i) {
+    os << g_metas_columns[i].name << ": "
+       << kernel->ref(static_cast<IdField>(i)) << ", ";
+  }
+  os << "Flags: ";
+  for ( ; i < BIT_FIELDS_END; ++i) {
+    if (kernel->ref(static_cast<BitField>(i)))
+      os << g_metas_columns[i].name << ", ";
+  }
+  for ( ; i < STRING_FIELDS_END; ++i) {
+    const string& field = kernel->ref(static_cast<StringField>(i));
+    os << g_metas_columns[i].name << ": " << field << ", ";
+  }
+  for ( ; i < PROTO_FIELDS_END; ++i) {
+    // Specifics are binary; escape them so the output stays printable.
+    os << g_metas_columns[i].name << ": "
+       << net::EscapePath(
+           kernel->ref(static_cast<ProtoField>(i)).SerializeAsString())
+       << ", ";
+  }
+  os << "TempFlags: ";
+  for ( ; i < BIT_TEMPS_END; ++i) {
+    if (kernel->ref(static_cast<BitTemp>(i)))
+      os << "#" << i - BIT_TEMPS_BEGIN << ", ";
+  }
+  return os;
+}
+
+// Prints |blob| as zero-padded lowercase hex bytes, restoring the stream to
+// decimal mode afterwards.
+std::ostream& operator<<(std::ostream& s, const Blob& blob) {
+  for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
+    s << std::hex << std::setw(2)
+      << std::setfill('0') << static_cast<unsigned int>(*i);
+  return s << std::dec;
+}
+
+// Returns the lower-bound position in the parent/child index for the
+// (parent, server position, id) triple, using the kernel's reusable "needle"
+// kernel as the search key.  Caller must hold |lock|.
+Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
+    const ScopedKernelLock& lock,
+    const Id& parent_id,
+    int64 position_in_parent,
+    const Id& item_id_for_tiebreaking) {
+  kernel_->needle.put(PARENT_ID, parent_id);
+  kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent);
+  kernel_->needle.put(ID, item_id_for_tiebreaking);
+  return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
+}
+
+// Returns the first index entry whose parent is |parent_id| (start of that
+// parent's range).
+Directory::ParentIdChildIndex::iterator
+Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
+                                         const Id& parent_id) {
+  // Peg the parent ID, and use the least values for the remaining
+  // index variables.
+  return LocateInParentChildIndex(lock, parent_id,
+      std::numeric_limits<int64>::min(),
+      Id::GetLeastIdForLexicographicComparison());
+}
+
+// Returns one-past-the-end of |parent_id|'s range in the parent/child index.
+Directory::ParentIdChildIndex::iterator
+Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
+                                         const Id& parent_id) {
+  // The upper bound of |parent_id|'s range is the lower
+  // bound of |++parent_id|'s range.
+  return GetParentChildIndexLowerBound(lock,
+      parent_id.GetLexicographicSuccessor());
+}
+
+// Appends the metahandles of all children of |parent_id| (index order, i.e.
+// server-position order) to |result|.  Caller must hold |lock|.
+void Directory::AppendChildHandles(const ScopedKernelLock& lock,
+                                   const Id& parent_id,
+                                   Directory::ChildHandles* result) {
+  typedef ParentIdChildIndex::iterator iterator;
+  CHECK(result);
+  for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
+           end = GetParentChildIndexUpperBound(lock, parent_id);
+       i != end; ++i) {
+    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
+    result->push_back((*i)->ref(META_HANDLE));
+  }
+}
+
+// Returns some linked (non-self-looped) child of |parent_id| near the front
+// of the server ordering, or NULL if the parent has no linked children.  The
+// result is only a starting point; callers walk PREV_ID to find the true
+// first child.
+EntryKernel* Directory::GetPossibleFirstChild(
+    const ScopedKernelLock& lock, const Id& parent_id) {
+  // We can use the server positional ordering as a hint because it's generally
+  // in sync with the local (linked-list) positional ordering, and we have an
+  // index on it.
+  ParentIdChildIndex::iterator candidate =
+      GetParentChildIndexLowerBound(lock, parent_id);
+  ParentIdChildIndex::iterator end_range =
+      GetParentChildIndexUpperBound(lock, parent_id);
+  for (; candidate != end_range; ++candidate) {
+    EntryKernel* entry = *candidate;
+    // Filter out self-looped items, which are temporarily not in the child
+    // ordering.
+    if (entry->ref(PREV_ID).IsRoot() ||
+        entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
+      return entry;
+    }
+  }
+  // There were no children in the linked list.
+  return NULL;
+}
+
+// Test-only mirror of GetPossibleFirstChild: scans the parent's index range
+// backwards for a linked child near the back of the server ordering, or NULL
+// if there is none.
+EntryKernel* Directory::GetPossibleLastChildForTest(
+    const ScopedKernelLock& lock, const Id& parent_id) {
+  // We can use the server positional ordering as a hint because it's generally
+  // in sync with the local (linked-list) positional ordering, and we have an
+  // index on it.
+  ParentIdChildIndex::iterator begin_range =
+      GetParentChildIndexLowerBound(lock, parent_id);
+  ParentIdChildIndex::iterator candidate =
+      GetParentChildIndexUpperBound(lock, parent_id);
+
+  while (begin_range != candidate) {
+    --candidate;
+    EntryKernel* entry = *candidate;
+
+    // Filter out self-looped items, which are temporarily not in the child
+    // ordering.
+    if (entry->ref(NEXT_ID).IsRoot() ||
+        entry->ref(NEXT_ID) != entry->ref(PREV_ID)) {
+      return entry;
+    }
+  }
+  // There were no children in the linked list.
+  return NULL;
+}
+
+} // namespace syncable
diff --git a/sync/syncable/syncable.h b/sync/syncable/syncable.h
new file mode 100644
index 0000000..758dcac
--- /dev/null
+++ b/sync/syncable/syncable.h
@@ -0,0 +1,1349 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_H_
+#define SYNC_SYNCABLE_SYNCABLE_H_
+#pragma once
+
+#include <algorithm>
+#include <bitset>
+#include <cstddef>
+#include <iosfwd>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "base/time.h"
+#include "sync/syncable/blob.h"
+#include "sync/syncable/dir_open_result.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable_id.h"
+#include "sync/util/cryptographer.h"
+#include "sync/util/immutable.h"
+#include "sync/util/report_unrecoverable_error_function.h"
+#include "sync/util/unrecoverable_error_handler.h"
+#include "sync/util/time.h"
+#include "sync/util/weak_handle.h"
+#include "sync/protocol/sync.pb.h"
+
+namespace base {
+class DictionaryValue;
+class ListValue;
+}
+
+namespace browser_sync {
+class Encryptor;
+} // namespace browser_sync
+
+namespace sync_api {
+class ReadTransaction;
+class WriteNode;
+class ReadNode;
+} // sync_api
+
+namespace syncable {
+class DirectoryChangeDelegate;
+class TransactionObserver;
+class Entry;
+
+std::ostream& operator<<(std::ostream& s, const Entry& e);
+
+class DirectoryBackingStore;
+
+static const int64 kInvalidMetaHandle = 0;
+
+// Things you need to update if you change any of the fields below:
+// - EntryKernel struct in syncable.h (this file)
+// - syncable_columns.h
+// - syncable_enum_conversions{.h,.cc,_unittest.cc}
+// - EntryKernel::EntryKernel(), EntryKernel::ToValue(), operator<<
+// for Entry in syncable.cc
+// - BindFields() and UnpackEntry() in directory_backing_store.cc
+// - TestSimpleFieldsPreservedDuringSaveChanges in syncable_unittest.cc
+
+// The field enums below partition one contiguous integer range
+// [BEGIN_FIELDS, BIT_TEMPS_END) into typed sub-ranges.  Each sub-range
+// indexes into the matching array inside EntryKernel, so the *_BEGIN/*_END
+// sentinels must stay consistent with one another.
+enum {
+  BEGIN_FIELDS = 0,
+  INT64_FIELDS_BEGIN = BEGIN_FIELDS
+};
+
+enum MetahandleField {
+  // Primary key into the table.  Keep this as a handle to the meta entry
+  // across transactions.
+  META_HANDLE = INT64_FIELDS_BEGIN
+};
+
+enum BaseVersion {
+  // After initial upload, the version is controlled by the server, and is
+  // increased whenever the data or metadata changes on the server.
+  BASE_VERSION = META_HANDLE + 1,
+};
+
+enum Int64Field {
+  SERVER_VERSION = BASE_VERSION + 1,
+
+  // A numeric position value that indicates the relative ordering of
+  // this object among its siblings.
+  SERVER_POSITION_IN_PARENT,
+
+  LOCAL_EXTERNAL_ID,  // ID of an item in the external local storage that this
+                      // entry is associated with. (such as bookmarks.js)
+
+  INT64_FIELDS_END
+};
+
+enum {
+  INT64_FIELDS_COUNT = INT64_FIELDS_END - INT64_FIELDS_BEGIN,
+  TIME_FIELDS_BEGIN = INT64_FIELDS_END,
+};
+
+// Local and server modification/creation timestamps.
+enum TimeField {
+  MTIME = TIME_FIELDS_BEGIN,
+  SERVER_MTIME,
+  CTIME,
+  SERVER_CTIME,
+  TIME_FIELDS_END,
+};
+
+enum {
+  TIME_FIELDS_COUNT = TIME_FIELDS_END - TIME_FIELDS_BEGIN,
+  ID_FIELDS_BEGIN = TIME_FIELDS_END,
+};
+
+enum IdField {
+  // Code in InitializeTables relies on ID being the first IdField value.
+  ID = ID_FIELDS_BEGIN,
+  PARENT_ID,
+  SERVER_PARENT_ID,
+
+  // Sibling linked-list pointers; PREV_ID == NEXT_ID == ID marks an
+  // unlinked (self-looped) entry.
+  PREV_ID,
+  NEXT_ID,
+  ID_FIELDS_END
+};
+
+enum {
+  ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
+  BIT_FIELDS_BEGIN = ID_FIELDS_END
+};
+
+// Bits that also maintain a directory-level metahandle index; set them via
+// MutableEntry::Put(IndexedBitField, ...) so the index stays consistent.
+enum IndexedBitField {
+  IS_UNSYNCED = BIT_FIELDS_BEGIN,
+  IS_UNAPPLIED_UPDATE,
+  INDEXED_BIT_FIELDS_END,
+};
+
+enum IsDelField {
+  IS_DEL = INDEXED_BIT_FIELDS_END,
+};
+
+enum BitField {
+  IS_DIR = IS_DEL + 1,
+  SERVER_IS_DIR,
+  SERVER_IS_DEL,
+  BIT_FIELDS_END
+};
+
+enum {
+  BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
+  STRING_FIELDS_BEGIN = BIT_FIELDS_END
+};
+
+enum StringField {
+  // Name, will be truncated by server. Can be duplicated in a folder.
+  NON_UNIQUE_NAME = STRING_FIELDS_BEGIN,
+  // The server version of |NON_UNIQUE_NAME|.
+  SERVER_NON_UNIQUE_NAME,
+
+  // A tag string which identifies this node as a particular top-level
+  // permanent object.  The tag can be thought of as a unique key that
+  // identifies a singleton instance.
+  UNIQUE_SERVER_TAG,  // Tagged by the server
+  UNIQUE_CLIENT_TAG,  // Tagged by the client
+  STRING_FIELDS_END,
+};
+
+enum {
+  STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
+  PROTO_FIELDS_BEGIN = STRING_FIELDS_END
+};
+
+// From looking at the sqlite3 docs, it's not directly stated, but it
+// seems the overhead for storing a NULL blob is very small.
+enum ProtoField {
+  SPECIFICS = PROTO_FIELDS_BEGIN,
+  SERVER_SPECIFICS,
+  BASE_SERVER_SPECIFICS,
+  PROTO_FIELDS_END,
+};
+
+enum {
+  PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN
+};
+
+enum {
+  FIELD_COUNT = PROTO_FIELDS_END,
+  // Past this point we have temporaries, stored in memory only.
+  BEGIN_TEMPS = PROTO_FIELDS_END,
+  BIT_TEMPS_BEGIN = BEGIN_TEMPS,
+};
+
+// In-memory-only flags; never persisted to the database.
+enum BitTemp {
+  SYNCING = BIT_TEMPS_BEGIN,
+  BIT_TEMPS_END,
+};
+
+enum {
+  BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
+};
+
+class BaseTransaction;
+class WriteTransaction;
+class ReadTransaction;
+class Directory;
+
+// Instead of:
+//   Entry e = transaction.GetById(id);
+// use:
+//   Entry e(transaction, GET_BY_ID, id);
+//
+// Why?  The former would require a copy constructor, and it would be difficult
+// to enforce that an entry never outlived its transaction if there were a copy
+// constructor.
+enum GetById {
+  GET_BY_ID
+};
+
+enum GetByClientTag {
+  GET_BY_CLIENT_TAG
+};
+
+enum GetByServerTag {
+  GET_BY_SERVER_TAG
+};
+
+enum GetByHandle {
+  GET_BY_HANDLE
+};
+
+enum Create {
+  CREATE
+};
+
+enum CreateNewUpdateItem {
+  CREATE_NEW_UPDATE_ITEM
+};
+
+typedef std::set<int64> MetahandleSet;
+
+// TODO(akalin): Move EntryKernel and related into its own header file.
+
+// Why the singular enums? So the code compile-time dispatches instead of
+// runtime dispatches as it would with a single enum and an if() statement.
+
+// The EntryKernel class contains the actual data for an entry.  Fields are
+// stored in per-type arrays indexed by the field enums above; the put()/ref()
+// overloads dispatch at compile time based on the enum type.
+struct EntryKernel {
+ private:
+  std::string string_fields[STRING_FIELDS_COUNT];
+  sync_pb::EntitySpecifics specifics_fields[PROTO_FIELDS_COUNT];
+  int64 int64_fields[INT64_FIELDS_COUNT];
+  base::Time time_fields[TIME_FIELDS_COUNT];
+  Id id_fields[ID_FIELDS_COUNT];
+  std::bitset<BIT_FIELDS_COUNT> bit_fields;
+  std::bitset<BIT_TEMPS_COUNT> bit_temps;
+
+ public:
+  EntryKernel();
+  ~EntryKernel();
+
+  // Set the dirty bit, and optionally add this entry's metahandle to
+  // a provided index on dirty bits in |dirty_index|. Parameter may be null,
+  // and will result only in setting the dirty bit of this entry.
+  inline void mark_dirty(syncable::MetahandleSet* dirty_index) {
+    if (!dirty_ && dirty_index) {
+      DCHECK_NE(0, ref(META_HANDLE));
+      dirty_index->insert(ref(META_HANDLE));
+    }
+    dirty_ = true;
+  }
+
+  // Clear the dirty bit, and optionally remove this entry's metahandle from
+  // a provided index on dirty bits in |dirty_index|. Parameter may be null,
+  // and will result only in clearing dirty bit of this entry.
+  inline void clear_dirty(syncable::MetahandleSet* dirty_index) {
+    if (dirty_ && dirty_index) {
+      DCHECK_NE(0, ref(META_HANDLE));
+      dirty_index->erase(ref(META_HANDLE));
+    }
+    dirty_ = false;
+  }
+
+  inline bool is_dirty() const {
+    return dirty_;
+  }
+
+  // Setters.
+  inline void put(MetahandleField field, int64 value) {
+    int64_fields[field - INT64_FIELDS_BEGIN] = value;
+  }
+  inline void put(Int64Field field, int64 value) {
+    int64_fields[field - INT64_FIELDS_BEGIN] = value;
+  }
+  inline void put(TimeField field, const base::Time& value) {
+    // Round-trip to proto time format and back so that we have
+    // consistent time resolutions (ms).
+    time_fields[field - TIME_FIELDS_BEGIN] =
+        browser_sync::ProtoTimeToTime(
+            browser_sync::TimeToProtoTime(value));
+  }
+  inline void put(IdField field, const Id& value) {
+    id_fields[field - ID_FIELDS_BEGIN] = value;
+  }
+  inline void put(BaseVersion field, int64 value) {
+    int64_fields[field - INT64_FIELDS_BEGIN] = value;
+  }
+  inline void put(IndexedBitField field, bool value) {
+    bit_fields[field - BIT_FIELDS_BEGIN] = value;
+  }
+  inline void put(IsDelField field, bool value) {
+    bit_fields[field - BIT_FIELDS_BEGIN] = value;
+  }
+  inline void put(BitField field, bool value) {
+    bit_fields[field - BIT_FIELDS_BEGIN] = value;
+  }
+  inline void put(StringField field, const std::string& value) {
+    string_fields[field - STRING_FIELDS_BEGIN] = value;
+  }
+  inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
+    specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
+  }
+  inline void put(BitTemp field, bool value) {
+    bit_temps[field - BIT_TEMPS_BEGIN] = value;
+  }
+
+  // Const ref getters.
+  inline int64 ref(MetahandleField field) const {
+    return int64_fields[field - INT64_FIELDS_BEGIN];
+  }
+  inline int64 ref(Int64Field field) const {
+    return int64_fields[field - INT64_FIELDS_BEGIN];
+  }
+  inline const base::Time& ref(TimeField field) const {
+    return time_fields[field - TIME_FIELDS_BEGIN];
+  }
+  inline const Id& ref(IdField field) const {
+    return id_fields[field - ID_FIELDS_BEGIN];
+  }
+  inline int64 ref(BaseVersion field) const {
+    return int64_fields[field - INT64_FIELDS_BEGIN];
+  }
+  inline bool ref(IndexedBitField field) const {
+    return bit_fields[field - BIT_FIELDS_BEGIN];
+  }
+  inline bool ref(IsDelField field) const {
+    return bit_fields[field - BIT_FIELDS_BEGIN];
+  }
+  inline bool ref(BitField field) const {
+    return bit_fields[field - BIT_FIELDS_BEGIN];
+  }
+  inline const std::string& ref(StringField field) const {
+    return string_fields[field - STRING_FIELDS_BEGIN];
+  }
+  inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
+    return specifics_fields[field - PROTO_FIELDS_BEGIN];
+  }
+  inline bool ref(BitTemp field) const {
+    return bit_temps[field - BIT_TEMPS_BEGIN];
+  }
+
+  // Non-const, mutable ref getters for object types only.
+  inline std::string& mutable_ref(StringField field) {
+    return string_fields[field - STRING_FIELDS_BEGIN];
+  }
+  inline sync_pb::EntitySpecifics& mutable_ref(ProtoField field) {
+    return specifics_fields[field - PROTO_FIELDS_BEGIN];
+  }
+  inline Id& mutable_ref(IdField field) {
+    return id_fields[field - ID_FIELDS_BEGIN];
+  }
+
+  syncable::ModelType GetServerModelType() const;
+
+  // Does a case-insensitive search for a given string, which must be
+  // lower case.
+  bool ContainsString(const std::string& lowercase_query) const;
+
+  // Dumps all kernel info into a DictionaryValue and returns it.
+  // Transfers ownership of the DictionaryValue to the caller.
+  base::DictionaryValue* ToValue() const;
+
+ private:
+  // Tracks whether this entry needs to be saved to the database.
+  bool dirty_;
+};
+
+// A read-only meta entry.  Wraps a non-owned EntryKernel pointer looked up
+// inside a transaction; must not outlive |basetrans_|.
+class Entry {
+  friend class Directory;
+  friend std::ostream& operator << (std::ostream& s, const Entry& e);
+
+ public:
+  // After constructing, you must check good() to test whether the Get
+  // succeeded.
+  Entry(BaseTransaction* trans, GetByHandle, int64 handle);
+  Entry(BaseTransaction* trans, GetById, const Id& id);
+  Entry(BaseTransaction* trans, GetByServerTag, const std::string& tag);
+  Entry(BaseTransaction* trans, GetByClientTag, const std::string& tag);
+
+  // True iff the lookup found a kernel.
+  bool good() const { return 0 != kernel_; }
+
+  BaseTransaction* trans() const { return basetrans_; }
+
+  // Field accessors.
+  inline int64 Get(MetahandleField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline Id Get(IdField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline int64 Get(Int64Field field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline const base::Time& Get(TimeField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline int64 Get(BaseVersion field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline bool Get(IndexedBitField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline bool Get(IsDelField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline bool Get(BitField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  const std::string& Get(StringField field) const;
+  inline const sync_pb::EntitySpecifics& Get(ProtoField field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+  inline bool Get(BitTemp field) const {
+    DCHECK(kernel_);
+    return kernel_->ref(field);
+  }
+
+  ModelType GetServerModelType() const;
+  ModelType GetModelType() const;
+
+  inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
+    DCHECK(kernel_);
+    return !kernel_->ref(NON_UNIQUE_NAME).empty();
+  }
+
+  inline bool IsRoot() const {
+    DCHECK(kernel_);
+    return kernel_->ref(ID).IsRoot();
+  }
+
+  Directory* dir() const;
+
+  // Returns a snapshot copy of the underlying kernel.
+  const EntryKernel GetKernelCopy() const {
+    return *kernel_;
+  }
+
+  // Compute a local predecessor position for |update_item|, based on its
+  // absolute server position.  The returned ID will be a valid predecessor
+  // under SERVER_PARENT_ID that is consistent with the
+  // SERVER_POSITION_IN_PARENT ordering.
+  Id ComputePrevIdFromServerPosition(const Id& parent_id) const;
+
+  // Dumps all entry info into a DictionaryValue and returns it.
+  // Transfers ownership of the DictionaryValue to the caller.
+  base::DictionaryValue* ToValue() const;
+
+ protected:  // Don't allow creation on heap, except by sync API wrappers.
+  friend class sync_api::ReadNode;
+  void* operator new(size_t size) { return (::operator new)(size); }
+
+  inline explicit Entry(BaseTransaction* trans)
+      : basetrans_(trans),
+        kernel_(NULL) { }
+
+ protected:
+  BaseTransaction* const basetrans_;
+
+  // Non-owned; NULL when the lookup failed (see good()).
+  EntryKernel* kernel_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Entry);
+};
+
+// A mutable meta entry. Changes get committed to the database when the
+// WriteTransaction is destroyed.
+class MutableEntry : public Entry {
+ friend class WriteTransaction;
+ friend class Directory;
+ void Init(WriteTransaction* trans, const Id& parent_id,
+ const std::string& name);
+
+ public:
+ MutableEntry(WriteTransaction* trans, Create, const Id& parent_id,
+ const std::string& name);
+ MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
+ MutableEntry(WriteTransaction* trans, GetByHandle, int64);
+ MutableEntry(WriteTransaction* trans, GetById, const Id&);
+ MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
+ MutableEntry(WriteTransaction* trans, GetByServerTag, const std::string& tag);
+
+ inline WriteTransaction* write_transaction() const {
+ return write_transaction_;
+ }
+
+ // Field Accessors. Some of them trigger the re-indexing of the entry.
+ // Return true on success, return false on failure, which means
+ // that putting the value would have caused a duplicate in the index.
+ // TODO(chron): Remove some of these unecessary return values.
+ bool Put(Int64Field field, const int64& value);
+ bool Put(TimeField field, const base::Time& value);
+ bool Put(IdField field, const Id& value);
+
+ // Do a simple property-only update if the PARENT_ID field. Use with caution.
+ //
+ // The normal Put(IS_PARENT) call will move the item to the front of the
+ // sibling order to maintain the linked list invariants when the parent
+ // changes. That's usually what you want to do, but it's inappropriate
+  // when the caller is trying to change the parent ID of the whole set
+ // of children (e.g. because the ID changed during a commit). For those
+ // cases, there's this function. It will corrupt the sibling ordering
+ // if you're not careful.
+ void PutParentIdPropertyOnly(const Id& parent_id);
+
+ bool Put(StringField field, const std::string& value);
+ bool Put(BaseVersion field, int64 value);
+
+ bool Put(ProtoField field, const sync_pb::EntitySpecifics& value);
+ bool Put(BitField field, bool value);
+ inline bool Put(IsDelField field, bool value) {
+ return PutIsDel(value);
+ }
+ bool Put(IndexedBitField field, bool value);
+
+ // Sets the position of this item, and updates the entry kernels of the
+ // adjacent siblings so that list invariants are maintained. Returns false
+ // and fails if |predecessor_id| does not identify a sibling. Pass the root
+ // ID to put the node in first position.
+ bool PutPredecessor(const Id& predecessor_id);
+
+ bool Put(BitTemp field, bool value);
+
+ protected:
+ syncable::MetahandleSet* GetDirtyIndexHelper();
+
+ bool PutIsDel(bool value);
+
+ private: // Don't allow creation on heap, except by sync API wrappers.
+ friend class sync_api::WriteNode;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ bool PutImpl(StringField field, const std::string& value);
+ bool PutUniqueClientTag(const std::string& value);
+
+ // Adjusts the successor and predecessor entries so that they no longer
+ // refer to this entry.
+ bool UnlinkFromOrder();
+
+ // Kind of redundant. We should reduce the number of pointers
+ // floating around if at all possible. Could we store this in Directory?
+ // Scope: Set on construction, never changed after that.
+ WriteTransaction* const write_transaction_;
+
+ protected:
+ MutableEntry();
+
+ DISALLOW_COPY_AND_ASSIGN(MutableEntry);
+};
+
+template <typename FieldType, FieldType field_index> class LessField;
+
+class EntryKernelLessByMetaHandle {
+ public:
+ inline bool operator()(const EntryKernel& a,
+ const EntryKernel& b) const {
+ return a.ref(META_HANDLE) < b.ref(META_HANDLE);
+ }
+};
+typedef std::set<EntryKernel, EntryKernelLessByMetaHandle> EntryKernelSet;
+
+struct EntryKernelMutation {
+ EntryKernel original, mutated;
+};
+typedef std::map<int64, EntryKernelMutation> EntryKernelMutationMap;
+
+typedef browser_sync::Immutable<EntryKernelMutationMap>
+ ImmutableEntryKernelMutationMap;
+
+// A WriteTransaction has a writer tag describing which body of code is doing
+// the write. This is defined up here since WriteTransactionInfo also contains
+// one.
+enum WriterTag {
+ INVALID,
+ SYNCER,
+ AUTHWATCHER,
+ UNITTEST,
+ VACUUM_AFTER_SAVE,
+ PURGE_ENTRIES,
+ SYNCAPI
+};
+
+// Make sure to update this if you update WriterTag.
+std::string WriterTagToString(WriterTag writer_tag);
+
+struct WriteTransactionInfo {
+ WriteTransactionInfo(int64 id,
+ tracked_objects::Location location,
+ WriterTag writer,
+ ImmutableEntryKernelMutationMap mutations);
+ WriteTransactionInfo();
+ ~WriteTransactionInfo();
+
+ // Caller owns the return value.
+ base::DictionaryValue* ToValue(size_t max_mutations_size) const;
+
+ int64 id;
+ // If tracked_objects::Location becomes assignable, we can use that
+ // instead.
+ std::string location_string;
+ WriterTag writer;
+ ImmutableEntryKernelMutationMap mutations;
+};
+
+typedef
+ browser_sync::Immutable<WriteTransactionInfo>
+ ImmutableWriteTransactionInfo;
+
+// Caller owns the return value.
+base::DictionaryValue* EntryKernelMutationToValue(
+ const EntryKernelMutation& mutation);
+
+// Caller owns the return value.
+base::ListValue* EntryKernelMutationMapToValue(
+ const EntryKernelMutationMap& mutations);
+
+// How syncable indices & Indexers work.
+//
+// The syncable Directory maintains several indices on the Entries it tracks.
+// The indices follow a common pattern:
+// (a) The index allows efficient lookup of an Entry* with particular
+// field values. This is done by use of a std::set<> and a custom
+// comparator.
+// (b) There may be conditions for inclusion in the index -- for example,
+// deleted items might not be indexed.
+// (c) Because the index set contains only Entry*, one must be careful
+// to remove Entries from the set before updating the value of
+// an indexed field.
+// The traits of an index are a Comparator (to define the set ordering) and a
+// ShouldInclude function (to define the conditions for inclusion). For each
+// index, the traits are grouped into a class called an Indexer which
+// can be used as a template type parameter.
+
+// Traits type for metahandle index.
+struct MetahandleIndexer {
+ // This index is of the metahandle field values.
+ typedef LessField<MetahandleField, META_HANDLE> Comparator;
+
+ // This index includes all entries.
+ inline static bool ShouldInclude(const EntryKernel* a) {
+ return true;
+ }
+};
+
+// Traits type for ID field index.
+struct IdIndexer {
+ // This index is of the ID field values.
+ typedef LessField<IdField, ID> Comparator;
+
+ // This index includes all entries.
+ inline static bool ShouldInclude(const EntryKernel* a) {
+ return true;
+ }
+};
+
+// Traits type for unique client tag index.
+struct ClientTagIndexer {
+ // This index is of the client-tag values.
+ typedef LessField<StringField, UNIQUE_CLIENT_TAG> Comparator;
+
+ // Items are only in this index if they have a non-empty client tag value.
+ static bool ShouldInclude(const EntryKernel* a);
+};
+
+// This index contains EntryKernels ordered by parent ID and metahandle.
+// It allows efficient lookup of the children of a given parent.
+struct ParentIdAndHandleIndexer {
+ // This index is of the parent ID and metahandle. We use a custom
+ // comparator.
+ class Comparator {
+ public:
+ bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const;
+ };
+
+ // This index does not include deleted items.
+ static bool ShouldInclude(const EntryKernel* a);
+};
+
+// Given an Indexer providing the semantics of an index, defines the
+// set type used to actually contain the index.
+template <typename Indexer>
+struct Index {
+ typedef std::set<EntryKernel*, typename Indexer::Comparator> Set;
+};
+
+// The name Directory in this case means the entire directory
+// structure within a single user account.
+//
+// The db is protected against concurrent modification by a reader/
+// writer lock, negotiated by the ReadTransaction and WriteTransaction
+// friend classes. The in-memory indices are protected against
+// concurrent modification by the kernel lock.
+//
+// All methods which require the reader/writer lock to be held either
+// are protected and only called from friends in a transaction
+// or are public and take a Transaction* argument.
+//
+// All methods which require the kernel lock to be already held take a
+// ScopeKernelLock* argument.
+//
+// To prevent deadlock, the reader writer transaction lock must always
+// be held before acquiring the kernel lock.
+class ScopedKernelLock;
+class IdFilter;
+
+class Directory {
+ friend class BaseTransaction;
+ friend class Entry;
+ friend class MutableEntry;
+ friend class ReadTransaction;
+ friend class ReadTransactionWithoutDB;
+ friend class ScopedKernelLock;
+ friend class ScopedKernelUnlock;
+ friend class WriteTransaction;
+ friend class SyncableDirectoryTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
+ TakeSnapshotGetsAllDirtyHandlesTest);
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
+ TakeSnapshotGetsOnlyDirtyHandlesTest);
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
+ TakeSnapshotGetsMetahandlesToPurge);
+
+ public:
+ static const FilePath::CharType kSyncDatabaseFilename[];
+
+ // Various data that the Directory::Kernel we are backing (persisting data
+ // for) needs saved across runs of the application.
+ struct PersistedKernelInfo {
+ PersistedKernelInfo();
+ ~PersistedKernelInfo();
+
+ // Set the |download_progress| entry for the given model to a
+ // "first sync" start point. When such a value is sent to the server,
+ // a full download of all objects of the model will be initiated.
+ void reset_download_progress(ModelType model_type);
+
+ // Last sync timestamp fetched from the server.
+ sync_pb::DataTypeProgressMarker download_progress[MODEL_TYPE_COUNT];
+ // true iff we ever reached the end of the changelog.
+ ModelTypeSet initial_sync_ended;
+ // The store birthday we were given by the server. Contents are opaque to
+ // the client.
+ std::string store_birthday;
+ // The next local ID that has not been used with this cache-GUID.
+ int64 next_id;
+ // The persisted notification state.
+ std::string notification_state;
+ };
+
+ // What the Directory needs on initialization to create itself and its Kernel.
+ // Filled by DirectoryBackingStore::Load.
+ struct KernelLoadInfo {
+ PersistedKernelInfo kernel_info;
+ std::string cache_guid; // Created on first initialization, never changes.
+ int64 max_metahandle; // Computed (using sql MAX aggregate) on init.
+ KernelLoadInfo() : max_metahandle(0) {
+ }
+ };
+
+ // The dirty/clean state of kernel fields backed by the share_info table.
+ // This is public so it can be used in SaveChangesSnapshot for persistence.
+ enum KernelShareInfoStatus {
+ KERNEL_SHARE_INFO_INVALID,
+ KERNEL_SHARE_INFO_VALID,
+ KERNEL_SHARE_INFO_DIRTY
+ };
+
+ // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
+ // constructed and forms a consistent snapshot of what needs to be sent to
+ // the backing store.
+ struct SaveChangesSnapshot {
+ SaveChangesSnapshot();
+ ~SaveChangesSnapshot();
+
+ KernelShareInfoStatus kernel_info_status;
+ PersistedKernelInfo kernel_info;
+ EntryKernelSet dirty_metas;
+ MetahandleSet metahandles_to_purge;
+ };
+
+ // Does not take ownership of |encryptor|.
+ // |report_unrecoverable_error_function| may be NULL.
+ Directory(
+ browser_sync::Encryptor* encryptor,
+ browser_sync::UnrecoverableErrorHandler* unrecoverable_error_handler,
+ browser_sync::ReportUnrecoverableErrorFunction
+ report_unrecoverable_error_function);
+ virtual ~Directory();
+
+ // Does not take ownership of |delegate|, which must not be NULL.
+ // Starts sending events to |delegate| if the returned result is
+ // OPENED. Note that events to |delegate| may be sent from *any*
+ // thread. |transaction_observer| must be initialized.
+ DirOpenResult Open(const FilePath& file_path, const std::string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer);
+
+ // Same as above, but does not create a file to persist the database. This is
+ // useful for tests where we were not planning to persist this data and don't
+ // want to pay the performance penalty of using a real database.
+ DirOpenResult OpenInMemoryForTest(
+ const std::string& name, DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer);
+
+ // Stops sending events to the delegate and the transaction
+ // observer.
+ void Close();
+
+ int64 NextMetahandle();
+ // Always returns a negative id. Positive client ids are generated
+ // by the server only.
+ Id NextId();
+
+ bool good() const { return NULL != store_; }
+
+ // The download progress is an opaque token provided by the sync server
+ // to indicate the continuation state of the next GetUpdates operation.
+ void GetDownloadProgress(
+ ModelType type,
+ sync_pb::DataTypeProgressMarker* value_out) const;
+ void GetDownloadProgressAsString(
+ ModelType type,
+ std::string* value_out) const;
+ size_t GetEntriesCount() const;
+ void SetDownloadProgress(
+ ModelType type,
+ const sync_pb::DataTypeProgressMarker& value);
+
+ bool initial_sync_ended_for_type(ModelType type) const;
+ void set_initial_sync_ended_for_type(ModelType type, bool value);
+
+ const std::string& name() const { return kernel_->name; }
+
+ // (Account) Store birthday is opaque to the client, so we keep it in the
+ // format it is in the proto buffer in case we switch to a binary birthday
+ // later.
+ std::string store_birthday() const;
+ void set_store_birthday(const std::string& store_birthday);
+
+ std::string GetNotificationState() const;
+ void SetNotificationState(const std::string& notification_state);
+
+ // Unique to each account / client pair.
+ std::string cache_guid() const;
+
+ // Returns a pointer to our cryptographer. Does not transfer ownership. The
+ // cryptographer is not thread safe; it should not be accessed after the
+ // transaction has been released.
+ browser_sync::Cryptographer* GetCryptographer(const BaseTransaction* trans);
+
+ // Returns true if the directory had encountered an unrecoverable error.
+ // Note: Any function in |Directory| that can be called without holding a
+ // transaction need to check if the Directory already has an unrecoverable
+ // error on it.
+ bool unrecoverable_error_set(const BaseTransaction* trans) const;
+
+ // Called to immediately report an unrecoverable error (but don't
+ // propagate it up).
+ void ReportUnrecoverableError() {
+ if (report_unrecoverable_error_function_) {
+ report_unrecoverable_error_function_();
+ }
+ }
+
+ // Called to set the unrecoverable error on the directory and to propagate
+ // the error to upper layers.
+ void OnUnrecoverableError(const BaseTransaction* trans,
+ const tracked_objects::Location& location,
+ const std::string & message);
+
+ protected: // for friends, mainly used by Entry constructors
+ virtual EntryKernel* GetEntryByHandle(int64 handle);
+ virtual EntryKernel* GetEntryByHandle(int64 metahandle,
+ ScopedKernelLock* lock);
+ virtual EntryKernel* GetEntryById(const Id& id);
+ EntryKernel* GetEntryByServerTag(const std::string& tag);
+ virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
+ EntryKernel* GetRootEntry();
+ bool ReindexId(WriteTransaction* trans, EntryKernel* const entry,
+ const Id& new_id);
+ bool ReindexParentId(WriteTransaction* trans, EntryKernel* const entry,
+ const Id& new_parent_id);
+ void ClearDirtyMetahandles();
+
+ // These don't do semantic checking.
+ // The semantic checking is implemented higher up.
+ bool UnlinkEntryFromOrder(EntryKernel* entry,
+ WriteTransaction* trans,
+ ScopedKernelLock* lock);
+
+ DirOpenResult OpenImpl(
+ DirectoryBackingStore* store, const std::string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer);
+
+ private:
+ // These private versions expect the kernel lock to already be held
+ // before calling.
+ EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
+
+ template <class T> void TestAndSet(T* kernel_data, const T* data_to_set);
+
+ public:
+ typedef std::vector<int64> ChildHandles;
+
+ // Returns the child meta handles (even those for deleted/unlinked
+ // nodes) for given parent id. Clears |result| if there are no
+ // children.
+ bool GetChildHandlesById(BaseTransaction*, const Id& parent_id,
+ ChildHandles* result);
+
+ // Returns the child meta handles (even those for deleted/unlinked
+ // nodes) for given meta handle. Clears |result| if there are no
+ // children.
+ bool GetChildHandlesByHandle(BaseTransaction*, int64 handle,
+ ChildHandles* result);
+
+ // Returns true iff |id| has children.
+ bool HasChildren(BaseTransaction* trans, const Id& id);
+
+ // Find the first child in the positional ordering under a parent,
+ // and fill in |*first_child_id| with its id. Fills in a root Id if
+ // parent has no children. Returns true if the first child was
+ // successfully found, or false if an error was encountered.
+ bool GetFirstChildId(BaseTransaction* trans, const Id& parent_id,
+ Id* first_child_id) WARN_UNUSED_RESULT;
+
+  // Find the last child in the positional ordering under a parent,
+  // and fill in |*last_child_id| with its id. Fills in a root Id if
+  // parent has no children. Returns true if the last child was
+  // successfully found, or false if an error was encountered.
+ bool GetLastChildIdForTest(BaseTransaction* trans, const Id& parent_id,
+ Id* last_child_id) WARN_UNUSED_RESULT;
+
+ // Compute a local predecessor position for |update_item|. The position
+ // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
+ // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
+ // children of |parent_id|.
+ Id ComputePrevIdFromServerPosition(
+ const EntryKernel* update_item,
+ const syncable::Id& parent_id);
+
+ // SaveChanges works by taking a consistent snapshot of the current Directory
+ // state and indices (by deep copy) under a ReadTransaction, passing this
+ // snapshot to the backing store under no transaction, and finally cleaning
+ // up by either purging entries no longer needed (this part done under a
+ // WriteTransaction) or rolling back the dirty bits. It also uses
+ // internal locking to enforce SaveChanges operations are mutually exclusive.
+ //
+ // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
+ bool SaveChanges();
+
+ // Fill in |result| with all entry kernels.
+ void GetAllEntryKernels(BaseTransaction* trans,
+ std::vector<const EntryKernel*>* result);
+
+ // Returns the number of entities with the unsynced bit set.
+ int64 unsynced_entity_count() const;
+
+  // GetUnsyncedMetaHandles should only be called after SaveChanges and
+ // before any new entries have been created. The intention is that the
+ // syncer should call it from its PerformSyncQueries member.
+ typedef std::vector<int64> UnsyncedMetaHandles;
+ void GetUnsyncedMetaHandles(BaseTransaction* trans,
+ UnsyncedMetaHandles* result);
+
+ // Returns all server types with unapplied updates. A subset of
+ // those types can then be passed into
+ // GetUnappliedUpdateMetaHandles() below.
+ FullModelTypeSet GetServerTypesWithUnappliedUpdates(
+ BaseTransaction* trans) const;
+
+ // Get all the metahandles for unapplied updates for a given set of
+ // server types.
+ typedef std::vector<int64> UnappliedUpdateMetaHandles;
+ void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
+ FullModelTypeSet server_types,
+ UnappliedUpdateMetaHandles* result);
+
+ // Checks tree metadata consistency.
+ // If full_scan is false, the function will avoid pulling any entries from the
+ // db and scan entries currently in ram.
+ // If full_scan is true, all entries will be pulled from the database.
+  // Returns false or triggers CHECKs if we're given bad
+  // information.
+ bool CheckTreeInvariants(syncable::BaseTransaction* trans,
+ bool full_scan);
+
+ bool CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const EntryKernelMutationMap& mutations);
+
+ bool CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const MetahandleSet& handles,
+ const IdFilter& idfilter);
+
+ // Purges all data associated with any entries whose ModelType or
+ // ServerModelType is found in |types|, from _both_ memory and disk.
+ // Only valid, "real" model types are allowed in |types| (see model_type.h
+ // for definitions). "Purge" is just meant to distinguish from "deleting"
+ // entries, which means something different in the syncable namespace.
+ // WARNING! This can be real slow, as it iterates over all entries.
+ // WARNING! Performs synchronous I/O.
+ virtual void PurgeEntriesWithTypeIn(ModelTypeSet types);
+
+ private:
+ // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
+ // and unapplied_metahandles from metahandles_index.
+ void InitializeIndices();
+
+ // Constructs a consistent snapshot of the current Directory state and
+ // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
+ // See SaveChanges() for more information.
+ void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);
+
+ // Purges from memory any unused, safe to remove entries that were
+ // successfully deleted on disk as a result of the SaveChanges that processed
+ // |snapshot|. See SaveChanges() for more information.
+ bool VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);
+
+ // Rolls back dirty bits in the event that the SaveChanges that
+ // processed |snapshot| failed, for example, due to no disk space.
+ void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
+
+ // For new entry creation only
+ bool InsertEntry(WriteTransaction* trans,
+ EntryKernel* entry, ScopedKernelLock* lock);
+ bool InsertEntry(WriteTransaction* trans, EntryKernel* entry);
+
+ // Used by CheckTreeInvariants
+ void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
+ bool SafeToPurgeFromMemory(WriteTransaction* trans,
+ const EntryKernel* const entry) const;
+
+ // Internal setters that do not acquire a lock internally. These are unsafe
+ // on their own; caller must guarantee exclusive access manually by holding
+ // a ScopedKernelLock.
+ void set_initial_sync_ended_for_type_unsafe(ModelType type, bool x);
+ void SetNotificationStateUnsafe(const std::string& notification_state);
+
+ Directory& operator = (const Directory&);
+
+ public:
+ typedef Index<MetahandleIndexer>::Set MetahandlesIndex;
+ typedef Index<IdIndexer>::Set IdsIndex;
+ // All entries in memory must be in both the MetahandlesIndex and
+  // the IdsIndex, but only non-deleted entries will be in the
+ // ParentIdChildIndex.
+ typedef Index<ParentIdAndHandleIndexer>::Set ParentIdChildIndex;
+
+ // Contains both deleted and existing entries with tags.
+ // We can't store only existing tags because the client would create
+ // items that had a duplicated ID in the end, resulting in a DB key
+ // violation. ID reassociation would fail after an attempted commit.
+ typedef Index<ClientTagIndexer>::Set ClientTagIndex;
+
+ protected:
+ // Used by tests. |delegate| must not be NULL.
+ // |transaction_observer| must be initialized.
+ void InitKernelForTest(
+ const std::string& name,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer);
+
+ private:
+ struct Kernel {
+ // |delegate| must not be NULL. |transaction_observer| must be
+ // initialized.
+ Kernel(const std::string& name, const KernelLoadInfo& info,
+ DirectoryChangeDelegate* delegate,
+ const browser_sync::WeakHandle<TransactionObserver>&
+ transaction_observer);
+
+ ~Kernel();
+
+ void AddRef(); // For convenience.
+ void Release();
+
+ // TODO(timsteele): audit use of the member and remove if possible
+ volatile base::subtle::AtomicWord refcount;
+
+ // Implements ReadTransaction / WriteTransaction using a simple lock.
+ base::Lock transaction_mutex;
+
+ // Protected by transaction_mutex. Used by WriteTransactions.
+ int64 next_write_transaction_id;
+
+ // The name of this directory.
+ std::string const name;
+
+ // Protects all members below.
+ // The mutex effectively protects all the indices, but not the
+ // entries themselves. So once a pointer to an entry is pulled
+ // from the index, the mutex can be unlocked and entry read or written.
+ //
+ // Never hold the mutex and do anything with the database or any
+ // other buffered IO. Violating this rule will result in deadlock.
+ base::Lock mutex;
+ // Entries indexed by metahandle
+ MetahandlesIndex* metahandles_index;
+ // Entries indexed by id
+ IdsIndex* ids_index;
+ ParentIdChildIndex* parent_id_child_index;
+ ClientTagIndex* client_tag_index;
+ // So we don't have to create an EntryKernel every time we want to
+ // look something up in an index. Needle in haystack metaphor.
+ EntryKernel needle;
+
+ // 3 in-memory indices on bits used extremely frequently by the syncer.
+ // |unapplied_update_metahandles| is keyed by the server model type.
+ MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
+ MetahandleSet* const unsynced_metahandles;
+ // Contains metahandles that are most likely dirty (though not
+    // necessarily). Dirtiness is confirmed in TakeSnapshotForSaveChanges().
+ MetahandleSet* const dirty_metahandles;
+
+ // When a purge takes place, we remove items from all our indices and stash
+ // them in here so that SaveChanges can persist their permanent deletion.
+ MetahandleSet* const metahandles_to_purge;
+
+ KernelShareInfoStatus info_status;
+
+ // These 3 members are backed in the share_info table, and
+ // their state is marked by the flag above.
+
+ // A structure containing the Directory state that is written back into the
+ // database on SaveChanges.
+ PersistedKernelInfo persisted_info;
+
+ // A unique identifier for this account's cache db, used to generate
+ // unique server IDs. No need to lock, only written at init time.
+ const std::string cache_guid;
+
+ // It doesn't make sense for two threads to run SaveChanges at the same
+ // time; this mutex protects that activity.
+ base::Lock save_changes_mutex;
+
+ // The next metahandle is protected by kernel mutex.
+ int64 next_metahandle;
+
+ // The delegate for directory change events. Must not be NULL.
+ DirectoryChangeDelegate* const delegate;
+
+ // The transaction observer.
+ const browser_sync::WeakHandle<TransactionObserver> transaction_observer;
+ };
+
+ // Helper method used to do searches on |parent_id_child_index|.
+ ParentIdChildIndex::iterator LocateInParentChildIndex(
+ const ScopedKernelLock& lock,
+ const Id& parent_id,
+ int64 position_in_parent,
+ const Id& item_id_for_tiebreaking);
+
+ // Return an iterator to the beginning of the range of the children of
+ // |parent_id| in the kernel's parent_id_child_index.
+ ParentIdChildIndex::iterator GetParentChildIndexLowerBound(
+ const ScopedKernelLock& lock,
+ const Id& parent_id);
+
+ // Return an iterator to just past the end of the range of the
+ // children of |parent_id| in the kernel's parent_id_child_index.
+ ParentIdChildIndex::iterator GetParentChildIndexUpperBound(
+ const ScopedKernelLock& lock,
+ const Id& parent_id);
+
+ // Append the handles of the children of |parent_id| to |result|.
+ void AppendChildHandles(
+ const ScopedKernelLock& lock,
+ const Id& parent_id, Directory::ChildHandles* result);
+
+ // Return a pointer to what is probably (but not certainly) the
+ // first child of |parent_id|, or NULL if |parent_id| definitely has
+ // no children.
+ EntryKernel* GetPossibleFirstChild(
+ const ScopedKernelLock& lock, const Id& parent_id);
+
+ // Return a pointer to what is probably (but not certainly) the last
+ // child of |parent_id|, or NULL if |parent_id| definitely has no
+ // children.
+ EntryKernel* GetPossibleLastChildForTest(
+ const ScopedKernelLock& lock, const Id& parent_id);
+
+ browser_sync::Cryptographer cryptographer_;
+
+ Kernel* kernel_;
+
+ DirectoryBackingStore* store_;
+
+ browser_sync::UnrecoverableErrorHandler* const unrecoverable_error_handler_;
+ const browser_sync::ReportUnrecoverableErrorFunction
+ report_unrecoverable_error_function_;
+ bool unrecoverable_error_set_;
+};
+
+class ScopedKernelLock {
+ public:
+ explicit ScopedKernelLock(const Directory*);
+ ~ScopedKernelLock() {}
+
+ base::AutoLock scoped_lock_;
+ Directory* const dir_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
+};
+
+// Transactions are now processed FIFO with a straight lock
+class BaseTransaction {
+ friend class Entry;
+ public:
+ inline Directory* directory() const { return directory_; }
+ inline Id root_id() const { return Id(); }
+
+ virtual ~BaseTransaction();
+
+ // This should be called when a database corruption is detected and there is
+ // no way for us to recover short of wiping the database clean. When this is
+ // called we set a bool in the transaction. The caller has to unwind the
+ // stack. When the destructor for the transaction is called it acts upon the
+ // bool and calls the Directory to handle the unrecoverable error.
+ void OnUnrecoverableError(const tracked_objects::Location& location,
+ const std::string& message);
+
+ bool unrecoverable_error_set() const;
+
+ protected:
+ BaseTransaction(const tracked_objects::Location& from_here,
+ const char* name,
+ WriterTag writer,
+ Directory* directory);
+
+ void Lock();
+ void Unlock();
+
+  // This should be called before unlocking because it calls the Directory's
+ // OnUnrecoverableError method which is not protected by locks and could
+ // be called from any thread. Holding the transaction lock ensures only one
+ // thread could call the method at a time.
+ void HandleUnrecoverableErrorIfSet();
+
+ const tracked_objects::Location from_here_;
+ const char* const name_;
+ WriterTag writer_;
+ Directory* const directory_;
+ Directory::Kernel* const dirkernel_; // for brevity
+
+ // Error information.
+ bool unrecoverable_error_set_;
+ tracked_objects::Location unrecoverable_error_location_;
+ std::string unrecoverable_error_msg_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class ReadTransaction : public BaseTransaction {
+ public:
+ ReadTransaction(const tracked_objects::Location& from_here,
+ Directory* directory);
+
+ virtual ~ReadTransaction();
+
+ protected: // Don't allow creation on heap, except by sync API wrapper.
+ friend class sync_api::ReadTransaction;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class WriteTransaction : public BaseTransaction {
+ friend class MutableEntry;
+ public:
+ WriteTransaction(const tracked_objects::Location& from_here,
+ WriterTag writer, Directory* directory);
+
+ virtual ~WriteTransaction();
+
+ void SaveOriginal(const EntryKernel* entry);
+
+ protected:
+ // Overridden by tests.
+ virtual void NotifyTransactionComplete(ModelTypeSet models_with_changes);
+
+ private:
+ // Clears |mutations_|.
+ ImmutableEntryKernelMutationMap RecordMutations();
+
+ void UnlockAndNotify(const ImmutableEntryKernelMutationMap& mutations);
+
+ ModelTypeSet NotifyTransactionChangingAndEnding(
+ const ImmutableEntryKernelMutationMap& mutations);
+
+ // Only the original fields are filled in until |RecordMutations()|.
+ // We use a mutation map instead of a kernel set to avoid copying.
+ EntryKernelMutationMap mutations_;
+
+ DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
+};
+
+bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);
+
+// This function sets only the flags needed to get this entry to sync.
+bool MarkForSyncing(syncable::MutableEntry* e);
+
+} // namespace syncable
+
+std::ostream& operator <<(std::ostream&, const syncable::Blob&);
+
+#endif // SYNC_SYNCABLE_SYNCABLE_H_
diff --git a/sync/syncable/syncable_changes_version.h b/sync/syncable/syncable_changes_version.h
new file mode 100644
index 0000000..38aae2c
--- /dev/null
+++ b/sync/syncable/syncable_changes_version.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+#define SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+#pragma once
+
+namespace syncable {
+
+// For the most part, the sync engine treats version numbers as opaque values.
+// However, there are parts of our code base that break this abstraction, and
+// depend on the following two invariants:
+//
+// 1. CHANGES_VERSION is less than 0.
+// 2. The server only issues positive version numbers.
+//
+// Breaking these abstractions makes some operations 10 times
+// faster. If either of these invariants change, then those queries
+// must be revisited.
+
+enum {
+ CHANGES_VERSION = -1
+};
+
+#define CHANGES_VERSION_STRING "-1"
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
diff --git a/sync/syncable/syncable_columns.h b/sync/syncable/syncable_columns.h
new file mode 100644
index 0000000..18a0215
--- /dev/null
+++ b/sync/syncable/syncable_columns.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+#define SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+#pragma once
+
+#include "sync/syncable/syncable.h"
+#include "sync/syncable/syncable_changes_version.h"
+
+namespace syncable {
+
+struct ColumnSpec {
+ const char* name;
+ const char* spec;
+};
+
+// Must be in exact same order as fields in syncable.
+static const ColumnSpec g_metas_columns[] = {
+ //////////////////////////////////////
+ // int64s
+ {"metahandle", "bigint primary key ON CONFLICT FAIL"},
+ {"base_version", "bigint default " CHANGES_VERSION_STRING},
+ {"server_version", "bigint default 0"},
+ {"server_position_in_parent", "bigint default 0"},
+ // This is the item ID that we store for the embedding application.
+ {"local_external_id", "bigint default 0"},
+ // These timestamps are kept in the same format as that of the
+ // protocol (ms since Unix epoch).
+ {"mtime", "bigint default 0"},
+ {"server_mtime", "bigint default 0"},
+ {"ctime", "bigint default 0"},
+ {"server_ctime", "bigint default 0"},
+ //////////////////////////////////////
+ // Ids
+ {"id", "varchar(255) default \"r\""},
+ {"parent_id", "varchar(255) default \"r\""},
+ {"server_parent_id", "varchar(255) default \"r\""},
+ {"prev_id", "varchar(255) default \"r\""},
+ {"next_id", "varchar(255) default \"r\""},
+ //////////////////////////////////////
+ // bits
+ {"is_unsynced", "bit default 0"},
+ {"is_unapplied_update", "bit default 0"},
+ {"is_del", "bit default 0"},
+ {"is_dir", "bit default 0"},
+ {"server_is_dir", "bit default 0"},
+ {"server_is_del", "bit default 0"},
+ //////////////////////////////////////
+ // Strings
+ {"non_unique_name", "varchar"},
+ {"server_non_unique_name", "varchar(255)"},
+ {"unique_server_tag", "varchar"},
+ {"unique_client_tag", "varchar"},
+ //////////////////////////////////////
+ // Blobs.
+ {"specifics", "blob"},
+ {"server_specifics", "blob"},
+ {"base_server_specifics", "blob"}
+};
+
+// At least enforce that there are equal number of column names and fields.
+COMPILE_ASSERT(arraysize(g_metas_columns) >= FIELD_COUNT, missing_column_name);
+COMPILE_ASSERT(arraysize(g_metas_columns) <= FIELD_COUNT, extra_column_names);
+
+static inline const char* ColumnName(int field) {
+ DCHECK(field < BEGIN_TEMPS);
+ return g_metas_columns[field].name;
+}
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
diff --git a/sync/syncable/syncable_enum_conversions.cc b/sync/syncable/syncable_enum_conversions.cc
new file mode 100644
index 0000000..eaf5edd
--- /dev/null
+++ b/sync/syncable/syncable_enum_conversions.cc
@@ -0,0 +1,164 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with syncable.h.
+
+#include "sync/syncable/syncable_enum_conversions.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace syncable {
+
+// We can't tokenize expected_min/expected_max since it can be a
+// general expression.
+#define ASSERT_ENUM_BOUNDS(enum_min, enum_max, expected_min, expected_max) \
+ COMPILE_ASSERT(static_cast<int>(enum_min) == \
+ static_cast<int>(expected_min), \
+ enum_min##_not_expected_min); \
+ COMPILE_ASSERT(static_cast<int>(enum_max) == \
+ static_cast<int>(expected_max), \
+ enum_max##_not_expected_max);
+
+#define ENUM_CASE(enum_value) case enum_value: return #enum_value
+
+const char* GetMetahandleFieldString(MetahandleField metahandle_field) {
+ ASSERT_ENUM_BOUNDS(META_HANDLE, META_HANDLE,
+ INT64_FIELDS_BEGIN, BASE_VERSION - 1);
+ switch (metahandle_field) {
+ ENUM_CASE(META_HANDLE);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetBaseVersionString(BaseVersion base_version) {
+ ASSERT_ENUM_BOUNDS(BASE_VERSION, BASE_VERSION,
+ META_HANDLE + 1, SERVER_VERSION - 1);
+ switch (base_version) {
+ ENUM_CASE(BASE_VERSION);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetInt64FieldString(Int64Field int64_field) {
+ ASSERT_ENUM_BOUNDS(SERVER_VERSION, LOCAL_EXTERNAL_ID,
+ BASE_VERSION + 1, INT64_FIELDS_END - 1);
+ switch (int64_field) {
+ ENUM_CASE(SERVER_VERSION);
+ ENUM_CASE(SERVER_POSITION_IN_PARENT);
+ ENUM_CASE(LOCAL_EXTERNAL_ID);
+ case INT64_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetTimeFieldString(TimeField time_field) {
+  ASSERT_ENUM_BOUNDS(MTIME, SERVER_CTIME,
+                     TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
+  switch (time_field) {
+    ENUM_CASE(MTIME);
+    ENUM_CASE(SERVER_MTIME);
+    ENUM_CASE(CTIME);
+    ENUM_CASE(SERVER_CTIME);
+    case TIME_FIELDS_END: break;
+  }
+  NOTREACHED();
+  return "";
+}
+
+const char* GetIdFieldString(IdField id_field) {
+ ASSERT_ENUM_BOUNDS(ID, NEXT_ID,
+ ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
+ switch (id_field) {
+ ENUM_CASE(ID);
+ ENUM_CASE(PARENT_ID);
+ ENUM_CASE(SERVER_PARENT_ID);
+ ENUM_CASE(PREV_ID);
+ ENUM_CASE(NEXT_ID);
+ case ID_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetIndexedBitFieldString(IndexedBitField indexed_bit_field) {
+ ASSERT_ENUM_BOUNDS(IS_UNSYNCED, IS_UNAPPLIED_UPDATE,
+ BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
+ switch (indexed_bit_field) {
+ ENUM_CASE(IS_UNSYNCED);
+ ENUM_CASE(IS_UNAPPLIED_UPDATE);
+ case INDEXED_BIT_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetIsDelFieldString(IsDelField is_del_field) {
+ ASSERT_ENUM_BOUNDS(IS_DEL, IS_DEL,
+ INDEXED_BIT_FIELDS_END, IS_DIR - 1);
+ switch (is_del_field) {
+ ENUM_CASE(IS_DEL);
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetBitFieldString(BitField bit_field) {
+ ASSERT_ENUM_BOUNDS(IS_DIR, SERVER_IS_DEL,
+ IS_DEL + 1, BIT_FIELDS_END - 1);
+ switch (bit_field) {
+ ENUM_CASE(IS_DIR);
+ ENUM_CASE(SERVER_IS_DIR);
+ ENUM_CASE(SERVER_IS_DEL);
+ case BIT_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetStringFieldString(StringField string_field) {
+ ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_CLIENT_TAG,
+ STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
+ switch (string_field) {
+ ENUM_CASE(NON_UNIQUE_NAME);
+ ENUM_CASE(SERVER_NON_UNIQUE_NAME);
+ ENUM_CASE(UNIQUE_SERVER_TAG);
+ ENUM_CASE(UNIQUE_CLIENT_TAG);
+ case STRING_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetProtoFieldString(ProtoField proto_field) {
+ ASSERT_ENUM_BOUNDS(SPECIFICS, BASE_SERVER_SPECIFICS,
+ PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
+ switch (proto_field) {
+ ENUM_CASE(SPECIFICS);
+ ENUM_CASE(SERVER_SPECIFICS);
+ ENUM_CASE(BASE_SERVER_SPECIFICS);
+ case PROTO_FIELDS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+const char* GetBitTempString(BitTemp bit_temp) {
+ ASSERT_ENUM_BOUNDS(SYNCING, SYNCING,
+ BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
+ switch (bit_temp) {
+ ENUM_CASE(SYNCING);
+ case BIT_TEMPS_END: break;
+ }
+ NOTREACHED();
+ return "";
+}
+
+#undef ENUM_CASE
+#undef ASSERT_ENUM_BOUNDS
+
+} // namespace syncable
diff --git a/sync/syncable/syncable_enum_conversions.h b/sync/syncable/syncable_enum_conversions.h
new file mode 100644
index 0000000..ae251c8
--- /dev/null
+++ b/sync/syncable/syncable_enum_conversions.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
+#define SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
+#pragma once
+
+// Keep this file in sync with syncable.h.
+
+#include "sync/syncable/syncable.h"
+
+// Utility functions to get the string equivalent for some syncable
+// enums.
+
+namespace syncable {
+
+// The returned strings (which don't have to be freed) are in ASCII.
+// The result of passing in an invalid enum value is undefined.
+
+const char* GetMetahandleFieldString(MetahandleField metahandle_field);
+
+const char* GetBaseVersionString(BaseVersion base_version);
+
+const char* GetInt64FieldString(Int64Field int64_field);
+
+const char* GetTimeFieldString(TimeField time_field);
+
+const char* GetIdFieldString(IdField id_field);
+
+const char* GetIndexedBitFieldString(IndexedBitField indexed_bit_field);
+
+const char* GetIsDelFieldString(IsDelField is_del_field);
+
+const char* GetBitFieldString(BitField bit_field);
+
+const char* GetStringFieldString(StringField string_field);
+
+const char* GetProtoFieldString(ProtoField proto_field);
+
+const char* GetBitTempString(BitTemp bit_temp);
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
diff --git a/sync/syncable/syncable_enum_conversions_unittest.cc b/sync/syncable/syncable_enum_conversions_unittest.cc
new file mode 100644
index 0000000..8ce4a40
--- /dev/null
+++ b/sync/syncable/syncable_enum_conversions_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Keep this file in sync with syncable.h.
+
+#include "sync/syncable/syncable_enum_conversions.h"
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncable {
+namespace {
+
+class SyncableEnumConversionsTest : public testing::Test {
+};
+
+template <class T>
+void TestEnumStringFunction(const char* (*enum_string_fn)(T),
+ int enum_min, int enum_max) {
+ EXPECT_LE(enum_min, enum_max);
+ for (int i = enum_min; i <= enum_max; ++i) {
+ const std::string& str = enum_string_fn(static_cast<T>(i));
+ EXPECT_FALSE(str.empty());
+ }
+}
+
+TEST_F(SyncableEnumConversionsTest, GetMetahandleFieldString) {
+ TestEnumStringFunction(
+ GetMetahandleFieldString, INT64_FIELDS_BEGIN, META_HANDLE);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetBaseVersionString) {
+ TestEnumStringFunction(
+ GetBaseVersionString, META_HANDLE + 1, BASE_VERSION);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetInt64FieldString) {
+ TestEnumStringFunction(
+ GetInt64FieldString, BASE_VERSION + 1, INT64_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetTimeFieldString) {
+ TestEnumStringFunction(
+ GetTimeFieldString, TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetIdFieldString) {
+ TestEnumStringFunction(
+ GetIdFieldString, ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetIndexedBitFieldString) {
+ TestEnumStringFunction(
+ GetIndexedBitFieldString, BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetIsDelFieldString) {
+ TestEnumStringFunction(
+ GetIsDelFieldString, INDEXED_BIT_FIELDS_END, IS_DEL);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetBitFieldString) {
+ TestEnumStringFunction(
+ GetBitFieldString, IS_DEL + 1, BIT_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetStringFieldString) {
+ TestEnumStringFunction(
+ GetStringFieldString, STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetProtoFieldString) {
+ TestEnumStringFunction(
+ GetProtoFieldString, PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
+}
+
+TEST_F(SyncableEnumConversionsTest, GetBitTempString) {
+ TestEnumStringFunction(
+ GetBitTempString, BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
+}
+
+} // namespace
+} // namespace syncable
diff --git a/sync/syncable/syncable_id.cc b/sync/syncable/syncable_id.cc
new file mode 100644
index 0000000..f860cb9
--- /dev/null
+++ b/sync/syncable/syncable_id.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_id.h"
+
+#include <iosfwd>
+
+#include "base/string_util.h"
+#include "base/values.h"
+
+using std::ostream;
+using std::string;
+
+namespace syncable {
+
+ostream& operator<<(ostream& out, const Id& id) {
+ out << id.s_;
+ return out;
+}
+
+StringValue* Id::ToValue() const {
+ return Value::CreateStringValue(s_);
+}
+
+string Id::GetServerId() const {
+ // Currently root is the string "0". We need to decide on a true value.
+ // "" would be convenient here, as the IsRoot call would not be needed.
+ if (IsRoot())
+ return "0";
+ return s_.substr(1);
+}
+
+Id Id::CreateFromServerId(const string& server_id) {
+ Id id;
+ if (server_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("s") + server_id;
+ return id;
+}
+
+Id Id::CreateFromClientString(const string& local_id) {
+ Id id;
+ if (local_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("c") + local_id;
+ return id;
+}
+
+Id Id::GetLexicographicSuccessor() const {
+ // The successor of a string is given by appending the least
+ // character in the alphabet.
+ Id id = *this;
+ id.s_.push_back(0);
+ return id;
+}
+
+bool Id::ContainsStringCaseInsensitive(
+ const std::string& lowercase_query) const {
+ DCHECK_EQ(StringToLowerASCII(lowercase_query), lowercase_query);
+ return StringToLowerASCII(s_).find(lowercase_query) != std::string::npos;
+}
+
+// static
+Id Id::GetLeastIdForLexicographicComparison() {
+ Id id;
+ id.s_.clear();
+ return id;
+}
+
+Id GetNullId() {
+ return Id(); // Currently == root.
+}
+
+} // namespace syncable
diff --git a/sync/syncable/syncable_id.h b/sync/syncable/syncable_id.h
new file mode 100644
index 0000000..34f4fcc
--- /dev/null
+++ b/sync/syncable/syncable_id.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_ID_H_
+#define SYNC_SYNCABLE_SYNCABLE_ID_H_
+#pragma once
+
+#include <iosfwd>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "base/hash_tables.h"
+
+class MockConnectionManager;
+
+namespace base {
+class StringValue;
+}
+
+namespace sql {
+class Statement;
+}
+
+namespace syncable {
+struct EntryKernel;
+class Id;
+}
+
+namespace syncable {
+
+std::ostream& operator<<(std::ostream& out, const Id& id);
+
+// For historical reasons, 3 concepts got overloaded into the Id:
+// 1. A unique, opaque identifier for the object.
+// 2. Flag specifying whether the server knows about this object.
+// 3. Flag for root.
+//
+// We originally wrapped an integer for this information, but now we use a
+// string. It will have one of three forms:
+// 1. c<client only opaque id> for client items that have not been committed.
+// 2. r for the root item.
+// 3. s<server provided opaque id> for items that the server knows about.
+class Id {
+ public:
+ // This constructor will be handy even when we move away from int64s, just
+ // for unit tests.
+ inline Id() : s_("r") { }
+ inline Id(const Id& that) {
+ Copy(that);
+ }
+ inline Id& operator = (const Id& that) {
+ Copy(that);
+ return *this;
+ }
+ inline void Copy(const Id& that) {
+ this->s_ = that.s_;
+ }
+ inline bool IsRoot() const {
+ return "r" == s_;
+ }
+ inline bool ServerKnows() const {
+ return s_[0] == 's' || s_ == "r";
+ }
+
+ // TODO(sync): We could use null here, but to ease conversion we use "r".
+ // fix this, this is madness :)
+ inline bool IsNull() const {
+ return IsRoot();
+ }
+ inline void Clear() {
+ s_ = "r";
+ }
+ inline int compare(const Id& that) const {
+ return s_.compare(that.s_);
+ }
+ inline bool operator == (const Id& that) const {
+ return s_ == that.s_;
+ }
+ inline bool operator != (const Id& that) const {
+ return s_ != that.s_;
+ }
+ inline bool operator < (const Id& that) const {
+ return s_ < that.s_;
+ }
+ inline bool operator > (const Id& that) const {
+ return s_ > that.s_;
+ }
+
+ const std::string& value() const {
+ return s_;
+ }
+
+ // Return the next highest ID in the lexicographic ordering. This is
+ // useful for computing upper bounds on std::sets that are ordered
+ // by operator<.
+ Id GetLexicographicSuccessor() const;
+
+ // Note: |lowercase_query| should be passed in as lower case.
+ bool ContainsStringCaseInsensitive(const std::string& lowercase_query) const;
+
+ // Dumps the ID as a value and returns it. Transfers ownership of
+ // the StringValue to the caller.
+ base::StringValue* ToValue() const;
+
+ // Three functions are used to work with our proto buffers.
+ std::string GetServerId() const;
+ static Id CreateFromServerId(const std::string& server_id);
+ // This should only be used if you get back a reference to a local
+ // id from the server. Returns a client only opaque id.
+ static Id CreateFromClientString(const std::string& local_id);
+
+ // This method returns an ID that will compare less than any valid ID.
+ // The returned ID is not a valid ID itself. This is useful for
+ // computing lower bounds on std::sets that are ordered by operator<.
+ static Id GetLeastIdForLexicographicComparison();
+
+ private:
+ friend EntryKernel* UnpackEntry(sql::Statement* statement);
+ friend void BindFields(const EntryKernel& entry,
+ sql::Statement* statement);
+ friend std::ostream& operator<<(std::ostream& out, const Id& id);
+ friend class MockConnectionManager;
+ friend class SyncableIdTest;
+
+ std::string s_;
+};
+
+Id GetNullId();
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_SYNCABLE_ID_H_
diff --git a/sync/syncable/syncable_id_unittest.cc b/sync/syncable/syncable_id_unittest.cc
new file mode 100644
index 0000000..67524b3
--- /dev/null
+++ b/sync/syncable/syncable_id_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_id.h"
+
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/test/values_test_util.h"
+#include "base/values.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::vector;
+
+namespace syncable {
+
+using browser_sync::TestIdFactory;
+
+class SyncableIdTest : public testing::Test { };
+
+TEST(SyncableIdTest, TestIDCreation) {
+ vector<Id> v;
+ v.push_back(TestIdFactory::FromNumber(5));
+ v.push_back(TestIdFactory::FromNumber(1));
+ v.push_back(TestIdFactory::FromNumber(-5));
+ v.push_back(TestIdFactory::MakeLocal("A"));
+ v.push_back(TestIdFactory::MakeLocal("B"));
+ v.push_back(TestIdFactory::MakeServer("A"));
+ v.push_back(TestIdFactory::MakeServer("B"));
+ v.push_back(Id::CreateFromServerId("-5"));
+ v.push_back(Id::CreateFromClientString("A"));
+ v.push_back(Id::CreateFromServerId("A"));
+
+ for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
+ for (vector<Id>::iterator j = v.begin(); j != i; ++j) {
+ ASSERT_NE(*i, *j) << "mis equated two distinct ids";
+ }
+ ASSERT_EQ(*i, *i) << "self-equality failed";
+ Id copy1 = *i;
+ Id copy2 = *i;
+ ASSERT_EQ(copy1, copy2) << "equality after copy failed";
+ }
+}
+
+TEST(SyncableIdTest, GetLeastIdForLexicographicComparison) {
+ vector<Id> v;
+ v.push_back(Id::CreateFromServerId("z5"));
+ v.push_back(Id::CreateFromServerId("z55"));
+ v.push_back(Id::CreateFromServerId("z6"));
+ v.push_back(Id::CreateFromClientString("zA-"));
+ v.push_back(Id::CreateFromClientString("zA--"));
+ v.push_back(Id::CreateFromServerId("zA--"));
+
+ for (int i = 0; i <= 255; ++i) {
+ std::string one_character_id;
+ one_character_id.push_back(i);
+ v.push_back(Id::CreateFromClientString(one_character_id));
+ }
+
+ for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
+ // The following looks redundant, but we're testing a custom operator<.
+ ASSERT_LT(Id::GetLeastIdForLexicographicComparison(), *i);
+ ASSERT_NE(*i, i->GetLexicographicSuccessor());
+ ASSERT_NE(i->GetLexicographicSuccessor(), *i);
+ ASSERT_LT(*i, i->GetLexicographicSuccessor());
+ ASSERT_GT(i->GetLexicographicSuccessor(), *i);
+ for (vector<Id>::iterator j = v.begin(); j != v.end(); ++j) {
+ if (j == i)
+ continue;
+ if (*j < *i) {
+ ASSERT_LT(j->GetLexicographicSuccessor(), *i);
+ ASSERT_LT(j->GetLexicographicSuccessor(),
+ i->GetLexicographicSuccessor());
+ ASSERT_LT(*j, i->GetLexicographicSuccessor());
+ } else {
+ ASSERT_GT(j->GetLexicographicSuccessor(), *i);
+ ASSERT_GT(j->GetLexicographicSuccessor(),
+ i->GetLexicographicSuccessor());
+ ASSERT_GT(*j, i->GetLexicographicSuccessor());
+ }
+ }
+ }
+}
+
+TEST(SyncableIdTest, ToValue) {
+ base::ExpectStringValue("r", Id::CreateFromServerId("0").ToValue());
+ base::ExpectStringValue("svalue", Id::CreateFromServerId("value").ToValue());
+
+ base::ExpectStringValue("r", Id::CreateFromClientString("0").ToValue());
+ base::ExpectStringValue("cvalue",
+ Id::CreateFromClientString("value").ToValue());
+}
+
+} // namespace syncable
diff --git a/sync/syncable/syncable_mock.cc b/sync/syncable/syncable_mock.cc
new file mode 100644
index 0000000..24990d1
--- /dev/null
+++ b/sync/syncable/syncable_mock.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable_mock.h"
+
+#include "base/location.h"
+#include "sync/test/null_transaction_observer.h"
+
+MockDirectory::MockDirectory(browser_sync::UnrecoverableErrorHandler* handler)
+ : Directory(&encryptor_, handler, NULL) {
+ InitKernelForTest("myk", &delegate_, syncable::NullTransactionObserver());
+}
+
+MockDirectory::~MockDirectory() {}
+
+MockSyncableWriteTransaction::MockSyncableWriteTransaction(
+ const tracked_objects::Location& from_here, Directory *directory)
+ : WriteTransaction(from_here, syncable::UNITTEST, directory) {
+}
diff --git a/sync/syncable/syncable_mock.h b/sync/syncable/syncable_mock.h
new file mode 100644
index 0000000..27fcee8
--- /dev/null
+++ b/sync/syncable/syncable_mock.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_SYNCABLE_MOCK_H_
+#define SYNC_SYNCABLE_SYNCABLE_MOCK_H_
+#pragma once
+
+#include <string>
+
+#include "sync/syncable/syncable.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/test/null_directory_change_delegate.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::Directory;
+using syncable::EntryKernel;
+
+class MockDirectory : public Directory {
+ public:
+ explicit MockDirectory(browser_sync::UnrecoverableErrorHandler* handler);
+ virtual ~MockDirectory();
+
+ MOCK_METHOD1(GetEntryByHandle, syncable::EntryKernel*(int64));
+
+ MOCK_METHOD2(set_last_downloadstamp, void(syncable::ModelType, int64));
+
+ MOCK_METHOD1(GetEntryByClientTag,
+ syncable::EntryKernel*(const std::string&));
+
+ MOCK_METHOD1(PurgeEntriesWithTypeIn, void(syncable::ModelTypeSet));
+
+ private:
+ browser_sync::FakeEncryptor encryptor_;
+ syncable::NullDirectoryChangeDelegate delegate_;
+};
+
+class MockSyncableWriteTransaction : public syncable::WriteTransaction {
+ public:
+ MockSyncableWriteTransaction(
+ const tracked_objects::Location& from_here, Directory *directory);
+};
+
+
+#endif // SYNC_SYNCABLE_SYNCABLE_MOCK_H_
+
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
new file mode 100644
index 0000000..20b6840
--- /dev/null
+++ b/sync/syncable/syncable_unittest.cc
@@ -0,0 +1,1741 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/syncable.h"
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop.h"
+#include "base/scoped_temp_dir.h"
+#include "base/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/test/values_test_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/values.h"
+#include "sync/engine/syncproto.h"
+#include "sync/util/test_unrecoverable_error_handler.h"
+#include "sync/syncable/directory_backing_store.h"
+#include "sync/syncable/directory_change_delegate.h"
+#include "sync/syncable/on_disk_directory_backing_store.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/test/null_directory_change_delegate.h"
+#include "sync/test/null_transaction_observer.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::ExpectDictBooleanValue;
+using base::ExpectDictStringValue;
+using browser_sync::FakeEncryptor;
+using browser_sync::TestIdFactory;
+using browser_sync::TestUnrecoverableErrorHandler;
+
+namespace syncable {
+
+class SyncableKernelTest : public testing::Test {};
+
+// TODO(akalin): Add unit tests for EntryKernel::ContainsString().
+
+TEST_F(SyncableKernelTest, ToValue) {
+ EntryKernel kernel;
+ scoped_ptr<DictionaryValue> value(kernel.ToValue());
+ if (value.get()) {
+ // Not much to check without repeating the ToValue() code.
+ EXPECT_TRUE(value->HasKey("isDirty"));
+ // The extra +2 is for "isDirty" and "serverModelType".
+ EXPECT_EQ(BIT_TEMPS_END - BEGIN_FIELDS + 2,
+ static_cast<int>(value->size()));
+ } else {
+ ADD_FAILURE();
+ }
+}
+
+namespace {
+void PutDataAsBookmarkFavicon(WriteTransaction* wtrans,
+ MutableEntry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ sync_pb::EntitySpecifics specifics;
+ specifics.mutable_bookmark()->set_url("http://demo/");
+ specifics.mutable_bookmark()->set_favicon(bytes, bytes_length);
+ e->Put(SPECIFICS, specifics);
+}
+
+void ExpectDataFromBookmarkFaviconEquals(BaseTransaction* trans,
+ Entry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ ASSERT_TRUE(e->good());
+ ASSERT_TRUE(e->Get(SPECIFICS).has_bookmark());
+ ASSERT_EQ("http://demo/", e->Get(SPECIFICS).bookmark().url());
+ ASSERT_EQ(std::string(bytes, bytes_length),
+ e->Get(SPECIFICS).bookmark().favicon());
+}
+} // namespace
+
+class SyncableGeneralTest : public testing::Test {
+ public:
+ virtual void SetUp() {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ db_path_ = temp_dir_.path().Append(
+ FILE_PATH_LITERAL("SyncableTest.sqlite3"));
+ }
+
+ virtual void TearDown() {
+ }
+ protected:
+ MessageLoop message_loop_;
+ ScopedTempDir temp_dir_;
+ NullDirectoryChangeDelegate delegate_;
+ FakeEncryptor encryptor_;
+ TestUnrecoverableErrorHandler handler_;
+ FilePath db_path_;
+};
+
+TEST_F(SyncableGeneralTest, General) {
+ Directory dir(&encryptor_, &handler_, NULL);
+ ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
+ "SimpleTest", &delegate_, NullTransactionObserver()));
+
+ int64 root_metahandle;
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(e.good());
+ root_metahandle = e.Get(META_HANDLE);
+ }
+
+ int64 written_metahandle;
+ const Id id = TestIdFactory::FromNumber(99);
+ std::string name = "Jeff";
+ // Test simple read operations on an empty DB.
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_FALSE(e.good()); // Hasn't been written yet.
+
+ Directory::ChildHandles child_handles;
+ dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
+ EXPECT_TRUE(child_handles.empty());
+
+ dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
+ EXPECT_TRUE(child_handles.empty());
+ }
+
+ // Test creating a new meta entry.
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ written_metahandle = me.Get(META_HANDLE);
+ }
+
+ // Test GetChildHandles* after something is now in the DB.
+ // Also check that GET_BY_ID works.
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_TRUE(e.good());
+
+ Directory::ChildHandles child_handles;
+ dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
+ EXPECT_EQ(1u, child_handles.size());
+
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ EXPECT_EQ(*i, written_metahandle);
+ }
+
+ dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
+ EXPECT_EQ(1u, child_handles.size());
+
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ EXPECT_EQ(*i, written_metahandle);
+ }
+ }
+
+ // Test writing data to an entity. Also check that GET_BY_HANDLE works.
+ static const char s[] = "Hello World.";
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
+ ASSERT_TRUE(e.good());
+ PutDataAsBookmarkFavicon(&trans, &e, s, sizeof(s));
+ }
+
+ // Test reading back the contents that we just wrote.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
+ ASSERT_TRUE(e.good());
+ ExpectDataFromBookmarkFaviconEquals(&trans, &e, s, sizeof(s));
+ }
+
+ // Verify it exists in the folder.
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ EXPECT_EQ(1, CountEntriesWithName(&rtrans, rtrans.root_id(), name));
+ }
+
+ // Now delete it.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
+ e.Put(IS_DEL, true);
+
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), name));
+ }
+
+ dir.SaveChanges();
+}
+
+TEST_F(SyncableGeneralTest, ChildrenOps) {
+ Directory dir(&encryptor_, &handler_, NULL);
+ ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
+ "SimpleTest", &delegate_, NullTransactionObserver()));
+
+ int64 written_metahandle;
+ const Id id = TestIdFactory::FromNumber(99);
+ std::string name = "Jeff";
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_FALSE(e.good()); // Hasn't been written yet.
+
+ EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
+ Id child_id;
+ EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
+ EXPECT_TRUE(child_id.IsRoot());
+ }
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ written_metahandle = me.Get(META_HANDLE);
+ }
+
+ // Test children ops after something is now in the DB.
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_TRUE(e.good());
+
+ Entry child(&rtrans, GET_BY_HANDLE, written_metahandle);
+ ASSERT_TRUE(child.good());
+
+ EXPECT_TRUE(dir.HasChildren(&rtrans, rtrans.root_id()));
+ Id child_id;
+ EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
+ EXPECT_EQ(e.Get(ID), child_id);
+ }
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry me(&wtrans, GET_BY_HANDLE, written_metahandle);
+ ASSERT_TRUE(me.good());
+ me.Put(IS_DEL, true);
+ }
+
+ // Test children ops after the children have been deleted.
+ {
+ ReadTransaction rtrans(FROM_HERE, &dir);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_TRUE(e.good());
+
+ EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
+ Id child_id;
+ EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
+ EXPECT_TRUE(child_id.IsRoot());
+ }
+
+ dir.SaveChanges();
+}
+
+// Verifies that the UNIQUE_CLIENT_TAG index is rebuilt from the on-disk DB:
+// an entry written with a client tag must be retrievable via GET_BY_CLIENT_TAG
+// after the directory is closed and reopened, with all fields intact.
+TEST_F(SyncableGeneralTest, ClientIndexRebuildsProperly) {
+  int64 written_metahandle;
+  TestIdFactory factory;
+  const Id id = factory.NewServerId();
+  std::string name = "cheesepuffs";
+  std::string tag = "dietcoke";
+
+  // Test creating a new meta entry.
+  {
+    Directory dir(&encryptor_, &handler_, NULL);
+    ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
+                               NullTransactionObserver()));
+    {
+      WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+      MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
+      ASSERT_TRUE(me.good());
+      me.Put(ID, id);
+      me.Put(BASE_VERSION, 1);
+      me.Put(UNIQUE_CLIENT_TAG, tag);
+      written_metahandle = me.Get(META_HANDLE);
+    }
+    // Persist before the Directory goes out of scope below.
+    dir.SaveChanges();
+  }
+
+  // The DB was closed. Now reopen it. This will cause index regeneration.
+  {
+    Directory dir(&encryptor_, &handler_, NULL);
+    ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest",
+                               &delegate_, NullTransactionObserver()));
+
+    ReadTransaction trans(FROM_HERE, &dir);
+    Entry me(&trans, GET_BY_CLIENT_TAG, tag);
+    ASSERT_TRUE(me.good());
+    EXPECT_EQ(me.Get(ID), id);
+    EXPECT_EQ(me.Get(BASE_VERSION), 1);
+    EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
+    EXPECT_EQ(me.Get(META_HANDLE), written_metahandle);
+  }
+}
+
+// Verifies that a deleted-but-unsynced entry survives a close/reopen cycle
+// and remains reachable through the regenerated client-tag index. IS_UNSYNCED
+// is what keeps the tombstone from being purged on reload.
+TEST_F(SyncableGeneralTest, ClientIndexRebuildsDeletedProperly) {
+  TestIdFactory factory;
+  const Id id = factory.NewServerId();
+  std::string tag = "dietcoke";
+
+  // Test creating a deleted, unsynced, server meta entry.
+  {
+    Directory dir(&encryptor_, &handler_, NULL);
+    ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
+                               NullTransactionObserver()));
+    {
+      WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+      MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "deleted");
+      ASSERT_TRUE(me.good());
+      me.Put(ID, id);
+      me.Put(BASE_VERSION, 1);
+      me.Put(UNIQUE_CLIENT_TAG, tag);
+      me.Put(IS_DEL, true);
+      me.Put(IS_UNSYNCED, true);  // Or it might be purged.
+    }
+    dir.SaveChanges();
+  }
+
+  // The DB was closed. Now reopen it. This will cause index regeneration.
+  // Should still be present and valid in the client tag index.
+  {
+    Directory dir(&encryptor_, &handler_, NULL);
+    ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
+                               NullTransactionObserver()));
+
+    ReadTransaction trans(FROM_HERE, &dir);
+    Entry me(&trans, GET_BY_CLIENT_TAG, tag);
+    ASSERT_TRUE(me.good());
+    EXPECT_EQ(me.Get(ID), id);
+    EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
+    EXPECT_TRUE(me.Get(IS_DEL));
+    EXPECT_TRUE(me.Get(IS_UNSYNCED));
+  }
+}
+
+// Exercises Entry::ToValue() for both a bad (non-existent) entry and a
+// freshly-created one, checking the dictionary keys the serialization emits.
+TEST_F(SyncableGeneralTest, ToValue) {
+  Directory dir(&encryptor_, &handler_, NULL);
+  ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
+      "SimpleTest", &delegate_, NullTransactionObserver()));
+
+  const Id id = TestIdFactory::FromNumber(99);
+  {
+    ReadTransaction rtrans(FROM_HERE, &dir);
+    Entry e(&rtrans, GET_BY_ID, id);
+    EXPECT_FALSE(e.good());  // Hasn't been written yet.
+
+    // A bad entry serializes to just {"good": false}.
+    scoped_ptr<DictionaryValue> value(e.ToValue());
+    ExpectDictBooleanValue(false, *value, "good");
+    EXPECT_EQ(1u, value->size());
+  }
+
+  // Test creating a new meta entry.
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+    MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "new");
+    ASSERT_TRUE(me.good());
+    me.Put(ID, id);
+    me.Put(BASE_VERSION, 1);
+
+    scoped_ptr<DictionaryValue> value(me.ToValue());
+    ExpectDictBooleanValue(true, *value, "good");
+    EXPECT_TRUE(value->HasKey("kernel"));
+    ExpectDictStringValue("Unspecified", *value, "modelType");
+    ExpectDictBooleanValue(true, *value, "existsOnClientBecauseNameIsNonEmpty");
+    ExpectDictBooleanValue(false, *value, "isRoot");
+  }
+
+  dir.SaveChanges();
+}
+
+// A Directory whose backing store always fails SaveChanges by returning false.
+// Used by tests that need to exercise the save-failure recovery paths.
+class TestUnsaveableDirectory : public Directory {
+ public:
+  // NOTE(review): passes addresses of members declared below to the base
+  // ctor; only the addresses are taken here, not the (not-yet-initialized)
+  // values — confirm Directory's ctor does not dereference them.
+  TestUnsaveableDirectory() : Directory(&encryptor_, &handler_, NULL) {}
+
+  // Backing store whose SaveChanges unconditionally reports failure.
+  class UnsaveableBackingStore : public OnDiskDirectoryBackingStore {
+   public:
+     UnsaveableBackingStore(const std::string& dir_name,
+                            const FilePath& backing_filepath)
+         : OnDiskDirectoryBackingStore(dir_name, backing_filepath) { }
+     // NOTE(review): intended as an override of the base's SaveChanges —
+     // consider adding OVERRIDE to catch signature drift at compile time.
+     virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot) {
+       return false;
+     }
+  };
+
+  // Opens the directory using UnsaveableBackingStore; mirrors Directory's
+  // normal Open but guarantees all subsequent SaveChanges calls fail.
+  DirOpenResult OpenUnsaveable(
+      const FilePath& file_path, const std::string& name,
+      DirectoryChangeDelegate* delegate,
+      const browser_sync::WeakHandle<TransactionObserver>&
+          transaction_observer) {
+    DirectoryBackingStore *store = new UnsaveableBackingStore(name, file_path);
+    DirOpenResult result =
+        OpenImpl(store, name, delegate, transaction_observer);
+    if (OPENED != result)
+      Close();
+    return result;
+  }
+
+ private:
+  FakeEncryptor encryptor_;
+  TestUnrecoverableErrorHandler handler_;
+};
+
+// A test fixture for syncable::Directory. Uses an in-memory database to keep
+// the unit tests fast.
+class SyncableDirectoryTest : public testing::Test {
+ protected:
+  MessageLoop message_loop_;
+  static const char kName[];
+
+  // Opens a fresh in-memory Directory before each test.
+  virtual void SetUp() {
+    dir_.reset(new Directory(&encryptor_, &handler_, NULL));
+    ASSERT_TRUE(dir_.get());
+    ASSERT_EQ(OPENED, dir_->OpenInMemoryForTest(kName, &delegate_,
+                                                NullTransactionObserver()));
+    ASSERT_TRUE(dir_->good());
+  }
+
+  // Flushes pending changes and destroys the Directory after each test.
+  virtual void TearDown() {
+    dir_->SaveChanges();
+    dir_.reset();
+  }
+
+  void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result) {
+    dir_->GetAllMetaHandles(trans, result);
+  }
+
+  // True iff |metahandle| is currently in the directory's dirty set.
+  bool IsInDirtyMetahandles(int64 metahandle) {
+    return 1 == dir_->kernel_->dirty_metahandles->count(metahandle);
+  }
+
+  // True iff |metahandle| is queued for purging on the next save.
+  bool IsInMetahandlesToPurge(int64 metahandle) {
+    return 1 == dir_->kernel_->metahandles_to_purge->count(metahandle);
+  }
+
+  // Asserts that PurgeEntriesWithTypeIn removed every entry of the purged
+  // types and reset their initial-sync-ended bits, while leaving BOOKMARKS
+  // untouched. |before_reload| distinguishes the in-memory check (purge set
+  // still pending) from the check after the DB has been saved and reloaded.
+  void CheckPurgeEntriesWithTypeInSucceeded(ModelTypeSet types_to_purge,
+                                            bool before_reload) {
+    SCOPED_TRACE(testing::Message("Before reload: ") << before_reload);
+    {
+      ReadTransaction trans(FROM_HERE, dir_.get());
+      MetahandleSet all_set;
+      dir_->GetAllMetaHandles(&trans, &all_set);
+      EXPECT_EQ(3U, all_set.size());
+      if (before_reload)
+        EXPECT_EQ(4U, dir_->kernel_->metahandles_to_purge->size());
+      for (MetahandleSet::iterator iter = all_set.begin();
+           iter != all_set.end(); ++iter) {
+        Entry e(&trans, GET_BY_HANDLE, *iter);
+        const ModelType local_type = e.GetModelType();
+        const ModelType server_type = e.GetServerModelType();
+
+        // Any surviving entry whose local or server type is in the purge
+        // set means the purge failed.
+        if ((IsRealDataType(local_type) &&
+             types_to_purge.Has(local_type)) ||
+            (IsRealDataType(server_type) &&
+             types_to_purge.Has(server_type))) {
+          FAIL() << "Illegal type should have been deleted.";
+        }
+      }
+    }
+
+    for (ModelTypeSet::Iterator it = types_to_purge.First();
+         it.Good(); it.Inc()) {
+      EXPECT_FALSE(dir_->initial_sync_ended_for_type(it.Get()));
+    }
+    EXPECT_FALSE(types_to_purge.Has(BOOKMARKS));
+    EXPECT_TRUE(dir_->initial_sync_ended_for_type(BOOKMARKS));
+  }
+
+  FakeEncryptor encryptor_;
+  TestUnrecoverableErrorHandler handler_;
+  scoped_ptr<Directory> dir_;
+  NullDirectoryChangeDelegate delegate_;
+
+  // Creates an empty entry and sets the ID field to a default one.
+  void CreateEntry(const std::string& entryname) {
+    CreateEntry(entryname, TestIdFactory::FromNumber(-99));
+  }
+
+  // Creates an empty entry and sets the ID field to id.
+  void CreateEntry(const std::string& entryname, const int id) {
+    CreateEntry(entryname, TestIdFactory::FromNumber(id));
+  }
+  // Creates an unsynced entry named |entryname| under the root with |id|.
+  void CreateEntry(const std::string& entryname, Id id) {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
+    MutableEntry me(&wtrans, CREATE, wtrans.root_id(), entryname);
+    ASSERT_TRUE(me.good());
+    me.Put(ID, id);
+    me.Put(IS_UNSYNCED, true);
+  }
+
+  // Asserts that the entry with handle |id| has the given field values.
+  void ValidateEntry(BaseTransaction* trans,
+                     int64 id,
+                     bool check_name,
+                     const std::string& name,
+                     int64 base_version,
+                     int64 server_version,
+                     bool is_del);
+};
+
+// Verifies that TakeSnapshotForSaveChanges captures the purge set, and that
+// HandleSaveChangesFailure restores a failed snapshot's purge set so a later
+// snapshot contains the union of both purges.
+TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
+  const int metas_to_create = 50;
+  MetahandleSet expected_purges;
+  MetahandleSet all_handles;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    for (int i = 0; i < metas_to_create; i++) {
+      MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
+      e.Put(IS_UNSYNCED, true);
+      sync_pb::EntitySpecifics specs;
+      // Alternate BOOKMARKS (to be purged first) and PREFERENCES.
+      if (i % 2 == 0) {
+        AddDefaultFieldValue(BOOKMARKS, &specs);
+        expected_purges.insert(e.Get(META_HANDLE));
+        all_handles.insert(e.Get(META_HANDLE));
+      } else {
+        AddDefaultFieldValue(PREFERENCES, &specs);
+        all_handles.insert(e.Get(META_HANDLE));
+      }
+      e.Put(SPECIFICS, specs);
+      e.Put(SERVER_SPECIFICS, specs);
+    }
+  }
+
+  syncable::ModelTypeSet to_purge(BOOKMARKS);
+  dir_->PurgeEntriesWithTypeIn(to_purge);
+
+  Directory::SaveChangesSnapshot snapshot1;
+  // Held for the remainder of the test, as a real SaveChanges would hold it.
+  base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+  dir_->TakeSnapshotForSaveChanges(&snapshot1);
+  EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
+
+  to_purge.Clear();
+  to_purge.Put(PREFERENCES);
+  dir_->PurgeEntriesWithTypeIn(to_purge);
+
+  // Simulate the first save failing; its purge set must be re-queued.
+  dir_->HandleSaveChangesFailure(snapshot1);
+
+  Directory::SaveChangesSnapshot snapshot2;
+  dir_->TakeSnapshotForSaveChanges(&snapshot2);
+  EXPECT_TRUE(all_handles == snapshot2.metahandles_to_purge);
+}
+
+// Verifies that TakeSnapshotForSaveChanges picks up every dirtied entry,
+// both on first creation and after further mutations mixed with new entries.
+TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
+  const int metahandles_to_create = 100;
+  std::vector<int64> expected_dirty_metahandles;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    for (int i = 0; i < metahandles_to_create; i++) {
+      MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
+      expected_dirty_metahandles.push_back(e.Get(META_HANDLE));
+      e.Put(IS_UNSYNCED, true);
+    }
+  }
+  // Fake SaveChanges() and make sure we got what we expected.
+  {
+    Directory::SaveChangesSnapshot snapshot;
+    base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+    dir_->TakeSnapshotForSaveChanges(&snapshot);
+    // Make sure there's an entry for each new metahandle.  Make sure all
+    // entries are marked dirty.
+    ASSERT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
+    for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+         i != snapshot.dirty_metas.end(); ++i) {
+      ASSERT_TRUE(i->is_dirty());
+    }
+    dir_->VacuumAfterSaveChanges(snapshot);
+  }
+  // Put a new value with existing transactions as well as adding new ones.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    std::vector<int64> new_dirty_metahandles;
+    for (std::vector<int64>::const_iterator i =
+        expected_dirty_metahandles.begin();
+        i != expected_dirty_metahandles.end(); ++i) {
+        // Change existing entries to directories to dirty them.
+        MutableEntry e1(&trans, GET_BY_HANDLE, *i);
+        e1.Put(IS_DIR, true);
+        e1.Put(IS_UNSYNCED, true);
+        // Add new entries
+        MutableEntry e2(&trans, CREATE, trans.root_id(), "bar");
+        e2.Put(IS_UNSYNCED, true);
+        new_dirty_metahandles.push_back(e2.Get(META_HANDLE));
+    }
+    expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
+        new_dirty_metahandles.begin(), new_dirty_metahandles.end());
+  }
+  // Fake SaveChanges() and make sure we got what we expected.
+  {
+    Directory::SaveChangesSnapshot snapshot;
+    base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+    dir_->TakeSnapshotForSaveChanges(&snapshot);
+    // Make sure there's an entry for each new metahandle.  Make sure all
+    // entries are marked dirty.
+    EXPECT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
+    for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+         i != snapshot.dirty_metas.end(); ++i) {
+      EXPECT_TRUE(i->is_dirty());
+    }
+    dir_->VacuumAfterSaveChanges(snapshot);
+  }
+}
+
+// Verifies that TakeSnapshotForSaveChanges returns ONLY dirty entries: after
+// SaveChanges clears dirtiness, untouched entries must not reappear, and only
+// the half of the entries mutated afterwards should show up.
+TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
+  const int metahandles_to_create = 100;
+
+  // half of 2 * metahandles_to_create
+  const unsigned int number_changed = 100u;
+  std::vector<int64> expected_dirty_metahandles;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    for (int i = 0; i < metahandles_to_create; i++) {
+      MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
+      expected_dirty_metahandles.push_back(e.Get(META_HANDLE));
+      e.Put(IS_UNSYNCED, true);
+    }
+  }
+  dir_->SaveChanges();
+  // Put a new value with existing transactions as well as adding new ones.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    std::vector<int64> new_dirty_metahandles;
+    for (std::vector<int64>::const_iterator i =
+        expected_dirty_metahandles.begin();
+        i != expected_dirty_metahandles.end(); ++i) {
+        // Change existing entries to directories to dirty them.
+        MutableEntry e1(&trans, GET_BY_HANDLE, *i);
+        ASSERT_TRUE(e1.good());
+        e1.Put(IS_DIR, true);
+        e1.Put(IS_UNSYNCED, true);
+        // Add new entries
+        MutableEntry e2(&trans, CREATE, trans.root_id(), "bar");
+        e2.Put(IS_UNSYNCED, true);
+        new_dirty_metahandles.push_back(e2.Get(META_HANDLE));
+    }
+    expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
+        new_dirty_metahandles.begin(), new_dirty_metahandles.end());
+  }
+  dir_->SaveChanges();
+  // Don't make any changes whatsoever and ensure nothing comes back.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    for (std::vector<int64>::const_iterator i =
+        expected_dirty_metahandles.begin();
+        i != expected_dirty_metahandles.end(); ++i) {
+      MutableEntry e(&trans, GET_BY_HANDLE, *i);
+      ASSERT_TRUE(e.good());
+      // We aren't doing anything to dirty these entries.
+    }
+  }
+  // Fake SaveChanges() and make sure we got what we expected.
+  {
+    Directory::SaveChangesSnapshot snapshot;
+    base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+    dir_->TakeSnapshotForSaveChanges(&snapshot);
+    // Make sure there are no dirty_metahandles.
+    EXPECT_EQ(0u, snapshot.dirty_metas.size());
+    dir_->VacuumAfterSaveChanges(snapshot);
+  }
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    bool should_change = false;
+    for (std::vector<int64>::const_iterator i =
+        expected_dirty_metahandles.begin();
+        i != expected_dirty_metahandles.end(); ++i) {
+        // Maybe change entries by flipping IS_DIR.
+        MutableEntry e(&trans, GET_BY_HANDLE, *i);
+        ASSERT_TRUE(e.good());
+        // Alternate so exactly half (number_changed) of them get dirtied.
+        should_change = !should_change;
+        if (should_change) {
+          bool not_dir = !e.Get(IS_DIR);
+          e.Put(IS_DIR, not_dir);
+          e.Put(IS_UNSYNCED, true);
+        }
+    }
+  }
+  // Fake SaveChanges() and make sure we got what we expected.
+  {
+    Directory::SaveChangesSnapshot snapshot;
+    base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+    dir_->TakeSnapshotForSaveChanges(&snapshot);
+    // Make sure there's an entry for each changed metahandle.  Make sure all
+    // entries are marked dirty.
+    EXPECT_EQ(number_changed, snapshot.dirty_metas.size());
+    for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
+         i != snapshot.dirty_metas.end(); ++i) {
+      EXPECT_TRUE(i->is_dirty());
+    }
+    dir_->VacuumAfterSaveChanges(snapshot);
+  }
+}
+
+// Out-of-class storage for the directory name declared in the fixture above.
+const char SyncableDirectoryTest::kName[] = "Foo";
+
+namespace {
+
+// GET_BY_ID on an ID that was never written must yield a bad Entry.
+TEST_F(SyncableDirectoryTest, TestBasicLookupNonExistantID) {
+  ReadTransaction rtrans(FROM_HERE, dir_.get());
+  Entry e(&rtrans, GET_BY_ID, TestIdFactory::FromNumber(-99));
+  ASSERT_FALSE(e.good());
+}
+
+// GET_BY_ID succeeds for an entry created with the fixture's default ID (-99).
+TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
+  CreateEntry("rtc");
+  ReadTransaction rtrans(FROM_HERE, dir_.get());
+  Entry e(&rtrans, GET_BY_ID, TestIdFactory::FromNumber(-99));
+  ASSERT_TRUE(e.good());
+}
+
+// Toggling IS_DEL back and forth on same-named siblings must succeed every
+// time (exercises delete/undelete bookkeeping within one transaction).
+TEST_F(SyncableDirectoryTest, TestDelete) {
+  std::string name = "peanut butter jelly time";
+  WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+  MutableEntry e1(&trans, CREATE, trans.root_id(), name);
+  ASSERT_TRUE(e1.good());
+  ASSERT_TRUE(e1.Put(IS_DEL, true));
+  MutableEntry e2(&trans, CREATE, trans.root_id(), name);
+  ASSERT_TRUE(e2.good());
+  ASSERT_TRUE(e2.Put(IS_DEL, true));
+  MutableEntry e3(&trans, CREATE, trans.root_id(), name);
+  ASSERT_TRUE(e3.good());
+  ASSERT_TRUE(e3.Put(IS_DEL, true));
+
+  ASSERT_TRUE(e1.Put(IS_DEL, false));
+  ASSERT_TRUE(e2.Put(IS_DEL, false));
+  ASSERT_TRUE(e3.Put(IS_DEL, false));
+
+  ASSERT_TRUE(e1.Put(IS_DEL, true));
+  ASSERT_TRUE(e2.Put(IS_DEL, true));
+  ASSERT_TRUE(e3.Put(IS_DEL, true));
+}
+
+// Verifies GetUnsyncedMetaHandles tracks the IS_UNSYNCED bit exactly:
+// handles appear as entries are marked unsynced and disappear when cleared,
+// across SaveChanges calls.
+TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
+  Directory::UnsyncedMetaHandles handles;
+  int64 handle1, handle2;
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    dir_->GetUnsyncedMetaHandles(&trans, &handles);
+    ASSERT_TRUE(0 == handles.size());
+
+    MutableEntry e1(&trans, CREATE, trans.root_id(), "abba");
+    ASSERT_TRUE(e1.good());
+    handle1 = e1.Get(META_HANDLE);
+    e1.Put(BASE_VERSION, 1);
+    e1.Put(IS_DIR, true);
+    e1.Put(ID, TestIdFactory::FromNumber(101));
+
+    MutableEntry e2(&trans, CREATE, e1.Get(ID), "bread");
+    ASSERT_TRUE(e2.good());
+    handle2 = e2.Get(META_HANDLE);
+    e2.Put(BASE_VERSION, 1);
+    e2.Put(ID, TestIdFactory::FromNumber(102));
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    // Neither entry is unsynced yet.
+    dir_->GetUnsyncedMetaHandles(&trans, &handles);
+    ASSERT_TRUE(0 == handles.size());
+
+    MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+    ASSERT_TRUE(e3.good());
+    e3.Put(IS_UNSYNCED, true);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnsyncedMetaHandles(&trans, &handles);
+    ASSERT_TRUE(1 == handles.size());
+    ASSERT_TRUE(handle1 == handles[0]);
+
+    MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+    ASSERT_TRUE(e4.good());
+    e4.Put(IS_UNSYNCED, true);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnsyncedMetaHandles(&trans, &handles);
+    ASSERT_TRUE(2 == handles.size());
+    // Result order is unspecified; accept either permutation.
+    if (handle1 == handles[0]) {
+      ASSERT_TRUE(handle2 == handles[1]);
+    } else {
+      ASSERT_TRUE(handle2 == handles[0]);
+      ASSERT_TRUE(handle1 == handles[1]);
+    }
+
+    MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+    ASSERT_TRUE(e5.good());
+    ASSERT_TRUE(e5.Get(IS_UNSYNCED));
+    ASSERT_TRUE(e5.Put(IS_UNSYNCED, false));
+    ASSERT_FALSE(e5.Get(IS_UNSYNCED));
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnsyncedMetaHandles(&trans, &handles);
+    ASSERT_TRUE(1 == handles.size());
+    ASSERT_TRUE(handle2 == handles[0]);
+  }
+}
+
+// Mirror of TestGetUnsynced for the IS_UNAPPLIED_UPDATE bit: verifies
+// GetUnappliedUpdateMetaHandles reflects flag changes across SaveChanges.
+TEST_F(SyncableDirectoryTest, TestGetUnappliedUpdates) {
+  Directory::UnappliedUpdateMetaHandles handles;
+  int64 handle1, handle2;
+  const syncable::FullModelTypeSet all_types =
+      syncable::FullModelTypeSet::All();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
+    ASSERT_TRUE(0 == handles.size());
+
+    MutableEntry e1(&trans, CREATE, trans.root_id(), "abba");
+    ASSERT_TRUE(e1.good());
+    handle1 = e1.Get(META_HANDLE);
+    e1.Put(IS_UNAPPLIED_UPDATE, false);
+    e1.Put(BASE_VERSION, 1);
+    e1.Put(ID, TestIdFactory::FromNumber(101));
+    e1.Put(IS_DIR, true);
+
+    MutableEntry e2(&trans, CREATE, e1.Get(ID), "bread");
+    ASSERT_TRUE(e2.good());
+    handle2 = e2.Get(META_HANDLE);
+    e2.Put(IS_UNAPPLIED_UPDATE, false);
+    e2.Put(BASE_VERSION, 1);
+    e2.Put(ID, TestIdFactory::FromNumber(102));
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
+    ASSERT_TRUE(0 == handles.size());
+
+    MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+    ASSERT_TRUE(e3.good());
+    e3.Put(IS_UNAPPLIED_UPDATE, true);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
+    ASSERT_TRUE(1 == handles.size());
+    ASSERT_TRUE(handle1 == handles[0]);
+
+    MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+    ASSERT_TRUE(e4.good());
+    e4.Put(IS_UNAPPLIED_UPDATE, true);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
+    ASSERT_TRUE(2 == handles.size());
+    // Result order is unspecified; accept either permutation.
+    if (handle1 == handles[0]) {
+      ASSERT_TRUE(handle2 == handles[1]);
+    } else {
+      ASSERT_TRUE(handle2 == handles[0]);
+      ASSERT_TRUE(handle1 == handles[1]);
+    }
+
+    MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+    ASSERT_TRUE(e5.good());
+    e5.Put(IS_UNAPPLIED_UPDATE, false);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
+    ASSERT_TRUE(1 == handles.size());
+    ASSERT_TRUE(handle2 == handles[0]);
+  }
+}
+
+
+// Regression test for crbug 531383: re-deleting an entry that was deleted,
+// shadowed by a same-named deleted twin, and then undeleted used to trip a
+// CHECK in the delete path.
+TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
+  // Try to evoke a check failure...
+  TestIdFactory id_factory;
+  int64 grandchild_handle;
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
+    MutableEntry parent(&wtrans, CREATE, id_factory.root(), "Bob");
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(ID, id_factory.NewServerId());
+    parent.Put(BASE_VERSION, 1);
+    MutableEntry child(&wtrans, CREATE, parent.Get(ID), "Bob");
+    ASSERT_TRUE(child.good());
+    child.Put(IS_DIR, true);
+    child.Put(ID, id_factory.NewServerId());
+    child.Put(BASE_VERSION, 1);
+    MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), "Bob");
+    ASSERT_TRUE(grandchild.good());
+    grandchild.Put(ID, id_factory.NewServerId());
+    grandchild.Put(BASE_VERSION, 1);
+    ASSERT_TRUE(grandchild.Put(IS_DEL, true));
+    // Create a deleted twin with the same name under the same parent, then
+    // undelete the original — the combination that triggered the bug.
+    MutableEntry twin(&wtrans, CREATE, child.Get(ID), "Bob");
+    ASSERT_TRUE(twin.good());
+    ASSERT_TRUE(twin.Put(IS_DEL, true));
+    ASSERT_TRUE(grandchild.Put(IS_DEL, false));
+
+    grandchild_handle = grandchild.Get(META_HANDLE);
+  }
+  dir_->SaveChanges();
+  {
+    WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
+    MutableEntry grandchild(&wtrans, GET_BY_HANDLE, grandchild_handle);
+    grandchild.Put(IS_DEL, true);  // Used to CHECK fail here.
+  }
+}
+
+// Convenience wrapper: would reparenting entry |a| under entry |b| be legal?
+// Delegates to the free-function overload taking a transaction and two IDs.
+static inline bool IsLegalNewParent(const Entry& a, const Entry& b) {
+  return IsLegalNewParent(a.trans(), a.Get(ID), b.Get(ID));
+}
+
+// Builds two three-level chains under the root and checks that reparenting
+// is rejected exactly when the target is the entry itself or a descendant.
+TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
+  TestIdFactory id_factory;
+  WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
+  Entry root(&wtrans, GET_BY_ID, id_factory.root());
+  ASSERT_TRUE(root.good());
+  MutableEntry parent(&wtrans, CREATE, root.Get(ID), "Bob");
+  ASSERT_TRUE(parent.good());
+  parent.Put(IS_DIR, true);
+  parent.Put(ID, id_factory.NewServerId());
+  parent.Put(BASE_VERSION, 1);
+  MutableEntry child(&wtrans, CREATE, parent.Get(ID), "Bob");
+  ASSERT_TRUE(child.good());
+  child.Put(IS_DIR, true);
+  child.Put(ID, id_factory.NewServerId());
+  child.Put(BASE_VERSION, 1);
+  MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), "Bob");
+  ASSERT_TRUE(grandchild.good());
+  grandchild.Put(ID, id_factory.NewServerId());
+  grandchild.Put(BASE_VERSION, 1);
+
+  MutableEntry parent2(&wtrans, CREATE, root.Get(ID), "Pete");
+  ASSERT_TRUE(parent2.good());
+  parent2.Put(IS_DIR, true);
+  parent2.Put(ID, id_factory.NewServerId());
+  parent2.Put(BASE_VERSION, 1);
+  MutableEntry child2(&wtrans, CREATE, parent2.Get(ID), "Pete");
+  ASSERT_TRUE(child2.good());
+  child2.Put(IS_DIR, true);
+  child2.Put(ID, id_factory.NewServerId());
+  child2.Put(BASE_VERSION, 1);
+  MutableEntry grandchild2(&wtrans, CREATE, child2.Get(ID), "Pete");
+  ASSERT_TRUE(grandchild2.good());
+  grandchild2.Put(ID, id_factory.NewServerId());
+  grandchild2.Put(BASE_VERSION, 1);
+  // resulting tree
+  //           root
+  //           /  |
+  //     parent    parent2
+  //          |    |
+  //      child    child2
+  //          |    |
+  // grandchild    grandchild2
+  ASSERT_TRUE(IsLegalNewParent(child, root));
+  ASSERT_TRUE(IsLegalNewParent(child, parent));
+  ASSERT_FALSE(IsLegalNewParent(child, child));
+  ASSERT_FALSE(IsLegalNewParent(child, grandchild));
+  ASSERT_TRUE(IsLegalNewParent(child, parent2));
+  ASSERT_TRUE(IsLegalNewParent(child, grandchild2));
+  ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+  ASSERT_FALSE(IsLegalNewParent(root, grandchild));
+  // NOTE(review): duplicate of the (parent, grandchild) assertion two lines
+  // up — possibly meant to cover another pair; confirm intent.
+  ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+}
+
+// An entry created under a folder must be findable by name under that folder
+// (and not under the root), with PARENT_ID and NON_UNIQUE_NAME set correctly.
+TEST_F(SyncableDirectoryTest, TestEntryIsInFolder) {
+  // Create a subdir and an entry.
+  int64 entry_handle;
+  syncable::Id folder_id;
+  syncable::Id entry_id;
+  std::string entry_name = "entry";
+
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    MutableEntry folder(&trans, CREATE, trans.root_id(), "folder");
+    ASSERT_TRUE(folder.good());
+    EXPECT_TRUE(folder.Put(IS_DIR, true));
+    EXPECT_TRUE(folder.Put(IS_UNSYNCED, true));
+    folder_id = folder.Get(ID);
+
+    MutableEntry entry(&trans, CREATE, folder.Get(ID), entry_name);
+    ASSERT_TRUE(entry.good());
+    entry_handle = entry.Get(META_HANDLE);
+    entry.Put(IS_UNSYNCED, true);
+    entry_id = entry.Get(ID);
+  }
+
+  // Make sure we can find the entry in the folder.
+  {
+    ReadTransaction trans(FROM_HERE, dir_.get());
+    EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), entry_name));
+    EXPECT_EQ(1, CountEntriesWithName(&trans, folder_id, entry_name));
+
+    Entry entry(&trans, GET_BY_ID, entry_id);
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ(entry_handle, entry.Get(META_HANDLE));
+    EXPECT_TRUE(entry.Get(NON_UNIQUE_NAME) == entry_name);
+    EXPECT_TRUE(entry.Get(PARENT_ID) == folder_id);
+  }
+}
+
+// Changing an entry's PARENT_ID must move it between parents in the
+// parent-id index: name counts shift from the old folder to the new one.
+TEST_F(SyncableDirectoryTest, TestParentIdIndexUpdate) {
+  std::string child_name = "child";
+
+  WriteTransaction wt(FROM_HERE, UNITTEST, dir_.get());
+  MutableEntry parent_folder(&wt, CREATE, wt.root_id(), "folder1");
+  parent_folder.Put(IS_UNSYNCED, true);
+  EXPECT_TRUE(parent_folder.Put(IS_DIR, true));
+
+  MutableEntry parent_folder2(&wt, CREATE, wt.root_id(), "folder2");
+  parent_folder2.Put(IS_UNSYNCED, true);
+  EXPECT_TRUE(parent_folder2.Put(IS_DIR, true));
+
+  MutableEntry child(&wt, CREATE, parent_folder.Get(ID), child_name);
+  EXPECT_TRUE(child.Put(IS_DIR, true));
+  child.Put(IS_UNSYNCED, true);
+
+  ASSERT_TRUE(child.good());
+
+  EXPECT_EQ(0, CountEntriesWithName(&wt, wt.root_id(), child_name));
+  EXPECT_EQ(parent_folder.Get(ID), child.Get(PARENT_ID));
+  EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
+  EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
+  // Reparent the child; the index must follow.
+  child.Put(PARENT_ID, parent_folder2.Get(ID));
+  EXPECT_EQ(parent_folder2.Get(ID), child.Get(PARENT_ID));
+  EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
+  EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
+}
+
+// Deleted entries must stay out of the name index: renaming or reparenting
+// a deleted folder must not make it visible under either name.
+TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
+  std::string folder_name = "folder";
+  std::string new_name = "new_name";
+
+  WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+  MutableEntry folder(&trans, CREATE, trans.root_id(), folder_name);
+  ASSERT_TRUE(folder.good());
+  ASSERT_TRUE(folder.Put(IS_DIR, true));
+  ASSERT_TRUE(folder.Put(IS_DEL, true));
+
+  EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
+
+  MutableEntry deleted(&trans, GET_BY_ID, folder.Get(ID));
+  ASSERT_TRUE(deleted.good());
+  ASSERT_TRUE(deleted.Put(PARENT_ID, trans.root_id()));
+  ASSERT_TRUE(deleted.Put(NON_UNIQUE_NAME, new_name));
+
+  EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
+  EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), new_name));
+}
+
+// Renaming an entry to a case-only variant of its name must succeed and the
+// entry must still be deletable afterwards.
+TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
+  WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+  MutableEntry folder(&trans, CREATE, trans.root_id(), "CaseChange");
+  ASSERT_TRUE(folder.good());
+  EXPECT_TRUE(folder.Put(PARENT_ID, trans.root_id()));
+  EXPECT_TRUE(folder.Put(NON_UNIQUE_NAME, "CASECHANGE"));
+  EXPECT_TRUE(folder.Put(IS_DEL, true));
+}
+
+// Create items of each model type, and check that GetModelType and
+// GetServerModelType return the right value.
+//
+// For every real datatype this creates: a local folder, a local item, a
+// local deletion record, a server-side folder and item, and two SyncEntity
+// protos — each must report the datatype derived from its specifics.
+TEST_F(SyncableDirectoryTest, GetModelType) {
+  TestIdFactory id_factory;
+  for (int i = 0; i < MODEL_TYPE_COUNT; ++i) {
+    ModelType datatype = ModelTypeFromInt(i);
+    SCOPED_TRACE(testing::Message("Testing model type ") << datatype);
+    switch (datatype) {
+      case UNSPECIFIED:
+      case TOP_LEVEL_FOLDER:
+        continue;  // Datatype isn't a function of Specifics.
+      default:
+        break;
+    }
+    sync_pb::EntitySpecifics specifics;
+    AddDefaultFieldValue(datatype, &specifics);
+
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry folder(&trans, CREATE, trans.root_id(), "Folder");
+    ASSERT_TRUE(folder.good());
+    folder.Put(ID, id_factory.NewServerId());
+    folder.Put(SPECIFICS, specifics);
+    folder.Put(BASE_VERSION, 1);
+    folder.Put(IS_DIR, true);
+    folder.Put(IS_DEL, false);
+    ASSERT_EQ(datatype, folder.GetModelType());
+
+    MutableEntry item(&trans, CREATE, trans.root_id(), "Item");
+    ASSERT_TRUE(item.good());
+    item.Put(ID, id_factory.NewServerId());
+    item.Put(SPECIFICS, specifics);
+    item.Put(BASE_VERSION, 1);
+    item.Put(IS_DIR, false);
+    item.Put(IS_DEL, false);
+    ASSERT_EQ(datatype, item.GetModelType());
+
+    // It's critical that deletion records retain their datatype, so that
+    // they can be dispatched to the appropriate change processor.
+    MutableEntry deleted_item(&trans, CREATE, trans.root_id(), "Deleted Item");
+    // Fixed copy-paste bug: this previously re-checked |item| instead of
+    // the newly created |deleted_item|.
+    ASSERT_TRUE(deleted_item.good());
+    deleted_item.Put(ID, id_factory.NewServerId());
+    deleted_item.Put(SPECIFICS, specifics);
+    deleted_item.Put(BASE_VERSION, 1);
+    deleted_item.Put(IS_DIR, false);
+    deleted_item.Put(IS_DEL, true);
+    ASSERT_EQ(datatype, deleted_item.GetModelType());
+
+    MutableEntry server_folder(&trans, CREATE_NEW_UPDATE_ITEM,
+        id_factory.NewServerId());
+    ASSERT_TRUE(server_folder.good());
+    server_folder.Put(SERVER_SPECIFICS, specifics);
+    server_folder.Put(BASE_VERSION, 1);
+    server_folder.Put(SERVER_IS_DIR, true);
+    server_folder.Put(SERVER_IS_DEL, false);
+    ASSERT_EQ(datatype, server_folder.GetServerModelType());
+
+    MutableEntry server_item(&trans, CREATE_NEW_UPDATE_ITEM,
+        id_factory.NewServerId());
+    ASSERT_TRUE(server_item.good());
+    server_item.Put(SERVER_SPECIFICS, specifics);
+    server_item.Put(BASE_VERSION, 1);
+    server_item.Put(SERVER_IS_DIR, false);
+    server_item.Put(SERVER_IS_DEL, false);
+    ASSERT_EQ(datatype, server_item.GetServerModelType());
+
+    browser_sync::SyncEntity folder_entity;
+    folder_entity.set_id(id_factory.NewServerId());
+    folder_entity.set_deleted(false);
+    folder_entity.set_folder(true);
+    folder_entity.mutable_specifics()->CopyFrom(specifics);
+    ASSERT_EQ(datatype, folder_entity.GetModelType());
+
+    browser_sync::SyncEntity item_entity;
+    item_entity.set_id(id_factory.NewServerId());
+    item_entity.set_deleted(false);
+    item_entity.set_folder(false);
+    item_entity.mutable_specifics()->CopyFrom(specifics);
+    ASSERT_EQ(datatype, item_entity.GetModelType());
+  }
+}
+
+// A variant of SyncableDirectoryTest that uses a real sqlite database.
+class OnDiskSyncableDirectoryTest : public SyncableDirectoryTest {
+ protected:
+  // SetUp() is called before each test case is run.
+  // The sqlite3 DB is deleted before each test is run.
+  virtual void SetUp() {
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    file_path_ = temp_dir_.path().Append(
+        FILE_PATH_LITERAL("Test.sqlite3"));
+    file_util::Delete(file_path_, true);
+    dir_.reset(new Directory(&encryptor_, &handler_, NULL));
+    ASSERT_TRUE(dir_.get());
+    ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
+                                 &delegate_, NullTransactionObserver()));
+    ASSERT_TRUE(dir_->good());
+  }
+
+  virtual void TearDown() {
+    // This also closes file handles.
+    dir_->SaveChanges();
+    dir_.reset();
+    file_util::Delete(file_path_, true);
+  }
+
+  // Replaces dir_ with a fresh Directory opened on the same sqlite file,
+  // WITHOUT saving first — lets tests check what actually reached disk.
+  void ReloadDir() {
+    dir_.reset(new Directory(&encryptor_, &handler_, NULL));
+    ASSERT_TRUE(dir_.get());
+    ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
+                                 &delegate_, NullTransactionObserver()));
+  }
+
+  // Flushes pending changes to disk, then reopens the directory.
+  void SaveAndReloadDir() {
+    dir_->SaveChanges();
+    ReloadDir();
+  }
+
+  // Replaces dir_ with a TestUnsaveableDirectory on the same file, so that
+  // every subsequent SaveChanges fails.
+  void SwapInUnsaveableDirectory() {
+    dir_.reset();  // Delete the old directory.
+
+    // We first assign the object to a pointer of type TestUnsaveableDirectory
+    // because the OpenUnsaveable function is not available in the parent class.
+    scoped_ptr<TestUnsaveableDirectory> dir(new TestUnsaveableDirectory());
+    ASSERT_TRUE(dir.get());
+    ASSERT_EQ(OPENED, dir->OpenUnsaveable(
+        file_path_, kName, &delegate_, NullTransactionObserver()));
+
+    // Finally, move the unsaveable directory to the dir_ variable.
+    dir_ = dir.Pass();
+  }
+
+  ScopedTempDir temp_dir_;
+  FilePath file_path_;
+};
+
+// Populates the DB with two entries each of BOOKMARKS, PREFERENCES and
+// AUTOFILL, purges the latter two types, and verifies the purge both in
+// memory and after the DB has been saved and reloaded from disk.
+TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
+  sync_pb::EntitySpecifics bookmark_specs;
+  sync_pb::EntitySpecifics autofill_specs;
+  sync_pb::EntitySpecifics preference_specs;
+  AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
+  AddDefaultFieldValue(PREFERENCES, &preference_specs);
+  AddDefaultFieldValue(AUTOFILL, &autofill_specs);
+  dir_->set_initial_sync_ended_for_type(BOOKMARKS, true);
+  dir_->set_initial_sync_ended_for_type(PREFERENCES, true);
+  dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
+
+  syncable::ModelTypeSet types_to_purge(PREFERENCES, AUTOFILL);
+
+  TestIdFactory id_factory;
+  // Create some items for each type.
+  {
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+    // Per type: one local unsynced item, one unapplied server update.
+    MutableEntry item1(&trans, CREATE, trans.root_id(), "Item");
+    ASSERT_TRUE(item1.good());
+    item1.Put(SPECIFICS, bookmark_specs);
+    item1.Put(SERVER_SPECIFICS, bookmark_specs);
+    item1.Put(IS_UNSYNCED, true);
+
+    MutableEntry item2(&trans, CREATE_NEW_UPDATE_ITEM,
+                       id_factory.NewServerId());
+    ASSERT_TRUE(item2.good());
+    item2.Put(SERVER_SPECIFICS, bookmark_specs);
+    item2.Put(IS_UNAPPLIED_UPDATE, true);
+
+    MutableEntry item3(&trans, CREATE, trans.root_id(), "Item");
+    ASSERT_TRUE(item3.good());
+    item3.Put(SPECIFICS, preference_specs);
+    item3.Put(SERVER_SPECIFICS, preference_specs);
+    item3.Put(IS_UNSYNCED, true);
+
+    MutableEntry item4(&trans, CREATE_NEW_UPDATE_ITEM,
+                       id_factory.NewServerId());
+    ASSERT_TRUE(item4.good());
+    item4.Put(SERVER_SPECIFICS, preference_specs);
+    item4.Put(IS_UNAPPLIED_UPDATE, true);
+
+    MutableEntry item5(&trans, CREATE, trans.root_id(), "Item");
+    ASSERT_TRUE(item5.good());
+    item5.Put(SPECIFICS, autofill_specs);
+    item5.Put(SERVER_SPECIFICS, autofill_specs);
+    item5.Put(IS_UNSYNCED, true);
+
+    MutableEntry item6(&trans, CREATE_NEW_UPDATE_ITEM,
+                       id_factory.NewServerId());
+    ASSERT_TRUE(item6.good());
+    item6.Put(SERVER_SPECIFICS, autofill_specs);
+    item6.Put(IS_UNAPPLIED_UPDATE, true);
+  }
+
+  dir_->SaveChanges();
+  {
+    ReadTransaction trans(FROM_HERE, dir_.get());
+    MetahandleSet all_set;
+    GetAllMetaHandles(&trans, &all_set);
+    // 6 created entries plus the root.
+    ASSERT_EQ(7U, all_set.size());
+  }
+
+  dir_->PurgeEntriesWithTypeIn(types_to_purge);
+
+  // We first query the in-memory data, and then reload the directory (without
+  // saving) to verify that disk does not still have the data.
+  CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, true);
+  SaveAndReloadDir();
+  CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, false);
+}
+
+TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
+ dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
+ dir_->set_store_birthday("Jan 31st");
+ dir_->SetNotificationState("notification_state");
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
+ EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
+ EXPECT_EQ("Jan 31st", dir_->store_birthday());
+ EXPECT_EQ("notification_state", dir_->GetNotificationState());
+ }
+ dir_->set_store_birthday("April 10th");
+ dir_->SetNotificationState("notification_state2");
+ dir_->SaveChanges();
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
+ EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
+ EXPECT_EQ("April 10th", dir_->store_birthday());
+ EXPECT_EQ("notification_state2", dir_->GetNotificationState());
+ }
+ dir_->SetNotificationState("notification_state2");
+ // Restore the directory from disk. Make sure that nothing's changed.
+ SaveAndReloadDir();
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
+ EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
+ EXPECT_EQ("April 10th", dir_->store_birthday());
+ EXPECT_EQ("notification_state2", dir_->GetNotificationState());
+ }
+}
+
+TEST_F(OnDiskSyncableDirectoryTest,
+ TestSimpleFieldsPreservedDuringSaveChanges) {
+ Id update_id = TestIdFactory::FromNumber(1);
+ Id create_id;
+ EntryKernel create_pre_save, update_pre_save;
+ EntryKernel create_post_save, update_post_save;
+ std::string create_name = "Create";
+
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ MutableEntry create(&trans, CREATE, trans.root_id(), create_name);
+ MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, update_id);
+ create.Put(IS_UNSYNCED, true);
+ update.Put(IS_UNAPPLIED_UPDATE, true);
+ sync_pb::EntitySpecifics specifics;
+ specifics.mutable_bookmark()->set_favicon("PNG");
+ specifics.mutable_bookmark()->set_url("http://nowhere");
+ create.Put(SPECIFICS, specifics);
+ create_pre_save = create.GetKernelCopy();
+ update_pre_save = update.GetKernelCopy();
+ create_id = create.Get(ID);
+ }
+
+ dir_->SaveChanges();
+ dir_.reset(new Directory(&encryptor_, &handler_, NULL));
+ ASSERT_TRUE(dir_.get());
+ ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
+ &delegate_, NullTransactionObserver()));
+ ASSERT_TRUE(dir_->good());
+
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ Entry create(&trans, GET_BY_ID, create_id);
+ EXPECT_EQ(1, CountEntriesWithName(&trans, trans.root_id(), create_name));
+ Entry update(&trans, GET_BY_ID, update_id);
+ create_post_save = create.GetKernelCopy();
+ update_post_save = update.GetKernelCopy();
+ }
+ int i = BEGIN_FIELDS;
+ for ( ; i < INT64_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((Int64Field)i),
+ create_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((Int64Field)i),
+ update_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ }
+ for ( ; i < TIME_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((TimeField)i),
+ create_post_save.ref((TimeField)i))
+ << "time field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((TimeField)i),
+ update_post_save.ref((TimeField)i))
+ << "time field #" << i << " changed during save/load";
+ }
+ for ( ; i < ID_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((IdField)i),
+ create_post_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((IdField)i),
+ update_pre_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ }
+ for ( ; i < BIT_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((BitField)i),
+ create_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((BitField)i),
+ update_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ }
+ for ( ; i < STRING_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((StringField)i),
+ create_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((StringField)i),
+ update_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ }
+ for ( ; i < PROTO_FIELDS_END; ++i) {
+ EXPECT_EQ(create_pre_save.ref((ProtoField)i).SerializeAsString(),
+ create_post_save.ref((ProtoField)i).SerializeAsString())
+ << "Blob field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((ProtoField)i).SerializeAsString(),
+ update_post_save.ref((ProtoField)i).SerializeAsString())
+ << "Blob field #" << i << " changed during save/load";
+ }
+}
+
+TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailure) {
+ int64 handle1 = 0;
+ // Set up an item using a regular, saveable directory.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
+ ASSERT_TRUE(e1.good());
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ // Make sure the item is no longer dirty after saving,
+ // and make a modification.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+ MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(aguilera.good());
+ EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
+ EXPECT_EQ(aguilera.Get(NON_UNIQUE_NAME), "aguilera");
+ aguilera.Put(NON_UNIQUE_NAME, "overwritten");
+ EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ // Now do some operations using a directory for which SaveChanges will
+ // always fail.
+ SwapInUnsaveableDirectory();
+ ASSERT_TRUE(dir_->good());
+
+ int64 handle2 = 0;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+ MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(aguilera.good());
+ EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
+ EXPECT_EQ(aguilera.Get(NON_UNIQUE_NAME), "overwritten");
+ EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
+ EXPECT_FALSE(IsInDirtyMetahandles(handle1));
+ aguilera.Put(NON_UNIQUE_NAME, "christina");
+ EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+
+ // New item.
+ MutableEntry kids_on_block(&trans, CREATE, trans.root_id(), "kids");
+ ASSERT_TRUE(kids_on_block.good());
+ handle2 = kids_on_block.Get(META_HANDLE);
+ kids_on_block.Put(BASE_VERSION, 1);
+ kids_on_block.Put(IS_DIR, true);
+ kids_on_block.Put(ID, TestIdFactory::FromNumber(102));
+ EXPECT_TRUE(kids_on_block.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle2));
+ }
+
+ // We are using an unsaveable directory, so this can't succeed. However,
+ // the HandleSaveChangesFailure code path should have been triggered.
+ ASSERT_FALSE(dir_->SaveChanges());
+
+ // Make sure things were rolled back and the world is as it was before call.
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ Entry e1(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e1.good());
+ EntryKernel aguilera = e1.GetKernelCopy();
+ Entry kids(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(kids.good());
+ EXPECT_TRUE(kids.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle2));
+ EXPECT_TRUE(aguilera.is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+ }
+}
+
+TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
+ int64 handle1 = 0;
+ // Set up an item using a regular, saveable directory.
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
+ ASSERT_TRUE(e1.good());
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ sync_pb::EntitySpecifics bookmark_specs;
+ AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
+ e1.Put(SPECIFICS, bookmark_specs);
+ e1.Put(SERVER_SPECIFICS, bookmark_specs);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ // Now do some operations using a directory for which SaveChanges will
+ // always fail.
+ SwapInUnsaveableDirectory();
+ ASSERT_TRUE(dir_->good());
+
+ syncable::ModelTypeSet set(BOOKMARKS);
+ dir_->PurgeEntriesWithTypeIn(set);
+ EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+ ASSERT_FALSE(dir_->SaveChanges());
+ EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+}
+
+} // namespace
+
+void SyncableDirectoryTest::ValidateEntry(BaseTransaction* trans,
+ int64 id,
+ bool check_name,
+ const std::string& name,
+ int64 base_version,
+ int64 server_version,
+ bool is_del) {
+ Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
+ ASSERT_TRUE(e.good());
+ if (check_name)
+ ASSERT_TRUE(name == e.Get(NON_UNIQUE_NAME));
+ ASSERT_TRUE(base_version == e.Get(BASE_VERSION));
+ ASSERT_TRUE(server_version == e.Get(SERVER_VERSION));
+ ASSERT_TRUE(is_del == e.Get(IS_DEL));
+}
+
+namespace {
+
+class SyncableDirectoryManagement : public testing::Test {
+ public:
+ virtual void SetUp() {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ }
+
+ virtual void TearDown() {
+ }
+ protected:
+ MessageLoop message_loop_;
+ ScopedTempDir temp_dir_;
+ FakeEncryptor encryptor_;
+ TestUnrecoverableErrorHandler handler_;
+ NullDirectoryChangeDelegate delegate_;
+};
+
+TEST_F(SyncableDirectoryManagement, TestFileRelease) {
+ FilePath path = temp_dir_.path().Append(
+ Directory::kSyncDatabaseFilename);
+
+ syncable::Directory dir(&encryptor_, &handler_, NULL);
+ DirOpenResult result =
+ dir.Open(path, "ScopeTest", &delegate_, NullTransactionObserver());
+ ASSERT_EQ(result, OPENED);
+ dir.Close();
+
+ // Closing the directory should have released the backing database file.
+ ASSERT_TRUE(file_util::Delete(path, true));
+}
+
+class StressTransactionsDelegate : public base::PlatformThread::Delegate {
+ public:
+ StressTransactionsDelegate(Directory* dir, int thread_number)
+ : dir_(dir),
+ thread_number_(thread_number) {}
+
+ private:
+ Directory* const dir_;
+ const int thread_number_;
+
+ // PlatformThread::Delegate methods:
+ virtual void ThreadMain() {
+ int entry_count = 0;
+ std::string path_name;
+
+ for (int i = 0; i < 20; ++i) {
+ const int rand_action = rand() % 10;
+ if (rand_action < 4 && !path_name.empty()) {
+ ReadTransaction trans(FROM_HERE, dir_);
+ CHECK(1 == CountEntriesWithName(&trans, trans.root_id(), path_name));
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
+ rand() % 10));
+ } else {
+ std::string unique_name =
+ base::StringPrintf("%d.%d", thread_number_, entry_count++);
+ path_name.assign(unique_name.begin(), unique_name.end());
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_);
+ MutableEntry e(&trans, CREATE, trans.root_id(), path_name);
+ CHECK(e.good());
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
+ rand() % 20));
+ e.Put(IS_UNSYNCED, true);
+ if (e.Put(ID, TestIdFactory::FromNumber(rand())) &&
+ e.Get(ID).ServerKnows() && !e.Get(ID).IsRoot()) {
+ e.Put(BASE_VERSION, 1);
+ }
+ }
+ }
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(StressTransactionsDelegate);
+};
+
+TEST(SyncableDirectory, StressTransactions) {
+ MessageLoop message_loop;
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FakeEncryptor encryptor;
+ TestUnrecoverableErrorHandler handler;
+ NullDirectoryChangeDelegate delegate;
+ Directory dir(&encryptor, &handler, NULL);
+ FilePath path = temp_dir.path().Append(Directory::kSyncDatabaseFilename);
+ file_util::Delete(path, true);
+ std::string dirname = "stress";
+ dir.Open(path, dirname, &delegate, NullTransactionObserver());
+
+ const int kThreadCount = 7;
+ base::PlatformThreadHandle threads[kThreadCount];
+ scoped_ptr<StressTransactionsDelegate> thread_delegates[kThreadCount];
+
+ for (int i = 0; i < kThreadCount; ++i) {
+ thread_delegates[i].reset(new StressTransactionsDelegate(&dir, i));
+ ASSERT_TRUE(base::PlatformThread::Create(
+ 0, thread_delegates[i].get(), &threads[i]));
+ }
+
+ for (int i = 0; i < kThreadCount; ++i) {
+ base::PlatformThread::Join(threads[i]);
+ }
+
+ dir.Close();
+ file_util::Delete(path, true);
+}
+
+class SyncableClientTagTest : public SyncableDirectoryTest {
+ public:
+ static const int kBaseVersion = 1;
+ const char* test_name_;
+ const char* test_tag_;
+
+ SyncableClientTagTest() : test_name_("test_name"), test_tag_("dietcoke") {}
+
+ bool CreateWithDefaultTag(Id id, bool deleted) {
+ return CreateWithTag(test_tag_, id, deleted);
+ }
+
+ // Attempt to create an entry with a default tag.
+ bool CreateWithTag(const char* tag, Id id, bool deleted) {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), test_name_);
+ CHECK(me.good());
+ me.Put(ID, id);
+ if (id.ServerKnows()) {
+ me.Put(BASE_VERSION, kBaseVersion);
+ }
+ me.Put(IS_DEL, deleted);
+ me.Put(IS_UNSYNCED, true);
+ me.Put(IS_DIR, false);
+ return me.Put(UNIQUE_CLIENT_TAG, tag);
+ }
+
+ // Verify an entry exists with the default tag.
+ void VerifyTag(Id id, bool deleted) {
+ // Should still be present and valid in the client tag index.
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ Entry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ CHECK(me.good());
+ EXPECT_EQ(me.Get(ID), id);
+ EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), test_tag_);
+ EXPECT_EQ(me.Get(IS_DEL), deleted);
+ EXPECT_EQ(me.Get(IS_UNSYNCED), true);
+ }
+
+ protected:
+ TestIdFactory factory_;
+};
+
+TEST_F(SyncableClientTagTest, TestClientTagClear) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ MutableEntry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ EXPECT_TRUE(me.good());
+ me.Put(UNIQUE_CLIENT_TAG, "");
+ }
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ Entry by_tag(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ EXPECT_FALSE(by_tag.good());
+
+ Entry by_id(&trans, GET_BY_ID, server_id);
+ EXPECT_TRUE(by_id.good());
+ EXPECT_TRUE(by_id.Get(UNIQUE_CLIENT_TAG).empty());
+ }
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexServerId) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
+ VerifyTag(server_id, false);
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexClientId) {
+ Id client_id = factory_.NewLocalId();
+ EXPECT_TRUE(CreateWithDefaultTag(client_id, false));
+ VerifyTag(client_id, false);
+}
+
+TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexClientId) {
+ Id client_id = factory_.NewLocalId();
+ EXPECT_TRUE(CreateWithDefaultTag(client_id, true));
+ VerifyTag(client_id, true);
+}
+
+TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexServerId) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, true));
+ VerifyTag(server_id, true);
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexDuplicateServer) {
+ EXPECT_TRUE(CreateWithDefaultTag(factory_.NewServerId(), true));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), true));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), false));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), false));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), true));
+}
+
+} // namespace
+} // namespace syncable
diff --git a/sync/syncable/transaction_observer.h b/sync/syncable/transaction_observer.h
new file mode 100644
index 0000000..bcade47
--- /dev/null
+++ b/sync/syncable/transaction_observer.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
+#define SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
+#pragma once
+
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/syncable.h"
+
+namespace syncable {
+
+class TransactionObserver {
+ public:
+ virtual void OnTransactionWrite(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ ModelTypeSet models_with_changes) = 0;
+ protected:
+ virtual ~TransactionObserver() {}
+};
+
+} // namespace syncable
+
+#endif // SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
diff --git a/sync/test/DEPS b/sync/test/DEPS
new file mode 100644
index 0000000..ad776e9
--- /dev/null
+++ b/sync/test/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ # Test files can include anything from sync.
+ "+sync",
+]
diff --git a/sync/test/engine/fake_model_safe_worker_registrar.cc b/sync/test/engine/fake_model_safe_worker_registrar.cc
new file mode 100644
index 0000000..be4a0d9
--- /dev/null
+++ b/sync/test/engine/fake_model_safe_worker_registrar.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/engine/fake_model_safe_worker_registrar.h"
+
+#include "sync/test/engine/fake_model_worker.h"
+
+namespace browser_sync {
+
+FakeModelSafeWorkerRegistrar::FakeModelSafeWorkerRegistrar(
+ const ModelSafeRoutingInfo& routes) : routes_(routes) {
+ std::set<ModelSafeGroup> groups;
+ for (ModelSafeRoutingInfo::const_iterator it = routes_.begin();
+ it != routes_.end(); ++it) {
+ groups.insert(it->second);
+ }
+ // Sessions always expect a passive worker to be present.
+ groups.insert(GROUP_PASSIVE);
+
+ for (std::set<ModelSafeGroup>::const_iterator it = groups.begin();
+ it != groups.end(); ++it) {
+ workers_.push_back(make_scoped_refptr(new FakeModelWorker(*it)));
+ }
+}
+
+FakeModelSafeWorkerRegistrar::~FakeModelSafeWorkerRegistrar() {}
+
+void FakeModelSafeWorkerRegistrar::GetWorkers(
+ std::vector<ModelSafeWorker*>* out) {
+ for (std::vector<scoped_refptr<ModelSafeWorker> >::const_iterator it =
+ workers_.begin(); it != workers_.end(); ++it) {
+ out->push_back(it->get());
+ }
+}
+
+void FakeModelSafeWorkerRegistrar::GetModelSafeRoutingInfo(
+ ModelSafeRoutingInfo* out) {
+ *out = routes_;
+}
+
+} // namespace browser_sync
diff --git a/sync/test/engine/fake_model_safe_worker_registrar.h b/sync/test/engine/fake_model_safe_worker_registrar.h
new file mode 100644
index 0000000..2eff29b
--- /dev/null
+++ b/sync/test/engine/fake_model_safe_worker_registrar.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_ENGINE_FAKE_MODEL_SAFE_WORKER_REGISTRAR_H_
+#define SYNC_TEST_ENGINE_FAKE_MODEL_SAFE_WORKER_REGISTRAR_H_
+#pragma once
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "sync/engine/model_safe_worker.h"
+
+namespace browser_sync {
+
+class FakeModelSafeWorkerRegistrar : public ModelSafeWorkerRegistrar {
+ public:
+ explicit FakeModelSafeWorkerRegistrar(const ModelSafeRoutingInfo& routes);
+
+ virtual ~FakeModelSafeWorkerRegistrar();
+ virtual void GetWorkers(std::vector<ModelSafeWorker*>* out) OVERRIDE;
+ virtual void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) OVERRIDE;
+
+ private:
+ const ModelSafeRoutingInfo routes_;
+ std::vector<scoped_refptr<ModelSafeWorker> > workers_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeModelSafeWorkerRegistrar);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_ENGINE_FAKE_MODEL_SAFE_WORKER_REGISTRAR_H_
+
diff --git a/sync/test/engine/fake_model_worker.cc b/sync/test/engine/fake_model_worker.cc
new file mode 100644
index 0000000..9938e08
--- /dev/null
+++ b/sync/test/engine/fake_model_worker.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/engine/fake_model_worker.h"
+
+namespace browser_sync {
+
+FakeModelWorker::FakeModelWorker(ModelSafeGroup group) : group_(group) {}
+
+FakeModelWorker::~FakeModelWorker() {
+ // We may need to relax this is FakeModelWorker is used in a
+ // multi-threaded test; since ModelSafeWorkers are
+ // RefCountedThreadSafe, they could theoretically be destroyed from
+ // a different thread.
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+}
+
+SyncerError FakeModelWorker::DoWorkAndWaitUntilDone(
+ const WorkCallback& work) {
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+ // Simply do the work on the current thread.
+ return work.Run();
+}
+
+ModelSafeGroup FakeModelWorker::GetModelSafeGroup() {
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+ return group_;
+}
+
+} // namespace browser_sync
diff --git a/sync/test/engine/fake_model_worker.h b/sync/test/engine/fake_model_worker.h
new file mode 100644
index 0000000..861adc7
--- /dev/null
+++ b/sync/test/engine/fake_model_worker.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_ENGINE_FAKE_MODEL_WORKER_H_
+#define SYNC_TEST_ENGINE_FAKE_MODEL_WORKER_H_
+#pragma once
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/threading/non_thread_safe.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/util/syncer_error.h"
+
+namespace browser_sync {
+
+// Fake implementation of ModelSafeWorker that does work on the
+// current thread regardless of the group.
+class FakeModelWorker : public ModelSafeWorker {
+ public:
+ explicit FakeModelWorker(ModelSafeGroup group);
+
+ // ModelSafeWorker implementation.
+ virtual SyncerError DoWorkAndWaitUntilDone(
+ const WorkCallback& work) OVERRIDE;
+ virtual ModelSafeGroup GetModelSafeGroup() OVERRIDE;
+
+ private:
+ virtual ~FakeModelWorker();
+
+ base::NonThreadSafe non_thread_safe_;
+
+ const ModelSafeGroup group_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeModelWorker);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_ENGINE_FAKE_MODEL_WORKER_H_
+
diff --git a/sync/test/engine/mock_connection_manager.cc b/sync/test/engine/mock_connection_manager.cc
new file mode 100644
index 0000000..f73b14e
--- /dev/null
+++ b/sync/test/engine/mock_connection_manager.cc
@@ -0,0 +1,659 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Mock ServerConnectionManager class for use in client regression tests.
+
+#include "sync/test/engine/mock_connection_manager.h"
+
+#include <map>
+
+#include "base/location.h"
+#include "base/stringprintf.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/test/engine/test_id_factory.h"
+#include "sync/protocol/bookmark_specifics.pb.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using browser_sync::HttpResponse;
+using browser_sync::ServerConnectionManager;
+using browser_sync::ServerConnectionEventListener;
+using browser_sync::ServerConnectionEvent;
+using browser_sync::SyncerProtoUtil;
+using browser_sync::TestIdFactory;
+using std::map;
+using std::string;
+using sync_pb::ClientToServerMessage;
+using sync_pb::ClientToServerResponse;
+using sync_pb::CommitMessage;
+using sync_pb::CommitResponse;
+using sync_pb::CommitResponse_EntryResponse;
+using sync_pb::GetUpdatesMessage;
+using sync_pb::SyncEnums;
+using sync_pb::SyncEntity;
+using syncable::FIRST_REAL_MODEL_TYPE;
+using syncable::MODEL_TYPE_COUNT;
+using syncable::ModelType;
+using syncable::WriteTransaction;
+
+MockConnectionManager::MockConnectionManager(syncable::Directory* directory)
+ : ServerConnectionManager("unused", 0, false, "version"),
+ conflict_all_commits_(false),
+ conflict_n_commits_(0),
+ next_new_id_(10000),
+ store_birthday_("Store BDay!"),
+ store_birthday_sent_(false),
+ client_stuck_(false),
+ commit_time_rename_prepended_string_(""),
+ fail_next_postbuffer_(false),
+ directory_(directory),
+ mid_commit_observer_(NULL),
+ throttling_(false),
+ fail_with_auth_invalid_(false),
+ fail_non_periodic_get_updates_(false),
+ client_command_(NULL),
+ next_position_in_parent_(2),
+ use_legacy_bookmarks_protocol_(false),
+ num_get_updates_requests_(0) {
+ server_reachable_ = true;
+ SetNewTimestamp(0);
+}
+
+MockConnectionManager::~MockConnectionManager() {
+ EXPECT_TRUE(update_queue_.empty()) << "Unfetched updates.";
+}
+
+void MockConnectionManager::SetCommitTimeRename(string prepend) {
+ commit_time_rename_prepended_string_ = prepend;
+}
+
+void MockConnectionManager::SetMidCommitCallback(
+ const base::Closure& callback) {
+ mid_commit_callback_ = callback;
+}
+
+void MockConnectionManager::SetMidCommitObserver(
+ MockConnectionManager::MidCommitObserver* observer) {
+ mid_commit_observer_ = observer;
+}
+
+bool MockConnectionManager::PostBufferToPath(PostBufferParams* params,
+ const string& path,
+ const string& auth_token,
+ browser_sync::ScopedServerStatusWatcher* watcher) {
+ ClientToServerMessage post;
+ CHECK(post.ParseFromString(params->buffer_in));
+ last_request_.CopyFrom(post);
+ client_stuck_ = post.sync_problem_detected();
+ ClientToServerResponse response;
+ response.Clear();
+
+ if (directory_) {
+ // If the Directory's locked when we do this, it's a problem as in normal
+ // use this function could take a while to return because it accesses the
+ // network. As we can't test this we do the next best thing and hang here
+ // when there's an issue.
+ CHECK(directory_->good());
+ WriteTransaction wt(FROM_HERE, syncable::UNITTEST, directory_);
+ }
+
+ if (fail_next_postbuffer_) {
+ fail_next_postbuffer_ = false;
+ return false;
+ }
+
+ if (!server_reachable_) {
+ params->response.server_status = HttpResponse::CONNECTION_UNAVAILABLE;
+ return false;
+ }
+
+ // Default to an ok connection.
+ params->response.server_status = HttpResponse::SERVER_CONNECTION_OK;
+ response.set_error_code(SyncEnums::SUCCESS);
+ const string current_store_birthday = store_birthday();
+ response.set_store_birthday(current_store_birthday);
+ if (post.has_store_birthday() && post.store_birthday() !=
+ current_store_birthday) {
+ response.set_error_code(SyncEnums::NOT_MY_BIRTHDAY);
+ response.set_error_message("Merry Unbirthday!");
+ response.SerializeToString(&params->buffer_out);
+ store_birthday_sent_ = true;
+ return true;
+ }
+ bool result = true;
+ EXPECT_TRUE(!store_birthday_sent_ || post.has_store_birthday() ||
+ post.message_contents() == ClientToServerMessage::AUTHENTICATE);
+ store_birthday_sent_ = true;
+
+ if (post.message_contents() == ClientToServerMessage::COMMIT) {
+ ProcessCommit(&post, &response);
+ } else if (post.message_contents() == ClientToServerMessage::GET_UPDATES) {
+ ProcessGetUpdates(&post, &response);
+ } else if (post.message_contents() == ClientToServerMessage::AUTHENTICATE) {
+ ProcessAuthenticate(&post, &response, auth_token);
+ } else if (post.message_contents() == ClientToServerMessage::CLEAR_DATA) {
+ ProcessClearData(&post, &response);
+ } else {
+ EXPECT_TRUE(false) << "Unknown/unsupported ClientToServerMessage";
+ return false;
+ }
+ if (client_command_.get()) {
+ response.mutable_client_command()->CopyFrom(*client_command_.get());
+ }
+
+ {
+ base::AutoLock lock(response_code_override_lock_);
+ if (throttling_) {
+ response.set_error_code(SyncEnums::THROTTLED);
+ throttling_ = false;
+ }
+
+ if (fail_with_auth_invalid_)
+ response.set_error_code(SyncEnums::AUTH_INVALID);
+ }
+
+ response.SerializeToString(&params->buffer_out);
+ if (post.message_contents() == ClientToServerMessage::COMMIT &&
+ !mid_commit_callback_.is_null()) {
+ mid_commit_callback_.Run();
+ }
+ if (mid_commit_observer_) {
+ mid_commit_observer_->Observe();
+ }
+
+ return result;
+}
+
+bool MockConnectionManager::IsServerReachable() {
+ return true;
+}
+
+bool MockConnectionManager::IsUserAuthenticated() {
+ return true;
+}
+
+sync_pb::GetUpdatesResponse* MockConnectionManager::GetUpdateResponse() {
+ if (update_queue_.empty()) {
+ NextUpdateBatch();
+ }
+ return &update_queue_.back();
+}
+
+void MockConnectionManager::AddDefaultBookmarkData(sync_pb::SyncEntity* entity,
+ bool is_folder) {
+ if (use_legacy_bookmarks_protocol_) {
+ sync_pb::SyncEntity_BookmarkData* data = entity->mutable_bookmarkdata();
+ data->set_bookmark_folder(is_folder);
+
+ if (!is_folder) {
+ data->set_bookmark_url("http://google.com");
+ }
+ } else {
+ entity->set_folder(is_folder);
+ entity->mutable_specifics()->mutable_bookmark();
+ if (!is_folder) {
+ entity->mutable_specifics()->mutable_bookmark()->
+ set_url("http://google.com");
+ }
+ }
+}
+
+SyncEntity* MockConnectionManager::AddUpdateDirectory(int id,
+ int parent_id,
+ string name,
+ int64 version,
+ int64 sync_ts) {
+ return AddUpdateDirectory(TestIdFactory::FromNumber(id),
+ TestIdFactory::FromNumber(parent_id),
+ name,
+ version,
+ sync_ts);
+}
+
+sync_pb::ClientCommand* MockConnectionManager::GetNextClientCommand() {
+ if (!client_command_.get())
+ client_command_.reset(new sync_pb::ClientCommand());
+ return client_command_.get();
+}
+
+SyncEntity* MockConnectionManager::AddUpdateBookmark(int id, int parent_id,
+ string name, int64 version,
+ int64 sync_ts) {
+ return AddUpdateBookmark(TestIdFactory::FromNumber(id),
+ TestIdFactory::FromNumber(parent_id),
+ name,
+ version,
+ sync_ts);
+}
+
+// Queues an update entity carrying caller-supplied |specifics|, an explicit
+// position, and folder-ness.
+SyncEntity* MockConnectionManager::AddUpdateSpecifics(
+    int id, int parent_id, string name, int64 version, int64 sync_ts,
+    bool is_dir, int64 position, const sync_pb::EntitySpecifics& specifics) {
+  SyncEntity* ent = AddUpdateMeta(
+      TestIdFactory::FromNumber(id).GetServerId(),
+      TestIdFactory::FromNumber(parent_id).GetServerId(),
+      name, version, sync_ts);
+  ent->set_position_in_parent(position);
+  ent->mutable_specifics()->CopyFrom(specifics);
+  ent->set_folder(is_dir);
+  return ent;
+}
+
+// Queues an update for the NIGORI root node: parented to the root (id 0),
+// tagged with the NIGORI root tag, and carrying the given |specifics|.
+sync_pb::SyncEntity* MockConnectionManager::SetNigori(
+    int id, int64 version,int64 sync_ts,
+    const sync_pb::EntitySpecifics& specifics) {
+  SyncEntity* ent = GetUpdateResponse()->add_entries();
+  ent->set_id_string(TestIdFactory::FromNumber(id).GetServerId());
+  ent->set_parent_id_string(TestIdFactory::FromNumber(0).GetServerId());
+  ent->set_server_defined_unique_tag(syncable::ModelTypeToRootTag(
+      syncable::NIGORI));
+  ent->set_name("Nigori");
+  ent->set_non_unique_name("Nigori");
+  ent->set_version(version);
+  ent->set_sync_timestamp(sync_ts);
+  ent->set_mtime(sync_ts);
+  ent->set_ctime(1);
+  ent->set_position_in_parent(0);
+  ent->set_folder(false);
+  ent->mutable_specifics()->CopyFrom(specifics);
+  return ent;
+}
+
+// Queues an update with metadata plus default bookmark data (folder or URL
+// item depending on |is_dir|).
+SyncEntity* MockConnectionManager::AddUpdateFull(string id, string parent_id,
+                                                 string name, int64 version,
+                                                 int64 sync_ts, bool is_dir) {
+  SyncEntity* ent = AddUpdateMeta(id, parent_id, name, version, sync_ts);
+  AddDefaultBookmarkData(ent, is_dir);
+  return ent;
+}
+
+// Queues an update populated with only the common metadata fields; callers
+// layer type-specific data on top. Position comes from the decreasing
+// GeneratePositionInParent() counter.
+SyncEntity* MockConnectionManager::AddUpdateMeta(string id, string parent_id,
+                                                 string name, int64 version,
+                                                 int64 sync_ts) {
+  SyncEntity* ent = GetUpdateResponse()->add_entries();
+  ent->set_id_string(id);
+  ent->set_parent_id_string(parent_id);
+  ent->set_non_unique_name(name);
+  ent->set_name(name);
+  ent->set_version(version);
+  ent->set_sync_timestamp(sync_ts);
+  ent->set_mtime(sync_ts);
+  ent->set_ctime(1);
+  ent->set_position_in_parent(GeneratePositionInParent());
+  return ent;
+}
+
+// String-id overload: a directory is a full update with is_dir == true.
+SyncEntity* MockConnectionManager::AddUpdateDirectory(string id,
+                                                      string parent_id,
+                                                      string name,
+                                                      int64 version,
+                                                      int64 sync_ts) {
+  return AddUpdateFull(id, parent_id, name, version, sync_ts, true);
+}
+
+// String-id overload: a bookmark is a full update with is_dir == false.
+SyncEntity* MockConnectionManager::AddUpdateBookmark(string id,
+                                                     string parent_id,
+                                                     string name, int64 version,
+                                                     int64 sync_ts) {
+  return AddUpdateFull(id, parent_id, name, version, sync_ts, false);
+}
+
+// Replays the single entry of the last successful commit as the next update,
+// simulating the GetUpdates that typically follows a commit. Expects exactly
+// one committed entry with a SUCCESS response.
+SyncEntity* MockConnectionManager::AddUpdateFromLastCommit() {
+  EXPECT_EQ(1, last_sent_commit().entries_size());
+  EXPECT_EQ(1, last_commit_response().entryresponse_size());
+  EXPECT_EQ(CommitResponse::SUCCESS,
+      last_commit_response().entryresponse(0).response_type());
+
+  if (last_sent_commit().entries(0).deleted()) {
+    // Deletions come back as tombstones.
+    AddUpdateTombstone(syncable::Id::CreateFromServerId(
+        last_sent_commit().entries(0).id_string()));
+  } else {
+    SyncEntity* ent = GetUpdateResponse()->add_entries();
+    ent->CopyFrom(last_sent_commit().entries(0));
+    ent->clear_insert_after_item_id();
+    ent->clear_old_parent_id();
+    // Overwrite position/version/id with the server-assigned values from the
+    // commit response.
+    ent->set_position_in_parent(
+        last_commit_response().entryresponse(0).position_in_parent());
+    ent->set_version(
+        last_commit_response().entryresponse(0).version());
+    ent->set_id_string(
+        last_commit_response().entryresponse(0).id_string());
+    // Tests don't currently care about the following:
+    // originator_cache_guid, originator_client_item_id, parent_id_string,
+    // name, non_unique_name.
+  }
+  return GetMutableLastUpdate();
+}
+
+void MockConnectionManager::AddUpdateTombstone(const syncable::Id& id) {
+  // Tombstones have only the ID set and dummy values for the required fields.
+  SyncEntity* ent = GetUpdateResponse()->add_entries();
+  ent->set_id_string(id.GetServerId());
+  ent->set_version(0);
+  ent->set_name("");
+  ent->set_deleted(true);
+}
+
+// Replaces the most recent queued update with a tombstone for the same id.
+void MockConnectionManager::SetLastUpdateDeleted() {
+  // Tombstones have only the ID set. Wipe anything else.
+  string id_string = GetMutableLastUpdate()->id_string();
+  GetUpdateResponse()->mutable_entries()->RemoveLast();
+  AddUpdateTombstone(syncable::Id::CreateFromServerId(id_string));
+}
+
+// Sets originator (committing client) identification on the last update.
+void MockConnectionManager::SetLastUpdateOriginatorFields(
+    const string& client_id,
+    const string& entry_id) {
+  GetMutableLastUpdate()->set_originator_cache_guid(client_id);
+  GetMutableLastUpdate()->set_originator_client_item_id(entry_id);
+}
+
+void MockConnectionManager::SetLastUpdateServerTag(const string& tag) {
+  GetMutableLastUpdate()->set_server_defined_unique_tag(tag);
+}
+
+void MockConnectionManager::SetLastUpdateClientTag(const string& tag) {
+  GetMutableLastUpdate()->set_client_defined_unique_tag(tag);
+}
+
+void MockConnectionManager::SetLastUpdatePosition(int64 server_position) {
+  GetMutableLastUpdate()->set_position_in_parent(server_position);
+}
+
+// Encodes |ts| in the progress-marker token of the current update batch.
+void MockConnectionManager::SetNewTimestamp(int ts) {
+  next_token_ = base::StringPrintf("mock connection ts = %d", ts);
+  ApplyToken();
+}
+
+// Stamps next_token_ onto the current update batch as its (single) progress
+// marker. No-op when no batch has been queued yet.
+void MockConnectionManager::ApplyToken() {
+  if (!update_queue_.empty()) {
+    GetUpdateResponse()->clear_new_progress_marker();
+    sync_pb::DataTypeProgressMarker* new_marker =
+        GetUpdateResponse()->add_new_progress_marker();
+    new_marker->set_data_type_id(-1);  // Invalid -- clients shouldn't see.
+    new_marker->set_token(next_token_);
+  }
+}
+
+void MockConnectionManager::SetChangesRemaining(int64 timestamp) {
+  GetUpdateResponse()->set_changes_remaining(timestamp);
+}
+
+// Handles a GET_UPDATES request: validates the client's progress-marker
+// filter against the test's expectations, returns the front batch of
+// update_queue_, and rewrites progress markers so each requested type whose
+// token changed advances to the current token.
+void MockConnectionManager::ProcessGetUpdates(ClientToServerMessage* csm,
+    ClientToServerResponse* response) {
+  CHECK(csm->has_get_updates());
+  ASSERT_EQ(csm->message_contents(), ClientToServerMessage::GET_UPDATES);
+  const GetUpdatesMessage& gu = csm->get_updates();
+  num_get_updates_requests_++;
+  // The deprecated timestamp/requested_types fields must not be used.
+  EXPECT_FALSE(gu.has_from_timestamp());
+  EXPECT_FALSE(gu.has_requested_types());
+
+  if (fail_non_periodic_get_updates_) {
+    EXPECT_EQ(sync_pb::GetUpdatesCallerInfo::PERIODIC,
+              gu.caller_info().source());
+  }
+
+  // Verify that the GetUpdates filter sent by the Syncer matches the test
+  // expectation.
+  for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
+    ModelType model_type = syncable::ModelTypeFromInt(i);
+    sync_pb::DataTypeProgressMarker const* progress_marker =
+        GetProgressMarkerForType(gu.from_progress_marker(), model_type);
+    EXPECT_EQ(expected_filter_.Has(model_type), (progress_marker != NULL))
+        << "Syncer requested_types differs from test expectation.";
+    if (progress_marker) {
+      // The notification hint must match the expected payload (empty if the
+      // test registered none for this type).
+      EXPECT_EQ((expected_payloads_.count(model_type) > 0 ?
+                 expected_payloads_[model_type] :
+                 std::string()),
+                progress_marker->notification_hint());
+    }
+  }
+
+  // Verify that the items we're about to send back to the client are of
+  // the types requested by the client. If this fails, it probably indicates
+  // a test bug.
+  EXPECT_TRUE(gu.fetch_folders());
+  EXPECT_FALSE(gu.has_requested_types());
+  if (update_queue_.empty()) {
+    GetUpdateResponse();
+  }
+  sync_pb::GetUpdatesResponse* updates = &update_queue_.front();
+  for (int i = 0; i < updates->entries_size(); ++i) {
+    if (!updates->entries(i).deleted()) {
+      ModelType entry_type = syncable::GetModelType(updates->entries(i));
+      EXPECT_TRUE(
+          IsModelTypePresentInSpecifics(gu.from_progress_marker(), entry_type))
+          << "Syncer did not request updates being provided by the test.";
+    }
+  }
+
+  response->mutable_get_updates()->CopyFrom(*updates);
+
+  // Set appropriate progress markers, overriding the value squirreled
+  // away by ApplyToken().
+  std::string token = response->get_updates().new_progress_marker(0).token();
+  response->mutable_get_updates()->clear_new_progress_marker();
+  for (int i = 0; i < gu.from_progress_marker_size(); ++i) {
+    if (gu.from_progress_marker(i).token() != token) {
+      sync_pb::DataTypeProgressMarker* new_marker =
+          response->mutable_get_updates()->add_new_progress_marker();
+      new_marker->set_data_type_id(gu.from_progress_marker(i).data_type_id());
+      new_marker->set_token(token);
+    }
+  }
+
+  // This batch has been consumed; the next request sees the next batch.
+  update_queue_.pop_front();
+}
+
+void MockConnectionManager::SetClearUserDataResponseStatus(
+  sync_pb::SyncEnums::ErrorType errortype ) {
+  // Note: this is not a thread-safe set, ok for now. NOT ok if tests
+  // run the syncer on the background thread while this method is called.
+  clear_user_data_response_errortype_ = errortype;
+}
+
+// Handles a CLEAR_DATA request by echoing back the error type configured
+// via SetClearUserDataResponseStatus().
+void MockConnectionManager::ProcessClearData(ClientToServerMessage* csm,
+    ClientToServerResponse* response) {
+  CHECK(csm->has_clear_user_data());
+  ASSERT_EQ(csm->message_contents(), ClientToServerMessage::CLEAR_DATA);
+  response->clear_user_data();
+  response->set_error_code(clear_user_data_response_errortype_);
+}
+
+// Handles an AUTHENTICATE request: succeeds only when |auth_token| matches
+// the token set via SetAuthenticationResponseInfo(). The canned auth
+// response is single-use — it is cleared after being returned.
+void MockConnectionManager::ProcessAuthenticate(
+    ClientToServerMessage* csm,
+    ClientToServerResponse* response,
+    const std::string& auth_token) {
+  ASSERT_EQ(csm->message_contents(), ClientToServerMessage::AUTHENTICATE);
+  EXPECT_FALSE(auth_token.empty());
+
+  if (auth_token != valid_auth_token_) {
+    response->set_error_code(SyncEnums::AUTH_INVALID);
+    return;
+  }
+
+  response->set_error_code(SyncEnums::SUCCESS);
+  response->mutable_authenticate()->CopyFrom(auth_response_);
+  auth_response_.Clear();
+}
+
+// Configures the token that ProcessAuthenticate() accepts and the user
+// identification returned on success.
+void MockConnectionManager::SetAuthenticationResponseInfo(
+    const std::string& valid_auth_token,
+    const std::string& user_display_name,
+    const std::string& user_display_email,
+    const std::string& user_obfuscated_id) {
+  valid_auth_token_ = valid_auth_token;
+  sync_pb::UserIdentification* user = auth_response_.mutable_user();
+  user->set_display_name(user_display_name);
+  user->set_email(user_display_email);
+  user->set_obfuscated_id(user_obfuscated_id);
+}
+
+// Returns true when the current commit entry should get a CONFLICT response:
+// either all commits conflict, or a finite conflict budget
+// (conflict_n_commits_) remains, which is decremented per entry.
+bool MockConnectionManager::ShouldConflictThisCommit() {
+  bool conflict = false;
+  if (conflict_all_commits_) {
+    conflict = true;
+  } else if (conflict_n_commits_ > 0) {
+    conflict = true;
+    --conflict_n_commits_;
+  }
+  return conflict;
+}
+
+// Handles a COMMIT request: records the message, assigns server ids to new
+// items (version 0), remaps parent ids already reassigned in this commit,
+// optionally injects conflicts and commit-time renames, and records the
+// response for later inspection.
+void MockConnectionManager::ProcessCommit(ClientToServerMessage* csm,
+    ClientToServerResponse* response_buffer) {
+  CHECK(csm->has_commit());
+  ASSERT_EQ(csm->message_contents(), ClientToServerMessage::COMMIT);
+  map <string, string> changed_ids;
+  const CommitMessage& commit_message = csm->commit();
+  CommitResponse* commit_response = response_buffer->mutable_commit();
+  commit_messages_->push_back(new CommitMessage);
+  commit_messages_->back()->CopyFrom(commit_message);
+  map<string, CommitResponse_EntryResponse*> response_map;
+  for (int i = 0; i < commit_message.entries_size() ; i++) {
+    const sync_pb::SyncEntity& entry = commit_message.entries(i);
+    CHECK(entry.has_id_string());
+    string id = entry.id_string();
+    ASSERT_LT(entry.name().length(), 256ul) << " name probably too long. True "
+        "server name checking not implemented";
+    if (entry.version() == 0) {
+      // Relies on our new item string id format. (string representation of a
+      // negative number).
+      committed_ids_.push_back(syncable::Id::CreateFromClientString(id));
+    } else {
+      committed_ids_.push_back(syncable::Id::CreateFromServerId(id));
+    }
+    // Reuse the response slot if the same id appears more than once.
+    if (response_map.end() == response_map.find(id))
+      response_map[id] = commit_response->add_entryresponse();
+    CommitResponse_EntryResponse* er = response_map[id];
+    if (ShouldConflictThisCommit()) {
+      er->set_response_type(CommitResponse::CONFLICT);
+      continue;
+    }
+    er->set_response_type(CommitResponse::SUCCESS);
+    er->set_version(entry.version() + 1);
+    if (!commit_time_rename_prepended_string_.empty()) {
+      // Commit time rename sent down from the server.
+      er->set_name(commit_time_rename_prepended_string_ + entry.name());
+    }
+    string parent_id = entry.parent_id_string();
+    // Remap id's we've already assigned.
+    if (changed_ids.end() != changed_ids.find(parent_id)) {
+      parent_id = changed_ids[parent_id];
+      er->set_parent_id_string(parent_id);
+    }
+    if (entry.has_version() && 0 != entry.version()) {
+      er->set_id_string(id);  // Allows verification.
+    } else {
+      // Brand-new item: mint a server id and remember the mapping so later
+      // entries in this commit can remap their parent references.
+      string new_id = base::StringPrintf("mock_server:%d", next_new_id_++);
+      changed_ids[id] = new_id;
+      er->set_id_string(new_id);
+    }
+  }
+  commit_responses_->push_back(new CommitResponse(*commit_response));
+}
+
+// syncable::Id overload; converts to server-id strings and delegates.
+SyncEntity* MockConnectionManager::AddUpdateDirectory(
+    syncable::Id id, syncable::Id parent_id, string name, int64 version,
+    int64 sync_ts) {
+  return AddUpdateDirectory(id.GetServerId(), parent_id.GetServerId(),
+                            name, version, sync_ts);
+}
+
+// syncable::Id overload; converts to server-id strings and delegates.
+SyncEntity* MockConnectionManager::AddUpdateBookmark(
+    syncable::Id id, syncable::Id parent_id, string name, int64 version,
+    int64 sync_ts) {
+  return AddUpdateBookmark(id.GetServerId(), parent_id.GetServerId(),
+                           name, version, sync_ts);
+}
+
+// Returns the most recently added entry in the current batch for mutation.
+// Expects at least one update to have been added.
+SyncEntity* MockConnectionManager::GetMutableLastUpdate() {
+  sync_pb::GetUpdatesResponse* updates = GetUpdateResponse();
+  EXPECT_GT(updates->entries_size(), 0);
+  return updates->mutable_entries()->Mutable(updates->entries_size() - 1);
+}
+
+// Starts a fresh GetUpdates batch at the back of the queue, with zero
+// changes remaining and the current token applied.
+void MockConnectionManager::NextUpdateBatch() {
+  update_queue_.push_back(sync_pb::GetUpdatesResponse::default_instance());
+  SetChangesRemaining(0);
+  ApplyToken();
+}
+
+// Returns the most recent commit message; expects at least one commit.
+const CommitMessage& MockConnectionManager::last_sent_commit() const {
+  EXPECT_TRUE(!commit_messages_.empty());
+  return *commit_messages_->back();
+}
+
+// Returns the most recent commit response; expects at least one commit.
+const CommitResponse& MockConnectionManager::last_commit_response() const {
+  EXPECT_TRUE(!commit_responses_.empty());
+  return *commit_responses_->back();
+}
+
+// Turns on throttling; |visitor|, if given, is notified while the override
+// lock is held so tests can update their own state atomically.
+void MockConnectionManager::ThrottleNextRequest(
+    ResponseCodeOverrideRequestor* visitor) {
+  base::AutoLock lock(response_code_override_lock_);
+  throttling_ = true;
+  if (visitor)
+    visitor->OnOverrideComplete();
+}
+
+// Starts failing requests with AUTH_INVALID; visitor semantics as above.
+void MockConnectionManager::FailWithAuthInvalid(
+    ResponseCodeOverrideRequestor* visitor) {
+  base::AutoLock lock(response_code_override_lock_);
+  fail_with_auth_invalid_ = true;
+  if (visitor)
+    visitor->OnOverrideComplete();
+}
+
+// Stops failing requests with AUTH_INVALID; visitor semantics as above.
+void MockConnectionManager::StopFailingWithAuthInvalid(
+    ResponseCodeOverrideRequestor* visitor) {
+  base::AutoLock lock(response_code_override_lock_);
+  fail_with_auth_invalid_ = false;
+  if (visitor)
+    visitor->OnOverrideComplete();
+}
+
+// Returns true if |filter| contains a progress marker whose data_type_id
+// corresponds to model type |value|.
+bool MockConnectionManager::IsModelTypePresentInSpecifics(
+    const google::protobuf::RepeatedPtrField<
+        sync_pb::DataTypeProgressMarker>& filter,
+    syncable::ModelType value) {
+  int data_type_id = syncable::GetSpecificsFieldNumberFromModelType(value);
+  for (int i = 0; i < filter.size(); ++i) {
+    if (filter.Get(i).data_type_id() == data_type_id) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Returns the progress marker in |filter| for model type |value|, or NULL
+// if the type is not present.
+sync_pb::DataTypeProgressMarker const*
+    MockConnectionManager::GetProgressMarkerForType(
+        const google::protobuf::RepeatedPtrField<
+            sync_pb::DataTypeProgressMarker>& filter,
+        syncable::ModelType value) {
+  int data_type_id = syncable::GetSpecificsFieldNumberFromModelType(value);
+  for (int i = 0; i < filter.size(); ++i) {
+    if (filter.Get(i).data_type_id() == data_type_id) {
+      return &(filter.Get(i));
+    }
+  }
+  return NULL;
+}
+
+// Marks the server reachable and broadcasts the state change to all
+// registered ServerConnectionEventListeners.
+void MockConnectionManager::SetServerReachable() {
+  server_status_ = HttpResponse::SERVER_CONNECTION_OK;
+  server_reachable_ = true;
+
+  FOR_EACH_OBSERVER(ServerConnectionEventListener, listeners_,
+     OnServerConnectionEvent(
+         ServerConnectionEvent(server_status_, server_reachable_)));
+}
+
+// Marks the server unreachable and broadcasts the state change to all
+// registered ServerConnectionEventListeners.
+void MockConnectionManager::SetServerNotReachable() {
+  server_status_ = HttpResponse::CONNECTION_UNAVAILABLE;
+  server_reachable_ = false;
+
+  FOR_EACH_OBSERVER(ServerConnectionEventListener, listeners_,
+     OnServerConnectionEvent(
+         ServerConnectionEvent(server_status_, server_reachable_)));
+}
diff --git a/sync/test/engine/mock_connection_manager.h b/sync/test/engine/mock_connection_manager.h
new file mode 100644
index 0000000..c3202a5
--- /dev/null
+++ b/sync/test/engine/mock_connection_manager.h
@@ -0,0 +1,363 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Mock ServerConnectionManager class for use in client unit tests.
+
+#ifndef SYNC_TEST_ENGINE_MOCK_CONNECTION_MANAGER_H_
+#define SYNC_TEST_ENGINE_MOCK_CONNECTION_MANAGER_H_
+#pragma once
+
+#include <bitset>
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_vector.h"
+#include "sync/engine/net/server_connection_manager.h"
+#include "sync/syncable/model_type.h"
+#include "sync/syncable/model_type_payload_map.h"
+#include "sync/protocol/sync.pb.h"
+
+class MockConnectionManager : public browser_sync::ServerConnectionManager {
+ public:
+  // Notified from within PostBufferToPath after a commit is processed.
+  class MidCommitObserver {
+   public:
+    virtual void Observe() = 0;
+
+   protected:
+    virtual ~MidCommitObserver() {}
+  };
+
+  explicit MockConnectionManager(syncable::Directory*);
+  virtual ~MockConnectionManager();
+
+  // Overridden ServerConnectionManager functions.
+  virtual bool PostBufferToPath(
+      PostBufferParams*,
+      const std::string& path,
+      const std::string& auth_token,
+      browser_sync::ScopedServerStatusWatcher* watcher) OVERRIDE;
+
+  virtual bool IsServerReachable() OVERRIDE;
+  virtual bool IsUserAuthenticated() OVERRIDE;
+
+  // Control of commit response.
+  void SetMidCommitCallback(const base::Closure& callback);
+  void SetMidCommitObserver(MidCommitObserver* observer);
+
+  // Set this if you want commit to perform commit time rename. Will request
+  // that the client renames all committed entries, prepending this string.
+  void SetCommitTimeRename(std::string prepend);
+
+  // Generic versions of AddUpdate functions. Tests using these function should
+  // compile for both the int64 and string id based versions of the server.
+  // The SyncEntity returned is only valid until the Sync is completed
+  // (e.g. with SyncShare.) It allows to add further entity properties before
+  // sync, using SetLastXXX() methods and/or GetMutableLastUpdate().
+  sync_pb::SyncEntity* AddUpdateDirectory(syncable::Id id,
+                                          syncable::Id parent_id,
+                                          std::string name,
+                                          int64 version,
+                                          int64 sync_ts);
+  sync_pb::SyncEntity* AddUpdateBookmark(syncable::Id id,
+                                         syncable::Id parent_id,
+                                         std::string name,
+                                         int64 version,
+                                         int64 sync_ts);
+  // Versions of the AddUpdate functions that accept integer IDs.
+  sync_pb::SyncEntity* AddUpdateDirectory(int id,
+                                          int parent_id,
+                                          std::string name,
+                                          int64 version,
+                                          int64 sync_ts);
+  sync_pb::SyncEntity* AddUpdateBookmark(int id,
+                                         int parent_id,
+                                         std::string name,
+                                         int64 version,
+                                         int64 sync_ts);
+  // New protocol versions of the AddUpdate functions.
+  sync_pb::SyncEntity* AddUpdateDirectory(std::string id,
+                                          std::string parent_id,
+                                          std::string name,
+                                          int64 version,
+                                          int64 sync_ts);
+  sync_pb::SyncEntity* AddUpdateBookmark(std::string id,
+                                         std::string parent_id,
+                                         std::string name,
+                                         int64 version,
+                                         int64 sync_ts);
+  // Versions of the AddUpdate function that accept specifics.
+  sync_pb::SyncEntity* AddUpdateSpecifics(int id, int parent_id,
+      std::string name,int64 version, int64 sync_ts, bool is_dir,
+      int64 position, const sync_pb::EntitySpecifics& specifics);
+  sync_pb::SyncEntity* SetNigori(int id, int64 version, int64 sync_ts,
+      const sync_pb::EntitySpecifics& specifics);
+
+  // Find the last commit sent by the client, and replay it for the next get
+  // updates command. This can be used to simulate the GetUpdates that happens
+  // immediately after a successful commit.
+  sync_pb::SyncEntity* AddUpdateFromLastCommit();
+
+  // Add a deleted item. Deletion records typically contain no
+  // additional information beyond the deletion, and no specifics.
+  // The server may send the originator fields.
+  void AddUpdateTombstone(const syncable::Id& id);
+
+  // Mutators for the most recently queued update entity.
+  void SetLastUpdateDeleted();
+  void SetLastUpdateServerTag(const std::string& tag);
+  void SetLastUpdateClientTag(const std::string& tag);
+  void SetLastUpdateOriginatorFields(const std::string& client_id,
+                                     const std::string& entry_id);
+  void SetLastUpdatePosition(int64 position_in_parent);
+  void SetNewTimestamp(int ts);
+  void SetChangesRemaining(int64 count);
+
+  // Add a new batch of updates after the current one. Allows multiple
+  // GetUpdates responses to be buffered up, since the syncer may
+  // issue multiple requests during a sync cycle.
+  void NextUpdateBatch();
+
+  // For AUTHENTICATE responses.
+  void SetAuthenticationResponseInfo(const std::string& valid_auth_token,
+                                     const std::string& user_display_name,
+                                     const std::string& user_display_email,
+                                     const std::string& user_obfuscated_id);
+
+  void FailNextPostBufferToPathCall() { fail_next_postbuffer_ = true; }
+
+  void SetClearUserDataResponseStatus(sync_pb::SyncEnums::ErrorType errortype);
+
+  // A visitor class to allow a test to change some monitoring state atomically
+  // with the action of overriding the response codes sent back to the Syncer
+  // (for example, so you can say "ThrottleNextRequest, and assert no more
+  // requests are made once throttling is in effect" in one step.)
+  class ResponseCodeOverrideRequestor {
+   public:
+    // Called with response_code_override_lock_ acquired.
+    virtual void OnOverrideComplete() = 0;
+
+   protected:
+    virtual ~ResponseCodeOverrideRequestor() {}
+  };
+  void ThrottleNextRequest(ResponseCodeOverrideRequestor* visitor);
+  void FailWithAuthInvalid(ResponseCodeOverrideRequestor* visitor);
+  void StopFailingWithAuthInvalid(ResponseCodeOverrideRequestor* visitor);
+  void FailNonPeriodicGetUpdates() { fail_non_periodic_get_updates_ = true; }
+
+  // Simple inspectors.
+  bool client_stuck() const { return client_stuck_; }
+
+  sync_pb::ClientCommand* GetNextClientCommand();
+
+  const std::vector<syncable::Id>& committed_ids() const {
+    return committed_ids_;
+  }
+  const std::vector<sync_pb::CommitMessage*>& commit_messages() const {
+    return commit_messages_.get();
+  }
+  const std::vector<sync_pb::CommitResponse*>& commit_responses() const {
+    return commit_responses_.get();
+  }
+  // Retrieve the last sent commit message.
+  const sync_pb::CommitMessage& last_sent_commit() const;
+
+  // Retrieve the last returned commit response.
+  const sync_pb::CommitResponse& last_commit_response() const;
+
+  // Retrieve the last request submitted to the server (regardless of type).
+  const sync_pb::ClientToServerMessage& last_request() const {
+    return last_request_;
+  }
+
+  void set_conflict_all_commits(bool value) {
+    conflict_all_commits_ = value;
+  }
+  void set_next_new_id(int value) {
+    next_new_id_ = value;
+  }
+  void set_conflict_n_commits(int value) {
+    conflict_n_commits_ = value;
+  }
+
+  void set_use_legacy_bookmarks_protocol(bool value) {
+    use_legacy_bookmarks_protocol_ = value;
+  }
+
+  void set_store_birthday(std::string new_birthday) {
+    // Multiple threads can set store_birthday_ in our tests, need to lock it to
+    // ensure atomic read/writes and avoid race conditions.
+    base::AutoLock lock(store_birthday_lock_);
+    store_birthday_ = new_birthday;
+  }
+
+  // Retrieve the number of GetUpdates requests that the mock server has
+  // seen since the last time this function was called. Can be used to
+  // verify that a GetUpdates actually did or did not happen after running
+  // the syncer.
+  int GetAndClearNumGetUpdatesRequests() {
+    int result = num_get_updates_requests_;
+    num_get_updates_requests_ = 0;
+    return result;
+  }
+
+  // Expect that GetUpdates will request exactly the types indicated in
+  // the bitset.
+  void ExpectGetUpdatesRequestTypes(
+      syncable::ModelTypeSet expected_filter) {
+    expected_filter_ = expected_filter;
+  }
+
+  // Expect that GetUpdates progress markers carry these notification hints.
+  void ExpectGetUpdatesRequestPayloads(
+      const syncable::ModelTypePayloadMap& payloads) {
+    expected_payloads_ = payloads;
+  }
+
+  void SetServerReachable();
+
+  void SetServerNotReachable();
+
+  // Return by copy to be thread-safe.
+  const std::string store_birthday() {
+    base::AutoLock lock(store_birthday_lock_);
+    return store_birthday_;
+  }
+
+  // Locate the most recent update message for purpose of alteration.
+  sync_pb::SyncEntity* GetMutableLastUpdate();
+
+ private:
+  sync_pb::SyncEntity* AddUpdateFull(syncable::Id id, syncable::Id parentid,
+                                     std::string name, int64 version,
+                                     int64 sync_ts,
+                                     bool is_dir);
+  sync_pb::SyncEntity* AddUpdateFull(std::string id,
+                                     std::string parentid, std::string name,
+                                     int64 version, int64 sync_ts,
+                                     bool is_dir);
+  sync_pb::SyncEntity* AddUpdateMeta(std::string id, std::string parentid,
+                                    std::string name, int64 version,
+                                    int64 sync_ts);
+
+  // Functions to handle the various types of server request.
+  void ProcessGetUpdates(sync_pb::ClientToServerMessage* csm,
+                         sync_pb::ClientToServerResponse* response);
+  void ProcessAuthenticate(sync_pb::ClientToServerMessage* csm,
+                           sync_pb::ClientToServerResponse* response,
+                           const std::string& auth_token);
+  void ProcessCommit(sync_pb::ClientToServerMessage* csm,
+                     sync_pb::ClientToServerResponse* response_buffer);
+  void ProcessClearData(sync_pb::ClientToServerMessage* csm,
+                        sync_pb::ClientToServerResponse* response);
+  void AddDefaultBookmarkData(sync_pb::SyncEntity* entity, bool is_folder);
+
+  // Determine if one entry in a commit should be rejected with a conflict.
+  bool ShouldConflictThisCommit();
+
+  // Generate a numeric position_in_parent value. We use a global counter
+  // that only decreases; this simulates new objects always being added to the
+  // front of the ordering.
+  int64 GeneratePositionInParent() {
+    return next_position_in_parent_--;
+  }
+
+  // Get a mutable update response which will eventually be returned to the
+  // client.
+  sync_pb::GetUpdatesResponse* GetUpdateResponse();
+  void ApplyToken();
+
+  // Determine whether a progress marker array (like that sent in
+  // GetUpdates.from_progress_marker) indicates that a particular ModelType
+  // should be included.
+  bool IsModelTypePresentInSpecifics(
+      const google::protobuf::RepeatedPtrField<
+          sync_pb::DataTypeProgressMarker>& filter,
+      syncable::ModelType value);
+
+  sync_pb::DataTypeProgressMarker const* GetProgressMarkerForType(
+      const google::protobuf::RepeatedPtrField<
+          sync_pb::DataTypeProgressMarker>& filter,
+      syncable::ModelType value);
+
+  // All IDs that have been committed.
+  std::vector<syncable::Id> committed_ids_;
+
+  // Control of when/if we return conflicts.
+  bool conflict_all_commits_;
+  int conflict_n_commits_;
+
+  // Commit messages we've sent, and responses we've returned.
+  ScopedVector<sync_pb::CommitMessage> commit_messages_;
+  ScopedVector<sync_pb::CommitResponse> commit_responses_;
+
+  // The next id the mock will return to a commit.
+  int next_new_id_;
+
+  // The store birthday we send to the client.
+  std::string store_birthday_;
+  base::Lock store_birthday_lock_;
+  bool store_birthday_sent_;
+  bool client_stuck_;
+  std::string commit_time_rename_prepended_string_;
+
+  // Fail on the next call to PostBufferToPath().
+  bool fail_next_postbuffer_;
+
+  // Our directory. Used only to ensure that we are not holding the transaction
+  // lock when performing network I/O. Can be NULL if the test author is
+  // confident this can't happen.
+  syncable::Directory* directory_;
+
+  // The updates we'll return to the next request.
+  std::list<sync_pb::GetUpdatesResponse> update_queue_;
+  base::Closure mid_commit_callback_;
+  MidCommitObserver* mid_commit_observer_;
+
+  // The clear data response we'll return in the next response
+  sync_pb::SyncEnums::ErrorType clear_user_data_response_errortype_;
+
+  // The AUTHENTICATE response we'll return for auth requests.
+  sync_pb::AuthenticateResponse auth_response_;
+  // What we use to determine if we should return SUCCESS or BAD_AUTH_TOKEN.
+  std::string valid_auth_token_;
+
+  // Whether we are faking a server mandating clients to throttle requests.
+  // Protected by |response_code_override_lock_|.
+  bool throttling_;
+
+  // Whether we are failing all requests by returning
+  // ClientToServerResponse::AUTH_INVALID.
+  // Protected by |response_code_override_lock_|.
+  bool fail_with_auth_invalid_;
+
+  base::Lock response_code_override_lock_;
+
+  // True if we are only accepting GetUpdatesCallerInfo::PERIODIC requests.
+  bool fail_non_periodic_get_updates_;
+
+  // Canned client command attached to responses; see GetNextClientCommand().
+  scoped_ptr<sync_pb::ClientCommand> client_command_;
+
+  // The next value to use for the position_in_parent property.
+  int64 next_position_in_parent_;
+
+  // The default is to use the newer sync_pb::BookmarkSpecifics-style protocol.
+  // If this option is set to true, then the MockConnectionManager will
+  // use the older sync_pb::SyncEntity_BookmarkData-style protocol.
+  bool use_legacy_bookmarks_protocol_;
+
+  // Test expectations checked against incoming GetUpdates requests.
+  syncable::ModelTypeSet expected_filter_;
+
+  syncable::ModelTypePayloadMap expected_payloads_;
+
+  int num_get_updates_requests_;
+
+  // Progress-marker token applied to the next update batch.
+  std::string next_token_;
+
+  sync_pb::ClientToServerMessage last_request_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockConnectionManager);
+};
+
+#endif // SYNC_TEST_ENGINE_MOCK_CONNECTION_MANAGER_H_
diff --git a/sync/test/engine/syncer_command_test.cc b/sync/test/engine/syncer_command_test.cc
new file mode 100644
index 0000000..53af26a
--- /dev/null
+++ b/sync/test/engine/syncer_command_test.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/engine/syncer_command_test.h"
+
+namespace browser_sync {
+
+
+SyncerCommandTestBase::SyncerCommandTestBase() {
+}
+
+SyncerCommandTestBase::~SyncerCommandTestBase() {
+}
+
+// Resets the session context and registers the mandatory passive worker.
+void SyncerCommandTestBase::SetUp() {
+  ResetContext();
+  // The session always expects there to be a passive worker.
+  workers()->push_back(
+      make_scoped_refptr(new FakeModelWorker(GROUP_PASSIVE)));
+}
+
+void SyncerCommandTestBase::TearDown() {
+}
+
+// Returns the real on-disk test directory managed by dir_maker_.
+Directory* SyncerCommandTest::directory() {
+  return dir_maker_.directory();
+}
+
+// Brings up the test directory before base-class setup so the context can
+// use it.
+void SyncerCommandTest::SetUp() {
+  dir_maker_.SetUp();
+  SyncerCommandTestBase::SetUp();
+}
+
+void SyncerCommandTest::TearDown() {
+  dir_maker_.TearDown();
+}
+
+MockDebugInfoGetter::MockDebugInfoGetter() {
+}
+
+MockDebugInfoGetter::~MockDebugInfoGetter() {
+}
+
+MockDirectorySyncerCommandTest::MockDirectorySyncerCommandTest()
+    :mock_directory_(&handler_) {
+}
+
+MockDirectorySyncerCommandTest::~MockDirectorySyncerCommandTest() {
+}
+
+void MockDirectorySyncerCommandTest::SetUp() {
+  // The session always expects there to be a passive worker.
+  workers()->push_back(
+      make_scoped_refptr(new FakeModelWorker(GROUP_PASSIVE)));
+  ResetContext();
+}
+
+// Returns the mock (in-memory) directory instead of a real one.
+Directory* MockDirectorySyncerCommandTest::directory() {
+  return &mock_directory_;
+}
+
+}  // namespace browser_sync
diff --git a/sync/test/engine/syncer_command_test.h b/sync/test/engine/syncer_command_test.h
new file mode 100644
index 0000000..3989def
--- /dev/null
+++ b/sync/test/engine/syncer_command_test.h
@@ -0,0 +1,233 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_ENGINE_SYNCER_COMMAND_TEST_H_
+#define SYNC_TEST_ENGINE_SYNCER_COMMAND_TEST_H_
+#pragma once
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop.h"
+#include "sync/engine/model_changing_syncer_command.h"
+#include "sync/engine/model_safe_worker.h"
+#include "sync/sessions/debug_info_getter.h"
+#include "sync/sessions/sync_session.h"
+#include "sync/sessions/sync_session_context.h"
+#include "sync/syncable/syncable_mock.h"
+#include "sync/test/engine/mock_connection_manager.h"
+#include "sync/test/engine/fake_model_worker.h"
+#include "sync/test/engine/test_directory_setter_upper.h"
+#include "sync/test/fake_extensions_activity_monitor.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::NiceMock;
+
+namespace browser_sync {
+
+class MockDebugInfoGetter : public browser_sync::sessions::DebugInfoGetter {
+ public:
+ MockDebugInfoGetter();
+ virtual ~MockDebugInfoGetter();
+ MOCK_METHOD1(GetAndClearDebugInfo, void(sync_pb::DebugInfo* debug_info));
+};
+
+// A test fixture that simplifies writing unit tests for individual
+// SyncerCommands, providing convenient access to a test directory
+// and a syncer session.
+class SyncerCommandTestBase : public testing::Test,
+ public sessions::SyncSession::Delegate,
+ public ModelSafeWorkerRegistrar {
+ public:
+ enum UseMockDirectory {
+ USE_MOCK_DIRECTORY
+ };
+
+ // SyncSession::Delegate implementation.
+ virtual void OnSilencedUntil(
+ const base::TimeTicks& silenced_until) OVERRIDE {
+ FAIL() << "Should not get silenced.";
+ }
+ virtual bool IsSyncingCurrentlySilenced() OVERRIDE {
+ return false;
+ }
+ virtual void OnReceivedLongPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ FAIL() << "Should not get poll interval update.";
+ }
+ virtual void OnReceivedShortPollIntervalUpdate(
+ const base::TimeDelta& new_interval) OVERRIDE {
+ FAIL() << "Should not get poll interval update.";
+ }
+ virtual void OnReceivedSessionsCommitDelay(
+ const base::TimeDelta& new_delay) OVERRIDE {
+ FAIL() << "Should not get sessions commit delay.";
+ }
+ virtual void OnShouldStopSyncingPermanently() OVERRIDE {
+ FAIL() << "Shouldn't be called.";
+ }
+ virtual void OnSyncProtocolError(
+ const sessions::SyncSessionSnapshot& session) OVERRIDE {
+ return;
+ }
+
+ // ModelSafeWorkerRegistrar implementation.
+ virtual void GetWorkers(std::vector<ModelSafeWorker*>* out) OVERRIDE {
+ std::vector<scoped_refptr<ModelSafeWorker> >::iterator it;
+ for (it = workers_.begin(); it != workers_.end(); ++it)
+ out->push_back(*it);
+ }
+ virtual void GetModelSafeRoutingInfo(ModelSafeRoutingInfo* out) OVERRIDE {
+ ModelSafeRoutingInfo copy(routing_info_);
+ out->swap(copy);
+ }
+
+ protected:
+ SyncerCommandTestBase();
+
+ virtual ~SyncerCommandTestBase();
+ virtual void SetUp();
+ virtual void TearDown();
+
+ sessions::SyncSessionContext* context() const { return context_.get(); }
+ sessions::SyncSession::Delegate* delegate() { return this; }
+ ModelSafeWorkerRegistrar* registrar() { return this; }
+
+ // Lazily create a session requesting all datatypes with no payload.
+ sessions::SyncSession* session() {
+ syncable::ModelTypePayloadMap types =
+ syncable::ModelTypePayloadMapFromRoutingInfo(routing_info_,
+ std::string());
+ return session(sessions::SyncSourceInfo(types));
+ }
+
+ // Create a session with the provided source.
+ sessions::SyncSession* session(const sessions::SyncSourceInfo& source) {
+ if (!session_.get()) {
+ std::vector<ModelSafeWorker*> workers;
+ GetWorkers(&workers);
+ session_.reset(new sessions::SyncSession(context(), delegate(), source,
+ routing_info_, workers));
+ }
+ return session_.get();
+ }
+
+ void ClearSession() {
+ session_.reset();
+ }
+
+ void ResetContext() {
+ context_.reset(new sessions::SyncSessionContext(
+ mock_server_.get(), directory(),
+ registrar(), &extensions_activity_monitor_,
+ std::vector<SyncEngineEventListener*>(),
+ &mock_debug_info_getter_));
+ context_->set_account_name(directory()->name());
+ ClearSession();
+ }
+
+ // Install a MockServerConnection. Resets the context. By default,
+ // the context does not have a MockServerConnection attached.
+ void ConfigureMockServerConnection() {
+ mock_server_.reset(new MockConnectionManager(directory()));
+ ResetContext();
+ }
+
+ virtual syncable::Directory* directory() = 0;
+
+ std::vector<scoped_refptr<ModelSafeWorker> >* workers() {
+ return &workers_;
+ }
+
+ const ModelSafeRoutingInfo& routing_info() { return routing_info_; }
+ ModelSafeRoutingInfo* mutable_routing_info() { return &routing_info_; }
+
+ MockConnectionManager* mock_server() {
+ return mock_server_.get();
+ }
+
+ MockDebugInfoGetter* mock_debug_info_getter() {
+ return &mock_debug_info_getter_;
+ }
+
+ // Helper functions to check command.GetGroupsToChange().
+
+ void ExpectNoGroupsToChange(const ModelChangingSyncerCommand& command) {
+ EXPECT_TRUE(command.GetGroupsToChangeForTest(*session()).empty());
+ }
+
+ void ExpectGroupToChange(
+ const ModelChangingSyncerCommand& command, ModelSafeGroup group) {
+ std::set<ModelSafeGroup> expected_groups_to_change;
+ expected_groups_to_change.insert(group);
+ EXPECT_EQ(expected_groups_to_change,
+ command.GetGroupsToChangeForTest(*session()));
+ }
+
+ void ExpectGroupsToChange(
+ const ModelChangingSyncerCommand& command,
+ ModelSafeGroup group1, ModelSafeGroup group2) {
+ std::set<ModelSafeGroup> expected_groups_to_change;
+ expected_groups_to_change.insert(group1);
+ expected_groups_to_change.insert(group2);
+ EXPECT_EQ(expected_groups_to_change,
+ command.GetGroupsToChangeForTest(*session()));
+ }
+
+ void ExpectGroupsToChange(
+ const ModelChangingSyncerCommand& command,
+ ModelSafeGroup group1, ModelSafeGroup group2, ModelSafeGroup group3) {
+ std::set<ModelSafeGroup> expected_groups_to_change;
+ expected_groups_to_change.insert(group1);
+ expected_groups_to_change.insert(group2);
+ expected_groups_to_change.insert(group3);
+ EXPECT_EQ(expected_groups_to_change,
+ command.GetGroupsToChangeForTest(*session()));
+ }
+
+ private:
+ MessageLoop message_loop_;
+ scoped_ptr<sessions::SyncSessionContext> context_;
+ scoped_ptr<MockConnectionManager> mock_server_;
+ scoped_ptr<sessions::SyncSession> session_;
+ std::vector<scoped_refptr<ModelSafeWorker> > workers_;
+ ModelSafeRoutingInfo routing_info_;
+ NiceMock<MockDebugInfoGetter> mock_debug_info_getter_;
+ FakeExtensionsActivityMonitor extensions_activity_monitor_;
+ DISALLOW_COPY_AND_ASSIGN(SyncerCommandTestBase);
+};
+
+class SyncerCommandTest : public SyncerCommandTestBase {
+ public:
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+ virtual Directory* directory() OVERRIDE;
+
+ private:
+ TestDirectorySetterUpper dir_maker_;
+};
+
+class MockDirectorySyncerCommandTest : public SyncerCommandTestBase {
+ public:
+ MockDirectorySyncerCommandTest();
+ virtual ~MockDirectorySyncerCommandTest();
+ virtual Directory* directory() OVERRIDE;
+
+ MockDirectory* mock_directory() {
+ return static_cast<MockDirectory*>(directory());
+ }
+
+ virtual void SetUp() OVERRIDE;
+
+ TestUnrecoverableErrorHandler handler_;
+ MockDirectory mock_directory_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_ENGINE_SYNCER_COMMAND_TEST_H_
diff --git a/sync/test/engine/test_directory_setter_upper.cc b/sync/test/engine/test_directory_setter_upper.cc
new file mode 100644
index 0000000..01fef5d
--- /dev/null
+++ b/sync/test/engine/test_directory_setter_upper.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/engine/test_directory_setter_upper.h"
+
+#include "base/compiler_specific.h"
+#include "base/file_util.h"
+#include "base/location.h"
+#include "base/string_util.h"
+#include "sync/syncable/syncable.h"
+#include "sync/test/null_transaction_observer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::NullTransactionObserver;
+using syncable::ReadTransaction;
+
+namespace browser_sync {
+
+TestDirectorySetterUpper::TestDirectorySetterUpper() : name_("Test") {}
+
+TestDirectorySetterUpper::~TestDirectorySetterUpper() {}
+
+void TestDirectorySetterUpper::SetUp() {
+ directory_.reset(new syncable::Directory(&encryptor_, &handler_, NULL));
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ ASSERT_EQ(syncable::OPENED, directory_->OpenInMemoryForTest(
+ name_, &delegate_, NullTransactionObserver()));
+}
+
+void TestDirectorySetterUpper::TearDown() {
+ if (!directory()->good())
+ return;
+
+ {
+ RunInvariantCheck();
+ directory()->SaveChanges();
+ RunInvariantCheck();
+ directory()->SaveChanges();
+ }
+ directory_.reset();
+
+ ASSERT_TRUE(temp_dir_.Delete());
+}
+
+void TestDirectorySetterUpper::RunInvariantCheck() {
+ {
+ // Check invariants for in-memory items.
+ ReadTransaction trans(FROM_HERE, directory());
+ directory()->CheckTreeInvariants(&trans, false);
+ }
+ {
+ // Check invariants for all items.
+ ReadTransaction trans(FROM_HERE, directory());
+ directory()->CheckTreeInvariants(&trans, true);
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/test/engine/test_directory_setter_upper.h b/sync/test/engine/test_directory_setter_upper.h
new file mode 100644
index 0000000..9244138
--- /dev/null
+++ b/sync/test/engine/test_directory_setter_upper.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A handy class that takes care of setting up and destroying a
+// syncable::Directory instance for unit tests that require one.
+//
+// The expected usage is to make this a component of your test fixture:
+//
+// class AwesomenessTest : public testing::Test {
+// public:
+// virtual void SetUp() {
+// metadb_.SetUp();
+// }
+// virtual void TearDown() {
+// metadb_.TearDown();
+// }
+// protected:
+// TestDirectorySetterUpper metadb_;
+// };
+//
+// Then, in your tests, get at the directory like so:
+//
+// TEST_F(AwesomenessTest, IsMaximal) {
+// ... now use metadb_.directory() to get at syncable::Entry objects ...
+// }
+//
+
+#ifndef SYNC_TEST_ENGINE_TEST_DIRECTORY_SETTER_UPPER_H_
+#define SYNC_TEST_ENGINE_TEST_DIRECTORY_SETTER_UPPER_H_
+#pragma once
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/scoped_temp_dir.h"
+#include "sync/util/test_unrecoverable_error_handler.h"
+#include "sync/syncable/syncable.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/test/null_directory_change_delegate.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace browser_sync {
+
+class TestDirectorySetterUpper {
+ public:
+ TestDirectorySetterUpper();
+ virtual ~TestDirectorySetterUpper();
+
+  // Create a Directory instance and open it.
+ virtual void SetUp();
+
+ // Undo everything done by SetUp(): close the directory and delete the
+ // backing files. Before closing the directory, this will run the directory
+ // invariant checks and perform the SaveChanges action on the directory.
+ virtual void TearDown();
+
+ syncable::Directory* directory() { return directory_.get(); }
+
+ protected:
+ syncable::NullDirectoryChangeDelegate delegate_;
+ TestUnrecoverableErrorHandler handler_;
+
+ private:
+ void RunInvariantCheck();
+
+ ScopedTempDir temp_dir_;
+ FakeEncryptor encryptor_;
+ scoped_ptr<syncable::Directory> directory_;
+ std::string name_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestDirectorySetterUpper);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_ENGINE_TEST_DIRECTORY_SETTER_UPPER_H_
diff --git a/sync/test/engine/test_id_factory.h b/sync/test/engine/test_id_factory.h
new file mode 100644
index 0000000..cb82ce4
--- /dev/null
+++ b/sync/test/engine/test_id_factory.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A tool making it easier to create IDs for unit testing.
+
+#ifndef SYNC_TEST_ENGINE_TEST_ID_FACTORY_H_
+#define SYNC_TEST_ENGINE_TEST_ID_FACTORY_H_
+#pragma once
+
+#include <string>
+
+#include "base/string_number_conversions.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+
+class TestIdFactory {
+ public:
+ TestIdFactory() : next_value_(1337000) {}
+ ~TestIdFactory() {}
+
+ // Get the root ID.
+ static syncable::Id root() {
+ return syncable::Id();
+ }
+
+ // Make an ID from a number. If the number is zero, return the root ID.
+ // If the number is positive, create a server ID based on the value. If
+ // the number is negative, create a local ID based on the value. This
+ // is deterministic, and [FromNumber(X) == FromNumber(Y)] iff [X == Y].
+ static syncable::Id FromNumber(int64 value) {
+ if (value == 0)
+ return root();
+ else if (value < 0)
+ return syncable::Id::CreateFromClientString(base::Int64ToString(value));
+ else
+ return syncable::Id::CreateFromServerId(base::Int64ToString(value));
+ }
+
+ // Create a local ID from a name.
+ static syncable::Id MakeLocal(std::string name) {
+ return syncable::Id::CreateFromClientString(std::string("lient ") + name);
+ }
+
+ // Create a server ID from a string name.
+ static syncable::Id MakeServer(std::string name) {
+ return syncable::Id::CreateFromServerId(std::string("erver ") + name);
+ }
+
+ // Autogenerate a fresh local ID.
+ syncable::Id NewLocalId() {
+ return syncable::Id::CreateFromClientString(
+ std::string("_auto ") + base::IntToString(-next_value()));
+ }
+
+ // Autogenerate a fresh server ID.
+ syncable::Id NewServerId() {
+ return syncable::Id::CreateFromServerId(
+ std::string("_auto ") + base::IntToString(next_value()));
+ }
+
+ private:
+ int next_value() {
+ return next_value_++;
+ }
+ int next_value_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_ENGINE_TEST_ID_FACTORY_H_
+
diff --git a/sync/test/engine/test_syncable_utils.cc b/sync/test/engine/test_syncable_utils.cc
new file mode 100644
index 0000000..ab07e4c
--- /dev/null
+++ b/sync/test/engine/test_syncable_utils.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utilities to verify the state of items in unit tests.
+
+#include "sync/test/engine/test_syncable_utils.h"
+
+#include "sync/syncable/syncable.h"
+
+using std::string;
+
+namespace syncable {
+
+int CountEntriesWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const string& name) {
+ Directory::ChildHandles child_handles;
+ rtrans->directory()->GetChildHandlesById(rtrans, parent_id, &child_handles);
+ if (child_handles.size() <= 0) {
+ return 0;
+ }
+
+ int number_of_entries_with_name = 0;
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ Entry e(rtrans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ if (e.Get(NON_UNIQUE_NAME) == name) {
+ ++number_of_entries_with_name;
+ }
+ }
+ return number_of_entries_with_name;
+}
+
+Id GetFirstEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const string& name) {
+ Directory::ChildHandles child_handles;
+ rtrans->directory()->GetChildHandlesById(rtrans, parent_id, &child_handles);
+
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ Entry e(rtrans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ if (e.Get(NON_UNIQUE_NAME) == name) {
+ return e.Get(ID);
+ }
+ }
+
+ CHECK(false);
+ return Id();
+}
+
+Id GetOnlyEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const string& name) {
+ CHECK(1 == CountEntriesWithName(rtrans, parent_id, name));
+ return GetFirstEntryWithName(rtrans, parent_id, name);
+}
+
+} // namespace syncable
diff --git a/sync/test/engine/test_syncable_utils.h b/sync/test/engine/test_syncable_utils.h
new file mode 100644
index 0000000..1095fc0
--- /dev/null
+++ b/sync/test/engine/test_syncable_utils.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utilities that are useful in verifying the state of items in a
+// syncable database.
+
+#ifndef SYNC_TEST_ENGINE_TEST_SYNCABLE_UTILS_H_
+#define SYNC_TEST_ENGINE_TEST_SYNCABLE_UTILS_H_
+#pragma once
+
+#include <string>
+
+#include "sync/syncable/syncable.h"
+
+namespace syncable {
+
+class BaseTransaction;
+class Id;
+
+// Count the number of entries with a given name inside of a parent.
+// Useful to check folder structure and for porting older tests that
+// rely on uniqueness inside of folders.
+int CountEntriesWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const std::string& name);
+
+// Get the first entry ID with name in a parent. The entry *must* exist.
+Id GetFirstEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const std::string& name);
+
+// Assert that there's only one entry by this name in this parent.
+// Return the Id.
+Id GetOnlyEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const std::string& name);
+
+} // namespace syncable
+
+#endif // SYNC_TEST_ENGINE_TEST_SYNCABLE_UTILS_H_
diff --git a/sync/test/fake_encryptor.cc b/sync/test/fake_encryptor.cc
new file mode 100644
index 0000000..3d03050
--- /dev/null
+++ b/sync/test/fake_encryptor.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/fake_encryptor.h"
+
+#include "base/base64.h"
+
+namespace browser_sync {
+
+FakeEncryptor::~FakeEncryptor() {}
+
+bool FakeEncryptor::EncryptString(const std::string& plaintext,
+ std::string* ciphertext) {
+ return base::Base64Encode(plaintext, ciphertext);
+}
+
+bool FakeEncryptor::DecryptString(const std::string& ciphertext,
+ std::string* plaintext) {
+ return base::Base64Decode(ciphertext, plaintext);
+}
+
+} // namespace browser_sync
diff --git a/sync/test/fake_encryptor.h b/sync/test/fake_encryptor.h
new file mode 100644
index 0000000..7e21061
--- /dev/null
+++ b/sync/test/fake_encryptor.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_FAKE_ENCRYPTOR_H_
+#define SYNC_TEST_FAKE_ENCRYPTOR_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/util/encryptor.h"
+
+namespace browser_sync {
+
+// Encryptor which simply base64-encodes the plaintext to get the
+// ciphertext. Obviously, this should be used only for testing.
+class FakeEncryptor : public Encryptor {
+ public:
+ virtual ~FakeEncryptor();
+
+ virtual bool EncryptString(const std::string& plaintext,
+ std::string* ciphertext) OVERRIDE;
+
+ virtual bool DecryptString(const std::string& ciphertext,
+ std::string* plaintext) OVERRIDE;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_FAKE_ENCRYPTOR_H_
diff --git a/sync/test/fake_extensions_activity_monitor.cc b/sync/test/fake_extensions_activity_monitor.cc
new file mode 100644
index 0000000..b3e38cd
--- /dev/null
+++ b/sync/test/fake_extensions_activity_monitor.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/fake_extensions_activity_monitor.h"
+
+#include "base/logging.h"
+
+namespace browser_sync {
+
+FakeExtensionsActivityMonitor::FakeExtensionsActivityMonitor() {}
+
+FakeExtensionsActivityMonitor::~FakeExtensionsActivityMonitor() {
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+}
+
+void FakeExtensionsActivityMonitor::GetAndClearRecords(Records* buffer) {
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+ buffer->clear();
+ buffer->swap(records_);
+}
+
+void FakeExtensionsActivityMonitor::PutRecords(const Records& records) {
+ DCHECK(non_thread_safe_.CalledOnValidThread());
+ for (Records::const_iterator i = records.begin(); i != records.end(); ++i) {
+ records_[i->first].extension_id = i->second.extension_id;
+ records_[i->first].bookmark_write_count += i->second.bookmark_write_count;
+ }
+}
+
+} // namespace browser_sync
diff --git a/sync/test/fake_extensions_activity_monitor.h b/sync/test/fake_extensions_activity_monitor.h
new file mode 100644
index 0000000..2beb64f
--- /dev/null
+++ b/sync/test/fake_extensions_activity_monitor.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_FAKE_EXTENSIONS_ACTIVITY_MONITOR_H_
+#define SYNC_TEST_FAKE_EXTENSIONS_ACTIVITY_MONITOR_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "base/threading/non_thread_safe.h"
+#include "sync/util/extensions_activity_monitor.h"
+
+namespace browser_sync {
+
+// Fake non-thread-safe implementation of ExtensionsActivityMonitor
+// suitable to be used in single-threaded sync tests.
+class FakeExtensionsActivityMonitor : public ExtensionsActivityMonitor {
+ public:
+ FakeExtensionsActivityMonitor();
+ virtual ~FakeExtensionsActivityMonitor();
+
+ // ExtensionsActivityMonitor implementation.
+ virtual void GetAndClearRecords(Records* buffer) OVERRIDE;
+ virtual void PutRecords(const Records& records) OVERRIDE;
+
+ private:
+ Records records_;
+ base::NonThreadSafe non_thread_safe_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_TEST_FAKE_EXTENSIONS_ACTIVITY_MONITOR_H_
diff --git a/sync/test/null_directory_change_delegate.cc b/sync/test/null_directory_change_delegate.cc
new file mode 100644
index 0000000..c1918fc
--- /dev/null
+++ b/sync/test/null_directory_change_delegate.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/null_directory_change_delegate.h"
+
+namespace syncable {
+
+NullDirectoryChangeDelegate::~NullDirectoryChangeDelegate() {}
+
+void NullDirectoryChangeDelegate::HandleCalculateChangesChangeEventFromSyncApi(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) {}
+
+void NullDirectoryChangeDelegate::HandleCalculateChangesChangeEventFromSyncer(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) {}
+
+ModelTypeSet
+ NullDirectoryChangeDelegate::HandleTransactionEndingChangeEvent(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) {
+ return ModelTypeSet();
+}
+
+void NullDirectoryChangeDelegate::HandleTransactionCompleteChangeEvent(
+ ModelTypeSet models_with_changes) {}
+
+} // namespace syncable
diff --git a/sync/test/null_directory_change_delegate.h b/sync/test/null_directory_change_delegate.h
new file mode 100644
index 0000000..884ae1a
--- /dev/null
+++ b/sync/test/null_directory_change_delegate.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_NULL_DIRECTORY_CHANGE_DELEGATE_H_
+#define SYNC_TEST_NULL_DIRECTORY_CHANGE_DELEGATE_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/syncable/directory_change_delegate.h"
+
+namespace syncable {
+
+// DirectoryChangeDelegate that does nothing in all delegate methods.
+class NullDirectoryChangeDelegate : public DirectoryChangeDelegate {
+ public:
+ virtual ~NullDirectoryChangeDelegate();
+
+ virtual void HandleCalculateChangesChangeEventFromSyncApi(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) OVERRIDE;
+ virtual void HandleCalculateChangesChangeEventFromSyncer(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) OVERRIDE;
+ virtual ModelTypeSet HandleTransactionEndingChangeEvent(
+ const ImmutableWriteTransactionInfo& write_transaction_info,
+ BaseTransaction* trans) OVERRIDE;
+ virtual void HandleTransactionCompleteChangeEvent(
+ ModelTypeSet models_with_changes) OVERRIDE;
+};
+
+} // namespace syncable
+
+#endif // SYNC_TEST_NULL_DIRECTORY_CHANGE_DELEGATE_H_
diff --git a/sync/test/null_transaction_observer.cc b/sync/test/null_transaction_observer.cc
new file mode 100644
index 0000000..7902433
--- /dev/null
+++ b/sync/test/null_transaction_observer.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/null_transaction_observer.h"
+
+#include "base/memory/weak_ptr.h"
+
+namespace syncable {
+
+browser_sync::WeakHandle<TransactionObserver> NullTransactionObserver() {
+ return browser_sync::MakeWeakHandle(base::WeakPtr<TransactionObserver>());
+}
+
+} // namespace syncable
diff --git a/sync/test/null_transaction_observer.h b/sync/test/null_transaction_observer.h
new file mode 100644
index 0000000..fbddadd
--- /dev/null
+++ b/sync/test/null_transaction_observer.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_NULL_TRANSACTION_OBSERVER_H_
+#define SYNC_TEST_NULL_TRANSACTION_OBSERVER_H_
+#pragma once
+
+#include "sync/util/weak_handle.h"
+
+namespace syncable {
+
+class TransactionObserver;
+
+// Returns an initialized weak handle to a transaction observer that
+// does nothing.
+browser_sync::WeakHandle<TransactionObserver> NullTransactionObserver();
+
+} // namespace syncable
+
+#endif // SYNC_TEST_NULL_TRANSACTION_OBSERVER_H_
diff --git a/sync/test/sessions/test_scoped_session_event_listener.h b/sync/test/sessions/test_scoped_session_event_listener.h
new file mode 100644
index 0000000..f2cdadd
--- /dev/null
+++ b/sync/test/sessions/test_scoped_session_event_listener.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_SESSIONS_TEST_SCOPED_SESSION_EVENT_LISTENER_H_
+#define SYNC_TEST_SESSIONS_TEST_SCOPED_SESSION_EVENT_LISTENER_H_
+#pragma once
+
+#include "sync/sessions/sync_session_context.h"
+
+namespace browser_sync {
+namespace sessions {
+
+// Installs a SyncEventListener to a given session context for the lifetime of
+// the TestScopedSessionEventListener.
+class TestScopedSessionEventListener {
+ public:
+ TestScopedSessionEventListener(
+ SyncSessionContext* context,
+ SyncEngineEventListener* listener)
+ : context_(context), listener_(listener) {
+ context->listeners_.AddObserver(listener);
+ }
+ ~TestScopedSessionEventListener() {
+ context_->listeners_.RemoveObserver(listener_);
+ }
+ private:
+ SyncSessionContext* context_;
+ SyncEngineEventListener* listener_;
+ DISALLOW_COPY_AND_ASSIGN(TestScopedSessionEventListener);
+};
+
+} // namespace sessions
+} // namespace browser_sync
+
+#endif // SYNC_TEST_SESSIONS_TEST_SCOPED_SESSION_EVENT_LISTENER_H_
diff --git a/sync/test/test_directory_backing_store.cc b/sync/test/test_directory_backing_store.cc
new file mode 100644
index 0000000..8a168a4
--- /dev/null
+++ b/sync/test/test_directory_backing_store.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/test/test_directory_backing_store.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace syncable {
+
+TestDirectoryBackingStore::TestDirectoryBackingStore(
+ const std::string& dir_name, sql::Connection* db)
+ : DirectoryBackingStore(dir_name, db) {
+}
+
+TestDirectoryBackingStore::~TestDirectoryBackingStore() {
+ // This variant of the DirectoryBackingStore does not own its connection, so
+ // we take care to not delete it here.
+ ignore_result(db_.release());
+}
+
+DirOpenResult TestDirectoryBackingStore::Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) {
+ DCHECK(db_->is_open());
+
+ if (!InitializeTables())
+ return FAILED_OPEN_DATABASE;
+
+ if (!DropDeletedEntries())
+ return FAILED_DATABASE_CORRUPT;
+ if (!LoadEntries(entry_bucket))
+ return FAILED_DATABASE_CORRUPT;
+ if (!LoadInfo(kernel_load_info))
+ return FAILED_DATABASE_CORRUPT;
+
+ return OPENED;
+}
+
+} // namespace syncable
diff --git a/sync/test/test_directory_backing_store.h b/sync/test/test_directory_backing_store.h
new file mode 100644
index 0000000..c78d27e
--- /dev/null
+++ b/sync/test/test_directory_backing_store.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_TEST_TEST_DIRECTORY_BACKING_STORE_H_
+#define SYNC_TEST_TEST_DIRECTORY_BACKING_STORE_H_
+#pragma once
+
+#include "base/gtest_prod_util.h"
+#include "sync/syncable/directory_backing_store.h"
+
+namespace syncable {
+
+// This implementation of DirectoryBackingStore does not manage its own
+// database. This makes it more flexible (and more complex) than the
+// InMemoryDirectoryBackingStore.
+class TestDirectoryBackingStore : public DirectoryBackingStore {
+ public:
+ // This constructor takes a handle to a database. The caller maintains
+ // ownership of this handle.
+ //
+ // This is very brittle. You should not be using this class or this
+ // constructor unless you understand and intend to test the
+ // DirectoryBackingStore's internals.
+ TestDirectoryBackingStore(const std::string& dir_name,
+ sql::Connection* connection);
+ virtual ~TestDirectoryBackingStore();
+ virtual DirOpenResult Load(
+ MetahandlesIndex* entry_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
+
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion67To68);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion68To69);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion69To70);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion70To71);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion71To72);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion72To73);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion73To74);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion74To75);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion75To76);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion76To77);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion77To78);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, GenerateCacheGUID);
+ FRIEND_TEST_ALL_PREFIXES(MigrationTest, ToCurrentVersion);
+ friend class MigrationTest;
+};
+
+} // namespace syncable
+
+#endif // SYNC_TEST_TEST_DIRECTORY_BACKING_STORE_H_
diff --git a/sync/util/DEPS b/sync/util/DEPS
new file mode 100644
index 0000000..d72656b
--- /dev/null
+++ b/sync/util/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "+crypto",
+ "+sync/protocol",
+ "+sync/syncable/model_type.h",
+ "+sync/syncable/model_type_test_util.h",
+ "+sync/test/fake_encryptor.h",
+]
diff --git a/sync/util/cryptographer.cc b/sync/util/cryptographer.cc
new file mode 100644
index 0000000..b3d1833
--- /dev/null
+++ b/sync/util/cryptographer.cc
@@ -0,0 +1,447 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/cryptographer.h"
+
+#include <algorithm>
+
+#include "base/base64.h"
+#include "base/logging.h"
+#include "sync/util/encryptor.h"
+
+namespace browser_sync {
+
+const char kNigoriTag[] = "google_chrome_nigori";
+
+// We name a particular Nigori instance (i.e. a triplet consisting of a hostname,
+// a username, and a password) by calling Permute on this string. Since the
+// output of Permute is always the same for a given triplet, clients will always
+// assign the same name to a particular triplet.
+const char kNigoriKeyName[] = "nigori-key";
+
+Cryptographer::Observer::~Observer() {}
+
+Cryptographer::Cryptographer(Encryptor* encryptor)
+ : encryptor_(encryptor),
+ default_nigori_(NULL),
+ encrypted_types_(SensitiveTypes()),
+ encrypt_everything_(false) {
+ DCHECK(encryptor);
+}
+
+Cryptographer::~Cryptographer() {}
+
+void Cryptographer::AddObserver(Observer* observer) {
+ observers_.AddObserver(observer);
+}
+
+void Cryptographer::RemoveObserver(Observer* observer) {
+ observers_.RemoveObserver(observer);
+}
+
+void Cryptographer::Bootstrap(const std::string& restored_bootstrap_token) {
+ if (is_initialized()) {
+ NOTREACHED();
+ return;
+ }
+
+ scoped_ptr<Nigori> nigori(UnpackBootstrapToken(restored_bootstrap_token));
+ if (nigori.get())
+ AddKeyImpl(nigori.release());
+}
+
+bool Cryptographer::CanDecrypt(const sync_pb::EncryptedData& data) const {
+ return nigoris_.end() != nigoris_.find(data.key_name());
+}
+
+bool Cryptographer::CanDecryptUsingDefaultKey(
+ const sync_pb::EncryptedData& data) const {
+ return default_nigori_ && (data.key_name() == default_nigori_->first);
+}
+
+bool Cryptographer::Encrypt(
+ const ::google::protobuf::MessageLite& message,
+ sync_pb::EncryptedData* encrypted) const {
+ DCHECK(encrypted);
+ if (!default_nigori_) {
+ LOG(ERROR) << "Cryptographer not ready, failed to encrypt.";
+ return false;
+ }
+
+ std::string serialized;
+ if (!message.SerializeToString(&serialized)) {
+ LOG(ERROR) << "Message is invalid/missing a required field.";
+ return false;
+ }
+
+ if (CanDecryptUsingDefaultKey(*encrypted)) {
+ const std::string& original_serialized = DecryptToString(*encrypted);
+ if (original_serialized == serialized) {
+ DVLOG(2) << "Re-encryption unnecessary, encrypted data already matches.";
+ return true;
+ }
+ }
+
+ encrypted->set_key_name(default_nigori_->first);
+ if (!default_nigori_->second->Encrypt(serialized,
+ encrypted->mutable_blob())) {
+ LOG(ERROR) << "Failed to encrypt data.";
+ return false;
+ }
+ return true;
+}
+
+bool Cryptographer::Decrypt(const sync_pb::EncryptedData& encrypted,
+ ::google::protobuf::MessageLite* message) const {
+ DCHECK(message);
+ std::string plaintext = DecryptToString(encrypted);
+ return message->ParseFromString(plaintext);
+}
+
+std::string Cryptographer::DecryptToString(
+ const sync_pb::EncryptedData& encrypted) const {
+ NigoriMap::const_iterator it = nigoris_.find(encrypted.key_name());
+ if (nigoris_.end() == it) {
+ NOTREACHED() << "Cannot decrypt message";
+    return std::string(""); // Caller should have called CanDecrypt(encrypted).
+ }
+
+ std::string plaintext;
+ if (!it->second->Decrypt(encrypted.blob(), &plaintext)) {
+ return std::string("");
+ }
+
+ return plaintext;
+}
+
+bool Cryptographer::GetKeys(sync_pb::EncryptedData* encrypted) const {
+ DCHECK(encrypted);
+ DCHECK(!nigoris_.empty());
+
+ // Create a bag of all the Nigori parameters we know about.
+ sync_pb::NigoriKeyBag bag;
+ for (NigoriMap::const_iterator it = nigoris_.begin(); it != nigoris_.end();
+ ++it) {
+ const Nigori& nigori = *it->second;
+ sync_pb::NigoriKey* key = bag.add_key();
+ key->set_name(it->first);
+ nigori.ExportKeys(key->mutable_user_key(),
+ key->mutable_encryption_key(),
+ key->mutable_mac_key());
+ }
+
+ // Encrypt the bag with the default Nigori.
+ return Encrypt(bag, encrypted);
+}
+
+bool Cryptographer::AddKey(const KeyParams& params) {
+ // Create the new Nigori and make it the default encryptor.
+ scoped_ptr<Nigori> nigori(new Nigori);
+ if (!nigori->InitByDerivation(params.hostname,
+ params.username,
+ params.password)) {
+ NOTREACHED(); // Invalid username or password.
+ return false;
+ }
+ return AddKeyImpl(nigori.release());
+}
+
+bool Cryptographer::AddKeyFromBootstrapToken(
+ const std::string restored_bootstrap_token) {
+ // Create the new Nigori and make it the default encryptor.
+ scoped_ptr<Nigori> nigori(UnpackBootstrapToken(restored_bootstrap_token));
+ if (!nigori.get())
+ return false;
+ return AddKeyImpl(nigori.release());
+}
+
+bool Cryptographer::AddKeyImpl(Nigori* initialized_nigori) {
+ scoped_ptr<Nigori> nigori(initialized_nigori);
+ std::string name;
+ if (!nigori->Permute(Nigori::Password, kNigoriKeyName, &name)) {
+ NOTREACHED();
+ return false;
+ }
+ nigoris_[name] = make_linked_ptr(nigori.release());
+ default_nigori_ = &*nigoris_.find(name);
+ return true;
+}
+
+bool Cryptographer::SetKeys(const sync_pb::EncryptedData& encrypted) {
+ DCHECK(CanDecrypt(encrypted));
+
+ sync_pb::NigoriKeyBag bag;
+ if (!Decrypt(encrypted, &bag)) {
+ return false;
+ }
+ InstallKeys(encrypted.key_name(), bag);
+ return true;
+}
+
+void Cryptographer::SetPendingKeys(const sync_pb::EncryptedData& encrypted) {
+ DCHECK(!CanDecrypt(encrypted));
+ pending_keys_.reset(new sync_pb::EncryptedData(encrypted));
+}
+
+const sync_pb::EncryptedData& Cryptographer::GetPendingKeys() const {
+ DCHECK(has_pending_keys());
+ return *(pending_keys_.get());
+}
+
+bool Cryptographer::DecryptPendingKeys(const KeyParams& params) {
+ Nigori nigori;
+ if (!nigori.InitByDerivation(params.hostname,
+ params.username,
+ params.password)) {
+ NOTREACHED();
+ return false;
+ }
+
+ std::string plaintext;
+ if (!nigori.Decrypt(pending_keys_->blob(), &plaintext))
+ return false;
+
+ sync_pb::NigoriKeyBag bag;
+ if (!bag.ParseFromString(plaintext)) {
+ NOTREACHED();
+ return false;
+ }
+ InstallKeys(pending_keys_->key_name(), bag);
+ pending_keys_.reset();
+ return true;
+}
+
+bool Cryptographer::GetBootstrapToken(std::string* token) const {
+ DCHECK(token);
+ if (!is_initialized())
+ return false;
+
+ return PackBootstrapToken(default_nigori_->second.get(), token);
+}
+
+bool Cryptographer::PackBootstrapToken(const Nigori* nigori,
+ std::string* pack_into) const {
+ DCHECK(pack_into);
+ DCHECK(nigori);
+
+ sync_pb::NigoriKey key;
+ if (!nigori->ExportKeys(key.mutable_user_key(),
+ key.mutable_encryption_key(),
+ key.mutable_mac_key())) {
+ NOTREACHED();
+ return false;
+ }
+
+ std::string unencrypted_token;
+ if (!key.SerializeToString(&unencrypted_token)) {
+ NOTREACHED();
+ return false;
+ }
+
+ std::string encrypted_token;
+ if (!encryptor_->EncryptString(unencrypted_token, &encrypted_token)) {
+ NOTREACHED();
+ return false;
+ }
+
+ if (!base::Base64Encode(encrypted_token, pack_into)) {
+ NOTREACHED();
+ return false;
+ }
+ return true;
+}
+
+Nigori* Cryptographer::UnpackBootstrapToken(const std::string& token) const {
+ if (token.empty())
+ return NULL;
+
+ std::string encrypted_data;
+ if (!base::Base64Decode(token, &encrypted_data)) {
+ DLOG(WARNING) << "Could not decode token.";
+ return NULL;
+ }
+
+ std::string unencrypted_token;
+ if (!encryptor_->DecryptString(encrypted_data, &unencrypted_token)) {
+ DLOG(WARNING) << "Decryption of bootstrap token failed.";
+ return NULL;
+ }
+
+ sync_pb::NigoriKey key;
+ if (!key.ParseFromString(unencrypted_token)) {
+ DLOG(WARNING) << "Parsing of bootstrap token failed.";
+ return NULL;
+ }
+
+ scoped_ptr<Nigori> nigori(new Nigori);
+ if (!nigori->InitByImport(key.user_key(), key.encryption_key(),
+ key.mac_key())) {
+ NOTREACHED();
+ return NULL;
+ }
+
+ return nigori.release();
+}
+
+Cryptographer::UpdateResult Cryptographer::Update(
+ const sync_pb::NigoriSpecifics& nigori) {
+ UpdateEncryptedTypesFromNigori(nigori);
+ if (!nigori.encrypted().blob().empty()) {
+ if (CanDecrypt(nigori.encrypted())) {
+ SetKeys(nigori.encrypted());
+ return Cryptographer::SUCCESS;
+ } else {
+ SetPendingKeys(nigori.encrypted());
+ return Cryptographer::NEEDS_PASSPHRASE;
+ }
+ }
+ return Cryptographer::SUCCESS;
+}
+
+// Static
+syncable::ModelTypeSet Cryptographer::SensitiveTypes() {
+  // Both of these have their own encryption schemes, but we include them
+  // anyway.
+ syncable::ModelTypeSet types;
+ types.Put(syncable::PASSWORDS);
+ types.Put(syncable::NIGORI);
+ return types;
+}
+
+void Cryptographer::UpdateEncryptedTypesFromNigori(
+ const sync_pb::NigoriSpecifics& nigori) {
+ if (nigori.encrypt_everything()) {
+ set_encrypt_everything();
+ return;
+ }
+
+ syncable::ModelTypeSet encrypted_types(SensitiveTypes());
+ if (nigori.encrypt_bookmarks())
+ encrypted_types.Put(syncable::BOOKMARKS);
+ if (nigori.encrypt_preferences())
+ encrypted_types.Put(syncable::PREFERENCES);
+ if (nigori.encrypt_autofill_profile())
+ encrypted_types.Put(syncable::AUTOFILL_PROFILE);
+ if (nigori.encrypt_autofill())
+ encrypted_types.Put(syncable::AUTOFILL);
+ if (nigori.encrypt_themes())
+ encrypted_types.Put(syncable::THEMES);
+ if (nigori.encrypt_typed_urls())
+ encrypted_types.Put(syncable::TYPED_URLS);
+ if (nigori.encrypt_extension_settings())
+ encrypted_types.Put(syncable::EXTENSION_SETTINGS);
+ if (nigori.encrypt_extensions())
+ encrypted_types.Put(syncable::EXTENSIONS);
+ if (nigori.encrypt_search_engines())
+ encrypted_types.Put(syncable::SEARCH_ENGINES);
+ if (nigori.encrypt_sessions())
+ encrypted_types.Put(syncable::SESSIONS);
+ if (nigori.encrypt_app_settings())
+ encrypted_types.Put(syncable::APP_SETTINGS);
+ if (nigori.encrypt_apps())
+ encrypted_types.Put(syncable::APPS);
+ if (nigori.encrypt_app_notifications())
+ encrypted_types.Put(syncable::APP_NOTIFICATIONS);
+
+ // Note: the initial version with encryption did not support the
+ // encrypt_everything field. If anything more than the sensitive types were
+ // encrypted, it meant we were encrypting everything.
+ if (!nigori.has_encrypt_everything() &&
+ !Difference(encrypted_types, SensitiveTypes()).Empty()) {
+ set_encrypt_everything();
+ return;
+ }
+
+ MergeEncryptedTypes(encrypted_types);
+}
+
+void Cryptographer::UpdateNigoriFromEncryptedTypes(
+ sync_pb::NigoriSpecifics* nigori) const {
+ nigori->set_encrypt_everything(encrypt_everything_);
+ nigori->set_encrypt_bookmarks(
+ encrypted_types_.Has(syncable::BOOKMARKS));
+ nigori->set_encrypt_preferences(
+ encrypted_types_.Has(syncable::PREFERENCES));
+ nigori->set_encrypt_autofill_profile(
+ encrypted_types_.Has(syncable::AUTOFILL_PROFILE));
+ nigori->set_encrypt_autofill(encrypted_types_.Has(syncable::AUTOFILL));
+ nigori->set_encrypt_themes(encrypted_types_.Has(syncable::THEMES));
+ nigori->set_encrypt_typed_urls(
+ encrypted_types_.Has(syncable::TYPED_URLS));
+ nigori->set_encrypt_extension_settings(
+ encrypted_types_.Has(syncable::EXTENSION_SETTINGS));
+ nigori->set_encrypt_extensions(
+ encrypted_types_.Has(syncable::EXTENSIONS));
+ nigori->set_encrypt_search_engines(
+ encrypted_types_.Has(syncable::SEARCH_ENGINES));
+ nigori->set_encrypt_sessions(encrypted_types_.Has(syncable::SESSIONS));
+ nigori->set_encrypt_app_settings(
+ encrypted_types_.Has(syncable::APP_SETTINGS));
+ nigori->set_encrypt_apps(encrypted_types_.Has(syncable::APPS));
+ nigori->set_encrypt_app_notifications(
+ encrypted_types_.Has(syncable::APP_NOTIFICATIONS));
+}
+
+void Cryptographer::set_encrypt_everything() {
+ if (encrypt_everything_) {
+ DCHECK(encrypted_types_.Equals(syncable::ModelTypeSet::All()));
+ return;
+ }
+ encrypt_everything_ = true;
+ // Change |encrypted_types_| directly to avoid sending more than one
+ // notification.
+ encrypted_types_ = syncable::ModelTypeSet::All();
+ EmitEncryptedTypesChangedNotification();
+}
+
+bool Cryptographer::encrypt_everything() const {
+ return encrypt_everything_;
+}
+
+syncable::ModelTypeSet Cryptographer::GetEncryptedTypes() const {
+ return encrypted_types_;
+}
+
+void Cryptographer::MergeEncryptedTypesForTest(
+ syncable::ModelTypeSet encrypted_types) {
+ MergeEncryptedTypes(encrypted_types);
+}
+
+void Cryptographer::MergeEncryptedTypes(
+ syncable::ModelTypeSet encrypted_types) {
+ if (encrypted_types_.HasAll(encrypted_types)) {
+ return;
+ }
+ encrypted_types_ = encrypted_types;
+ EmitEncryptedTypesChangedNotification();
+}
+
+void Cryptographer::EmitEncryptedTypesChangedNotification() {
+ FOR_EACH_OBSERVER(
+ Observer, observers_,
+ OnEncryptedTypesChanged(encrypted_types_, encrypt_everything_));
+}
+
+void Cryptographer::InstallKeys(const std::string& default_key_name,
+ const sync_pb::NigoriKeyBag& bag) {
+ int key_size = bag.key_size();
+ for (int i = 0; i < key_size; ++i) {
+ const sync_pb::NigoriKey key = bag.key(i);
+ // Only use this key if we don't already know about it.
+ if (nigoris_.end() == nigoris_.find(key.name())) {
+ scoped_ptr<Nigori> new_nigori(new Nigori);
+ if (!new_nigori->InitByImport(key.user_key(),
+ key.encryption_key(),
+ key.mac_key())) {
+ NOTREACHED();
+ continue;
+ }
+ nigoris_[key.name()] = make_linked_ptr(new_nigori.release());
+ }
+ }
+ DCHECK(nigoris_.end() != nigoris_.find(default_key_name));
+ default_nigori_ = &*nigoris_.find(default_key_name);
+}
+
+} // namespace browser_sync
diff --git a/sync/util/cryptographer.h b/sync/util/cryptographer.h
new file mode 100644
index 0000000..af7b272
--- /dev/null
+++ b/sync/util/cryptographer.h
@@ -0,0 +1,247 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_CRYPTOGRAPHER_H_
+#define SYNC_UTIL_CRYPTOGRAPHER_H_
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/observer_list.h"
+#include "sync/syncable/model_type.h"
+#include "sync/util/nigori.h"
+#include "sync/protocol/encryption.pb.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+
+namespace browser_sync {
+
+class Encryptor;
+
+extern const char kNigoriTag[];
+
+// The parameters used to initialize a Nigori instance.
+struct KeyParams {
+ std::string hostname;
+ std::string username;
+ std::string password;
+};
+
+// This class manages the Nigori objects used to encrypt and decrypt sensitive
+// sync data (e.g. passwords). Each Nigori object knows how to handle data
+// protected with a particular passphrase.
+//
+// Whenever an update to the Nigori sync node is received from the server,
+// SetPendingKeys should be called with the encrypted contents of that node.
+// Most likely, an updated Nigori node means that a new passphrase has been set
+// and that future node updates won't be decryptable. To remedy this, the user
+// should be prompted for the new passphrase and DecryptPendingKeys be called.
+//
+// Whenever an update to an encrypted node is received from the server,
+// CanDecrypt should be used to verify whether the Cryptographer can decrypt
+// that node. If it cannot, then the application of that update should be
+// delayed until after it can be decrypted.
+class Cryptographer {
+ public:
+ // All Observer methods are done synchronously, so they're called
+ // under a transaction (since all Cryptographer operations are done
+ // under a transaction).
+ class Observer {
+ public:
+ // Called when the set of encrypted types or the encrypt
+ // everything flag has been changed. Note that this doesn't
+ // necessarily mean that encryption has completed for the given
+ // types.
+ //
+ // |encrypted_types| will always be a superset of
+ // SensitiveTypes(). If |encrypt_everything| is true,
+ // |encrypted_types| will be the set of all known types.
+ //
+ // Until this function is called, observers can assume that the
+ // set of encrypted types is SensitiveTypes() and that the encrypt
+ // everything flag is false.
+ virtual void OnEncryptedTypesChanged(
+ syncable::ModelTypeSet encrypted_types,
+ bool encrypt_everything) = 0;
+
+ protected:
+ virtual ~Observer();
+ };
+
+ // Does not take ownership of |encryptor|.
+ explicit Cryptographer(Encryptor* encryptor);
+ ~Cryptographer();
+
+ // When update on cryptographer is called this enum tells if the
+  // cryptographer was successfully able to update using the nigori node or if
+ // it needs a key to decrypt the nigori node.
+ enum UpdateResult {
+ SUCCESS,
+ NEEDS_PASSPHRASE
+ };
+
+ // Manage observers.
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+
+ // |restored_bootstrap_token| can be provided via this method to bootstrap
+ // Cryptographer instance into the ready state (is_ready will be true).
+ // It must be a string that was previously built by the
+  // GetBootstrapToken function. It is possible that the token is no
+ // longer valid (due to server key change), in which case the normal
+ // decryption code paths will fail and the user will need to provide a new
+ // passphrase.
+ // It is an error to call this if is_ready() == true, though it is fair to
+ // never call Bootstrap at all.
+ void Bootstrap(const std::string& restored_bootstrap_token);
+
+ // Returns whether we can decrypt |encrypted| using the keys we currently know
+ // about.
+ bool CanDecrypt(const sync_pb::EncryptedData& encrypted) const;
+
+ // Returns whether |encrypted| can be decrypted using the default encryption
+ // key.
+ bool CanDecryptUsingDefaultKey(const sync_pb::EncryptedData& encrypted) const;
+
+ // Encrypts |message| into |encrypted|. Does not overwrite |encrypted| if
+ // |message| already matches the decrypted data within |encrypted| and
+ // |encrypted| was encrypted with the current default key. This avoids
+ // unnecessarily modifying |encrypted| if the change had no practical effect.
+ // Returns true unless encryption fails or |message| isn't valid (e.g. a
+ // required field isn't set).
+ bool Encrypt(const ::google::protobuf::MessageLite& message,
+ sync_pb::EncryptedData* encrypted) const;
+
+ // Decrypts |encrypted| into |message|. Returns true unless decryption fails,
+ // or |message| fails to parse the decrypted data.
+ bool Decrypt(const sync_pb::EncryptedData& encrypted,
+ ::google::protobuf::MessageLite* message) const;
+
+ // Decrypts |encrypted| and returns plaintext decrypted data. If decryption
+ // fails, returns empty string.
+ std::string DecryptToString(const sync_pb::EncryptedData& encrypted) const;
+
+ // Encrypts the set of currently known keys into |encrypted|. Returns true if
+ // successful.
+ bool GetKeys(sync_pb::EncryptedData* encrypted) const;
+
+ // Creates a new Nigori instance using |params|. If successful, |params| will
+ // become the default encryption key and be used for all future calls to
+ // Encrypt.
+ bool AddKey(const KeyParams& params);
+
+ // Same as AddKey(..), but builds the new Nigori from a previously persisted
+ // bootstrap token. This can be useful when consuming a bootstrap token
+ // with a cryptographer that has already been initialized.
+ bool AddKeyFromBootstrapToken(const std::string restored_bootstrap_token);
+
+ // Decrypts |encrypted| and uses its contents to initialize Nigori instances.
+ // Returns true unless decryption of |encrypted| fails. The caller is
+ // responsible for checking that CanDecrypt(encrypted) == true.
+ bool SetKeys(const sync_pb::EncryptedData& encrypted);
+
+ // Makes a local copy of |encrypted| to later be decrypted by
+ // DecryptPendingKeys. This should only be used if CanDecrypt(encrypted) ==
+ // false.
+ void SetPendingKeys(const sync_pb::EncryptedData& encrypted);
+
+ // Makes |pending_keys_| available to callers that may want to cache its
+ // value for later use on the UI thread. It is illegal to call this if the
+ // cryptographer has no pending keys. Like other calls that access the
+ // cryptographer, this method must be called from within a transaction.
+ const sync_pb::EncryptedData& GetPendingKeys() const;
+
+ // Attempts to decrypt the set of keys that was copied in the previous call to
+ // SetPendingKeys using |params|. Returns true if the pending keys were
+ // successfully decrypted and installed.
+ bool DecryptPendingKeys(const KeyParams& params);
+
+ bool is_initialized() const { return !nigoris_.empty() && default_nigori_; }
+
+ // Returns whether this Cryptographer is ready to encrypt and decrypt data.
+ bool is_ready() const { return is_initialized() &&
+ has_pending_keys() == false; }
+
+ // Returns whether there is a pending set of keys that needs to be decrypted.
+ bool has_pending_keys() const { return NULL != pending_keys_.get(); }
+
+ // Obtain a token that can be provided on construction to a future
+ // Cryptographer instance to bootstrap itself. Returns false if such a token
+  // can't be created (i.e. if this Cryptographer doesn't have valid keys).
+ bool GetBootstrapToken(std::string* token) const;
+
+ // Update the cryptographer based on the contents of the nigori specifics.
+ // This updates both the encryption keys and the set of encrypted types.
+  // Returns NEEDS_PASSPHRASE if it was unable to decrypt the pending keys,
+ // SUCCESS otherwise.
+ UpdateResult Update(const sync_pb::NigoriSpecifics& nigori);
+
+ // The set of types that are always encrypted.
+ static syncable::ModelTypeSet SensitiveTypes();
+
+ // Reset our set of encrypted types based on the contents of the nigori
+ // specifics.
+ void UpdateEncryptedTypesFromNigori(const sync_pb::NigoriSpecifics& nigori);
+
+ // Update the nigori to reflect the current set of encrypted types.
+ void UpdateNigoriFromEncryptedTypes(sync_pb::NigoriSpecifics* nigori) const;
+
+ // Setter/getter for whether all current and future datatypes should
+ // be encrypted. Once set you cannot unset without reading from a
+ // new nigori node. set_encrypt_everything() emits a notification
+ // the first time it's called.
+ void set_encrypt_everything();
+ bool encrypt_everything() const;
+
+ // Return the set of encrypted types.
+ syncable::ModelTypeSet GetEncryptedTypes() const;
+
+ // Forwards to MergeEncryptedTypes.
+ void MergeEncryptedTypesForTest(
+ syncable::ModelTypeSet encrypted_types);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(SyncCryptographerTest, PackUnpack);
+ typedef std::map<std::string, linked_ptr<const Nigori> > NigoriMap;
+
+ // Merges the given set of encrypted types with the existing set and emits a
+ // notification if necessary.
+ void MergeEncryptedTypes(syncable::ModelTypeSet encrypted_types);
+
+ void EmitEncryptedTypesChangedNotification();
+
+ // Helper method to instantiate Nigori instances for each set of key
+ // parameters in |bag| and setting the default encryption key to
+ // |default_key_name|.
+ void InstallKeys(const std::string& default_key_name,
+ const sync_pb::NigoriKeyBag& bag);
+
+ bool AddKeyImpl(Nigori* nigori);
+
+ // Functions to serialize + encrypt a Nigori object in an opaque format for
+ // persistence by sync infrastructure.
+ bool PackBootstrapToken(const Nigori* nigori, std::string* pack_into) const;
+ Nigori* UnpackBootstrapToken(const std::string& token) const;
+
+ Encryptor* const encryptor_;
+
+ ObserverList<Observer> observers_;
+
+ NigoriMap nigoris_; // The Nigoris we know about, mapped by key name.
+ NigoriMap::value_type* default_nigori_; // The Nigori used for encryption.
+
+ scoped_ptr<sync_pb::EncryptedData> pending_keys_;
+
+ syncable::ModelTypeSet encrypted_types_;
+ bool encrypt_everything_;
+
+ DISALLOW_COPY_AND_ASSIGN(Cryptographer);
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_CRYPTOGRAPHER_H_
diff --git a/sync/util/cryptographer_unittest.cc b/sync/util/cryptographer_unittest.cc
new file mode 100644
index 0000000..dc38e0f
--- /dev/null
+++ b/sync/util/cryptographer_unittest.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/cryptographer.h"
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/string_util.h"
+#include "sync/syncable/model_type_test_util.h"
+#include "sync/test/fake_encryptor.h"
+#include "sync/protocol/nigori_specifics.pb.h"
+#include "sync/protocol/password_specifics.pb.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+namespace {
+
+using ::testing::_;
+using ::testing::Mock;
+using ::testing::StrictMock;
+using syncable::ModelTypeSet;
+
+class MockObserver : public Cryptographer::Observer {
+ public:
+ MOCK_METHOD2(OnEncryptedTypesChanged,
+ void(syncable::ModelTypeSet, bool));
+};
+
+} // namespace
+
+class SyncCryptographerTest : public ::testing::Test {
+ protected:
+ SyncCryptographerTest() : cryptographer_(&encryptor_) {}
+
+ FakeEncryptor encryptor_;
+ Cryptographer cryptographer_;
+};
+
+TEST_F(SyncCryptographerTest, EmptyCantDecrypt) {
+ EXPECT_FALSE(cryptographer_.is_ready());
+
+ sync_pb::EncryptedData encrypted;
+ encrypted.set_key_name("foo");
+ encrypted.set_blob("bar");
+
+ EXPECT_FALSE(cryptographer_.CanDecrypt(encrypted));
+}
+
+TEST_F(SyncCryptographerTest, EmptyCantEncrypt) {
+ EXPECT_FALSE(cryptographer_.is_ready());
+
+ sync_pb::EncryptedData encrypted;
+ sync_pb::PasswordSpecificsData original;
+ EXPECT_FALSE(cryptographer_.Encrypt(original, &encrypted));
+}
+
+TEST_F(SyncCryptographerTest, MissingCantDecrypt) {
+ KeyParams params = {"localhost", "dummy", "dummy"};
+ cryptographer_.AddKey(params);
+ EXPECT_TRUE(cryptographer_.is_ready());
+
+ sync_pb::EncryptedData encrypted;
+ encrypted.set_key_name("foo");
+ encrypted.set_blob("bar");
+
+ EXPECT_FALSE(cryptographer_.CanDecrypt(encrypted));
+}
+
+TEST_F(SyncCryptographerTest, CanEncryptAndDecrypt) {
+ KeyParams params = {"localhost", "dummy", "dummy"};
+ EXPECT_TRUE(cryptographer_.AddKey(params));
+ EXPECT_TRUE(cryptographer_.is_ready());
+
+ sync_pb::PasswordSpecificsData original;
+ original.set_origin("http://example.com");
+ original.set_username_value("azure");
+ original.set_password_value("hunter2");
+
+ sync_pb::EncryptedData encrypted;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted));
+
+ sync_pb::PasswordSpecificsData decrypted;
+ EXPECT_TRUE(cryptographer_.Decrypt(encrypted, &decrypted));
+
+ EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
+}
+
+TEST_F(SyncCryptographerTest, EncryptOnlyIfDifferent) {
+ KeyParams params = {"localhost", "dummy", "dummy"};
+ EXPECT_TRUE(cryptographer_.AddKey(params));
+ EXPECT_TRUE(cryptographer_.is_ready());
+
+ sync_pb::PasswordSpecificsData original;
+ original.set_origin("http://example.com");
+ original.set_username_value("azure");
+ original.set_password_value("hunter2");
+
+ sync_pb::EncryptedData encrypted;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted));
+
+ sync_pb::EncryptedData encrypted2, encrypted3;
+ encrypted2.CopyFrom(encrypted);
+ encrypted3.CopyFrom(encrypted);
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted2));
+
+ // Now encrypt with a new default key. Should overwrite the old data.
+ KeyParams params_new = {"localhost", "dummy", "dummy2"};
+ cryptographer_.AddKey(params_new);
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted3));
+
+ sync_pb::PasswordSpecificsData decrypted;
+ EXPECT_TRUE(cryptographer_.Decrypt(encrypted2, &decrypted));
+ // encrypted2 should match encrypted, encrypted3 should not (due to salting).
+ EXPECT_EQ(encrypted.SerializeAsString(), encrypted2.SerializeAsString());
+ EXPECT_NE(encrypted.SerializeAsString(), encrypted3.SerializeAsString());
+ EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
+}
+
+TEST_F(SyncCryptographerTest, AddKeySetsDefault) {
+ KeyParams params1 = {"localhost", "dummy", "dummy1"};
+ EXPECT_TRUE(cryptographer_.AddKey(params1));
+ EXPECT_TRUE(cryptographer_.is_ready());
+
+ sync_pb::PasswordSpecificsData original;
+ original.set_origin("http://example.com");
+ original.set_username_value("azure");
+ original.set_password_value("hunter2");
+
+ sync_pb::EncryptedData encrypted1;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted1));
+ sync_pb::EncryptedData encrypted2;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted2));
+
+ KeyParams params2 = {"localhost", "dummy", "dummy2"};
+ EXPECT_TRUE(cryptographer_.AddKey(params2));
+ EXPECT_TRUE(cryptographer_.is_ready());
+
+ sync_pb::EncryptedData encrypted3;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted3));
+ sync_pb::EncryptedData encrypted4;
+ EXPECT_TRUE(cryptographer_.Encrypt(original, &encrypted4));
+
+ EXPECT_EQ(encrypted1.key_name(), encrypted2.key_name());
+ EXPECT_NE(encrypted1.key_name(), encrypted3.key_name());
+ EXPECT_EQ(encrypted3.key_name(), encrypted4.key_name());
+}
+
+// Crashes, Bug 55178.
+#if defined(OS_WIN)
+#define MAYBE_EncryptExportDecrypt DISABLED_EncryptExportDecrypt
+#else
+#define MAYBE_EncryptExportDecrypt EncryptExportDecrypt
+#endif
+TEST_F(SyncCryptographerTest, MAYBE_EncryptExportDecrypt) {
+ sync_pb::EncryptedData nigori;
+ sync_pb::EncryptedData encrypted;
+
+ sync_pb::PasswordSpecificsData original;
+ original.set_origin("http://example.com");
+ original.set_username_value("azure");
+ original.set_password_value("hunter2");
+
+ {
+ Cryptographer cryptographer(&encryptor_);
+
+ KeyParams params = {"localhost", "dummy", "dummy"};
+ cryptographer.AddKey(params);
+ EXPECT_TRUE(cryptographer.is_ready());
+
+ EXPECT_TRUE(cryptographer.Encrypt(original, &encrypted));
+ EXPECT_TRUE(cryptographer.GetKeys(&nigori));
+ }
+
+ {
+ Cryptographer cryptographer(&encryptor_);
+ EXPECT_FALSE(cryptographer.CanDecrypt(nigori));
+
+ cryptographer.SetPendingKeys(nigori);
+ EXPECT_FALSE(cryptographer.is_ready());
+ EXPECT_TRUE(cryptographer.has_pending_keys());
+
+ KeyParams params = {"localhost", "dummy", "dummy"};
+ EXPECT_TRUE(cryptographer.DecryptPendingKeys(params));
+ EXPECT_TRUE(cryptographer.is_ready());
+ EXPECT_FALSE(cryptographer.has_pending_keys());
+
+ sync_pb::PasswordSpecificsData decrypted;
+ EXPECT_TRUE(cryptographer.Decrypt(encrypted, &decrypted));
+ EXPECT_EQ(original.SerializeAsString(), decrypted.SerializeAsString());
+ }
+}
+
+// Crashes, Bug 55178.
+#if defined(OS_WIN)
+#define MAYBE_PackUnpack DISABLED_PackUnpack
+#else
+#define MAYBE_PackUnpack PackUnpack
+#endif
+TEST_F(SyncCryptographerTest, MAYBE_PackUnpack) {
+ Nigori nigori;
+ ASSERT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+ std::string expected_user, expected_encryption, expected_mac;
+ ASSERT_TRUE(nigori.ExportKeys(&expected_user, &expected_encryption,
+ &expected_mac));
+
+ std::string token;
+ EXPECT_TRUE(cryptographer_.PackBootstrapToken(&nigori, &token));
+ EXPECT_TRUE(IsStringUTF8(token));
+
+ scoped_ptr<Nigori> unpacked(cryptographer_.UnpackBootstrapToken(token));
+ EXPECT_NE(static_cast<Nigori*>(NULL), unpacked.get());
+
+ std::string user_key, encryption_key, mac_key;
+ ASSERT_TRUE(unpacked->ExportKeys(&user_key, &encryption_key, &mac_key));
+
+ EXPECT_EQ(expected_user, user_key);
+ EXPECT_EQ(expected_encryption, encryption_key);
+ EXPECT_EQ(expected_mac, mac_key);
+}
+
+TEST_F(SyncCryptographerTest, NigoriEncryptionTypes) {
+ Cryptographer cryptographer2(&encryptor_);
+ sync_pb::NigoriSpecifics nigori;
+
+ StrictMock<MockObserver> observer;
+ cryptographer_.AddObserver(&observer);
+ StrictMock<MockObserver> observer2;
+ cryptographer2.AddObserver(&observer2);
+
+ // Just set the sensitive types (shouldn't trigger any
+ // notifications).
+ ModelTypeSet encrypted_types(Cryptographer::SensitiveTypes());
+ cryptographer_.MergeEncryptedTypesForTest(encrypted_types);
+ cryptographer_.UpdateNigoriFromEncryptedTypes(&nigori);
+ cryptographer2.UpdateEncryptedTypesFromNigori(nigori);
+ EXPECT_TRUE(encrypted_types.Equals(cryptographer_.GetEncryptedTypes()));
+ EXPECT_TRUE(encrypted_types.Equals(cryptographer2.GetEncryptedTypes()));
+
+ Mock::VerifyAndClearExpectations(&observer);
+ Mock::VerifyAndClearExpectations(&observer2);
+
+ EXPECT_CALL(observer,
+ OnEncryptedTypesChanged(
+ HasModelTypes(syncable::ModelTypeSet::All()),
+ false));
+ EXPECT_CALL(observer2,
+ OnEncryptedTypesChanged(
+ HasModelTypes(syncable::ModelTypeSet::All()),
+ false));
+
+ // Set all encrypted types
+ encrypted_types = syncable::ModelTypeSet::All();
+ cryptographer_.MergeEncryptedTypesForTest(encrypted_types);
+ cryptographer_.UpdateNigoriFromEncryptedTypes(&nigori);
+ cryptographer2.UpdateEncryptedTypesFromNigori(nigori);
+ EXPECT_TRUE(encrypted_types.Equals(cryptographer_.GetEncryptedTypes()));
+ EXPECT_TRUE(encrypted_types.Equals(cryptographer2.GetEncryptedTypes()));
+
+ // Receiving an empty nigori should not reset any encrypted types or trigger
+ // an observer notification.
+ Mock::VerifyAndClearExpectations(&observer);
+ nigori = sync_pb::NigoriSpecifics();
+ cryptographer_.UpdateEncryptedTypesFromNigori(nigori);
+ EXPECT_TRUE(encrypted_types.Equals(cryptographer_.GetEncryptedTypes()));
+}
+
+TEST_F(SyncCryptographerTest, EncryptEverythingExplicit) {
+ ModelTypeSet real_types = syncable::ModelTypeSet::All();
+ sync_pb::NigoriSpecifics specifics;
+ specifics.set_encrypt_everything(true);
+
+ StrictMock<MockObserver> observer;
+ cryptographer_.AddObserver(&observer);
+
+ EXPECT_CALL(observer,
+ OnEncryptedTypesChanged(
+ HasModelTypes(syncable::ModelTypeSet::All()), true));
+
+ EXPECT_FALSE(cryptographer_.encrypt_everything());
+ ModelTypeSet encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ if (iter.Get() == syncable::PASSWORDS || iter.Get() == syncable::NIGORI)
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ else
+ EXPECT_FALSE(encrypted_types.Has(iter.Get()));
+ }
+
+ cryptographer_.UpdateEncryptedTypesFromNigori(specifics);
+
+ EXPECT_TRUE(cryptographer_.encrypt_everything());
+ encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ }
+
+ // Shouldn't trigger another notification.
+ specifics.set_encrypt_everything(true);
+
+ cryptographer_.RemoveObserver(&observer);
+}
+
+TEST_F(SyncCryptographerTest, EncryptEverythingImplicit) {
+ ModelTypeSet real_types = syncable::ModelTypeSet::All();
+ sync_pb::NigoriSpecifics specifics;
+ specifics.set_encrypt_bookmarks(true); // Non-passwords = encrypt everything
+
+ StrictMock<MockObserver> observer;
+ cryptographer_.AddObserver(&observer);
+
+ EXPECT_CALL(observer,
+ OnEncryptedTypesChanged(
+ HasModelTypes(syncable::ModelTypeSet::All()), true));
+
+ EXPECT_FALSE(cryptographer_.encrypt_everything());
+ ModelTypeSet encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ if (iter.Get() == syncable::PASSWORDS || iter.Get() == syncable::NIGORI)
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ else
+ EXPECT_FALSE(encrypted_types.Has(iter.Get()));
+ }
+
+ cryptographer_.UpdateEncryptedTypesFromNigori(specifics);
+
+ EXPECT_TRUE(cryptographer_.encrypt_everything());
+ encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ }
+
+ // Shouldn't trigger another notification.
+ specifics.set_encrypt_everything(true);
+
+ cryptographer_.RemoveObserver(&observer);
+}
+
+TEST_F(SyncCryptographerTest, UnknownSensitiveTypes) {
+ ModelTypeSet real_types = syncable::ModelTypeSet::All();
+ sync_pb::NigoriSpecifics specifics;
+ // Explicitly setting encrypt everything should override logic for implicit
+ // encrypt everything.
+ specifics.set_encrypt_everything(false);
+ specifics.set_encrypt_bookmarks(true);
+
+ StrictMock<MockObserver> observer;
+ cryptographer_.AddObserver(&observer);
+
+ syncable::ModelTypeSet expected_encrypted_types =
+ Cryptographer::SensitiveTypes();
+ expected_encrypted_types.Put(syncable::BOOKMARKS);
+
+ EXPECT_CALL(observer,
+ OnEncryptedTypesChanged(
+ HasModelTypes(expected_encrypted_types), false));
+
+ EXPECT_FALSE(cryptographer_.encrypt_everything());
+ ModelTypeSet encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ if (iter.Get() == syncable::PASSWORDS || iter.Get() == syncable::NIGORI)
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ else
+ EXPECT_FALSE(encrypted_types.Has(iter.Get()));
+ }
+
+ cryptographer_.UpdateEncryptedTypesFromNigori(specifics);
+
+ EXPECT_FALSE(cryptographer_.encrypt_everything());
+ encrypted_types = cryptographer_.GetEncryptedTypes();
+ for (ModelTypeSet::Iterator iter = real_types.First();
+ iter.Good(); iter.Inc()) {
+ if (iter.Get() == syncable::PASSWORDS ||
+ iter.Get() == syncable::NIGORI ||
+ iter.Get() == syncable::BOOKMARKS)
+ EXPECT_TRUE(encrypted_types.Has(iter.Get()));
+ else
+ EXPECT_FALSE(encrypted_types.Has(iter.Get()));
+ }
+
+ cryptographer_.RemoveObserver(&observer);
+}
+
+} // namespace browser_sync
diff --git a/sync/util/data_encryption_win.cc b/sync/util/data_encryption_win.cc
new file mode 100644
index 0000000..bdc1e9d
--- /dev/null
+++ b/sync/util/data_encryption_win.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// NOTE: this file is Windows specific.
+
+#include "sync/util/data_encryption_win.h"
+
+#include <windows.h>
+#include <wincrypt.h>
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+
+#pragma comment(lib, "crypt32.lib")
+
+// TODO(akalin): Merge this with similar code in
+// chrome/browser/password_manager/encryptor_win.cc. Preferably, all
+// this stuff would live in crypto/.
+
+using std::string;
+using std::vector;
+
+vector<uint8> EncryptData(const string& data) {
+ DATA_BLOB unencrypted_data = { 0 };
+ unencrypted_data.pbData = (BYTE*)(data.data());
+ unencrypted_data.cbData = data.size();
+ DATA_BLOB encrypted_data = { 0 };
+
+ if (!CryptProtectData(&unencrypted_data, L"", NULL, NULL, NULL, 0,
+ &encrypted_data))
+ LOG(ERROR) << "Encryption fails: " << data;
+
+ vector<uint8> result(encrypted_data.pbData,
+ encrypted_data.pbData + encrypted_data.cbData);
+ LocalFree(encrypted_data.pbData);
+ return result;
+}
+
+bool DecryptData(const vector<uint8>& in_data, string* out_data) {
+ DATA_BLOB encrypted_data, decrypted_data;
+ encrypted_data.pbData =
+ (in_data.empty() ? NULL : const_cast<BYTE*>(&in_data[0]));
+ encrypted_data.cbData = in_data.size();
+ LPWSTR descrip = L"";
+
+ if (!CryptUnprotectData(&encrypted_data, &descrip, NULL, NULL, NULL, 0,
+ &decrypted_data)) {
+ LOG(ERROR) << "Decryption fails: ";
+ return false;
+ } else {
+ out_data->assign(reinterpret_cast<const char*>(decrypted_data.pbData),
+ decrypted_data.cbData);
+ LocalFree(decrypted_data.pbData);
+ return true;
+ }
+}
diff --git a/sync/util/data_encryption_win.h b/sync/util/data_encryption_win.h
new file mode 100644
index 0000000..f671ad8
--- /dev/null
+++ b/sync/util/data_encryption_win.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_DATA_ENCRYPTION_WIN_H_
+#define SYNC_UTIL_DATA_ENCRYPTION_WIN_H_
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+
+using std::string;
+using std::vector;
+
+vector<uint8> EncryptData(const string& data);
+bool DecryptData(const vector<uint8>& in_data, string* out_data);
+
+#endif // SYNC_UTIL_DATA_ENCRYPTION_WIN_H_
diff --git a/sync/util/data_encryption_win_unittest.cc b/sync/util/data_encryption_win_unittest.cc
new file mode 100644
index 0000000..4fa5250
--- /dev/null
+++ b/sync/util/data_encryption_win_unittest.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/data_encryption_win.h"
+
+#include <string>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+using std::vector;
+
+namespace {
+
+TEST(SyncDataEncryption, TestEncryptDecryptOfSampleString) {
+ vector<uint8> example(EncryptData("example"));
+ ASSERT_FALSE(example.empty());
+ string result;
+ ASSERT_TRUE(DecryptData(example, &result));
+ ASSERT_TRUE(result == "example");
+}
+
+TEST(SyncDataEncryption, TestDecryptFailure) {
+ vector<uint8> example(0, 0);
+ string result;
+ ASSERT_FALSE(DecryptData(example, &result));
+}
+
+} // namespace
diff --git a/sync/util/data_type_histogram.h b/sync/util/data_type_histogram.h
new file mode 100644
index 0000000..72ab1b5
--- /dev/null
+++ b/sync/util/data_type_histogram.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
+#define SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
+#pragma once
+
+#include "base/metrics/histogram.h"
+#include "base/time.h"
+#include "sync/syncable/model_type.h"
+
+// For now, this just implements UMA_HISTOGRAM_LONG_TIMES. This can be adjusted
+// if we feel the min, max, or bucket count are not appropriate.
+#define SYNC_FREQ_HISTOGRAM(name, time) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, time, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromHours(1), 50)
+
+// Helper macro for datatype specific histograms. For each datatype, invokes
+// a pre-defined PER_DATA_TYPE_MACRO(type_str), where |type_str| is the string
+// version of the datatype.
+//
+// Example usage (ignoring newlines necessary for multiline macro):
+// std::vector<syncable::ModelType> types = GetEntryTypes();
+// for (int i = 0; i < types.size(); ++i) {
+// #define PER_DATA_TYPE_MACRO(type_str)
+// UMA_HISTOGRAM_ENUMERATION("Sync." type_str "StartFailures",
+// error, max_error);
+// SYNC_DATA_TYPE_HISTOGRAM(types[i]);
+// #undef PER_DATA_TYPE_MACRO
+// }
+//
+// TODO(zea): Once visual studio supports proper variadic argument replacement
+// in macros, pass in the histogram method directly as a parameter.
+// See http://connect.microsoft.com/VisualStudio/feedback/details/380090/
+// variadic-macro-replacement#details
+#define SYNC_DATA_TYPE_HISTOGRAM(datatype) \
+ do { \
+ switch (datatype) { \
+ case syncable::BOOKMARKS: \
+ PER_DATA_TYPE_MACRO("Bookmarks"); \
+ break; \
+ case syncable::PREFERENCES: \
+ PER_DATA_TYPE_MACRO("Preferences"); \
+ break; \
+ case syncable::PASSWORDS: \
+ PER_DATA_TYPE_MACRO("Passwords"); \
+ break; \
+ case syncable::AUTOFILL: \
+ PER_DATA_TYPE_MACRO("Autofill"); \
+ break; \
+ case syncable::AUTOFILL_PROFILE: \
+ PER_DATA_TYPE_MACRO("AutofillProfiles"); \
+ break; \
+ case syncable::THEMES: \
+ PER_DATA_TYPE_MACRO("Themes"); \
+ break; \
+ case syncable::TYPED_URLS: \
+ PER_DATA_TYPE_MACRO("TypedUrls"); \
+ break; \
+ case syncable::EXTENSIONS: \
+ PER_DATA_TYPE_MACRO("Extensions"); \
+ break; \
+ case syncable::NIGORI: \
+ PER_DATA_TYPE_MACRO("Nigori"); \
+ break; \
+ case syncable::SEARCH_ENGINES: \
+ PER_DATA_TYPE_MACRO("SearchEngines"); \
+ break; \
+ case syncable::SESSIONS: \
+ PER_DATA_TYPE_MACRO("Sessions"); \
+ break; \
+ case syncable::APPS: \
+ PER_DATA_TYPE_MACRO("Apps"); \
+ break; \
+ case syncable::APP_SETTINGS: \
+ PER_DATA_TYPE_MACRO("AppSettings"); \
+ break; \
+ case syncable::EXTENSION_SETTINGS: \
+ PER_DATA_TYPE_MACRO("ExtensionSettings"); \
+ break; \
+ case syncable::APP_NOTIFICATIONS: \
+ PER_DATA_TYPE_MACRO("AppNotifications"); \
+ break; \
+ default: \
+ NOTREACHED() << "Unknown datatype " \
+ << syncable::ModelTypeToString(datatype); \
+ } \
+ } while (0)
+
+#endif // SYNC_UTIL_DATA_TYPE_HISTOGRAM_H_
diff --git a/sync/util/data_type_histogram_unittest.cc b/sync/util/data_type_histogram_unittest.cc
new file mode 100644
index 0000000..676ebb7
--- /dev/null
+++ b/sync/util/data_type_histogram_unittest.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/data_type_histogram.h"
+
+#include "base/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+class DataTypeHistogramTest : public testing::Test {
+};
+
+// Create a histogram of type HISTOGRAM_COUNTS for each model type. Nothing
+// should break.
+TEST(DataTypeHistogramTest, BasicCount) {
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i <= syncable::LAST_REAL_MODEL_TYPE; ++i) {
+ syncable::ModelType type = syncable::ModelTypeFromInt(i);
+#define PER_DATA_TYPE_MACRO(type_str) \
+ HISTOGRAM_COUNTS("Prefix" type_str "Suffix", 1);
+ SYNC_DATA_TYPE_HISTOGRAM(type);
+#undef PER_DATA_TYPE_MACRO
+ }
+}
+
+// Create a histogram of type SYNC_FREQ_HISTOGRAM for each model type. Nothing
+// should break.
+TEST(DataTypeHistogramTest, BasicFreq) {
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i <= syncable::LAST_REAL_MODEL_TYPE; ++i) {
+ syncable::ModelType type = syncable::ModelTypeFromInt(i);
+#define PER_DATA_TYPE_MACRO(type_str) \
+ SYNC_FREQ_HISTOGRAM("Prefix" type_str "Suffix", \
+ base::TimeDelta::FromSeconds(1));
+ SYNC_DATA_TYPE_HISTOGRAM(type);
+#undef PER_DATA_TYPE_MACRO
+ }
+}
+
+// Create a histogram of type UMA_HISTOGRAM_ENUMERATION for each model type.
+// Nothing should break.
+TEST(DataTypeHistogramTest, BasicEnum) {
+ enum HistTypes {
+ TYPE_1,
+ TYPE_2,
+ TYPE_COUNT,
+ };
+ for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+ i <= syncable::LAST_REAL_MODEL_TYPE; ++i) {
+ syncable::ModelType type = syncable::ModelTypeFromInt(i);
+#define PER_DATA_TYPE_MACRO(type_str) \
+ UMA_HISTOGRAM_ENUMERATION("Prefix" type_str "Suffix", \
+ (i % 2 ? TYPE_1 : TYPE_2), TYPE_COUNT);
+ SYNC_DATA_TYPE_HISTOGRAM(type);
+#undef PER_DATA_TYPE_MACRO
+ }
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/util/encryptor.h b/sync/util/encryptor.h
new file mode 100644
index 0000000..1549caf
--- /dev/null
+++ b/sync/util/encryptor.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_ENCRYPTOR_H_
+#define SYNC_UTIL_ENCRYPTOR_H_
+#pragma once
+
+#include <string>
+
+namespace browser_sync {
+
+class Encryptor {
+ public:
+ // All methods below should be thread-safe.
+ virtual bool EncryptString(const std::string& plaintext,
+ std::string* ciphertext) = 0;
+
+ virtual bool DecryptString(const std::string& ciphertext,
+ std::string* plaintext) = 0;
+
+ protected:
+ virtual ~Encryptor() {}
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_ENCRYPTOR_H_
diff --git a/sync/util/enum_set.h b/sync/util/enum_set.h
new file mode 100644
index 0000000..d860623
--- /dev/null
+++ b/sync/util/enum_set.h
@@ -0,0 +1,286 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_ENUM_SET_H_
+#define SYNC_UTIL_ENUM_SET_H_
+#pragma once
+
+#include <bitset>
+#include <cstddef>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace browser_sync {
+
+// Forward declarations needed for friend declarations.
+template <typename E, E MinEnumValue, E MaxEnumValue>
+class EnumSet;
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Union(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2);
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Intersection(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2);
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Difference(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2);
+
+// An EnumSet is a set that can hold enum values between a min and a
+// max value (inclusive of both). It's essentially a wrapper around
+// std::bitset<> with stronger type enforcement, more descriptive
+// member function names, and an iterator interface.
+//
+// If you're working with enums with a small number of possible values
+// (say, fewer than 64), you can efficiently pass around an EnumSet
+// for that enum by value.
+
+template <typename E, E MinEnumValue, E MaxEnumValue>
+class EnumSet {
+ public:
+ typedef E EnumType;
+ static const E kMinValue = MinEnumValue;
+ static const E kMaxValue = MaxEnumValue;
+ static const size_t kValueCount = kMaxValue - kMinValue + 1;
+ COMPILE_ASSERT(kMinValue < kMaxValue,
+ min_value_must_be_less_than_max_value);
+
+ private:
+ // Declaration needed by Iterator.
+ typedef std::bitset<kValueCount> EnumBitSet;
+
+ public:
+ // Iterator is a forward-only read-only iterator for EnumSet. Its
+ // interface is deliberately distinct from an STL iterator as its
+ // semantics are substantially different.
+ //
+ // Example usage:
+ //
+ // for (EnumSet<...>::Iterator it = enums.First(); it.Good(); it.Inc()) {
+ // Process(it.Get());
+ // }
+ //
+ // The iterator must not be outlived by the set. In particular, the
+ // following is an error:
+ //
+ // EnumSet<...> SomeFn() { ... }
+ //
+ // /* ERROR */
+// for (EnumSet<...>::Iterator it = SomeFn().First(); ...
+ //
+ // Also, there are no guarantees as to what will happen if you
+ // modify an EnumSet while traversing it with an iterator.
+ class Iterator {
+ public:
+ // A default-constructed iterator can't do anything except check
+ // Good(). You need to call First() on an EnumSet to get a usable
+ // iterator.
+ Iterator() : enums_(NULL), i_(kValueCount) {}
+ ~Iterator() {}
+
+ // Copy constructor and assignment welcome.
+
+ // Returns true iff the iterator points to an EnumSet and it
+ // hasn't yet traversed the EnumSet entirely.
+ bool Good() const {
+ return enums_ && i_ < kValueCount && enums_->test(i_);
+ }
+
+ // Returns the value the iterator currently points to. Good()
+ // must hold.
+ E Get() const {
+ CHECK(Good());
+ return FromIndex(i_);
+ }
+
+ // Moves the iterator to the next value in the EnumSet. Good()
+ // must hold. Takes linear time.
+ void Inc() {
+ CHECK(Good());
+ i_ = FindNext(i_ + 1);
+ }
+
+ private:
+ friend Iterator EnumSet::First() const;
+
+ explicit Iterator(const EnumBitSet& enums)
+ : enums_(&enums), i_(FindNext(0)) {}
+
+ size_t FindNext(size_t i) {
+ while ((i < kValueCount) && !enums_->test(i)) {
+ ++i;
+ }
+ return i;
+ }
+
+ const EnumBitSet* enums_;
+ size_t i_;
+ };
+
+ // You can construct an EnumSet with 0, 1, 2, or 3 initial values.
+
+ EnumSet() {}
+
+ explicit EnumSet(E value) {
+ Put(value);
+ }
+
+ EnumSet(E value1, E value2) {
+ Put(value1);
+ Put(value2);
+ }
+
+ EnumSet(E value1, E value2, E value3) {
+ Put(value1);
+ Put(value2);
+ Put(value3);
+ }
+
+ // Returns an EnumSet with all possible values.
+ static EnumSet All() {
+ EnumBitSet enums;
+ enums.set();
+ return EnumSet(enums);
+ }
+
+ ~EnumSet() {}
+
+ // Copy constructor and assignment welcome.
+
+ // Set operations. Put, Retain, and Remove are basically
+ // self-mutating versions of Union, Intersection, and Difference
+ // (defined below).
+
+ // Adds the given value (which must be in range) to our set.
+ void Put(E value) {
+ enums_.set(ToIndex(value));
+ }
+
+ // Adds all values in the given set to our set.
+ void PutAll(EnumSet other) {
+ enums_ |= other.enums_;
+ }
+
+ // There's no real need for a Retain(E) member function.
+
+ // Removes all values not in the given set from our set.
+ void RetainAll(EnumSet other) {
+ enums_ &= other.enums_;
+ }
+
+ // If the given value is in range, removes it from our set.
+ void Remove(E value) {
+ if (InRange(value)) {
+ enums_.reset(ToIndex(value));
+ }
+ }
+
+ // Removes all values in the given set from our set.
+ void RemoveAll(EnumSet other) {
+ enums_ &= ~other.enums_;
+ }
+
+ // Removes all values from our set.
+ void Clear() {
+ enums_.reset();
+ }
+
+ // Returns true iff the given value is in range and a member of our
+ // set.
+ bool Has(E value) const {
+ return InRange(value) && enums_.test(ToIndex(value));
+ }
+
+ // Returns true iff the given set is a subset of our set.
+ bool HasAll(EnumSet other) const {
+ return (enums_ & other.enums_) == other.enums_;
+ }
+
+ // Returns true iff our set and the given set contain exactly the
+ // same values.
+ bool Equals(const EnumSet& other) const {
+ return enums_ == other.enums_;
+ }
+
+ // Returns true iff our set is empty.
+ bool Empty() const {
+ return !enums_.any();
+ }
+
+ // Returns how many values our set has.
+ size_t Size() const {
+ return enums_.count();
+ }
+
+ // Returns an iterator pointing to the first element (if any).
+ Iterator First() const {
+ return Iterator(enums_);
+ }
+
+ private:
+ friend EnumSet Union<E, MinEnumValue, MaxEnumValue>(
+ EnumSet set1, EnumSet set2);
+ friend EnumSet Intersection<E, MinEnumValue, MaxEnumValue>(
+ EnumSet set1, EnumSet set2);
+ friend EnumSet Difference<E, MinEnumValue, MaxEnumValue>(
+ EnumSet set1, EnumSet set2);
+
+ explicit EnumSet(EnumBitSet enums) : enums_(enums) {}
+
+ static bool InRange(E value) {
+ return (value >= MinEnumValue) && (value <= MaxEnumValue);
+ }
+
+ // Converts a value to/from an index into |enums_|.
+
+ static size_t ToIndex(E value) {
+ DCHECK_GE(value, MinEnumValue);
+ DCHECK_LE(value, MaxEnumValue);
+ return value - MinEnumValue;
+ }
+
+ static E FromIndex(size_t i) {
+ DCHECK_LT(i, kValueCount);
+ return static_cast<E>(MinEnumValue + i);
+ }
+
+ EnumBitSet enums_;
+};
+
+template <typename E, E MinEnumValue, E MaxEnumValue>
+const E EnumSet<E, MinEnumValue, MaxEnumValue>::kMinValue;
+
+template <typename E, E MinEnumValue, E MaxEnumValue>
+const E EnumSet<E, MinEnumValue, MaxEnumValue>::kMaxValue;
+
+template <typename E, E MinEnumValue, E MaxEnumValue>
+const size_t EnumSet<E, MinEnumValue, MaxEnumValue>::kValueCount;
+
+// The usual set operations.
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Union(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2) {
+ return EnumSet<E, Min, Max>(set1.enums_ | set2.enums_);
+}
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Intersection(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2) {
+ return EnumSet<E, Min, Max>(set1.enums_ & set2.enums_);
+}
+
+template <typename E, E Min, E Max>
+EnumSet<E, Min, Max> Difference(EnumSet<E, Min, Max> set1,
+ EnumSet<E, Min, Max> set2) {
+ return EnumSet<E, Min, Max>(set1.enums_ & ~set2.enums_);
+}
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_ENUM_SET_H_
diff --git a/sync/util/enum_set_unittest.cc b/sync/util/enum_set_unittest.cc
new file mode 100644
index 0000000..8c32c0a
--- /dev/null
+++ b/sync/util/enum_set_unittest.cc
@@ -0,0 +1,195 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/enum_set.h"
+
+#include "base/basictypes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+enum TestEnum {
+ TEST_0,
+ TEST_MIN = TEST_0,
+ TEST_1,
+ TEST_2,
+ TEST_3,
+ TEST_4,
+ TEST_MAX = TEST_4,
+ TEST_5
+};
+
+typedef EnumSet<TestEnum, TEST_MIN, TEST_MAX> TestEnumSet;
+
+class EnumSetTest : public ::testing::Test {};
+
+TEST_F(EnumSetTest, ClassConstants) {
+ TestEnumSet enums;
+ EXPECT_EQ(TEST_MIN, TestEnumSet::kMinValue);
+ EXPECT_EQ(TEST_MAX, TestEnumSet::kMaxValue);
+ EXPECT_EQ(static_cast<size_t>(5), TestEnumSet::kValueCount);
+}
+
+TEST_F(EnumSetTest, DefaultConstructor) {
+ const TestEnumSet enums;
+ EXPECT_TRUE(enums.Empty());
+ EXPECT_EQ(static_cast<size_t>(0), enums.Size());
+ EXPECT_FALSE(enums.Has(TEST_0));
+ EXPECT_FALSE(enums.Has(TEST_1));
+ EXPECT_FALSE(enums.Has(TEST_2));
+ EXPECT_FALSE(enums.Has(TEST_3));
+ EXPECT_FALSE(enums.Has(TEST_4));
+}
+
+TEST_F(EnumSetTest, OneArgConstructor) {
+ const TestEnumSet enums(TEST_3);
+ EXPECT_FALSE(enums.Empty());
+ EXPECT_EQ(static_cast<size_t>(1), enums.Size());
+ EXPECT_FALSE(enums.Has(TEST_0));
+ EXPECT_FALSE(enums.Has(TEST_1));
+ EXPECT_FALSE(enums.Has(TEST_2));
+ EXPECT_TRUE(enums.Has(TEST_3));
+ EXPECT_FALSE(enums.Has(TEST_4));
+}
+
+TEST_F(EnumSetTest, TwoArgConstructor) {
+ const TestEnumSet enums(TEST_3, TEST_1);
+ EXPECT_FALSE(enums.Empty());
+ EXPECT_EQ(static_cast<size_t>(2), enums.Size());
+ EXPECT_FALSE(enums.Has(TEST_0));
+ EXPECT_TRUE(enums.Has(TEST_1));
+ EXPECT_FALSE(enums.Has(TEST_2));
+ EXPECT_TRUE(enums.Has(TEST_3));
+ EXPECT_FALSE(enums.Has(TEST_4));
+}
+
+TEST_F(EnumSetTest, ThreeArgConstructor) {
+ const TestEnumSet enums(TEST_3, TEST_1, TEST_0);
+ EXPECT_FALSE(enums.Empty());
+ EXPECT_EQ(static_cast<size_t>(3), enums.Size());
+ EXPECT_TRUE(enums.Has(TEST_0));
+ EXPECT_TRUE(enums.Has(TEST_1));
+ EXPECT_FALSE(enums.Has(TEST_2));
+ EXPECT_TRUE(enums.Has(TEST_3));
+ EXPECT_FALSE(enums.Has(TEST_4));
+}
+
+TEST_F(EnumSetTest, All) {
+ const TestEnumSet enums(TestEnumSet::All());
+ EXPECT_FALSE(enums.Empty());
+ EXPECT_EQ(static_cast<size_t>(5), enums.Size());
+ EXPECT_TRUE(enums.Has(TEST_0));
+ EXPECT_TRUE(enums.Has(TEST_1));
+ EXPECT_TRUE(enums.Has(TEST_2));
+ EXPECT_TRUE(enums.Has(TEST_3));
+ EXPECT_TRUE(enums.Has(TEST_4));
+}
+
+TEST_F(EnumSetTest, Put) {
+ TestEnumSet enums(TEST_3);
+ enums.Put(TEST_2);
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3)));
+ enums.Put(TEST_4);
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
+}
+
+TEST_F(EnumSetTest, PutAll) {
+ TestEnumSet enums(TEST_3, TEST_4);
+ enums.PutAll(TestEnumSet(TEST_2, TEST_3));
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
+}
+
+TEST_F(EnumSetTest, RetainAll) {
+ TestEnumSet enums(TEST_3, TEST_4);
+ enums.RetainAll(TestEnumSet(TEST_2, TEST_3));
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_3)));
+}
+
+TEST_F(EnumSetTest, Remove) {
+ TestEnumSet enums(TEST_3, TEST_4);
+ enums.Remove(TEST_0);
+ enums.Remove(TEST_2);
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_3, TEST_4)));
+ enums.Remove(TEST_3);
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_4)));
+ enums.Remove(TEST_4);
+ enums.Remove(TEST_5);
+ EXPECT_TRUE(enums.Empty());
+}
+
+TEST_F(EnumSetTest, RemoveAll) {
+ TestEnumSet enums(TEST_3, TEST_4);
+ enums.RemoveAll(TestEnumSet(TEST_2, TEST_3));
+ EXPECT_TRUE(enums.Equals(TestEnumSet(TEST_4)));
+}
+
+TEST_F(EnumSetTest, Clear) {
+ TestEnumSet enums(TEST_3, TEST_4);
+ enums.Clear();
+ EXPECT_TRUE(enums.Empty());
+}
+
+TEST_F(EnumSetTest, Has) {
+ const TestEnumSet enums(TEST_3, TEST_4);
+ EXPECT_FALSE(enums.Has(TEST_0));
+ EXPECT_FALSE(enums.Has(TEST_1));
+ EXPECT_FALSE(enums.Has(TEST_2));
+ EXPECT_TRUE(enums.Has(TEST_3));
+ EXPECT_TRUE(enums.Has(TEST_4));
+ EXPECT_FALSE(enums.Has(TEST_5));
+}
+
+TEST_F(EnumSetTest, HasAll) {
+ const TestEnumSet enums1(TEST_3, TEST_4);
+ const TestEnumSet enums2(TEST_2, TEST_3);
+ const TestEnumSet enums3 = Union(enums1, enums2);
+ EXPECT_TRUE(enums1.HasAll(enums1));
+ EXPECT_FALSE(enums1.HasAll(enums2));
+ EXPECT_FALSE(enums1.HasAll(enums3));
+
+ EXPECT_FALSE(enums2.HasAll(enums1));
+ EXPECT_TRUE(enums2.HasAll(enums2));
+ EXPECT_FALSE(enums2.HasAll(enums3));
+
+ EXPECT_TRUE(enums3.HasAll(enums1));
+ EXPECT_TRUE(enums3.HasAll(enums2));
+ EXPECT_TRUE(enums3.HasAll(enums3));
+}
+
+TEST_F(EnumSetTest, Iterators) {
+ const TestEnumSet enums1(TEST_3, TEST_4);
+ TestEnumSet enums2;
+ for (TestEnumSet::Iterator it = enums1.First(); it.Good(); it.Inc()) {
+ enums2.Put(it.Get());
+ }
+ EXPECT_TRUE(enums1.Equals(enums2));
+}
+
+TEST_F(EnumSetTest, Union) {
+ const TestEnumSet enums1(TEST_3, TEST_4);
+ const TestEnumSet enums2(TEST_2, TEST_3);
+ const TestEnumSet enums3 = Union(enums1, enums2);
+
+ EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_2, TEST_3, TEST_4)));
+}
+
+TEST_F(EnumSetTest, Intersection) {
+ const TestEnumSet enums1(TEST_3, TEST_4);
+ const TestEnumSet enums2(TEST_2, TEST_3);
+ const TestEnumSet enums3 = Intersection(enums1, enums2);
+
+ EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_3)));
+}
+
+TEST_F(EnumSetTest, Difference) {
+ const TestEnumSet enums1(TEST_3, TEST_4);
+ const TestEnumSet enums2(TEST_2, TEST_3);
+ const TestEnumSet enums3 = Difference(enums1, enums2);
+
+ EXPECT_TRUE(enums3.Equals(TestEnumSet(TEST_4)));
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/util/extensions_activity_monitor.cc b/sync/util/extensions_activity_monitor.cc
new file mode 100644
index 0000000..0414823
--- /dev/null
+++ b/sync/util/extensions_activity_monitor.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/extensions_activity_monitor.h"
+
+namespace browser_sync {
+
+ExtensionsActivityMonitor::Record::Record()
+ : bookmark_write_count(0U) {}
+
+ExtensionsActivityMonitor::Record::~Record() {}
+
+ExtensionsActivityMonitor::~ExtensionsActivityMonitor() {}
+
+} // namespace browser_sync
diff --git a/sync/util/extensions_activity_monitor.h b/sync/util/extensions_activity_monitor.h
new file mode 100644
index 0000000..9a5fdef
--- /dev/null
+++ b/sync/util/extensions_activity_monitor.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_EXTENSIONS_ACTIVITY_MONITOR_H_
+#define SYNC_UTIL_EXTENSIONS_ACTIVITY_MONITOR_H_
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "base/basictypes.h"
+
+namespace browser_sync {
+
+// An interface to monitor usage of extensions APIs to send to sync
+// servers, with the ability to purge data once sync servers have
+// acknowledged it (successful commit response).
+//
+// All abstract methods are called from the sync thread.
+class ExtensionsActivityMonitor {
+ public:
+ // A data record of activity performed by extension |extension_id|.
+ struct Record {
+ Record();
+ ~Record();
+
+ // The human-readable ID identifying the extension responsible
+ // for the activity reported in this Record.
+ std::string extension_id;
+
+ // How many times the extension successfully invoked a write
+ // operation through the bookmarks API since the last CommitMessage.
+ uint32 bookmark_write_count;
+ };
+
+ typedef std::map<std::string, Record> Records;
+
+ // Fill |buffer| with all current records and then clear the
+ // internal records.
+ virtual void GetAndClearRecords(Records* buffer) = 0;
+
+ // Merge |records| with the current set of records, adding the
+ // bookmark write counts for common Records.
+ virtual void PutRecords(const Records& records) = 0;
+
+ protected:
+ virtual ~ExtensionsActivityMonitor();
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_EXTENSIONS_ACTIVITY_MONITOR_H_
diff --git a/sync/util/get_session_name.cc b/sync/util/get_session_name.cc
new file mode 100644
index 0000000..e37c12f
--- /dev/null
+++ b/sync/util/get_session_name.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/get_session_name.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/sys_info.h"
+#include "base/task_runner.h"
+
+#if defined(OS_LINUX)
+#include "base/linux_util.h"
+#elif defined(OS_MACOSX)
+#include "sync/util/get_session_name_mac.h"
+#elif defined(OS_WIN)
+#include "sync/util/get_session_name_win.h"
+#endif
+
+namespace browser_sync {
+
+namespace {
+
+std::string GetSessionNameSynchronously() {
+ std::string session_name;
+#if defined(OS_CHROMEOS)
+ session_name = "Chromebook";
+#elif defined(OS_LINUX)
+ session_name = base::GetLinuxDistro();
+#elif defined(OS_MACOSX)
+ session_name = internal::GetHardwareModelName();
+#elif defined(OS_WIN)
+ session_name = internal::GetComputerName();
+#endif
+
+ if (session_name == "Unknown" || session_name.empty())
+ session_name = base::SysInfo::OperatingSystemName();
+
+ return session_name;
+}
+
+void FillSessionName(std::string* session_name) {
+ *session_name = GetSessionNameSynchronously();
+}
+
+void OnSessionNameFilled(
+ const base::Callback<void(const std::string&)>& done_callback,
+ std::string* session_name) {
+ done_callback.Run(*session_name);
+}
+
+} // namespace
+
+void GetSessionName(
+ const scoped_refptr<base::TaskRunner>& task_runner,
+ const base::Callback<void(const std::string&)>& done_callback) {
+ std::string* session_name = new std::string();
+ task_runner->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&FillSessionName,
+ base::Unretained(session_name)),
+ base::Bind(&OnSessionNameFilled,
+ done_callback,
+ base::Owned(session_name)));
+}
+
+std::string GetSessionNameSynchronouslyForTesting() {
+ return GetSessionNameSynchronously();
+}
+
+} // namespace browser_sync
diff --git a/sync/util/get_session_name.h b/sync/util/get_session_name.h
new file mode 100644
index 0000000..dc53295
--- /dev/null
+++ b/sync/util/get_session_name.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_GET_SESSION_NAME_H_
+#define SYNC_UTIL_GET_SESSION_NAME_H_
+#pragma once
+
+#include <string>
+
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+class TaskRunner;
+} // namespace base
+
+namespace browser_sync {
+
+void GetSessionName(
+ const scoped_refptr<base::TaskRunner>& task_runner,
+ const base::Callback<void(const std::string&)>& done_callback);
+
+std::string GetSessionNameSynchronouslyForTesting();
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_GET_SESSION_NAME_H_
diff --git a/sync/util/get_session_name_mac.h b/sync/util/get_session_name_mac.h
new file mode 100644
index 0000000..771f48f
--- /dev/null
+++ b/sync/util/get_session_name_mac.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_GET_SESSION_NAME_MAC_H_
+#define SYNC_UTIL_GET_SESSION_NAME_MAC_H_
+#pragma once
+
+#include <string>
+
+namespace browser_sync {
+namespace internal {
+
+// Returns the Hardware model name, without trailing numbers, if
+// possible. See http://www.cocoadev.com/index.pl?MacintoshModels for
+// an example list of models. If an error occurs trying to read the
+// model, this simply returns "Unknown".
+std::string GetHardwareModelName();
+
+} // namespace internal
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_GET_SESSION_NAME_MAC_H_
diff --git a/sync/util/get_session_name_mac.mm b/sync/util/get_session_name_mac.mm
new file mode 100644
index 0000000..4b46139
--- /dev/null
+++ b/sync/util/get_session_name_mac.mm
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/get_session_name_mac.h"
+
+#import <Foundation/Foundation.h>
+#import <SystemConfiguration/SCDynamicStoreCopySpecific.h>
+#include <sys/sysctl.h> // sysctlbyname()
+
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/memory/scoped_nsobject.h"
+#include "base/string_util.h"
+#include "base/sys_info.h"
+#include "base/sys_string_conversions.h"
+
+@interface NSHost(SnowLeopardAPI)
+- (NSString*)localizedName;
+@end
+
+namespace browser_sync {
+namespace internal {
+
+std::string GetHardwareModelName() {
+ NSHost* myHost = [NSHost currentHost];
+ if ([myHost respondsToSelector:@selector(localizedName)])
+ return base::SysNSStringToUTF8([myHost localizedName]);
+
+ // Fallback for 10.5
+ scoped_nsobject<NSString> computerName(base::mac::CFToNSCast(
+ SCDynamicStoreCopyComputerName(NULL, NULL)));
+ if (computerName.get() != NULL)
+ return base::SysNSStringToUTF8(computerName.get());
+
+ // If all else fails, return to using a slightly nicer version of the
+ // hardware model.
+ char modelBuffer[256];
+ size_t length = sizeof(modelBuffer);
+ if (!sysctlbyname("hw.model", modelBuffer, &length, NULL, 0)) {
+ for (size_t i = 0; i < length; i++) {
+ if (IsAsciiDigit(modelBuffer[i]))
+ return std::string(modelBuffer, 0, i);
+ }
+ return std::string(modelBuffer, 0, length);
+ }
+ return "Unknown";
+}
+
+} // namespace internal
+} // namespace browser_sync
diff --git a/sync/util/get_session_name_unittest.cc b/sync/util/get_session_name_unittest.cc
new file mode 100644
index 0000000..b335cd0
--- /dev/null
+++ b/sync/util/get_session_name_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/message_loop.h"
+#include "sync/util/get_session_name.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+namespace {
+
+class GetSessionNameTest : public ::testing::Test {
+ public:
+ void SetSessionNameAndQuit(const std::string& session_name) {
+ session_name_ = session_name;
+ message_loop_.Quit();
+ }
+
+ protected:
+ MessageLoop message_loop_;
+ std::string session_name_;
+};
+
+// Call GetSessionNameSynchronouslyForTesting and make sure its return
+// value looks sane.
+TEST_F(GetSessionNameTest, GetSessionNameSynchronously) {
+ const std::string& session_name = GetSessionNameSynchronouslyForTesting();
+ EXPECT_FALSE(session_name.empty());
+}
+
+// Calls GetSessionName and runs the message loop until it comes back
+// with a session name. Makes sure the returned session name is equal
+// to the return value of GetSessionNameSynchronouslyForTesting().
+TEST_F(GetSessionNameTest, GetSessionName) {
+ GetSessionName(message_loop_.message_loop_proxy(),
+ base::Bind(&GetSessionNameTest::SetSessionNameAndQuit,
+ base::Unretained(this)));
+ message_loop_.Run();
+ EXPECT_EQ(session_name_, GetSessionNameSynchronouslyForTesting());
+}
+
+} // namespace
+
+} // namespace browser_sync
diff --git a/sync/util/get_session_name_win.cc b/sync/util/get_session_name_win.cc
new file mode 100644
index 0000000..499930b
--- /dev/null
+++ b/sync/util/get_session_name_win.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/get_session_name_win.h"
+
+#include <windows.h>
+
+namespace browser_sync {
+namespace internal {
+
+std::string GetComputerName() {
+ char computer_name[MAX_COMPUTERNAME_LENGTH + 1];
+ DWORD size = sizeof(computer_name);
+ if (GetComputerNameA(computer_name, &size))
+ return computer_name;
+ return std::string();
+}
+
+} // namespace internal
+} // namespace browser_sync
diff --git a/sync/util/get_session_name_win.h b/sync/util/get_session_name_win.h
new file mode 100644
index 0000000..1d6b1eb
--- /dev/null
+++ b/sync/util/get_session_name_win.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_GET_SESSION_NAME_WIN_H_
+#define SYNC_UTIL_GET_SESSION_NAME_WIN_H_
+#pragma once
+
+#include <string>
+
+namespace browser_sync {
+namespace internal {
+
+std::string GetComputerName();
+
+} // namespace internal
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_GET_SESSION_NAME_WIN_H_
diff --git a/sync/util/immutable.h b/sync/util/immutable.h
new file mode 100644
index 0000000..6624b90
--- /dev/null
+++ b/sync/util/immutable.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Immutable<T> provides an easy, cheap, and thread-safe way to pass
+// large immutable data around.
+//
+// For example, consider the following code:
+//
+// typedef std::vector<LargeObject> LargeObjectList;
+//
+// void ProcessStuff(const LargeObjectList& stuff) {
+// for (LargeObjectList::const_iterator it = stuff.begin();
+// it != stuff.end(); ++it) {
+// ... process it ...
+// }
+// }
+//
+// ...
+//
+// LargeObjectList my_stuff;
+// ... fill my_stuff with lots of LargeObjects ...
+// some_loop->PostTask(FROM_HERE, base::Bind(&ProcessStuff, my_stuff));
+//
+// The last line incurs the cost of copying my_stuff, which is
+// undesirable. Here's the above code re-written using Immutable<T>:
+//
+// void ProcessStuff(
+// const browser_sync::Immutable<LargeObjectList>& stuff) {
+// for (LargeObjectList::const_iterator it = stuff.Get().begin();
+// it != stuff.Get().end(); ++it) {
+// ... process it ...
+// }
+// }
+//
+// ...
+//
+// LargeObjectList my_stuff;
+// ... fill my_stuff with lots of LargeObjects ...
+// some_loop->PostTask(
+// FROM_HERE, base::Bind(&ProcessStuff, MakeImmutable(&my_stuff)));
+//
+// The last line, which resets my_stuff to a default-initialized
+// state, incurs only the cost of a swap of LargeObjectLists, which is
+// O(1) for most STL container implementations. The data in my_stuff
+// is ref-counted (thread-safely), so it is freed as soon as
+// ProcessStuff is finished.
+//
+// NOTE: By default, Immutable<T> relies on ADL
+// (http://en.wikipedia.org/wiki/Argument-dependent_name_lookup) to
+// find a swap() function for T, falling back to std::swap() when
+// necessary. If you overload swap() for your type in its namespace,
+// or if you specialize std::swap() for your type, (see
+// http://stackoverflow.com/questions/11562/how-to-overload-stdswap
+// for discussion) Immutable<T> should be able to find it.
+//
+// Alternatively, you could explicitly control which swap function is
+// used by providing your own traits class or using one of the
+// pre-defined ones below. See comments on traits below for details.
+//
+// NOTE: Some complexity is necessary in order to use Immutable<T>
+// with forward-declared types. See comments on traits below for
+// details.
+
+#ifndef SYNC_UTIL_IMMUTABLE_H_
+#define SYNC_UTIL_IMMUTABLE_H_
+#pragma once
+
+// For std::swap().
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+
+namespace browser_sync {
+
+namespace internal {
+// This class is part of the Immutable implementation. DO NOT USE
+// THIS CLASS DIRECTLY YOURSELF.
+
+template <typename T, typename Traits>
+class ImmutableCore
+ : public base::RefCountedThreadSafe<ImmutableCore<T, Traits> > {
+ public:
+ // wrapper_ is always explicitly default-initialized to handle
+ // primitive types and the case where Traits::Wrapper == T.
+
+ ImmutableCore() : wrapper_() {
+ Traits::InitializeWrapper(&wrapper_);
+ }
+
+ explicit ImmutableCore(T* t) : wrapper_() {
+ Traits::InitializeWrapper(&wrapper_);
+ Traits::Swap(Traits::UnwrapMutable(&wrapper_), t);
+ }
+
+ const T& Get() const {
+ return Traits::Unwrap(wrapper_);
+ }
+
+ private:
+ ~ImmutableCore() {
+ Traits::DestroyWrapper(&wrapper_);
+ }
+ friend class base::RefCountedThreadSafe<ImmutableCore<T, Traits> >;
+
+ // This is semantically const, but we can't mark it a such as we
+ // modify it in the constructor.
+ typename Traits::Wrapper wrapper_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImmutableCore);
+};
+
+} // namespace internal
+
+// Traits usage notes
+// ------------------
+// The most common reason to use your own traits class is to provide
+// your own swap method. First, consider the pre-defined traits
+// classes HasSwapMemFn{ByRef,ByPtr} below. If neither of those work,
+// then define your own traits class inheriting from
+// DefaultImmutableTraits<YourType> (to pick up the defaults for
+// everything else) and provide your own Swap() method.
+//
+// Another reason to use your own traits class is to be able to use
+// Immutable<T> with a forward-declared type (important for protobuf
+// classes, when you want to avoid headers pulling in generated
+// headers). (This is why the Traits::Wrapper type exists; normally,
+// Traits::Wrapper is just T itself, but that needs to be changed for
+// forward-declared types.)
+//
+// For example, if you want to do this:
+//
+// my_class.h
+// ----------
+// #include ".../immutable.h"
+//
+// // Forward declaration.
+// class SomeOtherType;
+//
+// class MyClass {
+// ...
+// private:
+// // Doesn't work, as defaults traits class needs SomeOtherType's
+// // definition to be visible.
+// Immutable<SomeOtherType> foo_;
+// };
+//
+// You'll have to do this:
+//
+// my_class.h
+// ----------
+// #include ".../immutable.h"
+//
+// // Forward declaration.
+// class SomeOtherType;
+//
+// class MyClass {
+// ...
+// private:
+// struct ImmutableSomeOtherTypeTraits {
+// // scoped_ptr<SomeOtherType> won't work here, either.
+// typedef SomeOtherType* Wrapper;
+//
+// static void InitializeWrapper(Wrapper* wrapper);
+//
+// static void DestroyWrapper(Wrapper* wrapper);
+// ...
+// };
+//
+// typedef Immutable<SomeOtherType, ImmutableSomeOtherTypeTraits>
+// ImmutableSomeOtherType;
+//
+// ImmutableSomeOtherType foo_;
+// };
+//
+// my_class.cc
+// -----------
+// #include ".../some_other_type.h"
+//
+// void MyClass::ImmutableSomeOtherTypeTraits::InitializeWrapper(
+// Wrapper* wrapper) {
+// *wrapper = new SomeOtherType();
+// }
+//
+// void MyClass::ImmutableSomeOtherTypeTraits::DestroyWrapper(
+// Wrapper* wrapper) {
+// delete *wrapper;
+// }
+//
+// ...
+//
+// Also note that this incurs an additional memory allocation when you
+// create an Immutable<SomeOtherType>.
+
+template <typename T>
+struct DefaultImmutableTraits {
+ typedef T Wrapper;
+
+ static void InitializeWrapper(Wrapper* wrapper) {}
+
+ static void DestroyWrapper(Wrapper* wrapper) {}
+
+ static const T& Unwrap(const Wrapper& wrapper) { return wrapper; }
+
+ static T* UnwrapMutable(Wrapper* wrapper) { return wrapper; }
+
+ static void Swap(T* t1, T* t2) {
+ // Uses ADL (see
+ // http://en.wikipedia.org/wiki/Argument-dependent_name_lookup).
+ using std::swap;
+ swap(*t1, *t2);
+ }
+};
+
+// Most STL containers have by-reference swap() member functions,
+// although they usually already overload std::swap() to use those.
+template <typename T>
+struct HasSwapMemFnByRef : public DefaultImmutableTraits<T> {
+ static void Swap(T* t1, T* t2) {
+ t1->swap(*t2);
+ }
+};
+
+// Most Google-style objects have by-pointer Swap() member functions
+// (for example, generated protocol buffer classes).
+template <typename T>
+struct HasSwapMemFnByPtr : public DefaultImmutableTraits<T> {
+ static void Swap(T* t1, T* t2) {
+ t1->Swap(t2);
+ }
+};
+
+template <typename T, typename Traits = DefaultImmutableTraits<T> >
+class Immutable {
+ public:
+ // Puts the underlying object in a default-initialized state.
+ Immutable() : core_(new internal::ImmutableCore<T, Traits>()) {}
+
+ // Copy constructor and assignment welcome.
+
+ // Resets |t| to a default-initialized state.
+ explicit Immutable(T* t)
+ : core_(new internal::ImmutableCore<T, Traits>(t)) {}
+
+ const T& Get() const {
+ return core_->Get();
+ }
+
+ private:
+ scoped_refptr<const internal::ImmutableCore<T, Traits> > core_;
+};
+
+// Helper function to avoid having to write out template arguments.
+template <typename T>
+Immutable<T> MakeImmutable(T* t) {
+ return Immutable<T>(t);
+}
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_IMMUTABLE_H_
diff --git a/sync/util/immutable_unittest.cc b/sync/util/immutable_unittest.cc
new file mode 100644
index 0000000..aa0037b
--- /dev/null
+++ b/sync/util/immutable_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/immutable.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <deque>
+#include <list>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+// Helper class that keeps track of the token passed in at
+// construction and how many times that token is copied.
+class TokenCore : public base::RefCounted<TokenCore> {
+ public:
+ explicit TokenCore(const char* token) : token_(token), copy_count_(0) {}
+
+ const char* GetToken() const { return token_; }
+
+ void RecordCopy() { ++copy_count_; }
+
+ int GetCopyCount() const { return copy_count_; }
+
+ private:
+ friend class base::RefCounted<TokenCore>;
+
+ ~TokenCore() {}
+
+ const char* const token_;
+ int copy_count_;
+};
+
+enum SwapBehavior {
+ USE_DEFAULT_SWAP,
+ USE_FAST_SWAP_VIA_ADL,
+ USE_FAST_SWAP_VIA_SPECIALIZATION
+};
+
+const char kEmptyToken[] = "<empty token>";
+
+// Base class for various token classes, differing in swap behavior.
+template <SwapBehavior>
+class TokenBase {
+ public:
+ TokenBase() : core_(new TokenCore(kEmptyToken)) {}
+
+ explicit TokenBase(const char* token) : core_(new TokenCore(token)) {}
+
+ TokenBase(const TokenBase& other) : core_(other.core_) {
+ core_->RecordCopy();
+ }
+
+ TokenBase& operator=(const TokenBase& other) {
+ core_ = other.core_;
+ core_->RecordCopy();
+ return *this;
+ }
+
+ const char* GetToken() const {
+ return core_->GetToken();
+ }
+
+ int GetCopyCount() const {
+ return core_->GetCopyCount();
+ }
+
+ // For associative containers.
+ bool operator<(const TokenBase& other) const {
+ return std::string(GetToken()) < std::string(other.GetToken());
+ }
+
+ // STL-style swap.
+ void swap(TokenBase& other) {
+ using std::swap;
+ swap(other.core_, core_);
+ }
+
+ // Google-style swap.
+ void Swap(TokenBase* other) {
+ using std::swap;
+ swap(other->core_, core_);
+ }
+
+ private:
+ scoped_refptr<TokenCore> core_;
+};
+
+typedef TokenBase<USE_DEFAULT_SWAP> Token;
+typedef TokenBase<USE_FAST_SWAP_VIA_ADL> ADLToken;
+typedef TokenBase<USE_FAST_SWAP_VIA_SPECIALIZATION> SpecializationToken;
+
+void swap(ADLToken& t1, ADLToken& t2) {
+ t1.Swap(&t2);
+}
+
+} // namespace browser_sync
+
+// Allowed by the standard (17.4.3.1/1).
+namespace std {
+
+template <>
+void swap(browser_sync::SpecializationToken& t1,
+ browser_sync::SpecializationToken& t2) {
+ t1.Swap(&t2);
+}
+
+} // namespace
+
+namespace browser_sync {
+namespace {
+
+class ImmutableTest : public ::testing::Test {};
+
+TEST_F(ImmutableTest, Int) {
+ int x = 5;
+ Immutable<int> ix(&x);
+ EXPECT_EQ(5, ix.Get());
+ EXPECT_EQ(0, x);
+}
+
+TEST_F(ImmutableTest, IntCopy) {
+ int x = 5;
+ Immutable<int> ix = Immutable<int>(&x);
+ EXPECT_EQ(5, ix.Get());
+ EXPECT_EQ(0, x);
+}
+
+TEST_F(ImmutableTest, IntAssign) {
+ int x = 5;
+ Immutable<int> ix;
+ EXPECT_EQ(0, ix.Get());
+ ix = Immutable<int>(&x);
+ EXPECT_EQ(5, ix.Get());
+ EXPECT_EQ(0, x);
+}
+
+TEST_F(ImmutableTest, IntMakeImmutable) {
+ int x = 5;
+ Immutable<int> ix = MakeImmutable(&x);
+ EXPECT_EQ(5, ix.Get());
+ EXPECT_EQ(0, x);
+}
+
+template <typename T, typename ImmutableT>
+void RunTokenTest(const char* token, bool expect_copies) {
+ SCOPED_TRACE(token);
+ T t(token);
+ EXPECT_EQ(token, t.GetToken());
+ EXPECT_EQ(0, t.GetCopyCount());
+
+ ImmutableT immutable_t(&t);
+ EXPECT_EQ(token, immutable_t.Get().GetToken());
+ EXPECT_EQ(kEmptyToken, t.GetToken());
+ EXPECT_EQ(expect_copies, immutable_t.Get().GetCopyCount() > 0);
+ EXPECT_EQ(expect_copies, t.GetCopyCount() > 0);
+}
+
+TEST_F(ImmutableTest, Token) {
+ RunTokenTest<Token, Immutable<Token> >("Token", true /* expect_copies */);
+}
+
+TEST_F(ImmutableTest, TokenSwapMemFnByRef) {
+ RunTokenTest<Token, Immutable<Token, HasSwapMemFnByRef<Token> > >(
+ "TokenSwapMemFnByRef", false /* expect_copies */);
+}
+
+TEST_F(ImmutableTest, TokenSwapMemFnByPtr) {
+ RunTokenTest<Token, Immutable<Token, HasSwapMemFnByPtr<Token> > >(
+ "TokenSwapMemFnByPtr", false /* expect_copies */);
+}
+
+TEST_F(ImmutableTest, ADLToken) {
+ RunTokenTest<ADLToken, Immutable<ADLToken> >(
+ "ADLToken", false /* expect_copies */);
+}
+
+TEST_F(ImmutableTest, SpecializationToken) {
+ RunTokenTest<SpecializationToken, Immutable<SpecializationToken> >(
+ "SpecializationToken", false /* expect_copies */);
+}
+
+template <typename C, typename ImmutableC>
+void RunTokenContainerTest(const char* token) {
+ SCOPED_TRACE(token);
+ const Token tokens[] = { Token(), Token(token) };
+ const size_t token_count = arraysize(tokens);
+ C c(tokens, tokens + token_count);
+ const int copy_count = c.begin()->GetCopyCount();
+ EXPECT_GT(copy_count, 0);
+ for (typename C::const_iterator it = c.begin(); it != c.end(); ++it) {
+ EXPECT_EQ(copy_count, it->GetCopyCount());
+ }
+
+ // Make sure that making the container immutable doesn't incur any
+ // copies of the tokens.
+ ImmutableC immutable_c(&c);
+ EXPECT_TRUE(c.empty());
+ ASSERT_EQ(token_count, immutable_c.Get().size());
+ int i = 0;
+ for (typename C::const_iterator it = c.begin(); it != c.end(); ++it) {
+ EXPECT_EQ(tokens[i].GetToken(), it->GetToken());
+ EXPECT_EQ(copy_count, it->GetCopyCount());
+ ++i;
+ }
+}
+
+TEST_F(ImmutableTest, Vector) {
+ RunTokenContainerTest<std::vector<Token>, Immutable<std::vector<Token> > >(
+ "Vector");
+}
+
+TEST_F(ImmutableTest, VectorSwapMemFnByRef) {
+ RunTokenContainerTest<
+ std::vector<Token>,
+ Immutable<std::vector<Token>, HasSwapMemFnByRef<std::vector<Token> > > >(
+ "VectorSwapMemFnByRef");
+}
+
+TEST_F(ImmutableTest, Deque) {
+ RunTokenContainerTest<std::deque<Token>, Immutable<std::deque<Token> > >(
+ "Deque");
+}
+
+TEST_F(ImmutableTest, List) {
+ RunTokenContainerTest<std::list<Token>, Immutable<std::list<Token> > >(
+ "List");
+}
+
+TEST_F(ImmutableTest, Set) {
+ RunTokenContainerTest<std::set<Token>, Immutable<std::set<Token> > >(
+ "Set");
+}
+
+} // namespace
+} // namespace browser_sync
diff --git a/sync/util/logging.cc b/sync/util/logging.cc
new file mode 100644
index 0000000..3d2a269
--- /dev/null
+++ b/sync/util/logging.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/logging.h"
+
+#include "base/location.h"
+
+namespace browser_sync {
+
+bool VlogIsOnForLocation(const tracked_objects::Location& from_here,
+ int verbose_level) {
+ return (verbose_level <=
+ logging::GetVlogLevelHelper(
+ from_here.file_name(), ::strlen(from_here.file_name())));
+}
+
+} // namespace browser_sync
diff --git a/sync/util/logging.h b/sync/util/logging.h
new file mode 100644
index 0000000..45e901e44
--- /dev/null
+++ b/sync/util/logging.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_LOGGING_H_
+#define SYNC_UTIL_LOGGING_H_
+#pragma once
+
+#include "base/logging.h"
+
+// TODO(akalin): This probably belongs in base/ somewhere.
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace browser_sync {
+
+bool VlogIsOnForLocation(const tracked_objects::Location& from_here,
+ int verbose_level);
+
+} // namespace browser_sync
+
+#define VLOG_LOC_STREAM(from_here, verbose_level) \
+ logging::LogMessage(from_here.file_name(), from_here.line_number(), \
+ -verbose_level).stream()
+
+#define DVLOG_LOC(from_here, verbose_level) \
+ LAZY_STREAM( \
+ VLOG_LOC_STREAM(from_here, verbose_level), \
+ ::logging::DEBUG_MODE && \
+ (VLOG_IS_ON(verbose_level) || \
+ ::browser_sync::VlogIsOnForLocation(from_here, verbose_level))) \
+
+#endif // SYNC_UTIL_LOGGING_H_
diff --git a/sync/util/nigori.cc b/sync/util/nigori.cc
new file mode 100644
index 0000000..168d8eb
--- /dev/null
+++ b/sync/util/nigori.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/nigori.h"
+
+#include <sstream>
+#include <vector>
+
+#include "base/base64.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+#include "base/sys_byteorder.h"
+#include "crypto/encryptor.h"
+#include "crypto/hmac.h"
+
+using base::Base64Encode;
+using base::Base64Decode;
+using base::RandInt;
+using crypto::Encryptor;
+using crypto::HMAC;
+using crypto::SymmetricKey;
+
+namespace browser_sync {
+
+// NigoriStream simplifies the concatenation operation of the Nigori protocol.
+class NigoriStream {
+ public:
+ // Append the big-endian representation of the length of |value| with 32 bits,
+ // followed by |value| itself to the stream.
+ NigoriStream& operator<<(const std::string& value) {
+ uint32 size = htonl(value.size());
+ stream_.write((char *) &size, sizeof(uint32));
+ stream_ << value;
+ return *this;
+ }
+
+ // Append the big-endian representation of the length of |type| with 32 bits,
+ // followed by the big-endian representation of the value of |type|, with 32
+ // bits, to the stream.
+ NigoriStream& operator<<(const Nigori::Type type) {
+ uint32 size = htonl(sizeof(uint32));
+ stream_.write((char *) &size, sizeof(uint32));
+ uint32 value = htonl(type);
+ stream_.write((char *) &value, sizeof(uint32));
+ return *this;
+ }
+
+ std::string str() {
+ return stream_.str();
+ }
+
+ private:
+ std::ostringstream stream_;
+};
+
+// static
+const char Nigori::kSaltSalt[] = "saltsalt";
+
+Nigori::Nigori() {
+}
+
+Nigori::~Nigori() {
+}
+
+bool Nigori::InitByDerivation(const std::string& hostname,
+ const std::string& username,
+ const std::string& password) {
+ NigoriStream salt_password;
+ salt_password << username << hostname;
+
+ // Suser = PBKDF2(Username || Servername, "saltsalt", Nsalt, 8)
+ scoped_ptr<SymmetricKey> user_salt(SymmetricKey::DeriveKeyFromPassword(
+ SymmetricKey::HMAC_SHA1, salt_password.str(),
+ kSaltSalt,
+ kSaltIterations,
+ kSaltKeySizeInBits));
+ DCHECK(user_salt.get());
+
+ std::string raw_user_salt;
+ if (!user_salt->GetRawKey(&raw_user_salt))
+ return false;
+
+ // Kuser = PBKDF2(P, Suser, Nuser, 16)
+ user_key_.reset(SymmetricKey::DeriveKeyFromPassword(SymmetricKey::AES,
+ password, raw_user_salt, kUserIterations, kDerivedKeySizeInBits));
+ DCHECK(user_key_.get());
+
+ // Kenc = PBKDF2(P, Suser, Nenc, 16)
+ encryption_key_.reset(SymmetricKey::DeriveKeyFromPassword(SymmetricKey::AES,
+ password, raw_user_salt, kEncryptionIterations, kDerivedKeySizeInBits));
+ DCHECK(encryption_key_.get());
+
+ // Kmac = PBKDF2(P, Suser, Nmac, 16)
+ mac_key_.reset(SymmetricKey::DeriveKeyFromPassword(
+ SymmetricKey::HMAC_SHA1, password, raw_user_salt, kSigningIterations,
+ kDerivedKeySizeInBits));
+ DCHECK(mac_key_.get());
+
+ return user_key_.get() && encryption_key_.get() && mac_key_.get();
+}
+
+bool Nigori::InitByImport(const std::string& user_key,
+ const std::string& encryption_key,
+ const std::string& mac_key) {
+ user_key_.reset(SymmetricKey::Import(SymmetricKey::AES, user_key));
+ DCHECK(user_key_.get());
+
+ encryption_key_.reset(SymmetricKey::Import(SymmetricKey::AES,
+ encryption_key));
+ DCHECK(encryption_key_.get());
+
+ mac_key_.reset(SymmetricKey::Import(SymmetricKey::HMAC_SHA1, mac_key));
+ DCHECK(mac_key_.get());
+
+ return user_key_.get() && encryption_key_.get() && mac_key_.get();
+}
+
+// Permute[Kenc,Kmac](type || name)
+bool Nigori::Permute(Type type, const std::string& name,
+ std::string* permuted) const {
+ DCHECK_LT(0U, name.size());
+
+ NigoriStream plaintext;
+ plaintext << type << name;
+
+ Encryptor encryptor;
+ if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC,
+ std::string(kIvSize, 0)))
+ return false;
+
+ std::string ciphertext;
+ if (!encryptor.Encrypt(plaintext.str(), &ciphertext))
+ return false;
+
+ std::string raw_mac_key;
+ if (!mac_key_->GetRawKey(&raw_mac_key))
+ return false;
+
+ HMAC hmac(HMAC::SHA256);
+ if (!hmac.Init(raw_mac_key))
+ return false;
+
+ std::vector<unsigned char> hash(kHashSize);
+ if (!hmac.Sign(ciphertext, &hash[0], hash.size()))
+ return false;
+
+ std::string output;
+ output.assign(ciphertext);
+ output.append(hash.begin(), hash.end());
+
+ return Base64Encode(output, permuted);
+}
+
+std::string GenerateRandomString(size_t size) {
+ // TODO(albertb): Use a secure random function.
+ std::string random(size, 0);
+ for (size_t i = 0; i < size; ++i)
+ random[i] = RandInt(0, 0xff);
+ return random;
+}
+
+// Enc[Kenc,Kmac](value)
+bool Nigori::Encrypt(const std::string& value, std::string* encrypted) const {
+ if (0U >= value.size())
+ return false;
+
+ std::string iv = GenerateRandomString(kIvSize);
+
+ Encryptor encryptor;
+ if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC, iv))
+ return false;
+
+ std::string ciphertext;
+ if (!encryptor.Encrypt(value, &ciphertext))
+ return false;
+
+ std::string raw_mac_key;
+ if (!mac_key_->GetRawKey(&raw_mac_key))
+ return false;
+
+ HMAC hmac(HMAC::SHA256);
+ if (!hmac.Init(raw_mac_key))
+ return false;
+
+ std::vector<unsigned char> hash(kHashSize);
+ if (!hmac.Sign(ciphertext, &hash[0], hash.size()))
+ return false;
+
+ std::string output;
+ output.assign(iv);
+ output.append(ciphertext);
+ output.append(hash.begin(), hash.end());
+
+ return Base64Encode(output, encrypted);
+}
+
+bool Nigori::Decrypt(const std::string& encrypted, std::string* value) const {
+ std::string input;
+ if (!Base64Decode(encrypted, &input))
+ return false;
+
+ if (input.size() < kIvSize * 2 + kHashSize)
+ return false;
+
+ // The input is:
+ // * iv (16 bytes)
+ // * ciphertext (multiple of 16 bytes)
+ // * hash (32 bytes)
+ std::string iv(input.substr(0, kIvSize));
+ std::string ciphertext(input.substr(kIvSize,
+ input.size() - (kIvSize + kHashSize)));
+ std::string hash(input.substr(input.size() - kHashSize, kHashSize));
+
+ std::string raw_mac_key;
+ if (!mac_key_->GetRawKey(&raw_mac_key))
+ return false;
+
+ HMAC hmac(HMAC::SHA256);
+ if (!hmac.Init(raw_mac_key))
+ return false;
+
+ std::vector<unsigned char> expected(kHashSize);
+ if (!hmac.Sign(ciphertext, &expected[0], expected.size()))
+ return false;
+
+ if (hash.compare(0, hash.size(),
+ reinterpret_cast<char *>(&expected[0]),
+ expected.size()))
+ return false;
+
+ Encryptor encryptor;
+ if (!encryptor.Init(encryption_key_.get(), Encryptor::CBC, iv))
+ return false;
+
+ std::string plaintext;
+ if (!encryptor.Decrypt(ciphertext, value))
+ return false;
+
+ return true;
+}
+
+bool Nigori::ExportKeys(std::string* user_key,
+ std::string* encryption_key,
+ std::string* mac_key) const {
+ DCHECK(user_key);
+ DCHECK(encryption_key);
+ DCHECK(mac_key);
+
+ return user_key_->GetRawKey(user_key) &&
+ encryption_key_->GetRawKey(encryption_key) &&
+ mac_key_->GetRawKey(mac_key);
+}
+
+} // namespace browser_sync
diff --git a/sync/util/nigori.h b/sync/util/nigori.h
new file mode 100644
index 0000000..eb7dc5d
--- /dev/null
+++ b/sync/util/nigori.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_NIGORI_H_
+#define SYNC_UTIL_NIGORI_H_
+#pragma once
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "crypto/symmetric_key.h"
+
+namespace browser_sync {
+
+// A (partial) implementation of Nigori, a protocol to securely store secrets in
+// the cloud. This implementation does not support server authentication or
+// assisted key derivation.
+//
+// To store secrets securely, use the |Permute| method to derive a lookup name
+// for your secret (basically a map key), and |Encrypt| and |Decrypt| to store
+// and retrieve the secret.
+//
+// TODO: Link to doc.
+class Nigori {
+ public:
+ enum Type {
+ Password = 1,
+ };
+
+ Nigori();
+ virtual ~Nigori();
+
+ // Initialize the client with the given |hostname|, |username| and |password|.
+ bool InitByDerivation(const std::string& hostname,
+ const std::string& username,
+ const std::string& password);
+
+ // Initialize the client by importing the given keys instead of deriving new
+ // ones.
+ bool InitByImport(const std::string& user_key,
+ const std::string& encryption_key,
+ const std::string& mac_key);
+
+ // Derives a secure lookup name from |type| and |name|. If |hostname|,
+ // |username| and |password| are kept constant, a given |type| and |name| pair
+ // always yields the same |permuted| value. Note that |permuted| will be
+ // Base64 encoded.
+ bool Permute(Type type, const std::string& name, std::string* permuted) const;
+
+ // Encrypts |value|. Note that on success, |encrypted| will be Base64
+ // encoded.
+ bool Encrypt(const std::string& value, std::string* encrypted) const;
+
+ // Decrypts |value| into |decrypted|. It is assumed that |value| is Base64
+ // encoded.
+ bool Decrypt(const std::string& value, std::string* decrypted) const;
+
+ // Exports the raw derived keys.
+ bool ExportKeys(std::string* user_key,
+ std::string* encryption_key,
+ std::string* mac_key) const;
+
+ static const char kSaltSalt[]; // The salt used to derive the user salt.
+ static const size_t kSaltKeySizeInBits = 128;
+ static const size_t kDerivedKeySizeInBits = 128;
+ static const size_t kIvSize = 16;
+ static const size_t kHashSize = 32;
+
+ static const size_t kSaltIterations = 1001;
+ static const size_t kUserIterations = 1002;
+ static const size_t kEncryptionIterations = 1003;
+ static const size_t kSigningIterations = 1004;
+
+ private:
+ scoped_ptr<crypto::SymmetricKey> user_key_;
+ scoped_ptr<crypto::SymmetricKey> encryption_key_;
+ scoped_ptr<crypto::SymmetricKey> mac_key_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_NIGORI_H_
diff --git a/sync/util/nigori_unittest.cc b/sync/util/nigori_unittest.cc
new file mode 100644
index 0000000..41f8ceb
--- /dev/null
+++ b/sync/util/nigori_unittest.cc
@@ -0,0 +1,170 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/nigori.h"
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/string_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+namespace {
+
+TEST(SyncNigoriTest, Permute) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string permuted;
+ EXPECT_TRUE(nigori.Permute(Nigori::Password, "test name",
+ &permuted));
+
+ std::string expected =
+ "prewwdJj2PrGDczvmsHJEE5ndcCyVze8sY9kD5hjY/Tm"
+ "c5kOjXFK7zB3Ss4LlHjEDirMu+vh85JwHOnGrMVe+g==";
+ EXPECT_EQ(expected, permuted);
+}
+
+TEST(SyncNigoriTest, PermuteIsConstant) {
+ Nigori nigori1;
+ EXPECT_TRUE(nigori1.InitByDerivation("example.com", "username", "password"));
+
+ std::string permuted1;
+ EXPECT_TRUE(nigori1.Permute(Nigori::Password,
+ "name",
+ &permuted1));
+
+ Nigori nigori2;
+ EXPECT_TRUE(nigori2.InitByDerivation("example.com", "username", "password"));
+
+ std::string permuted2;
+ EXPECT_TRUE(nigori2.Permute(Nigori::Password,
+ "name",
+ &permuted2));
+
+ EXPECT_LT(0U, permuted1.size());
+ EXPECT_EQ(permuted1, permuted2);
+}
+
+TEST(SyncNigoriTest, EncryptDifferentIv) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string plaintext("value");
+
+ std::string encrypted1;
+ EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted1));
+
+ std::string encrypted2;
+ EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted2));
+
+ EXPECT_NE(encrypted1, encrypted2);
+}
+
+TEST(SyncNigoriTest, Decrypt) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string encrypted =
+ "e7+JyS6ibj6F5qqvpseukNRTZ+oBpu5iuv2VYjOfrH1dNiFLNf7Ov0"
+ "kx/zicKFn0lJcbG1UmkNWqIuR4x+quDNVuLaZGbrJPhrJuj7cokCM=";
+
+ std::string plaintext;
+ EXPECT_TRUE(nigori.Decrypt(encrypted, &plaintext));
+
+ std::string expected("test, test, 1, 2, 3");
+ EXPECT_EQ(expected, plaintext);
+}
+
+TEST(SyncNigoriTest, EncryptDecrypt) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string plaintext("value");
+
+ std::string encrypted;
+ EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
+
+ std::string decrypted;
+ EXPECT_TRUE(nigori.Decrypt(encrypted, &decrypted));
+
+ EXPECT_EQ(plaintext, decrypted);
+}
+
+TEST(SyncNigoriTest, CorruptedIv) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string plaintext("test");
+
+ std::string encrypted;
+ EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
+
+  // Corrupt the IV by changing one of its bytes.
+ encrypted[0] = (encrypted[0] == 'a' ? 'b' : 'a');
+
+ std::string decrypted;
+ EXPECT_TRUE(nigori.Decrypt(encrypted, &decrypted));
+
+ EXPECT_NE(plaintext, decrypted);
+}
+
+TEST(SyncNigoriTest, CorruptedCiphertext) {
+ Nigori nigori;
+ EXPECT_TRUE(nigori.InitByDerivation("example.com", "username", "password"));
+
+ std::string plaintext("test");
+
+ std::string encrypted;
+ EXPECT_TRUE(nigori.Encrypt(plaintext, &encrypted));
+
+  // Corrupt the ciphertext by changing one of its bytes.
+ encrypted[Nigori::kIvSize + 10] =
+ (encrypted[Nigori::kIvSize + 10] == 'a' ? 'b' : 'a');
+
+ std::string decrypted;
+ EXPECT_FALSE(nigori.Decrypt(encrypted, &decrypted));
+
+ EXPECT_NE(plaintext, decrypted);
+}
+
+// Crashes, Bug 55180.
+#if defined(OS_WIN)
+#define MAYBE_ExportImport DISABLED_ExportImport
+#else
+#define MAYBE_ExportImport ExportImport
+#endif
+TEST(SyncNigoriTest, MAYBE_ExportImport) {
+ Nigori nigori1;
+ EXPECT_TRUE(nigori1.InitByDerivation("example.com", "username", "password"));
+
+ std::string user_key;
+ std::string encryption_key;
+ std::string mac_key;
+ EXPECT_TRUE(nigori1.ExportKeys(&user_key, &encryption_key, &mac_key));
+
+ Nigori nigori2;
+ EXPECT_TRUE(nigori2.InitByImport(user_key, encryption_key, mac_key));
+
+ std::string original("test");
+ std::string plaintext;
+ std::string ciphertext;
+
+ EXPECT_TRUE(nigori1.Encrypt(original, &ciphertext));
+ EXPECT_TRUE(nigori2.Decrypt(ciphertext, &plaintext));
+ EXPECT_EQ(original, plaintext);
+
+ EXPECT_TRUE(nigori2.Encrypt(original, &ciphertext));
+ EXPECT_TRUE(nigori1.Decrypt(ciphertext, &plaintext));
+ EXPECT_EQ(original, plaintext);
+
+ std::string permuted1, permuted2;
+ EXPECT_TRUE(nigori1.Permute(Nigori::Password, original, &permuted1));
+ EXPECT_TRUE(nigori2.Permute(Nigori::Password, original, &permuted2));
+ EXPECT_EQ(permuted1, permuted2);
+}
+
+} // anonymous namespace
+} // namespace browser_sync
diff --git a/sync/util/protobuf_unittest.cc b/sync/util/protobuf_unittest.cc
new file mode 100644
index 0000000..4f654d5
--- /dev/null
+++ b/sync/util/protobuf_unittest.cc
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#include <vector>
+
+#include "sync/protocol/test.pb.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(SyncProtobufTest, TestUnknownFields) {
+  // This test ensures that we retain unknown fields in protocol buffers by
+ // serialising UnknownFieldsTestB, which is a superset of UnknownFieldsTestA,
+ // and checking we get back to the same message after parsing/serialising via
+ // UnknownFieldsTestA.
+ sync_pb::UnknownFieldsTestA a;
+ sync_pb::UnknownFieldsTestB b;
+ sync_pb::UnknownFieldsTestB b2;
+
+ b.set_foo(true);
+ b.set_bar(true);
+ std::string serialized;
+ ASSERT_TRUE(b.SerializeToString(&serialized));
+ ASSERT_TRUE(a.ParseFromString(serialized));
+ ASSERT_TRUE(a.foo());
+ std::string serialized2;
+ ASSERT_TRUE(a.SerializeToString(&serialized2));
+ ASSERT_TRUE(b2.ParseFromString(serialized2));
+ ASSERT_TRUE(b2.foo());
+ ASSERT_TRUE(b2.bar());
+}
+
+} // namespace
diff --git a/sync/util/report_unrecoverable_error_function.h b/sync/util/report_unrecoverable_error_function.h
new file mode 100644
index 0000000..ead73f0
--- /dev/null
+++ b/sync/util/report_unrecoverable_error_function.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
+#define SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
+#pragma once
+
+namespace browser_sync {
+
+// A ReportUnrecoverableErrorFunction is a function that is called
+// immediately when an unrecoverable error is encountered. Unlike
+// UnrecoverableErrorHandler, it should just log the error and any
+// context surrounding it.
+typedef void (*ReportUnrecoverableErrorFunction)(void);
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_REPORT_UNRECOVERABLE_ERROR_FUNCTION_H_
diff --git a/sync/util/syncer_error.cc b/sync/util/syncer_error.cc
new file mode 100644
index 0000000..5f836ce
--- /dev/null
+++ b/sync/util/syncer_error.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/syncer_error.h"
+
+#include "base/logging.h"
+
+namespace browser_sync {
+
+#define ENUM_CASE(x) case x: return #x; break;
+const char* GetSyncerErrorString(SyncerError value) {
+ switch (value) {
+ ENUM_CASE(UNSET);
+ ENUM_CASE(DIRECTORY_LOOKUP_FAILED);
+ ENUM_CASE(NETWORK_CONNECTION_UNAVAILABLE);
+ ENUM_CASE(NETWORK_IO_ERROR);
+ ENUM_CASE(SYNC_SERVER_ERROR);
+ ENUM_CASE(SYNC_AUTH_ERROR);
+ ENUM_CASE(SERVER_RETURN_INVALID_CREDENTIAL);
+ ENUM_CASE(SERVER_RETURN_UNKNOWN_ERROR);
+ ENUM_CASE(SERVER_RETURN_THROTTLED);
+ ENUM_CASE(SERVER_RETURN_TRANSIENT_ERROR);
+ ENUM_CASE(SERVER_RETURN_MIGRATION_DONE);
+ ENUM_CASE(SERVER_RETURN_CLEAR_PENDING);
+ ENUM_CASE(SERVER_RETURN_NOT_MY_BIRTHDAY);
+ ENUM_CASE(SERVER_RESPONSE_VALIDATION_FAILED);
+ ENUM_CASE(SYNCER_OK);
+ }
+ NOTREACHED();
+ return "INVALID";
+}
+#undef ENUM_CASE
+
+} // namespace browser_sync
+
diff --git a/sync/util/syncer_error.h b/sync/util/syncer_error.h
new file mode 100644
index 0000000..6613f59
--- /dev/null
+++ b/sync/util/syncer_error.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_INCLUDES_SYNCER_ERROR_H_
+#define SYNC_INTERNAL_API_INCLUDES_SYNCER_ERROR_H_
+#pragma once
+
+namespace browser_sync {
+
+// This enum describes all the ways a SyncerCommand can fail.
+//
+// SyncerCommands do many different things, but they share a common function
+// signature. This enum, the return value for all SyncerCommands, must be able
+// to describe any possible failure for all SyncerCommands.
+//
+// For convenience, functions which are invoked only by SyncerCommands may also
+// return a SyncerError. It saves us having to write a conversion function, and
+// it makes refactoring easier.
+enum SyncerError {
+ UNSET = 0, // Default value.
+ DIRECTORY_LOOKUP_FAILED, // Local directory lookup failure.
+
+ NETWORK_CONNECTION_UNAVAILABLE, // Connectivity failure.
+ NETWORK_IO_ERROR, // Response buffer read error.
+ SYNC_SERVER_ERROR, // Non auth HTTP error.
+ SYNC_AUTH_ERROR, // HTTP auth error.
+
+ // Based on values returned by server. Most are defined in sync.proto.
+ SERVER_RETURN_INVALID_CREDENTIAL,
+ SERVER_RETURN_UNKNOWN_ERROR,
+ SERVER_RETURN_THROTTLED,
+ SERVER_RETURN_TRANSIENT_ERROR,
+ SERVER_RETURN_MIGRATION_DONE,
+ SERVER_RETURN_CLEAR_PENDING,
+ SERVER_RETURN_NOT_MY_BIRTHDAY,
+ SERVER_RESPONSE_VALIDATION_FAILED,
+
+ SYNCER_OK
+};
+
+const char * GetSyncerErrorString(SyncerError);
+
+} // namespace browser_sync
+
+#endif // SYNC_INTERNAL_API_INCLUDES_SYNCER_ERROR_H_
diff --git a/sync/util/test_unrecoverable_error_handler.cc b/sync/util/test_unrecoverable_error_handler.cc
new file mode 100644
index 0000000..f7df6d4
--- /dev/null
+++ b/sync/util/test_unrecoverable_error_handler.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/test_unrecoverable_error_handler.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+TestUnrecoverableErrorHandler::TestUnrecoverableErrorHandler() {}
+
+TestUnrecoverableErrorHandler::~TestUnrecoverableErrorHandler() {}
+
+void TestUnrecoverableErrorHandler::OnUnrecoverableError(
+ const tracked_objects::Location& from_here,
+ const std::string& message) {
+ ADD_FAILURE_AT(from_here.file_name(), from_here.line_number())
+ << from_here.function_name() << ": " << message;
+}
+
+} // namespace browser_sync
+
diff --git a/sync/util/test_unrecoverable_error_handler.h b/sync/util/test_unrecoverable_error_handler.h
new file mode 100644
index 0000000..408d806
--- /dev/null
+++ b/sync/util/test_unrecoverable_error_handler.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
+#define SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
+#pragma once
+
+#include "base/compiler_specific.h"
+#include "sync/util/unrecoverable_error_handler.h"
+
+namespace browser_sync {
+
+// Implementation of UnrecoverableErrorHandler that simply adds a
+// gtest failure.
+class TestUnrecoverableErrorHandler : public UnrecoverableErrorHandler {
+ public:
+ TestUnrecoverableErrorHandler();
+ virtual ~TestUnrecoverableErrorHandler();
+
+ virtual void OnUnrecoverableError(const tracked_objects::Location& from_here,
+ const std::string& message) OVERRIDE;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_INTERNAL_API_INCLUDES_TEST_UNRECOVERABLE_ERROR_HANDLER_H_
+
diff --git a/sync/util/time.cc b/sync/util/time.cc
new file mode 100644
index 0000000..ea0f367
--- /dev/null
+++ b/sync/util/time.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/time.h"
+
+#include "base/i18n/time_formatting.h"
+#include "base/utf_string_conversions.h"
+
+namespace browser_sync {
+
+int64 TimeToProtoTime(const base::Time& t) {
+ return (t - base::Time::UnixEpoch()).InMilliseconds();
+}
+
+base::Time ProtoTimeToTime(int64 proto_t) {
+ return base::Time::UnixEpoch() + base::TimeDelta::FromMilliseconds(proto_t);
+}
+
+std::string GetTimeDebugString(const base::Time& t) {
+ return UTF16ToUTF8(base::TimeFormatFriendlyDateAndTime(t));
+}
+
+} // namespace browser_sync
diff --git a/sync/util/time.h b/sync/util/time.h
new file mode 100644
index 0000000..fc65efa
--- /dev/null
+++ b/sync/util/time.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Time-related sync functions.
+
+#ifndef SYNC_UTIL_TIME_H_
+#define SYNC_UTIL_TIME_H_
+#pragma once
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/time.h"
+
+namespace browser_sync {
+
+// Converts a time object to the format used in sync protobufs (ms
+// since the Unix epoch).
+int64 TimeToProtoTime(const base::Time& t);
+
+// Converts a time field from sync protobufs to a time object.
+base::Time ProtoTimeToTime(int64 proto_t);
+
+std::string GetTimeDebugString(const base::Time& t);
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_TIME_H_
diff --git a/sync/util/unrecoverable_error_handler.h b/sync/util/unrecoverable_error_handler.h
new file mode 100644
index 0000000..aaca1e9
--- /dev/null
+++ b/sync/util/unrecoverable_error_handler.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
+#define SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
+#pragma once
+
+#include <string>
+
+#include "base/location.h"
+
+namespace browser_sync {
+
+class UnrecoverableErrorHandler {
+ public:
+ // Call this when normal operation detects that the chrome model and the
+ // syncer model are inconsistent, or similar. The ProfileSyncService will
+ // try to avoid doing any work to avoid crashing or corrupting things
+ // further, and will report an error status if queried.
+ virtual void OnUnrecoverableError(const tracked_objects::Location& from_here,
+ const std::string& message) = 0;
+ protected:
+ virtual ~UnrecoverableErrorHandler() { }
+};
+
+}  // namespace browser_sync
+
+#endif // SYNC_UTIL_UNRECOVERABLE_ERROR_HANDLER_H_
+
diff --git a/sync/util/unrecoverable_error_info.cc b/sync/util/unrecoverable_error_info.cc
new file mode 100644
index 0000000..617f1a5
--- /dev/null
+++ b/sync/util/unrecoverable_error_info.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/unrecoverable_error_info.h"
+
+namespace browser_sync {
+
+UnrecoverableErrorInfo::UnrecoverableErrorInfo()
+ : is_set_(false) {
+}
+
+UnrecoverableErrorInfo::UnrecoverableErrorInfo(
+ const tracked_objects::Location& location,
+ const std::string& message)
+ : location_(location),
+ message_(message),
+ is_set_(true) {
+}
+
+UnrecoverableErrorInfo::~UnrecoverableErrorInfo() {
+}
+
+void UnrecoverableErrorInfo::Reset(
+ const tracked_objects::Location& location,
+ const std::string& message) {
+ location_ = location;
+ message_ = message;
+ is_set_ = true;
+}
+
+bool UnrecoverableErrorInfo::IsSet() const {
+ return is_set_;
+}
+
+const tracked_objects::Location& UnrecoverableErrorInfo::location() const {
+ return location_;
+}
+
+const std::string& UnrecoverableErrorInfo::message() const {
+ return message_;
+}
+
+} // namespace browser_sync
diff --git a/sync/util/unrecoverable_error_info.h b/sync/util/unrecoverable_error_info.h
new file mode 100644
index 0000000..64b780a
--- /dev/null
+++ b/sync/util/unrecoverable_error_info.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
+#define SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
+// TODO(lipalani): Figure out the right location for this class so it is
+// accessible outside of sync engine as well.
+#pragma once
+
+#include <string>
+
+#include "base/location.h"
+
+namespace browser_sync {
+
+class UnrecoverableErrorInfo {
+ public:
+ UnrecoverableErrorInfo();
+ UnrecoverableErrorInfo(
+ const tracked_objects::Location& location,
+ const std::string& message);
+ ~UnrecoverableErrorInfo();
+
+ void Reset(const tracked_objects::Location& location,
+ const std::string& message);
+
+ bool IsSet() const;
+
+ const tracked_objects::Location& location() const;
+ const std::string& message() const;
+
+ private:
+ tracked_objects::Location location_;
+ std::string message_;
+ bool is_set_;
+};
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_UNRECOVERABLE_ERROR_INFO_H_
diff --git a/sync/util/weak_handle.cc b/sync/util/weak_handle.cc
new file mode 100644
index 0000000..bdf580b7
--- /dev/null
+++ b/sync/util/weak_handle.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/weak_handle.h"
+
+#include <sstream>
+
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/message_loop_proxy.h"
+
+namespace browser_sync {
+
+namespace internal {
+
+WeakHandleCoreBase::WeakHandleCoreBase()
+ : owner_loop_proxy_(base::MessageLoopProxy::current()) {}
+
+bool WeakHandleCoreBase::IsOnOwnerThread() const {
+ return owner_loop_proxy_->BelongsToCurrentThread();
+}
+
+WeakHandleCoreBase::~WeakHandleCoreBase() {}
+
+void WeakHandleCoreBase::PostToOwnerThread(
+ const tracked_objects::Location& from_here,
+ const base::Closure& fn) const {
+ if (!owner_loop_proxy_->PostTask(from_here, fn)) {
+ DVLOG(1) << "Could not post task from " << from_here.ToString();
+ }
+}
+
+} // namespace internal
+
+}  // namespace browser_sync
diff --git a/sync/util/weak_handle.h b/sync/util/weak_handle.h
new file mode 100644
index 0000000..653da60
--- /dev/null
+++ b/sync/util/weak_handle.h
@@ -0,0 +1,379 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak handles provides a way to refer to weak pointers from another
+// thread. This is useful because it is not safe to reference a weak
+// pointer from a thread other than the thread on which it was
+// created.
+//
+// Weak handles can be passed across threads, so for example, you can
+// use them to do the "real" work on one thread and get notified on
+// another thread:
+//
+// class FooIOWorker {
+// public:
+// FooIOWorker(const WeakHandle<Foo>& foo) : foo_(foo) {}
+//
+// void OnIOStart() {
+// foo_.Call(FROM_HERE, &Foo::OnIOStart);
+// }
+//
+// void OnIOEvent(IOEvent e) {
+// foo_.Call(FROM_HERE, &Foo::OnIOEvent, e);
+// }
+//
+// void OnIOError(IOError err) {
+// foo_.Call(FROM_HERE, &Foo::OnIOError, err);
+// }
+//
+// private:
+// const WeakHandle<Foo> foo_;
+// };
+//
+// class Foo : public SupportsWeakPtr<Foo>, public NonThreadSafe {
+// public:
+// Foo() {
+// SpawnFooIOWorkerOnIOThread(base::MakeWeakHandle(AsWeakPtr()));
+// }
+//
+// /* Will always be called on the correct thread, and only if this
+// object hasn't been destroyed. */
+//   void OnIOStart() { DCHECK(CalledOnValidThread()); ... }
+//   void OnIOEvent(IOEvent e) { DCHECK(CalledOnValidThread()); ... }
+//   void OnIOError(IOError err) { DCHECK(CalledOnValidThread()); ... }
+// };
+
+#ifndef SYNC_UTIL_WEAK_HANDLE_H_
+#define SYNC_UTIL_WEAK_HANDLE_H_
+#pragma once
+
+#include <cstddef>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+class MessageLoopProxy;
+} // namespace base
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace browser_sync {
+
+template <typename T> class WeakHandle;
+
+namespace internal {
+// These classes are part of the WeakHandle implementation. DO NOT
+// USE THESE CLASSES DIRECTLY YOURSELF.
+
+// Adapted from base/callback_internal.h.
+
+template <typename T>
+struct ParamTraits {
+ typedef const T& ForwardType;
+};
+
+template <typename T>
+struct ParamTraits<T&> {
+ typedef T& ForwardType;
+};
+
+template <typename T, size_t n>
+struct ParamTraits<T[n]> {
+ typedef const T* ForwardType;
+};
+
+template <typename T>
+struct ParamTraits<T[]> {
+ typedef const T* ForwardType;
+};
+
+// Base class for WeakHandleCore<T> to avoid template bloat. Handles
+// the interaction with the owner thread and its message loop.
+class WeakHandleCoreBase {
+ public:
+ // Assumes the current thread is the owner thread.
+ WeakHandleCoreBase();
+
+ // May be called on any thread.
+ bool IsOnOwnerThread() const;
+
+ protected:
+ // May be destroyed on any thread.
+ ~WeakHandleCoreBase();
+
+ // May be called on any thread.
+ void PostToOwnerThread(const tracked_objects::Location& from_here,
+ const base::Closure& fn) const;
+
+ private:
+ // May be used on any thread.
+ const scoped_refptr<base::MessageLoopProxy> owner_loop_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(WeakHandleCoreBase);
+};
+
+// WeakHandleCore<T> contains all the logic for WeakHandle<T>.
+template <typename T>
+class WeakHandleCore
+ : public WeakHandleCoreBase,
+ public base::RefCountedThreadSafe<WeakHandleCore<T> > {
+ public:
+ // Must be called on |ptr|'s owner thread, which is assumed to be
+ // the current thread.
+ explicit WeakHandleCore(const base::WeakPtr<T>& ptr) : ptr_(ptr) {}
+
+ // Must be called on |ptr_|'s owner thread.
+ base::WeakPtr<T> Get() const {
+ CHECK(IsOnOwnerThread());
+ return ptr_;
+ }
+
+ // Call(...) may be called on any thread, but all its arguments
+ // should be safe to be bound and copied across threads.
+
+ template <typename U>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(void)) const {
+ PostToOwnerThread(
+ from_here,
+ Bind(&WeakHandleCore::template DoCall0<U>, this, fn));
+ }
+
+ template <typename U, typename A1>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1),
+ typename ParamTraits<A1>::ForwardType a1) const {
+ PostToOwnerThread(
+ from_here,
+ Bind(&WeakHandleCore::template DoCall1<U, A1>,
+ this, fn, a1));
+ }
+
+ template <typename U, typename A1, typename A2>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2) const {
+ PostToOwnerThread(
+ from_here,
+ Bind(&WeakHandleCore::template DoCall2<U, A1, A2>,
+ this, fn, a1, a2));
+ }
+
+ template <typename U, typename A1, typename A2, typename A3>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2, A3),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2,
+ typename ParamTraits<A3>::ForwardType a3) const {
+ PostToOwnerThread(
+ from_here,
+ Bind(&WeakHandleCore::template DoCall3<U, A1, A2, A3>,
+ this, fn, a1, a2, a3));
+ }
+
+ template <typename U, typename A1, typename A2, typename A3, typename A4>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2, A3, A4),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2,
+ typename ParamTraits<A3>::ForwardType a3,
+ typename ParamTraits<A4>::ForwardType a4) const {
+ PostToOwnerThread(
+ from_here,
+ Bind(&WeakHandleCore::template DoCall4<U, A1, A2, A3, A4>,
+ this, fn, a1, a2, a3, a4));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<WeakHandleCore<T> >;
+
+ // May be destroyed on any thread.
+ ~WeakHandleCore() {}
+
+ // GCC 4.2.1 on OS X gets confused if all the DoCall functions are
+ // named the same, so we distinguish them.
+
+ template <typename U>
+ void DoCall0(void (U::*fn)(void)) const {
+ CHECK(IsOnOwnerThread());
+ if (!Get()) {
+ return;
+ }
+ (Get()->*fn)();
+ }
+
+ template <typename U, typename A1>
+ void DoCall1(void (U::*fn)(A1),
+ typename ParamTraits<A1>::ForwardType a1) const {
+ CHECK(IsOnOwnerThread());
+ if (!Get()) {
+ return;
+ }
+ (Get()->*fn)(a1);
+ }
+
+ template <typename U, typename A1, typename A2>
+ void DoCall2(void (U::*fn)(A1, A2),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2) const {
+ CHECK(IsOnOwnerThread());
+ if (!Get()) {
+ return;
+ }
+ (Get()->*fn)(a1, a2);
+ }
+
+ template <typename U, typename A1, typename A2, typename A3>
+ void DoCall3(void (U::*fn)(A1, A2, A3),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2,
+ typename ParamTraits<A3>::ForwardType a3) const {
+ CHECK(IsOnOwnerThread());
+ if (!Get()) {
+ return;
+ }
+ (Get()->*fn)(a1, a2, a3);
+ }
+
+ template <typename U, typename A1, typename A2, typename A3, typename A4>
+ void DoCall4(void (U::*fn)(A1, A2, A3, A4),
+ typename ParamTraits<A1>::ForwardType a1,
+ typename ParamTraits<A2>::ForwardType a2,
+ typename ParamTraits<A3>::ForwardType a3,
+ typename ParamTraits<A4>::ForwardType a4) const {
+ CHECK(IsOnOwnerThread());
+ if (!Get()) {
+ return;
+ }
+ (Get()->*fn)(a1, a2, a3, a4);
+ }
+
+ // Must be dereferenced only on the owner thread. May be destroyed
+ // from any thread.
+ base::WeakPtr<T> ptr_;
+
+ DISALLOW_COPY_AND_ASSIGN(WeakHandleCore);
+};
+
+} // namespace internal
+
+// May be destroyed on any thread.
+// Copying and assignment are welcome.
+template <typename T>
+class WeakHandle {
+ public:
+ // Creates an uninitialized WeakHandle.
+ WeakHandle() {}
+
+ // Creates an initialized WeakHandle from |ptr|.
+ explicit WeakHandle(const base::WeakPtr<T>& ptr)
+ : core_(new internal::WeakHandleCore<T>(ptr)) {}
+
+ // Allow conversion from WeakHandle<U> to WeakHandle<T> if U is
+ // convertible to T, but we *must* be on |other|'s owner thread.
+ // Note that this doesn't override the regular copy constructor, so
+ // that one can be called on any thread.
+ template <typename U>
+ WeakHandle(const browser_sync::WeakHandle<U>& other) // NOLINT
+ : core_(
+ other.IsInitialized() ?
+ new internal::WeakHandleCore<T>(other.Get()) :
+ NULL) {}
+
+ // Returns true iff this WeakHandle is initialized. Note that being
+ // initialized isn't a guarantee that the underlying object is still
+ // alive.
+ bool IsInitialized() const {
+ return core_.get() != NULL;
+ }
+
+ // Resets to an uninitialized WeakHandle.
+ void Reset() {
+ core_ = NULL;
+ }
+
+ // Must be called only on the underlying object's owner thread.
+ base::WeakPtr<T> Get() const {
+ CHECK(IsInitialized());
+ CHECK(core_->IsOnOwnerThread());
+ return core_->Get();
+ }
+
+ // Call(...) may be called on any thread, but all its arguments
+ // should be safe to be bound and copied across threads.
+
+ template <typename U>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(void)) const {
+ CHECK(IsInitialized());
+ core_->Call(from_here, fn);
+ }
+
+ template <typename U, typename A1>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1),
+ typename internal::ParamTraits<A1>::ForwardType a1) const {
+ CHECK(IsInitialized());
+ core_->Call(from_here, fn, a1);
+ }
+
+ template <typename U, typename A1, typename A2>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2),
+ typename internal::ParamTraits<A1>::ForwardType a1,
+ typename internal::ParamTraits<A2>::ForwardType a2) const {
+ CHECK(IsInitialized());
+ core_->Call(from_here, fn, a1, a2);
+ }
+
+ template <typename U, typename A1, typename A2, typename A3>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2, A3),
+ typename internal::ParamTraits<A1>::ForwardType a1,
+ typename internal::ParamTraits<A2>::ForwardType a2,
+ typename internal::ParamTraits<A3>::ForwardType a3) const {
+ CHECK(IsInitialized());
+ core_->Call(from_here, fn, a1, a2, a3);
+ }
+
+ template <typename U, typename A1, typename A2, typename A3, typename A4>
+ void Call(const tracked_objects::Location& from_here,
+ void (U::*fn)(A1, A2, A3, A4),
+ typename internal::ParamTraits<A1>::ForwardType a1,
+ typename internal::ParamTraits<A2>::ForwardType a2,
+ typename internal::ParamTraits<A3>::ForwardType a3,
+ typename internal::ParamTraits<A4>::ForwardType a4) const {
+ CHECK(IsInitialized());
+ core_->Call(from_here, fn, a1, a2, a3, a4);
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(WeakHandleTest,
+ TypeConversionConstructor);
+ FRIEND_TEST_ALL_PREFIXES(WeakHandleTest,
+ TypeConversionConstructorAssignment);
+
+ scoped_refptr<internal::WeakHandleCore<T> > core_;
+};
+
+// Makes a WeakHandle from a WeakPtr.
+template <typename T>
+WeakHandle<T> MakeWeakHandle(const base::WeakPtr<T>& ptr) {
+ return WeakHandle<T>(ptr);
+}
+
+} // namespace browser_sync
+
+#endif // SYNC_UTIL_WEAK_HANDLE_H_
diff --git a/sync/util/weak_handle_unittest.cc b/sync/util/weak_handle_unittest.cc
new file mode 100644
index 0000000..6767df1
--- /dev/null
+++ b/sync/util/weak_handle_unittest.cc
@@ -0,0 +1,326 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/util/weak_handle.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop.h"
+#include "base/threading/thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+using ::testing::_;
+using ::testing::SaveArg;
+using ::testing::StrictMock;
+
+class Base {
+ public:
+ Base() : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {}
+
+ WeakHandle<Base> AsWeakHandle() {
+ return MakeWeakHandle(weak_ptr_factory_.GetWeakPtr());
+ }
+
+ void Kill() {
+ weak_ptr_factory_.InvalidateWeakPtrs();
+ }
+
+ MOCK_METHOD0(Test, void());
+ MOCK_METHOD1(Test1, void(const int&));
+ MOCK_METHOD2(Test2, void(const int&, Base*));
+ MOCK_METHOD3(Test3, void(const int&, Base*, float));
+ MOCK_METHOD4(Test4, void(const int&, Base*, float, const char*));
+
+ MOCK_METHOD1(TestWithSelf, void(const WeakHandle<Base>&));
+
+ private:
+ base::WeakPtrFactory<Base> weak_ptr_factory_;
+};
+
+class Derived : public Base, public base::SupportsWeakPtr<Derived> {};
+
+class WeakHandleTest : public ::testing::Test {
+ protected:
+ virtual void TearDown() {
+ // Process any last-minute posted tasks.
+ PumpLoop();
+ }
+
+ void PumpLoop() {
+ message_loop_.RunAllPending();
+ }
+
+ static void CallTestFromOtherThread(tracked_objects::Location from_here,
+ const WeakHandle<Base>& h) {
+ base::Thread t("Test thread");
+ ASSERT_TRUE(t.Start());
+ t.message_loop()->PostTask(
+ from_here, base::Bind(&WeakHandleTest::CallTest, from_here, h));
+ }
+
+ private:
+ static void CallTest(tracked_objects::Location from_here,
+ const WeakHandle<Base>& h) {
+ h.Call(from_here, &Base::Test);
+ }
+
+ MessageLoop message_loop_;
+};
+
+TEST_F(WeakHandleTest, Uninitialized) {
+ // Default.
+ WeakHandle<int> h;
+ EXPECT_FALSE(h.IsInitialized());
+ // Copy.
+ {
+ WeakHandle<int> h2(h);
+ EXPECT_FALSE(h2.IsInitialized());
+ }
+ // Assign.
+ {
+ WeakHandle<int> h2;
+ h2 = h;
+ EXPECT_FALSE(h.IsInitialized());
+ }
+}
+
+TEST_F(WeakHandleTest, InitializedAfterDestroy) {
+ WeakHandle<Base> h;
+ {
+ StrictMock<Base> b;
+ h = b.AsWeakHandle();
+ }
+ EXPECT_TRUE(h.IsInitialized());
+ EXPECT_FALSE(h.Get());
+}
+
+TEST_F(WeakHandleTest, InitializedAfterInvalidate) {
+ StrictMock<Base> b;
+ WeakHandle<Base> h = b.AsWeakHandle();
+ b.Kill();
+ EXPECT_TRUE(h.IsInitialized());
+ EXPECT_FALSE(h.Get());
+}
+
+TEST_F(WeakHandleTest, Call) {
+ StrictMock<Base> b;
+ const char test_str[] = "test";
+ EXPECT_CALL(b, Test());
+ EXPECT_CALL(b, Test1(5));
+ EXPECT_CALL(b, Test2(5, &b));
+ EXPECT_CALL(b, Test3(5, &b, 5));
+ EXPECT_CALL(b, Test4(5, &b, 5, test_str));
+
+ WeakHandle<Base> h = b.AsWeakHandle();
+ EXPECT_TRUE(h.IsInitialized());
+
+ // Should run.
+ h.Call(FROM_HERE, &Base::Test);
+ h.Call(FROM_HERE, &Base::Test1, 5);
+ h.Call(FROM_HERE, &Base::Test2, 5, &b);
+ h.Call(FROM_HERE, &Base::Test3, 5, &b, 5);
+ h.Call(FROM_HERE, &Base::Test4, 5, &b, 5, test_str);
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, CallAfterDestroy) {
+ {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(0);
+
+ WeakHandle<Base> h = b.AsWeakHandle();
+ EXPECT_TRUE(h.IsInitialized());
+
+ // Should not run.
+ h.Call(FROM_HERE, &Base::Test);
+ }
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, CallAfterInvalidate) {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(0);
+
+ WeakHandle<Base> h = b.AsWeakHandle();
+ EXPECT_TRUE(h.IsInitialized());
+
+ // Should not run.
+ h.Call(FROM_HERE, &Base::Test);
+
+ b.Kill();
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, CallThreaded) {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test());
+
+ WeakHandle<Base> h = b.AsWeakHandle();
+ // Should run.
+ CallTestFromOtherThread(FROM_HERE, h);
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, CallAfterDestroyThreaded) {
+ WeakHandle<Base> h;
+ {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(0);
+ h = b.AsWeakHandle();
+ }
+
+ // Should not run.
+ CallTestFromOtherThread(FROM_HERE, h);
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, CallAfterInvalidateThreaded) {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(0);
+
+ WeakHandle<Base> h = b.AsWeakHandle();
+ b.Kill();
+ // Should not run.
+ CallTestFromOtherThread(FROM_HERE, h);
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, DeleteOnOtherThread) {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(0);
+
+ WeakHandle<Base>* h = new WeakHandle<Base>(b.AsWeakHandle());
+
+ {
+ base::Thread t("Test thread");
+ ASSERT_TRUE(t.Start());
+ t.message_loop()->DeleteSoon(FROM_HERE, h);
+ }
+
+ PumpLoop();
+}
+
+void CallTestWithSelf(const WeakHandle<Base>& b1) {
+ StrictMock<Base> b2;
+ b1.Call(FROM_HERE, &Base::TestWithSelf, b2.AsWeakHandle());
+}
+
+TEST_F(WeakHandleTest, WithDestroyedThread) {
+ StrictMock<Base> b1;
+ WeakHandle<Base> b2;
+ EXPECT_CALL(b1, TestWithSelf(_)).WillOnce(SaveArg<0>(&b2));
+
+ {
+ base::Thread t("Test thread");
+ ASSERT_TRUE(t.Start());
+ t.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&CallTestWithSelf,
+ b1.AsWeakHandle()));
+ }
+
+ // Calls b1.TestWithSelf().
+ PumpLoop();
+
+ // Shouldn't do anything, since the thread is gone.
+ b2.Call(FROM_HERE, &Base::Test);
+
+ // |b2| shouldn't leak when it's destroyed, even if the original
+ // thread is gone.
+}
+
+TEST_F(WeakHandleTest, InitializedAcrossCopyAssign) {
+ StrictMock<Base> b;
+ EXPECT_CALL(b, Test()).Times(3);
+
+ EXPECT_TRUE(b.AsWeakHandle().IsInitialized());
+ b.AsWeakHandle().Call(FROM_HERE, &Base::Test);
+
+ {
+ WeakHandle<Base> h(b.AsWeakHandle());
+ EXPECT_TRUE(h.IsInitialized());
+ h.Call(FROM_HERE, &Base::Test);
+ h.Reset();
+ EXPECT_FALSE(h.IsInitialized());
+ }
+
+ {
+ WeakHandle<Base> h;
+ h = b.AsWeakHandle();
+ EXPECT_TRUE(h.IsInitialized());
+ h.Call(FROM_HERE, &Base::Test);
+ h.Reset();
+ EXPECT_FALSE(h.IsInitialized());
+ }
+
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, TypeConversionConstructor) {
+ StrictMock<Derived> d;
+ EXPECT_CALL(d, Test()).Times(2);
+
+ const WeakHandle<Derived> weak_handle = MakeWeakHandle(d.AsWeakPtr());
+
+ // Should trigger type conversion constructor.
+ const WeakHandle<Base> base_weak_handle(weak_handle);
+ // Should trigger regular copy constructor.
+ const WeakHandle<Derived> derived_weak_handle(weak_handle);
+
+ EXPECT_TRUE(base_weak_handle.IsInitialized());
+ base_weak_handle.Call(FROM_HERE, &Base::Test);
+
+ EXPECT_TRUE(derived_weak_handle.IsInitialized());
+ // Copy constructor shouldn't construct a new |core_|.
+ EXPECT_EQ(weak_handle.core_.get(), derived_weak_handle.core_.get());
+ derived_weak_handle.Call(FROM_HERE, &Base::Test);
+
+ PumpLoop();
+}
+
+TEST_F(WeakHandleTest, TypeConversionConstructorMakeWeakHandle) {
+ const base::WeakPtr<Derived> weak_ptr;
+
+ // Should trigger type conversion constructor after MakeWeakHandle.
+ WeakHandle<Base> base_weak_handle(MakeWeakHandle(weak_ptr));
+ // Should trigger regular copy constructor after MakeWeakHandle.
+ const WeakHandle<Derived> derived_weak_handle(MakeWeakHandle(weak_ptr));
+
+ EXPECT_TRUE(base_weak_handle.IsInitialized());
+ EXPECT_TRUE(derived_weak_handle.IsInitialized());
+}
+
+TEST_F(WeakHandleTest, TypeConversionConstructorAssignment) {
+ const WeakHandle<Derived> weak_handle =
+ MakeWeakHandle(Derived().AsWeakPtr());
+
+ // Should trigger type conversion constructor before the assignment.
+ WeakHandle<Base> base_weak_handle;
+ base_weak_handle = weak_handle;
+ // Should trigger regular copy constructor before the assignment.
+ WeakHandle<Derived> derived_weak_handle;
+ derived_weak_handle = weak_handle;
+
+ EXPECT_TRUE(base_weak_handle.IsInitialized());
+ EXPECT_TRUE(derived_weak_handle.IsInitialized());
+ // Copy constructor shouldn't construct a new |core_|.
+ EXPECT_EQ(weak_handle.core_.get(), derived_weak_handle.core_.get());
+}
+
+TEST_F(WeakHandleTest, TypeConversionConstructorUninitialized) {
+ const WeakHandle<Base> base_weak_handle = WeakHandle<Derived>();
+ EXPECT_FALSE(base_weak_handle.IsInitialized());
+}
+
+TEST_F(WeakHandleTest, TypeConversionConstructorUninitializedAssignment) {
+ WeakHandle<Base> base_weak_handle;
+ base_weak_handle = WeakHandle<Derived>();
+ EXPECT_FALSE(base_weak_handle.IsInitialized());
+}
+
+} // namespace browser_sync