summaryrefslogtreecommitdiffstats
path: root/chrome/browser
diff options
context:
space:
mode:
authornick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-10 06:05:27 +0000
committernick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-10 06:05:27 +0000
commit5852edc1b6eab234b9e048c41dd0d664ae7fc747 (patch)
tree9e5d8eb4833b76cdb11e66fc3607689e0f5e0122 /chrome/browser
parentf6059e37f8b8ac335ce18a189a13e702974a1c7e (diff)
downloadchromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.zip
chromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.tar.gz
chromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.tar.bz2
Initial commit of sync engine code to browser/sync.
The code is not built on any platform yet. That will arrive as a subsequent checkin. This is an implementation of the interface exposed earlier through syncapi.h. It is the client side of a sync protocol that lets users sync their browser data (currently, just bookmarks) with their Google Account. Table of contents: browser/sync/ protocol - The protocol definition, and other definitions necessary to connect to the service. syncable/ - defines a data model for syncable objects, and provides a sqlite-based backing store for this model. engine/ - includes the core sync logic, including commiting changes to the server, downloading changes from the server, resolving conflicts, other parts of the sync algorithm. engine/net - parts of the sync engine focused on the business of talking to the server. Some of this is binds a generic "server connection" interface to a concrete implementation provided by Chromium. notifier - the part of the syncer focused on the business of sending and receiving xmpp notifications. Notifications are used instead of polling to achieve very low latency change propagation. util - not necessarily sync specific utility code. Much of this is scaffolding which should either be replaced by, or merged with, the utility code in base/. BUG=none TEST=this code includes its own suite of unit tests. Review URL: http://codereview.chromium.org/194065 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@25850 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser')
-rw-r--r--chrome/browser/sync/engine/all_status.cc335
-rw-r--r--chrome/browser/sync/engine/all_status.h210
-rw-r--r--chrome/browser/sync/engine/all_status_unittest.cc24
-rw-r--r--chrome/browser/sync/engine/apply_updates_command.cc34
-rw-r--r--chrome/browser/sync/engine/apply_updates_command.h33
-rw-r--r--chrome/browser/sync/engine/apply_updates_command_unittest.cc166
-rw-r--r--chrome/browser/sync/engine/auth_watcher.cc419
-rw-r--r--chrome/browser/sync/engine/auth_watcher.h204
-rw-r--r--chrome/browser/sync/engine/authenticator.cc106
-rw-r--r--chrome/browser/sync/engine/authenticator.h106
-rw-r--r--chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc439
-rw-r--r--chrome/browser/sync/engine/build_and_process_conflict_sets_command.h64
-rw-r--r--chrome/browser/sync/engine/build_commit_command.cc143
-rw-r--r--chrome/browser/sync/engine/build_commit_command.h27
-rw-r--r--chrome/browser/sync/engine/change_reorder_buffer.cc199
-rw-r--r--chrome/browser/sync/engine/change_reorder_buffer.h100
-rw-r--r--chrome/browser/sync/engine/client_command_channel.h31
-rw-r--r--chrome/browser/sync/engine/conflict_resolution_view.cc167
-rw-r--r--chrome/browser/sync/engine/conflict_resolution_view.h123
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.cc758
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.h129
-rw-r--r--chrome/browser/sync/engine/download_updates_command.cc64
-rw-r--r--chrome/browser/sync/engine/download_updates_command.h27
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.cc242
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.h202
-rw-r--r--chrome/browser/sync/engine/model_changing_syncer_command.cc19
-rw-r--r--chrome/browser/sync/engine/model_changing_syncer_command.h50
-rw-r--r--chrome/browser/sync/engine/model_safe_worker.h45
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator.cc483
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator.h304
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc42
-rw-r--r--chrome/browser/sync/engine/net/http_return.h16
-rw-r--r--chrome/browser/sync/engine/net/openssl_init.cc129
-rw-r--r--chrome/browser/sync/engine/net/openssl_init.h20
-rw-r--r--chrome/browser/sync/engine/net/server_connection_manager.cc375
-rw-r--r--chrome/browser/sync/engine/net/server_connection_manager.h345
-rw-r--r--chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc77
-rw-r--r--chrome/browser/sync/engine/net/syncapi_server_connection_manager.h75
-rw-r--r--chrome/browser/sync/engine/net/url_translator.cc50
-rw-r--r--chrome/browser/sync/engine/net/url_translator.h27
-rw-r--r--chrome/browser/sync/engine/post_commit_message_command.cc50
-rw-r--r--chrome/browser/sync/engine/post_commit_message_command.h27
-rw-r--r--chrome/browser/sync/engine/process_commit_response_command.cc374
-rw-r--r--chrome/browser/sync/engine/process_commit_response_command.h54
-rw-r--r--chrome/browser/sync/engine/process_updates_command.cc167
-rw-r--r--chrome/browser/sync/engine/process_updates_command.h45
-rw-r--r--chrome/browser/sync/engine/resolve_conflicts_command.cc28
-rw-r--r--chrome/browser/sync/engine/resolve_conflicts_command.h34
-rw-r--r--chrome/browser/sync/engine/sync_cycle_state.h253
-rw-r--r--chrome/browser/sync/engine/sync_process_state.cc325
-rw-r--r--chrome/browser/sync/engine/sync_process_state.h384
-rw-r--r--chrome/browser/sync/engine/syncapi.cc1565
-rw-r--r--chrome/browser/sync/engine/syncer.cc338
-rw-r--r--chrome/browser/sync/engine/syncer.h234
-rw-r--r--chrome/browser/sync/engine/syncer_command.cc54
-rw-r--r--chrome/browser/sync/engine/syncer_command.h44
-rw-r--r--chrome/browser/sync/engine/syncer_end_command.cc44
-rw-r--r--chrome/browser/sync/engine/syncer_end_command.h32
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util.cc276
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util.h73
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util_unittest.cc119
-rw-r--r--chrome/browser/sync/engine/syncer_session.h364
-rw-r--r--chrome/browser/sync/engine/syncer_status.cc15
-rw-r--r--chrome/browser/sync/engine/syncer_status.h255
-rw-r--r--chrome/browser/sync/engine/syncer_thread.cc558
-rw-r--r--chrome/browser/sync/engine/syncer_thread.h235
-rw-r--r--chrome/browser/sync/engine/syncer_thread_unittest.cc299
-rw-r--r--chrome/browser/sync/engine/syncer_types.h151
-rw-r--r--chrome/browser/sync/engine/syncer_unittest.cc4588
-rw-r--r--chrome/browser/sync/engine/syncer_util.cc845
-rw-r--r--chrome/browser/sync/engine/syncer_util.h206
-rw-r--r--chrome/browser/sync/engine/syncproto.h72
-rw-r--r--chrome/browser/sync/engine/syncproto_unittest.cc18
-rw-r--r--chrome/browser/sync/engine/update_applicator.cc98
-rw-r--r--chrome/browser/sync/engine/update_applicator.h61
-rw-r--r--chrome/browser/sync/engine/verify_updates_command.cc102
-rw-r--r--chrome/browser/sync/engine/verify_updates_command.h36
-rw-r--r--chrome/browser/sync/notifier/base/async_dns_lookup.cc133
-rw-r--r--chrome/browser/sync/notifier/base/async_dns_lookup.h49
-rw-r--r--chrome/browser/sync/notifier/base/async_network_alive.h52
-rw-r--r--chrome/browser/sync/notifier/base/fastalloc.h59
-rw-r--r--chrome/browser/sync/notifier/base/linux/network_status_detector_task_linux.cc15
-rw-r--r--chrome/browser/sync/notifier/base/linux/time_linux.cc7
-rw-r--r--chrome/browser/sync/notifier/base/nethelpers.cc42
-rw-r--r--chrome/browser/sync/notifier/base/nethelpers.h25
-rw-r--r--chrome/browser/sync/notifier/base/network_status_detector_task.cc30
-rw-r--r--chrome/browser/sync/notifier/base/network_status_detector_task.h55
-rw-r--r--chrome/browser/sync/notifier/base/network_status_detector_task_mt.cc48
-rw-r--r--chrome/browser/sync/notifier/base/network_status_detector_task_mt.h34
-rw-r--r--chrome/browser/sync/notifier/base/posix/time_posix.cc54
-rw-r--r--chrome/browser/sync/notifier/base/signal_thread_task.h92
-rw-r--r--chrome/browser/sync/notifier/base/static_assert.h19
-rw-r--r--chrome/browser/sync/notifier/base/string.cc403
-rw-r--r--chrome/browser/sync/notifier/base/string.h381
-rw-r--r--chrome/browser/sync/notifier/base/string_unittest.cc362
-rw-r--r--chrome/browser/sync/notifier/base/task_pump.cc42
-rw-r--r--chrome/browser/sync/notifier/base/task_pump.h34
-rw-r--r--chrome/browser/sync/notifier/base/time.cc360
-rw-r--r--chrome/browser/sync/notifier/base/time.h114
-rw-r--r--chrome/browser/sync/notifier/base/time_unittest.cc73
-rw-r--r--chrome/browser/sync/notifier/base/timer.cc33
-rw-r--r--chrome/browser/sync/notifier/base/timer.h40
-rw-r--r--chrome/browser/sync/notifier/base/utils.h91
-rw-r--r--chrome/browser/sync/notifier/base/win32/async_network_alive_win32.cc233
-rw-r--r--chrome/browser/sync/notifier/base/win32/time_win32.cc158
-rw-r--r--chrome/browser/sync/notifier/communicator/auth_task.cc69
-rw-r--r--chrome/browser/sync/notifier/communicator/auth_task.h77
-rw-r--r--chrome/browser/sync/notifier/communicator/auto_reconnect.cc155
-rw-r--r--chrome/browser/sync/notifier/communicator/auto_reconnect.h71
-rw-r--r--chrome/browser/sync/notifier/communicator/connection_options.cc16
-rw-r--r--chrome/browser/sync/notifier/communicator/connection_options.h55
-rw-r--r--chrome/browser/sync/notifier/communicator/connection_settings.cc126
-rw-r--r--chrome/browser/sync/notifier/communicator/connection_settings.h78
-rw-r--r--chrome/browser/sync/notifier/communicator/const_communicator.h11
-rw-r--r--chrome/browser/sync/notifier/communicator/login.cc361
-rw-r--r--chrome/browser/sync/notifier/communicator/login.h155
-rw-r--r--chrome/browser/sync/notifier/communicator/login_failure.cc45
-rw-r--r--chrome/browser/sync/notifier/communicator/login_failure.h69
-rw-r--r--chrome/browser/sync/notifier/communicator/login_settings.cc57
-rw-r--r--chrome/browser/sync/notifier/communicator/login_settings.h97
-rw-r--r--chrome/browser/sync/notifier/communicator/mailbox.cc682
-rw-r--r--chrome/browser/sync/notifier/communicator/mailbox.h166
-rw-r--r--chrome/browser/sync/notifier/communicator/mailbox_unittest.cc118
-rw-r--r--chrome/browser/sync/notifier/communicator/product_info.cc15
-rw-r--r--chrome/browser/sync/notifier/communicator/product_info.h14
-rw-r--r--chrome/browser/sync/notifier/communicator/single_login_attempt.cc562
-rw-r--r--chrome/browser/sync/notifier/communicator/single_login_attempt.h139
-rw-r--r--chrome/browser/sync/notifier/communicator/talk_auth_task.cc73
-rw-r--r--chrome/browser/sync/notifier/communicator/talk_auth_task.h62
-rw-r--r--chrome/browser/sync/notifier/communicator/xml_parse_helpers-inl.h24
-rw-r--r--chrome/browser/sync/notifier/communicator/xml_parse_helpers.cc185
-rw-r--r--chrome/browser/sync/notifier/communicator/xml_parse_helpers.h75
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_connection_generator.cc210
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h81
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_log.cc111
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_log.h45
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.cc437
-rw-r--r--chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h85
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/gaiaauth.cc442
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/gaiaauth.h129
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/gaiahelper.cc236
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/gaiahelper.h87
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/inet_aton.h14
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/sigslotrepeater.h86
-rw-r--r--chrome/browser/sync/notifier/gaia_auth/win32window.cc115
-rw-r--r--chrome/browser/sync/notifier/listener/listen_task.cc72
-rw-r--r--chrome/browser/sync/notifier/listener/listen_task.h47
-rw-r--r--chrome/browser/sync/notifier/listener/listener_unittest.cc10
-rw-r--r--chrome/browser/sync/notifier/listener/mediator_thread.h43
-rw-r--r--chrome/browser/sync/notifier/listener/mediator_thread_impl.cc278
-rw-r--r--chrome/browser/sync/notifier/listener/mediator_thread_impl.h120
-rw-r--r--chrome/browser/sync/notifier/listener/mediator_thread_mock.h74
-rw-r--r--chrome/browser/sync/notifier/listener/send_update_task.cc96
-rw-r--r--chrome/browser/sync/notifier/listener/send_update_task.h37
-rw-r--r--chrome/browser/sync/notifier/listener/subscribe_task.cc90
-rw-r--r--chrome/browser/sync/notifier/listener/subscribe_task.h39
-rw-r--r--chrome/browser/sync/notifier/listener/talk_mediator.h71
-rw-r--r--chrome/browser/sync/notifier/listener/talk_mediator_impl.cc275
-rw-r--r--chrome/browser/sync/notifier/listener/talk_mediator_impl.h117
-rw-r--r--chrome/browser/sync/notifier/listener/talk_mediator_unittest.cc176
-rw-r--r--chrome/browser/sync/protocol/proto2_to_oproto.py30
-rw-r--r--chrome/browser/sync/protocol/service_constants.h24
-rw-r--r--chrome/browser/sync/protocol/sync.proto344
-rw-r--r--chrome/browser/sync/syncable/blob.h16
-rw-r--r--chrome/browser/sync/syncable/dir_open_result.h17
-rw-r--r--chrome/browser/sync/syncable/directory_backing_store.cc657
-rw-r--r--chrome/browser/sync/syncable/directory_backing_store.h123
-rw-r--r--chrome/browser/sync/syncable/directory_event.h21
-rw-r--r--chrome/browser/sync/syncable/directory_manager.cc169
-rw-r--r--chrome/browser/sync/syncable/directory_manager.h128
-rw-r--r--chrome/browser/sync/syncable/path_name_cmp.h20
-rw-r--r--chrome/browser/sync/syncable/syncable-inl.h30
-rw-r--r--chrome/browser/sync/syncable/syncable.cc2002
-rw-r--r--chrome/browser/sync/syncable/syncable.h1419
-rw-r--r--chrome/browser/sync/syncable/syncable_changes_version.h29
-rw-r--r--chrome/browser/sync/syncable/syncable_columns.h78
-rw-r--r--chrome/browser/sync/syncable/syncable_id.cc72
-rw-r--r--chrome/browser/sync/syncable/syncable_id.h114
-rw-r--r--chrome/browser/sync/syncable/syncable_id_unittest.cc44
-rw-r--r--chrome/browser/sync/syncable/syncable_unittest.cc1554
-rw-r--r--chrome/browser/sync/util/character_set_converters-linux.cc60
-rw-r--r--chrome/browser/sync/util/character_set_converters-win32.cc62
-rw-r--r--chrome/browser/sync/util/character_set_converters.cc54
-rw-r--r--chrome/browser/sync/util/character_set_converters.h236
-rw-r--r--chrome/browser/sync/util/character_set_converters_unittest.cc168
-rw-r--r--chrome/browser/sync/util/closure.h12
-rw-r--r--chrome/browser/sync/util/compat-file-posix.cc12
-rw-r--r--chrome/browser/sync/util/compat-file-win.cc14
-rw-r--r--chrome/browser/sync/util/compat-file.h31
-rw-r--r--chrome/browser/sync/util/compat-pthread.h38
-rw-r--r--chrome/browser/sync/util/crypto_helpers.cc62
-rw-r--r--chrome/browser/sync/util/crypto_helpers.h40
-rw-r--r--chrome/browser/sync/util/crypto_helpers_unittest.cc17
-rw-r--r--chrome/browser/sync/util/data_encryption.cc51
-rw-r--r--chrome/browser/sync/util/data_encryption.h21
-rw-r--r--chrome/browser/sync/util/data_encryption_unittest.cc31
-rw-r--r--chrome/browser/sync/util/dbgq.h27
-rw-r--r--chrome/browser/sync/util/event_sys-inl.h340
-rw-r--r--chrome/browser/sync/util/event_sys.h41
-rw-r--r--chrome/browser/sync/util/event_sys_unittest.cc271
-rw-r--r--chrome/browser/sync/util/fast_dump.h60
-rw-r--r--chrome/browser/sync/util/highres_timer-linux.cc29
-rw-r--r--chrome/browser/sync/util/highres_timer-linux.h79
-rw-r--r--chrome/browser/sync/util/highres_timer-win32.cc46
-rw-r--r--chrome/browser/sync/util/highres_timer-win32.h78
-rw-r--r--chrome/browser/sync/util/highres_timer.h13
-rw-r--r--chrome/browser/sync/util/highres_timer_unittest.cc49
-rw-r--r--chrome/browser/sync/util/path_helpers-linux.cc51
-rw-r--r--chrome/browser/sync/util/path_helpers-posix.cc99
-rw-r--r--chrome/browser/sync/util/path_helpers.cc153
-rw-r--r--chrome/browser/sync/util/path_helpers.h105
-rw-r--r--chrome/browser/sync/util/path_helpers_unittest.cc131
-rw-r--r--chrome/browser/sync/util/pthread_helpers.cc162
-rw-r--r--chrome/browser/sync/util/pthread_helpers.h259
-rw-r--r--chrome/browser/sync/util/pthread_helpers_fwd.h13
-rw-r--r--chrome/browser/sync/util/query_helpers.cc282
-rw-r--r--chrome/browser/sync/util/query_helpers.h698
-rw-r--r--chrome/browser/sync/util/query_helpers_unittest.cc36
-rw-r--r--chrome/browser/sync/util/row_iterator.h122
-rw-r--r--chrome/browser/sync/util/signin.h15
-rw-r--r--chrome/browser/sync/util/sync_types.h75
-rw-r--r--chrome/browser/sync/util/user_settings-posix.cc34
-rw-r--r--chrome/browser/sync/util/user_settings-win32.cc67
-rw-r--r--chrome/browser/sync/util/user_settings.cc350
-rw-r--r--chrome/browser/sync/util/user_settings.h114
-rw-r--r--chrome/browser/sync/util/user_settings_unittest.cc86
226 files changed, 40735 insertions, 0 deletions
diff --git a/chrome/browser/sync/engine/all_status.cc b/chrome/browser/sync/engine/all_status.cc
new file mode 100644
index 0000000..e1bc5c7
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status.cc
@@ -0,0 +1,335 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/all_status.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "base/rand_util.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+static const time_t kMinSyncObserveInterval = 10; // seconds
+
+// Backoff interval randomization factor.
+static const int kBackoffRandomizationFactor = 2;
+
+const char* AllStatus::GetSyncStatusString(SyncStatus icon) {
+ const char* strings[] = {"OFFLINE", "OFFLINE_UNSYNCED", "SYNCING", "READY",
+ "CONFLICT", "OFFLINE_UNUSABLE"};
+ COMPILE_ASSERT(ARRAYSIZE(strings) == ICON_STATUS_COUNT, enum_indexed_array);
+ if (icon < 0 || icon >= ARRAYSIZE(strings))
+ LOG(FATAL) << "Illegal Icon State:" << icon;
+ return strings[icon];
+}
+
+static const AllStatus::Status init_status =
+ { AllStatus::OFFLINE };
+
+static const AllStatusEvent shutdown_event =
+ { AllStatusEvent::SHUTDOWN, init_status };
+
+AllStatus::AllStatus() : channel_(new Channel(shutdown_event)),
+ status_(init_status) {
+ status_.initial_sync_ended = true;
+ status_.notifications_enabled = false;
+}
+
+AllStatus::~AllStatus() {
+ delete channel_;
+}
+
+void AllStatus::WatchConnectionManager(ServerConnectionManager* conn_mgr) {
+ conn_mgr_hookup_.reset(NewEventListenerHookup(conn_mgr->channel(), this,
+ &AllStatus::HandleServerConnectionEvent));
+}
+
+void AllStatus::WatchAuthenticator(GaiaAuthenticator* gaia) {
+ gaia_hookup_.reset(NewEventListenerHookup(gaia->channel(), this,
+ &AllStatus::HandleGaiaAuthEvent));
+}
+
+void AllStatus::WatchAuthWatcher(AuthWatcher* auth_watcher) {
+ authwatcher_hookup_.reset(
+ NewEventListenerHookup(auth_watcher->channel(), this,
+ &AllStatus::HandleAuthWatcherEvent));
+}
+
+void AllStatus::WatchSyncerThread(SyncerThread* syncer_thread) {
+ syncer_thread_hookup_.reset(
+ NewEventListenerHookup(syncer_thread->channel(), this,
+ &AllStatus::HandleSyncerEvent));
+}
+
+AllStatus::Status AllStatus::CreateBlankStatus() const {
+ Status status = status_;
+ status.syncing = true;
+ status.unsynced_count = 0;
+ status.conflicting_count = 0;
+ status.initial_sync_ended = false;
+ status.syncer_stuck = false;
+ status.max_consecutive_errors = 0;
+ status.server_broken = false;
+ status.updates_available = 0;
+ status.updates_received = 0;
+ return status;
+}
+
+AllStatus::Status AllStatus::CalcSyncing(const SyncerEvent &event) const {
+ Status status = CreateBlankStatus();
+ SyncerStatus syncerStatus(event.last_session);
+ status.unsynced_count += syncerStatus.unsynced_count();
+ status.conflicting_count += syncerStatus.conflicting_commits();
+ if (syncerStatus.current_sync_timestamp() ==
+ syncerStatus.servers_latest_timestamp()) {
+ status.conflicting_count += syncerStatus.conflicting_updates();
+ }
+ status.syncing |= syncerStatus.syncing();
+ // Show a syncer as syncing if it's got stalled updates.
+ status.syncing = event.last_session->ShouldSyncAgain();
+ status.initial_sync_ended |= syncerStatus.IsShareUsable();
+ status.syncer_stuck |= syncerStatus.syncer_stuck();
+ if (syncerStatus.consecutive_errors() > status.max_consecutive_errors)
+ status.max_consecutive_errors = syncerStatus.consecutive_errors();
+
+ // 100 is an arbitrary limit.
+ if (syncerStatus.consecutive_transient_error_commits() > 100)
+ status.server_broken = true;
+
+ status.updates_available += syncerStatus.servers_latest_timestamp();
+ status.updates_received += syncerStatus.current_sync_timestamp();
+ return status;
+}
+
+AllStatus::Status AllStatus::CalcSyncing() const {
+ return CreateBlankStatus();
+}
+
+int AllStatus::CalcStatusChanges(Status* old_status) {
+ int what_changed = 0;
+
+ // Calculate what changed and what the new icon should be.
+ if (status_.syncing != old_status->syncing)
+ what_changed |= AllStatusEvent::SYNCING;
+ if (status_.unsynced_count != old_status->unsynced_count)
+ what_changed |= AllStatusEvent::UNSYNCED_COUNT;
+ if (status_.server_up != old_status->server_up)
+ what_changed |= AllStatusEvent::SERVER_UP;
+ if (status_.server_reachable != old_status->server_reachable)
+ what_changed |= AllStatusEvent::SERVER_REACHABLE;
+ if (status_.notifications_enabled != old_status->notifications_enabled)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_ENABLED;
+ if (status_.notifications_received != old_status->notifications_received)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_RECEIVED;
+ if (status_.notifications_sent != old_status->notifications_sent)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_SENT;
+ if (status_.initial_sync_ended != old_status->initial_sync_ended)
+ what_changed |= AllStatusEvent::INITIAL_SYNC_ENDED;
+ if (status_.authenticated != old_status->authenticated)
+ what_changed |= AllStatusEvent::AUTHENTICATED;
+
+ const bool unsynced_changes = status_.unsynced_count > 0;
+ const bool online = status_.authenticated &&
+ status_.server_reachable && status_.server_up && !status_.server_broken;
+ if (online) {
+ if (status_.syncer_stuck)
+ status_.icon = CONFLICT;
+ else if (unsynced_changes || status_.syncing)
+ status_.icon = SYNCING;
+ else
+ status_.icon = READY;
+ } else if (!status_.initial_sync_ended) {
+ status_.icon = OFFLINE_UNUSABLE;
+ } else if (unsynced_changes) {
+ status_.icon = OFFLINE_UNSYNCED;
+ } else {
+ status_.icon = OFFLINE;
+ }
+
+ if (status_.icon != old_status->icon)
+ what_changed |= AllStatusEvent::ICON;
+
+ if (0 == what_changed)
+ return 0;
+ *old_status = status_;
+ return what_changed;
+}
+
+void AllStatus::HandleGaiaAuthEvent(const GaiaAuthEvent& gaia_event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (gaia_event.what_happened) {
+ case GaiaAuthEvent::GAIA_AUTH_FAILED:
+ status_.authenticated = false;
+ break;
+ case GaiaAuthEvent::GAIA_AUTH_SUCCEEDED:
+ status_.authenticated = true;
+ break;
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleAuthWatcherEvent(const AuthWatcherEvent& auth_event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (auth_event.what_happened) {
+ case AuthWatcherEvent::GAIA_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_CONNECTION_FAILED:
+ case AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START:
+ status_.authenticated = false;
+ break;
+ case AuthWatcherEvent::AUTH_SUCCEEDED:
+ // If we've already calculated that the server is reachable, since we've
+ // successfully authenticated, we can be confident that the server is up.
+ if (status_.server_reachable)
+ status_.server_up = true;
+
+ if (!status_.authenticated) {
+ status_.authenticated = true;
+ status_ = CalcSyncing();
+ } else {
+ lock.set_notify_plan(DONT_NOTIFY);
+ }
+ break;
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleSyncerEvent(const SyncerEvent& event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (event.what_happened) {
+ case SyncerEvent::SYNC_CYCLE_ENDED:
+ case SyncerEvent::COMMITS_SUCCEEDED:
+ break;
+ case SyncerEvent::STATUS_CHANGED:
+ status_ = CalcSyncing(event);
+ break;
+ case SyncerEvent::SHUTDOWN_USE_WITH_CARE:
+ // We're safe to use this value here because we don't call into the syncer
+ // or block on any processes.
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ case SyncerEvent::OVER_QUOTA:
+ LOG(WARNING) << "User has gone over quota.";
+ lock.NotifyOverQuota();
+ break;
+ case SyncerEvent::REQUEST_SYNC_NUDGE:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ default:
+ LOG(ERROR) << "Unrecognized Syncer Event: " << event.what_happened;
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleServerConnectionEvent(
+ const ServerConnectionEvent& event) {
+ if (ServerConnectionEvent::STATUS_CHANGED == event.what_happened) {
+ ScopedStatusLockWithNotify lock(this);
+ status_.server_up = IsGoodReplyFromServer(event.connection_code);
+ status_.server_reachable = event.server_reachable;
+ }
+}
+
+void AllStatus::WatchTalkMediator(const TalkMediator* mediator) {
+ status_.notifications_enabled = false;
+ talk_mediator_hookup_.reset(
+ NewEventListenerHookup(mediator->channel(), this,
+ &AllStatus::HandleTalkMediatorEvent));
+}
+
+void AllStatus::HandleTalkMediatorEvent(
+ const TalkMediatorEvent& event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (event.what_happened) {
+ case TalkMediatorEvent::SUBSCRIPTIONS_ON:
+ status_.notifications_enabled = true;
+ break;
+ case TalkMediatorEvent::LOGOUT_SUCCEEDED:
+ case TalkMediatorEvent::SUBSCRIPTIONS_OFF:
+ case TalkMediatorEvent::TALKMEDIATOR_DESTROYED:
+ status_.notifications_enabled = false;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_RECEIVED:
+ status_.notifications_received++;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_SENT:
+ status_.notifications_sent++;
+ break;
+ case TalkMediatorEvent::LOGIN_SUCCEEDED:
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+AllStatus::Status AllStatus::status() const {
+ MutexLock lock(&mutex_);
+ return status_;
+}
+
+int AllStatus::GetRecommendedDelaySeconds(int base_delay_seconds) {
+ if (base_delay_seconds >= kMaxBackoffSeconds)
+ return kMaxBackoffSeconds;
+
+ // This calculates approx. base_delay_seconds * 2 +/- base_delay_seconds / 2
+ int backoff_s = (0 == base_delay_seconds) ? 1 :
+ base_delay_seconds * kBackoffRandomizationFactor;
+
+ // Flip a coin to randomize backoff interval by +/- 50%.
+ int rand_sign = base::RandInt(0, 1) * 2 - 1;
+
+ // Truncation is adequate for rounding here.
+ backoff_s = backoff_s +
+ (rand_sign * (base_delay_seconds / kBackoffRandomizationFactor));
+
+ // Cap the backoff interval.
+ backoff_s = std::min(backoff_s, kMaxBackoffSeconds);
+
+ return backoff_s;
+}
+
+int AllStatus::GetRecommendedDelay(int base_delay_ms) const {
+ return GetRecommendedDelaySeconds(base_delay_ms / 1000) * 1000;
+}
+
+ScopedStatusLockWithNotify::ScopedStatusLockWithNotify(AllStatus* allstatus)
+ : allstatus_(allstatus), plan_(NOTIFY_IF_STATUS_CHANGED) {
+ event_.what_changed = 0;
+ allstatus->mutex_.Lock();
+ event_.status = allstatus->status_;
+}
+
+ScopedStatusLockWithNotify::~ScopedStatusLockWithNotify() {
+ if (DONT_NOTIFY == plan_) {
+ allstatus_->mutex_.Unlock();
+ return;
+ }
+ event_.what_changed |= allstatus_->CalcStatusChanges(&event_.status);
+ allstatus_->mutex_.Unlock();
+ if (event_.what_changed)
+ allstatus_->channel()->NotifyListeners(event_);
+}
+
+void ScopedStatusLockWithNotify::NotifyOverQuota() {
+ event_.what_changed |= AllStatusEvent::OVER_QUOTA;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/all_status.h b/chrome/browser/sync/engine/all_status.h
new file mode 100644
index 0000000..e7fb0ba
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The all status object watches various sync engine components and aggregates
+// the status of all of them into one place.
+//
+#ifndef CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
+#define CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
+
+#include <map>
+
+#include "base/atomicops.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+namespace browser_sync {
+class AuthWatcher;
+class GaiaAuthenticator;
+class ScopedStatusLockWithNotify;
+class ServerConnectionManager;
+class Syncer;
+class SyncerThread;
+class TalkMediator;
+struct AllStatusEvent;
+struct AuthWatcherEvent;
+struct GaiaAuthEvent;
+struct ServerConnectionEvent;
+struct SyncerEvent;
+struct TalkMediatorEvent;
+
+class AllStatus {
+ friend class ScopedStatusLockWithNotify;
+ public:
+ typedef EventChannel<AllStatusEvent, PThreadMutex> Channel;
+
+ // Status of the entire sync process distilled into a single enum.
+ enum SyncStatus {
+ // Can't connect to server, but there are no pending changes in
+ // our local dataase.
+ OFFLINE,
+ // Can't connect to server, and there are pending changes in our
+ // local cache.
+ OFFLINE_UNSYNCED,
+ // Connected and syncing.
+ SYNCING,
+ // Connected, no pending changes.
+ READY,
+ // Internal sync error.
+ CONFLICT,
+ // Can't connect to server, and we haven't completed the initial
+ // sync yet. So there's nothing we can do but wait for the server.
+ OFFLINE_UNUSABLE,
+ // For array sizing, etc.
+ ICON_STATUS_COUNT
+ };
+
+ struct Status {
+ SyncStatus icon;
+ int unsynced_count;
+ int conflicting_count;
+ bool syncing;
+ bool authenticated; // Successfully authenticated via gaia
+ // True if we have received at least one good reply from the server.
+ bool server_up;
+ bool server_reachable;
+ // True after a client has done a first sync.
+ bool initial_sync_ended;
+ // True if any syncer is stuck.
+ bool syncer_stuck;
+ // True if any syncer is stopped because of server issues.
+ bool server_broken;
+ // True only if the notification listener has subscribed.
+ bool notifications_enabled;
+ // Notifications counters updated by the actions in synapi.
+ int notifications_received;
+ int notifications_sent;
+ // The max number of consecutive errors from any component.
+ int max_consecutive_errors;
+ bool disk_full;
+
+ // Contains current transfer item meta handle
+ int64 current_item_meta_handle;
+ // The next two values will be equal if all updates have been received.
+ // total updates available.
+ int64 updates_available;
+ // total updates received.
+ int64 updates_received;
+ };
+
+ // Maximum interval for exponential backoff.
+ static const int kMaxBackoffSeconds = 60 * 60 * 4; // 4 hours.
+
+ AllStatus();
+ ~AllStatus();
+
+ void WatchConnectionManager(ServerConnectionManager* conn_mgr);
+ void HandleServerConnectionEvent(const ServerConnectionEvent& event);
+
+ // Both WatchAuthenticator/HandleGaiaAuthEvent and WatchAuthWatcher/
+ // HandleAuthWatcherEventachieve have the same goal; use only one of the
+ // following two. (The AuthWatcher is watched under Windows; the
+ // GaiaAuthenticator is watched under Mac/Linux.)
+ void WatchAuthenticator(GaiaAuthenticator* gaia);
+ void HandleGaiaAuthEvent(const GaiaAuthEvent& event);
+
+ void WatchAuthWatcher(AuthWatcher* auth_watcher);
+ void HandleAuthWatcherEvent(const AuthWatcherEvent& event);
+
+ void WatchSyncerThread(SyncerThread* syncer_thread);
+ void HandleSyncerEvent(const SyncerEvent& event);
+
+ void WatchTalkMediator(
+ const browser_sync::TalkMediator* talk_mediator);
+ void HandleTalkMediatorEvent(
+ const browser_sync::TalkMediatorEvent& event);
+
+ // Returns a string description of the SyncStatus (currently just the ascii
+ // version of the enum). Will LOG(FATAL) if the status us out of range.
+ static const char* GetSyncStatusString(SyncStatus status);
+
+ Channel* channel() const { return channel_; }
+
+ Status status() const;
+
+ // DDOS avoidance function. The argument and return value is in seconds
+ static int GetRecommendedDelaySeconds(int base_delay_seconds);
+
+ // This uses AllStatus' max_consecutive_errors as the error count
+ int GetRecommendedDelay(int base_delay) const;
+
+ protected:
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+ typedef std::map<Syncer*, EventListenerHookup*> Syncers;
+
+ // Examines syncer to calculate syncing and the unsynced count,
+ // and returns a Status with new values.
+ Status CalcSyncing() const;
+ Status CalcSyncing(const SyncerEvent& event) const;
+ Status CreateBlankStatus() const;
+
+ // Examines status to see what has changed, updates old_status in place.
+ int CalcStatusChanges(Status* old_status);
+
+ Status status_;
+ Channel* const channel_;
+ scoped_ptr<EventListenerHookup> conn_mgr_hookup_;
+ scoped_ptr<EventListenerHookup> gaia_hookup_;
+ scoped_ptr<EventListenerHookup> authwatcher_hookup_;
+ scoped_ptr<EventListenerHookup> syncer_thread_hookup_;
+ scoped_ptr<EventListenerHookup> diskfull_hookup_;
+ scoped_ptr<EventListenerHookup> talk_mediator_hookup_;
+
+ mutable PThreadMutex mutex_; // Protects all data members.
+};
+
+struct AllStatusEvent {
+ enum { // A bit mask of which members have changed.
+ SHUTDOWN = 0x0000,
+ ICON = 0x0001,
+ UNSYNCED_COUNT = 0x0002,
+ AUTHENTICATED = 0x0004,
+ SYNCING = 0x0008,
+ SERVER_UP = 0x0010,
+ NOTIFICATIONS_ENABLED = 0x0020,
+ INITIAL_SYNC_ENDED = 0x0080,
+ SERVER_REACHABLE = 0x0100,
+ DISK_FULL = 0x0200,
+ OVER_QUOTA = 0x0400,
+ NOTIFICATIONS_RECEIVED = 0x0800,
+ NOTIFICATIONS_SENT = 0x1000,
+ TRASH_WARNING = 0x40000,
+ };
+ int what_changed;
+ AllStatus::Status status;
+
+ typedef AllStatusEvent EventType;
+ static inline bool IsChannelShutdownEvent(const AllStatusEvent& e) {
+ return SHUTDOWN == e.what_changed;
+ }
+};
+
+enum StatusNotifyPlan {
+ NOTIFY_IF_STATUS_CHANGED,
+ // A small optimization, don't do the big compare when we know
+ // nothing has changed.
+ DONT_NOTIFY,
+};
+
+class ScopedStatusLockWithNotify {
+ public:
+ explicit ScopedStatusLockWithNotify(AllStatus* allstatus);
+ ~ScopedStatusLockWithNotify();
+ // Defaults to true, but can be explicitly reset so we don't have to
+ // do the big compare in the destructor. Small optimization.
+
+ inline void set_notify_plan(StatusNotifyPlan plan) { plan_ = plan; }
+ void NotifyOverQuota();
+ protected:
+ AllStatusEvent event_;
+ AllStatus* const allstatus_;
+ StatusNotifyPlan plan_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
diff --git a/chrome/browser/sync/engine/all_status_unittest.cc b/chrome/browser/sync/engine/all_status_unittest.cc
new file mode 100644
index 0000000..86829cd
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/all_status.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+TEST(AllStatus, GetRecommendedDelay) {
+ EXPECT_LE(0, AllStatus::GetRecommendedDelaySeconds(0));
+ EXPECT_LE(1, AllStatus::GetRecommendedDelaySeconds(1));
+ EXPECT_LE(50, AllStatus::GetRecommendedDelaySeconds(50));
+ EXPECT_LE(10, AllStatus::GetRecommendedDelaySeconds(10));
+ EXPECT_EQ(AllStatus::kMaxBackoffSeconds,
+ AllStatus::GetRecommendedDelaySeconds(
+ AllStatus::kMaxBackoffSeconds));
+ EXPECT_EQ(AllStatus::kMaxBackoffSeconds,
+ AllStatus::GetRecommendedDelaySeconds(
+ AllStatus::kMaxBackoffSeconds+1));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/apply_updates_command.cc b/chrome/browser/sync/engine/apply_updates_command.cc
new file mode 100644
index 0000000..2d9f3e5
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/update_applicator.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+ApplyUpdatesCommand::ApplyUpdatesCommand() {}
+ApplyUpdatesCommand::~ApplyUpdatesCommand() {}
+
+void ApplyUpdatesCommand::ModelChangingExecuteImpl(SyncerSession *session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ syncable::Directory::UnappliedUpdateMetaHandles handles;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &handles);
+
+ UpdateApplicator applicator(session, handles.begin(), handles.end());
+ while (applicator.AttemptOneApplication(&trans)) {
+ }
+ applicator.SaveProgressIntoSessionState();
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/apply_updates_command.h b/chrome/browser/sync/engine/apply_updates_command.h
new file mode 100644
index 0000000..320e42c
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+class MutableEntry;
+class Id;
+}
+
+namespace browser_sync {
+
+class ApplyUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ ApplyUpdatesCommand();
+ virtual ~ApplyUpdatesCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession *session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
new file mode 100644
index 0000000..ea4e253
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/test/sync/engine/test_directory_setter_upper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+using syncable::ReadTransaction;
+using syncable::MutableEntry;
+using syncable::Entry;
+using syncable::Id;
+using syncable::UNITTEST;
+
+namespace browser_sync {
+
+// A test fixture for tests exercising ApplyUpdatesCommand.
+class ApplyUpdatesCommandTest : public testing::Test {
+ protected:
+ ApplyUpdatesCommandTest() : next_revision_(1) {}
+ virtual ~ApplyUpdatesCommandTest() {}
+ virtual void SetUp() {
+ syncdb_.SetUp();
+ }
+ virtual void TearDown() {
+ syncdb_.TearDown();
+ }
+
+ protected:
+ // Create a new unapplied update.
+ void CreateUnappliedNewItemWithParent(const string& item_id,
+ const string& parent_id) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
+ Id::CreateFromServerId(item_id));
+ ASSERT_TRUE(entry.good());
+ PathString name;
+ AppendUTF8ToPathString(item_id, &name);
+ entry.Put(syncable::SERVER_VERSION, next_revision_++);
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_NAME, name);
+ entry.Put(syncable::SERVER_NON_UNIQUE_NAME, name);
+ entry.Put(syncable::SERVER_PARENT_ID, Id::CreateFromServerId(parent_id));
+ entry.Put(syncable::SERVER_IS_DIR, true);
+ }
+
+ TestDirectorySetterUpper syncdb_;
+ ApplyUpdatesCommand apply_updates_command_;
+
+ private:
+ int64 next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommandTest);
+};
+
+TEST_F(ApplyUpdatesCommandTest, Simple) {
+ string root_server_id = syncable::kNullId.GetServerId();
+ CreateUnappliedNewItemWithParent("parent", root_server_id);
+ CreateUnappliedNewItemWithParent("child", "parent");
+
+ SyncCycleState cycle_state;
+ SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
+ NULL, NULL, NULL, NULL);
+ SyncerSession session(&cycle_state, &process_state);
+
+ apply_updates_command_.ModelChangingExecuteImpl(&session);
+
+ EXPECT_EQ(2, cycle_state.AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ EXPECT_EQ(0, process_state.ConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts";
+ EXPECT_EQ(0, process_state.BlockedItemsSize())
+ << "Blocked items shouldn't be possible under any circumstances";
+ EXPECT_EQ(2, cycle_state.SuccessfullyAppliedUpdateCount())
+ << "All items should have been successfully applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, UpdateWithChildrenBeforeParents) {
+ // Set a bunch of updates which are difficult to apply in the order
+ // they're received due to dependencies on other unseen items.
+ string root_server_id = syncable::kNullId.GetServerId();
+ CreateUnappliedNewItemWithParent("a_child_created_first", "parent");
+ CreateUnappliedNewItemWithParent("x_child_created_first", "parent");
+ CreateUnappliedNewItemWithParent("parent", root_server_id);
+ CreateUnappliedNewItemWithParent("a_child_created_second", "parent");
+ CreateUnappliedNewItemWithParent("x_child_created_second", "parent");
+
+ SyncCycleState cycle_state;
+ SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
+ NULL, NULL, NULL, NULL);
+ SyncerSession session(&cycle_state, &process_state);
+
+ apply_updates_command_.ModelChangingExecuteImpl(&session);
+
+ EXPECT_EQ(5, cycle_state.AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ EXPECT_EQ(0, process_state.ConflictingItemsSize())
+ << "Simple update shouldn't result in conflicts, even if out-of-order";
+ EXPECT_EQ(0, process_state.BlockedItemsSize())
+ << "Blocked items shouldn't be possible under any circumstances";
+ EXPECT_EQ(5, cycle_state.SuccessfullyAppliedUpdateCount())
+ << "All updates should have been successfully applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, NestedItemsWithUnknownParent) {
+ // We shouldn't be able to do anything with either of these items.
+ CreateUnappliedNewItemWithParent("some_item", "unknown_parent");
+ CreateUnappliedNewItemWithParent("some_other_item", "some_item");
+
+ SyncCycleState cycle_state;
+ SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
+ NULL, NULL, NULL, NULL);
+ SyncerSession session(&cycle_state, &process_state);
+
+ apply_updates_command_.ModelChangingExecuteImpl(&session);
+
+ EXPECT_EQ(2, cycle_state.AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ EXPECT_EQ(2, process_state.ConflictingItemsSize())
+ << "All updates with an unknown ancestors should be in conflict";
+ EXPECT_EQ(0, process_state.BlockedItemsSize())
+ << "Blocked items shouldn't be possible under any circumstances";
+ EXPECT_EQ(0, cycle_state.SuccessfullyAppliedUpdateCount())
+ << "No item with an unknown ancestor should be applied";
+}
+
+TEST_F(ApplyUpdatesCommandTest, ItemsBothKnownAndUnknown) {
+ // See what happens when there's a mixture of good and bad updates.
+ string root_server_id = syncable::kNullId.GetServerId();
+ CreateUnappliedNewItemWithParent("first_unknown_item", "unknown_parent");
+ CreateUnappliedNewItemWithParent("first_known_item", root_server_id);
+ CreateUnappliedNewItemWithParent("second_unknown_item", "unknown_parent");
+ CreateUnappliedNewItemWithParent("second_known_item", "first_known_item");
+ CreateUnappliedNewItemWithParent("third_known_item", "fourth_known_item");
+ CreateUnappliedNewItemWithParent("fourth_known_item", root_server_id);
+
+ SyncCycleState cycle_state;
+ SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
+ NULL, NULL, NULL, NULL);
+ SyncerSession session(&cycle_state, &process_state);
+
+ apply_updates_command_.ModelChangingExecuteImpl(&session);
+
+ EXPECT_EQ(6, cycle_state.AppliedUpdatesSize())
+ << "All updates should have been attempted";
+ EXPECT_EQ(2, process_state.ConflictingItemsSize())
+ << "The updates with unknown ancestors should be in conflict";
+ EXPECT_EQ(0, process_state.BlockedItemsSize())
+ << "Blocked items shouldn't be possible under any circumstances";
+ EXPECT_EQ(4, cycle_state.SuccessfullyAppliedUpdateCount())
+ << "The updates with known ancestors should be successfully applied";
+}
+
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/auth_watcher.cc b/chrome/browser/sync/engine/auth_watcher.cc
new file mode 100644
index 0000000..0c999dd
--- /dev/null
+++ b/chrome/browser/sync/engine/auth_watcher.cc
@@ -0,0 +1,419 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/auth_watcher.h"
+
+#include "base/file_util.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/authenticator.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/user_settings.h"
+
+// How authentication happens:
+//
+// Kick Off:
+// The sync API looks to see if the user's name and
+// password are stored. If so, it calls authwatcher.Authenticate() with
+// them. Otherwise it fires an error event.
+//
+// On failed Gaia Auth:
+// The AuthWatcher attempts to use saved hashes to authenticate
+// locally, and on success opens the share.
+// On failure, fires an error event.
+//
+// On successful Gaia Auth:
+// AuthWatcher launches a thread to open the share and to get the
+// authentication token from the sync server.
+
+using std::pair;
+using std::string;
+using std::vector;
+
+namespace browser_sync {
+
+AuthWatcher::AuthWatcher(DirectoryManager* dirman,
+ ServerConnectionManager* scm,
+ AllStatus* allstatus,
+ const string& user_agent,
+ const string& service_id,
+ const string& gaia_url,
+ UserSettings* user_settings,
+ GaiaAuthenticator* gaia_auth,
+ TalkMediator* talk_mediator)
+ : dirman_(dirman),
+ scm_(scm),
+ allstatus_(allstatus),
+ status_(NOT_AUTHENTICATED),
+ thread_handle_valid_(false),
+ authenticating_now_(false),
+ current_attempt_trigger_(AuthWatcherEvent::USER_INITIATED),
+ user_settings_(user_settings),
+ gaia_(gaia_auth),
+ talk_mediator_(talk_mediator) {
+ connmgr_hookup_.reset(
+ NewEventListenerHookup(scm->channel(), this,
+ &AuthWatcher::HandleServerConnectionEvent));
+ AuthWatcherEvent done = { AuthWatcherEvent::AUTHWATCHER_DESTROYED };
+ channel_.reset(new Channel(done));
+}
+
+void* AuthWatcher::AuthenticationThreadStartRoutine(void* arg) {
+ ThreadParams* args = reinterpret_cast<ThreadParams*>(arg);
+ return args->self->AuthenticationThreadMain(args);
+}
+
+bool AuthWatcher::ProcessGaiaAuthSuccess() {
+ GaiaAuthenticator::AuthResults results = gaia_->results();
+
+ // We just successfully signed in again, let's clear out any residual cached
+ // login data from earlier sessions.
+ ClearAuthenticationData();
+
+ user_settings_->StoreEmailForSignin(results.email, results.primary_email);
+ user_settings_->RememberSigninType(results.email, results.signin);
+ user_settings_->RememberSigninType(results.primary_email, results.signin);
+ results.email = results.primary_email;
+ gaia_->SetUsernamePassword(results.primary_email, results.password);
+ if (!user_settings_->VerifyAgainstStoredHash(results.email, results.password))
+ user_settings_->StoreHashedPassword(results.email, results.password);
+
+ if (PERSIST_TO_DISK == results.credentials_saved) {
+ user_settings_->SetAuthTokenForService(results.email,
+ SYNC_SERVICE_NAME,
+ gaia_->auth_token());
+ }
+
+ return AuthenticateWithToken(results.email, gaia_->auth_token());
+}
+
+bool AuthWatcher::GetAuthTokenForService(const string& service_name,
+ string* service_token) {
+ string user_name;
+
+ // We special case this one by trying to return it from memory first. We
+ // do this because the user may not have checked "Remember me" and so we
+ // may not have persisted the sync service token beyond the initial
+ // login.
+ if (SYNC_SERVICE_NAME == service_name && !sync_service_token_.empty()) {
+ *service_token = sync_service_token_;
+ return true;
+ }
+
+ if (user_settings_->GetLastUserAndServiceToken(service_name, &user_name,
+ service_token)) {
+ // The casing gets preserved in some places and not in others it seems,
+ // at least I have observed different casings persisted to different DB
+ // tables.
+ if (!base::strcasecmp(user_name.c_str(),
+ user_settings_->email().c_str())) {
+ return true;
+ } else {
+ LOG(ERROR) << "ERROR: We seem to have saved credentials for someone "
+ << " other than the current user.";
+ return false;
+ }
+ }
+
+ return false;
+}
+
+const char kAuthWatcher[] = "AuthWatcher";
+
+bool AuthWatcher::AuthenticateWithToken(const string& gaia_email,
+ const string& auth_token) {
+ // Store a copy of the sync service token in memory.
+ sync_service_token_ = auth_token;
+ scm_->set_auth_token(sync_service_token_);
+
+ Authenticator auth(scm_, user_settings_);
+ Authenticator::AuthenticationResult result =
+ auth.AuthenticateToken(auth_token);
+ string email = gaia_email;
+ if (auth.display_email() && *auth.display_email()) {
+ email = auth.display_email();
+ LOG(INFO) << "Auth returned email " << email << " for gaia email " <<
+ gaia_email;
+ }
+ AuthWatcherEvent event = {AuthWatcherEvent::ILLEGAL_VALUE , 0};
+ gaia_->SetUsername(email);
+ gaia_->SetAuthToken(auth_token, SAVE_IN_MEMORY_ONLY);
+ const bool was_authenticated = NOT_AUTHENTICATED != status_;
+ switch (result) {
+ case Authenticator::SUCCESS:
+ {
+ status_ = GAIA_AUTHENTICATED;
+ PathString share_name;
+ CHECK(AppendUTF8ToPathString(email.data(), email.size(), &share_name));
+ user_settings_->SwitchUser(email);
+
+ // Set the authentication token for notifications
+ talk_mediator_->SetAuthToken(email, auth_token);
+
+ if (!was_authenticated)
+ LoadDirectoryListAndOpen(share_name);
+ NotifyAuthSucceeded(email);
+ return true;
+ }
+ case Authenticator::BAD_AUTH_TOKEN:
+ event.what_happened = AuthWatcherEvent::SERVICE_AUTH_FAILED;
+ break;
+ case Authenticator::CORRUPT_SERVER_RESPONSE:
+ case Authenticator::SERVICE_DOWN:
+ event.what_happened = AuthWatcherEvent::SERVICE_CONNECTION_FAILED;
+ break;
+ case Authenticator::USER_NOT_ACTIVATED:
+ event.what_happened = AuthWatcherEvent::SERVICE_USER_NOT_SIGNED_UP;
+ break;
+ default:
+ LOG(FATAL) << "Illegal return from AuthenticateToken";
+ return true; // keep the compiler happy
+ }
+ // Always fall back to local authentication.
+ if (was_authenticated || AuthenticateLocally(email)) {
+ if (AuthWatcherEvent::SERVICE_CONNECTION_FAILED == event.what_happened)
+ return true;
+ }
+ CHECK(event.what_happened != AuthWatcherEvent::ILLEGAL_VALUE);
+ NotifyListeners(&event);
+ return true;
+}
+
+bool AuthWatcher::AuthenticateLocally(string email) {
+ user_settings_->GetEmailForSignin(&email);
+ if (file_util::PathExists(dirman_->GetSyncDataDatabasePath())) {
+ gaia_->SetUsername(email);
+ status_ = LOCALLY_AUTHENTICATED;
+ user_settings_->SwitchUser(email);
+ PathString share_name;
+ CHECK(AppendUTF8ToPathString(email.data(), email.size(), &share_name));
+ LoadDirectoryListAndOpen(share_name);
+ NotifyAuthSucceeded(email);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool AuthWatcher::AuthenticateLocally(string email, const string& password) {
+ user_settings_->GetEmailForSignin(&email);
+ return user_settings_->VerifyAgainstStoredHash(email, password)
+ && AuthenticateLocally(email);
+}
+
+void AuthWatcher::ProcessGaiaAuthFailure() {
+ GaiaAuthenticator::AuthResults results = gaia_->results();
+ if (LOCALLY_AUTHENTICATED == status_) {
+ return; // nothing todo
+ } else if (AuthenticateLocally(results.email, results.password)) {
+ // We save the "Remember me" checkbox by putting a non-null auth
+ // token into the last_user table. So if we're offline and the
+ // user checks the box, insert a bogus auth token.
+ if (PERSIST_TO_DISK == results.credentials_saved) {
+ const string auth_token("bogus");
+ user_settings_->SetAuthTokenForService(results.email,
+ SYNC_SERVICE_NAME,
+ auth_token);
+ }
+ const bool unavailable = ConnectionUnavailable == results.auth_error ||
+ Unknown == results.auth_error ||
+ ServiceUnavailable == results.auth_error;
+ if (unavailable)
+ return;
+ }
+ AuthWatcherEvent myevent = { AuthWatcherEvent::GAIA_AUTH_FAILED, &results };
+ NotifyListeners(&myevent);
+}
+
+void* AuthWatcher::AuthenticationThreadMain(ThreadParams* args) {
+ NameCurrentThreadForDebugging("SyncEngine_AuthWatcherThread");
+ {
+ // This short lock ensures our launching function (StartNewAuthAttempt) is
+ // done.
+ MutexLock lock(&mutex_);
+ current_attempt_trigger_ = args->trigger;
+ }
+ SaveCredentials save = args->persist_creds_to_disk ?
+ PERSIST_TO_DISK : SAVE_IN_MEMORY_ONLY;
+ int attempt = 0;
+ SignIn const signin = user_settings_->
+ RecallSigninType(args->email, GMAIL_SIGNIN);
+
+ if (!args->password.empty()) while (true) {
+ bool authenticated;
+ if (!args->captcha_token.empty() && !args->captcha_value.empty())
+ authenticated = gaia_->Authenticate(args->email, args->password,
+ save, true, args->captcha_token,
+ args->captcha_value, signin);
+ else
+ authenticated = gaia_->Authenticate(args->email, args->password,
+ save, true, signin);
+ if (authenticated) {
+ if (!ProcessGaiaAuthSuccess()) {
+ if (3 != ++attempt)
+ continue;
+ AuthWatcherEvent event =
+ { AuthWatcherEvent::SERVICE_CONNECTION_FAILED, 0 };
+ NotifyListeners(&event);
+ }
+ } else {
+ ProcessGaiaAuthFailure();
+ }
+ break;
+ } else if (!args->auth_token.empty()) {
+ AuthenticateWithToken(args->email, args->auth_token);
+ } else {
+ LOG(ERROR) << "Attempt to authenticate with no credentials.";
+ }
+ {
+ MutexLock lock(&mutex_);
+ authenticating_now_ = false;
+ }
+ delete args;
+ return 0;
+}
+
+void AuthWatcher::Reset() {
+ status_ = NOT_AUTHENTICATED;
+}
+
+void AuthWatcher::NotifyAuthSucceeded(const string& email) {
+ LOG(INFO) << "NotifyAuthSucceeded";
+ AuthWatcherEvent event = { AuthWatcherEvent::AUTH_SUCCEEDED };
+ event.user_email = email;
+
+ NotifyListeners(&event);
+}
+
+bool AuthWatcher::StartNewAuthAttempt(const string& email,
+ const string& password, const string& auth_token,
+ const string& captcha_token, const string& captcha_value,
+ bool persist_creds_to_disk,
+ AuthWatcherEvent::AuthenticationTrigger trigger) {
+ AuthWatcherEvent event = { AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START };
+ NotifyListeners(&event);
+ MutexLock lock(&mutex_);
+ if (authenticating_now_)
+ return false;
+ if (thread_handle_valid_) {
+ int join_return = pthread_join(thread_, 0);
+ if (0 != join_return)
+ LOG(ERROR) << "pthread_join failed returning " << join_return;
+ }
+ string mail = email;
+ if (email.find('@') == string::npos) {
+ mail.push_back('@');
+ // TODO(chron): Should this be done only at the UI level?
+ mail.append(DEFAULT_SIGNIN_DOMAIN);
+ }
+ ThreadParams* args = new ThreadParams;
+ args->self = this;
+ args->email = mail;
+ args->password = password;
+ args->auth_token = auth_token;
+ args->captcha_token = captcha_token;
+ args->captcha_value = captcha_value;
+ args->persist_creds_to_disk = persist_creds_to_disk;
+ args->trigger = trigger;
+ if (0 != pthread_create(&thread_, NULL, AuthenticationThreadStartRoutine,
+ args)) {
+ LOG(ERROR) << "Failed to create auth thread.";
+ return false;
+ }
+ authenticating_now_ = true;
+ thread_handle_valid_ = true;
+ return true;
+}
+
+void AuthWatcher::WaitForAuthThreadFinish() {
+ {
+ MutexLock lock(&mutex_);
+ if (!thread_handle_valid_)
+ return;
+ }
+ pthread_join(thread_, 0);
+}
+
+void AuthWatcher::HandleServerConnectionEvent(
+ const ServerConnectionEvent& event) {
+ if (event.server_reachable &&
+ !authenticating_now_ &&
+ (event.connection_code == HttpResponse::SYNC_AUTH_ERROR ||
+ status_ == LOCALLY_AUTHENTICATED)) {
+ // We're either online or just got reconnected and want to try to
+ // authenticate. If we've got a saved token this should just work. If not
+ // the auth failure should trigger UI indications that we're not logged in.
+
+ // METRIC: If we get a SYNC_AUTH_ERROR, our token expired.
+ GaiaAuthenticator::AuthResults authresults = gaia_->results();
+ if (!StartNewAuthAttempt(authresults.email, authresults.password,
+ authresults.auth_token, "", "",
+ PERSIST_TO_DISK == authresults.credentials_saved,
+ AuthWatcherEvent::EXPIRED_CREDENTIALS))
+ LOG(INFO) << "Couldn't start a new auth attempt.";
+ }
+}
+
+bool AuthWatcher::LoadDirectoryListAndOpen(const PathString& login) {
+ LOG(INFO) << "LoadDirectoryListAndOpen(" << login << ")";
+ bool initial_sync_ended = false;
+
+ dirman_->Open(login);
+ syncable::ScopedDirLookup dir(dirman_, login);
+ if (dir.good() && dir->initial_sync_ended())
+ initial_sync_ended = true;
+
+ LOG(INFO) << "LoadDirectoryListAndOpen returning " << initial_sync_ended;
+ return initial_sync_ended;
+}
+
+AuthWatcher::~AuthWatcher() {
+ WaitForAuthThreadFinish();
+}
+
+void AuthWatcher::Authenticate(const string& email, const string& password,
+ const string& captcha_token, const string& captcha_value,
+ bool persist_creds_to_disk) {
+ LOG(INFO) << "AuthWatcher::Authenticate called";
+ WaitForAuthThreadFinish();
+
+ // We CHECK here because WaitForAuthThreadFinish should ensure there's no
+ // ongoing auth attempt.
+ string empty;
+ CHECK(StartNewAuthAttempt(email, password, empty, captcha_token,
+ captcha_value, persist_creds_to_disk,
+ AuthWatcherEvent::USER_INITIATED));
+}
+
+void AuthWatcher::Logout() {
+ scm_->ResetAuthStatus();
+ Reset();
+ WaitForAuthThreadFinish();
+ ClearAuthenticationData();
+}
+
+void AuthWatcher::ClearAuthenticationData() {
+ sync_service_token_.clear();
+ scm_->set_auth_token(sync_service_token());
+ user_settings_->ClearAllServiceTokens();
+}
+
+string AuthWatcher::email() const {
+ return gaia_->email();
+}
+
+void AuthWatcher::NotifyListeners(AuthWatcherEvent* event) {
+ event->trigger = current_attempt_trigger_;
+ channel_->NotifyListeners(*event);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/auth_watcher.h b/chrome/browser/sync/engine/auth_watcher.h
new file mode 100644
index 0000000..f1bd424d
--- /dev/null
+++ b/chrome/browser/sync/engine/auth_watcher.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AuthWatcher watches authentication events and user open and close
+// events and accordingly opens and closes shares.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
+
+#include <map>
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+struct DirectoryManagerEvent;
+class DirectoryManager;
+}
+
+namespace browser_sync {
+class AllStatus;
+class AuthWatcher;
+class ServerConnectionManager;
+class TalkMediator;
+class URLFactory;
+class UserSettings;
+struct ServerConnectionEvent;
+
+struct AuthWatcherEvent {
+ enum WhatHappened {
+ AUTHENTICATION_ATTEMPT_START,
+ AUTHWATCHER_DESTROYED,
+ AUTH_SUCCEEDED,
+ GAIA_AUTH_FAILED,
+ SERVICE_USER_NOT_SIGNED_UP,
+ SERVICE_AUTH_FAILED,
+ SERVICE_CONNECTION_FAILED,
+ // Used in a safety check in AuthWatcher::AuthenticateWithToken()
+ ILLEGAL_VALUE,
+ };
+ WhatHappened what_happened;
+ const GaiaAuthenticator::AuthResults* auth_results;
+ // use AuthWatcherEvent as its own traits type in hookups.
+ typedef AuthWatcherEvent EventType;
+ static inline bool IsChannelShutdownEvent(const AuthWatcherEvent& event) {
+ return event.what_happened == AUTHWATCHER_DESTROYED;
+ }
+
+ // Used for AUTH_SUCCEEDED notification
+ std::string user_email;
+
+ // How was this auth attempt initiated?
+ enum AuthenticationTrigger {
+ USER_INITIATED = 0, // default value.
+ EXPIRED_CREDENTIALS,
+ };
+
+ AuthenticationTrigger trigger;
+};
+
+class AuthWatcher {
+ public:
+ // Normal progression is local -> gaia -> token
+ enum Status { LOCALLY_AUTHENTICATED, GAIA_AUTHENTICATED, NOT_AUTHENTICATED };
+ typedef syncable::DirectoryManagerEvent DirectoryManagerEvent;
+ typedef syncable::DirectoryManager DirectoryManager;
+ typedef TalkMediator TalkMediator;
+
+ AuthWatcher(DirectoryManager* dirman,
+ ServerConnectionManager* scm,
+ AllStatus* allstatus,
+ const std::string& user_agent,
+ const std::string& service_id,
+ const std::string& gaia_url,
+ UserSettings* user_settings,
+ GaiaAuthenticator* gaia_auth,
+ TalkMediator* talk_mediator);
+ ~AuthWatcher();
+
+ // Returns true if the open share has gotten zero
+ // updates from the sync server (initial sync complete.)
+ bool LoadDirectoryListAndOpen(const PathString& login);
+
+ typedef EventChannel<AuthWatcherEvent, PThreadMutex> Channel;
+
+ inline Channel* channel() const {
+ return channel_.get();
+ }
+
+ void Authenticate(const std::string& email, const std::string& password,
+ const std::string& captcha_token, const std::string& captcha_value,
+ bool persist_creds_to_disk);
+
+ void Authenticate(const std::string& email, const std::string& password,
+ bool persist_creds_to_disk) {
+ Authenticate(email, password, "", "", persist_creds_to_disk);
+ }
+
+ // Retrieves an auth token for a named service for which a long-lived token
+ // was obtained at login time. Returns true if a long-lived token can be
+ // found, false otherwise.
+ bool GetAuthTokenForService(const std::string& service_name,
+ std::string* service_token);
+
+ std::string email() const;
+ syncable::DirectoryManager* dirman() const { return dirman_; }
+ ServerConnectionManager* scm() const { return scm_; }
+ AllStatus* allstatus() const { return allstatus_; }
+ UserSettings* settings() const { return user_settings_; }
+ Status status() const { return (Status)status_; }
+
+ void Logout();
+
+ // For synchronizing other destructors.
+ void WaitForAuthThreadFinish();
+
+ protected:
+ void Reset();
+ void ClearAuthenticationData();
+
+ void NotifyAuthSucceeded(const std::string& email);
+ bool StartNewAuthAttempt(const std::string& email,
+ const std::string& password,
+ const std::string& auth_token, const std::string& captcha_token,
+ const std::string& captcha_value, bool persist_creds_to_disk,
+ AuthWatcherEvent::AuthenticationTrigger trigger);
+ void HandleServerConnectionEvent(const ServerConnectionEvent& event);
+
+ void SaveUserSettings(const std::string& username,
+ const std::string& auth_token,
+ const bool save_credentials);
+
+ // These two helpers should only be called from the auth function.
+ // returns false iff we had problems and should try GAIA_AUTH again.
+ bool ProcessGaiaAuthSuccess();
+ void ProcessGaiaAuthFailure();
+
+ // Just checks that the user has at least one local share cache.
+ bool AuthenticateLocally(std::string email);
+ // Also checks the user's password against stored password hash.
+ bool AuthenticateLocally(std::string email, const std::string& password);
+
+ // Sets the trigger member of the event and sends the event on channel_.
+ void NotifyListeners(AuthWatcherEvent* event);
+
+ const std::string& sync_service_token() const { return sync_service_token_; }
+
+ public:
+ bool AuthenticateWithToken(const std::string& email,
+ const std::string& auth_token);
+
+ protected:
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+ // Passed to newly created threads.
+ struct ThreadParams {
+ AuthWatcher* self;
+ std::string email;
+ std::string password;
+ std::string auth_token;
+ std::string captcha_token;
+ std::string captcha_value;
+ bool persist_creds_to_disk;
+ AuthWatcherEvent::AuthenticationTrigger trigger;
+ };
+
+ // Initial function passed to pthread_create.
+ static void* AuthenticationThreadStartRoutine(void* arg);
+ // Member function called by AuthenticationThreadStartRoutine.
+ void* AuthenticationThreadMain(struct ThreadParams* arg);
+
+ scoped_ptr<GaiaAuthenticator> const gaia_;
+ syncable::DirectoryManager* const dirman_;
+ ServerConnectionManager* const scm_;
+ scoped_ptr<EventListenerHookup> connmgr_hookup_;
+ AllStatus* const allstatus_;
+ // TODO(chron): It is incorrect to make assignments to AtomicWord.
+ volatile base::subtle::AtomicWord status_;
+ UserSettings* user_settings_;
+ TalkMediator* talk_mediator_; // Interface to the notifications engine.
+ scoped_ptr<Channel> channel_;
+
+ // We store our service token in memory as a workaround to the fact that we
+ // don't persist it when the user unchecks "remember me".
+ // We also include it on outgoing requests.
+ std::string sync_service_token_;
+
+ PThreadMutex mutex_;
+ // All members below are protected by the above mutex
+ pthread_t thread_;
+ bool thread_handle_valid_;
+ bool authenticating_now_;
+ AuthWatcherEvent::AuthenticationTrigger current_attempt_trigger_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
diff --git a/chrome/browser/sync/engine/authenticator.cc b/chrome/browser/sync/engine/authenticator.cc
new file mode 100644
index 0000000..cd168d2
--- /dev/null
+++ b/chrome/browser/sync/engine/authenticator.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/authenticator.h"
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/user_settings.h"
+
+namespace browser_sync {
+
+using std::string;
+
+Authenticator::Authenticator(ServerConnectionManager *manager,
+ UserSettings* settings)
+ : server_connection_manager_(manager), settings_(settings) {
+}
+
+Authenticator::Authenticator(ServerConnectionManager *manager)
+ : server_connection_manager_(manager), settings_(NULL) {
+}
+
+Authenticator::AuthenticationResult Authenticator::Authenticate() {
+ // TODO(sync): Pull and work with saved credentials.
+ return NO_SAVED_CREDENTIALS;
+}
+
+Authenticator::AuthenticationResult Authenticator::Authenticate(
+ string username, string password, bool save_credentials) {
+ // TODO(scrub): need to figure out if this routine is used anywhere other than
+ // the test code.
+ GaiaAuthenticator auth_service("ChromiumBrowser", "chromiumsync",
+ "https://www.google.com:443/accounts/ClientLogin");
+ const SignIn signin_type =
+ settings_->RecallSigninType(username, GMAIL_SIGNIN);
+ if (!auth_service.Authenticate(username, password, SAVE_IN_MEMORY_ONLY,
+ true, signin_type)) {
+ return UNSPECIFIC_ERROR_RETURN;
+ }
+ CHECK(!auth_service.auth_token().empty());
+ return AuthenticateToken(auth_service.auth_token());
+}
+
+COMPILE_ASSERT(sync_pb::ClientToServerResponse::ERROR_TYPE_MAX == 6,
+ client_to_server_response_errors_changed);
+
+Authenticator::AuthenticationResult Authenticator::HandleSuccessfulTokenRequest(
+ const sync_pb::UserIdentification* user) {
+ display_email_ = user->has_email() ? user->email() : "";
+ display_name_ = user->has_display_name() ? user->display_name() : "";
+ obfuscated_id_ = user->has_obfuscated_id() ? user->obfuscated_id() : "";
+ return SUCCESS;
+}
+
+Authenticator::AuthenticationResult Authenticator::AuthenticateToken(
+ string auth_token) {
+ ClientToServerMessage client_to_server_message;
+ // Used to be required for all requests.
+ client_to_server_message.set_share("");
+ client_to_server_message.set_message_contents(
+ ClientToServerMessage::AUTHENTICATE);
+
+ string tx, rx;
+ client_to_server_message.SerializeToString(&tx);
+ HttpResponse http_response;
+
+ ServerConnectionManager::PostBufferParams params =
+ { tx, &rx, &http_response };
+ if (!server_connection_manager_->PostBufferWithAuth(&params, auth_token)) {
+ LOG(WARNING) << "Error posting from authenticator:" << http_response;
+ return SERVICE_DOWN;
+ }
+ sync_pb::ClientToServerResponse response;
+ if (!response.ParseFromString(rx))
+ return CORRUPT_SERVER_RESPONSE;
+
+ switch (response.error_code()) {
+ case sync_pb::ClientToServerResponse::SUCCESS:
+ if (response.has_authenticate() && response.authenticate().has_user())
+ return HandleSuccessfulTokenRequest(&response.authenticate().user());
+ // TODO:(sync) make this CORRUPT_SERVER_RESPONSE when all servers are
+ // returning user identification at login time.
+ return SUCCESS;
+ case sync_pb::ClientToServerResponse::USER_NOT_ACTIVATED:
+ return USER_NOT_ACTIVATED;
+ case sync_pb::ClientToServerResponse::AUTH_INVALID:
+ case sync_pb::ClientToServerResponse::AUTH_EXPIRED:
+ return BAD_AUTH_TOKEN;
+ // should never happen (no birthday in this request).
+ case sync_pb::ClientToServerResponse::NOT_MY_BIRTHDAY:
+ // should never happen (auth isn't throttled).
+ case sync_pb::ClientToServerResponse::THROTTLED:
+ // should never happen (only for stores).
+ case sync_pb::ClientToServerResponse::ACCESS_DENIED:
+ default:
+ LOG(ERROR) << "Corrupt Server packet received by auth, error code " <<
+ response.error_code();
+ return CORRUPT_SERVER_RESPONSE;
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/authenticator.h b/chrome/browser/sync/engine/authenticator.h
new file mode 100644
index 0000000..6c5005b
--- /dev/null
+++ b/chrome/browser/sync/engine/authenticator.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The authenticator is a cross-platform class that handles authentication for
+// the sync client.
+//
+// Current State:
+// The authenticator is currently only used to authenticate tokens using the
+// newer protocol buffer request.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace sync_pb {
+class UserIdentification;
+}
+
+namespace browser_sync {
+
+class ServerConnectionManager;
+class UserSettings;
+
+class Authenticator {
+ public:
+ // Single return enum.
+ enum AuthenticationResult {
+ SUCCESS = 0,
+ // We couldn't log on because we don't have saved credentials.
+ NO_SAVED_CREDENTIALS,
+ // We can't reach auth server (i.e. we're offline or server's down).
+ NOT_CONNECTED,
+ // Server's up, but we're down.
+ SERVICE_DOWN,
+ // We contacted the server, but the response didn't make sense.
+ CORRUPT_SERVER_RESPONSE,
+ // Bad username/password.
+ BAD_CREDENTIALS,
+ // Credentials are fine, but the user hasn't signed up.
+ USER_NOT_ACTIVATED,
+
+ // Return values for internal use.
+
+ // We will never return this to the user unless they call AuthenticateToken
+ // directly. Other auth functions retry and then return
+ // CORRUPT_SERVER_RESPONSE.
+ // TODO(sync): Implement retries.
+ BAD_AUTH_TOKEN,
+ // We should never return this, it's a placeholder during development.
+ // TODO(sync): Remove this
+ UNSPECIFIC_ERROR_RETURN,
+ };
+
+ // Constructor. This class will keep the connection authenticated.
+ // TODO(sync): Make it work as described.
+ // TODO(sync): Require a UI callback mechanism.
+ Authenticator(ServerConnectionManager* manager, UserSettings* settings);
+
+ // Constructor for a simple authenticator used for programmatic login from
+ // test programs.
+ explicit Authenticator(ServerConnectionManager* manager);
+
+ // This version of Authenticate tries to use saved credentials, if we have
+ // any.
+ AuthenticationResult Authenticate();
+
+ // If save_credentials is set we save the long-lived auth token to local disk.
+ // In all cases we save the username and password in memory (if given) so we
+ // can refresh the long-lived auth token if it expires.
+ // Also we save a 10-bit hash of the password to allow offline login.
+ // TODO(sync): Make it work as described.
+ // TODO(sync): Arguments for desired domain.
+ AuthenticationResult Authenticate(std::string username, std::string password,
+ bool save_credentials);
+ // A version of the auth token to authenticate cookie portion of
+ // authentication. It uses the new proto buffer based call instead of the HTTP
+ // GET based one we currently use.
+ // Can return one of SUCCESS, SERVICE_DOWN, CORRUPT_SERVER_RESPONSE,
+ // USER_NOT_ACTIVATED or BAD_AUTH_TOKEN. See above for the meaning of these
+ // values.
+ // TODO(sync): Make this function private when we're done.
+ AuthenticationResult AuthenticateToken(std::string auth_token);
+
+ const char * display_email() const { return display_email_.c_str(); }
+ const char * display_name() const { return display_name_.c_str(); }
+ private:
+ // Stores the information in the UserIdentification returned from the server.
+ AuthenticationResult HandleSuccessfulTokenRequest(
+ const sync_pb::UserIdentification* user);
+ // The server connection manager that we're looking after.
+ ServerConnectionManager* server_connection_manager_;
+ // Returns SUCCESS or the value that should be returned to the user.
+ std::string display_email_;
+ std::string display_name_;
+ std::string obfuscated_id_;
+ UserSettings* const settings_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
new file mode 100644
index 0000000..0eb279a
--- /dev/null
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
@@ -0,0 +1,439 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
+
+#include <string>
+#include <sstream>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/format_macros.h"
+#include "base/rand_util.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/update_applicator.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+namespace browser_sync {
+
+using std::set;
+using std::string;
+using std::vector;
+
+BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSetsCommand() {}
+BuildAndProcessConflictSetsCommand::~BuildAndProcessConflictSetsCommand() {}
+
+void BuildAndProcessConflictSetsCommand::ModelChangingExecuteImpl(
+ SyncerSession *session) {
+ session->set_conflict_sets_built(BuildAndProcessConflictSets(session));
+}
+
+bool BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSets(
+ SyncerSession *session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good())
+ return false;
+ bool had_single_direction_sets = false;
+ { // scope for transaction
+ syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ ConflictResolutionView conflict_view(session);
+ BuildConflictSets(&trans, &conflict_view);
+ had_single_direction_sets =
+ ProcessSingleDirectionConflictSets(&trans, session);
+ // we applied some updates transactionally, lets try syncing again.
+ if (had_single_direction_sets)
+ return true;
+ }
+ return false;
+}
+
+bool BuildAndProcessConflictSetsCommand::ProcessSingleDirectionConflictSets(
+ syncable::WriteTransaction* trans, SyncerSession* const session) {
+ bool rv = false;
+ ConflictResolutionView conflict_view(session);
+ set<ConflictSet*>::const_iterator all_sets_iterator;
+ for(all_sets_iterator = conflict_view.ConflictSetsBegin();
+ all_sets_iterator != conflict_view.ConflictSetsEnd() ; ) {
+ const ConflictSet* conflict_set = *all_sets_iterator;
+ CHECK(conflict_set->size() >= 2);
+ // We scan the set to see if it consists of changes of only one type.
+ ConflictSet::const_iterator i;
+ int unsynced_count = 0, unapplied_count = 0;
+ for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
+ syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
+ CHECK(entry.good());
+ if (entry.Get(syncable::IS_UNSYNCED))
+ unsynced_count++;
+ if (entry.Get(syncable::IS_UNAPPLIED_UPDATE))
+ unapplied_count++;
+ }
+ if (conflict_set->size() == unsynced_count && 0 == unapplied_count) {
+ LOG(INFO) << "Skipped transactional commit attempt.";
+ } else if (conflict_set->size() == unapplied_count &&
+ 0 == unsynced_count &&
+ ApplyUpdatesTransactionally(trans, conflict_set, session)) {
+ rv = true;
+ }
+ ++all_sets_iterator;
+ }
+ return rv;
+}
+
+namespace {
+
+void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
+ syncable::EntryKernel* backup) {
+ CHECK(!entry->Get(syncable::IS_UNSYNCED)) << " Storing Rollback data for "
+ "entry that's unsynced." << *entry ;
+ CHECK(entry->Get(syncable::IS_UNAPPLIED_UPDATE)) << " Storing Rollback data "
+ "for entry that's not an unapplied update." << *entry ;
+ *backup = entry->GetKernelCopy();
+}
+
+class UniqueNameGenerator {
+ public:
+ void Initialize() {
+ // To avoid name collisions we prefix the names with hex data derived from
+ // 64 bits of randomness.
+ int64 name_prefix = static_cast<int64>(base::RandUint64());
+ name_stem_ = StringPrintf("%0" PRId64 "x.", name_prefix);
+ }
+ string StringNameForEntry(const syncable::Entry& entry) {
+ CHECK(!name_stem_.empty());
+ std::stringstream rv;
+ rv << name_stem_ << entry.Get(syncable::ID);
+ return rv.str();
+ }
+ PathString PathStringNameForEntry(const syncable::Entry& entry) {
+ string name = StringNameForEntry(entry);
+ return PathString(name.begin(), name.end());
+ }
+
+ private:
+ string name_stem_;
+};
+
+bool RollbackEntry(syncable::WriteTransaction* trans,
+ syncable::EntryKernel* backup) {
+ syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE,
+ backup->ref(syncable::META_HANDLE));
+ CHECK(entry.good());
+ bool was_del = entry.Get(syncable::IS_DEL);
+
+ if (!entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL)))
+ return false;
+ syncable::Name name = syncable::Name::FromEntryKernel(backup);
+ if (!entry.PutParentIdAndName(backup->ref(syncable::PARENT_ID), name))
+ return false;
+
+ if (!backup->ref(syncable::IS_DEL)) {
+ if (!entry.PutPredecessor(backup->ref(syncable::PREV_ID)))
+ return false;
+ }
+
+ if (backup->ref(syncable::PREV_ID) != entry.Get(syncable::PREV_ID))
+ return false;
+
+ entry.Put(syncable::CTIME, backup->ref(syncable::CTIME));
+ entry.Put(syncable::MTIME, backup->ref(syncable::MTIME));
+ entry.Put(syncable::BASE_VERSION, backup->ref(syncable::BASE_VERSION));
+ entry.Put(syncable::IS_DIR, backup->ref(syncable::IS_DIR));
+ entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL));
+ entry.Put(syncable::ID, backup->ref(syncable::ID));
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE,
+ backup->ref(syncable::IS_UNAPPLIED_UPDATE));
+ return true;
+}
+
+class TransactionalUpdateEntryPreparer {
+ public:
+ TransactionalUpdateEntryPreparer() {
+ namegen_.Initialize();
+ }
+
+ void PrepareEntries(syncable::WriteTransaction* trans,
+ const vector<syncable::Id>* ids) {
+ vector<syncable::Id>::const_iterator it;
+ for (it = ids->begin(); it != ids->end(); ++it) {
+ syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
+ syncable::Name random_name(namegen_.PathStringNameForEntry(entry));
+ CHECK(entry.PutParentIdAndName(trans->root_id(), random_name));
+ }
+ }
+
+ private:
+ UniqueNameGenerator namegen_;
+ DISALLOW_COPY_AND_ASSIGN(TransactionalUpdateEntryPreparer);
+};
+
+} // namespace
+
+bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
+ syncable::WriteTransaction* trans,
+ const vector<syncable::Id>* const update_set,
+ SyncerSession* const session) {
+ vector<int64> handles; // The handles in the |update_set| order.
+ vector<syncable::Id> rollback_ids; // Holds the same Ids as update_set, but
+ // sorted so that runs of adjacent nodes
+ // appear in order.
+ rollback_ids.reserve(update_set->size());
+ syncable::MetahandleSet rollback_ids_inserted_items; // Tracks what's added
+ // to |rollback_ids|.
+
+ vector<syncable::Id>::const_iterator it;
+ // 1. Build |rollback_ids| in the order required for successful rollback.
+ // Specifically, for positions to come out right, restoring an item
+ // requires that its predecessor in the sibling order is properly
+ // restored first.
+ // 2. Build |handles|, the list of handles for ApplyUpdates.
+ for (it = update_set->begin(); it != update_set->end(); ++it) {
+ syncable::Entry entry(trans, syncable::GET_BY_ID, *it);
+ SyncerUtil::AddPredecessorsThenItem(trans, &entry,
+ syncable::IS_UNAPPLIED_UPDATE, &rollback_ids_inserted_items,
+ &rollback_ids);
+ handles.push_back(entry.Get(syncable::META_HANDLE));
+ }
+ DCHECK_EQ(rollback_ids.size(), update_set->size());
+ DCHECK_EQ(rollback_ids_inserted_items.size(), update_set->size());
+
+ // 3. Store the information needed to rollback if the transaction fails.
+ // Do this before modifying anything to keep the next/prev values intact.
+ vector<syncable::EntryKernel> rollback_data(rollback_ids.size());
+ for (size_t i = 0; i < rollback_ids.size(); ++i) {
+ syncable::Entry entry(trans, syncable::GET_BY_ID, rollback_ids[i]);
+ StoreLocalDataForUpdateRollback(&entry, &rollback_data[i]);
+ }
+
+ // 4. Use the preparer to move things to an initial starting state where no
+ // names collide, and nothing in the set is a child of anything else. If
+ // we've correctly calculated the set, the server tree is valid and no
+ // changes have occurred locally we should be able to apply updates from this
+ // state.
+ TransactionalUpdateEntryPreparer preparer;
+ preparer.PrepareEntries(trans, update_set);
+
+ // 5. Use the usual apply updates from the special start state we've just
+ // prepared.
+ UpdateApplicator applicator(session, handles.begin(), handles.end());
+ while (applicator.AttemptOneApplication(trans)) {
+ // Keep going till all updates are applied.
+ }
+ if (!applicator.AllUpdatesApplied()) {
+ LOG(ERROR) << "Transactional Apply Failed, Rolling back.";
+ // We have to move entries into the temp dir again. e.g. if a swap was in a
+ // set with other failing updates, the swap may have gone through, meaning
+ // the roll back needs to be transactional. But as we're going to a known
+ // good state we should always succeed.
+ preparer.PrepareEntries(trans, update_set);
+
+ // Rollback all entries.
+ for (size_t i = 0; i < rollback_data.size(); ++i) {
+ CHECK(RollbackEntry(trans, &rollback_data[i]));
+ }
+ return false; // Don't save progress -- we just undid it.
+ }
+ applicator.SaveProgressIntoSessionState();
+ return true;
+}
+
+void BuildAndProcessConflictSetsCommand::BuildConflictSets(
+ syncable::BaseTransaction* trans,
+ ConflictResolutionView* view) {
+ view->CleanupSets();
+ set<syncable::Id>::iterator i = view->CommitConflictsBegin();
+ while (i != view->CommitConflictsEnd()) {
+ syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
+ CHECK(entry.good());
+ if (!entry.Get(syncable::IS_UNSYNCED) &&
+ !entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
+ // This can happen very rarely. It means we had a simply conflicting item
+ // that randomly committed. We drop the entry as it's no longer
+ // conflicting.
+ view->EraseCommitConflict(i++);
+ continue;
+ }
+ if (entry.ExistsOnClientBecauseDatabaseNameIsNonEmpty() &&
+ (entry.Get(syncable::IS_DEL) || entry.Get(syncable::SERVER_IS_DEL))) {
+ // If we're deleted on client or server we can't be in a complex set.
+ ++i;
+ continue;
+ }
+ bool new_parent =
+ entry.Get(syncable::PARENT_ID) != entry.Get(syncable::SERVER_PARENT_ID);
+ bool new_name = 0 != syncable::ComparePathNames(entry.GetSyncNameValue(),
+ entry.Get(syncable::SERVER_NAME));
+ if (new_parent || new_name)
+ MergeSetsForNameClash(trans, &entry, view);
+ if (new_parent)
+ MergeSetsForIntroducedLoops(trans, &entry, view);
+ MergeSetsForNonEmptyDirectories(trans, &entry, view);
+ ++i;
+ }
+}
+
+void BuildAndProcessConflictSetsCommand::MergeSetsForNameClash(
+ syncable::BaseTransaction* trans, syncable::Entry* entry,
+ ConflictResolutionView* view) {
+ PathString server_name = entry->Get(syncable::SERVER_NAME);
+ // Uncommitted entries have no server name. We trap this because the root
+ // item has a null name and 0 parentid.
+ if (server_name.empty())
+ return;
+ syncable::Id conflicting_id =
+ SyncerUtil::GetNameConflictingItemId(
+ trans, entry->Get(syncable::SERVER_PARENT_ID), server_name);
+ if (syncable::kNullId != conflicting_id)
+ view->MergeSets(entry->Get(syncable::ID), conflicting_id);
+}
+
+void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
+ syncable::BaseTransaction* trans, syncable::Entry* entry,
+ ConflictResolutionView* view) {
+ // This code crawls up from the item in question until it gets to the root
+ // or itself. If it gets to the root it does nothing. If it finds a loop all
+ // moved unsynced entries in the list of crawled entries have their sets
+ // merged with the entry.
+ // TODO(sync): Build test cases to cover this function when the argument
+ // list has settled.
+ syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
+ syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+ if (!parent.good()) {
+ return;
+ }
+ // Don't check for loop if the server parent is deleted.
+ if (parent.Get(syncable::IS_DEL))
+ return;
+ vector<syncable::Id> conflicting_entries;
+ while (!parent_id.IsRoot()) {
+ syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+ if (!parent.good()) {
+ LOG(INFO) << "Bad parent in loop check, skipping. Bad parent id: "
+ << parent_id << " entry: " << *entry;
+ return;
+ }
+ if (parent.Get(syncable::IS_UNSYNCED) &&
+ entry->Get(syncable::PARENT_ID) !=
+ entry->Get(syncable::SERVER_PARENT_ID))
+ conflicting_entries.push_back(parent_id);
+ parent_id = parent.Get(syncable::PARENT_ID);
+ if (parent_id == entry->Get(syncable::ID))
+ break;
+ }
+ if (parent_id.IsRoot())
+ return;
+ for (size_t i = 0; i < conflicting_entries.size(); i++) {
+ view->MergeSets(entry->Get(syncable::ID), conflicting_entries[i]);
+ }
+}
+
+namespace {
+
+class ServerDeletedPathChecker {
+ public:
+ bool CausingConflict(const syncable::Entry& e,
+ const syncable::Entry& log_entry) {
+ CHECK(e.good()) << "Missing parent in path of: " << log_entry;
+ if (e.Get(syncable::IS_UNAPPLIED_UPDATE) &&
+ e.Get(syncable::SERVER_IS_DEL)) {
+ CHECK(!e.Get(syncable::IS_DEL)) << " Inconsistency in local tree. "
+ "syncable::Entry: " << e << " Leaf: " << log_entry;
+ return true;
+ } else {
+ CHECK(!e.Get(syncable::IS_DEL)) << " Deleted entry has children. "
+ "syncable::Entry: " << e << " Leaf: " << log_entry;
+ return false;
+ }
+ }
+ // returns 0 if we should stop investigating the path.
+ syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
+ syncable::Id id,
+ syncable::Id check_id,
+ const syncable::Entry& log_entry) {
+ syncable::Entry parent(trans, syncable::GET_BY_ID, id);
+ CHECK(parent.good()) << "Tree inconsitency, missing id" << id << " "
+ << log_entry;
+ syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
+ CHECK(parent_id != check_id) << "Loop in dir tree! "
+ << log_entry << " " << parent;
+ return parent_id;
+ }
+};
+
+class LocallyDeletedPathChecker {
+ public:
+ bool CausingConflict(const syncable::Entry& e,
+ const syncable::Entry& log_entry) {
+ return e.good() && e.Get(syncable::IS_DEL) && e.Get(syncable::IS_UNSYNCED);
+ }
+ // returns 0 if we should stop investigating the path.
+ syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
+ syncable::Id id,
+ syncable::Id check_id,
+ const syncable::Entry& log_entry) {
+ syncable::Entry parent(trans, syncable::GET_BY_ID, id);
+ if (!parent.good())
+ return syncable::kNullId;
+ syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
+ if (parent_id == check_id)
+ return syncable::kNullId;
+ return parent_id;
+ }
+};
+
+template <typename Checker>
+void CrawlDeletedTreeMergingSets(syncable::BaseTransaction* trans,
+ const syncable::Entry& entry,
+ ConflictResolutionView* view,
+ Checker checker) {
+ syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
+ syncable::Id double_step_parent_id = parent_id;
+ // This block builds sets where we've got an entry in a directory the
+ // server wants to delete.
+ //
+ // Here we're walking up the tree to find all entries that the pass checks
+ // deleted. We can be extremely strict here as anything unexpected means
+ // invariants in the local hierarchy have been broken.
+ while (!parent_id.IsRoot()) {
+ if (!double_step_parent_id.IsRoot()) {
+ // Checks to ensure we don't loop.
+ double_step_parent_id = checker.GetAndExamineParent(
+ trans, double_step_parent_id, parent_id, entry);
+ double_step_parent_id = checker.GetAndExamineParent(
+ trans, double_step_parent_id, parent_id, entry);
+ }
+ syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+ if (checker.CausingConflict(parent, entry))
+ view->MergeSets(entry.Get(syncable::ID), parent.Get(syncable::ID));
+ else
+ break;
+ parent_id = parent.Get(syncable::PARENT_ID);
+ }
+}
+
+} // namespace
+
+void BuildAndProcessConflictSetsCommand::MergeSetsForNonEmptyDirectories(
+ syncable::BaseTransaction* trans, syncable::Entry* entry,
+ ConflictResolutionView* view) {
+ if (entry->Get(syncable::IS_UNSYNCED) && !entry->Get(syncable::IS_DEL)) {
+ ServerDeletedPathChecker checker;
+ CrawlDeletedTreeMergingSets(trans, *entry, view, checker);
+ }
+ if (entry->Get(syncable::IS_UNAPPLIED_UPDATE) &&
+ !entry->Get(syncable::SERVER_IS_DEL)) {
+ syncable::Entry parent(trans, syncable::GET_BY_ID,
+ entry->Get(syncable::SERVER_PARENT_ID));
+ syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
+ if (!parent.good())
+ return;
+ LocallyDeletedPathChecker checker;
+ if (!checker.CausingConflict(parent, *entry))
+ return;
+ view->MergeSets(entry->Get(syncable::ID), parent.Get(syncable::ID));
+ CrawlDeletedTreeMergingSets(trans, parent, view, checker);
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
new file mode 100644
index 0000000..79559ba
--- /dev/null
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+
+namespace syncable {
+class BaseTransaction;
+class Entry;
+class Id;
+class MutableEntry;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class ConflictResolutionView;
+class SyncerSession;
+
+class BuildAndProcessConflictSetsCommand : public ModelChangingSyncerCommand {
+ public:
+ BuildAndProcessConflictSetsCommand();
+ virtual ~BuildAndProcessConflictSetsCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession *session);
+
+ private:
+ bool BuildAndProcessConflictSets(SyncerSession *session);
+
+ bool ProcessSingleDirectionConflictSets(
+ syncable::WriteTransaction* trans, SyncerSession* const session);
+ bool ApplyUpdatesTransactionally(
+ syncable::WriteTransaction* trans,
+ const std::vector<syncable::Id>* const update_set,
+ SyncerSession* const session);
+ void BuildAndProcessConflictSetsCommand::BuildConflictSets(
+ syncable::BaseTransaction* trans,
+ ConflictResolutionView* view);
+
+ void MergeSetsForNameClash(syncable::BaseTransaction* trans,
+ syncable::Entry* entry,
+ ConflictResolutionView* view);
+ void MergeSetsForIntroducedLoops(syncable::BaseTransaction* trans,
+ syncable::Entry* entry,
+ ConflictResolutionView* view);
+ void MergeSetsForNonEmptyDirectories(syncable::BaseTransaction* trans,
+ syncable::Entry* entry,
+ ConflictResolutionView* view);
+ void MergeSetsForPositionUpdate(syncable::BaseTransaction* trans,
+ syncable::Entry* entry,
+ ConflictResolutionView* view);
+
+ DISALLOW_COPY_AND_ASSIGN(BuildAndProcessConflictSetsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/build_commit_command.cc b/chrome/browser/sync/engine/build_commit_command.cc
new file mode 100644
index 0000000..f819d6c
--- /dev/null
+++ b/chrome/browser/sync/engine/build_commit_command.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/build_commit_command.h"
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::set;
+using std::string;
+using std::vector;
+using syncable::ExtendedAttribute;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::Name;
+
+namespace browser_sync {
+
+BuildCommitCommand::BuildCommitCommand() {}
+BuildCommitCommand::~BuildCommitCommand() {}
+
+void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
+ ClientToServerMessage message;
+ message.set_share(ToUTF8(session->account_name()).get_string());
+ message.set_message_contents(ClientToServerMessage::COMMIT);
+
+ CommitMessage* commit_message = message.mutable_commit();
+ commit_message->set_cache_guid(
+ session->write_transaction()->directory()->cache_guid());
+
+ const vector<Id>& commit_ids = session->commit_ids();
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ Id id = commit_ids[i];
+ SyncEntity* sync_entry =
+ static_cast<SyncEntity*>(commit_message->add_entries());
+ sync_entry->set_id(id);
+ MutableEntry meta_entry(session->write_transaction(),
+ syncable::GET_BY_ID,
+ id);
+ CHECK(meta_entry.good());
+ // this is the only change we make to the entry in this function.
+ meta_entry.Put(syncable::SYNCING, true);
+
+ Name name = meta_entry.GetName();
+ CHECK(!name.value().empty()); // Make sure this isn't an update.
+ sync_entry->set_name(ToUTF8(name.value()).get_string());
+ // Set the non_unique_name if we have one. If we do, the server ignores
+ // the |name| value (using |non_unique_name| instead), and will return
+ // in the CommitResponse a unique name if one is generated. Even though
+ // we could get away with only sending |name|, we send both because it
+ // may aid in logging.
+ if (name.value() != name.non_unique_value()) {
+ sync_entry->set_non_unique_name(
+ ToUTF8(name.non_unique_value()).get_string());
+ }
+ // deleted items with negative parent ids can be a problem so we set the
+ // parent to 0. (TODO(sync): Still true in protocol?
+ Id new_parent_id;
+ if (meta_entry.Get(syncable::IS_DEL) &&
+ !meta_entry.Get(syncable::PARENT_ID).ServerKnows()) {
+ new_parent_id = session->write_transaction()->root_id();
+ } else {
+ new_parent_id = meta_entry.Get(syncable::PARENT_ID);
+ }
+ sync_entry->set_parent_id(new_parent_id);
+ // TODO(sync): Investigate all places that think transactional commits
+ // actually exist.
+ //
+ // This is the only logic we'll need when transactional commits are
+ // moved to the server.
+ // If our parent has changes, send up the old one so the server can
+ // correctly deal with multiple parents.
+ if (new_parent_id != meta_entry.Get(syncable::SERVER_PARENT_ID) &&
+ 0 != meta_entry.Get(syncable::BASE_VERSION) &&
+ syncable::CHANGES_VERSION != meta_entry.Get(syncable::BASE_VERSION)) {
+ sync_entry->set_old_parent_id(meta_entry.Get(syncable::SERVER_PARENT_ID));
+ }
+
+ int64 version = meta_entry.Get(syncable::BASE_VERSION);
+ if (syncable::CHANGES_VERSION == version || 0 == version) {
+ // If this CHECK triggers during unit testing, check that we haven't
+ // altered an item that's an unapplied update.
+ CHECK(!id.ServerKnows()) << meta_entry;
+ sync_entry->set_version(0);
+ } else {
+ CHECK(id.ServerKnows()) << meta_entry;
+ sync_entry->set_version(meta_entry.Get(syncable::BASE_VERSION));
+ }
+ sync_entry->set_ctime(ClientTimeToServerTime(
+ meta_entry.Get(syncable::CTIME)));
+ sync_entry->set_mtime(ClientTimeToServerTime(
+ meta_entry.Get(syncable::MTIME)));
+
+ set<ExtendedAttribute> extended_attributes;
+ meta_entry.GetAllExtendedAttributes(
+ session->write_transaction(), &extended_attributes);
+ set<ExtendedAttribute>::iterator iter;
+ sync_pb::ExtendedAttributes* mutable_extended_attributes =
+ sync_entry->mutable_extended_attributes();
+ for (iter = extended_attributes.begin(); iter != extended_attributes.end();
+ ++iter) {
+ sync_pb::ExtendedAttributes_ExtendedAttribute *extended_attribute =
+ mutable_extended_attributes->add_extendedattribute();
+ extended_attribute->set_key(ToUTF8(iter->key()).get_string());
+ SyncerProtoUtil::CopyBlobIntoProtoBytes(iter->value(),
+ extended_attribute->mutable_value());
+ }
+
+ // Deletion is final on the server, let's move things and then delete them.
+ if (meta_entry.Get(syncable::IS_DEL)) {
+ sync_entry->set_deleted(true);
+ } else if (meta_entry.Get(syncable::IS_BOOKMARK_OBJECT)) {
+ sync_pb::SyncEntity_BookmarkData* bookmark =
+ sync_entry->mutable_bookmarkdata();
+ bookmark->set_bookmark_folder(meta_entry.Get(syncable::IS_DIR));
+ const Id& prev_id = meta_entry.Get(syncable::PREV_ID);
+ string prev_string = prev_id.IsRoot() ? string() : prev_id.GetServerId();
+ sync_entry->set_insert_after_item_id(prev_string);
+
+ if (!meta_entry.Get(syncable::IS_DIR)) {
+ string bookmark_url = ToUTF8(meta_entry.Get(syncable::BOOKMARK_URL));
+ bookmark->set_bookmark_url(bookmark_url);
+ SyncerProtoUtil::CopyBlobIntoProtoBytes(
+ meta_entry.Get(syncable::BOOKMARK_FAVICON),
+ bookmark->mutable_bookmark_favicon());
+ }
+ }
+ }
+ session->set_commit_message(message);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/build_commit_command.h b/chrome/browser/sync/engine/build_commit_command.h
new file mode 100644
index 0000000..445024f
--- /dev/null
+++ b/chrome/browser/sync/engine/build_commit_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+namespace browser_sync {
+
+class BuildCommitCommand : public SyncerCommand {
+ public:
+ BuildCommitCommand();
+ virtual ~BuildCommitCommand();
+
+ virtual void ExecuteImpl(SyncerSession *session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
diff --git a/chrome/browser/sync/engine/change_reorder_buffer.cc b/chrome/browser/sync/engine/change_reorder_buffer.cc
new file mode 100644
index 0000000..a74c62e0
--- /dev/null
+++ b/chrome/browser/sync/engine/change_reorder_buffer.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/change_reorder_buffer.h"
+
+#include <limits>
+#include <queue>
+#include <set>
+#include <utility> // for pair<>
+#include <vector>
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+using std::numeric_limits;
+using std::pair;
+using std::queue;
+using std::set;
+using std::vector;
+
+namespace sync_api {
+
+// Traversal provides a way to collect a set of nodes from the syncable
+// directory structure and then traverse them, along with any intermediate
+// nodes, in a top-down fashion, starting from a single common ancestor. A
+// Traversal starts out empty and is grown by means of the ExpandToInclude
+// method. Once constructed, the top(), begin_children(), and end_children()
+// methods can be used to explore the nodes in root-to-leaf order.
+class ChangeReorderBuffer::Traversal {
+ public:
+ typedef pair<int64, int64> ParentChildLink;
+ typedef set<ParentChildLink> LinkSet;
+
+ Traversal() : top_(kInvalidId) { }
+
+ // Expand the traversal so that it includes the node indicated by
+ // |child_handle|.
+ void ExpandToInclude(syncable::BaseTransaction* trans,
+ int64 child_handle) {
+ // If |top_| is invalid, this is the first insertion -- easy.
+ if (top_ == kInvalidId) {
+ top_ = child_handle;
+ return;
+ }
+
+ int64 node_to_include = child_handle;
+ while (node_to_include != kInvalidId && node_to_include != top_) {
+ int64 node_parent = 0;
+
+ syncable::Entry node(trans, syncable::GET_BY_HANDLE, node_to_include);
+ CHECK(node.good());
+ if (node.Get(syncable::ID).IsRoot()) {
+ // If we've hit the root, and the root isn't already in the tree
+ // (it would have to be |top_| if it were), start a new expansion
+ // upwards from |top_| to unite the original traversal with the
+ // path we just added that goes from |child_handle| to the root.
+ node_to_include = top_;
+ top_ = node.Get(syncable::META_HANDLE);
+ } else {
+ // Otherwise, get the parent ID so that we can add a ParentChildLink.
+ syncable::Entry parent(trans, syncable::GET_BY_ID,
+ node.Get(syncable::PARENT_ID));
+ CHECK(parent.good());
+ node_parent = parent.Get(syncable::META_HANDLE);
+
+ ParentChildLink link(node_parent, node_to_include);
+
+ // If the link exists in the LinkSet |links_|, we don't need to search
+ // any higher; we are done.
+ if (links_.find(link) != links_.end())
+ return;
+
+ // Otherwise, extend |links_|, and repeat on the parent.
+ links_.insert(link);
+ node_to_include = node_parent;
+ }
+ }
+ }
+
+ // Return the top node of the traversal. Use this as a starting point
+ // for walking the tree.
+ int64 top() const { return top_; }
+
+ // Return an iterator corresponding to the first child (in the traversal)
+ // of the node specified by |parent_id|. Iterate this return value until
+ // it is equal to the value returned by end_children(parent_id). The
+ // enumeration thus provided is unordered.
+ LinkSet::const_iterator begin_children(int64 parent_id) const {
+ return links_.upper_bound(
+ ParentChildLink(parent_id, numeric_limits<int64>::min()));
+ }
+
+ // Return an iterator corresponding to the last child in the traversal
+ // of the node specified by |parent_id|.
+ LinkSet::const_iterator end_children(int64 parent_id) const {
+ return begin_children(parent_id + 1);
+ }
+
+ private:
+ // The topmost point in the directory hierarchy that is in the traversal,
+ // and thus the first node to be traversed. If the traversal is empty,
+ // this is kInvalidId. If the traversal contains exactly one member, |top_|
+ // will be the solitary member, and |links_| will be empty.
+ int64 top_;
+ // A set of single-level links that compose the traversal below |top_|. The
+ // (parent, child) ordering of values enables efficient lookup of children
+ // given the parent handle, which is used for top-down traversal. |links_|
+ // is expected to be connected -- every node that appears as a parent in a
+ // link must either appear as a child of another link, or else be the
+ // topmost node, |top_|.
+ LinkSet links_;
+
+ DISALLOW_COPY_AND_ASSIGN(Traversal);
+};
+
+void ChangeReorderBuffer::GetAllChangesInTreeOrder(
+ const BaseTransaction* sync_trans,
+ vector<ChangeRecord>* changelist) {
+ syncable::BaseTransaction* trans = sync_trans->GetWrappedTrans();
+
+ // Step 1: Iterate through the operations, doing three things:
+ // (a) Push deleted items straight into the |changelist|.
+ // (b) Construct a traversal spanning all non-deleted items.
+ // (c) Construct a set of all parent nodes of any position changes.
+ set<int64> parents_of_position_changes;
+ Traversal traversal;
+
+ OperationMap::const_iterator i;
+ for (i = operations_.begin(); i != operations_.end(); ++i) {
+ if (i->second == OP_DELETE) {
+ ChangeRecord record;
+ record.id = i->first;
+ record.action = ChangeRecord::ACTION_DELETE;
+ changelist->push_back(record);
+ } else {
+ traversal.ExpandToInclude(trans, i->first);
+ if (i->second == OP_ADD ||
+ i->second == OP_UPDATE_POSITION_AND_PROPERTIES) {
+ ReadNode node(sync_trans);
+ CHECK(node.InitByIdLookup(i->first));
+ parents_of_position_changes.insert(node.GetParentId());
+ }
+ }
+ }
+
+ // Step 2: Breadth-first expansion of the traversal, enumerating children in
+ // the syncable sibling order if there were any position updates.
+ queue<int64> to_visit;
+ to_visit.push(traversal.top());
+ while (!to_visit.empty()) {
+ int64 next = to_visit.front();
+ to_visit.pop();
+
+ // If the node has an associated action, output a change record.
+ i = operations_.find(next);
+ if (i != operations_.end()) {
+ ChangeRecord record;
+ record.id = next;
+ if (i->second == OP_ADD)
+ record.action = ChangeRecord::ACTION_ADD;
+ else
+ record.action = ChangeRecord::ACTION_UPDATE;
+ changelist->push_back(record);
+ }
+
+ // Now add the children of |next| to |to_visit|.
+ if (parents_of_position_changes.find(next) ==
+ parents_of_position_changes.end()) {
+ // No order changes on this parent -- traverse only the nodes listed
+ // in the traversal (and not in sibling order).
+ Traversal::LinkSet::const_iterator j = traversal.begin_children(next);
+ Traversal::LinkSet::const_iterator end = traversal.end_children(next);
+ for (; j != end; ++j) {
+ CHECK(j->first == next);
+ to_visit.push(j->second);
+ }
+ } else {
+ // There were ordering changes on the children of this parent, so
+ // enumerate all the children in the sibling order.
+ syncable::Entry parent(trans, syncable::GET_BY_HANDLE, next);
+ syncable::Id id = trans->directory()->
+ GetFirstChildId(trans, parent.Get(syncable::ID));
+ while (!id.IsRoot()) {
+ syncable::Entry child(trans, syncable::GET_BY_ID, id);
+ CHECK(child.good());
+ int64 handle = child.Get(syncable::META_HANDLE);
+ to_visit.push(handle);
+ // If there is no operation on this child node, record it as as an
+ // update, so that the listener gets notified of all nodes in the new
+ // ordering.
+ if (operations_.find(handle) == operations_.end())
+ operations_[handle] = OP_UPDATE_POSITION_AND_PROPERTIES;
+ id = child.Get(syncable::NEXT_ID);
+ }
+ }
+ }
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/change_reorder_buffer.h b/chrome/browser/sync/engine/change_reorder_buffer.h
new file mode 100644
index 0000000..ddea1b6
--- /dev/null
+++ b/chrome/browser/sync/engine/change_reorder_buffer.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Defines ChangeReorderBuffer, which can be used to sort a list of item
+// actions to achieve the ordering constraint required by the SyncObserver
+// interface of the SyncAPI.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
+
+#include <map>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncapi.h"
+
+namespace sync_api {
+
+// ChangeReorderBuffer is a utility type which accepts an unordered set
+// of changes (via its Push methods), and yields a vector of ChangeRecords
+// (via the GetAllChangesInTreeOrder method) that are in the order that
+// the SyncObserver expects them to be. A buffer is initially empty.
+//
+// The ordering produced by ChangeReorderBuffer is as follows:
+// (a) All Deleted items appear first.
+// (b) For Updated and/or Added items, parents appear before their children.
+// (c) When there are changes to the sibling order (this means Added items,
+// or Updated items with the |position_changed| parameter set to true),
+// all siblings under a parent will appear in the output, even if they
+// are not explicitly pushed. The sibling order will be preserved in
+// the output list -- items will appear before their sibling-order
+// successors.
+// (d) When there are no changes to the sibling order under a parent node,
+// the sibling order is not necessarily preserved in the output for
+// its children.
+class ChangeReorderBuffer {
+ public:
+ typedef SyncManager::ChangeRecord ChangeRecord;
+ ChangeReorderBuffer() { }
+
+ // Insert an item, identified by the metahandle |id|, into the reorder
+ // buffer. This item will appear in the output list as an ACTION_ADD
+ // ChangeRecord.
+ void PushAddedItem(int64 id) {
+ operations_[id] = OP_ADD;
+ }
+
+ // Insert an item, identified by the metahandle |id|, into the reorder
+ // buffer. This item will appear in the output list as an ACTION_DELETE
+ // ChangeRecord.
+ void PushDeletedItem(int64 id) {
+ operations_[id] = OP_DELETE;
+ }
+
+ // Insert an item, identified by the metahandle |id|, into the reorder
+ // buffer. This item will appear in the output list as an ACTION_UPDATE
+ // ChangeRecord. Also, if |position_changed| is true, all siblings of this
+ // item will appear in the output list as well; if it wasn't explicitly
+ // pushed, the siblings will have an ACTION_UPDATE ChangeRecord.
+ void PushUpdatedItem(int64 id, bool position_changed) {
+ operations_[id] = position_changed ? OP_UPDATE_POSITION_AND_PROPERTIES :
+ OP_UPDATE_PROPERTIES_ONLY;
+ }
+
+ // Reset the buffer, forgetting any pushed items, so that it can be used
+ // again to reorder a new set of changes.
+ void Clear() {
+ operations_.clear();
+ }
+
+ bool IsEmpty() const {
+ return operations_.empty();
+ }
+
+ // Output a reordered list of changes to |changelist| using the items
+ // that were pushed into the reorder buffer. |sync_trans| is used
+ // to determine the ordering.
+ void GetAllChangesInTreeOrder(const BaseTransaction* sync_trans,
+ std::vector<ChangeRecord>* changelist);
+
+ private:
+ class Traversal;
+ enum Operation {
+ OP_ADD, // AddedItem.
+ OP_DELETE, // DeletedItem.
+ OP_UPDATE_PROPERTIES_ONLY, // UpdatedItem with position_changed=0.
+ OP_UPDATE_POSITION_AND_PROPERTIES, // UpdatedItem with position_changed=1.
+ };
+ typedef std::map<int64, Operation> OperationMap;
+
+ // Stores the items that have been pushed into the buffer, and the
+ // type of operation that was associated with them.
+ OperationMap operations_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChangeReorderBuffer);
+};
+
+} // namespace sync_api
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
diff --git a/chrome/browser/sync/engine/client_command_channel.h b/chrome/browser/sync/engine/client_command_channel.h
new file mode 100644
index 0000000..2f91a9b
--- /dev/null
+++ b/chrome/browser/sync/engine/client_command_channel.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
+
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/util/event_sys.h"
+
+namespace browser_sync {
+
+// Commands for the client come back in sync responses, which is kind
+// of inconvenient because some services (like the bandwidth throttler)
+// want to know about them. So to avoid explicit dependencies on this
+// protocol behavior, the syncer dumps all client commands onto a shared
+// client command channel.
+
+struct ClientCommandChannelTraits {
+ typedef const sync_pb::ClientCommand* EventType;
+ static inline bool IsChannelShutdownEvent(const EventType &event) {
+ return 0 == event;
+ }
+};
+
+typedef EventChannel<ClientCommandChannelTraits, PThreadMutex>
+ ClientCommandChannel;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.cc b/chrome/browser/sync/engine/conflict_resolution_view.cc
new file mode 100644
index 0000000..aca5d89
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolution_view.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+
+#include <map>
+#include <set>
+
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+using std::map;
+using std::set;
+
+namespace browser_sync {
+
+ConflictResolutionView::ConflictResolutionView(SyncerSession* session)
+ : process_state_(session->sync_process_state_) {}
+
+int ConflictResolutionView::conflicting_updates() const {
+ return process_state_->conflicting_updates();
+}
+
+int ConflictResolutionView::conflicting_commits() const {
+ return process_state_->conflicting_commits();
+}
+
+void ConflictResolutionView::set_conflicting_commits(const int val) {
+ process_state_->set_conflicting_commits(val);
+}
+
+int ConflictResolutionView::num_sync_cycles() const {
+ return process_state_->num_sync_cycles_;
+}
+
+void ConflictResolutionView::increment_num_sync_cycles() {
+ ++(process_state_->num_sync_cycles_);
+}
+
+void ConflictResolutionView::zero_num_sync_cycles() {
+ process_state_->num_sync_cycles_ = 0;
+}
+
+int64 ConflictResolutionView::current_sync_timestamp() const {
+ return process_state_->current_sync_timestamp();
+}
+
+int64 ConflictResolutionView::servers_latest_timestamp() const {
+ return process_state_->servers_latest_timestamp();
+}
+
+ // True iff we're stuck. User should contact support.
+bool ConflictResolutionView::syncer_stuck() const {
+ return process_state_->syncer_stuck();
+}
+
+void ConflictResolutionView::set_syncer_stuck(const bool val) {
+ process_state_->set_syncer_stuck(val);
+}
+
+IdToConflictSetMap::const_iterator ConflictResolutionView::IdToConflictSetFind(
+ const syncable::Id& the_id) const {
+ return process_state_->IdToConflictSetFind(the_id);
+}
+
+IdToConflictSetMap::const_iterator
+ ConflictResolutionView::IdToConflictSetBegin() const {
+ return process_state_->IdToConflictSetBegin();
+}
+
+IdToConflictSetMap::const_iterator
+ ConflictResolutionView::IdToConflictSetEnd() const {
+ return process_state_->IdToConflictSetEnd();
+}
+
+IdToConflictSetMap::size_type
+ ConflictResolutionView::IdToConflictSetSize() const {
+ return process_state_->IdToConflictSetSize();
+}
+
+const ConflictSet*
+ ConflictResolutionView::IdToConflictSetGet(const syncable::Id& the_id) {
+ return process_state_->IdToConflictSetGet(the_id);
+}
+
+set<ConflictSet*>::const_iterator
+ ConflictResolutionView::ConflictSetsBegin() const {
+ return process_state_->ConflictSetsBegin();
+}
+
+set<ConflictSet*>::const_iterator
+ ConflictResolutionView::ConflictSetsEnd() const {
+ return process_state_->ConflictSetsEnd();
+}
+
+set<ConflictSet*>::size_type
+ ConflictResolutionView::ConflictSetsSize() const {
+ return process_state_->ConflictSetsSize();
+}
+
+void ConflictResolutionView::MergeSets(const syncable::Id& set1,
+ const syncable::Id& set2) {
+ process_state_->MergeSets(set1, set2);
+}
+
+void ConflictResolutionView::CleanupSets() {
+ process_state_->CleanupSets();
+}
+
+bool ConflictResolutionView::HasCommitConflicts() const {
+ return process_state_->HasConflictingItems();
+}
+
+bool ConflictResolutionView::HasBlockedItems() const {
+ return process_state_->HasBlockedItems();
+}
+
+int ConflictResolutionView::CommitConflictsSize() const {
+ return process_state_->ConflictingItemsSize();
+}
+
+int ConflictResolutionView::BlockedItemsSize() const {
+ return process_state_->BlockedItemsSize();
+}
+
+void ConflictResolutionView::AddCommitConflict(const syncable::Id& the_id) {
+ process_state_->AddConflictingItem(the_id);
+}
+
+void ConflictResolutionView::AddBlockedItem(const syncable::Id& the_id) {
+ process_state_->AddBlockedItem(the_id);
+}
+
+void ConflictResolutionView::EraseCommitConflict(
+ set<syncable::Id>::iterator it) {
+ process_state_->EraseConflictingItem(it);
+}
+
+void ConflictResolutionView::EraseBlockedItem(
+ set<syncable::Id>::iterator it) {
+ process_state_->EraseBlockedItem(it);
+}
+
+set<syncable::Id>::iterator
+ConflictResolutionView::CommitConflictsBegin() const {
+ return process_state_->ConflictingItemsBegin();
+}
+
+set<syncable::Id>::iterator
+ConflictResolutionView::BlockedItemsBegin() const {
+ return process_state_->BlockedItemsBegin();
+}
+
+set<syncable::Id>::iterator
+ ConflictResolutionView::CommitConflictsEnd() const {
+ return process_state_->ConflictingItemsEnd();
+}
+
+set<syncable::Id>::iterator
+ ConflictResolutionView::BlockedItemsEnd() const {
+ return process_state_->BlockedItemsEnd();
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.h b/chrome/browser/sync/engine/conflict_resolution_view.h
new file mode 100644
index 0000000..a60af65
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolution_view.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Conflict resolution view is intended to provide a restricted
+// view of the sync cycle state for the conflict resolver. Since the
+// resolver doesn't get to see all of the SyncProcess, we can allow
+// it to operate on a subsection of the data.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+
+namespace syncable {
+class Id;
+}
+
+namespace browser_sync {
+
+class SyncCycleState;
+class SyncProcessState;
+class SyncerSession;
+
+class ConflictResolutionView {
+ // THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+ public:
+
+ explicit ConflictResolutionView(SyncProcessState* state)
+ : process_state_(state) {
+ }
+
+ explicit ConflictResolutionView(SyncerSession* session);
+
+ ~ConflictResolutionView() {}
+
+ int conflicting_updates() const;
+
+ // TODO(sync) can successful commit go in session?
+ int successful_commits() const;
+
+ void increment_successful_commits();
+
+ void zero_successful_commits();
+
+ int conflicting_commits() const;
+
+ void set_conflicting_commits(const int val);
+
+ int num_sync_cycles() const;
+
+ void increment_num_sync_cycles();
+
+ void zero_num_sync_cycles();
+
+ // True iff we're stuck. Something has gone wrong with the syncer.
+ bool syncer_stuck() const;
+
+ void set_syncer_stuck(const bool val);
+
+ int64 current_sync_timestamp() const;
+
+ int64 servers_latest_timestamp() const;
+
+ IdToConflictSetMap::const_iterator IdToConflictSetFind(
+ const syncable::Id& the_id) const;
+
+ IdToConflictSetMap::const_iterator IdToConflictSetBegin() const;
+
+ IdToConflictSetMap::const_iterator IdToConflictSetEnd() const;
+
+ IdToConflictSetMap::size_type IdToConflictSetSize() const;
+
+ const ConflictSet* IdToConflictSetGet(const syncable::Id& the_id);
+
+ std::set<ConflictSet*>::const_iterator ConflictSetsBegin() const;
+
+ std::set<ConflictSet*>::const_iterator ConflictSetsEnd() const;
+
+ std::set<ConflictSet*>::size_type ConflictSetsSize() const;
+
+ void MergeSets(const syncable::Id& set1, const syncable::Id& set2);
+
+ void CleanupSets();
+
+ bool HasCommitConflicts() const;
+
+ bool HasBlockedItems() const;
+
+ int CommitConflictsSize() const;
+
+ int BlockedItemsSize() const;
+
+ void AddCommitConflict(const syncable::Id& the_id);
+
+ void AddBlockedItem(const syncable::Id& the_id);
+
+ void EraseCommitConflict(std::set<syncable::Id>::iterator it);
+
+ void EraseBlockedItem(std::set<syncable::Id>::iterator it);
+
+ std::set<syncable::Id>::iterator CommitConflictsBegin() const;
+
+ std::set<syncable::Id>::iterator BlockedItemsBegin() const;
+
+ std::set<syncable::Id>::iterator CommitConflictsEnd() const;
+
+ std::set<syncable::Id>::iterator BlockedItemsEnd() const;
+
+ private:
+ SyncProcessState* process_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConflictResolutionView);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
new file mode 100644
index 0000000..9bfe419
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -0,0 +1,758 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+
+#include <map>
+#include <set>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+using std::map;
+using std::set;
+using syncable::BaseTransaction;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::Name;
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+const int SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT = 8;
+
+ConflictResolver::ConflictResolver() {
+}
+
+ConflictResolver::~ConflictResolver() {
+}
+
+namespace {
+// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
+// This is historical cruft that seems to be actually reached by some users.
+inline PathString GetConflictPathnameBase(PathString base) {
+ time_t time_since = time(NULL);
+ struct tm* now = localtime(&time_since);
+ // Use a fixed format as the locale's format may include '/' characters or
+ // other illegal characters.
+ PathString date = IntToPathString(now->tm_year + 1900);
+ date.append(PSTR("-"));
+ ++now->tm_mon; // tm_mon is 0-based.
+ if (now->tm_mon < 10)
+ date.append(PSTR("0"));
+ date.append(IntToPathString(now->tm_mon));
+ date.append(PSTR("-"));
+ if (now->tm_mday < 10)
+ date.append(PSTR("0"));
+ date.append(IntToPathString(now->tm_mday));
+ return base + PSTR(" (Edited on ") + date + PSTR(")");
+}
+
+// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
+Name FindNewName(BaseTransaction* trans,
+ Id parent_id,
+ const SyncName& original_name) {
+ const PathString name = original_name.value();
+ // 255 is defined in our spec.
+ const int allowed_length = kSyncProtocolMaxNameLengthBytes;
+ // TODO(sync): How do we get length on other platforms? The limit is
+ // checked in java on the server, so it's not the number of glyphs its the
+ // number of 16 bit characters in the UTF-16 representation.
+
+ // 10 characters for 32 bit numbers + 2 characters for brackets means 12
+ // characters should be more than enough for the name. Doubling this ensures
+ // that we will have enough space.
+ COMPILE_ASSERT(kSyncProtocolMaxNameLengthBytes >= 24,
+ maximum_name_too_short);
+ CHECK(name.length() <= allowed_length);
+
+ if (!Entry(trans,
+ syncable::GET_BY_PARENTID_AND_DBNAME,
+ parent_id,
+ name).good())
+ return Name::FromSyncName(original_name);
+ PathString base = name;
+ PathString ext;
+ PathString::size_type ext_index = name.rfind('.');
+ if (PathString::npos != ext_index && 0 != ext_index &&
+ name.length() - ext_index < allowed_length / 2) {
+ base = name.substr(0, ext_index);
+ ext = name.substr(ext_index);
+ }
+
+ PathString name_base = GetConflictPathnameBase(base);
+ if (name_base.length() + ext.length() > allowed_length) {
+ name_base.resize(allowed_length - ext.length());
+ TrimPathStringToValidCharacter(&name_base);
+ }
+ PathString new_name = name_base + ext;
+ int n = 2;
+ while (Entry(trans,
+ syncable::GET_BY_PARENTID_AND_DBNAME,
+ parent_id,
+ new_name).good()) {
+ PathString local_ext = PSTR("(");
+ local_ext.append(IntToPathString(n));
+ local_ext.append(PSTR(")"));
+ local_ext.append(ext);
+ if (name_base.length() + local_ext.length() > allowed_length) {
+ name_base.resize(allowed_length - local_ext.length());
+ TrimPathStringToValidCharacter(&name_base);
+ }
+ new_name = name_base + local_ext;
+ n++;
+ }
+
+ CHECK(new_name.length() <= kSyncProtocolMaxNameLengthBytes);
+ return Name(new_name);
+}
+
+} // namespace
+
+void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
+ // An update matches local actions, merge the changes.
+ // This is a little fishy because we don't actually merge them.
+ // In the future we should do a 3-way merge.
+ LOG(INFO) << "Server and local changes match, merging:" << entry;
+ // With IS_UNSYNCED false, changes should be merged.
+ // METRIC simple conflict resolved by merge.
+ entry->Put(syncable::IS_UNSYNCED, false);
+}
+
+void ConflictResolver::OverwriteServerChanges(WriteTransaction* trans,
+ MutableEntry * entry) {
+ // This is similar to an overwrite from the old client.
+ // This is equivalent to a scenario where we got the update before we'd
+ // made our local client changes.
+ // TODO(chron): This is really a general property clobber. We clobber
+ // the server side property. Perhaps we should actually do property merging.
+ entry->Put(syncable::BASE_VERSION, entry->Get(syncable::SERVER_VERSION));
+ entry->Put(syncable::IS_UNAPPLIED_UPDATE, false);
+ // METRIC conflict resolved by overwrite.
+}
+
+ConflictResolver::ProcessSimpleConflictResult
+ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
+ Id id,
+ SyncerSession* session) {
+ MutableEntry entry(trans, syncable::GET_BY_ID, id);
+ // Must be good as the entry won't have been cleaned up.
+ CHECK(entry.good());
+ // If an update fails, locally we have to be in a set or unsynced. We're not
+ // in a set here, so we must be unsynced.
+ if (!entry.Get(syncable::IS_UNSYNCED))
+ return NO_SYNC_PROGRESS;
+ if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
+ if (!entry.Get(syncable::PARENT_ID).ServerKnows()) {
+ LOG(INFO) << "Item conflicting because its parent not yet committed. "
+ "Id: "<< id;
+ } else {
+ LOG(INFO) << "No set for conflicting entry id " << id << ". There should "
+ "be an update/commit that will fix this soon. This message should "
+ "not repeat.";
+ }
+ return NO_SYNC_PROGRESS;
+ }
+ if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) {
+ // we've both deleted it, so lets just drop the need to commit/update this
+ // entry.
+ entry.Put(syncable::IS_UNSYNCED, false);
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+ // we've made changes, but they won't help syncing progress.
+ // METRIC simple conflict resolved by merge.
+ return NO_SYNC_PROGRESS;
+ }
+
+ if (!entry.Get(syncable::SERVER_IS_DEL)) {
+ // TODO(chron): Should we check more fields? Since IS_UNSYNCED is
+ // turned on, this is really probably enough as fields will be overwritten.
+ // Check if there's no changes.
+
+ // Verbose but easier to debug.
+ bool name_matches = entry.SyncNameMatchesServerName();
+ bool parent_matches = entry.Get(syncable::PARENT_ID) ==
+ entry.Get(syncable::SERVER_PARENT_ID);
+ bool entry_deleted = entry.Get(syncable::IS_DEL);
+
+ if (!entry_deleted && name_matches && parent_matches) {
+ LOG(INFO) << "Resolving simple conflict, ignoring local changes for:"
+ << entry;
+ IgnoreLocalChanges(&entry);
+ } else {
+ LOG(INFO) << "Resolving simple conflict, overwriting server"
+ " changes for:" << entry;
+ OverwriteServerChanges(trans, &entry);
+ }
+ return SYNC_PROGRESS;
+ } else { // SERVER_IS_DEL is true
+ // If a server deleted folder has local contents we should be in a set.
+ if (entry.Get(syncable::IS_DIR)) {
+ Directory::ChildHandles children;
+ trans->directory()->GetChildHandles(trans,
+ entry.Get(syncable::ID),
+ &children);
+ if (0 != children.size()) {
+ LOG(INFO) << "Entry is a server deleted directory with local contents, "
+ "should be in a set. (race condition).";
+ return NO_SYNC_PROGRESS;
+ }
+ }
+ // METRIC conflict resolved by entry split;
+
+ // If the entry's deleted on the server, we can have a directory here.
+ entry.Put(syncable::IS_UNSYNCED, true);
+
+ SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);
+
+ MutableEntry server_update(trans, syncable::GET_BY_ID, id);
+ CHECK(server_update.good());
+ CHECK(server_update.Get(syncable::META_HANDLE) !=
+ entry.Get(syncable::META_HANDLE))
+ << server_update << entry;
+
+ return SYNC_PROGRESS;
+ }
+}
+
+namespace {
+
+bool NamesCollideWithChildrenOfFolder(BaseTransaction* trans,
+ const Directory::ChildHandles& children,
+ Id folder_id) {
+ Directory::ChildHandles::const_iterator i = children.begin();
+ while (i != children.end()) {
+ Entry child(trans, syncable::GET_BY_HANDLE, *i);
+ CHECK(child.good());
+ if (Entry(trans,
+ syncable::GET_BY_PARENTID_AND_DBNAME,
+ folder_id,
+ child.GetName().db_value()).good())
+ return true;
+ ++i;
+ }
+ return false;
+}
+
+void GiveEntryNewName(WriteTransaction* trans,
+ MutableEntry* entry) {
+ using namespace syncable;
+ Name new_name =
+ FindNewName(trans, entry->Get(syncable::PARENT_ID), entry->GetName());
+ LOG(INFO) << "Resolving name clash, renaming " << *entry << " to "
+ << new_name.db_value();
+ entry->PutName(new_name);
+ CHECK(entry->Get(syncable::IS_UNSYNCED));
+}
+
+} // namespace
+
+bool ConflictResolver::AttemptItemMerge(WriteTransaction* trans,
+ MutableEntry* locally_named,
+ MutableEntry* server_named) {
+ // To avoid complications we only merge new entries with server entries.
+ if (locally_named->Get(syncable::IS_DIR) !=
+ server_named->Get(syncable::SERVER_IS_DIR) ||
+ locally_named->Get(syncable::ID).ServerKnows() ||
+ locally_named->Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ server_named->Get(syncable::IS_UNSYNCED))
+ return false;
+ Id local_id = locally_named->Get(syncable::ID);
+ Id desired_id = server_named->Get(syncable::ID);
+ if (locally_named->Get(syncable::IS_DIR)) {
+ // Extra work for directory name clash. We have to make sure we don't have
+ // clashing child items, and update the parent id the children of the new
+ // entry.
+ Directory::ChildHandles children;
+ trans->directory()->GetChildHandles(trans, local_id, &children);
+ if (NamesCollideWithChildrenOfFolder(trans, children, desired_id))
+ return false;
+
+ LOG(INFO) << "Merging local changes to: " << desired_id << ". "
+ << *locally_named;
+
+ server_named->Put(syncable::ID, trans->directory()->NextId());
+ Directory::ChildHandles::iterator i;
+ for (i = children.begin() ; i != children.end() ; ++i) {
+ MutableEntry child_entry(trans, syncable::GET_BY_HANDLE, *i);
+ CHECK(child_entry.good());
+ CHECK(child_entry.Put(syncable::PARENT_ID, desired_id));
+ CHECK(child_entry.Put(syncable::IS_UNSYNCED, true));
+ Id id = child_entry.Get(syncable::ID);
+ // we only note new entries for quicker merging next round.
+ if (!id.ServerKnows())
+ children_of_merged_dirs_.insert(id);
+ }
+ } else {
+ if (!server_named->Get(syncable::IS_DEL))
+ return false;
+ }
+
+ LOG(INFO) << "Identical client and server items merging server changes. " <<
+ *locally_named << " server: " << *server_named;
+
+ // Clear server_named's server data and mark it deleted so it goes away
+ // quietly because it's now identical to a deleted local entry.
+ // locally_named takes on the ID of the server entry.
+ server_named->Put(syncable::ID, trans->directory()->NextId());
+ locally_named->Put(syncable::ID, desired_id);
+ locally_named->Put(syncable::IS_UNSYNCED, false);
+ CopyServerFields(server_named, locally_named);
+ ClearServerData(server_named);
+ server_named->Put(syncable::IS_DEL, true);
+ server_named->Put(syncable::BASE_VERSION, 0);
+ CHECK(SUCCESS ==
+ SyncerUtil::AttemptToUpdateEntryWithoutMerge(
+ trans, locally_named, NULL, NULL));
+ return true;
+}
+
+ConflictResolver::ServerClientNameClashReturn
+ConflictResolver::ProcessServerClientNameClash(WriteTransaction* trans,
+ MutableEntry* locally_named,
+ MutableEntry* server_named,
+ SyncerSession* session) {
+ if (!locally_named->ExistsOnClientBecauseDatabaseNameIsNonEmpty())
+ return NO_CLASH; // locally_named is a server update.
+ if (locally_named->Get(syncable::IS_DEL) ||
+ server_named->Get(syncable::SERVER_IS_DEL)) {
+ return NO_CLASH;
+ }
+ if (locally_named->Get(syncable::PARENT_ID) !=
+ server_named->Get(syncable::SERVER_PARENT_ID)) {
+ return NO_CLASH; // different parents
+ }
+
+ PathString name = locally_named->GetSyncNameValue();
+ if (0 != syncable::ComparePathNames(name,
+ server_named->Get(syncable::SERVER_NAME))) {
+ return NO_CLASH; // different names.
+ }
+
+ // First try to merge.
+ if (AttemptItemMerge(trans, locally_named, server_named)) {
+ // METRIC conflict resolved by merge
+ return SOLVED;
+ }
+ // We need to rename.
+ if (!locally_named->Get(syncable::IS_UNSYNCED)) {
+ LOG(ERROR) << "Locally named part of a name conflict not unsynced?";
+ locally_named->Put(syncable::IS_UNSYNCED, true);
+ }
+ if (!server_named->Get(syncable::IS_UNAPPLIED_UPDATE)) {
+ LOG(ERROR) << "Server named part of a name conflict not an update?";
+ }
+ GiveEntryNewName(trans, locally_named);
+
+ // METRIC conflict resolved by rename
+ return SOLVED;
+}
+
+ConflictResolver::ServerClientNameClashReturn
+ConflictResolver::ProcessNameClashesInSet(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ SyncerSession* session) {
+ ConflictSet::const_iterator i,j;
+ for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
+ if (!entryi.Get(syncable::IS_UNSYNCED) &&
+ !entryi.Get(syncable::IS_UNAPPLIED_UPDATE))
+ // This set is broken / doesn't make sense, this may be transient.
+ return BOGUS_SET;
+ for (j = conflict_set->begin() ; *i != *j ; ++j) {
+ MutableEntry entryj(trans, syncable::GET_BY_ID, *j);
+ ServerClientNameClashReturn rv =
+ ProcessServerClientNameClash(trans, &entryi, &entryj, session);
+ if (NO_CLASH == rv)
+ rv = ProcessServerClientNameClash(trans, &entryj, &entryi, session);
+ if (NO_CLASH != rv)
+ return rv;
+ }
+ }
+ return NO_CLASH;
+}
+
+ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
+ ConflictSet* set) {
+ // TODO(sync): Come up with a better scheme for set hashing. This scheme
+ // will make debugging easy.
+ // If this call to sort is removed, we need to add one before we use
+ // binary_search in ProcessConflictSet
+ sort(set->begin(), set->end());
+ std::stringstream rv;
+ for(ConflictSet::iterator i = set->begin() ; i != set->end() ; ++i )
+ rv << *i << ".";
+ return rv.str();
+}
+
+namespace {
+
+bool AttemptToFixCircularConflict(WriteTransaction* trans,
+ ConflictSet* conflict_set) {
+ ConflictSet::const_iterator i, j;
+ for(i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
+ if (entryi.Get(syncable::PARENT_ID) ==
+ entryi.Get(syncable::SERVER_PARENT_ID) ||
+ !entryi.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ !entryi.Get(syncable::IS_DIR)) {
+ continue;
+ }
+ Id parentid = entryi.Get(syncable::SERVER_PARENT_ID);
+ // Create the entry here as it's the only place we could ever get a parentid
+ // that doesn't correspond to a real entry.
+ Entry parent(trans, syncable::GET_BY_ID, parentid);
+ if (!parent.good()) // server parent update not received yet
+ continue;
+ // This loop walks upwards from the server parent. If we hit the root (0)
+ // all is well. If we hit the entry we're examining it means applying the
+ // parent id would cause a loop. We don't need more general loop detection
+ // because we know our local tree is valid.
+ while (!parentid.IsRoot()) {
+ Entry parent(trans, syncable::GET_BY_ID, parentid);
+ CHECK(parent.good());
+ if (parentid == *i)
+ break; // it's a loop
+ parentid = parent.Get(syncable::PARENT_ID);
+ }
+ if (parentid.IsRoot())
+ continue;
+ LOG(INFO) << "Overwriting server changes to avoid loop: " << entryi;
+ entryi.Put(syncable::BASE_VERSION, entryi.Get(syncable::SERVER_VERSION));
+ entryi.Put(syncable::IS_UNSYNCED, true);
+ entryi.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+ // METRIC conflict resolved by breaking dir loop.
+ return true;
+ }
+ return false;
+}
+
+bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ const Entry& entry) {
+ if (!entry.Get(syncable::IS_UNSYNCED) || entry.Get(syncable::IS_DEL))
+ return false;
+ Id parentid = entry.Get(syncable::PARENT_ID);
+ MutableEntry parent(trans, syncable::GET_BY_ID, parentid);
+ if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ !parent.Get(syncable::SERVER_IS_DEL) ||
+ !binary_search(conflict_set->begin(), conflict_set->end(), parentid))
+ return false;
+ // We're trying to commit into a directory tree that's been deleted.
+ // To solve this we recreate the directory tree.
+ //
+ // We do this in two parts, first we ensure the tree is unaltered since the
+ // conflict was detected.
+ Id id = parentid;
+ while (!id.IsRoot()) {
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
+ break;
+ Entry parent(trans, syncable::GET_BY_ID, id);
+ if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ !parent.Get(syncable::SERVER_IS_DEL))
+ return false;
+ id = parent.Get(syncable::PARENT_ID);
+ }
+ // Now we fix up the entries.
+ id = parentid;
+ while (!id.IsRoot()) {
+ MutableEntry parent(trans, syncable::GET_BY_ID, id);
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
+ break;
+ LOG(INFO) << "Giving directory a new id so we can undelete it "
+ << parent;
+ ClearServerData(&parent);
+ SyncerUtil::ChangeEntryIDAndUpdateChildren(trans, &parent,
+ trans->directory()->NextId());
+ parent.Put(syncable::BASE_VERSION, 0);
+ parent.Put(syncable::IS_UNSYNCED, true);
+ id = parent.Get(syncable::PARENT_ID);
+ // METRIC conflict resolved by recreating dir tree.
+ }
+ return true;
+}
+
+bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ const Entry& entry) {
+ if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ entry.Get(syncable::SERVER_IS_DEL))
+ return false;
+ Id parent_id = entry.Get(syncable::SERVER_PARENT_ID);
+ MutableEntry parent(trans, syncable::GET_BY_ID, parent_id);
+ if (!parent.good() || !parent.Get(syncable::IS_DEL) ||
+ !binary_search(conflict_set->begin(), conflict_set->end(), parent_id)) {
+ return false;
+ }
+ // We've deleted a directory tree that's got contents on the server.
+ // We recreate the directory to solve the problem.
+ //
+ // We do this in two parts, first we ensure the tree is unaltered since
+ // the conflict was detected.
+ Id id = parent_id;
+ // As we will be crawling the path of deleted entries there's a chance
+ // we'll end up having to reparent an item as there will be an invalid
+ // parent.
+ Id reroot_id = syncable::kNullId;
+ // similarly crawling deleted items means we risk loops.
+ int loop_detection = conflict_set->size();
+ while (!id.IsRoot() && --loop_detection >= 0) {
+ Entry parent(trans, syncable::GET_BY_ID, id);
+ // If we get a bad parent, or a parent that's deleted on client and
+ // server we recreate the hierarchy in the root.
+ if (!parent.good()) {
+ reroot_id = id;
+ break;
+ }
+ CHECK(parent.Get(syncable::IS_DIR));
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
+ // We've got to an entry that's not in the set. If it has been
+ // deleted between set building and this point in time we
+ // return false. If it had been deleted earlier it would have been
+ // in the set.
+ // TODO(sync): Revisit syncer code organization to see if
+ // conflict resolution can be done in the same transaction as set
+ // building.
+ if (parent.Get(syncable::IS_DEL))
+ return false;
+ break;
+ }
+ if (!parent.Get(syncable::IS_DEL) ||
+ parent.Get(syncable::SERVER_IS_DEL) ||
+ !parent.Get(syncable::IS_UNSYNCED)) {
+ return false;
+ }
+ id = parent.Get(syncable::PARENT_ID);
+ }
+ // If we find we've been looping we re-root the hierarchy.
+ if (loop_detection < 0)
+ if (id == entry.Get(syncable::ID))
+ reroot_id = entry.Get(syncable::PARENT_ID);
+ else
+ reroot_id = id;
+ // Now we fix things up by undeleting all the folders in the item's
+ // path.
+ id = parent_id;
+ while (!id.IsRoot() && id != reroot_id) {
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
+ break;
+ MutableEntry entry(trans, syncable::GET_BY_ID, id);
+ Id parent_id = entry.Get(syncable::PARENT_ID);
+ if (parent_id == reroot_id)
+ parent_id = trans->root_id();
+ Name old_name = entry.GetName();
+ Name new_name = FindNewName(trans, parent_id, old_name);
+ LOG(INFO) << "Undoing our deletion of " << entry <<
+ ", will have name " << new_name.db_value();
+ if (new_name != old_name || parent_id != entry.Get(syncable::PARENT_ID))
+ CHECK(entry.PutParentIdAndName(parent_id, new_name));
+ entry.Put(syncable::IS_DEL, false);
+ id = entry.Get(syncable::PARENT_ID);
+ // METRIC conflict resolved by recreating dir tree.
+ }
+ return true;
+}
+
+bool AttemptToFixRemovedDirectoriesWithContent(WriteTransaction* trans,
+ ConflictSet* conflict_set) {
+ ConflictSet::const_iterator i,j;
+ for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ Entry entry(trans, syncable::GET_BY_ID, *i);
+ if (AttemptToFixUnsyncedEntryInDeletedServerTree(trans,
+ conflict_set, entry)) {
+ return true;
+ }
+ if (AttemptToFixUpdateEntryInDeletedLocalTree(trans, conflict_set, entry))
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ int conflict_count,
+ SyncerSession* session) {
+ int set_size = conflict_set->size();
+ if (set_size < 2) {
+ LOG(WARNING) << "Skipping conflict set because it has size " << set_size;
+ // We can end up with sets of size one if we have a new item in a set that
+ // we tried to commit transactionally. This should not be a persistent
+ // situation.
+ return false;
+ }
+ if (conflict_count < 3) {
+ // Avoid resolving sets that could be the result of transient conflicts.
+ // Transient conflicts can occur because the client or server can be
+ // slightly out of date.
+ return false;
+ }
+
+ LOG(INFO) << "Fixing a set containing " << set_size << " items";
+
+ ServerClientNameClashReturn rv = ProcessNameClashesInSet(trans, conflict_set,
+ session);
+ if (SOLVED == rv)
+ return true;
+ if (NO_CLASH != rv)
+ return false;
+
+ // Fix circular conflicts.
+ if (AttemptToFixCircularConflict(trans, conflict_set))
+ return true;
+ // Check for problems involving contents of removed folders.
+ if (AttemptToFixRemovedDirectoriesWithContent(trans, conflict_set))
+ return true;
+ return false;
+}
+
+
+template <typename InputIt>
+bool ConflictResolver::LogAndSignalIfConflictStuck(
+ BaseTransaction* trans,
+ int attempt_count,
+ InputIt begin,
+ InputIt end,
+ ConflictResolutionView* view) {
+ if (attempt_count < SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT)
+ return false;
+
+ // Don't signal stuck if we're not up to date.
+ if (view->servers_latest_timestamp() != view->current_sync_timestamp())
+ return false;
+
+ LOG(ERROR) << "[BUG] Conflict set cannot be resolved, has "
+ << end - begin << " items:";
+ for (InputIt i = begin ; i != end ; ++i) {
+ Entry e(trans, syncable::GET_BY_ID, *i);
+ if (e.good())
+ LOG(ERROR) << " " << e;
+ else
+ LOG(ERROR) << " Bad ID:" << *i;
+ }
+
+ view->set_syncer_stuck(true);
+
+ return true;
+ // TODO(sync): If we're stuck for a while we need to alert the user,
+ // clear cache or reset syncing. At the very least we should stop trying
+ // something that's obviously not working.
+}
+
+bool ConflictResolver::ResolveSimpleConflicts(const ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession *session) {
+ WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ bool forward_progress = false;
+ // First iterate over simple conflict items (those that belong to no set).
+ set<Id>::const_iterator conflicting_item_it;
+ for (conflicting_item_it = view->CommitConflictsBegin();
+ conflicting_item_it != view->CommitConflictsEnd() ;
+ ++conflicting_item_it) {
+ Id id = *conflicting_item_it;
+ map<Id, ConflictSet*>::const_iterator item_set_it =
+ view->IdToConflictSetFind(id);
+ if (item_set_it == view->IdToConflictSetEnd() ||
+ 0 == item_set_it->second) {
+ // We have a simple conflict.
+ switch(ProcessSimpleConflict(&trans, id, session)) {
+ case NO_SYNC_PROGRESS:
+ {
+ int conflict_count = (simple_conflict_count_map_[id] += 2);
+ bool stuck = LogAndSignalIfConflictStuck(&trans, conflict_count,
+ &id, &id + 1, view);
+ break;
+ }
+ case SYNC_PROGRESS:
+ forward_progress = true;
+ break;
+ }
+ }
+ }
+ // Reduce the simple_conflict_count for each item currently tracked.
+ SimpleConflictCountMap::iterator i = simple_conflict_count_map_.begin();
+ while (i != simple_conflict_count_map_.end()) {
+ if (0 == --(i->second))
+ simple_conflict_count_map_.erase(i++);
+ else
+ ++i;
+ }
+ return forward_progress;
+}
+
+bool ConflictResolver::ResolveConflicts(const ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession *session) {
+ if (view->HasBlockedItems()) {
+ LOG(INFO) << "Delaying conflict resolution, have " <<
+ view->BlockedItemsSize() << " blocked items.";
+ return false;
+ }
+ bool rv = false;
+ if (ResolveSimpleConflicts(dir, view, session))
+ rv = true;
+ WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ set<Id> children_of_dirs_merged_last_round;
+ std::swap(children_of_merged_dirs_, children_of_dirs_merged_last_round);
+ set<ConflictSet*>::const_iterator set_it;
+ for (set_it = view->ConflictSetsBegin();
+ set_it != view->ConflictSetsEnd();
+ set_it++) {
+ ConflictSet* conflict_set = *set_it;
+ ConflictSetCountMapKey key = GetSetKey(conflict_set);
+ conflict_set_count_map_[key] += 2;
+ int conflict_count = conflict_set_count_map_[key];
+ // Keep a metric for new sets.
+ if (2 == conflict_count) {
+ // METRIC conflict sets seen ++
+ }
+ // See if this set contains entries whose parents were merged last round.
+ if (SortedCollectionsIntersect(children_of_dirs_merged_last_round.begin(),
+ children_of_dirs_merged_last_round.end(),
+ conflict_set->begin(),
+ conflict_set->end())) {
+ LOG(INFO) << "Accelerating resolution for hierarchical merge.";
+ conflict_count += 2;
+ }
+ // See if we should process this set.
+ if (ProcessConflictSet(&trans, conflict_set, conflict_count, session)) {
+ rv = true;
+ }
+ SyncerStatus status(session);
+ bool stuck = LogAndSignalIfConflictStuck(&trans, conflict_count,
+ conflict_set->begin(),
+ conflict_set->end(), view);
+ }
+ if (rv) {
+ // This code means we don't signal that syncing is stuck when any conflict
+ // resolution has occured.
+ // TODO(sync): As this will also reduce our sensitivity to problem
+ // conditions and increase the time for cascading resolutions we may have to
+ // revisit this code later, doing something more intelligent.
+ conflict_set_count_map_.clear();
+ simple_conflict_count_map_.clear();
+ }
+ ConflictSetCountMap::iterator i = conflict_set_count_map_.begin();
+ while (i != conflict_set_count_map_.end()) {
+ if (0 == --i->second) {
+ conflict_set_count_map_.erase(i++);
+ // METRIC self resolved conflict sets ++.
+ } else {
+ ++i;
+ }
+ }
+ return rv;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
new file mode 100644
index 0000000..7959106
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class that watches the syncer and attempts to resolve any conflicts that
+// occur.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace syncable {
+class BaseTransaction;
+class Id;
+class MutableEntry;
+class ScopedDirLookup;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class ConflictResolver {
+ friend class SyncerTest;
+ FRIEND_TEST(SyncerTest, ConflictResolverMergeOverwritesLocalEntry);
+ public:
+ ConflictResolver();
+ ~ConflictResolver();
+ // Called by the syncer at the end of a update/commit cycle.
+ // Returns true if the syncer should try to apply its updates again.
+ bool ResolveConflicts(const syncable::ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession *session);
+
+ // Called by ProcessServerClientNameClash. Returns true if it's merged the
+ // items, false otherwise. Does not re-check preconditions covered in
+ // ProcessServerClientNameClash (i.e. it assumes a name clash).
+ bool AttemptItemMerge(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* local_entry,
+ syncable::MutableEntry* server_entry);
+
+ private:
+ // We keep a map to record how often we've seen each conflict set. We use this
+ // to screen out false positives caused by transient server or client states,
+ // and to allow us to try to make smaller changes to fix situations before
+ // moving onto more drastic solutions.
+ typedef std::string ConflictSetCountMapKey;
+ typedef std::map<ConflictSetCountMapKey, int> ConflictSetCountMap;
+ typedef std::map<syncable::Id, int> SimpleConflictCountMap;
+
+ enum ProcessSimpleConflictResult {
+ NO_SYNC_PROGRESS, // No changes to advance syncing made.
+ SYNC_PROGRESS, // Progress made.
+ };
+
+ enum ServerClientNameClashReturn {
+ NO_CLASH,
+ SOLUTION_DEFERRED,
+ SOLVED,
+ BOGUS_SET,
+ };
+
+ // Get a key for the given set. NB: May reorder set contents.
+ // The key is currently not very efficient, but will ease debugging.
+ ConflictSetCountMapKey GetSetKey(ConflictSet* conflict_set);
+
+ void IgnoreLocalChanges(syncable::MutableEntry * entry);
+ void OverwriteServerChanges(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+
+ ProcessSimpleConflictResult ProcessSimpleConflict(
+ syncable::WriteTransaction* trans,
+ syncable::Id id,
+ SyncerSession* session);
+
+ bool ResolveSimpleConflicts(const syncable::ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession* session);
+
+ bool ProcessConflictSet(syncable::WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ int conflict_count,
+ SyncerSession* session);
+
+ // Gives any unsynced entries in the given set new names if possible.
+ bool RenameUnsyncedEntries(syncable::WriteTransaction* trans,
+ ConflictSet* conflict_set);
+
+ ServerClientNameClashReturn ProcessServerClientNameClash(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* locally_named,
+ syncable::MutableEntry* server_named,
+ SyncerSession* session);
+ ServerClientNameClashReturn ProcessNameClashesInSet(
+ syncable::WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ SyncerSession* session);
+
+ // Returns true if we're stuck
+ template <typename InputIt>
+ bool LogAndSignalIfConflictStuck(syncable::BaseTransaction* trans,
+ int attempt_count,
+ InputIt start, InputIt end,
+ ConflictResolutionView* view);
+
+ ConflictSetCountMap conflict_set_count_map_;
+ SimpleConflictCountMap simple_conflict_count_map_;
+
+ // Contains the ids of uncommitted items that are children of entries merged
+ // in the previous cycle. This is used to speed up the merge resolution of
+ // deep trees. Used to happen in store refresh.
+ // TODO(chron): Can we get rid of this optimization?
+ std::set<syncable::Id> children_of_merged_dirs_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConflictResolver);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
diff --git a/chrome/browser/sync/engine/download_updates_command.cc b/chrome/browser/sync/engine/download_updates_command.cc
new file mode 100644
index 0000000..0d84275
--- /dev/null
+++ b/chrome/browser/sync/engine/download_updates_command.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/download_updates_command.h"
+
+#include <string>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using syncable::ScopedDirLookup;
+
+namespace browser_sync {
+
+using std::string;
+
+DownloadUpdatesCommand::DownloadUpdatesCommand() {}
+DownloadUpdatesCommand::~DownloadUpdatesCommand() {}
+
+void DownloadUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+ ClientToServerMessage client_to_server_message;
+ ClientToServerResponse update_response;
+
+ client_to_server_message.set_share(
+ static_cast<const string&>(ToUTF8(session->account_name())));
+ client_to_server_message.set_message_contents(
+ ClientToServerMessage::GET_UPDATES);
+ GetUpdatesMessage* get_updates =
+ client_to_server_message.mutable_get_updates();
+
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ LOG(INFO) << "Getting updates from ts " << dir->last_sync_timestamp();
+ get_updates->set_from_timestamp(dir->last_sync_timestamp());
+
+ // Set GetUpdatesMessage.GetUpdatesCallerInfo information.
+ get_updates->mutable_caller_info()->set_source(session->TestAndSetSource());
+ get_updates->mutable_caller_info()->set_notifications_enabled(
+ session->notifications_enabled());
+
+ bool ok = SyncerProtoUtil::PostClientToServerMessage(
+ &client_to_server_message,
+ &update_response,
+ session);
+
+ if (!ok) {
+ SyncerStatus status(session);
+ status.increment_consecutive_problem_get_updates();
+ status.increment_consecutive_errors();
+ LOG(ERROR) << "PostClientToServerMessage() failed";
+ return;
+ }
+ session->set_update_response(update_response);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/download_updates_command.h b/chrome/browser/sync/engine/download_updates_command.h
new file mode 100644
index 0000000..2f48cb8
--- /dev/null
+++ b/chrome/browser/sync/engine/download_updates_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+namespace browser_sync {
+
+// Downloads updates from the server and places them in the SyncerSession.
+class DownloadUpdatesCommand : public SyncerCommand {
+ public:
+ DownloadUpdatesCommand();
+ virtual ~DownloadUpdatesCommand();
+ virtual void ExecuteImpl(SyncerSession *session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.cc b/chrome/browser/sync/engine/get_commit_ids_command.cc
new file mode 100644
index 0000000..612b40c
--- /dev/null
+++ b/chrome/browser/sync/engine/get_commit_ids_command.cc
@@ -0,0 +1,242 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+
+GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
+ : requested_commit_batch_size_(commit_batch_size) {}
+
+GetCommitIdsCommand::~GetCommitIdsCommand() {}
+
+void GetCommitIdsCommand::ExecuteImpl(SyncerSession *session) {
+ // Gather the full set of unsynced items and store it in the session.
+ // They are not in the correct order for commit.
+ syncable::Directory::UnsyncedMetaHandles all_unsynced_handles;
+ SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
+ &all_unsynced_handles);
+ session->set_unsynced_handles(all_unsynced_handles);
+
+ BuildCommitIds(session);
+
+ const vector<syncable::Id>& verified_commit_ids =
+ ordered_commit_set_.GetCommitIds();
+
+ for (size_t i = 0; i < verified_commit_ids.size(); i++)
+ LOG(INFO) << "Debug commit batch result:" << verified_commit_ids[i];
+
+ session->set_commit_ids(verified_commit_ids);
+}
+
+void GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Id parent_id) {
+ using namespace syncable;
+ OrderedCommitSet item_dependencies;
+
+ // Climb the tree adding entries leaf -> root.
+ while (!parent_id.ServerKnows()) {
+ Entry parent(trans, GET_BY_ID, parent_id);
+ CHECK(parent.good()) << "Bad user-only parent in item path.";
+ int64 handle = parent.Get(META_HANDLE);
+ if (ordered_commit_set_.HaveCommitItem(handle) ||
+ item_dependencies.HaveCommitItem(handle)) {
+ break;
+ }
+ if (!AddItemThenPredecessors(trans, &parent, IS_UNSYNCED,
+ &item_dependencies)) {
+ break; // Parent was already present in the set.
+ }
+ parent_id = parent.Get(PARENT_ID);
+ }
+
+ // Reverse what we added to get the correct order.
+ ordered_commit_set_.AppendReverse(item_dependencies);
+}
+
+bool GetCommitIdsCommand::AddItem(syncable::Entry* item,
+ OrderedCommitSet* result) {
+ int64 item_handle = item->Get(syncable::META_HANDLE);
+ if (result->HaveCommitItem(item_handle) ||
+ ordered_commit_set_.HaveCommitItem(item_handle)) {
+ return false;
+ }
+ result->AddCommitItem(item_handle, item->Get(syncable::ID));
+ return true;
+}
+
+bool GetCommitIdsCommand::AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ OrderedCommitSet* result) {
+ if (!AddItem(item, result))
+ return false;
+ if (item->Get(syncable::IS_DEL))
+ return true; // Deleted items have no predecessors.
+
+ syncable::Id prev_id = item->Get(syncable::PREV_ID);
+ while (!prev_id.IsRoot()) {
+ syncable::Entry prev(trans, syncable::GET_BY_ID, prev_id);
+ CHECK(prev.good()) << "Bad id when walking predecessors.";
+ if (!prev.Get(inclusion_filter))
+ break;
+ if (!AddItem(&prev, result))
+ break;
+ prev_id = prev.Get(syncable::PREV_ID);
+ }
+ return true;
+}
+
+void GetCommitIdsCommand::AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter) {
+ OrderedCommitSet item_dependencies;
+ AddItemThenPredecessors(trans, item, inclusion_filter, &item_dependencies);
+
+ // Reverse what we added to get the correct order.
+ ordered_commit_set_.AppendReverse(item_dependencies);
+}
+
+bool GetCommitIdsCommand::IsCommitBatchFull() {
+ return ordered_commit_set_.Size() >= requested_commit_batch_size_;
+}
+
+void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession *session) {
+ // Add moves and creates, and prepend their uncommitted parents.
+ for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
+ !IsCommitBatchFull() && iterator.Valid();
+ iterator.Increment()) {
+ int64 metahandle = iterator.Current();
+
+ syncable::Entry entry(session->write_transaction(),
+ syncable::GET_BY_HANDLE,
+ metahandle);
+ if (!entry.Get(syncable::IS_DEL)) {
+ AddUncommittedParentsAndTheirPredecessors(
+ session->write_transaction(), entry.Get(syncable::PARENT_ID));
+ AddPredecessorsThenItem(session->write_transaction(), &entry,
+ syncable::IS_UNSYNCED);
+ }
+ }
+
+ // It's possible that we overcommitted while trying to expand dependent
+ // items. If so, truncate the set down to the allowed size.
+ ordered_commit_set_.Truncate(requested_commit_batch_size_);
+}
+
+void GetCommitIdsCommand::AddDeletes(SyncerSession *session) {
+ set<syncable::Id> legal_delete_parents;
+
+ for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
+ !IsCommitBatchFull() && iterator.Valid();
+ iterator.Increment()) {
+ int64 metahandle = iterator.Current();
+
+ syncable::Entry entry(session->write_transaction(),
+ syncable::GET_BY_HANDLE,
+ metahandle);
+
+ if (entry.Get(syncable::IS_DEL)) {
+ syncable::Entry parent(session->write_transaction(),
+ syncable::GET_BY_ID,
+ entry.Get(syncable::PARENT_ID));
+ // If the parent is deleted and unsynced, then any children of that
+ // parent don't need to be added to the delete queue.
+ //
+ // Note: the parent could be synced if there was an update deleting a
+ // folder when we had a deleted all items in it.
+ // We may get more updates, or we may want to delete the entry.
+ if (parent.good() &&
+ parent.Get(syncable::IS_DEL) &&
+ parent.Get(syncable::IS_UNSYNCED)) {
+ // However, if an entry is moved, these rules can apply differently.
+ //
+ // If the entry was moved, then the destination parent was deleted,
+ // then we'll miss it in the roll up. We have to add it in manually.
+ // TODO(chron): Unit test for move / delete cases:
+ // Case 1: Locally moved, then parent deleted
+ // Case 2: Server moved, then locally issue recursive delete.
+ if (entry.Get(syncable::ID).ServerKnows() &&
+ entry.Get(syncable::PARENT_ID) !=
+ entry.Get(syncable::SERVER_PARENT_ID)) {
+ LOG(INFO) << "Inserting moved and deleted entry, will be missed by"
+ " delete roll." << entry.Get(syncable::ID);
+
+ ordered_commit_set_.AddCommitItem(metahandle,
+ entry.Get(syncable::ID));
+ }
+
+ // Skip this entry since it's a child of a parent that will be
+ // deleted. The server will unroll the delete and delete the
+ // child as well.
+ continue;
+ }
+
+ legal_delete_parents.insert(entry.Get(syncable::PARENT_ID));
+ }
+ }
+
+ // We could store all the potential entries with a particular parent during
+ // the above scan, but instead we rescan here. This is less efficient, but
+ // we're dropping memory alloc/dealloc in favor of linear scans of recently
+ // examined entries.
+ //
+ // Scan through the UnsyncedMetaHandles again. If we have a deleted
+ // entry, then check if the parent is in legal_delete_parents.
+ //
+ // Parent being in legal_delete_parents means for the child:
+ // a recursive delete is not currently happening (no recent deletes in same
+ // folder)
+ // parent did expect at least one old deleted child
+ // parent was not deleted
+
+ for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
+ !IsCommitBatchFull() && iterator.Valid();
+ iterator.Increment()) {
+ int64 metahandle = iterator.Current();
+ syncable::MutableEntry entry(session->write_transaction(),
+ syncable::GET_BY_HANDLE,
+ metahandle);
+ if (entry.Get(syncable::IS_DEL)) {
+ syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
+ if (legal_delete_parents.count(parent_id)) {
+ ordered_commit_set_.AddCommitItem(metahandle, entry.Get(syncable::ID));
+ }
+ }
+ }
+}
+
+void GetCommitIdsCommand::BuildCommitIds(SyncerSession *session) {
+ // Commits follow these rules:
+ // 1. Moves or creates are preceded by needed folder creates, from
+ // root to leaf. For folders whose contents are ordered, moves
+ // and creates appear in order.
+ // 2. Moves/Creates before deletes.
+ // 3. Deletes, collapsed.
+ // We commit deleted moves under deleted items as moves when collapsing
+ // delete trees.
+
+ // Add moves and creates, and prepend their uncommitted parents.
+ AddCreatesAndMoves(session);
+
+ // Add all deletes.
+ AddDeletes(session);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.h b/chrome/browser/sync/engine/get_commit_ids_command.h
new file mode 100644
index 0000000..2d80a04
--- /dev/null
+++ b/chrome/browser/sync/engine/get_commit_ids_command.h
@@ -0,0 +1,202 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+
+#include <vector>
+#include <utility>
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::pair;
+using std::vector;
+
+namespace browser_sync {
+
+class GetCommitIdsCommand : public SyncerCommand {
+ friend class SyncerTest;
+
+ public:
+ explicit GetCommitIdsCommand(int commit_batch_size);
+ virtual ~GetCommitIdsCommand();
+
+ virtual void ExecuteImpl(SyncerSession *session);
+
+ // Returns a vector of IDs that should be committed.
+ void BuildCommitIds(SyncerSession *session);
+
+ // These classes are public for testing.
+ // TODO(ncarter): This code is more generic than just Commit and can
+ // be reused elsewhere (e.g. PositionalRunBuilder, ChangeReorderBuffer
+ // do similar things). Merge all these implementations.
+ class OrderedCommitSet {
+ public:
+ // TODO(chron): Reserve space according to batch size?
+ OrderedCommitSet() {}
+ ~OrderedCommitSet() {}
+
+ bool HaveCommitItem(const int64 metahandle) const {
+ return inserted_metahandles_.count(metahandle) > 0;
+ }
+
+ void AddCommitItem(const int64 metahandle, const syncable::Id& commit_id) {
+ if (!HaveCommitItem(metahandle)) {
+ inserted_metahandles_.insert(metahandle);
+ metahandle_order_.push_back(metahandle);
+ commit_ids_.push_back(commit_id);
+ }
+ }
+
+ const vector<syncable::Id>& GetCommitIds() const {
+ return commit_ids_;
+ }
+
+ pair<int64, syncable::Id> GetCommitItemAt(const int position) const {
+ DCHECK(position < Size());
+ return pair<int64, syncable::Id> (
+ metahandle_order_[position], commit_ids_[position]);
+ }
+
+ int Size() const {
+ return commit_ids_.size();
+ }
+
+ void AppendReverse(const OrderedCommitSet& other) {
+ for (int i = other.Size() - 1; i >= 0; i--) {
+ pair<int64, syncable::Id> item = other.GetCommitItemAt(i);
+ AddCommitItem(item.first, item.second);
+ }
+ }
+
+ void Truncate(size_t max_size) {
+ if (max_size < metahandle_order_.size()) {
+ for (size_t i = max_size; i < metahandle_order_.size(); ++i) {
+ inserted_metahandles_.erase(metahandle_order_[i]);
+ }
+ commit_ids_.resize(max_size);
+ metahandle_order_.resize(max_size);
+ }
+ }
+
+ private:
+ // These three lists are different views of the same data; e.g they are
+ // isomorphic.
+ syncable::MetahandleSet inserted_metahandles_;
+ vector<syncable::Id> commit_ids_;
+ vector<int64> metahandle_order_;
+
+ DISALLOW_COPY_AND_ASSIGN(OrderedCommitSet);
+ };
+
+
+ // TODO(chron): Remove writes from this iterator. As a warning, this
+ // iterator causes writes to entries and so isn't a pure iterator.
+ // It will do Put(IS_UNSYNCED) as well as add things to the blocked
+ // session list. Refactor this out later.
+ class CommitMetahandleIterator {
+ public:
+ // TODO(chron): Cache ValidateCommitEntry responses across iterators to save
+ // UTF8 conversion and filename checking
+ CommitMetahandleIterator(SyncerSession* session,
+ OrderedCommitSet* commit_set)
+ : session_(session),
+ commit_set_(commit_set) {
+ handle_iterator_ = session->unsynced_handles().begin();
+
+ // TODO(chron): Remove writes from this iterator.
+ DCHECK(session->has_open_write_transaction());
+
+ if (Valid() && !ValidateMetahandleForCommit(*handle_iterator_))
+ Increment();
+ }
+ ~CommitMetahandleIterator() {}
+
+ int64 Current() const {
+ DCHECK(Valid());
+ return *handle_iterator_;
+ }
+
+ bool Increment() {
+ if (!Valid())
+ return false;
+
+ for (++handle_iterator_;
+ handle_iterator_ != session_->unsynced_handles().end();
+ ++handle_iterator_) {
+ if (ValidateMetahandleForCommit(*handle_iterator_))
+ return true;
+ }
+
+ return false;
+ }
+
+ bool Valid() const {
+ return !(handle_iterator_ == session_->unsynced_handles().end());
+ }
+
+ private:
+ bool ValidateMetahandleForCommit(int64 metahandle) {
+ if (commit_set_->HaveCommitItem(metahandle))
+ return false;
+
+ // We should really not WRITE in this iterator, but we can fix that
+ // later. ValidateCommitEntry writes to the DB, and we add the
+ // blocked items. We should move that somewhere else later.
+ syncable::MutableEntry entry(session_->write_transaction(),
+ syncable::GET_BY_HANDLE, metahandle);
+ VerifyCommitResult verify_result =
+ SyncerUtil::ValidateCommitEntry(&entry);
+ if (verify_result == VERIFY_BLOCKED) {
+ session_->AddBlockedItem(entry.Get(syncable::ID)); // TODO(chron): Ew.
+ } else if (verify_result == VERIFY_UNSYNCABLE) {
+ // drop unsyncable entries.
+ entry.Put(syncable::IS_UNSYNCED, false);
+ }
+ return verify_result == VERIFY_OK;
+ }
+
+ SyncerSession* session_;
+ vector<int64>::const_iterator handle_iterator_;
+ OrderedCommitSet* commit_set_;
+
+ DISALLOW_COPY_AND_ASSIGN(CommitMetahandleIterator);
+ };
+
+ private:
+ void AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Id parent_id);
+
+ // OrderedCommitSet helpers for adding predecessors in order.
+ // TODO(ncarter): Refactor these so that the |result| parameter goes
+ // away, and AddItem doesn't need to consider two OrderedCommitSets.
+ bool AddItem(syncable::Entry* item, OrderedCommitSet* result);
+ bool AddItemThenPredecessors(syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ OrderedCommitSet* result);
+ void AddPredecessorsThenItem(syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter);
+
+ bool IsCommitBatchFull();
+
+ void AddCreatesAndMoves(SyncerSession *session);
+
+ void AddDeletes(SyncerSession *session);
+
+ OrderedCommitSet ordered_commit_set_;
+
+ int requested_commit_batch_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/model_changing_syncer_command.cc b/chrome/browser/sync/engine/model_changing_syncer_command.cc
new file mode 100644
index 0000000..09b0782
--- /dev/null
+++ b/chrome/browser/sync/engine/model_changing_syncer_command.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/closure.h"
+
+namespace browser_sync {
+
+void ModelChangingSyncerCommand::ExecuteImpl(SyncerSession *session) {
+ work_session_ = session;
+ session->model_safe_worker()->DoWorkAndWaitUntilDone(
+ NewCallback(this, &ModelChangingSyncerCommand::StartChangingModel));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/model_changing_syncer_command.h b/chrome/browser/sync/engine/model_changing_syncer_command.h
new file mode 100644
index 0000000..32361090
--- /dev/null
+++ b/chrome/browser/sync/engine/model_changing_syncer_command.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+// An abstract SyncerCommand which dispatches its Execute step to the
+// model-safe worker thread. Classes derived from ModelChangingSyncerCommand
+// instead of SyncerCommand must implement ModelChangingExecuteImpl instead of
+// ExecuteImpl, but otherwise, the contract is the same.
+//
+// A command should derive from ModelChangingSyncerCommand instead of
+// SyncerCommand whenever the operation might change any client-visible
+// fields on any syncable::Entry. If the operation involves creating a
+// WriteTransaction, this is a sign that ModelChangingSyncerCommand is likely
+// necessary.
+class ModelChangingSyncerCommand : public SyncerCommand {
+ public:
+ ModelChangingSyncerCommand() : work_session_(NULL) { }
+ virtual ~ModelChangingSyncerCommand() { }
+
+ // SyncerCommand implementation. Sets work_session to session.
+ virtual void ExecuteImpl(SyncerSession* session);
+
+ // wrapper so implementations don't worry about storing work_session
+ void StartChangingModel() {
+ ModelChangingExecuteImpl(work_session_);
+ }
+
+ // Abstract method to be implemented by subclasses.
+ virtual void ModelChangingExecuteImpl(SyncerSession* session) = 0;
+
+ private:
+ // ExecuteImpl is expected to be run by SyncerCommand to set work_session.
+ // StartChangingModel is called to start this command running.
+ // Implementations will implement ModelChangingExecuteImpl and not
+ // worry about storing the session or setting it. They are given work_session.
+ SyncerSession* work_session_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
diff --git a/chrome/browser/sync/engine/model_safe_worker.h b/chrome/browser/sync/engine/model_safe_worker.h
new file mode 100644
index 0000000..ff470ac
--- /dev/null
+++ b/chrome/browser/sync/engine/model_safe_worker.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+// The Syncer uses a ModelSafeWorker for all tasks that could potentially
+// modify syncable entries (e.g under a WriteTransaction). The ModelSafeWorker
+// only knows how to do one thing, and that is take some work (in a fully
+// pre-bound callback) and have it performed (as in Run()) from a thread which
+// is guaranteed to be "model-safe", where "safe" refers to not allowing us to
+// cause an embedding application model to fall out of sync with the
+// syncable::Directory due to a race.
+class ModelSafeWorker {
+ public:
+ ModelSafeWorker() { }
+ virtual ~ModelSafeWorker() { }
+
+ // Any time the Syncer performs model modifications (e.g employing a
+ // WriteTransaction), it should be done by this method to ensure it is done
+ // from a model-safe thread.
+ //
+ // TODO(timsteele): For now this is non-reentrant, meaning the work being
+ // done should be at a high enough level in the stack that
+ // DoWorkAndWaitUntilDone won't be called again by invoking Run() on |work|.
+ // This is not strictly necessary; it may be best to call
+ // DoWorkAndWaitUntilDone at lower levels, such as within ApplyUpdates, but
+ // this is sufficient to simplify and test out our dispatching approach.
+ virtual void DoWorkAndWaitUntilDone(Closure* work) {
+ work->Run(); // By default, do the work on the current thread.
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ModelSafeWorker);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator.cc b/chrome/browser/sync/engine/net/gaia_authenticator.cc
new file mode 100644
index 0000000..7276cec
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator.cc
@@ -0,0 +1,483 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/net/url_translator.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "googleurl/src/gurl.h"
+
+using std::pair;
+using std::string;
+using std::vector;
+
+// TODO(timsteele): Integrate the following two functions to string_util.h or
+// somewhere that makes them unit-testable.
+bool SplitStringIntoKeyValues(const string& line,
+ char key_value_delimiter,
+ string* key, vector<string>* values) {
+ key->clear();
+ values->clear();
+
+ // find the key string
+ int end_key_pos = line.find_first_of(key_value_delimiter);
+ if (end_key_pos == string::npos) {
+ DLOG(INFO) << "cannot parse key from line: " << line;
+ return false; // no key
+ }
+ key->assign(line, 0, end_key_pos);
+
+ // find the values string
+ string remains(line, end_key_pos, line.size() - end_key_pos);
+ int begin_values_pos = remains.find_first_not_of(key_value_delimiter);
+ if (begin_values_pos == string::npos) {
+ DLOG(INFO) << "cannot parse value from line: " << line;
+ return false; // no value
+ }
+ string values_string(remains, begin_values_pos,
+ remains.size() - begin_values_pos);
+
+ // construct the values vector
+ values->push_back(values_string);
+ return true;
+}
+
+bool SplitStringIntoKeyValuePairs(const string& line,
+ char key_value_delimiter,
+ char key_value_pair_delimiter,
+ vector<pair<string, string> >* kv_pairs) {
+ kv_pairs->clear();
+
+ vector<string> pairs;
+ SplitString(line, key_value_pair_delimiter, &pairs);
+
+ bool success = true;
+ for (size_t i = 0; i < pairs.size(); ++i) {
+ string key;
+ vector<string> value;
+ if (!SplitStringIntoKeyValues(pairs[i],
+ key_value_delimiter,
+ &key, &value)) {
+ // Don't return here, to allow for keys without associated
+ // values; just record that our split failed.
+ success = false;
+ }
+ DCHECK_LE(value.size(), 1);
+ kv_pairs->push_back(make_pair(key, value.empty()? "" : value[0]));
+ }
+ return success;
+}
+
+namespace browser_sync {
+
+static const char kGaiaV1IssueAuthTokenPath[] = "/accounts/IssueAuthToken";
+
+static const char kGetUserInfoPath[] = "/accounts/GetUserInfo";
+
+// Sole constructor with initializers for all fields.
+GaiaAuthenticator::GaiaAuthenticator(const string& user_agent,
+ const string& service_id,
+ const string& gaia_url)
+ : user_agent_(user_agent),
+ service_id_(service_id),
+ gaia_url_(gaia_url),
+ request_count_(0),
+ early_auth_attempt_count_(0),
+ delay_(0),
+ next_allowed_auth_attempt_time_(0) {
+ GaiaAuthEvent done = { GaiaAuthEvent::GAIA_AUTHENTICATOR_DESTROYED, None,
+ this };
+ channel_ = new Channel(done);
+}
+
+GaiaAuthenticator::~GaiaAuthenticator() {
+ delete channel_;
+}
+
+bool GaiaAuthenticator::LaunchAuthenticate(const AuthParams& params,
+ bool synchronous) {
+ if (synchronous)
+ return AuthenticateImpl(params);
+ AuthParams* copy = new AuthParams;
+ *copy = params;
+ pthread_t thread_id;
+ int result = pthread_create(&thread_id, 0, &GaiaAuthenticator::ThreadMain,
+ copy);
+ if (result)
+ return false;
+ return true;
+}
+
+
+void* GaiaAuthenticator::ThreadMain(void* arg) {
+ NameCurrentThreadForDebugging("SyncEngine_GaiaAuthenticatorThread");
+ AuthParams* const params = reinterpret_cast<AuthParams*>(arg);
+ params->authenticator->AuthenticateImpl(*params);
+ delete params;
+ return 0;
+}
+
+// mutex_ must be entered before calling this function.
+GaiaAuthenticator::AuthParams GaiaAuthenticator::MakeParams(
+ const string& user_name,
+ const string& password,
+ SaveCredentials should_save_credentials,
+ const string& captcha_token,
+ const string& captcha_value,
+ SignIn try_first) {
+ AuthParams params;
+ params.request_id = ++request_count_;
+ params.email = user_name;
+ params.password = password;
+ params.should_save_credentials = should_save_credentials;
+ params.captcha_token = captcha_token;
+ params.captcha_value = captcha_value;
+ params.authenticator = this;
+ params.try_first = try_first;
+ return params;
+}
+
+bool GaiaAuthenticator::Authenticate(const string& user_name,
+ const string& password,
+ SaveCredentials should_save_credentials,
+ bool synchronous,
+ const string& captcha_token,
+ const string& captcha_value,
+ SignIn try_first) {
+ mutex_.Lock();
+ AuthParams const params =
+ MakeParams(user_name, password, should_save_credentials, captcha_token,
+ captcha_value, try_first);
+ mutex_.Unlock();
+ return LaunchAuthenticate(params, synchronous);
+}
+
+bool GaiaAuthenticator::AuthenticateImpl(const AuthParams& params) {
+ AuthResults results;
+ const bool succeeded = AuthenticateImpl(params, &results);
+ mutex_.Lock();
+ if (params.request_id == request_count_) {
+ auth_results_ = results;
+ GaiaAuthEvent event = { succeeded ? GaiaAuthEvent::GAIA_AUTH_SUCCEEDED
+ : GaiaAuthEvent::GAIA_AUTH_FAILED,
+ results.auth_error, this };
+ mutex_.Unlock();
+ channel_->NotifyListeners(event);
+ } else {
+ mutex_.Unlock();
+ }
+ return succeeded;
+}
+
+// This method makes an HTTP request to the Gaia server, and calls other
+// methods to help parse the response. If authentication succeeded, then
+// Gaia-issued cookies are available in the respective variables; if
+// authentication failed, then the exact error is available as an enum. If the
+// client wishes to save the credentials, the last parameter must be true.
+// If a subsequent request is made with fresh credentials, the saved credentials
+// are wiped out; any subsequent request to the zero-parameter overload of this
+// method preserves the saved credentials.
+bool GaiaAuthenticator::AuthenticateImpl(const AuthParams& params,
+ AuthResults* results) {
+ results->credentials_saved = params.should_save_credentials;
+ results->auth_error = ConnectionUnavailable;
+ // Save credentials if so requested.
+ if (params.should_save_credentials != DONT_SAVE_CREDENTIALS) {
+ results->email = params.email.data();
+ results->password = params.password;
+ } else { // Explicitly clear previously-saved credentials.
+ results->email = "";
+ results->password = "";
+ }
+
+ // The aim of this code is to start failing requests if due to a logic error
+ // in the program we're hammering GAIA.
+ time_t now = time(0);
+ if (now > next_allowed_auth_attempt_time_) {
+ next_allowed_auth_attempt_time_ = now + 1;
+ // If we're more than 2 minutes past the allowed time we reset the early
+ // attempt count.
+ if (now - next_allowed_auth_attempt_time_ > 2 * 60) {
+ delay_ = 1;
+ early_auth_attempt_count_ = 0;
+ }
+ } else {
+ ++early_auth_attempt_count_;
+ // Allow 3 attempts, but then limit.
+ if (early_auth_attempt_count_ > 3) {
+ delay_ = AllStatus::GetRecommendedDelaySeconds(delay_);
+ next_allowed_auth_attempt_time_ = now + delay_;
+ return false;
+ }
+ }
+
+ return PerformGaiaRequest(params, results);
+}
+
+bool GaiaAuthenticator::PerformGaiaRequest(const AuthParams& params,
+ AuthResults* results) {
+ GURL gaia_auth_url(gaia_url_);
+
+ string post_body;
+ post_body += "Email=" + CgiEscapeString(params.email);
+ post_body += "&Passwd=" + CgiEscapeString(params.password);
+ post_body += "&source=" + CgiEscapeString(user_agent_);
+ post_body += "&service=" + service_id_;
+ if (!params.captcha_token.empty() && !params.captcha_value.empty()) {
+ post_body += "&logintoken=" + CgiEscapeString(params.captcha_token);
+ post_body += "&logincaptcha=" + CgiEscapeString(params.captcha_value);
+ }
+ post_body += "&PersistentCookie=true";
+ // We set it to GOOGLE (and not HOSTED or HOSTED_OR_GOOGLE) because we only
+ // allow consumer logins.
+ post_body += "&accountType=GOOGLE";
+
+ string message_text;
+ unsigned long server_response_code;
+ if (!Post(gaia_auth_url, post_body, &server_response_code,
+ &message_text)) {
+ results->auth_error = ConnectionUnavailable;
+ return false;
+ }
+
+ // Parse reply in two different ways, depending on if request failed or
+ // succeeded.
+ if (RC_FORBIDDEN == server_response_code) {
+ ExtractAuthErrorFrom(message_text, results);
+ return false;
+ } else if (RC_REQUEST_OK == server_response_code) {
+ ExtractTokensFrom(message_text, results);
+ const bool old_gaia =
+ results->auth_token.empty() && !results->lsid.empty();
+ const bool long_lived_token =
+ params.should_save_credentials == PERSIST_TO_DISK;
+ if ((old_gaia || long_lived_token) &&
+ !IssueAuthToken(results, service_id_, long_lived_token))
+ return false;
+
+ return LookupEmail(results);
+ } else {
+ results->auth_error = Unknown;
+ return false;
+ }
+}
+
+bool GaiaAuthenticator::LookupEmail(AuthResults* results) {
+ // Use the provided Gaia server, but change the path to what V1 expects.
+ GURL url(gaia_url_); // Gaia server
+ GURL::Replacements repl;
+ // Needs to stay in scope till GURL is out of scope
+ string path(kGetUserInfoPath);
+ repl.SetPathStr(path);
+ url = url.ReplaceComponents(repl);
+
+ string post_body;
+ post_body += "LSID=";
+ post_body += CgiEscapeString(results->lsid);
+
+ unsigned long server_response_code;
+ string message_text;
+ if (!Post(url, post_body, &server_response_code, &message_text)) {
+ return false;
+ }
+
+ // Check if we received a valid AuthToken; if not, ignore it.
+ if (RC_FORBIDDEN == server_response_code) {
+ // Server says we're not authenticated.
+ ExtractAuthErrorFrom(message_text, results);
+ return false;
+ } else if (RC_REQUEST_OK == server_response_code) {
+ typedef vector<pair<string, string> > Tokens;
+ Tokens tokens;
+ SplitStringIntoKeyValuePairs(message_text, '=', '\n', &tokens);
+ for (Tokens::iterator i = tokens.begin(); i != tokens.end(); ++i) {
+ if ("accountType" == i->first) {
+ // We never authenticate an email as a hosted account.
+ DCHECK_EQ("GOOGLE", i->second);
+ results->signin = GMAIL_SIGNIN;
+ } else if ("email" == i->first) {
+ results->primary_email = i->second;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+// We need to call this explicitly when we need to obtain a long-lived session
+// token.
+bool GaiaAuthenticator::IssueAuthToken(AuthResults* results,
+ const string& service_id,
+ bool long_lived) {
+ // Use the provided Gaia server, but change the path to what V1 expects.
+ GURL url(gaia_url_); // Gaia server
+ GURL::Replacements repl;
+ // Needs to stay in scope till GURL is out of scope
+ string path(kGaiaV1IssueAuthTokenPath);
+ repl.SetPathStr(path);
+ url = url.ReplaceComponents(repl);
+
+ string post_body;
+ post_body += "LSID=";
+ post_body += CgiEscapeString(results->lsid);
+ post_body += "&service=" + service_id;
+ if (long_lived) {
+ post_body += "&Session=true";
+ }
+
+ unsigned long server_response_code;
+ string message_text;
+ if (!Post(url, post_body,
+ &server_response_code, &message_text)) {
+ return false;
+ }
+
+ // Check if we received a valid AuthToken; if not, ignore it.
+ if (RC_FORBIDDEN == server_response_code) {
+ // Server says we're not authenticated.
+ ExtractAuthErrorFrom(message_text, results);
+ return false;
+ } else if (RC_REQUEST_OK == server_response_code) {
+ // Note that the format of message_text is different from what is returned
+ // in the first request, or to the sole request that is made to Gaia V2.
+ // Specifically, the entire string is the AuthToken, and looks like:
+ // "<token>" rather than "AuthToken=<token>". Thus, we need not use
+ // ExtractTokensFrom(...), but simply assign the token.
+ int last_index = message_text.length() - 1;
+ if ('\n' == message_text[last_index])
+ message_text.erase(last_index);
+ results->auth_token = message_text;
+ return true;
+ }
+ return false;
+}
+
+// TOOD(sync): This passing around of AuthResults makes it really unclear who
+// actually owns the authentication state and when it is valid, but this is
+// endemic to this implementation. We should fix this.
+bool GaiaAuthenticator::AuthenticateService(const string& service_id,
+ const string& sid,
+ const string& lsid,
+ string* other_service_cookie) {
+ // Copy the AuthResults structure and overload the auth_token field
+ // in the copy, local_results, to mean the auth_token for service_id.
+ AuthResults local_results;
+ local_results.sid = sid;
+ local_results.lsid = lsid;
+
+ if (!IssueAuthToken(&local_results, service_id, true)) {
+ LOG(ERROR) << "[AUTH] Failed to obtain cookie for " << service_id;
+ return false;
+ }
+
+ swap(*other_service_cookie, local_results.auth_token);
+ return true;
+}
+
+// Helper method that extracts tokens from a successful reply, and saves them
+// in the right fields.
+void GaiaAuthenticator::ExtractTokensFrom(const string& response,
+ AuthResults* results) {
+ vector<pair<string, string> > tokens;
+ SplitStringIntoKeyValuePairs(response, '=', '\n', &tokens);
+ for (vector<pair<string, string> >::iterator i = tokens.begin();
+ i != tokens.end(); ++i) {
+ if (i->first == "SID") {
+ results->sid = i->second;
+ } else if (i->first == "LSID") {
+ results->lsid = i->second;
+ } else if (i->first == "Auth") {
+ results->auth_token = i->second;
+ }
+ }
+}
+
+// Helper method that extracts tokens from a failure response, and saves them
+// in the right fields.
+void GaiaAuthenticator::ExtractAuthErrorFrom(const string& response,
+ AuthResults* results) {
+ vector<pair<string, string> > tokens;
+ SplitStringIntoKeyValuePairs(response, '=', '\n', &tokens);
+ for (vector<pair<string, string> >::iterator i = tokens.begin();
+ i != tokens.end(); ++i) {
+ if (i->first == "Error") {
+ results->error_msg = i->second;
+ } else if (i->first == "Url") {
+ results->auth_error_url = i->second;
+ } else if (i->first == "CaptchaToken") {
+ results->captcha_token = i->second;
+ } else if (i->first == "CaptchaUrl") {
+ results->captcha_url = i->second;
+ }
+ }
+
+ // Convert string error messages to enum values. Each case has two different
+ // strings; the first one is the most current and the second one is
+ // deprecated, but available.
+ const string& error_msg = results->error_msg;
+ if (error_msg == "BadAuthentication" || error_msg == "badauth") {
+ results->auth_error = BadAuthentication;
+ } else if (error_msg == "NotVerified" || error_msg == "nv") {
+ results->auth_error = NotVerified;
+ } else if (error_msg == "TermsNotAgreed" || error_msg == "tna") {
+ results->auth_error = TermsNotAgreed;
+ } else if (error_msg == "Unknown" || error_msg == "unknown") {
+ results->auth_error = Unknown;
+ } else if (error_msg == "AccountDeleted" || error_msg == "adel") {
+ results->auth_error = AccountDeleted;
+ } else if (error_msg == "AccountDisabled" || error_msg == "adis") {
+ results->auth_error = AccountDisabled;
+ } else if (error_msg == "CaptchaRequired" || error_msg == "cr") {
+ results->auth_error = CaptchaRequired;
+ } else if (error_msg == "ServiceUnavailable" || error_msg == "ire") {
+ results->auth_error = ServiceUnavailable;
+ }
+}
+
+// Reset all stored credentials, perhaps in preparation for letting a different
+// user sign in.
+void GaiaAuthenticator::ResetCredentials() {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ AuthResults blank;
+ auth_results_ = blank;
+}
+
+void GaiaAuthenticator::SetUsernamePassword(const string& username,
+ const string& password) {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ auth_results_.password = password;
+ auth_results_.email = username;
+}
+
+void GaiaAuthenticator::SetUsername(const string& username) {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ auth_results_.email = username;
+}
+
+void GaiaAuthenticator::SetAuthToken(const string& auth_token,
+ SaveCredentials save) {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ auth_results_.auth_token = auth_token;
+ auth_results_.credentials_saved = save;
+}
+
+bool GaiaAuthenticator::Authenticate(const string& user_name,
+ const string& password,
+ SaveCredentials should_save_credentials,
+ bool synchronous, SignIn try_first) {
+ const string empty;
+ return Authenticate(user_name, password, should_save_credentials, synchronous,
+ empty, empty, try_first);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator.h b/chrome/browser/sync/engine/net/gaia_authenticator.h
new file mode 100644
index 0000000..e18984c
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator.h
@@ -0,0 +1,304 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Use this class to authenticate users with Gaia and access cookies sent
+// by the Gaia servers.
+//
+// Sample usage:
+// GaiaAuthenticator gaia_auth("User-Agent", SYNC_SERVICE_NAME,
+// browser_sync::kExternalGaiaUrl);
+// if (gaia_auth.Authenticate("email", "passwd", SAVE_IN_MEMORY_ONLY,
+// true)) { // Synchronous
+// // Do something with: gaia_auth.auth_token(), or gaia_auth.sid(),
+// // or gaia_auth.lsid()
+// }
+//
+// Sample asynchonous usage:
+// GaiaAuthenticator gaia_auth("User-Agent", SYNC_SERVICE_NAME,
+// browser_sync::kExternalGaiaUrl);
+// EventListenerHookup* hookup = NewListenerHookup(gaia_auth.channel(),
+// this, &OnAuthenticate);
+// gaia_auth.Authenticate("email", "passwd", true, false);
+// // OnAuthenticate() will get called with result;
+//
+// Credentials can also be preserved for subsequent requests, though these are
+// saved in plain-text in memory, and not very secure on client systems. The
+// email address associated with the Gaia account can be read; the password is
+// write-only.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/signin.h"
+#include "googleurl/src/gurl.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+static const char kGaiaUrl[] =
+ "https://www.google.com:443/accounts/ClientLogin";
+
+// Use of the following enum is odd. GaiaAuthenticator only looks at
+// and DONT_SAVE_CREDENTIALS and SAVE_IN_MEMORY_ONLY (PERSIST_TO_DISK is == to
+// SAVE_IN_MEMORY_ONLY for GaiaAuthenticator). The sync engine never uses
+// DONT_SAVE_CREDENTIALS. AuthWatcher does look in GaiaAuthenticator's results
+// object to decide if it should save credentials to disk. This currently
+// works so I'm leaving the odd dance alone.
+
+enum SaveCredentials {
+ DONT_SAVE_CREDENTIALS,
+ SAVE_IN_MEMORY_ONLY,
+ PERSIST_TO_DISK // Saved in both memory and disk
+};
+
+// Error codes from Gaia. These will be set correctly for both Gaia V1
+// (/ClientAuth) and V2 (/ClientLogin)
+enum AuthenticationError {
+ None = 0,
+ BadAuthentication = 1,
+ NotVerified = 2,
+ TermsNotAgreed = 3,
+ Unknown = 4,
+ AccountDeleted = 5,
+ AccountDisabled = 6,
+ CaptchaRequired = 7,
+ ServiceUnavailable = 8,
+ // Errors generated by this class not Gaia.
+ CredentialsNotSet = 9,
+ ConnectionUnavailable = 10
+};
+
+class GaiaAuthenticator;
+
+struct GaiaAuthEvent {
+ enum {
+ GAIA_AUTH_FAILED,
+ GAIA_AUTH_SUCCEEDED,
+ GAIA_AUTHENTICATOR_DESTROYED
+ }
+ what_happened;
+ AuthenticationError error;
+ const GaiaAuthenticator* authenticator;
+
+ // Lets us use GaiaAuthEvent as its own traits type in hookups.
+ typedef GaiaAuthEvent EventType;
+ static inline bool IsChannelShutdownEvent(const GaiaAuthEvent& event) {
+ return event.what_happened == GAIA_AUTHENTICATOR_DESTROYED;
+ }
+};
+
+// GaiaAuthenticator can be used to pass user credentials to Gaia and obtain
+// cookies set by the Gaia servers.
+class GaiaAuthenticator {
+ FRIEND_TEST(GaiaAuthenticatorTest, TestNewlineAtEndOfAuthTokenRemoved);
+ public:
+
+ // Since GaiaAuthenticator can be used for any service, or by any client, you
+ // must include a user-agent and a service-id when creating one. The
+ // user_agent is a short string used for simple log analysis. gaia_url is used
+ // to choose the server to authenticate with (e.g.
+ // http://www.google.com/accounts/ClientLogin).
+ GaiaAuthenticator(const std::string& user_agent,
+ const std::string& service_id,
+ const std::string& gaia_url);
+
+ virtual ~GaiaAuthenticator();
+
+ // Pass credentials to authenticate with, or use saved credentials via an
+ // overload. If authentication succeeds, you can retrieve the authentication
+ // token via the respective accessors. Returns a boolean indicating whether
+ // authentication succeeded or not.
+ bool Authenticate(const std::string& user_name, const std::string& password,
+ SaveCredentials should_save_credentials, bool synchronous,
+ const std::string& captcha_token,
+ const std::string& captcha_value,
+ SignIn try_first);
+
+ bool Authenticate(const std::string& user_name, const std::string& password,
+ SaveCredentials should_save_credentials, bool synchronous,
+ SignIn try_first);
+
+ bool AuthenticateService(const std::string& service_id,
+ const std::string& sid,
+ const std::string& lsid,
+ std::string* other_service_cookie);
+
+ // Resets all stored cookies to their default values.
+ void ResetCredentials();
+
+ void SetUsernamePassword(const std::string& username,
+ const std::string& password);
+
+ void SetUsername(const std::string& username);
+
+ void SetAuthToken(const std::string& auth_token, SaveCredentials);
+
+ struct AuthResults {
+ SaveCredentials credentials_saved;
+ std::string email;
+ std::string password;
+
+ // Fields that store various cookies.
+ std::string sid;
+ std::string lsid;
+ std::string auth_token;
+
+ std::string primary_email;
+
+ // Fields for items returned when authentication fails.
+ std::string error_msg;
+ enum AuthenticationError auth_error;
+ std::string auth_error_url;
+ std::string captcha_token;
+ std::string captcha_url;
+ SignIn signin;
+
+ AuthResults () : credentials_saved(DONT_SAVE_CREDENTIALS),
+ auth_error(None) { }
+ };
+
+ protected:
+
+ struct AuthParams {
+ GaiaAuthenticator* authenticator;
+ uint32 request_id;
+ SaveCredentials should_save_credentials;
+ std::string email;
+ std::string password;
+ std::string captcha_token;
+ std::string captcha_value;
+ SignIn try_first;
+ };
+
+ // mutex_ must be entered before calling this function.
+ AuthParams MakeParams(const std::string& user_name,
+ const std::string& password,
+ SaveCredentials should_save_credentials,
+ const std::string& captcha_token,
+ const std::string& captcha_value,
+ SignIn try_first);
+
+ // The real Authenticate implementations.
+ bool AuthenticateImpl(const AuthParams& params);
+ bool AuthenticateImpl(const AuthParams& params, AuthResults* results);
+ bool PerformGaiaRequest(const AuthParams& params, AuthResults* results);
+ bool LaunchAuthenticate(const AuthParams& params, bool synchronous);
+ static void *ThreadMain(void *arg);
+
+ // virtual for testing purposes
+ virtual bool Post(const GURL& url, const std::string& post_body,
+ unsigned long* response_code, std::string* response_body) {
+ return false;
+ }
+
+ // Caller should fill in results->LSID before calling. Result in
+ // results->primary_email.
+ bool LookupEmail(AuthResults* results);
+
+ public:
+ // Retrieve email
+ inline std::string email() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.email;
+ }
+
+ // Retrieve password
+ inline std::string password() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.password;
+ }
+
+ // Retrieve AuthToken, if previously authenticated; otherwise returns "".
+ inline std::string auth_token() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_token;
+ }
+
+ // Retrieve SID cookie. For details, see the Google Accounts documentation.
+ inline std::string sid() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.sid;
+ }
+
+ // Retrieve LSID cookie. For details, see the Google Accounts documentation.
+ inline std::string lsid() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.lsid;
+ }
+
+ // Get last authentication error.
+ inline enum AuthenticationError auth_error() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_error;
+ }
+
+ inline std::string auth_error_url() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_error_url;
+ }
+
+ inline std::string captcha_token() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.captcha_token;
+ }
+
+ inline std::string captcha_url() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.captcha_url;
+ }
+
+ inline AuthResults results() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_;
+ }
+
+ typedef EventChannel<GaiaAuthEvent, PThreadMutex> Channel;
+
+ inline Channel* channel() const {
+ return channel_;
+ }
+
+ private:
+ bool IssueAuthToken(AuthResults* results, const std::string& service_id,
+ bool long_lived_token);
+
+ // Helper method to parse response when authentication succeeds.
+ void ExtractTokensFrom(const std::string& response, AuthResults* results);
+ // Helper method to parse response when authentication fails.
+ void ExtractAuthErrorFrom(const std::string& response, AuthResults* results);
+
+ // Fields for the obvious data items.
+ const std::string user_agent_;
+ const std::string service_id_;
+ const std::string gaia_url_;
+
+ AuthResults auth_results_;
+
+ // When multiple async requests are running, only the one that started most
+ // recently updates the values.
+ //
+ // Note that even though this code was written to handle multiple requests
+ // simultaneously, the sync code issues auth requests one at a time.
+ uint32 request_count_;
+
+ Channel* channel_;
+
+ // Used to compute backoff time for next allowed authentication.
+ int delay_; // In seconds.
+ time_t next_allowed_auth_attempt_time_;
+ int early_auth_attempt_count_;
+
+ // Protects auth_results_, and request_count_.
+ mutable PThreadMutex mutex_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc b/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc
new file mode 100644
index 0000000..c7c6eb8
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+
+#include <string>
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "googleurl/src/gurl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace browser_sync {
+
+class GaiaAuthenticatorTest : public testing::Test { };
+
+class GaiaAuthMock : public GaiaAuthenticator {
+ public:
+ GaiaAuthMock() : GaiaAuthenticator("useragent",
+ "serviceid",
+ "http://gaia_url") {}
+ ~GaiaAuthMock() {}
+ protected:
+ bool Post(const GURL& url, const string& post_body,
+ unsigned long* response_code, string* response_body) {
+ *response_code = RC_REQUEST_OK;
+ response_body->assign("body\n");
+ return true;
+ }
+};
+
+TEST(GaiaAuthenticatorTest, TestNewlineAtEndOfAuthTokenRemoved) {
+ GaiaAuthMock mock_auth;
+ GaiaAuthenticator::AuthResults results;
+ EXPECT_TRUE(mock_auth.IssueAuthToken(&results, "sid", true));
+ EXPECT_EQ(0, results.auth_token.compare("body"));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/http_return.h b/chrome/browser/sync/engine/net/http_return.h
new file mode 100644
index 0000000..fd5167b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/http_return.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
+
+namespace browser_sync {
+enum HTTPReturnCode {
+ RC_REQUEST_OK = 200,
+ RC_UNAUTHORIZED = 401,
+ RC_FORBIDDEN = 403,
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
diff --git a/chrome/browser/sync/engine/net/openssl_init.cc b/chrome/browser/sync/engine/net/openssl_init.cc
new file mode 100644
index 0000000..afaf006
--- /dev/null
+++ b/chrome/browser/sync/engine/net/openssl_init.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// OpenSSL multi-threading initialization
+
+#include "chrome/browser/sync/engine/net/openssl_init.h"
+
+#include <openssl/crypto.h>
+
+#include "base/logging.h"
+#include "chrome/browser/sync/util/compat-pthread.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+// OpenSSL requires multithreading callbacks to be initialized prior to using
+// the library so that it can manage thread locking as necessary.
+
+// Dynamic lock type
+//
+// This needs to be a struct and in global scope because OpenSSL relies on some
+// macro magic.
+struct CRYPTO_dynlock_value {
+ PThreadMutex mutex;
+ void Lock() {
+ mutex.Lock();
+ }
+ void Unlock() {
+ mutex.Unlock();
+ }
+};
+
+namespace {
+
+// This array stores all of the mutexes available to OpenSSL
+PThreadMutex* mutex_buf = NULL;
+
+// OpenSSL mutex handling callback functions
+
+// OpenSSL Callback - Locks/unlocks the specified mutex held by OpenSSL.
+void OpenSslMutexLockControl(int mode, int n, const char* file, int line) {
+ if (mode & CRYPTO_LOCK) {
+ mutex_buf[n].Lock();
+ } else {
+ mutex_buf[n].Unlock();
+ }
+}
+
+// OpenSSL Callback - Returns the thread ID
+unsigned long OpenSslGetThreadID(void) {
+ return GetCurrentThreadId();
+}
+
+// Dynamic locking functions
+
+// Allocate a new lock
+struct CRYPTO_dynlock_value* dyn_create_function(const char* file, int line) {
+ return new CRYPTO_dynlock_value;
+}
+
+void dyn_lock_function(int mode, struct CRYPTO_dynlock_value* lock,
+ const char* file, int line) {
+ if (mode & CRYPTO_LOCK) {
+ lock->Lock();
+ } else {
+ lock->Unlock();
+ }
+}
+
+void dyn_destroy_function(struct CRYPTO_dynlock_value* lock,
+ const char* file, int line) {
+ delete lock;
+}
+
+} // namespace
+
+// We want to log the version of the OpenSSL library being used, in particular
+// for the case where it's dynamically linked. We want the version from the
+// library, not from the header files. It seems the OpenSSL folks haven't
+// bothered with an accessor for this, so we just pluck it out.
+#ifdef OS_WINDOWS
+// TODO(sync): Figure out how to get the SSL version string on Windows.
+const char* SSL_version_str = "UNKNOWN";
+#else
+extern const char* SSL_version_str;
+#endif
+
+namespace browser_sync {
+
+// Initializes the OpenSSL multithreading callbacks. This isn't thread-safe,
+// but it is called early enough that it doesn't matter.
+void InitOpenSslMultithreading() {
+ LOG(INFO) << "Using OpenSSL headers version " << OPENSSL_VERSION_TEXT
+ << ", lib version " << SSL_version_str;
+
+ if (mutex_buf)
+ return;
+
+ mutex_buf = new PThreadMutex[CRYPTO_num_locks()];
+ CHECK(NULL != mutex_buf);
+
+ // OpenSSL has only one single global set of callbacks, so this
+ // initialization must be done only once, even though the OpenSSL lib may be
+ // used by multiple modules (jingle jabber connections and P2P tunnels).
+ CRYPTO_set_id_callback(OpenSslGetThreadID);
+ CRYPTO_set_locking_callback(OpenSslMutexLockControl);
+
+ CRYPTO_set_dynlock_create_callback(dyn_create_function);
+ CRYPTO_set_dynlock_lock_callback(dyn_lock_function);
+ CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function);
+}
+
+// Cleans up the OpenSSL multithreading callbacks.
+void CleanupOpenSslMultithreading() {
+ if (!mutex_buf) {
+ return;
+ }
+
+ CRYPTO_set_dynlock_create_callback(NULL);
+ CRYPTO_set_dynlock_lock_callback(NULL);
+ CRYPTO_set_dynlock_destroy_callback(NULL);
+
+ CRYPTO_set_id_callback(NULL);
+ CRYPTO_set_locking_callback(NULL);
+
+ delete [] mutex_buf;
+ mutex_buf = NULL;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/openssl_init.h b/chrome/browser/sync/engine/net/openssl_init.h
new file mode 100644
index 0000000..8cd4558
--- /dev/null
+++ b/chrome/browser/sync/engine/net/openssl_init.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// OpenSSL multi-threading initialization
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
+
+namespace browser_sync {
+
+// Initializes the OpenSSL multithreading callbacks. Returns false on failure.
+void InitOpenSslMultithreading();
+
+// Cleans up the OpenSSL multithreading callbacks.
+void CleanupOpenSslMultithreading();
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
diff --git a/chrome/browser/sync/engine/net/server_connection_manager.cc b/chrome/browser/sync/engine/net/server_connection_manager.cc
new file mode 100644
index 0000000..42b380b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/server_connection_manager.cc
@@ -0,0 +1,375 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+
+#include <errno.h>
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/net/url_translator.h"
+#include "chrome/browser/sync/engine/syncapi.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+using std::ostream;
+using std::string;
+using std::vector;
+
+static const char kSyncServerSyncPath[] = "/command/";
+
+// At the /time/ path of the sync server, we expect to find a very simple
+// time of day service that we can use to synchronize the local clock with
+// server time.
+static const char kSyncServerGetTimePath[] = "/time";
+
+static const ServerConnectionEvent shutdown_event =
+ { ServerConnectionEvent::SHUTDOWN, HttpResponse::CONNECTION_UNAVAILABLE,
+ false };
+
+typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+struct ServerConnectionManager::PlatformMembers {
+ explicit PlatformMembers(const string& user_agent) { }
+ void Kill() { }
+ void Reset() { }
+ void Reset(MutexLock*) { }
+};
+
+bool ServerConnectionManager::Post::ReadBufferResponse(
+ string* buffer_out, HttpResponse* response, bool require_response) {
+ if (RC_REQUEST_OK != response->response_code) {
+ response->server_status = HttpResponse::SYNC_SERVER_ERROR;
+ return false;
+ }
+
+ if (require_response && (1 > response->content_length))
+ return false;
+
+ const int64 bytes_read = ReadResponse(buffer_out, response->content_length);
+ if (bytes_read != response->content_length) {
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+bool ServerConnectionManager::Post::ReadDownloadResponse(
+ HttpResponse* response, string* buffer_out) {
+ const int64 bytes_read = ReadResponse(buffer_out, response->content_length);
+
+ if (bytes_read != response->content_length) {
+ LOG(ERROR) << "Mismatched content lengths, server claimed " <<
+ response->content_length << ", but sent " << bytes_read;
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+namespace {
+ string StripTrailingSlash(const string& s) {
+ int stripped_end_pos = s.size();
+ if (s.at(stripped_end_pos - 1) == '/') {
+ stripped_end_pos = stripped_end_pos - 1;
+ }
+
+ return s.substr(0, stripped_end_pos);
+ }
+} // namespace
+
+// TODO(chron): Use a GURL instead of string concatenation.
+ string ServerConnectionManager::Post::MakeConnectionURL(
+ const string& sync_server, const string& path,
+ bool use_ssl) const {
+ string connection_url = (use_ssl ? "https://" : "http://");
+ connection_url += sync_server;
+ connection_url = StripTrailingSlash(connection_url);
+ connection_url += path;
+
+ return connection_url;
+}
+
+int ServerConnectionManager::Post::ReadResponse(string* out_buffer,
+ int length) {
+ int bytes_read = buffer_.length();
+ CHECK(length <= bytes_read);
+ out_buffer->assign(buffer_);
+ return bytes_read;
+}
+
+// A helper class that automatically notifies when the status changes:
+struct WatchServerStatus {
+ WatchServerStatus(ServerConnectionManager* conn_mgr, HttpResponse* response)
+ : conn_mgr_(conn_mgr), response_(response),
+ reset_count_(conn_mgr->reset_count_),
+ server_reachable_(conn_mgr->server_reachable_) {
+ response->server_status = conn_mgr->server_status_;
+ }
+ ~WatchServerStatus() {
+ // Don't update the status of the connection if it has been reset.
+ // TODO(timsteele): Do we need this? Is this used by multiple threads?
+ if (reset_count_ != conn_mgr_->reset_count_)
+ return;
+ if (conn_mgr_->server_status_ != response_->server_status) {
+ conn_mgr_->server_status_ = response_->server_status;
+ conn_mgr_->NotifyStatusChanged();
+ return;
+ }
+ // Notify if we've gone on or offline.
+ if (server_reachable_ != conn_mgr_->server_reachable_)
+ conn_mgr_->NotifyStatusChanged();
+ }
+ ServerConnectionManager* const conn_mgr_;
+ HttpResponse* const response_;
+ // TODO(timsteele): Should this be Barrier:AtomicIncrement?
+ base::subtle::AtomicWord reset_count_;
+ bool server_reachable_;
+};
+
+ServerConnectionManager::ServerConnectionManager(
+ const string& server, int port, bool use_ssl, const string& user_agent,
+ const string& client_id)
+ : sync_server_(server), sync_server_port_(port),
+ channel_(new Channel(shutdown_event)),
+ server_status_(HttpResponse::NONE), server_reachable_(false),
+ client_id_(client_id), use_ssl_(use_ssl),
+ user_agent_(user_agent),
+ platform_(new PlatformMembers(user_agent)),
+ reset_count_(0), error_count_(0),
+ terminate_all_io_(false),
+ proto_sync_path_(kSyncServerSyncPath),
+ get_time_path_(kSyncServerGetTimePath) {
+}
+
+ServerConnectionManager::~ServerConnectionManager() {
+ delete channel_;
+ delete platform_;
+ shutdown_event_mutex_.Lock();
+ int result = pthread_cond_broadcast(&shutdown_event_condition_.condvar_);
+ shutdown_event_mutex_.Unlock();
+ if (result) {
+ LOG(ERROR) << "Error signaling shutdown_event_condition_ last error = "
+ << result;
+ }
+}
+
+void ServerConnectionManager::NotifyStatusChanged() {
+ ServerConnectionEvent event = { ServerConnectionEvent::STATUS_CHANGED,
+ server_status_,
+ server_reachable_ };
+ channel_->NotifyListeners(event);
+}
+
+// Uses currently set auth token. Set by AuthWatcher.
+bool ServerConnectionManager::PostBufferWithCachedAuth(
+ const PostBufferParams* params) {
+ string path =
+ MakeSyncServerPath(proto_sync_path(), MakeSyncQueryString(client_id_));
+ return PostBufferToPath(params, path, auth_token_);
+}
+
+bool ServerConnectionManager::PostBufferWithAuth(const PostBufferParams* params,
+ const string& auth_token) {
+ string path = MakeSyncServerPath(proto_sync_path(),
+ MakeSyncQueryString(client_id_));
+
+ return PostBufferToPath(params, path, auth_token);
+}
+
+bool ServerConnectionManager::PostBufferToPath(const PostBufferParams* params,
+ const string& path,
+ const string& auth_token) {
+ WatchServerStatus watcher(this, params->response);
+ scoped_ptr<Post> post(MakePost());
+ post->set_timing_info(params->timing_info);
+ bool ok = post->Init(path.c_str(), auth_token, params->buffer_in,
+ params->response);
+
+ if (!ok || RC_REQUEST_OK != params->response->response_code) {
+ IncrementErrorCount();
+ return false;
+ }
+
+ if (post->ReadBufferResponse(params->buffer_out, params->response, true)) {
+ params->response->server_status = HttpResponse::SERVER_CONNECTION_OK;
+ server_reachable_ = true;
+ return true;
+ }
+ return false;
+}
+
+bool ServerConnectionManager::CheckTime(int32* out_time) {
+ // Verify that the server really is reachable by checking the time. We need
+ // to do this because of wifi interstitials that intercept messages from the
+ // client and return HTTP OK instead of a redirect.
+ HttpResponse response;
+ WatchServerStatus watcher(this, &response);
+ string post_body = "command=get_time";
+
+ // We only retry the CheckTime call if we were reset during the CheckTime
+ // attempt. We only try 3 times in case we're in a reset loop elsewhere.
+ base::subtle::AtomicWord start_reset_count = reset_count_ - 1;
+ for (int i = 0 ; i < 3 && start_reset_count != reset_count_ ; i++) {
+ start_reset_count = reset_count_;
+ scoped_ptr<Post> post(MakePost());
+
+ // Note that the server's get_time path doesn't require authentication.
+ string get_time_path =
+ MakeSyncServerPath(kSyncServerGetTimePath, post_body);
+ LOG(INFO) << "Requesting get_time from:" << get_time_path;
+
+ string blank_post_body;
+ bool ok = post->Init(get_time_path.c_str(), blank_post_body,
+ blank_post_body, &response);
+ if (!ok) {
+ LOG(INFO) << "Unable to check the time";
+ continue;
+ }
+ string time_response;
+ time_response.resize(response.content_length);
+ ok = post->ReadDownloadResponse(&response, &time_response);
+ if (!ok || string::npos !=
+ time_response.find_first_not_of("0123456789")) {
+ LOG(ERROR) << "unable to read a non-numeric response from get_time:"
+ << time_response;
+ continue;
+ }
+ *out_time = atoi(time_response.c_str());
+ LOG(INFO) << "Server was reachable.";
+ return true;
+ }
+ IncrementErrorCount();
+ return false;
+}
+
+bool ServerConnectionManager::IsServerReachable() {
+ int32 time;
+ return CheckTime(&time);
+}
+
+bool ServerConnectionManager::IsUserAuthenticated() {
+ return IsGoodReplyFromServer(server_status_);
+}
+
+bool ServerConnectionManager::CheckServerReachable() {
+ const bool server_is_reachable = IsServerReachable();
+ if (server_reachable_ != server_is_reachable) {
+ server_reachable_ = server_is_reachable;
+ NotifyStatusChanged();
+ }
+ return server_is_reachable;
+}
+
+void ServerConnectionManager::kill() {
+ {
+ MutexLock lock(&terminate_all_io_mutex_);
+ terminate_all_io_ = true;
+ }
+ platform_->Kill();
+ shutdown_event_mutex_.Lock();
+ int result = pthread_cond_broadcast(&shutdown_event_condition_.condvar_);
+ shutdown_event_mutex_.Unlock();
+ if (result) {
+ LOG(ERROR) << "Error signaling shutdown_event_condition_ last error = "
+ << result;
+ }
+}
+
+void ServerConnectionManager::ResetAuthStatus() {
+ ResetConnection();
+ server_status_ = HttpResponse::NONE;
+ NotifyStatusChanged();
+}
+
+void ServerConnectionManager::ResetConnection() {
+ base::subtle::NoBarrier_AtomicIncrement(&reset_count_, 1);
+ platform_->Reset();
+}
+
+bool ServerConnectionManager::IncrementErrorCount() {
+#ifdef OS_WINDOWS
+ error_count_mutex_.Lock();
+ error_count_++;
+
+ if (error_count_ > kMaxConnectionErrorsBeforeReset) {
+ error_count_ = 0;
+
+ // Be careful with this mutex because calling out to other methods can
+ // result in being called back. Unlock it here to prevent any potential
+ // double-acquisitions.
+ error_count_mutex_.Unlock();
+
+ if (!IsServerReachable()) {
+ LOG(WARNING) << "Too many connection failures, server is not reachable. "
+ << "Resetting connections.";
+ ResetConnection();
+ } else {
+ LOG(WARNING) << "Multiple connection failures while server is reachable.";
+ }
+ return false;
+ }
+
+ error_count_mutex_.Unlock();
+ return true;
+#endif
+ return true;
+}
+
+void ServerConnectionManager::SetServerParameters(const string& server_url,
+ int port, bool use_ssl) {
+ {
+ ParametersLock lock(&server_parameters_mutex_);
+ sync_server_ = server_url;
+ sync_server_port_ = port;
+ use_ssl_ = use_ssl;
+ }
+ platform_->Reset();
+}
+
+// Returns the current server parameters in server_url and port.
+void ServerConnectionManager::GetServerParameters(string* server_url,
+ int* port, bool* use_ssl) {
+ ParametersLock lock(&server_parameters_mutex_);
+ if (server_url != NULL)
+ *server_url = sync_server_;
+ if (port != NULL)
+ *port = sync_server_port_;
+ if (use_ssl != NULL)
+ *use_ssl = use_ssl_;
+}
+
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::DirectoryManager* manager,
+ const PathString &share) {
+ syncable::ScopedDirLookup dir(manager, share);
+ if (!dir.good()) {
+ LOG(INFO) << "Dir lookup failed";
+ return false;
+ }
+ string birthday = dir->store_birthday();
+ if (!birthday.empty())
+ csm->set_store_birthday(birthday);
+ csm->set_share(ToUTF8(share).get_string());
+ return true;
+}
+
+} // namespace browser_sync
+
+std::ostream& operator << (std::ostream& s,
+ const struct browser_sync::HttpResponse& hr) {
+ s << " Response Code (bogus on error): " << hr.response_code;
+ s << " Content-Length (bogus on error): " << hr.content_length;
+ s << " Server Status: " << hr.server_status;
+ return s;
+}
diff --git a/chrome/browser/sync/engine/net/server_connection_manager.h b/chrome/browser/sync/engine/net/server_connection_manager.h
new file mode 100644
index 0000000..8093d45
--- /dev/null
+++ b/chrome/browser/sync/engine/net/server_connection_manager.h
@@ -0,0 +1,345 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/signin.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+class DirectoryManager;
+}
+
+namespace sync_pb {
+class ClientToServerMessage;
+};
+
+struct RequestTimingInfo;
+
+namespace browser_sync {
+
+class ClientToServerMessage;
+
+// How many connection errors are accepted before network handles are closed
+// and reopened.
+static const int32 kMaxConnectionErrorsBeforeReset = 10;
+
+// HttpResponse gathers the relevant output properties of an HTTP request.
+// Depending on the value of the server_status code, response_code, and
+// content_length may not be valid.
+struct HttpResponse {
+ enum ServerConnectionCode {
+ // For uninitialized state.
+ NONE,
+
+ // CONNECTION_UNAVAILABLE is returned when InternetConnect() fails.
+ CONNECTION_UNAVAILABLE,
+
+ // IO_ERROR is returned when reading/writing to a buffer has failed.
+ IO_ERROR,
+
+ // SYNC_SERVER_ERROR is returned when the HTTP status code indicates that
+ // a non-auth error has occured.
+ SYNC_SERVER_ERROR,
+
+ // SYNC_AUTH_ERROR is returned when the HTTP status code indicates that an
+ // auth error has occured (i.e. a 401)
+ SYNC_AUTH_ERROR,
+
+ // All the following connection codes are valid responses from the server.
+ // Means the server is up. If you update this list, be sure to also update
+ // IsGoodReplyFromServer().
+
+ // SERVER_CONNECTION_OK is returned when request was handled correctly.
+ SERVER_CONNECTION_OK,
+
+ // RETRY is returned when a Commit request fails with a RETRY response from
+ // the server.
+ //
+ // TODO(idana): the server no longer returns RETRY so we should remove this
+ // value.
+ RETRY,
+ };
+
+ // The HTTP Status code.
+ int64 response_code;
+
+ // The value of the Content-length header.
+ int64 content_length;
+
+ // The size of a download request's payload.
+ int64 payload_length;
+
+ // Identifies the type of failure, if any.
+ ServerConnectionCode server_status;
+};
+
+inline bool IsGoodReplyFromServer(HttpResponse::ServerConnectionCode code) {
+ return code >= HttpResponse::SERVER_CONNECTION_OK;
+}
+
+struct ServerConnectionEvent {
+ enum { SHUTDOWN, STATUS_CHANGED } what_happened;
+ HttpResponse::ServerConnectionCode connection_code;
+ bool server_reachable;
+
+ // Traits.
+ typedef ServerConnectionEvent EventType;
+ static inline bool IsChannelShutdownEvent(const EventType& event) {
+ return SHUTDOWN == event.what_happened;
+ }
+};
+
+struct WatchServerStatus;
+
+// Use this class to interact with the sync server.
+// The ServerConnectionManager currently supports POSTing protocol buffers.
+//
+// *** This class is thread safe. In fact, you should consider creating only
+// one instance for every server that you need to talk to.
+class ServerConnectionManager {
+ friend class Post;
+ friend struct WatchServerStatus;
+ public:
+ typedef EventChannel<ServerConnectionEvent, PThreadMutex> Channel;
+
+ // The lifetime of the GaiaAuthenticator must be longer than the instance
+ // of the ServerConnectionManager that you're creating.
+ ServerConnectionManager(const std::string& server, int port, bool use_ssl,
+ const std::string& user_agent,
+ const std::string& client_id);
+
+ virtual ~ServerConnectionManager();
+
+ // buffer_in - will be POSTed
+ // buffer_out - string will be overwritten with response
+ struct PostBufferParams {
+ const std::string& buffer_in;
+ std::string* buffer_out;
+ HttpResponse* response;
+ RequestTimingInfo* timing_info;
+ };
+
+ // Abstract class providing network-layer functionality to the
+ // ServerConnectionManager. Subclasses implement this using an HTTP stack of
+ // their choice.
+ class Post {
+ public:
+ explicit Post(ServerConnectionManager* scm) : scm_(scm), timing_info_(0) {
+ }
+ virtual ~Post() { }
+
+ // Called to initialize and perform an HTTP POST.
+ virtual bool Init(const char* path, const std::string& auth_token,
+ const std::string& payload,
+ HttpResponse* response) = 0;
+
+ bool ReadBufferResponse(std::string* buffer_out, HttpResponse* response,
+ bool require_response);
+ bool ReadDownloadResponse(HttpResponse* response, std::string* buffer_out);
+
+ void set_timing_info(RequestTimingInfo* timing_info) {
+ timing_info_ = timing_info;
+ }
+ RequestTimingInfo* timing_info() { return timing_info_; }
+
+ protected:
+ std::string MakeConnectionURL(const std::string& sync_server,
+ const std::string& path, bool use_ssl) const;
+
+ void GetServerParams(std::string* server, int* server_port,
+ bool* use_ssl) {
+ ServerConnectionManager::ParametersLock lock(
+ &scm_->server_parameters_mutex_);
+ server->assign(scm_->sync_server_);
+ *server_port = scm_->sync_server_port_;
+ *use_ssl = scm_->use_ssl_;
+ }
+
+ std::string buffer_;
+ ServerConnectionManager* scm_;
+
+ private:
+ int ReadResponse(void* buffer, int length);
+ int ReadResponse(std::string* buffer, int length);
+ RequestTimingInfo* timing_info_;
+ };
+
+ // POSTS buffer_in and reads a response into buffer_out. Uses our currently
+ // set auth token in our headers.
+ //
+ // Returns true if executed successfully.
+ virtual bool PostBufferWithCachedAuth(const PostBufferParams* params);
+
+ // POSTS buffer_in and reads a response into buffer_out. Add a specific auth
+ // token to http headers.
+ //
+ // Returns true if executed successfully.
+ virtual bool PostBufferWithAuth(const PostBufferParams* params,
+ const std::string& auth_token);
+
+ // Checks the time on the server. Returns false if the request failed. |time|
+ // is an out parameter that stores the value returned from the server.
+ virtual bool CheckTime(int32* out_time);
+
+ // Returns true if sync_server_ is reachable. This method verifies that the
+ // server is pingable and that traffic can be sent to and from it.
+ virtual bool IsServerReachable();
+
+ // Returns true if user has been successfully authenticated.
+ virtual bool IsUserAuthenticated();
+
+ // Updates status and broadcasts events on change.
+ bool CheckServerReachable();
+
+ // Signal the shutdown event to notify listeners.
+ virtual void kill();
+
+ inline Channel* channel() const { return channel_; }
+
+ inline std::string user_agent() const { return user_agent_; }
+
+ inline HttpResponse::ServerConnectionCode server_status() const {
+ return server_status_;
+ }
+
+ inline bool server_reachable() const { return server_reachable_; }
+
+ void ResetAuthStatus();
+
+ void ResetConnection();
+
+ void NotifyStatusChanged();
+
+ const std::string client_id() const { return client_id_; }
+
+ void SetDomainFromSignIn(SignIn signin_type, const std::string& signin);
+
+ // This changes the server info used by the connection manager. This allows
+ // a single client instance to talk to different backing servers. This is
+ // typically called during / after authentication so that the server url
+ // can be a function of the user's login id. A side effect of this call is
+ // that ResetConnection is called.
+ void SetServerParameters(const std::string& server_url, int port,
+ bool use_ssl);
+
+ // Returns the current server parameters in server_url, port and use_ssl.
+ void GetServerParameters(std::string* server_url, int* port, bool* use_ssl);
+
+ bool terminate_all_io() const {
+ PThreadScopedLock<PThreadMutex> lock(&terminate_all_io_mutex_);
+ return terminate_all_io_;
+ }
+
+ // Factory method to create a Post object we can use for communication with
+ // the server.
+ virtual Post* MakePost() {
+ return NULL; // For testing.
+ };
+
+ void set_auth_token(const std::string& auth_token) {
+ auth_token_.assign(auth_token);
+ }
+
+ protected:
+
+ PThreadMutex shutdown_event_mutex_;
+ PThreadCondVar shutdown_event_condition_;
+
+ // Protects access to sync_server_, sync_server_port_ and use_ssl_:
+ mutable PThreadMutex server_parameters_mutex_;
+ typedef PThreadScopedLock<PThreadMutex> ParametersLock;
+
+ // The sync_server_ is the server that requests will be made to.
+ std::string sync_server_;
+
+ // The sync_server_port_ is the port that HTTP requests will be made on.
+ int sync_server_port_;
+
+ // The unique id of the user's client.
+ const std::string client_id_;
+
+ // The user-agent string for HTTP.
+ std::string user_agent_;
+
+ // Indicates whether or not requests should be made using HTTPS.
+ bool use_ssl_;
+
+ // The paths we post to.
+ mutable PThreadMutex path_mutex_;
+ typedef PThreadScopedLock<PThreadMutex> ScopedPathLock;
+
+ std::string proto_sync_path_;
+ std::string get_time_path_;
+
+ // The auth token to use in authenticated requests. Set by the AuthWatcher.
+ std::string auth_token_;
+
+ inline std::string proto_sync_path() const {
+ ScopedPathLock lock(&path_mutex_);
+ return proto_sync_path_;
+ }
+ std::string get_time_path() const {
+ ScopedPathLock lock(&path_mutex_);
+ return get_time_path_;
+ }
+
+ // Called wherever a failure should be taken as an indication that we may
+ // be experiencing connection difficulties.
+ virtual bool IncrementErrorCount();
+ mutable PThreadMutex error_count_mutex_; // Protects error_count_
+ int error_count_; // Tracks the number of connection errors.
+
+ protected:
+ Channel* const channel_;
+ // Volatile so various threads can call server_status() without
+ // synchronization.
+ volatile HttpResponse::ServerConnectionCode server_status_;
+ bool server_reachable_;
+
+ struct PlatformMembers; // Contains platform specific member vars.
+ PlatformMembers* const platform_;
+
+ // A counter that is incremented everytime ResetAuthStatus() is called.
+ volatile base::subtle::AtomicWord reset_count_;
+
+ // NOTE: Tests rely on this protected function being virtual.
+ //
+ // Internal PostBuffer base function.
+ virtual bool PostBufferToPath(const PostBufferParams*,
+ const std::string& path,
+ const std::string& auth_token);
+
+ private:
+ mutable PThreadMutex terminate_all_io_mutex_;
+ bool terminate_all_io_; // when set to true, terminate all connections asap
+ DISALLOW_COPY_AND_ASSIGN(ServerConnectionManager);
+};
+
+// Fills a ClientToServerMessage with the appropriate share and birthday
+// settings.
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::DirectoryManager* manager,
+ const PathString &share);
+
+} // namespace browser_sync
+
+std::ostream& operator<<(std::ostream& s,
+ const struct browser_sync::HttpResponse& hr);
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
diff --git a/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc
new file mode 100644
index 0000000..19981de
--- /dev/null
+++ b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/syncapi_server_connection_manager.h"
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/syncapi.h"
+
+using browser_sync::HttpResponse;
+using std::string;
+
+namespace sync_api {
+
+bool SyncAPIBridgedPost::Init(const char* path, const string& auth_token,
+ const string& payload, HttpResponse* response) {
+ string sync_server;
+ int sync_server_port = 0;
+ bool use_ssl = false;
+ GetServerParams(&sync_server, &sync_server_port, &use_ssl);
+ std::string connection_url = MakeConnectionURL(sync_server, path, use_ssl);
+
+ HttpPostProviderInterface* http = factory_->Create();
+ http->SetUserAgent(scm_->user_agent().c_str());
+ http->SetURL(connection_url.c_str(), sync_server_port);
+
+ if (!auth_token.empty()) {
+ string headers = "Authorization: GoogleLogin auth=" + auth_token;
+ http->SetExtraRequestHeaders(headers.c_str());
+ }
+
+ // Must be octet-stream, or the payload may be parsed for a cookie.
+ http->SetPostPayload("application/octet-stream", payload.length(),
+ payload.data());
+
+ // Issue the POST, blocking until it finishes.
+ int os_error_code = 0;
+ int response_code = 0;
+ if (!http->MakeSynchronousPost(&os_error_code, &response_code)) {
+ LOG(INFO) << "Http POST failed, error returns: " << os_error_code;
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+
+ // We got a server response, copy over response codes and content.
+ response->response_code = response_code;
+ response->content_length =
+ static_cast<int64>(http->GetResponseContentLength());
+ response->payload_length =
+ static_cast<int64>(http->GetResponseContentLength());
+ if (response->response_code < 400)
+ response->server_status = HttpResponse::SERVER_CONNECTION_OK;
+ else if (response->response_code == browser_sync::RC_UNAUTHORIZED)
+ response->server_status = HttpResponse::SYNC_AUTH_ERROR;
+ else
+ response->server_status = HttpResponse::SYNC_SERVER_ERROR;
+
+ // Write the content into our buffer.
+ buffer_.assign(http->GetResponseContent(), http->GetResponseContentLength());
+
+ // We're done with the HttpPostProvider.
+ factory_->Destroy(http);
+ return true;
+}
+
+SyncAPIServerConnectionManager::~SyncAPIServerConnectionManager() {
+ delete post_provider_factory_;
+}
+
+void SyncAPIServerConnectionManager::SetHttpPostProviderFactory(
+ HttpPostProviderFactory* factory) {
+ if (post_provider_factory_)
+ delete post_provider_factory_;
+ post_provider_factory_ = factory;
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h
new file mode 100644
index 0000000..84a355e
--- /dev/null
+++ b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
+
+#include <string>
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+
+namespace sync_api {
+
+class HttpPostProviderFactory;
+
+// This provides HTTP Post functionality through the interface provided
+// to the sync API by the application hosting the syncer backend.
+class SyncAPIBridgedPost :
+ public browser_sync::ServerConnectionManager::Post {
+ public:
+ SyncAPIBridgedPost(browser_sync::ServerConnectionManager* scm,
+ HttpPostProviderFactory* factory)
+ : Post(scm), factory_(factory) {
+ }
+
+ virtual ~SyncAPIBridgedPost() { }
+
+ virtual bool Init(const char* path,
+ const std::string& auth_token,
+ const std::string& payload,
+ browser_sync::HttpResponse* response);
+
+ private:
+ // Pointer to the factory we use for creating HttpPostProviders. We do not
+ // own |factory_|.
+ HttpPostProviderFactory* factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncAPIBridgedPost);
+};
+
+// A ServerConnectionManager subclass used by the syncapi layer. We use a
+// subclass so that we can override MakePost() to generate a POST object using
+// an instance of the HttpPostProviderFactory class.
+class SyncAPIServerConnectionManager :
+ public browser_sync::ServerConnectionManager {
+ public:
+ SyncAPIServerConnectionManager(const std::string& server,
+ int port,
+ bool use_ssl,
+ const std::string& client_version,
+ const std::string& client_id)
+ : ServerConnectionManager(server, port, use_ssl, client_version,
+ client_id),
+ post_provider_factory_(NULL) {
+ }
+
+ virtual ~SyncAPIServerConnectionManager();
+
+ // This method gives ownership of |factory| to |this|.
+ void SetHttpPostProviderFactory(HttpPostProviderFactory* factory);
+ protected:
+ virtual Post* MakePost() {
+ return new SyncAPIBridgedPost(this, post_provider_factory_);
+ }
+ private:
+ // A factory creating concrete HttpPostProviders for use whenever we need to
+ // issue a POST to sync servers.
+ HttpPostProviderFactory* post_provider_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncAPIServerConnectionManager);
+};
+
+} // namespace sync_api
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
diff --git a/chrome/browser/sync/engine/net/url_translator.cc b/chrome/browser/sync/engine/net/url_translator.cc
new file mode 100644
index 0000000..0931c36
--- /dev/null
+++ b/chrome/browser/sync/engine/net/url_translator.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the definition of a few helper functions used for generating sync
+// URLs.
+
+#include "chrome/browser/sync/engine/net/url_translator.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "net/base/escape.h"
+
+using std::string;
+
+namespace browser_sync {
+
+namespace {
+// Parameters that the server understands. (here, a-Z)
+const char kParameterAuthToken[] = "auth";
+const char kParameterClientID[] = "client_id";
+}
+
+// Convenience wrappers around CgiEscapePath().
+string CgiEscapeString(const char* src) {
+ return CgiEscapeString(string(src));
+}
+
+string CgiEscapeString(const string& src) {
+ return EscapePath(src);
+}
+
+// This method appends the query string to the sync server path.
+string MakeSyncServerPath(const string& path, const string& query_string) {
+ string result = path;
+ result.append("?");
+ result.append(query_string);
+ return result;
+}
+
+string MakeSyncQueryString(const string& client_id) {
+ string query;
+ query += kParameterClientID;
+ query += "=" + CgiEscapeString(client_id);
+ return query;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/url_translator.h b/chrome/browser/sync/engine/net/url_translator.h
new file mode 100644
index 0000000..717e15b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/url_translator.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the declaration of a few helper functions used for generating sync
+// URLs.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
+
+#include <string>
+
+namespace browser_sync {
+
+// Convenience wrappers around CgiEscapePath(), used by gaia_auth.
+std::string CgiEscapeString(const char* src);
+std::string CgiEscapeString(const std::string& src);
+
+// This method appends the query string to the sync server path.
+std::string MakeSyncServerPath(const std::string& path,
+ const std::string& query_string);
+
+std::string MakeSyncQueryString(const std::string& client_id);
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
diff --git a/chrome/browser/sync/engine/post_commit_message_command.cc b/chrome/browser/sync/engine/post_commit_message_command.cc
new file mode 100644
index 0000000..3807607
--- /dev/null
+++ b/chrome/browser/sync/engine/post_commit_message_command.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/post_commit_message_command.h"
+
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+PostCommitMessageCommand::PostCommitMessageCommand() {}
+PostCommitMessageCommand::~PostCommitMessageCommand() {}
+
+void PostCommitMessageCommand::ExecuteImpl(SyncerSession *session) {
+ if (session->commit_ids_empty())
+ return; // nothing to commit
+ ClientToServerResponse response;
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good())
+ return;
+ if (!SyncerProtoUtil::PostClientToServerMessage(session->commit_message(),
+ &response, session)) {
+ // None of our changes got through, let's clear sync flags and wait for
+ // another list update.
+ SyncerStatus status(session);
+ status.increment_consecutive_problem_commits();
+ status.increment_consecutive_errors();
+ syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ // TODO(sync): why set this flag, it seems like a bad side-effect?
+ const vector<syncable::Id>& commit_ids = session->commit_ids();
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
+ entry.Put(syncable::SYNCING, false);
+ }
+ return;
+ } else {
+ session->set_item_committed();
+ }
+ session->set_commit_response(response);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/post_commit_message_command.h b/chrome/browser/sync/engine/post_commit_message_command.h
new file mode 100644
index 0000000..87aa4d7
--- /dev/null
+++ b/chrome/browser/sync/engine/post_commit_message_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+class PostCommitMessageCommand : public SyncerCommand {
+ public:
+ PostCommitMessageCommand();
+ virtual ~PostCommitMessageCommand();
+
+ virtual void ExecuteImpl(SyncerSession *session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
diff --git a/chrome/browser/sync/engine/process_commit_response_command.cc b/chrome/browser/sync/engine/process_commit_response_command.cc
new file mode 100644
index 0000000..6a2a177
--- /dev/null
+++ b/chrome/browser/sync/engine/process_commit_response_command.cc
@@ -0,0 +1,374 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/process_commit_response_command.h"
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+using syncable::MutableEntry;
+using syncable::Entry;
+using syncable::Name;
+using syncable::SyncName;
+using syncable::DBName;
+
+using std::set;
+using std::vector;
+
+using syncable::BASE_VERSION;
+using syncable::GET_BY_ID;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::PARENT_ID;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SYNCER;
+using syncable::SYNCING;
+
+namespace browser_sync {
+
+void IncrementErrorCounters(SyncerStatus status) {
+ status.increment_consecutive_problem_commits();
+ status.increment_consecutive_errors();
+}
+void ResetErrorCounters(SyncerStatus status) {
+ status.zero_consecutive_problem_commits();
+ status.zero_consecutive_errors();
+}
+
+ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
+ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
+
+void ProcessCommitResponseCommand::ModelChangingExecuteImpl(
+ SyncerSession *session) {
+ // TODO(sync): This function returns if it sees problems. We probably want
+ // to flag the need for an update or similar.
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ const ClientToServerResponse& response = session->commit_response();
+ const vector<syncable::Id>& commit_ids = session->commit_ids();
+
+ // TODO(sync): move counters out of here.
+ SyncerStatus status(session);
+
+ if (!response.has_commit()) {
+ // TODO(sync): What if we didn't try to commit anything?
+ LOG(WARNING) << "Commit response has no commit body!";
+ IncrementErrorCounters(status);
+ return;
+ }
+
+ const CommitResponse& cr = response.commit();
+ int commit_count = commit_ids.size();
+ if (cr.entryresponse_size() != commit_count) {
+ LOG(ERROR) << "Commit response has wrong number of entries! Expected:" <<
+ commit_count << " Got:" << cr.entryresponse_size();
+ for (int i = 0 ; i < cr.entryresponse_size() ; i++) {
+ LOG(ERROR) << "Response #" << i << " Value: " <<
+ cr.entryresponse(i).response_type();
+ if (cr.entryresponse(i).has_error_message())
+ LOG(ERROR) << " " << cr.entryresponse(i).error_message();
+ }
+ IncrementErrorCounters(status);
+ return;
+ }
+
+ // If we try to commit a parent and child together and the parent conflicts
+ // the child will have a bad parent causing an error. As this is not a
+ // critical error, we trap it and don't LOG(ERROR). To enable this we keep
+ // a map of conflicting new folders.
+ int transient_error_commits = 0;
+ int conflicting_commits = 0;
+ int error_commits = 0;
+ int successes = 0;
+ bool over_quota = false;
+ set<syncable::Id> conflicting_new_folder_ids;
+ set<syncable::Id> deleted_folders;
+ bool truncated_commit_logged = false;
+ { // Scope for WriteTransaction
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ for (int i = 0; i < cr.entryresponse_size(); i++) {
+ CommitResponse::RESPONSE_TYPE response_type =
+ ProcessSingleCommitResponse(&trans, cr.entryresponse(i),
+ commit_ids[i],
+ &conflicting_new_folder_ids,
+ &deleted_folders, session);
+ switch (response_type) {
+ case CommitResponse::INVALID_MESSAGE:
+ ++error_commits;
+ break;
+ case CommitResponse::CONFLICT:
+ ++conflicting_commits;
+ session->AddCommitConflict(commit_ids[i]);
+ break;
+ case CommitResponse::SUCCESS:
+ // TODO(sync): worry about sync_rate_ rate calc?
+ ++successes;
+ status.increment_successful_commits();
+ break;
+ case CommitResponse::OVER_QUOTA:
+ over_quota = true;
+ // We handle over quota like a retry.
+ case CommitResponse::RETRY:
+ session->AddBlockedItem(commit_ids[i]);
+ break;
+ case CommitResponse::TRANSIENT_ERROR:
+ ++transient_error_commits;
+ break;
+ default:
+ LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
+ }
+ }
+ }
+
+ // TODO(sync): move status reporting elsewhere.
+ status.set_conflicting_commits(conflicting_commits);
+ status.set_error_commits(error_commits);
+ if (0 == successes) {
+ status.increment_consecutive_transient_error_commits_by(
+ transient_error_commits);
+ status.increment_consecutive_errors_by(transient_error_commits);
+ } else {
+ status.zero_consecutive_transient_error_commits();
+ status.zero_consecutive_errors();
+ }
+ // If all commits are errors count it as an error.
+ if (commit_count == error_commits) {
+ // A larger error step than normal because a POST just succeeded.
+ status.TallyBigNewError();
+ }
+ if (commit_count != (conflicting_commits + error_commits +
+ transient_error_commits)) {
+ ResetErrorCounters(status);
+ }
+ SyncerUtil::MarkDeletedChildrenSynced(dir, &deleted_folders);
+ session->set_over_quota(over_quota);
+
+ return;
+}
+
+void LogServerError(const CommitResponse_EntryResponse& res) {
+ if (res.has_error_message())
+ LOG(ERROR) << " " << res.error_message();
+ else
+ LOG(ERROR) << " No detailed error message returned from server";
+}
+
+CommitResponse::RESPONSE_TYPE
+ProcessCommitResponseCommand::ProcessSingleCommitResponse(
+ syncable::WriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
+ const syncable::Id& pre_commit_id,
+ std::set<syncable::Id>* conflicting_new_folder_ids,
+ set<syncable::Id>* deleted_folders,
+ SyncerSession* const session) {
+
+ const CommitResponse_EntryResponse& server_entry =
+ *static_cast<const CommitResponse_EntryResponse*>(&pb_server_entry);
+ MutableEntry local_entry(trans, GET_BY_ID, pre_commit_id);
+ CHECK(local_entry.good());
+ bool syncing_was_set = local_entry.Get(SYNCING);
+ local_entry.Put(SYNCING, false);
+
+ CommitResponse::RESPONSE_TYPE response = (CommitResponse::RESPONSE_TYPE)
+ server_entry.response_type();
+ if (!CommitResponse::RESPONSE_TYPE_IsValid(response)) {
+ LOG(ERROR) << "Commit response has unknown response type! Possibly out "
+ "of date client?";
+ return CommitResponse::INVALID_MESSAGE;
+ }
+ if (CommitResponse::TRANSIENT_ERROR == response) {
+ LOG(INFO) << "Transient Error Committing: " << local_entry;
+ LogServerError(server_entry);
+ return CommitResponse::TRANSIENT_ERROR;
+ }
+ if (CommitResponse::INVALID_MESSAGE == response) {
+ LOG(ERROR) << "Error Commiting: " << local_entry;
+ LogServerError(server_entry);
+ return response;
+ }
+ if (CommitResponse::CONFLICT == response) {
+ LOG(INFO) << "Conflict Committing: " << local_entry;
+ if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
+ conflicting_new_folder_ids->insert(pre_commit_id);
+ }
+ return response;
+ }
+ if (CommitResponse::RETRY == response) {
+ LOG(INFO) << "Retry Committing: " << local_entry;
+ return response;
+ }
+ if (CommitResponse::OVER_QUOTA == response) {
+ LOG(INFO) << "Hit Quota Committing: " << local_entry;
+ return response;
+ }
+ if (!server_entry.has_id_string()) {
+ LOG(ERROR) << "Commit response has no id";
+ return CommitResponse::INVALID_MESSAGE;
+ }
+
+ // implied by the IsValid call above, but here for clarity.
+ DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
+ // Check to see if we've been given the ID of an existing entry. If so treat
+ // it as an error response and retry later.
+ if (pre_commit_id != server_entry.id()) {
+ Entry e(trans, GET_BY_ID, server_entry.id());
+ if (e.good()) {
+ LOG(ERROR) << "Got duplicate id when commiting id: " << pre_commit_id <<
+ ". Treating as an error return";
+ return CommitResponse::INVALID_MESSAGE;
+ }
+ }
+
+ if (server_entry.version() == 0) {
+ LOG(WARNING) << "Server returned a zero version on a commit response.";
+ }
+
+ ProcessSuccessfulCommitResponse(trans, server_entry, pre_commit_id,
+ &local_entry, syncing_was_set,
+ deleted_folders, session);
+ return response;
+}
+
+void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+ syncable::WriteTransaction* trans,
+ const CommitResponse_EntryResponse& server_entry,
+ const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+ bool syncing_was_set, set<syncable::Id>* deleted_folders,
+ SyncerSession* const session) {
+ int64 old_version = local_entry->Get(BASE_VERSION);
+ int64 new_version = server_entry.version();
+ bool bad_commit_version = false;
+ // TODO(sync): The !server_entry.has_id_string() clauses below were
+ // introduced when working with the new protocol.
+ if (!pre_commit_id.ServerKnows())
+ bad_commit_version = 0 == new_version;
+ else
+ bad_commit_version = old_version > new_version;
+ if (bad_commit_version) {
+ LOG(ERROR) << "Bad version in commit return for " << *local_entry <<
+ " new_id:" << server_entry.id() << " new_version:" <<
+ server_entry.version();
+ return;
+ }
+ if (server_entry.id() != pre_commit_id) {
+ if (pre_commit_id.ServerKnows()) {
+ // TODO(sync): In future it's possible that we'll want the opportunity
+ // to do a server triggered move aside here.
+ LOG(ERROR) << " ID change but not committing a new entry. " <<
+ pre_commit_id << " became " << server_entry.id() << ".";
+ return;
+ }
+ if (!server_entry.id().ServerKnows()) {
+ LOG(ERROR) << " New entries id < 0." << pre_commit_id << " became " <<
+ server_entry.id() << ".";
+ return;
+ }
+ MutableEntry same_id(trans, GET_BY_ID, server_entry.id());
+ // We should trap this before this function.
+ CHECK(!same_id.good()) << "ID clash with id " << server_entry.id() <<
+ " during commit " << same_id;
+ SyncerUtil::ChangeEntryIDAndUpdateChildren(
+ trans, local_entry, server_entry.id());
+ LOG(INFO) << "Changing ID to " << server_entry.id();
+ }
+
+ local_entry->Put(BASE_VERSION, new_version);
+ LOG(INFO) << "Commit is changing base version of " <<
+ local_entry->Get(ID) << " to: " << new_version;
+
+ if (local_entry->Get(IS_UNAPPLIED_UPDATE)) {
+ // This is possible, but very unlikely.
+ local_entry->Put(IS_UNAPPLIED_UPDATE, false);
+ }
+
+ if (server_entry.has_name()) {
+ if (syncing_was_set) {
+ PerformCommitTimeNameAside(trans, server_entry, local_entry);
+ } else {
+ // IS_UNSYNCED will ensure that this entry gets committed again,
+ // even if we skip this name aside. IS_UNSYNCED was probably previously
+ // set, but let's just set it anyway.
+ local_entry->Put(IS_UNSYNCED, true);
+ LOG(INFO) << "Skipping commit time name aside because" <<
+ " entry was changed during commit.";
+ }
+ }
+
+ if (syncing_was_set && server_entry.has_position_in_parent()) {
+ // The server has the final say on positioning, so apply the absolute
+ // position that it returns.
+ local_entry->Put(SERVER_POSITION_IN_PARENT,
+ server_entry.position_in_parent());
+
+ // We just committed successfully, so we assume that the position
+ // value we got applies to the PARENT_ID we submitted.
+ syncable::Id new_prev = SyncerUtil::ComputePrevIdFromServerPosition(
+ trans, local_entry, local_entry->Get(PARENT_ID));
+ if (!local_entry->PutPredecessor(new_prev)) {
+ LOG(WARNING) << "PutPredecessor failed after successful commit";
+ }
+ }
+
+ if (syncing_was_set) {
+ local_entry->Put(IS_UNSYNCED, false);
+ }
+ if (local_entry->Get(IS_DIR) && local_entry->Get(IS_DEL)) {
+ deleted_folders->insert(local_entry->Get(ID));
+ }
+}
+
+void ProcessCommitResponseCommand::PerformCommitTimeNameAside(
+ syncable::WriteTransaction* trans,
+ const CommitResponse_EntryResponse& server_entry,
+ syncable::MutableEntry* local_entry) {
+ Name old_name(local_entry->GetName());
+
+ // Ensure that we don't collide with an existing entry.
+ SyncName server_name =
+ SyncerProtoUtil::NameFromCommitEntryResponse(server_entry);
+
+ LOG(INFO) << "Server provided committed name:" << server_name.value();
+ if (!server_name.value().empty() &&
+ static_cast<SyncName&>(old_name) != server_name) {
+ LOG(INFO) << "Server name differs from local name, attempting"
+ << " commit time name aside.";
+
+ DBName db_name(server_name.value());
+ db_name.MakeOSLegal();
+
+ // This is going to produce ~1 names instead of (Edited) names.
+ // Since this should be EXTREMELY rare, we do this for now.
+ db_name.MakeNoncollidingForEntry(trans, local_entry->Get(SERVER_PARENT_ID),
+ local_entry);
+
+ CHECK(!db_name.empty());
+
+ LOG(INFO) << "Server commit moved aside entry: " << old_name.db_value()
+ << " to new name " << db_name;
+
+ // Should be safe since we're in a "commit lock."
+ local_entry->PutName(Name::FromDBNameAndSyncName(db_name, server_name));
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/process_commit_response_command.h b/chrome/browser/sync/engine/process_commit_response_command.h
new file mode 100644
index 0000000..a025428
--- /dev/null
+++ b/chrome/browser/sync/engine/process_commit_response_command.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+
+#include <set>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+
+namespace syncable {
+class Id;
+class WriteTransaction;
+class MutableEntry;
+}
+
+namespace browser_sync {
+
+class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
+ public:
+
+ ProcessCommitResponseCommand();
+ virtual ~ProcessCommitResponseCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession* session);
+ private:
+ CommitResponse::RESPONSE_TYPE ProcessSingleCommitResponse(
+ syncable::WriteTransaction* trans,
+ const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
+ const syncable::Id& pre_commit_id, std::set<syncable::Id>*
+ conflicting_new_directory_ids,
+ std::set<syncable::Id>* deleted_folders,
+ SyncerSession* const session);
+
+ void ProcessSuccessfulCommitResponse(syncable::WriteTransaction* trans,
+ const CommitResponse_EntryResponse& server_entry,
+ const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+ bool syncing_was_set, std::set<syncable::Id>* deleted_folders,
+ SyncerSession* const session);
+
+ void PerformCommitTimeNameAside(
+ syncable::WriteTransaction* trans,
+ const CommitResponse_EntryResponse& server_entry,
+ syncable::MutableEntry* local_entry);
+
+ DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
new file mode 100644
index 0000000..6d5973c
--- /dev/null
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/process_updates_command.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+ProcessUpdatesCommand::ProcessUpdatesCommand() {}
+ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
+
+void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ SyncerStatus status(session);
+
+ const GetUpdatesResponse updates = session->update_response().get_updates();
+ const int update_count = updates.entries_size();
+
+ LOG(INFO) << "Get updates from ts " << dir->last_sync_timestamp() <<
+ " returned " << update_count << " updates.";
+
+ if (updates.has_newest_timestamp()) {
+ int64 newest_timestamp = updates.newest_timestamp();
+ LOG(INFO) << "Newest Timestamp:" << newest_timestamp;
+ status.set_servers_latest_timestamp(newest_timestamp);
+ }
+
+ int64 new_timestamp = 0;
+ if (updates.has_new_timestamp()) {
+ new_timestamp = updates.new_timestamp();
+ LOG(INFO) << "Get Updates got new timestamp: " << new_timestamp;
+ if (0 == update_count) {
+ if (new_timestamp > dir->last_sync_timestamp()) {
+ dir->set_last_sync_timestamp(new_timestamp);
+ session->set_timestamp_dirty();
+ }
+ return;
+ }
+ }
+
+ if (0 == status.servers_latest_timestamp()) {
+ // Hack since new server never gives us the server's latest
+ // timestamp. But if a getupdates returns zero, then we know we
+ // are up to date.
+ status.set_servers_latest_timestamp(status.current_sync_timestamp());
+ }
+ // If we have updates that are ALL supposed to be skipped, we don't want
+ // to get them again. In fact, the account's final updates are all
+ // supposed to be skipped and we DON'T step past them, we will sync forever
+ int64 latest_skip_timestamp = 0;
+ bool any_non_skip_results = false;
+ vector<VerifiedUpdate>::iterator it;
+ for (it = session->VerifiedUpdatesBegin();
+ it < session->VerifiedUpdatesEnd();
+ ++it) {
+ const sync_pb::SyncEntity update = it->second;
+
+ any_non_skip_results = (it->first != VERIFY_SKIP);
+ if (!any_non_skip_results) {
+ // ALL updates were to be skipped, including this one
+ if (update.sync_timestamp() > latest_skip_timestamp) {
+ latest_skip_timestamp = update.sync_timestamp();
+ }
+ } else {
+ latest_skip_timestamp = 0;
+ }
+
+ if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
+ continue;
+ switch (ProcessUpdate(dir, update)) {
+ case SUCCESS_PROCESSED:
+ case SUCCESS_STORED:
+ // We can update the timestamp because we store the update
+ // even if we can't apply it now.
+ if (update.sync_timestamp() > new_timestamp)
+ new_timestamp = update.sync_timestamp();
+ break;
+ }
+
+ }
+
+ if (latest_skip_timestamp > new_timestamp)
+ new_timestamp = latest_skip_timestamp;
+
+ if (new_timestamp > dir->last_sync_timestamp()) {
+ dir->set_last_sync_timestamp(new_timestamp);
+ session->set_timestamp_dirty();
+ }
+
+ status.zero_consecutive_problem_get_updates();
+ status.zero_consecutive_errors();
+ status.set_current_sync_timestamp(dir->last_sync_timestamp());
+ status.set_syncing(true);
+ return;
+}
+
+namespace {
+// returns true if the entry is still ok to process
+bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
+ syncable::MutableEntry* same_id) {
+
+ const bool deleted = entry.has_deleted() && entry.deleted();
+ const bool is_directory = entry.IsFolder();
+ const bool is_bookmark = entry.has_bookmarkdata();
+
+ return VERIFY_SUCCESS ==
+ SyncerUtil::VerifyUpdateConsistency(trans,
+ entry,
+ same_id,
+ deleted,
+ is_directory,
+ is_bookmark);
+}
+} // anonymous namespace
+
+// TODO(sync): Refactor this code.
+// Process a single update. Will avoid touching global state.
+ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+ const syncable::ScopedDirLookup& dir, const sync_pb::SyncEntity& pb_entry) {
+
+ const SyncEntity& entry = *static_cast<const SyncEntity*>(&pb_entry);
+ using namespace syncable;
+ syncable::Id id = entry.id();
+ SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
+
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+
+ SyncerUtil::CreateNewEntry(&trans, id);
+
+ // We take a two step approach. First we store the entries data in the
+ // server fields of a local entry and then move the data to the local fields
+ MutableEntry update_entry(&trans, GET_BY_ID, id);
+ // TODO(sync): do we need to run ALL these checks, or is a mere version
+ // check good enough?
+ if (!ReverifyEntry(&trans, entry, &update_entry)) {
+ return SUCCESS_PROCESSED; // the entry has become irrelevant
+ }
+
+ SyncerUtil::UpdateServerFieldsFromUpdate(&update_entry, entry, name);
+
+ if (update_entry.Get(SERVER_VERSION) == update_entry.Get(BASE_VERSION) &&
+ !update_entry.Get(IS_UNSYNCED)) {
+ CHECK(SyncerUtil::ServerAndLocalEntriesMatch(
+ &update_entry)) << update_entry;
+ }
+ return SUCCESS_PROCESSED;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/process_updates_command.h b/chrome/browser/sync/engine/process_updates_command.h
new file mode 100644
index 0000000..a6cee34
--- /dev/null
+++ b/chrome/browser/sync/engine/process_updates_command.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+
+namespace syncable {
+class ScopedDirLookup;
+}
+
+namespace sync_pb {
+class SyncEntity;
+}
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// A syncer command for processing updates.
+//
+// Preconditions - updates in the SyncerSesssion have been downloaded
+// and verified.
+//
+// Postconditions - All of the verified SyncEntity data will be copied to
+// the server fields of the corresponding syncable entries.
+class ProcessUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ ProcessUpdatesCommand();
+ virtual ~ProcessUpdatesCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession* session);
+ ServerUpdateProcessingResult ProcessUpdate(
+ const syncable::ScopedDirLookup& dir,
+ const sync_pb::SyncEntity& pb_entry);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/resolve_conflicts_command.cc b/chrome/browser/sync/engine/resolve_conflicts_command.cc
new file mode 100644
index 0000000..6caf9b4
--- /dev/null
+++ b/chrome/browser/sync/engine/resolve_conflicts_command.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/resolve_conflicts_command.h"
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+namespace browser_sync {
+
+ResolveConflictsCommand::ResolveConflictsCommand() {}
+ResolveConflictsCommand::~ResolveConflictsCommand() {}
+
+void ResolveConflictsCommand::ModelChangingExecuteImpl(
+ SyncerSession* session) {
+ if (!session->resolver())
+ return;
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good())
+ return;
+ ConflictResolutionView conflict_view(session);
+ session->set_conflicts_resolved(
+ session->resolver()->ResolveConflicts(dir, &conflict_view, session));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/resolve_conflicts_command.h b/chrome/browser/sync/engine/resolve_conflicts_command.h
new file mode 100644
index 0000000..a75c631
--- /dev/null
+++ b/chrome/browser/sync/engine/resolve_conflicts_command.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "base/basictypes.h"
+
+namespace syncable {
+class WriteTransaction;
+class MutableEntry;
+class Id;
+}
+namespace browser_sync {
+
+class SyncerSession;
+
+class ResolveConflictsCommand : public ModelChangingSyncerCommand {
+ public:
+ ResolveConflictsCommand();
+ virtual ~ResolveConflictsCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession* session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ResolveConflictsCommand);
+};
+} // namespace browser_sync
+
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/sync_cycle_state.h b/chrome/browser/sync/engine/sync_cycle_state.h
new file mode 100644
index 0000000..7d38670c
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_cycle_state.h
@@ -0,0 +1,253 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The sync process consists of a sequence of sync cycles, each of which
+// (hopefully) moves the client into closer synchronization with the server.
+// This class holds state that is pertinent to a single sync cycle.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
+
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+namespace syncable {
+class WriteTransaction;
+class Id;
+} // namespace syncable
+
+namespace browser_sync {
+
+typedef std::pair<VerifyResult, sync_pb::SyncEntity> VerifiedUpdate;
+typedef std::pair<UpdateAttemptResponse, syncable::Id> AppliedUpdate;
+
+// This is the type declaration for the eventsys channel that the syncer
+// uses to send events to other system components.
+struct SyncerEvent;
+
+// SyncCycleState holds the entire state of a single sync cycle;
+// GetUpdates, Commit, and Conflict Resolution. After said cycle, the
+// State may contain items that were unable to be processed because of
+// errors.
+class SyncCycleState {
+ public:
+ SyncCycleState()
+ : write_transaction_(NULL),
+ conflict_sets_built_(false),
+ conflicts_resolved_(false),
+ items_committed_(false),
+ over_quota_(false),
+ dirty_(true),
+ timestamp_dirty_(false) {}
+
+ void set_update_response(const ClientToServerResponse& update_response) {
+ update_response_.CopyFrom(update_response);
+ }
+
+ const ClientToServerResponse& update_response() const {
+ return update_response_;
+ }
+
+ void set_commit_response(const ClientToServerResponse& commit_response) {
+ commit_response_.CopyFrom(commit_response);
+ }
+
+ const ClientToServerResponse& commit_response() const {
+ return commit_response_;
+ }
+
+ void AddVerifyResult(const VerifyResult& verify_result,
+ const sync_pb::SyncEntity& entity) {
+ verified_updates_.push_back(std::make_pair(verify_result, entity));
+ }
+
+ bool HasVerifiedUpdates() const {
+ return !verified_updates_.empty();
+ }
+
+ // Log a successful or failing update attempt.
+ void AddAppliedUpdate(const UpdateAttemptResponse& response,
+ const syncable::Id& id) {
+ applied_updates_.push_back(std::make_pair(response, id));
+ }
+
+ bool HasAppliedUpdates() const {
+ return !applied_updates_.empty();
+ }
+
+ std::vector<AppliedUpdate>::iterator AppliedUpdatesBegin() {
+ return applied_updates_.begin();
+ }
+
+ std::vector<VerifiedUpdate>::iterator VerifiedUpdatesBegin() {
+ return verified_updates_.begin();
+ }
+
+ std::vector<AppliedUpdate>::iterator AppliedUpdatesEnd() {
+ return applied_updates_.end();
+ }
+
+ std::vector<VerifiedUpdate>::iterator VerifiedUpdatesEnd() {
+ return verified_updates_.end();
+ }
+
+ // Returns the number of update application attempts. This includes
+ // both failures and successes.
+ int AppliedUpdatesSize() const {
+ return applied_updates_.size();
+ }
+
+ // Count the number of successful update applications that have happend
+ // this cycle. Note that if an item is successfully applied twice,
+ // it will be double counted here.
+ int SuccessfullyAppliedUpdateCount() const {
+ int count = 0;
+ for (std::vector<AppliedUpdate>::const_iterator it =
+ applied_updates_.begin();
+ it != applied_updates_.end();
+ ++it) {
+ if (it->first == SUCCESS)
+ count++;
+ }
+ return count;
+ }
+
+ int VerifiedUpdatesSize() const {
+ return verified_updates_.size();
+ }
+
+ const std::vector<int64>& unsynced_handles() const {
+ return unsynced_handles_;
+ }
+
+ void set_unsynced_handles(const std::vector<int64>& unsynced_handles) {
+ UpdateDirty(unsynced_handles != unsynced_handles_);
+ unsynced_handles_ = unsynced_handles;
+ }
+
+ int64 unsynced_count() const { return unsynced_handles_.size(); }
+
+ const std::vector<syncable::Id>& commit_ids() const { return commit_ids_; }
+
+ void set_commit_ids(const std::vector<syncable::Id>& commit_ids) {
+ commit_ids_ = commit_ids;
+ }
+
+ bool commit_ids_empty() const { return commit_ids_.empty(); }
+
+ // The write transaction must be deleted by the caller of this function.
+ void set_write_transaction(syncable::WriteTransaction* write_transaction) {
+ DCHECK(!write_transaction_) << "Forgot to clear the write transaction.";
+ write_transaction_ = write_transaction;
+ }
+
+ syncable::WriteTransaction* write_transaction() const {
+ return write_transaction_;
+ }
+
+ bool has_open_write_transaction() { return write_transaction_ != NULL; }
+
+ // sets the write transaction to null, but doesn't free the memory.
+ void ClearWriteTransaction() { write_transaction_ = NULL; }
+
+ ClientToServerMessage* commit_message() { return &commit_message_; }
+
+ void set_commit_message(ClientToServerMessage message) {
+ commit_message_ = message;
+ }
+
+ void set_conflict_sets_built(bool b) {
+ conflict_sets_built_ = b;
+ }
+
+ bool conflict_sets_built() const {
+ return conflict_sets_built_;
+ }
+
+ void set_conflicts_resolved(bool b) {
+ conflicts_resolved_ = b;
+ }
+
+ bool conflicts_resolved() const {
+ return conflicts_resolved_;
+ }
+
+ void set_over_quota(bool b) {
+ UpdateDirty(b != over_quota_);
+ over_quota_ = b;
+ }
+
+ bool over_quota() const {
+ return over_quota_;
+ }
+
+ void set_items_committed(bool b) { items_committed_ = b; }
+
+ void set_item_committed() { items_committed_ |= true; }
+
+ bool items_committed() const { return items_committed_; }
+
+
+ // Returns true if this object has been modified since last SetClean() call
+ bool IsDirty() const { return dirty_; }
+
+ // Call to tell this status object that its new state has been seen
+ void SetClean() { dirty_ = false; }
+
+ // Indicate that we've made a change to directory timestamp.
+ void set_timestamp_dirty() {
+ timestamp_dirty_ = true;
+ }
+
+ bool is_timestamp_dirty() const {
+ return timestamp_dirty_;
+ }
+
+
+ private:
+ void UpdateDirty(bool new_info) { dirty_ |= new_info; }
+
+ // download updates supplies:
+ ClientToServerResponse update_response_;
+ ClientToServerResponse commit_response_;
+ ClientToServerMessage commit_message_;
+
+ syncable::WriteTransaction* write_transaction_;
+ std::vector<int64> unsynced_handles_;
+ std::vector<syncable::Id> commit_ids_;
+
+ // At a certain point during the sync process we'll want to build the
+ // conflict sets. This variable tracks whether or not that has happened.
+ bool conflict_sets_built_;
+ bool conflicts_resolved_;
+ bool items_committed_;
+ bool over_quota_;
+
+ // If we've set the timestamp to a new value during this cycle.
+ bool timestamp_dirty_;
+
+ bool dirty_;
+
+ // some container for updates that failed verification
+ std::vector<VerifiedUpdate> verified_updates_;
+
+ // Stores the result of the various ApplyUpdate attempts we've made.
+ // May contain duplicate entries.
+ std::vector<AppliedUpdate> applied_updates_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncCycleState);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
diff --git a/chrome/browser/sync/engine/sync_process_state.cc b/chrome/browser/sync/engine/sync_process_state.cc
new file mode 100644
index 0000000..6f76eee
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_process_state.cc
@@ -0,0 +1,325 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#include "chrome/browser/sync/engine/sync_process_state.h"
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+using std::map;
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+
+SyncProcessState::SyncProcessState(const SyncProcessState& counts)
+ : account_name_(counts.account_name_),
+ dirman_(counts.dirman_),
+ syncer_event_channel_(counts.syncer_event_channel_),
+ connection_manager_(counts.connection_manager_),
+ resolver_(counts.resolver_),
+ model_safe_worker_(counts.model_safe_worker_) {
+ *this = counts;
+}
+
+SyncProcessState::SyncProcessState(syncable::DirectoryManager* dirman,
+ PathString account_name,
+ ServerConnectionManager* connection_manager,
+ ConflictResolver* const resolver,
+ SyncerEventChannel* syncer_event_channel,
+ ModelSafeWorker* model_safe_worker)
+ : account_name_(account_name),
+ dirman_(dirman),
+ syncer_event_channel_(syncer_event_channel),
+ connection_manager_(connection_manager),
+ model_safe_worker_(model_safe_worker),
+ resolver_(resolver),
+ syncer_stuck_(false),
+ num_sync_cycles_(0),
+ silenced_until_(0),
+ error_rate_(0),
+ current_sync_timestamp_(0),
+ servers_latest_timestamp_(0),
+ error_commits_(0),
+ stalled_commits_(0),
+ conflicting_commits_(0),
+ consecutive_problem_get_updates_(0),
+ consecutive_problem_commits_(0),
+ consecutive_transient_error_commits_(0),
+ consecutive_errors_(0),
+ successful_commits_(0),
+ dirty_(false),
+ auth_dirty_(false),
+ auth_failed_(false),
+ invalid_store_(false) {
+ syncable::ScopedDirLookup dir(dirman_, account_name_);
+
+ // The directory must be good here.
+ LOG_IF(ERROR, !dir.good());
+ syncing_ = !dir->initial_sync_ended();
+
+ // If we have never synced then we are invalid until made otherwise.
+ set_invalid_store((dir->last_sync_timestamp() <= 0));
+}
+
+SyncProcessState& SyncProcessState::operator=(const SyncProcessState& counts) {
+ if (this == &counts) {
+ return *this;
+ }
+ CleanupSets();
+ num_sync_cycles_ = counts.num_sync_cycles_;
+ silenced_until_ = counts.silenced_until_;
+ error_rate_ = counts.error_rate_;
+ current_sync_timestamp_ = counts.current_sync_timestamp_;
+ servers_latest_timestamp_ = counts.servers_latest_timestamp_;
+ error_commits_ = counts.error_commits_;
+ stalled_commits_ = counts.stalled_commits_;
+ conflicting_commits_ = counts.conflicting_commits_;
+ consecutive_problem_get_updates_ =
+ counts.consecutive_problem_get_updates_;
+ consecutive_problem_commits_ =
+ counts.consecutive_problem_commits_;
+ consecutive_transient_error_commits_ =
+ counts.consecutive_transient_error_commits_;
+ consecutive_errors_ = counts.consecutive_errors_;
+ conflicting_item_ids_ = counts.conflicting_item_ids_;
+ blocked_item_ids_ = counts.blocked_item_ids_;
+ successful_commits_ = counts.successful_commits_;
+ syncer_stuck_ = counts.syncer_stuck_;
+
+ // TODO(chron): Is it safe to set these?
+ //
+ // Pointers:
+ //
+ // connection_manager_
+ // account_name_
+ // dirman_
+ // model_safe_worker_
+ // syncer_event_channel_
+ //
+ // Status members:
+ // syncing_
+ // invalid_store_
+ // syncer_stuck_
+ // got_zero_updates_
+ // dirty_
+ // auth_dirty_
+ // auth_failed_
+
+ for (set<ConflictSet*>::const_iterator it =
+ counts.ConflictSetsBegin();
+ counts.ConflictSetsEnd() != it; ++it) {
+ const ConflictSet* old_set = *it;
+ ConflictSet* const new_set = new ConflictSet(*old_set);
+ conflict_sets_.insert(new_set);
+
+ for (ConflictSet::const_iterator setit = new_set->begin();
+ new_set->end() != setit; ++setit) {
+ id_to_conflict_set_[*setit] = new_set;
+ }
+ }
+ return *this;
+}
+
+// status maintenance functions
+void SyncProcessState::set_invalid_store(const bool val) {
+ UpdateDirty(val != invalid_store_);
+ invalid_store_ = val;
+}
+
+void SyncProcessState::set_syncer_stuck(const bool val) {
+ UpdateDirty(val != syncer_stuck_);
+ syncer_stuck_ = val;
+}
+
+void SyncProcessState::set_syncing(const bool val) {
+ UpdateDirty(val != syncing_);
+ syncing_ = val;
+}
+
+// Returns true if got zero updates has been set on the directory.
+bool SyncProcessState::IsShareUsable() const {
+ syncable::ScopedDirLookup dir(dirman(), account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return false;
+ }
+ return dir->initial_sync_ended();
+}
+
+void SyncProcessState::set_current_sync_timestamp(const int64 val) {
+ UpdateDirty(val != current_sync_timestamp_);
+ current_sync_timestamp_ = val;
+}
+
+void SyncProcessState::set_servers_latest_timestamp(const int64 val) {
+ UpdateDirty(val != servers_latest_timestamp_);
+ servers_latest_timestamp_ = val;
+}
+
+void SyncProcessState::set_error_commits(const int val) {
+ UpdateDirty(val != error_commits_);
+ error_commits_ = val;
+}
+
+void SyncProcessState::set_stalled_commits(const int val) {
+ UpdateDirty(val != conflicting_commits_);
+ conflicting_commits_ = val;
+}
+
+void SyncProcessState::set_conflicting_commits(const int val) {
+ UpdateDirty(val != stalled_commits_);
+ stalled_commits_ = val;
+}
+
+// WEIRD COUNTER functions
+void SyncProcessState::increment_consecutive_problem_get_updates() {
+ UpdateDirty(true);
+ ++consecutive_problem_get_updates_;
+}
+
+void SyncProcessState::zero_consecutive_problem_get_updates() {
+ UpdateDirty(0 != consecutive_problem_get_updates_);
+ consecutive_problem_get_updates_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_problem_commits() {
+ UpdateDirty(true);
+ ++consecutive_problem_commits_;
+}
+
+void SyncProcessState::zero_consecutive_problem_commits() {
+ UpdateDirty(0 != consecutive_problem_commits_);
+ consecutive_problem_commits_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_transient_error_commits_by(
+ int value) {
+ UpdateDirty(0 != value);
+ consecutive_transient_error_commits_ += value;
+}
+
+void SyncProcessState::zero_consecutive_transient_error_commits() {
+ UpdateDirty(0 != consecutive_transient_error_commits_);
+ consecutive_transient_error_commits_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_errors_by(int value) {
+ UpdateDirty(0 != value);
+ consecutive_errors_ += value;
+}
+
+void SyncProcessState::zero_consecutive_errors() {
+ UpdateDirty(0 != consecutive_errors_);
+ consecutive_errors_ = 0;
+}
+
+void SyncProcessState::increment_successful_commits() {
+ UpdateDirty(true);
+ ++successful_commits_;
+}
+
+void SyncProcessState::zero_successful_commits() {
+ UpdateDirty(0 != successful_commits_);
+ successful_commits_ = 0;
+}
+
+// Methods for managing error rate tracking
+void SyncProcessState::TallyNewError() {
+ UpdateDirty(true);
+ error_rate_ += (65536 - error_rate_) >> 2;
+}
+
+void SyncProcessState::TallyBigNewError() {
+ UpdateDirty(true);
+ error_rate_ += (65536 - error_rate_) >> 2;
+}
+
+void SyncProcessState::ForgetOldError() {
+ error_rate_ -= error_rate_ >> 2;
+}
+
+void SyncProcessState::CheckErrorRateTooHigh() {
+ UpdateDirty(error_rate_ > ERROR_THRESHOLD);
+}
+
+
+void SyncProcessState::MergeSets(const syncable::Id& id1,
+ const syncable::Id& id2) {
+ // There are no single item sets, we just leave those entries == 0
+ vector<syncable::Id>* set1 = id_to_conflict_set_[id1];
+ vector<syncable::Id>* set2 = id_to_conflict_set_[id2];
+ vector<syncable::Id>* rv = 0;
+ if (0 == set1 && 0 == set2) {
+ // neither item currently has a set so we build one.
+ rv = new vector<syncable::Id>();
+ rv->push_back(id1);
+ if (id1 != id2) {
+ rv->push_back(id2);
+ } else {
+ LOG(WARNING) << "[BUG] Attempting to merge two identical conflict ids.";
+ }
+ conflict_sets_.insert(rv);
+ } else if (0 == set1) {
+ // add the item to the existing set.
+ rv = set2;
+ rv->push_back(id1);
+ } else if (0 == set2) {
+ // add the item to the existing set.
+ rv = set1;
+ rv->push_back(id2);
+ } else if (set1 == set2) {
+ // It's the same set already
+ return;
+ } else {
+ // merge the two sets.
+ rv = set1;
+ // point all the second sets id's back to the first.
+ vector<syncable::Id>::iterator i;
+ for (i = set2->begin() ; i != set2->end() ; ++i) {
+ id_to_conflict_set_[*i] = rv;
+ }
+ // copy the second set to the first.
+ rv->insert(rv->end(), set2->begin(), set2->end());
+ conflict_sets_.erase(set2);
+ delete set2;
+ }
+ id_to_conflict_set_[id1] = id_to_conflict_set_[id2] = rv;
+}
+
+void SyncProcessState::CleanupSets() {
+ // Clean up all the sets.
+ set<ConflictSet*>::iterator i;
+ for (i = conflict_sets_.begin(); i != conflict_sets_.end(); i++) {
+ delete *i;
+ }
+ conflict_sets_.clear();
+ id_to_conflict_set_.clear();
+}
+
+SyncProcessState::~SyncProcessState() {
+ CleanupSets();
+}
+
+void SyncProcessState::AuthFailed() {
+ // dirty if the last one DIDN'T fail.
+ UpdateAuthDirty(true != auth_failed_);
+ auth_failed_ = true;
+}
+
+void SyncProcessState::AuthSucceeded() {
+ // dirty if the last one DID fail.
+ UpdateAuthDirty(false != auth_failed_);
+ auth_failed_ = false;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/sync_process_state.h b/chrome/browser/sync/engine/sync_process_state.h
new file mode 100644
index 0000000..32c6808
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_process_state.h
@@ -0,0 +1,384 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The sync process consists of a sequence of sync cycles, each of which
+// (hopefully) moves the client into closer synchronization with the server.
+// While SyncCycleState holds state that is pertinent to a single sync cycle,
+// this data structure holds state that must be passed from cycle to cycle.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
+
+#include <map>
+#include <set>
+#include <utility> // for pair<>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+class ConflictResolver;
+class ModelSafeWorker;
+
+class SyncProcessState {
+ FRIEND_TEST(SyncerSyncProcessState, MergeSetsTest);
+ FRIEND_TEST(SyncerTest, CopySyncProcessState);
+ public:
+ ~SyncProcessState();
+ SyncProcessState(
+ syncable::DirectoryManager* dirman,
+ PathString account_name,
+ ServerConnectionManager* connection_manager,
+ ConflictResolver* const resolver,
+ SyncerEventChannel* syncer_event_channel,
+ ModelSafeWorker* model_safe_worker);
+
+ // intentionally not 'explicit' b/c it's a copy ctor:
+ SyncProcessState(const SyncProcessState& counts);
+ SyncProcessState& operator=(const SyncProcessState& that);
+
+ PathString account_name() const { return account_name_; }
+
+ syncable::DirectoryManager* dirman() const { return dirman_; }
+
+ ServerConnectionManager* connection_manager()
+ const {
+ return connection_manager_;
+ }
+
+ ConflictResolver* resolver() const { return resolver_; }
+
+ ModelSafeWorker* model_safe_worker() { return model_safe_worker_; }
+
+ SyncerEventChannel* syncer_event_channel() const {
+ return syncer_event_channel_;
+ }
+
+ // Functions that deal with conflict set stuff
+ IdToConflictSetMap::const_iterator IdToConflictSetFind(
+ const syncable::Id& the_id) const {
+ return id_to_conflict_set_.find(the_id);
+ }
+
+ IdToConflictSetMap::const_iterator IdToConflictSetBegin() const {
+ return id_to_conflict_set_.begin();
+ }
+
+ IdToConflictSetMap::const_iterator IdToConflictSetEnd() const {
+ return id_to_conflict_set_.end();
+ }
+
+ IdToConflictSetMap::size_type IdToConflictSetSize() const {
+ return id_to_conflict_set_.size();
+ }
+
+ const ConflictSet* IdToConflictSetGet(const syncable::Id& the_id) {
+ return id_to_conflict_set_[the_id];
+ }
+
+ std::set<ConflictSet*>::const_iterator ConflictSetsBegin() const {
+ return conflict_sets_.begin();
+ }
+
+ std::set<ConflictSet*>::const_iterator ConflictSetsEnd() const {
+ return conflict_sets_.end();
+ }
+
+ std::set<ConflictSet*>::size_type ConflictSetsSize() const {
+ return conflict_sets_.size();
+ }
+
+ void MergeSets(const syncable::Id& set1, const syncable::Id& set2);
+
+ void CleanupSets();
+ // END conflict set functions
+
+ // item id set manipulation functions
+ bool HasConflictingItems() const {
+ return !conflicting_item_ids_.empty();
+ }
+
+ bool HasBlockedItems() const {
+ return !blocked_item_ids_.empty();
+ }
+
+ int ConflictingItemsSize() const {
+ return conflicting_item_ids_.size();
+ }
+
+ int BlockedItemsSize() const {
+ return blocked_item_ids_.size();
+ }
+
+ void AddConflictingItem(const syncable::Id& the_id) {
+ std::pair<std::set<syncable::Id>::iterator, bool> ret =
+ conflicting_item_ids_.insert(the_id);
+ UpdateDirty(ret.second);
+ }
+
+ void AddBlockedItem(const syncable::Id& the_id) {
+ std::pair<std::set<syncable::Id>::iterator, bool> ret =
+ blocked_item_ids_.insert(the_id);
+ UpdateDirty(ret.second);
+ }
+
+ void EraseConflictingItem(std::set<syncable::Id>::iterator it) {
+ UpdateDirty(true);
+ conflicting_item_ids_.erase(it);
+ }
+
+ void EraseBlockedItem(std::set<syncable::Id>::iterator it) {
+ UpdateDirty(true);
+ blocked_item_ids_.erase(it);
+ }
+
+ void EraseConflictingItem(const syncable::Id& the_id) {
+ int items_erased = conflicting_item_ids_.erase(the_id);
+ UpdateDirty(0 != items_erased);
+ }
+
+ void EraseBlockedItem(const syncable::Id& the_id) {
+ int items_erased = blocked_item_ids_.erase(the_id);
+ UpdateDirty(0 != items_erased);
+ }
+
+ std::set<syncable::Id>::iterator ConflictingItemsBegin() {
+ return conflicting_item_ids_.begin();
+ }
+
+ std::set<syncable::Id>::iterator BlockedItemsBegin() {
+ return blocked_item_ids_.begin();
+ }
+
+ std::set<syncable::Id>::iterator ConflictingItemsEnd() {
+ return conflicting_item_ids_.end();
+ }
+
+ std::set<syncable::Id>::iterator BlockedItemsEnd() {
+ return blocked_item_ids_.end();
+ }
+
+ void SetConflictingItems(const std::set<syncable::Id>& s) {
+ UpdateDirty(true);
+ conflicting_item_ids_ = s;
+ }
+
+ void SetBlockedItems(const std::set<syncable::Id>& s) {
+ UpdateDirty(true);
+ blocked_item_ids_ = s;
+ }
+ // END item id set manipulation functions
+
+ // Assorted other state info
+ int conflicting_updates() const { return conflicting_item_ids_.size(); }
+
+ int num_sync_cycles_;
+
+ // When we're over bandwidth quota, we don't update until past this time.
+ time_t silenced_until_;
+
+ // Info that is tracked purely for status reporting
+
+ // During inital sync these two members can be used to measure sync progress.
+ int64 current_sync_timestamp() const { return current_sync_timestamp_; }
+
+ int64 servers_latest_timestamp() const { return servers_latest_timestamp_; }
+
+ void set_current_sync_timestamp(const int64 val);
+
+ void set_servers_latest_timestamp(const int64 val);
+
+ bool invalid_store() const { return invalid_store_; }
+
+ void set_invalid_store(const bool val);
+
+ bool syncer_stuck() const { return syncer_stuck_; }
+
+ void set_syncer_stuck(const bool val);
+
+ bool syncing() const { return syncing_; }
+
+ void set_syncing(const bool val);
+
+ bool IsShareUsable() const;
+
+ int error_commits() const { return error_commits_; }
+
+ void set_error_commits(const int val);
+
+ int conflicting_commits() const { return conflicting_commits_; }
+
+ void set_conflicting_commits(const int val);
+
+ int stalled_commits() const { return stalled_commits_; }
+
+ void set_stalled_commits(const int val);
+
+ // WEIRD COUNTER manipulation functions
+ int consecutive_problem_get_updates() const {
+ return consecutive_problem_get_updates_;
+ }
+
+ void increment_consecutive_problem_get_updates();
+
+ void zero_consecutive_problem_get_updates();
+
+ int consecutive_problem_commits() const {
+ return consecutive_problem_commits_;
+ }
+
+ void increment_consecutive_problem_commits();
+
+ void zero_consecutive_problem_commits();
+
+ int consecutive_transient_error_commits() const {
+ return consecutive_transient_error_commits_;
+ }
+
+ void increment_consecutive_transient_error_commits_by(int value);
+
+ void zero_consecutive_transient_error_commits();
+
+ int consecutive_errors() const { return consecutive_errors_; }
+
+ void increment_consecutive_errors_by(int value);
+
+ void zero_consecutive_errors();
+
+ int successful_commits() const { return successful_commits_; }
+
+ void increment_successful_commits();
+
+ void zero_successful_commits();
+ // end WEIRD COUNTER manipulation functions
+
+ // Methods for managing error rate tracking
+ void TallyNewError();
+
+ void TallyBigNewError();
+
+ void ForgetOldError();
+
+ void CheckErrorRateTooHigh();
+
+ // Methods for tracking authentication state
+ void AuthFailed();
+ void AuthSucceeded();
+
+ // Returns true if this object has been modified since last SetClean() call
+ bool IsDirty() const { return dirty_; }
+
+ // Call to tell this status object that its new state has been seen
+ void SetClean() { dirty_ = false; }
+
+ // Returns true if auth status has been modified since last SetClean() call
+ bool IsAuthDirty() const { return auth_dirty_; }
+
+ // Call to tell this status object that its auth state has been seen
+ void SetAuthClean() { auth_dirty_ = false; }
+
+ private:
+ // for testing
+ SyncProcessState()
+ : account_name_(PSTR("")),
+ dirman_(NULL),
+ syncer_event_channel_(NULL),
+ connection_manager_(NULL),
+ model_safe_worker_(NULL),
+ resolver_(NULL),
+ syncer_stuck_(false),
+ num_sync_cycles_(0),
+ silenced_until_(0),
+ error_rate_(0),
+ current_sync_timestamp_(0),
+ servers_latest_timestamp_(0),
+ error_commits_(0),
+ stalled_commits_(0),
+ conflicting_commits_(0),
+ consecutive_problem_get_updates_(0),
+ consecutive_problem_commits_(0),
+ consecutive_transient_error_commits_(0),
+ consecutive_errors_(0),
+ successful_commits_(0),
+ dirty_(false),
+ auth_dirty_(false),
+ auth_failed_(false),
+ syncing_(false),
+ invalid_store_(false) {}
+
+ ServerConnectionManager *connection_manager_;
+ const PathString account_name_;
+ syncable::DirectoryManager* const dirman_;
+ ConflictResolver* const resolver_;
+ ModelSafeWorker* const model_safe_worker_;
+
+ // For sending notifications from sync commands out to observers of the
+ // Syncer.
+ SyncerEventChannel* syncer_event_channel_;
+
+ // TODO(sync): move away from sets if it makes more sense.
+ std::set<syncable::Id> conflicting_item_ids_;
+ std::set<syncable::Id> blocked_item_ids_;
+ std::map<syncable::Id, ConflictSet*> id_to_conflict_set_;
+ std::set<ConflictSet*> conflict_sets_;
+
+ // Status information, as opposed to state info that may also be exposed for
+ // status reporting purposes.
+ static const int ERROR_THRESHOLD = 500;
+ int error_rate_; // A EMA in the range [0,65536)
+ int64 current_sync_timestamp_; // During inital sync these two members
+ int64 servers_latest_timestamp_; // can be used to measure sync progress.
+
+ // There remains sync state updating in:
+ // CommitUnsyncedEntries
+ bool syncing_;
+
+ // True when we get such an INVALID_STORE error from the server.
+ bool invalid_store_;
+ // True iff we're stuck. User should contact support.
+ bool syncer_stuck_;
+ // counts of various commit return values.
+ int error_commits_;
+ int conflicting_commits_;
+ int stalled_commits_;
+
+ // WEIRD COUNTERS
+ // Two variables that track the # on consecutive problem requests.
+ // consecutive_problem_get_updates_ resets when we get any updates (not on
+ // pings) and increments whenever the request fails.
+ int consecutive_problem_get_updates_;
+ // consecutive_problem_commits_ resets whenever we commit any number of
+ // items and increments whenever all commits fail for any reason.
+ int consecutive_problem_commits_;
+ // number of commits hitting transient errors since the last successful
+ // commit.
+ int consecutive_transient_error_commits_;
+ // Incremented when get_updates fails, commit fails, and when
+ // hitting transient errors. When any of these succeed, this counter
+ // is reset.
+ // TODO(chron): Reduce number of weird counters we use.
+ int consecutive_errors_;
+ int successful_commits_;
+
+ bool dirty_;
+ bool auth_dirty_;
+ bool auth_failed_;
+
+ void UpdateDirty(bool new_info) { dirty_ |= new_info; }
+
+ void UpdateAuthDirty(bool new_info) { auth_dirty_ |= new_info; }
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
diff --git a/chrome/browser/sync/engine/syncapi.cc b/chrome/browser/sync/engine/syncapi.cc
new file mode 100644
index 0000000..3f6eb06
--- /dev/null
+++ b/chrome/browser/sync/engine/syncapi.cc
@@ -0,0 +1,1565 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncapi.h"
+
+#if defined(OS_WINDOWS)
+#include <windows.h>
+#include <iphlpapi.h>
+#endif
+
+#include <iomanip>
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/change_reorder_buffer.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/net/syncapi_server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/user_settings.h"
+#include "googleurl/src/gurl.h"
+
+using browser_sync::AllStatus;
+using browser_sync::AllStatusEvent;
+using browser_sync::AuthWatcher;
+using browser_sync::AuthWatcherEvent;
+using browser_sync::ClientCommandChannel;
+using browser_sync::Syncer;
+using browser_sync::SyncerEvent;
+using browser_sync::SyncerStatus;
+using browser_sync::SyncerThread;
+using browser_sync::UserSettings;
+using browser_sync::TalkMediator;
+using browser_sync::TalkMediatorImpl;
+using std::list;
+using std::hex;
+using std::string;
+using std::vector;
+using syncable::Directory;
+using syncable::DirectoryManager;
+
+static const int kServerReachablePollingIntervalMsec = 60000 * 60;
+static const int kThreadExitTimeoutMsec = 60000;
+static const int kSSLPort = 443;
+
+// We shouldn't call InitLogFiles more than once since that will cause a crash.
+// So we use a global state variable to avoid that. This doesn't work in case
+// of multiple threads, and if some other part also tries to call InitLogFiles
+// apart from this file. But this is okay for now since this is the only
+// place we call InitLogFiles.
+namespace {
+static bool g_log_files_initialized = false;
+static base::AtExitManager g_at_exit_manager; // Necessary for NewCallback
+} // empty namespace
+
+struct ThreadParams {
+ browser_sync::ServerConnectionManager* conn_mgr;
+#if defined(OS_WINDOWS)
+ HANDLE exit_flag;
+#endif
+};
+
+// This thread calls CheckServerReachable() whenever a change occurs
+// in the table that maps IP addresses to interfaces, for example when
+// the user unplugs his network cable.
+void* AddressWatchThread(void* arg) {
+ NameCurrentThreadForDebugging("SyncEngine_AddressWatcher");
+ LOG(INFO) << "starting the address watch thread";
+ const ThreadParams* const params = reinterpret_cast<const ThreadParams*>(arg);
+#if defined(OS_WINDOWS)
+ OVERLAPPED overlapped = {0};
+ overlapped.hEvent = CreateEvent(NULL, FALSE, TRUE, NULL);
+ HANDLE file;
+ DWORD rc = WAIT_OBJECT_0;
+ while (true) {
+ // Only call NotifyAddrChange() after the IP address has changed or if this
+ // is the first time through the loop.
+ if (WAIT_OBJECT_0 == rc) {
+ ResetEvent(overlapped.hEvent);
+ DWORD notify_result = NotifyAddrChange(&file, &overlapped);
+ if (ERROR_IO_PENDING != notify_result) {
+ LOG(ERROR) << "NotifyAddrChange() returned unexpected result "
+ << hex << notify_result;
+ break;
+ }
+ }
+ HANDLE events[] = { overlapped.hEvent, params->exit_flag };
+ rc = WaitForMultipleObjects(ARRAYSIZE(events), events, FALSE,
+ kServerReachablePollingIntervalMsec);
+
+ // If the exit flag was signaled, the thread will exit.
+ if (WAIT_OBJECT_0 + 1 == rc)
+ break;
+
+ params->conn_mgr->CheckServerReachable();
+ }
+ CloseHandle(overlapped.hEvent);
+#else
+ // TODO(zork): Add this functionality to Linux.
+#endif
+ LOG(INFO) << "The address watch thread has stopped";
+ return 0;
+}
+
+namespace sync_api {
+class ModelSafeWorkerBridge;
+
+static const PSTR_CHAR kBookmarkSyncUserSettingsDatabase[] =
+ PSTR("BookmarkSyncSettings.sqlite3");
+static const PSTR_CHAR kDefaultNameForNewNodes[] = PSTR(" ");
+
+// The list of names which are reserved for use by the server.
+static const char16* kForbiddenServerNames[] =
+ { STRING16(""), STRING16("."), STRING16("..") };
+
+//////////////////////////////////////////////////////////////////////////
+// Static helper functions.
+
+// Helper function to look up the int64 metahandle of an object given the ID
+// string.
+static int64 IdToMetahandle(syncable::BaseTransaction* trans,
+ const syncable::Id& id) {
+ syncable::Entry entry(trans, syncable::GET_BY_ID, id);
+ if (!entry.good())
+ return kInvalidId;
+ return entry.Get(syncable::META_HANDLE);
+}
+
+// Checks whether |name| is a server-illegal name followed by zero or more space
+// characters. The three server-illegal names are the empty string, dot, and
+// dot-dot. Very long names (>255 bytes in UTF-8 Normalization Form C) are
+// also illegal, but are not considered here.
+static bool IsNameServerIllegalAfterTrimming(const string16& name) {
+ size_t untrimmed_count = name.find_last_not_of(' ') + 1;
+ for (int i = 0; i < arraysize(kForbiddenServerNames); ++i) {
+ if (name.compare(0, untrimmed_count, kForbiddenServerNames[i]) == 0)
+ return true;
+ }
+ return false;
+}
+
+static bool EndsWithSpace(const string16& string) {
+ return !string.empty() && *string.rbegin() == ' ';
+}
+
+static inline void String16ToPathString(const sync_char16 *in,
+ PathString *out) {
+ string16 in_str(in);
+#if defined(OS_WINDOWS)
+ out->assign(in_str);
+#else
+ UTF16ToUTF8(in_str.c_str(), in_str.length(), out);
+#endif
+}
+
+static inline void PathStringToString16(const PathString& in, string16* out) {
+#if defined(OS_WINDOWS)
+ out->assign(in);
+#else
+ UTF8ToUTF16(in.c_str(), in.length(), out);
+#endif
+}
+
+// When taking a name from the syncapi, append a space if it matches the
+// pattern of a server-illegal name followed by zero or more spaces.
+static void SyncAPINameToServerName(const sync_char16 *sync_api_name,
+ PathString* out) {
+ String16ToPathString(sync_api_name, out);
+ string16 sync_api_name_str(sync_api_name);
+ if (IsNameServerIllegalAfterTrimming(sync_api_name_str))
+ out->append(PSTR(" "));
+}
+
+// In the reverse direction, if a server name matches the pattern of a
+// server-illegal name followed by one or more spaces, remove the trailing
+// space.
+static void ServerNameToSyncAPIName(const PathString& server_name,
+ string16*out) {
+ string16 server_name_str;
+ PathStringToString16(server_name, &server_name_str);
+ if (IsNameServerIllegalAfterTrimming(server_name_str) &&
+ EndsWithSpace(server_name_str))
+ out->assign(server_name_str, 0, server_name_str.size() - 1);
+ else
+ out->assign(server_name_str);
+}
+
+// A UserShare encapsulates the syncable pieces that represent an authenticated
+// user and their data (share).
+// This encompasses all pieces required to build transaction objects on the
+// syncable share.
+struct UserShare {
+ // The DirectoryManager itself, which is the parent of Transactions and can
+ // be shared across multiple threads (unlike Directory).
+ scoped_ptr<DirectoryManager> dir_manager;
+
+ // The username of the sync user. This is empty until we have performed at
+ // least one successful GAIA authentication with this username, which means
+ // on first-run it is empty until an AUTH_SUCCEEDED event and on future runs
+ // it is set as soon as the client instructs us to authenticate for the last
+ // known valid user (AuthenticateForLastKnownUser()).
+ // Stored as a PathString to avoid string conversions each time a transaction
+ // is created.
+ PathString authenticated_name;
+};
+
+////////////////////////////////////
+// BaseNode member definitions.
+
+// BaseNode::BaseNodeInternal provides storage for member Get() functions that
+// need to return pointers (e.g. strings).
+struct BaseNode::BaseNodeInternal {
+ string16 url;
+ string16 title;
+ Directory::ChildHandles child_handles;
+ syncable::Blob favicon;
+};
+
+BaseNode::BaseNode() : data_(new BaseNode::BaseNodeInternal) {}
+
+BaseNode::~BaseNode() {
+ delete data_;
+}
+
+int64 BaseNode::GetParentId() const {
+ return IdToMetahandle(GetTransaction()->GetWrappedTrans(),
+ GetEntry()->Get(syncable::PARENT_ID));
+}
+
+int64 BaseNode::GetId() const {
+ return GetEntry()->Get(syncable::META_HANDLE);
+}
+
+bool BaseNode::GetIsFolder() const {
+ return GetEntry()->Get(syncable::IS_DIR);
+}
+
+const sync_char16* BaseNode::GetTitle() const {
+ // Store the string in data_ so that the returned pointer is valid.
+ ServerNameToSyncAPIName(GetEntry()->GetName().non_unique_value(),
+ &data_->title);
+ return data_->title.c_str();
+}
+
+const sync_char16* BaseNode::GetURL() const {
+ // Store the string in data_ so that the returned pointer is valid.
+ PathStringToString16(GetEntry()->Get(syncable::BOOKMARK_URL), &data_->url);
+ return data_->url.c_str();
+}
+
+const int64* BaseNode::GetChildIds(size_t* child_count) const {
+ DCHECK(child_count);
+ Directory* dir = GetTransaction()->GetLookup();
+ dir->GetChildHandles(GetTransaction()->GetWrappedTrans(),
+ GetEntry()->Get(syncable::ID), &data_->child_handles);
+
+ *child_count = data_->child_handles.size();
+ return (data_->child_handles.empty()) ? NULL : &data_->child_handles[0];
+}
+
+int64 BaseNode::GetPredecessorId() const {
+ syncable::Id id_string = GetEntry()->Get(syncable::PREV_ID);
+ if (id_string.IsRoot())
+ return kInvalidId;
+ return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
+}
+
+int64 BaseNode::GetSuccessorId() const {
+ syncable::Id id_string = GetEntry()->Get(syncable::NEXT_ID);
+ if (id_string.IsRoot())
+ return kInvalidId;
+ return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
+}
+
+int64 BaseNode::GetFirstChildId() const {
+ syncable::Directory* dir = GetTransaction()->GetLookup();
+ syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
+ syncable::Id id_string =
+ dir->GetFirstChildId(trans, GetEntry()->Get(syncable::ID));
+ if (id_string.IsRoot())
+ return kInvalidId;
+ return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
+}
+
+const unsigned char* BaseNode::GetFaviconBytes(size_t* size_in_bytes) {
+ data_->favicon = GetEntry()->Get(syncable::BOOKMARK_FAVICON);
+ *size_in_bytes = data_->favicon.size();
+ if (*size_in_bytes)
+ return &(data_->favicon[0]);
+ else
+ return NULL;
+}
+
+int64 BaseNode::GetExternalId() const {
+ return GetEntry()->Get(syncable::LOCAL_EXTERNAL_ID);
+}
+
+////////////////////////////////////
+// WriteNode member definitions
+void WriteNode::SetIsFolder(bool folder) {
+ if (entry_->Get(syncable::IS_DIR) == folder)
+ return; // Skip redundant changes.
+
+ entry_->Put(syncable::IS_DIR, folder);
+ MarkForSyncing();
+}
+
+void WriteNode::SetTitle(const sync_char16* title) {
+ PathString server_legal_name;
+ SyncAPINameToServerName(title, &server_legal_name);
+ syncable::SyncName sync_name(server_legal_name);
+ syncable::DBName db_name(sync_name.value());
+ db_name.MakeOSLegal();
+ db_name.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(),
+ entry_->Get(syncable::PARENT_ID), entry_);
+
+ syncable::Name new_name = syncable::Name::FromDBNameAndSyncName(db_name,
+ sync_name);
+ if (new_name == entry_->GetName())
+ return; // Skip redundant changes.
+
+ entry_->PutName(new_name);
+ MarkForSyncing();
+}
+
+void WriteNode::SetURL(const sync_char16* url) {
+ PathString url_string;
+ String16ToPathString(url, &url_string);
+ if (url_string == entry_->Get(syncable::BOOKMARK_URL))
+ return; // Skip redundant changes.
+
+ entry_->Put(syncable::BOOKMARK_URL, url_string);
+ MarkForSyncing();
+}
+
+void WriteNode::SetExternalId(int64 id) {
+ if (GetExternalId() != id)
+ entry_->Put(syncable::LOCAL_EXTERNAL_ID, id);
+}
+
+WriteNode::WriteNode(WriteTransaction* transaction)
+ : entry_(NULL), transaction_(transaction) {
+ DCHECK(transaction);
+}
+
+WriteNode::~WriteNode() {
+ delete entry_;
+}
+
+// Find an existing node matching the ID |id|, and bind this WriteNode
+// to it. Return true on success.
+bool WriteNode::InitByIdLookup(int64 id) {
+ DCHECK(!entry_) << "Init called twice";
+ DCHECK_NE(id, kInvalidId);
+ entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+ syncable::GET_BY_HANDLE, id);
+ return (entry_->good() && !entry_->Get(syncable::IS_DEL));
+}
+
+// Create a new node with default properties, and bind this WriteNode to it.
+// Return true on success.
+bool WriteNode::InitByCreation(const BaseNode& parent,
+ const BaseNode* predecessor) {
+ DCHECK(!entry_) << "Init called twice";
+ // |predecessor| must be a child of |parent| or NULL.
+ if (predecessor && predecessor->GetParentId() != parent.GetId()) {
+ DCHECK(false);
+ return false;
+ }
+
+ syncable::Id parent_id = parent.GetEntry()->Get(syncable::ID);
+
+ // Start out with a dummy name, but make it unique. We expect
+ // the caller to set a meaningful name after creation.
+ syncable::DBName dummy(kDefaultNameForNewNodes);
+ dummy.MakeOSLegal();
+ dummy.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(), parent_id,
+ NULL);
+
+ entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+ syncable::CREATE, parent_id, dummy);
+
+ if (!entry_->good())
+ return false;
+
+ // Entries are untitled folders by default.
+ entry_->Put(syncable::IS_DIR, true);
+ // TODO(ncarter): Naming this bit IS_BOOKMARK_OBJECT is a bit unfortunate,
+ // since the rest of SyncAPI is essentially bookmark-agnostic.
+ entry_->Put(syncable::IS_BOOKMARK_OBJECT, true);
+
+ // Now set the predecessor, which sets IS_UNSYNCED as necessary.
+ PutPredecessor(predecessor);
+
+ return true;
+}
+
+bool WriteNode::SetPosition(const BaseNode& new_parent,
+ const BaseNode* predecessor) {
+ // |predecessor| must be a child of |new_parent| or NULL.
+ if (predecessor && predecessor->GetParentId() != new_parent.GetId()) {
+ DCHECK(false);
+ return false;
+ }
+
+ syncable::Id new_parent_id = new_parent.GetEntry()->Get(syncable::ID);
+
+ // Filter out redundant changes if both the parent and the predecessor match.
+ if (new_parent_id == entry_->Get(syncable::PARENT_ID)) {
+ const syncable::Id& old = entry_->Get(syncable::PREV_ID);
+ if ((!predecessor && old.IsRoot()) ||
+ (predecessor && (old == predecessor->GetEntry()->Get(syncable::ID)))) {
+ return true;
+ }
+ }
+
+ // Discard the old database name, derive a new database name from the sync
+ // name, and make it legal and unique.
+ syncable::Name name = syncable::Name::FromSyncName(GetEntry()->GetName());
+ name.db_value().MakeOSLegal();
+ name.db_value().MakeNoncollidingForEntry(GetTransaction()->GetWrappedTrans(),
+ new_parent_id, entry_);
+
+ // Atomically change the parent and name. This will fail if it would
+ // introduce a cycle in the hierarchy.
+ if (!entry_->PutParentIdAndName(new_parent_id, name))
+ return false;
+
+ // Now set the predecessor, which sets IS_UNSYNCED as necessary.
+ PutPredecessor(predecessor);
+
+ return true;
+}
+
+const syncable::Entry* WriteNode::GetEntry() const {
+ return entry_;
+}
+
+const BaseTransaction* WriteNode::GetTransaction() const {
+ return transaction_;
+}
+
+void WriteNode::Remove() {
+ entry_->Put(syncable::IS_DEL, true);
+ MarkForSyncing();
+}
+
+void WriteNode::PutPredecessor(const BaseNode* predecessor) {
+ syncable::Id predecessor_id = predecessor ?
+ predecessor->GetEntry()->Get(syncable::ID) : syncable::Id();
+ entry_->PutPredecessor(predecessor_id);
+ // Mark this entry as unsynced, to wake up the syncer.
+ MarkForSyncing();
+}
+
+void WriteNode::SetFaviconBytes(const unsigned char* bytes,
+ size_t size_in_bytes) {
+ syncable::Blob new_favicon(bytes, bytes + size_in_bytes);
+ if (new_favicon == entry_->Get(syncable::BOOKMARK_FAVICON))
+ return; // Skip redundant changes.
+
+ entry_->Put(syncable::BOOKMARK_FAVICON, new_favicon);
+ MarkForSyncing();
+}
+
+void WriteNode::MarkForSyncing() {
+ syncable::MarkForSyncing(entry_);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// ReadNode member definitions
+ReadNode::ReadNode(const BaseTransaction* transaction)
+ : entry_(NULL), transaction_(transaction) {
+ DCHECK(transaction);
+}
+
+ReadNode::~ReadNode() {
+ delete entry_;
+}
+
+void ReadNode::InitByRootLookup() {
+ DCHECK(!entry_) << "Init called twice";
+ syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+ entry_ = new syncable::Entry(trans, syncable::GET_BY_ID, trans->root_id());
+ if (!entry_->good())
+ DCHECK(false) << "Could not lookup root node for reading.";
+}
+
+bool ReadNode::InitByIdLookup(int64 id) {
+ DCHECK(!entry_) << "Init called twice";
+ DCHECK_NE(id, kInvalidId);
+ syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+ entry_ = new syncable::Entry(trans, syncable::GET_BY_HANDLE, id);
+ if (!entry_->good())
+ return false;
+ if (entry_->Get(syncable::IS_DEL))
+ return false;
+ LOG_IF(WARNING, !entry_->Get(syncable::IS_BOOKMARK_OBJECT))
+ << "SyncAPI InitByIdLookup referencing non-bookmark object.";
+ return true;
+}
+
+const syncable::Entry* ReadNode::GetEntry() const {
+ return entry_;
+}
+
+const BaseTransaction* ReadNode::GetTransaction() const {
+ return transaction_;
+}
+
+bool ReadNode::InitByTagLookup(const sync_char16* tag) {
+ DCHECK(!entry_) << "Init called twice";
+ PathString tag_string;
+ String16ToPathString(tag, &tag_string);
+ if (tag_string.empty())
+ return false;
+ syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+ entry_ = new syncable::Entry(trans, syncable::GET_BY_TAG, tag_string);
+ if (!entry_->good())
+ return false;
+ if (entry_->Get(syncable::IS_DEL))
+ return false;
+ LOG_IF(WARNING, !entry_->Get(syncable::IS_BOOKMARK_OBJECT))
+ << "SyncAPI InitByTagLookup referencing non-bookmark object.";
+ return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// ReadTransaction member definitions
+ReadTransaction::ReadTransaction(UserShare* share)
+ : BaseTransaction(share),
+ transaction_(NULL) {
+ transaction_ = new syncable::ReadTransaction(GetLookup(), __FILE__, __LINE__);
+}
+
+ReadTransaction::~ReadTransaction() {
+ delete transaction_;
+}
+
+syncable::BaseTransaction* ReadTransaction::GetWrappedTrans() const {
+ return transaction_;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// WriteTransaction member definitions
+WriteTransaction::WriteTransaction(UserShare* share)
+ : BaseTransaction(share),
+ transaction_(NULL) {
+ transaction_ = new syncable::WriteTransaction(GetLookup(), syncable::SYNCAPI,
+ __FILE__, __LINE__);
+}
+
+WriteTransaction::~WriteTransaction() {
+ delete transaction_;
+}
+
+syncable::BaseTransaction* WriteTransaction::GetWrappedTrans() const {
+ return transaction_;
+}
+
+// An implementation of Visitor that we use to "visit" the
+// ModelSafeWorkerInterface provided by a client of this API. The object we
+// visit is responsible for calling DoWork, which will invoke Run() on it's
+// cached work closure.
+class ModelSafeWorkerVisitor : public ModelSafeWorkerInterface::Visitor {
+ public:
+ explicit ModelSafeWorkerVisitor(Closure* work) : work_(work) { }
+ virtual ~ModelSafeWorkerVisitor() { }
+
+ // ModelSafeWorkerInterface::Visitor implementation.
+ virtual void DoWork() {
+ work_->Run();
+ }
+
+ private:
+ // The work to be done. We run this on DoWork and it cleans itself up
+ // after it is run.
+ Closure* work_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelSafeWorkerVisitor);
+};
+
+// This class is declared in the cc file to allow inheritance from sync types.
+// The ModelSafeWorkerBridge is a liason between a syncapi-client defined
+// ModelSafeWorkerInterface and the actual ModelSafeWorker used by the Syncer
+// for the current SyncManager.
+class ModelSafeWorkerBridge : public browser_sync::ModelSafeWorker {
+ public:
+ // Takes ownership of |worker|.
+ explicit ModelSafeWorkerBridge(ModelSafeWorkerInterface* worker)
+ : worker_(worker) {
+ }
+ virtual ~ModelSafeWorkerBridge() { }
+
+ // Overriding ModelSafeWorker.
+ virtual void DoWorkAndWaitUntilDone(Closure* work) {
+ // When the syncer has work to be done, we forward it to our worker who
+ // will invoke DoWork on |visitor| when appropriate (from model safe
+ // thread).
+ ModelSafeWorkerVisitor visitor(work);
+ worker_->CallDoWorkFromModelSafeThreadAndWait(&visitor);
+ }
+
+ private:
+ // The worker that we can forward work requests to, to ensure the work
+ // is performed on an appropriate model safe thread.
+ scoped_ptr<ModelSafeWorkerInterface> worker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelSafeWorkerBridge);
+};
+
+// A GaiaAuthenticator that uses HttpPostProviders instead of CURL.
+class BridgedGaiaAuthenticator : public browser_sync::GaiaAuthenticator {
+ public:
+ BridgedGaiaAuthenticator(const string& user_agent, const string& service_id,
+ const string& gaia_url,
+ HttpPostProviderFactory* factory)
+ : GaiaAuthenticator(user_agent, service_id, gaia_url),
+ gaia_source_(user_agent), post_factory_(factory) {
+ }
+
+ virtual ~BridgedGaiaAuthenticator() {
+ }
+
+ virtual bool Post(const GURL& url, const string& post_body,
+ unsigned long* response_code, string* response_body) {
+ string connection_url = "https://";
+ connection_url += url.host();
+ connection_url += url.path();
+ HttpPostProviderInterface* http = post_factory_->Create();
+ http->SetUserAgent(gaia_source_.c_str());
+ // SSL is on 443 for Gaia Posts always.
+ http->SetURL(connection_url.c_str(), kSSLPort);
+ http->SetPostPayload("application/x-www-form-urlencoded",
+ post_body.length(), post_body.c_str());
+
+ int os_error_code = 0;
+ int int_response_code = 0;
+ if (!http->MakeSynchronousPost(&os_error_code, &int_response_code)) {
+ LOG(INFO) << "Http POST failed, error returns: " << os_error_code;
+ return false;
+ }
+ *response_code = static_cast<int>(int_response_code);
+ response_body->assign(http->GetResponseContent(),
+ http->GetResponseContentLength());
+ post_factory_->Destroy(http);
+ return true;
+ }
+ private:
+ const std::string gaia_source_;
+ scoped_ptr<HttpPostProviderFactory> post_factory_;
+ DISALLOW_COPY_AND_ASSIGN(BridgedGaiaAuthenticator);
+};
+
+//////////////////////////////////////////////////////////////////////////
+// SyncManager's implementation: SyncManager::SyncInternal
+class SyncManager::SyncInternal {
+ public:
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+ explicit SyncInternal(SyncManager* sync_manager)
+ : observer_(NULL),
+ command_channel_(0),
+ auth_problem_(AUTH_PROBLEM_NONE),
+ sync_manager_(sync_manager),
+ notification_pending_(false),
+ initialized_(false) {
+ }
+
+ ~SyncInternal() { }
+
+ bool Init(const PathString& database_location,
+ const std::string& sync_server_and_path,
+ int port,
+ const char* gaia_service_id,
+ const char* gaia_source,
+ bool use_ssl,
+ HttpPostProviderFactory* post_factory,
+ HttpPostProviderFactory* auth_post_factory,
+ ModelSafeWorkerInterface* model_safe_worker,
+ bool attempt_last_user_authentication,
+ const char* user_agent);
+
+ // Tell sync engine to submit credentials to GAIA for verification and start
+ // the syncing process on success. Successful GAIA authentication will kick
+ // off the following chain of events:
+ // 1. Cause sync engine to open the syncer database.
+ // 2. Trigger the AuthWatcher to create a Syncer for the directory and call
+ // SyncerThread::SyncDirectory; the SyncerThread will block until (4).
+ // 3. Tell the ServerConnectionManager to pass the newly received GAIA auth
+ // token to a sync server to obtain a sync token.
+ // 4. On receipt of this token, the ServerConnectionManager broadcasts
+ // a server-reachable event, which will unblock the SyncerThread,
+ // and the rest is the future.
+ //
+ // If authentication fails, an event will be broadcast all the way up to
+ // the SyncManager::Observer. It may, in turn, decide to try again with new
+ // credentials. Calling this method again is the appropriate course of action
+ // to "retry".
+ void Authenticate(const std::string& username, const std::string& password);
+
+ // Call periodically from a database-safe thread to persist recent changes
+ // to the syncapi model.
+ void SaveChanges();
+
+ // This listener is called upon completion of a syncable transaction, and
+ // builds the list of sync-engine initiated changes that will be forwarded to
+ // the SyncManager's Observers.
+ void HandleChangeEvent(const syncable::DirectoryChangeEvent& event);
+ void HandleTransactionCompleteChangeEvent(
+ const syncable::DirectoryChangeEvent& event);
+ void HandleCalculateChangesChangeEventFromSyncApi(
+ const syncable::DirectoryChangeEvent& event);
+ void HandleCalculateChangesChangeEventFromSyncer(
+ const syncable::DirectoryChangeEvent& event);
+
+ // This listener is called by the syncer channel for all syncer events.
+ void HandleSyncerEvent(const SyncerEvent& event);
+
+ // We have a direct hookup to the authwatcher to be notified for auth failures
+ // on startup, to serve our UI needs.
+ void HandleAuthWatcherEvent(const AuthWatcherEvent& event);
+
+ // Accessors for the private members.
+ DirectoryManager* dir_manager() { return share_.dir_manager.get(); }
+ SyncAPIServerConnectionManager* connection_manager() {
+ return connection_manager_.get();
+ }
+ SyncerThread* syncer_thread() { return syncer_thread_.get(); }
+ TalkMediator* talk_mediator() { return talk_mediator_.get(); }
+ AuthWatcher* auth_watcher() { return auth_watcher_.get(); }
+ AllStatus* allstatus() { return &allstatus_; }
+ void set_observer(Observer* observer) { observer_ = observer; }
+ UserShare* GetUserShare() { return &share_; }
+
+ // Return the currently active (validated) username as a PathString for
+ // use with syncable types.
+ const PathString& username_for_share() const {
+ return share_.authenticated_name;
+ }
+
+ // Returns the authenticated username from our AuthWatcher in UTF8.
+ // See SyncManager::GetAuthenticatedUsername for details.
+ const char* GetAuthenticatedUsername();
+
+ // Note about SyncManager::Status implementation: Status is a trimmed
+ // down AllStatus::Status, augmented with authentication failure information
+ // gathered from the internal AuthWatcher. The sync UI itself hooks up to
+ // various sources like the AuthWatcher individually, but with syncapi we try
+ // to keep everything status-related in one place. This means we have to
+ // privately manage state about authentication failures, and whenever the
+ // status or status summary is requested we aggregate this state with
+ // AllStatus::Status information.
+ Status ComputeAggregatedStatus();
+ Status::Summary ComputeAggregatedStatusSummary();
+
+ // See SyncManager::SetupForTestMode for information.
+ void SetupForTestMode(const sync_char16* test_username);
+
+ // See SyncManager::Shutdown for information.
+ void Shutdown();
+
+ // Whether we're initialized to the point of being able to accept changes
+ // (and hence allow transaction creation). See initialized_ for details.
+ bool initialized() const {
+ MutexLock lock(&initialized_mutex_);
+ return initialized_;
+ }
+ private:
+ // Try to authenticate using persisted credentials from a previous successful
+ // authentication. If no such credentials exist, calls OnAuthError on
+ // the client to collect credentials. Otherwise, there exist local
+ // credentials that were once used for a successful auth, so we'll try to
+ // re-use these.
+ // Failure of that attempt will be communicated as normal using
+ // OnAuthError. Since this entry point will bypass normal GAIA
+ // authentication and try to authenticate directly with the sync service
+ // using a cached token, authentication failure will generally occur due to
+ // expired credentials, or possibly because of a password change.
+ void AuthenticateForLastKnownUser();
+
+ // Helper to call OnAuthError when no authentication credentials
+ // are available.
+ void RaiseAuthNeededEvent();
+
+ // Helper to set initialized_ to true and raise an event to clients to
+ // notify that initialization is complete and it is safe to send us changes.
+ // If already initialized, this is a no-op.
+ void MarkAndNotifyInitializationComplete();
+
+ // Determine if the parents or predecessors differ between the old and new
+ // versions of an entry stored in |a| and |b|. Note that a node's index
+ // may change without its NEXT_ID changing if the node at NEXT_ID also
+ // moved (but the relative order is unchanged). To handle such cases,
+ // we rely on the caller to treat a position update on any sibling as
+ // updating the positions of all siblings.
+ static bool BookmarkPositionsDiffer(const syncable::EntryKernel& a,
+ const syncable::Entry& b) {
+ if (a.ref(syncable::NEXT_ID) != b.Get(syncable::NEXT_ID))
+ return true;
+ if (a.ref(syncable::PARENT_ID) != b.Get(syncable::PARENT_ID))
+ return true;
+ return false;
+ }
+
+ // Determine if any of the fields made visible to clients of the Sync API
+ // differ between the versions of an entry stored in |a| and |b|.
+ // A return value of false means that it should be OK to ignore this change.
+ static bool BookmarkPropertiesDiffer(const syncable::EntryKernel& a,
+ const syncable::Entry& b) {
+ if (a.ref(syncable::NAME) != b.Get(syncable::NAME))
+ return true;
+ if (a.ref(syncable::UNSANITIZED_NAME) != b.Get(syncable::UNSANITIZED_NAME))
+ return true;
+ if (a.ref(syncable::IS_DIR) != b.Get(syncable::IS_DIR))
+ return true;
+ if (a.ref(syncable::BOOKMARK_URL) != b.Get(syncable::BOOKMARK_URL))
+ return true;
+ if (a.ref(syncable::BOOKMARK_FAVICON) != b.Get(syncable::BOOKMARK_FAVICON))
+ return true;
+ if (BookmarkPositionsDiffer(a, b))
+ return true;
+ return false;
+ }
+
+ // We couple the DirectoryManager and username together in a UserShare member
+ // so we can return a handle to share_ to clients of the API for use when
+ // constructing any transaction type.
+ UserShare share_;
+
+ // A cached string for callers of GetAuthenticatedUsername. We just store the
+ // last result of auth_watcher_->email() here and change it on future calls,
+ // because callers of GetAuthenticatedUsername are supposed to copy the value
+ // if they need it for longer than the scope of the call.
+ std::string cached_auth_watcher_email_;
+
+ // A wrapper around a sqlite store used for caching authentication data,
+ // last user information, current sync-related URLs, and more.
+ scoped_ptr<UserSettings> user_settings_;
+
+ // Observer registered via SetObserver/RemoveObserver.
+ // WARNING: This can be NULL!
+ Observer* observer_;
+
+ // A sink for client commands from the syncer needed to create a SyncerThread.
+ ClientCommandChannel command_channel_;
+
+ // The ServerConnectionManager used to abstract communication between
+ // the client (the Syncer) and the sync server.
+ scoped_ptr<SyncAPIServerConnectionManager> connection_manager_;
+
+ // The thread that runs the Syncer. Needs to be explicitly Start()ed.
+ scoped_ptr<SyncerThread> syncer_thread_;
+
+ // Notification (xmpp) handler.
+ scoped_ptr<TalkMediator> talk_mediator_;
+
+ // A multi-purpose status watch object that aggregates stats from various
+ // sync components.
+ AllStatus allstatus_;
+
+ // AuthWatcher kicks off the authentication process and follows it through
+ // phase 1 (GAIA) to phase 2 (sync engine). As part of this work it determines
+ // the initial connectivity and causes the server connection event to be
+ // broadcast, which signals the syncer thread to start syncing.
+ // It has a heavy duty constructor requiring boilerplate so we heap allocate.
+ scoped_ptr<AuthWatcher> auth_watcher_;
+
+ // A store of change records produced by HandleChangeEvent during the
+ // CALCULATE_CHANGES step, and to be processed, and forwarded to the
+ // observer, by HandleChangeEvent during the TRANSACTION_COMPLETE step.
+ ChangeReorderBuffer change_buffer_;
+
+ // The event listener hookup that is registered for HandleChangeEvent.
+ scoped_ptr<EventListenerHookup> dir_change_hookup_;
+
+ // The event listener hookup registered for HandleSyncerEvent.
+ scoped_ptr<EventListenerHookup> syncer_event_;
+
+ // The event listener hookup registered for HandleAuthWatcherEvent.
+ scoped_ptr<EventListenerHookup> authwatcher_hookup_;
+
+ // Our cache of a recent authentication problem. If no authentication problem
+ // occurred, or if the last problem encountered has been cleared (by a
+ // subsequent AuthWatcherEvent), this is set to AUTH_PROBLEM_NONE.
+ AuthProblem auth_problem_;
+
+ // The sync dir_manager to which we belong.
+ SyncManager* const sync_manager_;
+
+ // Parameters for our thread listening to network status changes.
+ ThreadParams address_watch_params_;
+ thread_handle address_watch_thread_;
+
+ // True if the next SyncCycle should notify peers of an update.
+ bool notification_pending_;
+
+ // Set to true once Init has been called, and we know of an authenticated
+ // valid) username either from a fresh authentication attempt (as in
+ // first-use case) or from a previous attempt stored in our UserSettings
+ // (as in the steady-state), and the syncable::Directory has been opened,
+ // meaning we are ready to accept changes. Protected by initialized_mutex_
+ // as it can get read/set by both the SyncerThread and the AuthWatcherThread.
+ bool initialized_;
+ mutable PThreadMutex initialized_mutex_;
+};
+
+SyncManager::SyncManager() {
+ data_ = new SyncInternal(this);
+}
+
+bool SyncManager::Init(const sync_char16* database_location,
+ const char* sync_server_and_path,
+ int sync_server_port,
+ const char* gaia_service_id,
+ const char* gaia_source,
+ bool use_ssl,
+ HttpPostProviderFactory* post_factory,
+ HttpPostProviderFactory* auth_post_factory,
+ ModelSafeWorkerInterface* model_safe_worker,
+ bool attempt_last_user_authentication,
+ const char* user_agent) {
+ DCHECK(database_location);
+ DCHECK(post_factory);
+
+ PathString db_path;
+ String16ToPathString(database_location, &db_path);
+ string server_string(sync_server_and_path);
+ return data_->Init(db_path,
+ server_string,
+ sync_server_port,
+ gaia_service_id,
+ gaia_source,
+ use_ssl,
+ post_factory,
+ auth_post_factory,
+ model_safe_worker,
+ attempt_last_user_authentication,
+ user_agent);
+}
+
+void SyncManager::Authenticate(const char* username, const char* password) {
+ data_->Authenticate(std::string(username), std::string(password));
+}
+
+const char* SyncManager::GetAuthenticatedUsername() {
+ if (!data_)
+ return NULL;
+ return data_->GetAuthenticatedUsername();
+}
+
+const char* SyncManager::SyncInternal::GetAuthenticatedUsername() {
+ cached_auth_watcher_email_ = browser_sync::ToUTF8(
+ username_for_share()).get_string();
+ return cached_auth_watcher_email_.c_str();
+}
+
+bool SyncManager::SyncInternal::Init(
+ const PathString& database_location,
+ const std::string& sync_server_and_path,
+ int port,
+ const char* gaia_service_id,
+ const char* gaia_source,
+ bool use_ssl, HttpPostProviderFactory* post_factory,
+ HttpPostProviderFactory* auth_post_factory,
+ ModelSafeWorkerInterface* model_safe_worker,
+ bool attempt_last_user_authentication,
+ const char* user_agent) {
+
+ if (!g_log_files_initialized) {
+ // TODO(timsteele): Call InitLogFiles() or equivalent.
+ g_log_files_initialized = true;
+ }
+
+ // Set up UserSettings, creating the db if necessary. We need this to
+ // instantiate a URLFactory to give to the Syncer.
+ PathString settings_db_file = AppendSlash(database_location) +
+ kBookmarkSyncUserSettingsDatabase;
+ user_settings_.reset(new UserSettings());
+ if (!user_settings_->Init(settings_db_file))
+ return false;
+
+ share_.dir_manager.reset(new DirectoryManager(database_location));
+
+ string client_id = user_settings_->GetClientId();
+ connection_manager_.reset(new SyncAPIServerConnectionManager(
+ sync_server_and_path, port, use_ssl, user_agent, client_id));
+
+ // TODO(timsteele): This is temporary windows crap needed to listen for
+ // network status changes. We should either pump this up to the embedder to
+ // do (and call us in CheckServerReachable, for ex), or at least make this
+ // platform independent in here.
+ // TODO(ncarter): When this gets cleaned up, the implementation of
+ // CreatePThread can also be removed.
+#if defined(OS_WINDOWS)
+ HANDLE exit_flag = CreateEvent(NULL, TRUE /*manual reset*/, FALSE, NULL);
+ address_watch_params_.exit_flag = exit_flag;
+#endif
+ address_watch_params_.conn_mgr = connection_manager();
+ address_watch_thread_ = CreatePThread(AddressWatchThread,
+ &address_watch_params_);
+ DCHECK(NULL != address_watch_thread_);
+
+ // Hand over the bridged POST factory to be owned by the connection
+ // dir_manager.
+ connection_manager()->SetHttpPostProviderFactory(post_factory);
+
+ // Watch various objects for aggregated status.
+ allstatus()->WatchConnectionManager(connection_manager());
+
+ std::string gaia_url = browser_sync::kGaiaUrl;
+ const char* service_id = gaia_service_id ?
+ gaia_service_id : SYNC_SERVICE_NAME;
+
+ talk_mediator_.reset(new TalkMediatorImpl());
+ allstatus()->WatchTalkMediator(talk_mediator());
+
+ BridgedGaiaAuthenticator* gaia_auth = new BridgedGaiaAuthenticator(
+ gaia_source, service_id, gaia_url, auth_post_factory);
+
+ auth_watcher_.reset(new AuthWatcher(dir_manager(),
+ connection_manager(),
+ &allstatus_,
+ gaia_source,
+ service_id,
+ gaia_url,
+ user_settings_.get(),
+ gaia_auth,
+ talk_mediator()));
+
+ talk_mediator()->WatchAuthWatcher(auth_watcher());
+ allstatus()->WatchAuthWatcher(auth_watcher());
+ authwatcher_hookup_.reset(NewEventListenerHookup(auth_watcher_->channel(),
+ this, &SyncInternal::HandleAuthWatcherEvent));
+
+ // Tell the SyncerThread to use the ModelSafeWorker for bookmark model work.
+ // We set up both sides of the "bridge" here, with the ModelSafeWorkerBridge
+ // on the Syncer side, and |model_safe_worker| on the API client side.
+ ModelSafeWorkerBridge* worker = new ModelSafeWorkerBridge(model_safe_worker);
+
+ syncer_thread_.reset(new SyncerThread(&command_channel_,
+ dir_manager(),
+ connection_manager(),
+ &allstatus_,
+ worker));
+ syncer_thread()->WatchTalkMediator(talk_mediator());
+ allstatus()->WatchSyncerThread(syncer_thread());
+
+ syncer_thread()->Start(); // Start the syncer thread. This won't actually
+ // result in any syncing until at least the
+ // DirectoryManager broadcasts the OPENED event,
+ // and a valid server connection is detected.
+
+ if (attempt_last_user_authentication)
+ AuthenticateForLastKnownUser();
+ return true;
+}
+
+void SyncManager::SyncInternal::MarkAndNotifyInitializationComplete() {
+ // There is only one real time we need this mutex. If we get an auth
+ // success, and before the initial sync ends we get an auth failure. In this
+ // case we'll be listening to both the AuthWatcher and Syncer, and it's a race
+ // between their respective threads to call MarkAndNotify. We need to make
+ // sure the observer is notified once and only once.
+ {
+ MutexLock lock(&initialized_mutex_);
+ if (initialized_)
+ return;
+ initialized_ = true;
+ }
+
+ // Notify that initialization is complete.
+ if (observer_)
+ observer_->OnInitializationComplete();
+}
+
+void SyncManager::SyncInternal::Authenticate(const std::string& username,
+ const std::string& password) {
+ DCHECK(username_for_share().empty() ||
+ (username == browser_sync::ToUTF8(username_for_share()).get_string()))
+ << "Username change from valid username detected";
+ if (allstatus()->status().authenticated)
+ return;
+ if (password.empty()) {
+ // TODO(timsteele): Seems like this shouldn't be needed, but auth_watcher
+ // currently drops blank password attempts on the floor and doesn't update
+ // state; it only LOGs an error in this case. We want to make sure we set
+ // our AuthProblem state to denote an error.
+ RaiseAuthNeededEvent();
+ }
+ auth_watcher()->Authenticate(username, password, true);
+}
+
+void SyncManager::SyncInternal::AuthenticateForLastKnownUser() {
+ std::string username;
+ std::string auth_token;
+ if (!(auth_watcher()->settings()->GetLastUserAndServiceToken(
+ SYNC_SERVICE_NAME, &username, &auth_token))) {
+ RaiseAuthNeededEvent();
+ return;
+ }
+
+ browser_sync::ToPathString s(username);
+ if (s.good()) {
+ share_.authenticated_name = s.get_string16();
+ } else {
+ RaiseAuthNeededEvent();
+ return;
+ }
+
+ // We optimize by opening the directory before the "fresh" authentication
+ // attempt completes so that we can immediately begin processing changes.
+ if (!dir_manager()->Open(username_for_share())) {
+ DCHECK(false) << "Had last known user but could not open directory";
+ return;
+ }
+
+ // Set the sync data type so that the server only sends us bookmarks
+ // changes.
+ {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup failed on successfully opened dir";
+ return;
+ }
+ if (lookup->initial_sync_ended())
+ MarkAndNotifyInitializationComplete();
+ }
+
+ auth_watcher()->AuthenticateWithToken(username, auth_token);
+}
+
+void SyncManager::SyncInternal::RaiseAuthNeededEvent() {
+ auth_problem_ = AUTH_PROBLEM_INVALID_GAIA_CREDENTIALS;
+ if (observer_)
+ observer_->OnAuthProblem(auth_problem_);
+}
+
+SyncManager::~SyncManager() {
+ delete data_;
+}
+
+void SyncManager::SetObserver(Observer* observer) {
+ data_->set_observer(observer);
+}
+
+void SyncManager::RemoveObserver() {
+ data_->set_observer(NULL);
+}
+
+void SyncManager::Shutdown() {
+ data_->Shutdown();
+}
+
+void SyncManager::SyncInternal::Shutdown() {
+ // First reset the AuthWatcher in case an auth attempt is in progress so that
+ // it terminates gracefully before we shutdown and close other components.
+ // Otherwise the attempt can complete after we've closed the directory, for
+ // example, and cause initialization to continue, which is bad.
+ auth_watcher_.reset();
+
+ if (syncer_thread()) {
+ if (!syncer_thread()->Stop(kThreadExitTimeoutMsec))
+ DCHECK(false) << "Unable to stop the syncer, it won't be happy...";
+ }
+
+ // Shutdown the xmpp buzz connection.
+ LOG(INFO) << "P2P: Mediator logout started.";
+ if (talk_mediator()) {
+ talk_mediator()->Logout();
+ }
+ LOG(INFO) << "P2P: Mediator logout completed.";
+
+ if (dir_manager()) {
+ dir_manager()->FinalSaveChangesForAll();
+ dir_manager()->Close(username_for_share());
+ }
+
+ // Reset the DirectoryManager and UserSettings so they relinquish sqlite
+ // handles to backing files.
+ share_.dir_manager.reset();
+ user_settings_.reset();
+
+ // We don't want to process any more events.
+ dir_change_hookup_.reset();
+ syncer_event_.reset();
+ authwatcher_hookup_.reset();
+
+#if defined(OS_WINDOWS)
+ // Stop the address watch thread by signaling the exit flag.
+ // TODO(timsteele): Same as todo in Init().
+ SetEvent(address_watch_params_.exit_flag);
+ const DWORD wait_result = WaitForSingleObject(address_watch_thread_,
+ kThreadExitTimeoutMsec);
+ LOG_IF(ERROR, WAIT_FAILED == wait_result) << "Waiting for addr change thread "
+ "to exit failed. GetLastError(): " << hex << GetLastError();
+ LOG_IF(ERROR, WAIT_TIMEOUT == wait_result) << "Thread exit timeout expired";
+ CloseHandle(address_watch_params_.exit_flag);
+#endif
+}
+
+// Listen to model changes, filter out ones initiated by the sync API, and
+// saves the rest (hopefully just backend Syncer changes resulting from
+// ApplyUpdates) to data_->changelist.
+void SyncManager::SyncInternal::HandleChangeEvent(
+ const syncable::DirectoryChangeEvent& event) {
+ if (event.todo == syncable::DirectoryChangeEvent::TRANSACTION_COMPLETE) {
+ HandleTransactionCompleteChangeEvent(event);
+ return;
+ } else if (event.todo == syncable::DirectoryChangeEvent::CALCULATE_CHANGES) {
+ if (event.writer == syncable::SYNCAPI) {
+ HandleCalculateChangesChangeEventFromSyncApi(event);
+ return;
+ }
+ HandleCalculateChangesChangeEventFromSyncer(event);
+ return;
+ } else if (event.todo == syncable::DirectoryChangeEvent::SHUTDOWN) {
+ dir_change_hookup_.reset();
+ }
+}
+
+void SyncManager::SyncInternal::HandleTransactionCompleteChangeEvent(
+ const syncable::DirectoryChangeEvent& event) {
+ DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::TRANSACTION_COMPLETE);
+ // This notification happens immediately after a syncable WriteTransaction
+ // falls out of scope.
+ if (change_buffer_.IsEmpty() || !observer_)
+ return;
+
+ ReadTransaction trans(GetUserShare());
+ vector<ChangeRecord> ordered_changes;
+ change_buffer_.GetAllChangesInTreeOrder(&trans, &ordered_changes);
+ if (!ordered_changes.empty()) {
+ observer_->OnChangesApplied(&trans, &ordered_changes[0],
+ ordered_changes.size());
+ }
+ change_buffer_.Clear();
+}
+
+void SyncManager::SyncInternal::HandleCalculateChangesChangeEventFromSyncApi(
+ const syncable::DirectoryChangeEvent& event) {
+ // We have been notified about a user action changing the bookmark model.
+ DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::CALCULATE_CHANGES);
+ DCHECK_EQ(event.writer, syncable::SYNCAPI);
+ LOG_IF(WARNING, !change_buffer_.IsEmpty()) <<
+ "CALCULATE_CHANGES called with unapplied old changes.";
+
+ bool exists_unsynced_items = false;
+ for (syncable::OriginalEntries::const_iterator i = event.originals->begin();
+ i != event.originals->end() && !exists_unsynced_items;
+ ++i) {
+ int64 id = i->ref(syncable::META_HANDLE);
+ syncable::Entry e(event.trans, syncable::GET_BY_HANDLE, id);
+ DCHECK(e.good());
+
+ if (e.IsRoot()) {
+ // Ignore root object, should it ever change.
+ continue;
+ } else if (!e.Get(syncable::IS_BOOKMARK_OBJECT)) {
+ // Ignore non-bookmark objects.
+ continue;
+ } else if (e.Get(syncable::IS_UNSYNCED)) {
+ // Unsynced items will cause us to nudge the the syncer.
+ exists_unsynced_items = true;
+ }
+ }
+ if (exists_unsynced_items && syncer_thread()) {
+ syncer_thread()->NudgeSyncer(200, SyncerThread::kLocal); // 1/5 a second.
+ }
+}
+
+void SyncManager::SyncInternal::HandleCalculateChangesChangeEventFromSyncer(
+ const syncable::DirectoryChangeEvent& event) {
+ // We only expect one notification per sync step, so change_buffer_ should
+ // contain no pending entries.
+ DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::CALCULATE_CHANGES);
+ DCHECK_EQ(event.writer, syncable::SYNCER);
+ LOG_IF(WARNING, !change_buffer_.IsEmpty()) <<
+ "CALCULATE_CHANGES called with unapplied old changes.";
+
+ for (syncable::OriginalEntries::const_iterator i = event.originals->begin();
+ i != event.originals->end(); ++i) {
+ int64 id = i->ref(syncable::META_HANDLE);
+ syncable::Entry e(event.trans, syncable::GET_BY_HANDLE, id);
+ bool existed_before = !i->ref(syncable::IS_DEL);
+ bool exists_now = e.good() && !e.Get(syncable::IS_DEL);
+ DCHECK(e.good());
+
+ // Ignore root object, should it ever change.
+ if (e.IsRoot())
+ continue;
+ // Ignore non-bookmark objects.
+ if (!e.Get(syncable::IS_BOOKMARK_OBJECT))
+ continue;
+
+ if (exists_now && !existed_before)
+ change_buffer_.PushAddedItem(id);
+ else if (!exists_now && existed_before)
+ change_buffer_.PushDeletedItem(id);
+ else if (exists_now && existed_before && BookmarkPropertiesDiffer(*i, e))
+ change_buffer_.PushUpdatedItem(id, BookmarkPositionsDiffer(*i, e));
+ }
+}
+
+SyncManager::Status::Summary
+SyncManager::SyncInternal::ComputeAggregatedStatusSummary() {
+ switch (allstatus()->status().icon) {
+ case AllStatus::OFFLINE:
+ return Status::OFFLINE;
+ case AllStatus::OFFLINE_UNSYNCED:
+ return Status::OFFLINE_UNSYNCED;
+ case AllStatus::SYNCING:
+ return Status::SYNCING;
+ case AllStatus::READY:
+ return Status::READY;
+ case AllStatus::CONFLICT:
+ return Status::CONFLICT;
+ case AllStatus::OFFLINE_UNUSABLE:
+ return Status::OFFLINE_UNUSABLE;
+ default:
+ return Status::INVALID;
+ }
+}
+
+SyncManager::Status SyncManager::SyncInternal::ComputeAggregatedStatus() {
+ Status return_status =
+ { ComputeAggregatedStatusSummary(),
+ allstatus()->status().authenticated,
+ allstatus()->status().server_up,
+ allstatus()->status().server_reachable,
+ allstatus()->status().server_broken,
+ allstatus()->status().notifications_enabled,
+ allstatus()->status().notifications_received,
+ allstatus()->status().notifications_sent,
+ allstatus()->status().unsynced_count,
+ allstatus()->status().conflicting_count,
+ allstatus()->status().syncing,
+ allstatus()->status().initial_sync_ended,
+ allstatus()->status().syncer_stuck,
+ allstatus()->status().updates_available,
+ allstatus()->status().updates_received,
+ allstatus()->status().disk_full,
+ allstatus()->status().max_consecutive_errors};
+ return return_status;
+}
+
+void SyncManager::SyncInternal::HandleSyncerEvent(const SyncerEvent& event) {
+ if (!initialized()) {
+ // We get here if A) We have successfully authenticated at least once (
+ // because we attach HandleSyncerEvent only once we receive notification of
+ // successful authentication [locally or otherwise]), but B) the initial
+ // sync had not completed at that time.
+ if (SyncerStatus(event.last_session).IsShareUsable())
+ MarkAndNotifyInitializationComplete();
+ return;
+ }
+
+ if (!observer_)
+ return;
+
+ // Only send an event if this is due to a cycle ending and this cycle
+ // concludes a canonical "sync" process; that is, based on what is known
+ // locally we are "all happy" and up-to-date. There may be new changes on
+ // the server, but we'll get them on a subsequent sync.
+ //
+ // Notifications are sent at the end of every sync cycle, regardless of
+ // whether we should sync again.
+ if (event.what_happened == SyncerEvent::SYNC_CYCLE_ENDED) {
+ if (!event.last_session->ShouldSyncAgain()) {
+ observer_->OnSyncCycleCompleted();
+ }
+
+ // TODO(chron): Consider changing this back to track ShouldSyncAgain
+ // Only notify peers if a commit has occurred and change the bookmark model.
+ if (event.last_session && event.last_session->items_committed()) {
+ notification_pending_ = true;
+ }
+
+ // SyncCycles are started by the following events: creation of the syncer,
+ // (re)connection to buzz, local changes, peer notifications of updates.
+ // Peers will be notified of changes made while there is no buzz connection
+ // immediately after a connection has been re-established.
+ // the next sync cycle.
+ // TODO(brg): Move this to TalkMediatorImpl as a SyncerThread event hook.
+ if (notification_pending_ && talk_mediator()) {
+ LOG(INFO) << "Sending XMPP notification...";
+ bool success = talk_mediator()->SendNotification();
+ if (success) {
+ notification_pending_ = false;
+ }
+ } else {
+ LOG(INFO) << "Didn't send XMPP notification!"
+ << " event.last_session: " << event.last_session
+ << " event.last_session->items_committed(): "
+ << event.last_session->items_committed()
+ << " talk_mediator(): " << talk_mediator();
+ }
+ }
+}
+
+void SyncManager::SyncInternal::HandleAuthWatcherEvent(
+ const AuthWatcherEvent& event) {
+ // We don't care about an authentication attempt starting event, and we
+ // don't want to reset our state to AUTH_PROBLEM_NONE because the fact that
+ // an _attempt_ is starting doesn't change the fact that we have an auth
+ // problem.
+ if (event.what_happened == AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START)
+ return;
+ // We clear our last auth problem cache on new auth watcher events, and only
+ // set it to indicate a problem state for certain AuthWatcherEvent types.
+ auth_problem_ = AUTH_PROBLEM_NONE;
+ switch (event.what_happened) {
+ case AuthWatcherEvent::AUTH_SUCCEEDED:
+ // We now know the supplied username and password were valid. If this
+ // wasn't the first sync, authenticated_name should already be assigned.
+ if (username_for_share().empty()) {
+ browser_sync::ToPathString s(event.user_email);
+ if (s.good())
+ share_.authenticated_name = s.get_string16();
+ }
+
+ DCHECK(LowerCaseEqualsASCII(browser_sync::ToUTF8(
+ username_for_share()).get_string(),
+ StringToLowerASCII(event.user_email).c_str()))
+ << "username_for_share= "
+ << browser_sync::ToUTF8(username_for_share())
+ << ", event.user_email= " << event.user_email;
+
+ if (observer_)
+ observer_->OnAuthProblem(AUTH_PROBLEM_NONE);
+
+ // Hook up the DirectoryChangeEvent listener, HandleChangeEvent.
+ {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup creation failed; unable to hook "
+ << "up directory change event listener!";
+ return;
+ }
+ dir_change_hookup_.reset(NewEventListenerHookup(
+ lookup->changes_channel(), this,
+ &SyncInternal::HandleChangeEvent));
+
+ if (lookup->initial_sync_ended())
+ MarkAndNotifyInitializationComplete();
+ }
+ {
+ // Start watching the syncer channel directly here.
+ DCHECK(syncer_thread() != NULL);
+ syncer_event_.reset(NewEventListenerHookup(syncer_thread()->channel(),
+ this, &SyncInternal::HandleSyncerEvent));
+ }
+ return;
+ // Authentication failures translate to Status::AuthProblem events.
+ case AuthWatcherEvent::GAIA_AUTH_FAILED: // Invalid GAIA credentials.
+ case AuthWatcherEvent::SERVICE_AUTH_FAILED: // Expired GAIA credentials.
+ auth_problem_ = AUTH_PROBLEM_INVALID_GAIA_CREDENTIALS;
+ break;
+ case AuthWatcherEvent::SERVICE_USER_NOT_SIGNED_UP:
+ auth_problem_ = AUTH_PROBLEM_USER_NOT_SIGNED_UP;
+ break;
+ case AuthWatcherEvent::SERVICE_CONNECTION_FAILED:
+ auth_problem_ = AUTH_PROBLEM_CONNECTION_FAILED;
+ break;
+ default: // We don't care about the many other AuthWatcherEvent types.
+ return;
+ }
+
+ // Fire notification that the status changed due to an authentication error.
+ if (observer_)
+ observer_->OnAuthProblem(auth_problem_);
+}
+
+SyncManager::Status::Summary SyncManager::GetStatusSummary() const {
+ return data_->ComputeAggregatedStatusSummary();
+}
+
+SyncManager::Status SyncManager::GetDetailedStatus() const {
+ return data_->ComputeAggregatedStatus();
+}
+
+SyncManager::SyncInternal* SyncManager::GetImpl() const { return data_; }
+
+void SyncManager::SaveChanges() {
+ data_->SaveChanges();
+}
+
+void SyncManager::SyncInternal::SaveChanges() {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup creation failed; Unable to SaveChanges";
+ return;
+ }
+ lookup->SaveChanges();
+}
+
+void SyncManager::SetupForTestMode(const sync_char16* test_username) {
+ DCHECK(data_) << "SetupForTestMode requires initialization";
+ data_->SetupForTestMode(test_username);
+}
+
+void SyncManager::SyncInternal::SetupForTestMode(
+ const sync_char16* test_username) {
+ String16ToPathString(test_username, &share_.authenticated_name);
+
+ if (!dir_manager()->Open(username_for_share()))
+ DCHECK(false) << "Could not open directory when running in test mode";
+
+ // Hook up the DirectoryChangeEvent listener, HandleChangeEvent.
+ {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup creation failed; unable to hook "
+ << "up directory change event listener!";
+ return;
+ }
+ dir_change_hookup_.reset(NewEventListenerHookup(
+ lookup->changes_channel(), this,
+ &SyncInternal::HandleChangeEvent));
+ }
+ MarkAndNotifyInitializationComplete();
+}
+
+//////////////////////////////////////////////////////////////////////////
+// BaseTransaction member definitions
+BaseTransaction::BaseTransaction(UserShare* share)
+ : lookup_(NULL) {
+ DCHECK(share && share->dir_manager.get());
+ lookup_ = new syncable::ScopedDirLookup(share->dir_manager.get(),
+ share->authenticated_name);
+ if (!(lookup_->good()))
+ DCHECK(false) << "ScopedDirLookup failed on valid DirManager.";
+}
+BaseTransaction::~BaseTransaction() {
+ delete lookup_;
+}
+
+UserShare* SyncManager::GetUserShare() const {
+ DCHECK(data_->initialized()) << "GetUserShare requires initialization!";
+ return data_->GetUserShare();
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
new file mode 100644
index 0000000..0b02e2e
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -0,0 +1,338 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#include "chrome/browser/sync/engine/syncer.h"
+
+#include "base/format_macros.h"
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+#include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
+#include "chrome/browser/sync/engine/build_commit_command.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/download_updates_command.h"
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/post_commit_message_command.h"
+#include "chrome/browser/sync/engine/process_commit_response_command.h"
+#include "chrome/browser/sync/engine/process_updates_command.h"
+#include "chrome/browser/sync/engine/resolve_conflicts_command.h"
+#include "chrome/browser/sync/engine/syncer_end_command.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/engine/verify_updates_command.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using sync_pb::ClientCommand;
+using syncable::Blob;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::SERVER_BOOKMARK_FAVICON;
+using syncable::SERVER_BOOKMARK_URL;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_BOOKMARK_OBJECT;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NAME;
+using syncable::SERVER_NON_UNIQUE_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SYNCER;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+Syncer::Syncer(
+ syncable::DirectoryManager* dirman,
+ const PathString &account_name,
+ ServerConnectionManager* connection_manager,
+ ModelSafeWorker* model_safe_worker)
+ : account_name_(account_name),
+ early_exit_requested_(false),
+ max_commit_batch_size_(kDefaultMaxCommitBatchSize),
+ connection_manager_(connection_manager),
+ dirman_(dirman),
+ silenced_until_(0),
+ command_channel_(NULL),
+ model_safe_worker_(model_safe_worker),
+ updates_source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
+ notifications_enabled_(false),
+ pre_conflict_resolution_function_(NULL) {
+ SyncerEvent shutdown = { SyncerEvent::SHUTDOWN_USE_WITH_CARE };
+ syncer_event_channel_.reset(new SyncerEventChannel(shutdown));
+ shutdown_channel_.reset(new ShutdownChannel(this));
+
+ ScopedDirLookup dir(dirman_, account_name_);
+ // The directory must be good here.
+ CHECK(dir.good());
+}
+
+Syncer::~Syncer() {}
+
+void Syncer::RequestNudge(int milliseconds) {
+ SyncerEvent event;
+ event.what_happened = SyncerEvent::REQUEST_SYNC_NUDGE;
+ event.nudge_delay_milliseconds = milliseconds;
+ channel()->NotifyListeners(event);
+}
+
+bool Syncer::SyncShare() {
+ SyncProcessState state(dirman_, account_name_, connection_manager_,
+ &resolver_, syncer_event_channel_.get(),
+ model_safe_worker());
+ return SyncShare(&state);
+}
+
+bool Syncer::SyncShare(SyncProcessState *process_state) {
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, process_state);
+ session.set_source(TestAndSetUpdatesSource());
+ session.set_notifications_enabled(notifications_enabled());
+ SyncShare(&session, SYNCER_BEGIN, SYNCER_END);
+ return session.ShouldSyncAgain();
+}
+
+bool Syncer::SyncShare(SyncerStep first_step, SyncerStep last_step) {
+ SyncCycleState cycle_state;
+ SyncProcessState state(dirman_, account_name_, connection_manager_,
+ &resolver_, syncer_event_channel_.get(),
+ model_safe_worker());
+ SyncerSession session(&cycle_state, &state);
+ SyncShare(&session, first_step, last_step);
+ return session.ShouldSyncAgain();
+}
+
+void Syncer::SyncShare(SyncerSession *session) {
+ SyncShare(session, SYNCER_BEGIN, SYNCER_END);
+}
+
+void Syncer::SyncShare(SyncerSession *session,
+ const SyncerStep first_step,
+ const SyncerStep last_step) {
+ SyncerStep current_step = first_step;
+
+ SyncerStep next_step;
+ while (!ExitRequested()) {
+ switch (current_step) {
+ case SYNCER_BEGIN:
+ LOG(INFO) << "Syncer Begin";
+ next_step = DOWNLOAD_UPDATES;
+ break;
+ case DOWNLOAD_UPDATES: {
+ LOG(INFO) << "Downloading Updates";
+ DownloadUpdatesCommand download_updates;
+ download_updates.Execute(session);
+ next_step = PROCESS_CLIENT_COMMAND;
+ break;
+ }
+ case PROCESS_CLIENT_COMMAND: {
+ LOG(INFO) << "Processing Client Command";
+ ProcessClientCommand(session);
+ next_step = VERIFY_UPDATES;
+ break;
+ }
+ case VERIFY_UPDATES: {
+ LOG(INFO) << "Verifying Updates";
+ VerifyUpdatesCommand verify_updates;
+ verify_updates.Execute(session);
+ next_step = PROCESS_UPDATES;
+ break;
+ }
+ case PROCESS_UPDATES: {
+ LOG(INFO) << "Processing Updates";
+ ProcessUpdatesCommand process_updates;
+ process_updates.Execute(session);
+ // We should download all of the updates before attempting to process
+ // them.
+ if (session->CountUpdates() == 0) {
+ next_step = APPLY_UPDATES;
+ } else {
+ next_step = DOWNLOAD_UPDATES;
+ }
+ break;
+ }
+ case APPLY_UPDATES: {
+ LOG(INFO) << "Applying Updates";
+ ApplyUpdatesCommand apply_updates;
+ apply_updates.Execute(session);
+ next_step = BUILD_COMMIT_REQUEST;
+ break;
+ }
+ // These two steps are combined since they are executed within the same
+ // write transaction.
+ case BUILD_COMMIT_REQUEST: {
+ SyncerStatus status(session);
+ status.set_syncing(true);
+
+ LOG(INFO) << "Processing Commit Request";
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ SyncerSession::ScopedSetWriteTransaction set_trans(session, &trans);
+
+ LOG(INFO) << "Getting the Commit IDs";
+ GetCommitIdsCommand get_commit_ids_command(max_commit_batch_size_);
+ get_commit_ids_command.Execute(session);
+
+ if (!session->commit_ids().empty()) {
+ LOG(INFO) << "Building a commit message";
+ BuildCommitCommand build_commit_command;
+ build_commit_command.Execute(session);
+
+ next_step = POST_COMMIT_MESSAGE;
+ } else {
+ next_step = BUILD_AND_PROCESS_CONFLICT_SETS;
+ }
+
+ break;
+ }
+ case POST_COMMIT_MESSAGE: {
+ LOG(INFO) << "Posting a commit request";
+ PostCommitMessageCommand post_commit_command;
+ post_commit_command.Execute(session);
+ next_step = PROCESS_COMMIT_RESPONSE;
+ break;
+ }
+ case PROCESS_COMMIT_RESPONSE: {
+ LOG(INFO) << "Processing the commit response";
+ ProcessCommitResponseCommand process_response_command;
+ process_response_command.Execute(session);
+ next_step = BUILD_AND_PROCESS_CONFLICT_SETS;
+ break;
+ }
+ case BUILD_AND_PROCESS_CONFLICT_SETS: {
+ LOG(INFO) << "Building and Processing Conflict Sets";
+ BuildAndProcessConflictSetsCommand build_process_conflict_sets;
+ build_process_conflict_sets.Execute(session);
+ if (session->conflict_sets_built())
+ next_step = SYNCER_END;
+ else
+ next_step = RESOLVE_CONFLICTS;
+ break;
+ }
+ case RESOLVE_CONFLICTS: {
+ LOG(INFO) << "Resolving Conflicts";
+
+ // Trigger the pre_conflict_resolution_function_, which is a testing
+ // hook for the unit tests, if it is non-NULL.
+ if (pre_conflict_resolution_function_) {
+ ScopedDirLookup dir(dirman_, account_name_);
+ if (!dir.good()) {
+ LOG(ERROR) << "Bad dir lookup in syncer loop";
+ return;
+ }
+ pre_conflict_resolution_function_(dir);
+ }
+
+ ResolveConflictsCommand resolve_conflicts_command;
+ resolve_conflicts_command.Execute(session);
+ if (session->HasConflictingUpdates())
+ next_step = APPLY_UPDATES_TO_RESOLVE_CONFLICTS;
+ else
+ next_step = SYNCER_END;
+ break;
+ }
+ case APPLY_UPDATES_TO_RESOLVE_CONFLICTS: {
+ LOG(INFO) << "Applying updates to resolve conflicts";
+ ApplyUpdatesCommand apply_updates;
+ int num_conflicting_updates = session->conflicting_update_count();
+ apply_updates.Execute(session);
+ int post_facto_conflicting_updates =
+ session->conflicting_update_count();
+ session->set_conflicts_resolved(session->conflicts_resolved() ||
+ num_conflicting_updates > post_facto_conflicting_updates);
+ if (session->conflicts_resolved())
+ next_step = RESOLVE_CONFLICTS;
+ else
+ next_step = SYNCER_END;
+ break;
+ }
+ case SYNCER_END: {
+ LOG(INFO) << "Syncer End";
+ SyncerEndCommand syncer_end_command;
+ // This will set "syncing" to false, and send out a notification.
+ syncer_end_command.Execute(session);
+ goto post_while;
+ }
+ default:
+ LOG(ERROR) << "Unknown command: " << current_step;
+ }
+ if (last_step == current_step)
+ break;
+ current_step = next_step;
+ }
+ post_while:
+ // Copy any lingering useful state out of the session.
+ silenced_until_ = session->silenced_until();
+ return;
+}
+
+void Syncer::ProcessClientCommand(SyncerSession *session) {
+ if (!session->update_response().has_client_command())
+ return;
+ const ClientCommand command = session->update_response().client_command();
+ if (command_channel_)
+ command_channel_->NotifyListeners(&command);
+
+ // The server limits the number of items a client can commit in one batch.
+ if (command.has_max_commit_batch_size())
+ max_commit_batch_size_ = command.max_commit_batch_size();
+}
+
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
+ dest->Put(SERVER_NAME, src->Get(SERVER_NAME));
+ dest->Put(SERVER_NON_UNIQUE_NAME, src->Get(SERVER_NON_UNIQUE_NAME));
+ dest->Put(SERVER_PARENT_ID, src->Get(SERVER_PARENT_ID));
+ dest->Put(SERVER_MTIME, src->Get(SERVER_MTIME));
+ dest->Put(SERVER_CTIME, src->Get(SERVER_CTIME));
+ dest->Put(SERVER_VERSION, src->Get(SERVER_VERSION));
+ dest->Put(SERVER_IS_DIR, src->Get(SERVER_IS_DIR));
+ dest->Put(SERVER_IS_DEL, src->Get(SERVER_IS_DEL));
+ dest->Put(SERVER_IS_BOOKMARK_OBJECT, src->Get(SERVER_IS_BOOKMARK_OBJECT));
+ dest->Put(IS_UNAPPLIED_UPDATE, src->Get(IS_UNAPPLIED_UPDATE));
+ dest->Put(SERVER_BOOKMARK_URL, src->Get(SERVER_BOOKMARK_URL));
+ dest->Put(SERVER_BOOKMARK_FAVICON, src->Get(SERVER_BOOKMARK_FAVICON));
+ dest->Put(SERVER_POSITION_IN_PARENT, src->Get(SERVER_POSITION_IN_PARENT));
+}
+
+void ClearServerData(syncable::MutableEntry* entry) {
+ entry->Put(SERVER_NAME, PSTR(""));
+ entry->Put(SERVER_NON_UNIQUE_NAME, PSTR(""));
+ entry->Put(SERVER_PARENT_ID, syncable::kNullId);
+ entry->Put(SERVER_MTIME, 0);
+ entry->Put(SERVER_CTIME, 0);
+ entry->Put(SERVER_VERSION, 0);
+ entry->Put(SERVER_IS_DIR, false);
+ entry->Put(SERVER_IS_DEL, false);
+ entry->Put(SERVER_IS_BOOKMARK_OBJECT, false);
+ entry->Put(IS_UNAPPLIED_UPDATE, false);
+ entry->Put(SERVER_BOOKMARK_URL, PSTR(""));
+ entry->Put(SERVER_BOOKMARK_FAVICON, Blob());
+ entry->Put(SERVER_POSITION_IN_PARENT, 0);
+}
+
+std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry) {
+ return StringPrintf("id: %s, parent_id: %s, "
+ "version: %"PRId64"d, "
+ "mtime: %" PRId64"d (client: %" PRId64"d), "
+ "ctime: %" PRId64"d (client: %" PRId64"d), "
+ "name: %s, sync_timestamp: %" PRId64"d, "
+ "%s ",
+ entry.id_string().c_str(),
+ entry.parent_id_string().c_str(),
+ entry.version(),
+ entry.mtime(), ServerTimeToClientTime(entry.mtime()),
+ entry.ctime(), ServerTimeToClientTime(entry.ctime()),
+ entry.name().c_str(), entry.sync_timestamp(),
+ entry.deleted() ? "deleted, ":"");
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
new file mode 100644
index 0000000..f546f20
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer.h
@@ -0,0 +1,234 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_event.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace syncable {
+class Directory;
+class DirectoryManager;
+class Entry;
+class Id;
+class MutableEntry;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class ModelSafeWorker;
+class ServerConnectionManager;
+class SyncProcessState;
+class SyncerSession;
+class URLFactory;
+struct HttpResponse;
+
+static const int kDefaultMaxCommitBatchSize = 25;
+
+enum SyncerStep {
+ SYNCER_BEGIN,
+ DOWNLOAD_UPDATES,
+ PROCESS_CLIENT_COMMAND,
+ VERIFY_UPDATES,
+ PROCESS_UPDATES,
+ APPLY_UPDATES,
+ BUILD_COMMIT_REQUEST,
+ POST_COMMIT_MESSAGE,
+ PROCESS_COMMIT_RESPONSE,
+ BUILD_AND_PROCESS_CONFLICT_SETS,
+ RESOLVE_CONFLICTS,
+ APPLY_UPDATES_TO_RESOLVE_CONFLICTS,
+ SYNCER_END
+};
+
+// A Syncer provides a control interface for driving the individual steps
+// of the sync cycle. Each cycle (hopefully) moves the client into closer
+// synchronization with the server. The individual steps are modeled
+// as SyncerCommands, and the ordering of the steps is expressed using
+// the SyncerStep enum.
+//
+// A Syncer instance expects to run on a dedicated thread. Calls
+// to SyncShare() may take an unbounded amount of time, as SyncerCommands
+// may block on network i/o, on lock contention, or on tasks posted to
+// other threads.
+class Syncer {
+ public:
+ typedef std::vector<int64> UnsyncedMetaHandles;
+ typedef void (*TestCallbackFunction)(syncable::Directory* dir);
+
+ // The constructor may be called from a thread that is not the Syncer's
+ // dedicated thread, to allow some flexibility in the setup.
+ Syncer(
+ syncable::DirectoryManager* dirman,
+ const PathString &account_name,
+ ServerConnectionManager* connection_manager,
+ ModelSafeWorker* model_safe_worker);
+
+ ~Syncer();
+
+ // Called by other threads to tell the syncer to stop what it's doing
+ // and return early from SyncShare, if possible.
+ bool ExitRequested() { return early_exit_requested_; }
+ void RequestEarlyExit() { early_exit_requested_ = true; }
+
+ // SyncShare(...) variants cause one sync cycle to occur. The return value
+ // indicates whether we should sync again.
+ // The zero-argument version of SyncShare is provided for unit tests.
+ // When |sync_process_state| is provided, it is used as the syncer state
+ // for the sync cycle. It is treated as an input/output parameter.
+ // When |first_step| and |last_step| are provided, this means to perform
+ // a partial sync cycle, stopping after |last_step| is performed.
+ bool SyncShare();
+ bool SyncShare(SyncProcessState *sync_process_state);
+ bool SyncShare(SyncerStep first_step, SyncerStep last_step);
+
+ // Limit the batch size of commit operations to a specified number of items.
+ void set_max_commit_batch_size(int x) { max_commit_batch_size_ = x; }
+
+ ConflictResolver* conflict_resolver() { return &resolver_; }
+
+ PathString account_name() { return account_name_; }
+
+ SyncerEventChannel* channel() const { return syncer_event_channel_.get(); }
+
+ ShutdownChannel* shutdown_channel() const { return shutdown_channel_.get(); }
+
+ ModelSafeWorker* model_safe_worker() { return model_safe_worker_; }
+
+ // Syncer will take ownership of this channel and it will be destroyed along
+ // with the Syncer instance.
+ void set_shutdown_channel(ShutdownChannel* channel) {
+ shutdown_channel_.reset(channel);
+ }
+
+ void set_command_channel(ClientCommandChannel* channel) {
+ command_channel_ = channel;
+ }
+
+ // Volatile reader for the source member of the syncer session object. The
+ // value is set to the SYNC_CYCLE_CONTINUATION value to signal that it has
+ // been read.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE TestAndSetUpdatesSource() {
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE old_source =
+ updates_source_;
+ set_updates_source(sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION);
+ return old_source;
+ }
+
+ void set_updates_source(
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source) {
+ updates_source_ = source;
+ }
+
+ bool notifications_enabled() const {
+ return notifications_enabled_;
+ }
+
+ void set_notifications_enabled(bool state) {
+ notifications_enabled_ = state;
+ }
+
+ private:
+ void RequestNudge(int milliseconds);
+
+ // Implements the PROCESS_CLIENT_COMMAND syncer step.
+ void ProcessClientCommand(SyncerSession *session);
+
+ void SyncShare(SyncerSession* session);
+ void SyncShare(SyncerSession* session,
+ SyncerStep first_step,
+ SyncerStep last_step);
+
+ PathString account_name_;
+ bool early_exit_requested_;
+
+ int32 max_commit_batch_size_;
+
+ ServerConnectionManager* connection_manager_;
+
+ ConflictResolver resolver_;
+ syncable::DirectoryManager* const dirman_;
+
+ // When we're over bandwidth quota, we don't update until past this time.
+ time_t silenced_until_;
+
+ scoped_ptr<SyncerEventChannel> syncer_event_channel_;
+ scoped_ptr<ShutdownChannel> shutdown_channel_;
+ ClientCommandChannel* command_channel_;
+
+ // A worker capable of processing work closures on a thread that is
+ // guaranteed to be safe for model modifications. This is created and owned
+ // by the SyncerThread that created us.
+ ModelSafeWorker* model_safe_worker_;
+
+ // The source of the last nudge.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE updates_source_;
+
+ // True only if the notification channel is authorized and open.
+ bool notifications_enabled_;
+
+ // A callback hook used in unittests to simulate changes between conflict set
+ // building and conflict resolution.
+ TestCallbackFunction pre_conflict_resolution_function_;
+
+ FRIEND_TEST(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParent);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParentAndChild);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingCounterexample);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingWithNesting);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingWithNewItems);
+ FRIEND_TEST(SyncerTest, TestGetUnsyncedAndSimpleCommit);
+
+ DISALLOW_COPY_AND_ASSIGN(Syncer);
+};
+
+// Inline utility functions.
+
+// Given iterator ranges from two collections sorted according to a
+// common strict weak ordering, return true if the two ranges contain
+// any common items, and false if they do not.
+// This function is in this header so that it can be tested.
+template <class Iterator1, class Iterator2>
+bool SortedCollectionsIntersect(Iterator1 begin1, Iterator1 end1,
+ Iterator2 begin2, Iterator2 end2) {
+ Iterator1 i1 = begin1;
+ Iterator2 i2 = begin2;
+ while (i1 != end1 && i2 != end2) {
+ if (*i1 == *i2)
+ return true;
+ if (*i1 > *i2)
+ ++i2;
+ else
+ ++i1;
+ }
+ return false;
+}
+// Utility function declarations.
+void SplitServerInformationIntoNewEntry(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest);
+void ClearServerData(syncable::MutableEntry* entry);
+
+// Get update contents as a string. Intended for logging, and intended
+// to have a smaller footprint than the protobuf's built-in pretty printer.
+std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry);
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
diff --git a/chrome/browser/sync/engine/syncer_command.cc b/chrome/browser/sync/engine/syncer_command.cc
new file mode 100644
index 0000000..9f05b64
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_command.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+SyncerCommand::SyncerCommand() {}
+SyncerCommand::~SyncerCommand() {}
+
+void SyncerCommand::Execute(SyncerSession *session) {
+ ExecuteImpl(session);
+ SendNotifications(session);
+}
+
+void SyncerCommand::SendNotifications(SyncerSession *session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+
+ SyncerStatus status(session);
+
+ if (status.IsDirty()) {
+ SyncerEvent event = { SyncerEvent::STATUS_CHANGED};
+ event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(event);
+ if (status.over_quota()) {
+ SyncerEvent quota_event = {SyncerEvent::OVER_QUOTA};
+ quota_event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(quota_event);
+ }
+ status.SetClean();
+ }
+ if (status.IsAuthDirty()) {
+ ServerConnectionEvent event;
+ event.what_happened = ServerConnectionEvent::STATUS_CHANGED;
+ event.server_reachable = true;
+ event.connection_code = HttpResponse::SYNC_AUTH_ERROR;
+ session->connection_manager()->channel()->NotifyListeners(event);
+ status.SetAuthClean();
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_command.h b/chrome/browser/sync/engine/syncer_command.h
new file mode 100644
index 0000000..3fcff7d
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_command.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
+
+#include "base/basictypes.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// Implementation of a simple command pattern intended to be driven by the
+// Syncer. SyncerCommand is abstract and all subclasses must
+// implement ExecuteImpl(). This is done so that chunks of syncer operation
+// can be unit tested.
+//
+// Example Usage:
+//
+// SyncerSession session = ...;
+// SyncerCommand *cmd = SomeCommandFactory.createCommand(...);
+// cmd->Execute(session);
+// delete cmd;
+//
+
+class SyncerCommand {
+ public:
+ SyncerCommand();
+ virtual ~SyncerCommand();
+
+ // Execute dispatches to a derived class's ExecuteImpl.
+ void Execute(SyncerSession *session);
+
+ // ExecuteImpl is where derived classes actually do work.
+ virtual void ExecuteImpl(SyncerSession *session) = 0;
+ private:
+ void SendNotifications(SyncerSession *session);
+ DISALLOW_COPY_AND_ASSIGN(SyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
diff --git a/chrome/browser/sync/engine/syncer_end_command.cc b/chrome/browser/sync/engine/syncer_end_command.cc
new file mode 100644
index 0000000..f25cec8
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_end_command.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#include "chrome/browser/sync/engine/syncer_end_command.h"
+
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+SyncerEndCommand::SyncerEndCommand() {}
+SyncerEndCommand::~SyncerEndCommand() {}
+
+void SyncerEndCommand::ExecuteImpl(SyncerSession* session) {
+ ConflictResolutionView conflict_view(session);
+ conflict_view.increment_num_sync_cycles();
+ SyncerStatus status(session);
+ status.set_syncing(false);
+
+ if (!session->ShouldSyncAgain()) {
+ // This might be the first time we've fully completed a sync cycle.
+ DCHECK(session->got_zero_updates());
+
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+
+ // This gets persisted to the directory's backing store.
+ dir->set_initial_sync_ended(true);
+ }
+
+ SyncerEvent event = { SyncerEvent::SYNC_CYCLE_ENDED };
+ event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(event);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_end_command.h b/chrome/browser/sync/engine/syncer_end_command.h
new file mode 100644
index 0000000..904bac4
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_end_command.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// A syncer command for wrapping up a sync cycle.
+//
+// Preconditions - syncing is complete
+//
+// Postconditions - The UI has been told that we're done syncing
+
+class SyncerEndCommand : public SyncerCommand {
+ public:
+ SyncerEndCommand();
+ virtual ~SyncerEndCommand();
+
+ virtual void ExecuteImpl(SyncerSession* session);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SyncerEndCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util.cc b/chrome/browser/sync/engine/syncer_proto_util.cc
new file mode 100644
index 0000000..38ee50d
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util.cc
@@ -0,0 +1,276 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using std::string;
+using std::stringstream;
+using syncable::BASE_VERSION;
+using syncable::CTIME;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNSYNCED;
+using syncable::MTIME;
+using syncable::PARENT_ID;
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+
+namespace browser_sync {
+
+namespace {
+
+// Time to backoff syncing after receiving a throttled response.
+static const int kSyncDelayAfterThrottled = 2 * 60 * 60; // 2 hours
+
+// Verifies the store birthday, alerting/resetting as appropriate if there's a
+// mismatch.
+bool VerifyResponseBirthday(const ScopedDirLookup& dir,
+ const ClientToServerResponse* response) {
+ // Process store birthday.
+ if (!response->has_store_birthday())
+ return true;
+ string birthday = dir->store_birthday();
+ if (response->store_birthday() == birthday)
+ return true;
+ LOG(INFO) << "New store birthday: " << response->store_birthday();
+ if (!birthday.empty()) {
+ LOG(ERROR) << "Birthday changed, showing syncer stuck";
+ return false;
+ }
+ dir->set_store_birthday(response->store_birthday());
+ return true;
+}
+
+void LogResponseProfilingData(const ClientToServerResponse& response) {
+ if (response.has_profiling_data()) {
+ stringstream response_trace;
+ response_trace << "Server response trace:";
+
+ if (response.profiling_data().has_user_lookup_time()) {
+ response_trace << " " << "user lookup: " <<
+ response.profiling_data().user_lookup_time() << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_write_time()) {
+ response_trace << " " << "meta write: " <<
+ response.profiling_data().meta_data_write_time() << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_read_time()) {
+ response_trace << " " << "meta read: " <<
+ response.profiling_data().meta_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_write_time()) {
+ response_trace << " " << "file write: " <<
+ response.profiling_data().file_data_write_time() << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_read_time()) {
+ response_trace << " " << "file read: " <<
+ response.profiling_data().file_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_total_request_time()) {
+ response_trace << " " << "total time: " <<
+ response.profiling_data().total_request_time() << "ms";
+ }
+ LOG(INFO) << response_trace.str();
+ }
+}
+
+} // namespace
+
+// static
+bool SyncerProtoUtil::PostClientToServerMessage(
+ ClientToServerMessage* msg,
+ ClientToServerResponse* response,
+ SyncerSession *session) {
+
+ bool rv = false;
+ string tx, rx;
+ CHECK(response);
+
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good())
+ return false;
+ string birthday = dir->store_birthday();
+ if (!birthday.empty()) {
+ msg->set_store_birthday(birthday);
+ } else {
+ LOG(INFO) << "no birthday set";
+ }
+
+ msg->SerializeToString(&tx);
+ HttpResponse http_response;
+ ServerConnectionManager::PostBufferParams params = {
+ tx, &rx, &http_response
+ };
+
+ if (!session->connection_manager()->PostBufferWithCachedAuth(&params)) {
+ LOG(WARNING) << "Error posting from syncer:" << http_response;
+ } else {
+ rv = response->ParseFromString(rx);
+ }
+ SyncerStatus status(session);
+ if (rv) {
+ if (!VerifyResponseBirthday(dir, response)) {
+ // TODO(ncarter): Add a unit test for the case where the syncer
+ // becomes stuck due to a bad birthday.
+ status.set_syncer_stuck(true);
+ return false;
+ }
+
+ // We use an exponential moving average to determine the rate of errors.
+ // It's more reactive to recent situations and uses no extra storage.
+ status.ForgetOldError();
+ // If we're decaying send out an update.
+ status.CheckErrorRateTooHigh();
+
+ switch (response->error_code()) {
+ case ClientToServerResponse::SUCCESS:
+ if (!response->has_store_birthday() && birthday.empty()) {
+ LOG(ERROR) <<
+ "Server didn't provide birthday in proto buffer response.";
+ rv = false;
+ }
+ LogResponseProfilingData(*response);
+ break;
+ case ClientToServerResponse::USER_NOT_ACTIVATED:
+ case ClientToServerResponse::AUTH_INVALID:
+ case ClientToServerResponse::ACCESS_DENIED:
+ LOG(INFO) << "Authentication expired, re-requesting";
+ LOG(INFO) << "Not implemented in syncer yet!!!";
+ status.AuthFailed();
+ rv = false;
+ break;
+ case ClientToServerResponse::NOT_MY_BIRTHDAY:
+ LOG(WARNING) << "Not my birthday return.";
+ rv = false;
+ break;
+ case ClientToServerResponse::THROTTLED:
+ LOG(WARNING) << "Client silenced by server.";
+ session->set_silenced_until(time(0) + kSyncDelayAfterThrottled);
+ rv = false;
+ break;
+ }
+
+ } else if (session->connection_manager()->IsServerReachable()) {
+ status.TallyNewError();
+ }
+ return rv;
+}
+
+// static
+bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
+ const SyncEntity& server_entry) {
+ SyncName name = NameFromSyncEntity(server_entry);
+
+ CHECK(local_entry.Get(ID) == server_entry.id()) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+ CHECK(server_entry.version() == local_entry.Get(BASE_VERSION)) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+ CHECK(!local_entry.Get(IS_UNSYNCED)) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+
+ if (local_entry.Get(IS_DEL) && server_entry.deleted())
+ return true;
+ if (!ClientAndServerTimeMatch(local_entry.Get(CTIME), server_entry.ctime())) {
+ LOG(WARNING) << "ctime mismatch";
+ return false;
+ }
+
+ // These checks are somewhat prolix, but they're easier to debug than
+ // a big boolean statement.
+ SyncName client_name = local_entry.GetName();
+ if (client_name != name) {
+ LOG(WARNING) << "Client name mismatch";
+ return false;
+ }
+ if (local_entry.Get(PARENT_ID) != server_entry.parent_id()) {
+ LOG(WARNING) << "Parent ID mismatch";
+ return false;
+ }
+ if (local_entry.Get(IS_DIR) != server_entry.IsFolder()) {
+ LOG(WARNING) << "Dir field mismatch";
+ return false;
+ }
+ if (local_entry.Get(IS_DEL) != server_entry.deleted()) {
+ LOG(WARNING) << "Deletion mismatch";
+ return false;
+ }
+ if (!local_entry.Get(IS_DIR) &&
+ !ClientAndServerTimeMatch(local_entry.Get(MTIME),
+ server_entry.mtime())) {
+ LOG(WARNING) << "mtime mismatch";
+ return false;
+ }
+
+ return true;
+}
+
+// static
+void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
+ syncable::Blob* blob) {
+ syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
+ blob->swap(proto_blob);
+}
+
+// static
+bool SyncerProtoUtil::ProtoBytesEqualsBlob(const std::string& proto_bytes,
+ const syncable::Blob& blob) {
+ if (proto_bytes.size() != blob.size())
+ return false;
+ return std::equal(proto_bytes.begin(), proto_bytes.end(), blob.begin());
+}
+
+// static
+void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
+ std::string* proto_bytes) {
+ std::string blob_string(blob.begin(), blob.end());
+ proto_bytes->swap(blob_string);
+}
+
+// static
+syncable::SyncName SyncerProtoUtil::NameFromSyncEntity(
+ const SyncEntity &entry) {
+ SyncName result(PSTR(""));
+
+ AppendUTF8ToPathString(entry.name(), &result.value());
+ if (entry.has_non_unique_name()) {
+ AppendUTF8ToPathString(entry.non_unique_name(),
+ &result.non_unique_value());
+ } else {
+ result.non_unique_value() = result.value();
+ }
+ return result;
+}
+
+// static
+syncable::SyncName SyncerProtoUtil::NameFromCommitEntryResponse(
+ const CommitResponse_EntryResponse& entry) {
+ SyncName result(PSTR(""));
+
+ AppendUTF8ToPathString(entry.name(), &result.value());
+ if (entry.has_non_unique_name()) {
+ AppendUTF8ToPathString(entry.non_unique_name(),
+ &result.non_unique_value());
+ } else {
+ result.non_unique_value() = result.value();
+ }
+ return result;
+}
+
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_proto_util.h b/chrome/browser/sync/engine/syncer_proto_util.h
new file mode 100644
index 0000000..ecee903
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+
+#include <string>
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "chrome/browser/sync/syncable/blob.h"
+
+namespace syncable {
+class Entry;
+class ScopedDirLookup;
+class SyncName;
+} // namespace syncable
+
+namespace sync_pb {
+class ClientToServerResponse;
+} // namespace sync_pb
+
+namespace browser_sync {
+
+class ClientToServerMessage;
+class SyncerSession;
+class SyncEntity;
+class CommitResponse_EntryResponse;
+
+class SyncerProtoUtil {
+ public:
+ // Posts the given message and fills the buffer with the returned value.
+ // Returns true on success. Also handles store birthday verification:
+ // session->status()->syncer_stuck_ is set true if the birthday is
+ // incorrect. A false value will always be returned if birthday is bad.
+ static bool PostClientToServerMessage(ClientToServerMessage* msg,
+ sync_pb::ClientToServerResponse* response, SyncerSession *session);
+
+ // Compares a syncable Entry to SyncEntity, returns true iff
+ // the data is identical.
+ //
+ // TODO(sync): The places where this function is used are arguable big
+ // causes of the fragility, because there's a tendency to freak out
+ // the moment the local and server values diverge. However, this almost
+ // always indicates a sync bug somewhere earlier in the sync cycle.
+ static bool Compare(const syncable::Entry& local_entry,
+ const SyncEntity& server_entry);
+
+ // Utility methods for converting between syncable::Blobs and protobuf
+ // byte fields.
+ static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
+ syncable::Blob* blob);
+ static bool ProtoBytesEqualsBlob(const std::string& proto_bytes,
+ const syncable::Blob& blob);
+ static void CopyBlobIntoProtoBytes(const syncable::Blob& blob,
+ std::string* proto_bytes);
+
+ // Extract the name fields from a sync entity.
+ static syncable::SyncName NameFromSyncEntity(
+ const SyncEntity& entry);
+
+ // Extract the name fields from a commit entry response.
+ static syncable::SyncName NameFromCommitEntryResponse(
+ const CommitResponse_EntryResponse& entry);
+
+ private:
+ SyncerProtoUtil() {}
+ DISALLOW_COPY_AND_ASSIGN(SyncerProtoUtil);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
new file mode 100644
index 0000000..c11a4ca
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/blob.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::Blob;
+using syncable::SyncName;
+
+namespace browser_sync {
+
+TEST(SyncerProtoUtil, TestBlobToProtocolBufferBytesUtilityFunctions) {
+ unsigned char test_data1[] = {1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data2[] = {1, 99, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data3[] = {99, 2, 3, 4, 5, 6, 7, 8};
+
+ syncable::Blob test_blob1, test_blob2, test_blob3;
+ for (int i = 0; i < arraysize(test_data1); ++i)
+ test_blob1.push_back(test_data1[i]);
+ for (int i = 0; i < arraysize(test_data2); ++i)
+ test_blob2.push_back(test_data2[i]);
+ for (int i = 0; i < arraysize(test_data3); ++i)
+ test_blob3.push_back(test_data3[i]);
+
+ string test_message1(reinterpret_cast<char*>(test_data1),
+ arraysize(test_data1));
+ string test_message2(reinterpret_cast<char*>(test_data2),
+ arraysize(test_data2));
+ string test_message3(reinterpret_cast<char*>(test_data3),
+ arraysize(test_data3));
+
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob1));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob2));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob3));
+
+ Blob blob1_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+ SyncerProtoUtil::CopyProtoBytesIntoBlob(test_message1, &blob1_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+
+ std::string message2_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+ SyncerProtoUtil::CopyBlobIntoProtoBytes(test_blob2, &message2_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the
+// name field is provided.
+TEST(SyncerProtoUtil, NameExtractionOneName) {
+ SyncEntity one_name_entity;
+ CommitResponse_EntryResponse one_name_response;
+
+ PathString one_name_string(PSTR("Eggheadednesses"));
+ one_name_entity.set_name("Eggheadednesses");
+ one_name_response.set_name("Eggheadednesses");
+
+ SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a.value());
+ EXPECT_EQ(one_name_string, name_a.non_unique_value());
+
+ SyncName name_b =
+ SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
+ EXPECT_EQ(one_name_string, name_b.value());
+ EXPECT_EQ(one_name_string, name_b.non_unique_value());
+
+ EXPECT_TRUE(name_a == name_b);
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the
+// name field and the non_unique_name fields are provided.
+TEST(SyncerProtoUtil, NameExtractionTwoNames) {
+ SyncEntity two_name_entity;
+ CommitResponse_EntryResponse two_name_response;
+
+ PathString two_name_string_unique(PSTR("Oxyphenbutazone"));
+ two_name_entity.set_name("Oxyphenbutazone");
+ two_name_response.set_name("Oxyphenbutazone");
+ PathString two_name_string(PSTR("Neuroanatomists"));
+ two_name_entity.set_non_unique_name("Neuroanatomists");
+ two_name_response.set_non_unique_name("Neuroanatomists");
+
+ SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
+ EXPECT_EQ(two_name_string_unique, name_a.value());
+ EXPECT_EQ(two_name_string, name_a.non_unique_value());
+
+ SyncName name_b =
+ SyncerProtoUtil::NameFromCommitEntryResponse(two_name_response);
+ EXPECT_EQ(two_name_string_unique, name_b.value());
+ EXPECT_EQ(two_name_string, name_b.non_unique_value());
+
+ EXPECT_TRUE(name_a == name_b);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_session.h b/chrome/browser/sync/engine/syncer_session.h
new file mode 100644
index 0000000..e90930f
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_session.h
@@ -0,0 +1,364 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// SyncerSession holds the entire state of a single sync cycle;
+// GetUpdates, Commit, and Conflict Resolution. After said cycle, the
+// Session may contain items that were unable to be processed because of
+// errors.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
+
+#include <utility>
+#include <vector>
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+class ConflictResolver;
+class ModelSafeWorker;
+class ServerConnectionManager;
+class SyncerStatus;
+struct SyncerEvent;
+
+class SyncerSession {
+ friend class ConflictResolutionView;
+ friend class SyncerStatus;
+ public:
+ // A utility to set the session's write transaction member,
+ // and later clear it when it the utility falls out of scope.
+ class ScopedSetWriteTransaction {
+ public:
+ ScopedSetWriteTransaction(SyncerSession* session,
+ syncable::WriteTransaction* trans)
+ : session_(session) {
+ session_->set_write_transaction(trans);
+ }
+ ~ScopedSetWriteTransaction() {
+ session_->ClearWriteTransaction();
+ }
+ private:
+ SyncerSession* session_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetWriteTransaction);
+ };
+
+ SyncerSession(SyncCycleState* cycle_state, SyncProcessState* process_state)
+ : sync_process_state_(process_state),
+ sync_cycle_state_(cycle_state),
+ source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
+ notifications_enabled_(false) {
+ DCHECK(NULL != process_state);
+ DCHECK(NULL != cycle_state);
+ }
+
+ // Perhaps this should dictate the next step. (ie, don't do apply if you
+ // didn't get any from download). or put it in the while loop.
+ void set_update_response(const ClientToServerResponse& update_response) {
+ sync_cycle_state_->set_update_response(update_response);
+ }
+
+ const ClientToServerResponse& update_response() const {
+ return sync_cycle_state_->update_response();
+ }
+
+ void set_commit_response(const ClientToServerResponse& commit_response) {
+ sync_cycle_state_->set_commit_response(commit_response);
+ }
+
+ const ClientToServerResponse& commit_response() const {
+ return sync_cycle_state_->commit_response();
+ }
+
+ void AddVerifyResult(const VerifyResult& verify_result,
+ const sync_pb::SyncEntity& entity) {
+ sync_cycle_state_->AddVerifyResult(verify_result, entity);
+ }
+
+ bool HasVerifiedUpdates() const {
+ return sync_cycle_state_->HasVerifiedUpdates();
+ }
+
+ void AddAppliedUpdate(const UpdateAttemptResponse& response,
+ const syncable::Id& id) {
+ sync_cycle_state_->AddAppliedUpdate(response, id);
+ }
+
+ bool HasAppliedUpdates() const {
+ return sync_cycle_state_->HasAppliedUpdates();
+ }
+
+ PathString account_name() const {
+ return sync_process_state_->account_name();
+ }
+
+ syncable::DirectoryManager* dirman() const {
+ return sync_process_state_->dirman();
+ }
+
+ ServerConnectionManager* connection_manager() const {
+ return sync_process_state_->connection_manager();
+ }
+
+ ConflictResolver* resolver() const {
+ return sync_process_state_->resolver();
+ }
+
+ SyncerEventChannel* syncer_event_channel() const {
+ return sync_process_state_->syncer_event_channel();
+ }
+
+ int conflicting_update_count() const {
+ return sync_process_state_->conflicting_updates();
+ }
+
+ time_t silenced_until() const {
+ return sync_process_state_->silenced_until_;
+ }
+
+ void set_silenced_until(time_t silenced_until) const {
+ sync_process_state_->silenced_until_ = silenced_until;
+ }
+
+ const std::vector<int64>& unsynced_handles() const {
+ return sync_cycle_state_->unsynced_handles();
+ }
+
+ void set_unsynced_handles(const std::vector<int64>& unsynced_handles) {
+ sync_cycle_state_->set_unsynced_handles(unsynced_handles);
+ }
+
+ int64 unsynced_count() const { return sync_cycle_state_->unsynced_count(); }
+
+ const std::vector<syncable::Id>& commit_ids() const {
+ return sync_cycle_state_->commit_ids();
+ }
+
+ void set_commit_ids(const std::vector<syncable::Id>& commit_ids) {
+ sync_cycle_state_->set_commit_ids(commit_ids);
+ }
+
+ bool commit_ids_empty() const {
+ return sync_cycle_state_->commit_ids_empty();
+ }
+
+ syncable::WriteTransaction* write_transaction() const {
+ return sync_cycle_state_->write_transaction();
+ }
+
+ bool has_open_write_transaction() const {
+ return sync_cycle_state_->has_open_write_transaction();
+ }
+
+ ClientToServerMessage* commit_message() const {
+ return sync_cycle_state_->commit_message();
+ }
+
+ void set_commit_message(const ClientToServerMessage& message) {
+ sync_cycle_state_->set_commit_message(message);
+ }
+
+ bool HasRemainingItemsToCommit() const {
+ return commit_ids().size() < unsynced_handles().size();
+ }
+
+ void AddCommitConflict(const syncable::Id& the_id) {
+ sync_process_state_->AddConflictingItem(the_id);
+ }
+
+ void AddBlockedItem(const syncable::Id& the_id) {
+ sync_process_state_->AddBlockedItem(the_id);
+ }
+
+ void EraseCommitConflict(const syncable::Id& the_id) {
+ sync_process_state_->EraseConflictingItem(the_id);
+ }
+
+ void EraseBlockedItem(const syncable::Id& the_id) {
+ sync_process_state_->EraseBlockedItem(the_id);
+ }
+
+ // Returns true if at least one update application failed due to
+ // a conflict during this sync cycle.
+ bool HasConflictingUpdates() const {
+ std::vector<AppliedUpdate>::const_iterator it;
+ for (it = sync_cycle_state_->AppliedUpdatesBegin();
+ it < sync_cycle_state_->AppliedUpdatesEnd();
+ ++it) {
+ if (it->first == CONFLICT) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::vector<VerifiedUpdate>::iterator VerifiedUpdatesBegin() const {
+ return sync_cycle_state_->VerifiedUpdatesBegin();
+ }
+
+ std::vector<VerifiedUpdate>::iterator VerifiedUpdatesEnd() const {
+ return sync_cycle_state_->VerifiedUpdatesEnd();
+ }
+
+ // Returns the number of updates received from the sync server.
+ int64 CountUpdates() const {
+ if (update_response().has_get_updates()) {
+ return update_response().get_updates().entries().size();
+ } else {
+ return 0;
+ }
+ }
+
+ bool got_zero_updates() const {
+ return CountUpdates() == 0;
+ }
+
+ void DumpSessionInfo() const {
+ LOG(INFO) << "Dumping session info";
+ if (update_response().has_get_updates()) {
+ LOG(INFO) << update_response().get_updates().entries().size()
+ << " updates downloaded by last get_updates";
+ } else {
+ LOG(INFO) << "No update response found";
+ }
+ LOG(INFO) << sync_cycle_state_->VerifiedUpdatesSize()
+ << " updates verified";
+ LOG(INFO) << sync_cycle_state_->AppliedUpdatesSize() << " updates applied";
+ LOG(INFO) << count_blocked_updates() << " updates blocked by open entry";
+ LOG(INFO) << commit_ids().size() << " items to commit";
+ LOG(INFO) << unsynced_count() << " unsynced items";
+ }
+
+ int64 count_blocked_updates() const {
+ std::vector<AppliedUpdate>::const_iterator it;
+ int64 count = 0;
+ for (it = sync_cycle_state_->AppliedUpdatesBegin();
+ it < sync_cycle_state_->AppliedUpdatesEnd();
+ ++it) {
+ if (it->first == BLOCKED) {
+ ++count;
+ }
+ }
+ return count;
+ }
+
+ void set_conflict_sets_built(const bool b) {
+ sync_cycle_state_->set_conflict_sets_built(b);
+ }
+
+ bool conflict_sets_built() const {
+ return sync_cycle_state_->conflict_sets_built();
+ }
+
+ void set_conflicts_resolved(const bool b) {
+ sync_cycle_state_->set_conflicts_resolved(b);
+ }
+
+ bool conflicts_resolved() const {
+ return sync_cycle_state_->conflicts_resolved();
+ }
+
+ ModelSafeWorker* model_safe_worker() const {
+ return sync_process_state_->model_safe_worker();
+ }
+
+ void set_items_committed(const bool b) {
+ sync_cycle_state_->set_items_committed(b);
+ }
+
+ void set_item_committed() {
+ sync_cycle_state_->set_item_committed();
+ }
+
+ bool items_committed() const {
+ return sync_cycle_state_->items_committed();
+ }
+
+ void set_over_quota(const bool b) {
+ sync_cycle_state_->set_over_quota(b);
+ }
+
+ // Volitile reader for the source member of the syncer session object. The
+ // value is set to the SYNC_CYCLE_CONTINUATION value to signal that it has
+ // been read.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE TestAndSetSource() {
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE old_source =
+ source_;
+ set_source(sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION);
+ return old_source;
+ }
+
+ void set_source(sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source) {
+ source_ = source;
+ }
+
+ bool notifications_enabled() const {
+ return notifications_enabled_;
+ }
+
+ void set_notifications_enabled(const bool state) {
+ notifications_enabled_ = state;
+ }
+
+ void set_timestamp_dirty() {
+ sync_cycle_state_->set_timestamp_dirty();
+ }
+
+ bool timestamp_dirty() const {
+ return sync_cycle_state_->is_timestamp_dirty();
+ }
+
+ // TODO(chron): Unit test for this method.
+ // returns true iff this session contains data that should go through
+ // the sync engine again.
+ bool ShouldSyncAgain() const {
+ return (HasRemainingItemsToCommit() &&
+ sync_process_state_->successful_commits() > 0) ||
+ conflict_sets_built() ||
+ conflicts_resolved() ||
+ // Or, we have conflicting updates, but we're making progress on
+ // resolving them...
+ !got_zero_updates() ||
+ timestamp_dirty();
+ }
+
+ private:
+ // The write transaction must be destructed by the caller of this function.
+ // Here, we just clear the reference.
+ void set_write_transaction(syncable::WriteTransaction* write_transaction) {
+ sync_cycle_state_->set_write_transaction(write_transaction);
+ }
+
+ // sets the write transaction to null, but doesn't free the memory.
+ void ClearWriteTransaction() {
+ sync_cycle_state_->ClearWriteTransaction();
+ }
+
+ SyncProcessState* sync_process_state_;
+ SyncCycleState* sync_cycle_state_;
+
+ // The source for initiating this syncer session.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source_;
+
+ // True if notifications are enabled when this session was created.
+ bool notifications_enabled_;
+
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingCounterexample);
+ DISALLOW_COPY_AND_ASSIGN(SyncerSession);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
diff --git a/chrome/browser/sync/engine/syncer_status.cc b/chrome/browser/sync/engine/syncer_status.cc
new file mode 100644
index 0000000..f356bcd
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_status.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+
+namespace browser_sync {
+SyncerStatus::SyncerStatus(SyncerSession* s) {
+ sync_process_state_ = s->sync_process_state_;
+ sync_cycle_state_ = s->sync_cycle_state_;
+}
+SyncerStatus::~SyncerStatus() {}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_status.h b/chrome/browser/sync/engine/syncer_status.h
new file mode 100644
index 0000000..4f70ecd
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_status.h
@@ -0,0 +1,255 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO(sync): We eventually want to fundamentally change how we represent
+// status and inform the UI about the ways in which our status has changed.
+// Right now, we're just trying to keep the various command classes from
+// having to worry about this class.
+//
+// The UI will request that we fill this struct so it can show the current
+// sync state.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
+
+#include "base/atomicops.h"
+#include "base/port.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+
+namespace browser_sync {
+class SyncerSession;
+
+class SyncerStatus {
+ public:
+ explicit SyncerStatus(SyncCycleState* cycle_state, SyncProcessState* state)
+ : sync_process_state_(state),
+ sync_cycle_state_(cycle_state){}
+ explicit SyncerStatus(SyncerSession* s);
+ ~SyncerStatus();
+
+ bool invalid_store() const {
+ return sync_process_state_->invalid_store();
+ }
+
+ void set_invalid_store(const bool val) {
+ sync_process_state_->set_invalid_store(val);
+ }
+
+ bool syncer_stuck() const {
+ return sync_process_state_->syncer_stuck();
+ }
+
+ void set_syncer_stuck(const bool val) {
+ sync_process_state_->set_syncer_stuck(val);
+ }
+
+ bool syncing() const {
+ return sync_process_state_->syncing();
+ }
+
+ void set_syncing(const bool val) {
+ sync_process_state_->set_syncing(val);
+ }
+
+ bool IsShareUsable() const {
+ return sync_process_state_->IsShareUsable();
+ }
+
+ // During initial sync these two members can be used to
+ // measure sync progress.
+ int64 current_sync_timestamp() const {
+ return sync_process_state_->current_sync_timestamp();
+ }
+
+ void set_current_sync_timestamp(const int64 val) {
+ sync_process_state_->set_current_sync_timestamp(val);
+ }
+
+ int64 servers_latest_timestamp() const {
+ return sync_process_state_->servers_latest_timestamp();
+ }
+
+ void set_servers_latest_timestamp(const int64 val) {
+ sync_process_state_->set_servers_latest_timestamp(val);
+ }
+
+ int64 unsynced_count() const {
+ return sync_cycle_state_->unsynced_count();
+ }
+
+ int conflicting_updates() const {
+ return sync_process_state_->conflicting_updates();
+ }
+
+ int conflicting_commits() const {
+ return sync_process_state_->conflicting_commits();
+ }
+
+ void set_conflicting_commits(const int val) {
+ sync_process_state_->set_conflicting_commits(val);
+ }
+
+ int BlockedItemsSize() const {
+ return sync_process_state_->BlockedItemsSize();
+ }
+
+ // derive from sync_process_state blocked_item_ids_
+ int stalled_updates() const {
+ return sync_process_state_->BlockedItemsSize();
+ }
+
+ // in sync_process_state
+ int error_commits() const {
+ return sync_process_state_->error_commits();
+ }
+
+ void set_error_commits(const int val) {
+ sync_process_state_->set_error_commits(val);
+ }
+
+ // WEIRD COUNTER manipulation functions
+ int consecutive_problem_get_updates() const {
+ return sync_process_state_->consecutive_problem_get_updates();
+ }
+
+ void increment_consecutive_problem_get_updates() {
+ sync_process_state_->increment_consecutive_problem_get_updates();
+ }
+
+ void zero_consecutive_problem_get_updates() {
+ sync_process_state_->zero_consecutive_problem_get_updates();
+ }
+
+ int consecutive_problem_commits() const {
+ return sync_process_state_->consecutive_problem_commits();
+ }
+
+ void increment_consecutive_problem_commits() {
+ sync_process_state_->increment_consecutive_problem_commits();
+ }
+
+ void zero_consecutive_problem_commits() {
+ sync_process_state_->zero_consecutive_problem_commits();
+ }
+
+ int consecutive_transient_error_commits() const {
+ return sync_process_state_->consecutive_transient_error_commits();
+ }
+
+ void increment_consecutive_transient_error_commits_by(int value) {
+ sync_process_state_->increment_consecutive_transient_error_commits_by(
+ value);
+ }
+
+ void zero_consecutive_transient_error_commits() {
+ sync_process_state_->zero_consecutive_transient_error_commits();
+ }
+
+ int consecutive_errors() const {
+ return sync_process_state_->consecutive_errors();
+ }
+
+ void increment_consecutive_errors() {
+ increment_consecutive_errors_by(1);
+ }
+
+ void increment_consecutive_errors_by(int value) {
+ sync_process_state_->increment_consecutive_errors_by(value);
+ }
+
+ void zero_consecutive_errors() {
+ sync_process_state_->zero_consecutive_errors();
+ }
+
+ int successful_commits() const {
+ return sync_process_state_->successful_commits();
+ }
+
+ void increment_successful_commits() {
+ sync_process_state_->increment_successful_commits();
+ }
+
+ void zero_successful_commits() {
+ sync_process_state_->zero_successful_commits();
+ }
+ // end WEIRD COUNTER manipulation functions
+
+ bool over_quota() const { return sync_cycle_state_->over_quota(); }
+
+ // Methods for managing error rate tracking in sync_process_state
+ void TallyNewError() {
+ sync_process_state_->TallyNewError();
+ }
+
+ void TallyBigNewError() {
+ sync_process_state_->TallyBigNewError();
+ }
+
+ void ForgetOldError() {
+ sync_process_state_->ForgetOldError();
+ }
+
+ void CheckErrorRateTooHigh() {
+ sync_process_state_->CheckErrorRateTooHigh();
+ }
+
+ void AuthFailed() { sync_process_state_->AuthFailed(); }
+
+ void AuthSucceeded() { sync_process_state_->AuthSucceeded(); }
+
+ // Returns true if this object has been modified since last SetClean() call
+ bool IsDirty() const {
+ return sync_cycle_state_->IsDirty() || sync_process_state_->IsDirty();
+ }
+
+ // Returns true if auth status has been modified since last SetClean() call
+ bool IsAuthDirty() const { return sync_process_state_->IsAuthDirty(); }
+
+ // Call to tell this status object that its new state has been seen
+ void SetClean() {
+ sync_process_state_->SetClean();
+ sync_cycle_state_->SetClean();
+ }
+
+ // Call to tell this status object that its auth state has been seen
+ void SetAuthClean() { sync_process_state_->SetAuthClean(); }
+
+ void DumpStatusInfo() const {
+ LOG(INFO) << "Dumping status info: " << (IsDirty() ? "DIRTY" : "CLEAN");
+
+ LOG(INFO) << "invalid store = " << invalid_store();
+ LOG(INFO) << "syncer_stuck = " << syncer_stuck();
+ LOG(INFO) << "syncing = " << syncing();
+ LOG(INFO) << "over_quota = " << over_quota();
+
+ LOG(INFO) << "current_sync_timestamp = " << current_sync_timestamp();
+ LOG(INFO) << "servers_latest_timestamp = " << servers_latest_timestamp();
+ LOG(INFO) << "unsynced_count = " << unsynced_count();
+ LOG(INFO) << "conflicting_updates = " << conflicting_updates();
+ LOG(INFO) << "conflicting_commits = " << conflicting_commits();
+ LOG(INFO) << "BlockedItemsSize = " << BlockedItemsSize();
+ LOG(INFO) << "stalled_updates = " << stalled_updates();
+ LOG(INFO) << "error_commits = " << error_commits();
+
+ LOG(INFO) << "consecutive_problem_get_updates = "
+ << consecutive_problem_get_updates();
+ LOG(INFO) << "consecutive_problem_commits = "
+ << consecutive_problem_commits();
+ LOG(INFO) << "consecutive_transient_error_commits = "
+ << consecutive_transient_error_commits();
+ LOG(INFO) << "consecutive_errors = " << consecutive_errors();
+ LOG(INFO) << "successful_commits = " << successful_commits();
+ }
+
+ private:
+
+ SyncCycleState *sync_cycle_state_;
+ SyncProcessState *sync_process_state_;
+
+};
+} // namespace browser_sync
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
diff --git a/chrome/browser/sync/engine/syncer_thread.cc b/chrome/browser/sync/engine/syncer_thread.cc
new file mode 100644
index 0000000..e0832a7
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread.cc
@@ -0,0 +1,558 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_thread.h"
+
+#ifdef OS_MACOSX
+#include <CoreFoundation/CFNumber.h>
+#include <IOKit/IOTypes.h>
+#include <IOKit/IOKitLib.h>
+#endif
+
+#include <algorithm>
+#include <map>
+#include <queue>
+
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+using std::priority_queue;
+using std::min;
+
+static inline bool operator < (const timespec& a, const timespec& b) {
+ return a.tv_sec == b.tv_sec ? a.tv_nsec < b.tv_nsec : a.tv_sec < b.tv_sec;
+}
+
+namespace {
+
+// returns the amount of time since the user last interacted with
+// the computer, in milliseconds
+int UserIdleTime() {
+#ifdef OS_WINDOWS
+ LASTINPUTINFO last_input_info;
+ last_input_info.cbSize = sizeof(LASTINPUTINFO);
+
+ // get time in windows ticks since system start of last activity
+ BOOL b = ::GetLastInputInfo(&last_input_info);
+ if (b == TRUE)
+ return ::GetTickCount() - last_input_info.dwTime;
+#elif defined(OS_MACOSX)
+ // It would be great to do something like:
+ //
+ // return 1000 *
+ // CGEventSourceSecondsSinceLastEventType(
+ // kCGEventSourceStateCombinedSessionState,
+ // kCGAnyInputEventType);
+ //
+ // Unfortunately, CGEvent* lives in ApplicationServices, and we're a daemon
+ // and can't link that high up the food chain. Thus this mucking in IOKit.
+
+ io_service_t hid_service =
+ IOServiceGetMatchingService(kIOMasterPortDefault,
+ IOServiceMatching("IOHIDSystem"));
+ if (!hid_service) {
+ LOG(WARNING) << "Could not obtain IOHIDSystem";
+ return 0;
+ }
+
+ CFTypeRef object = IORegistryEntryCreateCFProperty(hid_service,
+ CFSTR("HIDIdleTime"),
+ kCFAllocatorDefault,
+ 0);
+ if (!object) {
+ LOG(WARNING) << "Could not get IOHIDSystem's HIDIdleTime property";
+ IOObjectRelease(hid_service);
+ return 0;
+ }
+
+ int64 idle_time; // in nanoseconds
+ Boolean success;
+ if (CFGetTypeID(object) == CFNumberGetTypeID()) {
+ success = CFNumberGetValue((CFNumberRef)object,
+ kCFNumberSInt64Type,
+ &idle_time);
+ } else {
+ LOG(WARNING) << "IOHIDSystem's HIDIdleTime property isn't a number!";
+ }
+
+ CFRelease(object);
+ IOObjectRelease(hid_service);
+
+ if (!success) {
+ LOG(WARNING) << "Could not get IOHIDSystem's HIDIdleTime property's value";
+ return 0;
+ } else {
+ return idle_time / 1000000; // nano to milli
+ }
+#else
+ static bool was_logged = false;
+ if (!was_logged) {
+ was_logged = true;
+ LOG(INFO) << "UserIdleTime unimplemented on this platform, "
+ "synchronization will not throttle when user idle";
+ }
+#endif
+
+ return 0;
+}
+
+} // namespace
+
+namespace browser_sync {
+
+bool SyncerThread::NudgeSyncer(int milliseconds_from_now, NudgeSource source) {
+ MutexLock lock(&mutex_);
+ if (syncer_ == NULL) {
+ return false;
+ }
+ NudgeSyncImpl(milliseconds_from_now, source);
+ return true;
+}
+
+void* RunSyncerThread(void* syncer_thread) {
+ return (reinterpret_cast<SyncerThread*>(syncer_thread))->ThreadMain();
+}
+
+SyncerThread::SyncerThread(
+ ClientCommandChannel* command_channel,
+ syncable::DirectoryManager* mgr,
+ ServerConnectionManager* connection_manager,
+ AllStatus* all_status,
+ ModelSafeWorker* model_safe_worker)
+ : dirman_(mgr), scm_(connection_manager),
+ syncer_(NULL), syncer_events_(NULL), thread_running_(false),
+ syncer_short_poll_interval_seconds_(kDefaultShortPollIntervalSeconds),
+ syncer_long_poll_interval_seconds_(kDefaultLongPollIntervalSeconds),
+ syncer_polling_interval_(kDefaultShortPollIntervalSeconds),
+ syncer_max_interval_(kDefaultMaxPollIntervalMs),
+ stop_syncer_thread_(false), connected_(false), conn_mgr_hookup_(NULL),
+ p2p_authenticated_(false), p2p_subscribed_(false),
+ allstatus_(all_status), talk_mediator_hookup_(NULL),
+ command_channel_(command_channel), directory_manager_hookup_(NULL),
+ model_safe_worker_(model_safe_worker),
+ client_command_hookup_(NULL), disable_idle_detection_(false) {
+
+ SyncerEvent shutdown = { SyncerEvent::SHUTDOWN_USE_WITH_CARE };
+ syncer_event_channel_.reset(new SyncerEventChannel(shutdown));
+
+ if (dirman_) {
+ directory_manager_hookup_.reset(NewEventListenerHookup(
+ dirman_->channel(), this, &SyncerThread::HandleDirectoryManagerEvent));
+ }
+
+ if (scm_) {
+ WatchConnectionManager(scm_);
+ }
+
+ if (command_channel_) {
+ WatchClientCommands(command_channel_);
+ }
+}
+
+SyncerThread::~SyncerThread() {
+ client_command_hookup_.reset();
+ conn_mgr_hookup_.reset();
+ syncer_event_channel_.reset();
+ directory_manager_hookup_.reset();
+ syncer_events_.reset();
+ delete syncer_;
+ talk_mediator_hookup_.reset();
+ CHECK(!thread_running_);
+}
+
+// Creates and starts a syncer thread.
+// Returns true if it creates a thread or if there's currently a thread
+// running and false otherwise.
+bool SyncerThread::Start() {
+ MutexLock lock(&mutex_);
+ if (thread_running_) {
+ return true;
+ }
+ thread_running_ =
+ (0 == pthread_create(&thread_, NULL, RunSyncerThread, this));
+ if (thread_running_) {
+ pthread_detach(thread_);
+ }
+ return thread_running_;
+}
+
+// Stop processing. A max wait of at least 2*server RTT time is recommended.
+// returns true if we stopped, false otherwise.
+bool SyncerThread::Stop(int max_wait) {
+ MutexLock lock(&mutex_);
+ if (!thread_running_)
+ return true;
+ stop_syncer_thread_ = true;
+ if (NULL != syncer_) {
+ // try to early exit the syncer
+ syncer_->RequestEarlyExit();
+ }
+ pthread_cond_broadcast(&changed_.condvar_);
+ timespec deadline = { time(NULL) + (max_wait / 1000), 0 };
+ do {
+ const int wait_result = max_wait < 0 ?
+ pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_) :
+ pthread_cond_timedwait(&changed_.condvar_, &mutex_.mutex_,
+ &deadline);
+ if (ETIMEDOUT == wait_result) {
+ LOG(ERROR) << "SyncerThread::Stop timed out. Problems likely.";
+ return false;
+ }
+ } while (thread_running_);
+ return true;
+}
+
+void SyncerThread::WatchClientCommands(ClientCommandChannel* channel) {
+ PThreadScopedLock<PThreadMutex> lock(&mutex_);
+ client_command_hookup_.reset(NewEventListenerHookup(channel, this,
+ &SyncerThread::HandleClientCommand));
+}
+
+void SyncerThread::HandleClientCommand(ClientCommandChannel::EventType
+ event) {
+ if (!event) {
+ return;
+ }
+
+ // mutex not really necessary for these
+ if (event->has_set_sync_poll_interval()) {
+ syncer_short_poll_interval_seconds_ = event->set_sync_poll_interval();
+ }
+
+ if (event->has_set_sync_long_poll_interval()) {
+ syncer_long_poll_interval_seconds_ = event->set_sync_long_poll_interval();
+ }
+}
+
+void SyncerThread::ThreadMainLoop() {
+ // Use the short poll value by default.
+ int poll_seconds = syncer_short_poll_interval_seconds_;
+ int user_idle_milliseconds = 0;
+ timespec last_sync_time = { 0 };
+ bool initial_sync_for_thread = true;
+ bool continue_sync_cycle = false;
+
+ while (!stop_syncer_thread_) {
+ if (!connected_) {
+ LOG(INFO) << "Syncer thread waiting for connection.";
+ while (!connected_ && !stop_syncer_thread_)
+ pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_);
+ LOG_IF(INFO, connected_) << "Syncer thread found connection.";
+ continue;
+ }
+
+ if (syncer_ == NULL) {
+ LOG(INFO) << "Syncer thread waiting for database initialization.";
+ while (syncer_ == NULL && !stop_syncer_thread_)
+ pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_);
+ LOG_IF(INFO, !(syncer_ == NULL)) << "Syncer was found after DB started.";
+ continue;
+ }
+
+ timespec const next_poll = { last_sync_time.tv_sec + poll_seconds,
+ last_sync_time.tv_nsec };
+ const timespec wake_time =
+ !nudge_queue_.empty() && nudge_queue_.top().first < next_poll ?
+ nudge_queue_.top().first : next_poll;
+ LOG(INFO) << "wake time is " << wake_time.tv_sec;
+ LOG(INFO) << "next poll is " << next_poll.tv_sec;
+
+ const int error = pthread_cond_timedwait(&changed_.condvar_, &mutex_.mutex_,
+ &wake_time);
+ if (ETIMEDOUT != error) {
+ continue; // Check all the conditions again.
+ }
+
+ const timespec now = GetPThreadAbsoluteTime(0);
+
+ // Handle a nudge, caused by either a notification or a local bookmark
+ // event. This will also update the source of the following SyncMain call.
+ UpdateNudgeSource(now, &continue_sync_cycle, &initial_sync_for_thread);
+
+ LOG(INFO) << "Calling Sync Main at time " << now.tv_sec;
+ SyncMain(syncer_);
+ last_sync_time = now;
+
+ LOG(INFO) << "Updating the next polling time after SyncMain";
+ poll_seconds = CalculatePollingWaitTime(allstatus_->status(),
+ poll_seconds,
+ &user_idle_milliseconds,
+ &continue_sync_cycle);
+ }
+}
+
+// We check how long the user's been idle and sync less often if the
+// machine is not in use. The aim is to reduce server load.
+int SyncerThread::CalculatePollingWaitTime(
+ const AllStatus::Status& status,
+ int last_poll_wait, // in s
+ int* user_idle_milliseconds,
+ bool* continue_sync_cycle) {
+ bool is_continuing_sync_cyle = *continue_sync_cycle;
+ *continue_sync_cycle = false;
+
+ // Determine if the syncer has unfinished work to do from allstatus_.
+ const bool syncer_has_work_to_do =
+ status.updates_available > status.updates_received
+ || status.unsynced_count > 0;
+ LOG(INFO) << "syncer_has_work_to_do is " << syncer_has_work_to_do;
+
+ // First calculate the expected wait time, figuring in any backoff because of
+ // user idle time. next_wait is in seconds
+ syncer_polling_interval_ = (!status.notifications_enabled) ?
+ syncer_short_poll_interval_seconds_ :
+ syncer_long_poll_interval_seconds_;
+ int default_next_wait = syncer_polling_interval_;
+ int actual_next_wait = default_next_wait;
+
+ if (syncer_has_work_to_do) {
+ // Provide exponential backoff due to consecutive errors, else attempt to
+ // complete the work as soon as possible.
+ if (!is_continuing_sync_cyle) {
+ actual_next_wait = AllStatus::GetRecommendedDelaySeconds(0);
+ } else {
+ actual_next_wait = AllStatus::GetRecommendedDelaySeconds(last_poll_wait);
+ }
+ *continue_sync_cycle = true;
+ } else if (!status.notifications_enabled) {
+ // Ensure that we start exponential backoff from our base polling
+ // interval when we are not continuing a sync cycle.
+ last_poll_wait = std::max(last_poll_wait, syncer_polling_interval_);
+
+ // Did the user start interacting with the computer again?
+ // If so, revise our idle time (and probably next_sync_time) downwards
+ int new_idle_time = disable_idle_detection_ ? 0 : UserIdleTime();
+ if (new_idle_time < *user_idle_milliseconds) {
+ *user_idle_milliseconds = new_idle_time;
+ }
+ actual_next_wait = CalculateSyncWaitTime(last_poll_wait * 1000,
+ *user_idle_milliseconds) / 1000;
+ DCHECK_GE(actual_next_wait, default_next_wait);
+ }
+
+ LOG(INFO) << "Sync wait: idle " << default_next_wait
+ << " non-idle or backoff " << actual_next_wait << ".";
+
+ return actual_next_wait;
+}
+
+void* SyncerThread::ThreadMain() {
+ NameCurrentThreadForDebugging("SyncEngine_SyncerThread");
+ mutex_.Lock();
+ ThreadMainLoop();
+ thread_running_ = false;
+ pthread_cond_broadcast(&changed_.condvar_);
+ mutex_.Unlock();
+ LOG(INFO) << "Syncer thread exiting.";
+ return 0;
+}
+
+void SyncerThread::SyncMain(Syncer* syncer) {
+ CHECK(syncer);
+ mutex_.Unlock();
+ while (syncer->SyncShare()) {
+ LOG(INFO) << "Looping in sync share";
+ }
+ LOG(INFO) << "Done looping in sync share";
+
+ mutex_.Lock();
+}
+
+void SyncerThread::UpdateNudgeSource(const timespec& now,
+ bool* continue_sync_cycle,
+ bool* initial_sync) {
+ bool nudged = false;
+ NudgeSource nudge_source = kUnknown;
+ // Has the previous sync cycle completed?
+ if (continue_sync_cycle) {
+ nudge_source = kContinuation;
+ }
+ // Update the nudge source if a new nudge has come through during the
+ // previous sync cycle.
+ while (!nudge_queue_.empty() && !(now < nudge_queue_.top().first)) {
+ if (!nudged) {
+ nudge_source = nudge_queue_.top().second;
+ *continue_sync_cycle = false; // Reset the continuation token on nudge.
+ nudged = true;
+ }
+ nudge_queue_.pop();
+ }
+ SetUpdatesSource(nudged, nudge_source, initial_sync);
+}
+
+void SyncerThread::SetUpdatesSource(bool nudged, NudgeSource nudge_source,
+ bool* initial_sync) {
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE updates_source =
+ sync_pb::GetUpdatesCallerInfo::UNKNOWN;
+ if (*initial_sync) {
+ updates_source = sync_pb::GetUpdatesCallerInfo::FIRST_UPDATE;
+ *initial_sync = false;
+ } else if (!nudged) {
+ updates_source = sync_pb::GetUpdatesCallerInfo::PERIODIC;
+ } else {
+ switch (nudge_source) {
+ case kNotification:
+ updates_source = sync_pb::GetUpdatesCallerInfo::NOTIFICATION;
+ break;
+ case kLocal:
+ updates_source = sync_pb::GetUpdatesCallerInfo::LOCAL;
+ break;
+ case kContinuation:
+ updates_source = sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION;
+ break;
+ case kUnknown:
+ default:
+ updates_source = sync_pb::GetUpdatesCallerInfo::UNKNOWN;
+ break;
+ }
+ }
+ syncer_->set_updates_source(updates_source);
+}
+
+void SyncerThread::HandleSyncerEvent(const SyncerEvent& event) {
+ MutexLock lock(&mutex_);
+ channel()->NotifyListeners(event);
+ if (SyncerEvent::REQUEST_SYNC_NUDGE != event.what_happened) {
+ return;
+ }
+ NudgeSyncImpl(event.nudge_delay_milliseconds, kUnknown);
+}
+
+void SyncerThread::HandleDirectoryManagerEvent(
+ const syncable::DirectoryManagerEvent& event) {
+ LOG(INFO) << "Handling a directory manager event";
+ if (syncable::DirectoryManagerEvent::OPENED == event.what_happened) {
+ MutexLock lock(&mutex_);
+ LOG(INFO) << "Syncer starting up for: " << event.dirname;
+ // The underlying database structure is ready, and we should create
+ // the syncer.
+ CHECK(syncer_ == NULL);
+ syncer_ =
+ new Syncer(dirman_, event.dirname, scm_, model_safe_worker_.get());
+
+ syncer_->set_command_channel(command_channel_);
+ syncer_events_.reset(NewEventListenerHookup(
+ syncer_->channel(), this, &SyncerThread::HandleSyncerEvent));
+ pthread_cond_broadcast(&changed_.condvar_);
+ }
+}
+
+static inline void CheckConnected(bool* connected,
+ HttpResponse::ServerConnectionCode code,
+ pthread_cond_t* condvar) {
+ if (*connected) {
+ if (HttpResponse::CONNECTION_UNAVAILABLE == code) {
+ *connected = false;
+ pthread_cond_broadcast(condvar);
+ }
+ } else {
+ if (HttpResponse::SERVER_CONNECTION_OK == code) {
+ *connected = true;
+ pthread_cond_broadcast(condvar);
+ }
+ }
+}
+
+void SyncerThread::WatchConnectionManager(ServerConnectionManager* conn_mgr) {
+ conn_mgr_hookup_.reset(NewEventListenerHookup(conn_mgr->channel(), this,
+ &SyncerThread::HandleServerConnectionEvent));
+ CheckConnected(&connected_, conn_mgr->server_status(),
+ &changed_.condvar_);
+}
+
+void SyncerThread::HandleServerConnectionEvent(
+ const ServerConnectionEvent& event) {
+ if (ServerConnectionEvent::STATUS_CHANGED == event.what_happened) {
+ MutexLock lock(&mutex_);
+ CheckConnected(&connected_, event.connection_code,
+ &changed_.condvar_);
+ }
+}
+
+SyncerEventChannel* SyncerThread::channel() {
+ return syncer_event_channel_.get();
+}
+
+// inputs and return value in milliseconds
+int SyncerThread::CalculateSyncWaitTime(int last_interval, int user_idle_ms) {
+ // syncer_polling_interval_ is in seconds
+ int syncer_polling_interval_ms = syncer_polling_interval_ * 1000;
+
+ // This is our default and lower bound.
+ int next_wait = syncer_polling_interval_ms;
+
+ // Get idle time, bounded by max wait.
+ int idle = min(user_idle_ms, syncer_max_interval_);
+
+ // If the user has been idle for a while,
+ // we'll start decreasing the poll rate.
+ if (idle >= kPollBackoffThresholdMultiplier * syncer_polling_interval_ms) {
+ next_wait = std::min(AllStatus::GetRecommendedDelaySeconds(
+ last_interval / 1000), syncer_max_interval_ / 1000) * 1000;
+ }
+
+ return next_wait;
+}
+
+// Called with mutex_ already locked
+void SyncerThread::NudgeSyncImpl(int milliseconds_from_now,
+ NudgeSource source) {
+ const timespec nudge_time = GetPThreadAbsoluteTime(milliseconds_from_now);
+ NudgeObject nudge_object(nudge_time, source);
+ nudge_queue_.push(nudge_object);
+ pthread_cond_broadcast(&changed_.condvar_);
+}
+
+void SyncerThread::WatchTalkMediator(TalkMediator* mediator) {
+ talk_mediator_hookup_.reset(
+ NewEventListenerHookup(
+ mediator->channel(),
+ this,
+ &SyncerThread::HandleTalkMediatorEvent));
+}
+
+void SyncerThread::HandleTalkMediatorEvent(const TalkMediatorEvent& event) {
+ MutexLock lock(&mutex_);
+ switch (event.what_happened) {
+ case TalkMediatorEvent::LOGIN_SUCCEEDED:
+ LOG(INFO) << "P2P: Login succeeded.";
+ p2p_authenticated_ = true;
+ break;
+ case TalkMediatorEvent::LOGOUT_SUCCEEDED:
+ LOG(INFO) << "P2P: Login succeeded.";
+ p2p_authenticated_ = false;
+ break;
+ case TalkMediatorEvent::SUBSCRIPTIONS_ON:
+ LOG(INFO) << "P2P: Subscriptions successfully enabled.";
+ p2p_subscribed_ = true;
+ if (NULL != syncer_) {
+ LOG(INFO) << "Subscriptions on. Nudging syncer for initial push.";
+ NudgeSyncImpl(0, kLocal);
+ }
+ break;
+ case TalkMediatorEvent::SUBSCRIPTIONS_OFF:
+ LOG(INFO) << "P2P: Subscriptions are not enabled.";
+ p2p_subscribed_ = false;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_RECEIVED:
+ LOG(INFO) << "P2P: Updates on server, pushing syncer";
+ if (NULL != syncer_) {
+ NudgeSyncImpl(0, kNotification);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (NULL != syncer_) {
+ syncer_->set_notifications_enabled(p2p_authenticated_ && p2p_subscribed_);
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_thread.h b/chrome/browser/sync/engine/syncer_thread.h
new file mode 100644
index 0000000..30172d0
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread.h
@@ -0,0 +1,235 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class to run the syncer on a thread.
+//
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
+
+#include <list>
+#include <map>
+#include <queue>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+class EventListenerHookup;
+
+namespace syncable {
+class DirectoryManager;
+struct DirectoryManagerEvent;
+}
+
+namespace browser_sync {
+
+class ModelSafeWorker;
+class ServerConnectionManager;
+class Syncer;
+class TalkMediator;
+class URLFactory;
+struct ServerConnectionEvent;
+struct SyncerEvent;
+struct SyncerShutdownEvent;
+struct TalkMediatorEvent;
+
+class SyncerThread {
+ FRIEND_TEST(SyncerThreadTest, CalculateSyncWaitTime);
+ FRIEND_TEST(SyncerThreadTest, CalculatePollingWaitTime);
+
+ public:
+ friend class SyncerThreadTest;
+
+ enum NudgeSource {
+ kUnknown = 0,
+ kNotification,
+ kLocal,
+ kContinuation
+ };
+
+ // Server can overwrite these values via client commands.
+ // Standard short poll. This is used when XMPP is off.
+ static const int kDefaultShortPollIntervalSeconds = 60;
+ // Long poll is used when XMPP is on.
+ static const int kDefaultLongPollIntervalSeconds = 3600;
+ // 30 minutes by default. If exponential backoff kicks in, this is
+ // the longest possible poll interval.
+ static const int kDefaultMaxPollIntervalMs = 30 * 60 * 1000;
+
+ SyncerThread(
+ ClientCommandChannel* command_channel,
+ syncable::DirectoryManager* mgr,
+ ServerConnectionManager* connection_manager,
+ AllStatus* all_status,
+ ModelSafeWorker* model_safe_worker);
+ ~SyncerThread();
+
+ void WatchConnectionManager(ServerConnectionManager* conn_mgr);
+ // Creates and starts a syncer thread.
+ // Returns true if it creates a thread or if there's currently a thread
+ // running and false otherwise.
+ bool Start();
+
+ // Stop processing. A max wait of at least 2*server RTT time is recommended.
+ // returns true if we stopped, false otherwise.
+ bool Stop(int max_wait);
+
+ // Nudges the syncer to sync with a delay specified. This API is for access
+ // from the SyncerThread's controller and will cause a mutex lock.
+ bool NudgeSyncer(int milliseconds_from_now, NudgeSource source);
+
+ // Registers this thread to watch talk mediator events.
+ void WatchTalkMediator(TalkMediator* talk_mediator);
+
+ void WatchClientCommands(ClientCommandChannel* channel);
+
+ SyncerEventChannel* channel();
+
+ private:
+ // A few members to gate the rate at which we nudge the syncer.
+ enum {
+ kNudgeRateLimitCount = 6,
+ kNudgeRateLimitTime = 180,
+ };
+
+ // A queue of all scheduled nudges. One insertion for every call to
+ // NudgeQueue().
+ typedef std::pair<timespec, NudgeSource> NudgeObject;
+
+ struct IsTimeSpecGreater {
+ inline bool operator() (const NudgeObject& lhs, const NudgeObject& rhs) {
+ return lhs.first.tv_sec == rhs.first.tv_sec ?
+ lhs.first.tv_nsec > rhs.first.tv_nsec :
+ lhs.first.tv_sec > rhs.first.tv_sec;
+ }
+ };
+
+ typedef std::priority_queue<NudgeObject,
+ std::vector<NudgeObject>, IsTimeSpecGreater>
+ NudgeQueue;
+
+ // Threshold multipler for how long before user should be considered idle.
+ static const int kPollBackoffThresholdMultiplier = 10;
+
+ friend void* RunSyncerThread(void* syncer_thread);
+ void* Run();
+ void HandleDirectoryManagerEvent(
+ const syncable::DirectoryManagerEvent& event);
+ void HandleSyncerEvent(const SyncerEvent& event);
+ void HandleClientCommand(ClientCommandChannel::EventType event);
+
+ void HandleServerConnectionEvent(const ServerConnectionEvent& event);
+
+ void HandleTalkMediatorEvent(const TalkMediatorEvent& event);
+
+ void* ThreadMain();
+ void ThreadMainLoop();
+
+ void SyncMain(Syncer* syncer);
+
+ // Calculates the next sync wait time in seconds. last_poll_wait is the time
+ // duration of the previous polling timeout which was used.
+ // user_idle_milliseconds is updated by this method, and is a report of the
+ // full amount of time since the last period of activity for the user. The
+ // continue_sync_cycle parameter is used to determine whether or not we are
+ // calculating a polling wait time that is a continuation of an sync cycle
+ // which terminated while the syncer still had work to do.
+ int CalculatePollingWaitTime(
+ const AllStatus::Status& status,
+ int last_poll_wait, // in s
+ int* user_idle_milliseconds,
+ bool* continue_sync_cycle);
+ // Helper to above function, considers effect of user idle time.
+ int CalculateSyncWaitTime(int last_wait, int user_idle_ms);
+
+ // Sets the source value of the controlled syncer's updates_source value.
+ // The initial sync boolean is updated if read as a sentinel. The following
+ // two methods work in concert to achieve this goal.
+ void UpdateNudgeSource(const timespec& now, bool* continue_sync_cycle,
+ bool* initial_sync);
+ void SetUpdatesSource(bool nudged, NudgeSource nudge_source,
+ bool* initial_sync);
+
+ // for unit tests only
+ void DisableIdleDetection() { disable_idle_detection_ = true; }
+
+ // false when we want to stop the thread.
+ bool stop_syncer_thread_;
+
+ // we use one mutex for all members except the channel.
+ PThreadMutex mutex_;
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+ // Handle of the running thread.
+ pthread_t thread_;
+ bool thread_running_;
+
+ // Gets signaled whenever a thread outside of the syncer thread
+ // changes a member variable.
+ PThreadCondVar changed_;
+
+ // State of the server connection
+ bool connected_;
+
+ // State of the notification framework is tracked by these values.
+ bool p2p_authenticated_;
+ bool p2p_subscribed_;
+
+ scoped_ptr<EventListenerHookup> client_command_hookup_;
+ scoped_ptr<EventListenerHookup> conn_mgr_hookup_;
+ const AllStatus* allstatus_;
+
+ Syncer* syncer_;
+
+ syncable::DirectoryManager* dirman_;
+ ServerConnectionManager* scm_;
+
+ // Modifiable versions of kDefaultLongPollIntervalSeconds which can be
+ // updated by the server.
+ int syncer_short_poll_interval_seconds_;
+ int syncer_long_poll_interval_seconds_;
+
+ // The time we wait between polls in seconds. This is used as lower bound on
+ // our wait time. Updated once per loop from the command line flag.
+ int syncer_polling_interval_;
+
+ // The upper bound on the nominal wait between polls in seconds. Note that
+ // this bounds the "nominal" poll interval, while the the actual interval
+ // also takes previous failures into account.
+ int syncer_max_interval_;
+
+ scoped_ptr<SyncerEventChannel> syncer_event_channel_;
+
+ // This causes syncer to start syncing ASAP. If the rate of requests is
+ // too high the request will be silently dropped. mutex_ should be held when
+ // this is called.
+ void NudgeSyncImpl(int milliseconds_from_now, NudgeSource source);
+
+ NudgeQueue nudge_queue_;
+
+ scoped_ptr<EventListenerHookup> talk_mediator_hookup_;
+ ClientCommandChannel* const command_channel_;
+ scoped_ptr<EventListenerHookup> directory_manager_hookup_;
+ scoped_ptr<EventListenerHookup> syncer_events_;
+
+ // Handles any tasks that will result in model changes (modifications of
+ // syncable::Entries). Pass this to the syncer created and managed by |this|.
+ // Only non-null in syncapi case.
+ scoped_ptr<ModelSafeWorker> model_safe_worker_;
+
+ // Useful for unit tests
+ bool disable_idle_detection_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncerThread);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
diff --git a/chrome/browser/sync/engine/syncer_thread_unittest.cc b/chrome/browser/sync/engine/syncer_thread_unittest.cc
new file mode 100644
index 0000000..dd81176
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+#include <map>
+#include <set>
+#include <strstream>
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+class SyncerThreadTest : public testing::Test {
+ protected:
+ SyncerThreadTest() {}
+ virtual ~SyncerThreadTest() {}
+ virtual void SetUp() {}
+ virtual void TearDown() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SyncerThreadTest);
+};
+
+TEST_F(SyncerThreadTest, Construction) {
+ SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
+}
+
+TEST_F(SyncerThreadTest, CalculateSyncWaitTime) {
+ SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
+ syncer_thread.DisableIdleDetection();
+
+ // Syncer_polling_interval_ is less than max poll interval
+ int syncer_polling_interval = 1; // Needed since AssertionResult is not a
+ // friend of SyncerThread
+ syncer_thread.syncer_polling_interval_ = syncer_polling_interval;
+
+ // user_idle_ms is less than 10 * (syncer_polling_interval*1000).
+ ASSERT_EQ(syncer_polling_interval * 1000,
+ syncer_thread.CalculateSyncWaitTime(1000, 0));
+ ASSERT_EQ(syncer_polling_interval * 1000,
+ syncer_thread.CalculateSyncWaitTime(1000, 1));
+
+ // user_idle_ms is ge than 10 * (syncer_polling_interval*1000).
+ int last_poll_time = 2000;
+ ASSERT_LE(last_poll_time,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time, 10000));
+ ASSERT_GE(last_poll_time*3,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time, 10000));
+ ASSERT_LE(last_poll_time,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time, 100000));
+ ASSERT_GE(last_poll_time*3,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time, 100000));
+
+ // Maximum backoff time should be syncer_max_interval.
+ int near_threshold = SyncerThread::kDefaultMaxPollIntervalMs / 2 - 1;
+ int threshold = SyncerThread::kDefaultMaxPollIntervalMs;
+ int over_threshold = SyncerThread::kDefaultMaxPollIntervalMs + 1;
+ ASSERT_LE(near_threshold,
+ syncer_thread.CalculateSyncWaitTime(near_threshold, 10000));
+ ASSERT_GE(SyncerThread::kDefaultMaxPollIntervalMs,
+ syncer_thread.CalculateSyncWaitTime(near_threshold, 10000));
+ ASSERT_EQ(SyncerThread::kDefaultMaxPollIntervalMs,
+ syncer_thread.CalculateSyncWaitTime(threshold, 10000));
+ ASSERT_EQ(SyncerThread::kDefaultMaxPollIntervalMs,
+ syncer_thread.CalculateSyncWaitTime(over_threshold, 10000));
+
+ // Possible idle time must be capped by syncer_max_interval.
+ int over_sync_max_interval =
+ SyncerThread::kDefaultMaxPollIntervalMs + 1;
+ syncer_polling_interval = over_sync_max_interval / 100; // so 1000* is right
+ syncer_thread.syncer_polling_interval_ = syncer_polling_interval;
+ ASSERT_EQ(syncer_polling_interval * 1000,
+ syncer_thread.CalculateSyncWaitTime(1000, over_sync_max_interval));
+ syncer_polling_interval = 1;
+ syncer_thread.syncer_polling_interval_ = syncer_polling_interval;
+ ASSERT_LE(last_poll_time,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time,
+ over_sync_max_interval));
+ ASSERT_GE(last_poll_time * 3,
+ syncer_thread.CalculateSyncWaitTime(last_poll_time,
+ over_sync_max_interval));
+}
+
+TEST_F(SyncerThreadTest, CalculatePollingWaitTime) {
+ // Set up the environment
+ int user_idle_milliseconds_param = 0;
+
+ SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
+ syncer_thread.DisableIdleDetection();
+
+ // Notifications disabled should result in a polling interval of
+ // kDefaultShortPollInterval
+ {
+ AllStatus::Status status = {};
+ status.notifications_enabled = 0;
+ bool continue_sync_cycle_param = false;
+
+ // No work and no backoff.
+ ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+
+ // In this case the continue_sync_cycle is turned off.
+ continue_sync_cycle_param = true;
+ ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+
+ // TODO(brg) : Find a way to test exponential backoff is inoperable.
+ // Exponential backoff should be turned on when notifications are disabled
+ // but this can not be tested since we can not set the last input info.
+ }
+
+ // Notifications enabled should result in a polling interval of
+ // SyncerThread::kDefaultLongPollIntervalSeconds
+ {
+ AllStatus::Status status = {};
+ status.notifications_enabled = 1;
+ bool continue_sync_cycle_param = false;
+
+ // No work and no backoff.
+ ASSERT_EQ(SyncerThread::kDefaultLongPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+
+ // In this case the continue_sync_cycle is turned off.
+ continue_sync_cycle_param = true;
+ ASSERT_EQ(SyncerThread::kDefaultLongPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+
+ // TODO(brg) : Find a way to test exponential backoff.
+ // Exponential backoff should be turned off when notifications are enabled,
+ // but this can not be tested since we can not set the last input info.
+ }
+
+ // There are two states which can cause a continuation, either the updates
+ // available do not match the updates received, or the unsynced count is
+ // non-zero.
+ {
+ AllStatus::Status status = {};
+ status.updates_available = 1;
+ status.updates_received = 0;
+ bool continue_sync_cycle_param = false;
+
+ ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ continue_sync_cycle_param = false;
+ ASSERT_GE(3, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ status.updates_received = 1;
+ ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 10,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+ }
+
+ {
+ AllStatus::Status status = {};
+ status.unsynced_count = 1;
+ bool continue_sync_cycle_param = false;
+
+ ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ continue_sync_cycle_param = false;
+ ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 0,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ status.unsynced_count = 0;
+ ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 4,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+ }
+
+ // Regression for exponential backoff reset when the
+ // syncer is nudged.
+ {
+ AllStatus::Status status = {};
+ status.unsynced_count = 1;
+ bool continue_sync_cycle_param = false;
+
+ // Expect move from default polling interval to exponential backoff due to
+ // unsynced_count != 0.
+ ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 3600,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ continue_sync_cycle_param = false;
+ ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 3600,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ // Expect exponential backoff.
+ ASSERT_LE(2, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 2,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_GE(6, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 2,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ // A nudge resets the continue_sync_cycle_param value, so our backoff
+ // should return to the minimum.
+ continue_sync_cycle_param = false;
+ ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 3600,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ continue_sync_cycle_param = false;
+ ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
+ status,
+ 3600,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_TRUE(continue_sync_cycle_param);
+
+ // Setting unsynced_count = 0 returns us to the default polling interval.
+ status.unsynced_count = 0;
+ ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
+ syncer_thread.CalculatePollingWaitTime(
+ status,
+ 4,
+ &user_idle_milliseconds_param,
+ &continue_sync_cycle_param));
+ ASSERT_FALSE(continue_sync_cycle_param);
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_types.h b/chrome/browser/sync/engine/syncer_types.h
new file mode 100644
index 0000000..0d61984
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_types.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
+
+#include <map>
+#include <vector>
+
+#include "chrome/browser/sync/util/event_sys.h"
+
+namespace syncable {
+class BaseTransaction;
+class Id;
+}
+
+// The intent of this is to keep all shared data types and enums
+// for the syncer in a single place without having dependencies between
+// other files.
+namespace browser_sync {
+
+class SyncProcessState;
+class SyncCycleState;
+class SyncerSession;
+class Syncer;
+
+enum UpdateAttemptResponse {
+ // Update was applied or safely ignored
+ SUCCESS,
+
+ // This state is deprecated.
+ // TODO(sync): Remove this state.
+ BLOCKED,
+
+ // Conflicts with the local data representation.
+ // This can also mean that the entry doesn't currently make sense
+ // if we applied it.
+ CONFLICT,
+
+ // This return value is only returned by AttemptToUpdateEntryWithoutMerge
+ // if we have a name conflict. Users of AttemptToUpdateEntry should never
+ // see this return value, we'll return CONFLICT.
+ NAME_CONFLICT,
+};
+
+enum ServerUpdateProcessingResult {
+ // Success. Update applied and stored in SERVER_* fields or dropped if
+ // irrelevant.
+ SUCCESS_PROCESSED,
+
+ // Success. Update details stored in SERVER_* fields, but wasn't applied.
+ SUCCESS_STORED,
+
+ // Update is illegally inconsistent with earlier updates. e.g. A bookmark
+ // becoming a folder.
+ FAILED_INCONSISTENT,
+
+ // Update is illegal when considered alone. e.g. broken UTF-8 in the name.
+ FAILED_CORRUPT,
+
+ // Only used by VerifyUpdate. Indicates that an update is valid. As
+ // VerifyUpdate cannot return SUCCESS_STORED, we reuse the value.
+ SUCCESS_VALID = SUCCESS_STORED
+};
+
+// Different results from the verify phase will yield different
+// methods of processing in the ProcessUpdates phase. The SKIP
+// result means the entry doesn't go to the ProcessUpdates phase.
+enum VerifyResult {
+ VERIFY_FAIL,
+ VERIFY_SUCCESS,
+ VERIFY_UNDELETE,
+ VERIFY_SKIP,
+ VERIFY_UNDECIDED
+};
+
+enum VerifyCommitResult {
+ VERIFY_BLOCKED,
+ VERIFY_UNSYNCABLE,
+ VERIFY_OK,
+};
+
+struct SyncerEvent {
+ typedef SyncerEvent EventType;
+
+ enum EventCause {
+ COMMITS_SUCCEEDED, // Count is stored in successful_commit_count.
+
+ STATUS_CHANGED,
+
+ // Take care not to wait in shutdown handlers for the syncer to stop as it
+ // causes a race in the event system. Use SyncerShutdownEvent instead.
+ SHUTDOWN_USE_WITH_CARE,
+
+ // We're over our quota.
+ OVER_QUOTA,
+
+ // This event is how the syncer requests that it be synced.
+ REQUEST_SYNC_NUDGE,
+
+ // We have reached the SYNCER_END state in the main sync loop.
+ // Check the SyncerSession for information like whether we need to continue
+ // syncing (SyncerSession::ShouldSyncAgain).
+ SYNC_CYCLE_ENDED,
+ };
+
+ static bool IsChannelShutdownEvent(const SyncerEvent& e) {
+ return SHUTDOWN_USE_WITH_CARE == e.what_happened;
+ }
+
+ // This is used to put SyncerEvents into sorted STL structures.
+ bool operator < (const SyncerEvent& r) const {
+ return this->what_happened < r.what_happened;
+ }
+
+ EventCause what_happened;
+
+ // The last session used for syncing.
+ SyncerSession* last_session;
+
+ int successful_commit_count;
+
+ // How many milliseconds later should the syncer kick in?
+ // for REQUEST_SYNC_NUDGE only.
+ int nudge_delay_milliseconds;
+};
+
+struct SyncerShutdownEvent {
+ typedef Syncer* EventType;
+ static bool IsChannelShutdownEvent(Syncer* syncer) {
+ return true;
+ }
+};
+
+typedef EventChannel<SyncerEvent, PThreadMutex> SyncerEventChannel;
+
+typedef EventChannel<SyncerShutdownEvent, PThreadMutex> ShutdownChannel;
+
+// This struct is passed between parts of the syncer during the processing of
+// one sync loop. It lives on the stack. We don't expose the number of
+// conflicts during SyncShare as the conflicts may be solved automatically
+// by the conflict resolver.
+typedef std::vector<syncable::Id> ConflictSet;
+
+typedef std::map<syncable::Id, ConflictSet*> IdToConflictSetMap;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
new file mode 100644
index 0000000..27bdb9b
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -0,0 +1,4588 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+//
+// Syncer unit tests. Unfortunately a lot of these tests
+// are outdated and need to be reworked and updated.
+
+#include <list>
+#include <map>
+#include <set>
+#include <strstream>
+
+#include "base/at_exit.h"
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/process_updates_command.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/test/sync/engine/mock_server_connection.h"
+#include "chrome/test/sync/engine/test_directory_setter_upper.h"
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::map;
+using std::multimap;
+using std::set;
+using std::string;
+
+namespace browser_sync {
+
+using syncable::BaseTransaction;
+using syncable::Blob;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::ExtendedAttribute;
+using syncable::ExtendedAttributeKey;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::MutableExtendedAttribute;
+using syncable::ReadTransaction;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+
+using syncable::BASE_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::GET_BY_PARENTID_AND_NAME;
+using syncable::GET_BY_PATH;
+using syncable::GET_BY_TAG;
+using syncable::ID;
+using syncable::IS_BOOKMARK_OBJECT;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::NAME;
+using syncable::NEXT_ID;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SINGLETON_TAG;
+using syncable::UNITTEST;
+using syncable::UNSANITIZED_NAME;
+
+namespace {
+const char* kTestData = "Hello World!";
+const int kTestDataLen = 12;
+const int64 kTestLogRequestTimestamp = 123456;
+} // namespace
+
+
+class SyncerTest : public testing::Test {
+ protected:
+ SyncerTest() : client_command_channel_(0) {
+ }
+
+ void HandleClientCommand(const sync_pb::ClientCommand* event) {
+ last_client_command_ = *event;
+ }
+
+ void HandleSyncerEvent(SyncerEvent event) {
+ LOG(INFO) << "HandleSyncerEvent in unittest " << event.what_happened;
+ // we only test for entry-specific events, not status changed ones.
+ switch (event.what_happened) {
+ case SyncerEvent::STATUS_CHANGED:
+ // fall through
+ case SyncerEvent::SYNC_CYCLE_ENDED:
+ // fall through
+ case SyncerEvent::COMMITS_SUCCEEDED:
+ return;
+ case SyncerEvent::SHUTDOWN_USE_WITH_CARE:
+ case SyncerEvent::OVER_QUOTA:
+ case SyncerEvent::REQUEST_SYNC_NUDGE:
+ LOG(INFO) << "Handling event type " << event.what_happened;
+ break;
+ default:
+ CHECK(false) << "Handling unknown error type in unit tests!!";
+ }
+ syncer_events_.insert(event);
+ }
+
+ void LoopSyncShare(Syncer* syncer) {
+ SyncProcessState state(syncdb_.manager(), syncdb_.name(),
+ mock_server_.get(),
+ syncer->conflict_resolver(),
+ syncer->channel(),
+ syncer->model_safe_worker());
+ bool should_loop = false;
+ int loop_iterations = 0;
+ do {
+ ASSERT_LT(++loop_iterations, 100) << "infinite loop detected. please fix";
+ should_loop = syncer->SyncShare(&state);
+ } while (should_loop);
+ }
+
+ virtual void SetUp() {
+ syncdb_.SetUp();
+
+ mock_server_.reset(
+ new MockConnectionManager(syncdb_.manager(), syncdb_.name()));
+ model_safe_worker_.reset(new ModelSafeWorker());
+ // Safe to pass NULL as Authwatcher for now since the code path that
+ // uses it is not unittested yet.
+ syncer_ = new Syncer(syncdb_.manager(), syncdb_.name(),
+ mock_server_.get(),
+ model_safe_worker_.get());
+ CHECK(syncer_->channel());
+
+ hookup_.reset(NewEventListenerHookup(syncer_->channel(), this,
+ &SyncerTest::HandleSyncerEvent));
+
+ command_channel_hookup_.reset(NewEventListenerHookup(
+ &client_command_channel_, this, &SyncerTest::HandleClientCommand));
+ syncer_->set_command_channel(&client_command_channel_);
+
+ state_.reset(new SyncProcessState(syncdb_.manager(), syncdb_.name(),
+ mock_server_.get(),
+ syncer_->conflict_resolver(),
+ syncer_->channel(),
+ syncer_->model_safe_worker()));
+
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ syncable::Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, trans.root_id(), &children);
+ ASSERT_EQ(0, children.size());
+ syncer_events_.clear();
+ root_id_ = ids_.root();
+ parent_id_ = ids_.MakeServer("parent id");
+ child_id_ = ids_.MakeServer("child id");
+ }
+
+ virtual void TearDown() {
+ mock_server_.reset();
+ hookup_.reset();
+ command_channel_hookup_.reset();
+ delete syncer_;
+ syncdb_.TearDown();
+ }
+ void WriteTestDataToEntry(WriteTransaction* trans, MutableEntry* entry) {
+ EXPECT_FALSE(entry->Get(IS_DIR));
+ EXPECT_FALSE(entry->Get(IS_DEL));
+ Blob test_value(kTestData, kTestData + kTestDataLen);
+ ExtendedAttributeKey key(entry->Get(META_HANDLE), PSTR("DATA"));
+ MutableExtendedAttribute attr(trans, CREATE, key);
+ attr.mutable_value()->swap(test_value);
+ entry->Put(syncable::IS_UNSYNCED, true);
+ }
+ void VerifyTestDataInEntry(BaseTransaction* trans, Entry* entry) {
+ EXPECT_FALSE(entry->Get(IS_DIR));
+ EXPECT_FALSE(entry->Get(IS_DEL));
+ Blob test_value(kTestData, kTestData + kTestDataLen);
+ ExtendedAttributeKey key(entry->Get(META_HANDLE), PSTR("DATA"));
+ ExtendedAttribute attr(trans, GET_BY_HANDLE, key);
+ EXPECT_FALSE(attr.is_deleted());
+ EXPECT_EQ(test_value, attr.value());
+ }
+ bool SyncerStuck(SyncProcessState *state) {
+ SyncerStatus status(NULL, state);
+ return status.syncer_stuck();
+ }
+ void SyncRepeatedlyToTriggerConflictResolution(SyncProcessState *state) {
+ // We should trigger after less than 6 syncs, but we want to avoid brittle
+ // tests.
+ for (int i = 0 ; i < 6 ; ++i)
+ syncer_->SyncShare(state);
+ }
+ void SyncRepeatedlyToTriggerStuckSignal(SyncProcessState *state) {
+ // We should trigger after less than 10 syncs, but we want to avoid brittle
+ // tests.
+ for (int i = 0 ; i < 12 ; ++i)
+ syncer_->SyncShare(state);
+ }
+
+ // Enumeration of alterations to entries for commit ordering tests.
+ enum EntryFeature {
+ LIST_END = 0, // Denotes the end of the list of features from below.
+ SYNCED, // Items are unsynced by default
+ DELETED,
+ OLD_MTIME,
+ MOVED_FROM_ROOT,
+ };
+
+ struct CommitOrderingTest {
+ // expected commit index.
+ int commit_index;
+ // Details about the item
+ syncable::Id id;
+ syncable::Id parent_id;
+ EntryFeature features[10];
+
+ static const CommitOrderingTest LAST_COMMIT_ITEM;
+ };
+
+ void RunCommitOrderingTest(CommitOrderingTest* test) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ map<int, syncable::Id> expected_positions;
+ { // Transaction scope.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ while (!test->id.IsRoot()) {
+ if (test->commit_index >= 0) {
+ map<int, syncable::Id>::value_type entry(test->commit_index,
+ test->id);
+ bool double_position = !expected_positions.insert(entry).second;
+ ASSERT_FALSE(double_position) << "Two id's expected at one position";
+ }
+ string utf8_name = test->id.GetServerId();
+ PathString name(utf8_name.begin(), utf8_name.end());
+ MutableEntry entry(&trans, CREATE, test->parent_id, name);
+ entry.Put(syncable::ID, test->id);
+ if (test->id.ServerKnows()) {
+ entry.Put(BASE_VERSION, 5);
+ entry.Put(SERVER_VERSION, 5);
+ entry.Put(SERVER_PARENT_ID, test->parent_id);
+ }
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ // Set the time to 30 seconds in the future to reduce the chance of
+ // flaky tests.
+ int64 now_server_time = ClientTimeToServerTime(syncable::Now());
+ int64 now_plus_30s = ServerTimeToClientTime(now_server_time + 30000);
+ int64 now_minus_2h = ServerTimeToClientTime(now_server_time - 7200000);
+ entry.Put(syncable::MTIME, now_plus_30s);
+ for (int i = 0 ; i < ARRAYSIZE(test->features) ; ++i) {
+ switch (test->features[i]) {
+ case LIST_END:
+ break;
+ case SYNCED:
+ entry.Put(syncable::IS_UNSYNCED, false);
+ break;
+ case DELETED:
+ entry.Put(syncable::IS_DEL, true);
+ break;
+ case OLD_MTIME:
+ entry.Put(MTIME, now_minus_2h);
+ break;
+ case MOVED_FROM_ROOT:
+ entry.Put(SERVER_PARENT_ID, trans.root_id());
+ break;
+ default:
+ FAIL() << "Bad value in CommitOrderingTest list";
+ }
+ }
+ test++;
+ }
+ }
+ LoopSyncShare(syncer_);
+ ASSERT_EQ(expected_positions.size(), mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ for (size_t i = 0; i < expected_positions.size(); ++i) {
+ EXPECT_EQ(1, expected_positions.count(i));
+ EXPECT_EQ(expected_positions[i], mock_server_->committed_ids()[i]);
+ }
+ }
+
+ void DoTruncationTest(const ScopedDirLookup& dir,
+ const vector<int64>& unsynced_handle_view,
+ const vector<syncable::Id>& expected_id_order) {
+ // The expected order is "x", "b", "c", "e", truncated appropriately.
+ for (size_t limit = expected_id_order.size() + 2; limit > 0; --limit) {
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);
+ session.set_unsynced_handles(unsynced_handle_view);
+
+ GetCommitIdsCommand command(limit);
+ command.BuildCommitIds(&session);
+ vector<syncable::Id> output = command.ordered_commit_set_.GetCommitIds();
+ int truncated_size = std::min(limit, expected_id_order.size());
+ ASSERT_EQ(truncated_size, output.size());
+ for (int i = 0; i < truncated_size; ++i) {
+ ASSERT_EQ(expected_id_order[i], output[i])
+ << "At index " << i << " with batch size limited to " << limit;
+ }
+ }
+ }
+
+ int64 CreateUnsyncedDirectory(const PathString& entry_name,
+ const string& idstring) {
+ return CreateUnsyncedDirectory(entry_name,
+ syncable::Id::CreateFromServerId(idstring));
+ }
+
+ int64 CreateUnsyncedDirectory(const PathString& entry_name,
+ const syncable::Id& id) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(),
+ entry_name);
+ EXPECT_TRUE(entry.good());
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::BASE_VERSION, id.ServerKnows() ? 1 : 0);
+ entry.Put(syncable::ID, id);
+ return entry.Get(META_HANDLE);
+ }
+
+ // Some ids to aid tests. Only the root one's value is specific. The rest
+ // are named for test clarity.
+ syncable::Id root_id_;
+ syncable::Id parent_id_;
+ syncable::Id child_id_;
+
+ TestIdFactory ids_;
+
+ TestDirectorySetterUpper syncdb_;
+ scoped_ptr<MockConnectionManager> mock_server_;
+ scoped_ptr<EventListenerHookup> hookup_;
+ scoped_ptr<EventListenerHookup> command_channel_hookup_;
+ ClientCommandChannel client_command_channel_;
+
+ Syncer* syncer_;
+ scoped_ptr<SyncProcessState> state_;
+ scoped_ptr<ModelSafeWorker> model_safe_worker_;
+ std::set<SyncerEvent> syncer_events_;
+ sync_pb::ClientCommand last_client_command_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncerTest);
+};
+
+TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ Syncer::UnsyncedMetaHandles handles;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ }
+ ASSERT_EQ(0, handles.size());
+ }
+ // TODO(sync): When we can dynamically connect and disconnect the mock
+ // ServerConnectionManager test disconnected GetUnsyncedEntries here. It's a
+ // regression for a very old bug.
+}
+
+TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ int64 handle_c = CreateUnsyncedDirectory(PSTR("C"), ids_.MakeLocal("c"));
+ int64 handle_x = CreateUnsyncedDirectory(PSTR("X"), ids_.MakeLocal("x"));
+ int64 handle_b = CreateUnsyncedDirectory(PSTR("B"), ids_.MakeLocal("b"));
+ int64 handle_d = CreateUnsyncedDirectory(PSTR("D"), ids_.MakeLocal("d"));
+ int64 handle_e = CreateUnsyncedDirectory(PSTR("E"), ids_.MakeLocal("e"));
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry_x(&wtrans, GET_BY_HANDLE, handle_x);
+ MutableEntry entry_b(&wtrans, GET_BY_HANDLE, handle_b);
+ MutableEntry entry_c(&wtrans, GET_BY_HANDLE, handle_c);
+ MutableEntry entry_d(&wtrans, GET_BY_HANDLE, handle_d);
+ MutableEntry entry_e(&wtrans, GET_BY_HANDLE, handle_e);
+ entry_x.Put(IS_BOOKMARK_OBJECT, true);
+ entry_b.Put(IS_BOOKMARK_OBJECT, true);
+ entry_c.Put(IS_BOOKMARK_OBJECT, true);
+ entry_d.Put(IS_BOOKMARK_OBJECT, true);
+ entry_e.Put(IS_BOOKMARK_OBJECT, true);
+ entry_b.Put(PARENT_ID, entry_x.Get(ID));
+ entry_c.Put(PARENT_ID, entry_x.Get(ID));
+ entry_c.PutPredecessor(entry_b.Get(ID));
+ entry_d.Put(PARENT_ID, entry_b.Get(ID));
+ entry_e.Put(PARENT_ID, entry_c.Get(ID));
+ }
+
+ // The arrangement is now: x (b (d) c (e)).
+ vector<int64> unsynced_handle_view;
+ vector<syncable::Id> expected_order;
+ // The expected order is "x", "b", "c", "e", truncated appropriately.
+ unsynced_handle_view.push_back(handle_e);
+ expected_order.push_back(ids_.MakeLocal("x"));
+ expected_order.push_back(ids_.MakeLocal("b"));
+ expected_order.push_back(ids_.MakeLocal("c"));
+ expected_order.push_back(ids_.MakeLocal("e"));
+ DoTruncationTest(dir, unsynced_handle_view, expected_order);
+}
+
+// TODO(chron): More corner case unit tests around validation
+TEST_F(SyncerTest, TestCommitMetahandleIterator) {
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);
+
+ GetCommitIdsCommand::OrderedCommitSet commit_set;
+ GetCommitIdsCommand::CommitMetahandleIterator iterator(&session,
+ &commit_set);
+ EXPECT_FALSE(iterator.Valid());
+ EXPECT_FALSE(iterator.Increment());
+ }
+
+ {
+ vector<int64> session_metahandles;
+ session_metahandles.push_back(
+ CreateUnsyncedDirectory(PSTR("test1"), "testid1"));
+ session_metahandles.push_back(
+ CreateUnsyncedDirectory(PSTR("test2"), "testid2"));
+ session_metahandles.push_back(
+ CreateUnsyncedDirectory(PSTR("test3"), "testid3"));
+ session.set_unsynced_handles(session_metahandles);
+
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);
+ GetCommitIdsCommand::OrderedCommitSet commit_set;
+ GetCommitIdsCommand::CommitMetahandleIterator iterator(&session,
+ &commit_set);
+
+ EXPECT_TRUE(iterator.Valid());
+ EXPECT_EQ(iterator.Current(), session_metahandles[0]);
+ EXPECT_TRUE(iterator.Increment());
+
+ EXPECT_TRUE(iterator.Valid());
+ EXPECT_EQ(iterator.Current(), session_metahandles[1]);
+ EXPECT_TRUE(iterator.Increment());
+
+ EXPECT_TRUE(iterator.Valid());
+ EXPECT_EQ(iterator.Current(), session_metahandles[2]);
+ EXPECT_FALSE(iterator.Increment());
+
+ EXPECT_FALSE(iterator.Valid());
+ }
+}
+
+TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ PathString xattr_key = PSTR("key");
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ PSTR("Pete"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("Pete"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 1);
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(2, session.unsynced_count());
+ ASSERT_EQ(2, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
+ {
+ ReadTransaction rt(dir, __FILE__, __LINE__);
+ Entry entry(&rt, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(entry.good());
+ VerifyTestDataInEntry(&rt, &entry);
+ }
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-1001), ids_.FromNumber(-1000)},
+ {0, ids_.FromNumber(-1000), ids_.FromNumber(0)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTallLimitedSize) {
+ syncer_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItem) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleUncommittedDeletedItem) {
+ CommitOrderingTest items[] = {
+ {-1, ids_.FromNumber(-1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest,
+ TestCommitListOrderingSingleLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrdering3LongDeletedItemsWithSizeLimit) {
+ syncer_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {1, ids_.FromNumber(1001), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {2, ids_.FromNumber(1002), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoDeletedItemsWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingComplexDeletionScenario) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest,
+ TestCommitListOrderingComplexDeletionScenarioWith2RecentDeletes) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ {3, ids_.FromNumber(1005), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingDeleteMovedItems) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME,
+ MOVED_FROM_ROOT}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ int64 now_server_time = ClientTimeToServerTime(syncable::Now());
+ int64 now_minus_2h = ServerTimeToClientTime(now_server_time - 7200000);
+
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ {
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ PSTR("Bob"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, ids_.FromNumber(100));
+ parent.Put(syncable::BASE_VERSION, 1);
+ MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(100),
+ PSTR("Bob"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, ids_.FromNumber(101));
+ child.Put(syncable::BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(101),
+ PSTR("Bob"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(syncable::ID, ids_.FromNumber(102));
+ grandchild.Put(syncable::IS_UNSYNCED, true);
+ grandchild.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ // Create three deleted items which deletions we expect to
+ // be sent to the server.
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ PSTR("Pete"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::IS_DEL, true);
+ parent.Put(syncable::ID, ids_.FromNumber(103));
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::MTIME, now_minus_2h);
+ MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(103),
+ PSTR("Pete"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::IS_DEL, true);
+ child.Put(syncable::ID, ids_.FromNumber(104));
+ child.Put(syncable::BASE_VERSION, 1);
+ child.Put(syncable::MTIME, now_minus_2h);
+ MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(104),
+ PSTR("Pete"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(syncable::IS_UNSYNCED, true);
+ grandchild.Put(syncable::ID, ids_.FromNumber(105));
+ grandchild.Put(syncable::IS_DEL, true);
+ grandchild.Put(syncable::IS_DIR, false);
+ grandchild.Put(syncable::BASE_VERSION, 1);
+ grandchild.Put(syncable::MTIME, now_minus_2h);
+ }
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(6, session.unsynced_count());
+ ASSERT_EQ(6, mock_server_->committed_ids().size());
+ // This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
+ // It will treat these like moves.
+ vector<syncable::Id> commit_ids(mock_server_->committed_ids());
+ EXPECT_EQ(ids_.FromNumber(100), commit_ids[0]);
+ EXPECT_EQ(ids_.FromNumber(101), commit_ids[1]);
+ EXPECT_EQ(ids_.FromNumber(102), commit_ids[2]);
+ // We don't guarantee the delete orders in this test, only that they occur
+ // at the end.
+ std::sort(commit_ids.begin() + 3, commit_ids.end());
+ EXPECT_EQ(ids_.FromNumber(103), commit_ids[3]);
+ EXPECT_EQ(ids_.FromNumber(104), commit_ids[4]);
+ EXPECT_EQ(ids_.FromNumber(105), commit_ids[5]);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("2"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, child_id_);
+ parent.Put(syncable::BASE_VERSION, 1);
+ child.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, ids_.FromNumber(102));
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("B"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, ids_.FromNumber(-103));
+ parent.Put(syncable::BASE_VERSION, 1);
+ }
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, child_id_, PSTR("A"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, ids_.FromNumber(-104));
+ MutableEntry child(&wtrans, syncable::CREATE, child_id_, PSTR("B"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, ids_.FromNumber(105));
+ child.Put(syncable::BASE_VERSION, 1);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(6, session.unsynced_count());
+ ASSERT_EQ(6, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
+ EXPECT_EQ(ids_.FromNumber(102), mock_server_->committed_ids()[2]);
+ EXPECT_EQ(ids_.FromNumber(-103), mock_server_->committed_ids()[3]);
+ EXPECT_EQ(ids_.FromNumber(-104), mock_server_->committed_ids()[4]);
+ EXPECT_EQ(ids_.FromNumber(105), mock_server_->committed_ids()[5]);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+
+ syncable::Id child2_id = ids_.NewServerId();
+
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("P"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child1(&wtrans, syncable::CREATE, parent_id_, PSTR("1"));
+ ASSERT_TRUE(child1.good());
+ child1.Put(syncable::IS_UNSYNCED, true);
+ child1.Put(syncable::ID, child_id_);
+ MutableEntry child2(&wtrans, syncable::CREATE, parent_id_, PSTR("2"));
+ ASSERT_TRUE(child2.good());
+ child2.Put(syncable::IS_UNSYNCED, true);
+ child2.Put(syncable::ID, child2_id);
+ parent.Put(syncable::BASE_VERSION, 1);
+ child1.Put(syncable::BASE_VERSION, 1);
+ child2.Put(syncable::BASE_VERSION, 1);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(3, session.unsynced_count());
+ ASSERT_EQ(3, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
+ EXPECT_EQ(child2_id, mock_server_->committed_ids()[2]);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ parent.Put(syncable::BASE_VERSION, 1);
+ }
+
+ syncable::Id parent2_id = ids_.NewLocalId();
+ syncable::Id child2_id = ids_.NewServerId();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent2_id);
+ MutableEntry child(&wtrans, syncable::CREATE, parent2_id, PSTR("B"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, child2_id);
+ child.Put(syncable::BASE_VERSION, 1);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(3, session.unsynced_count());
+ ASSERT_EQ(3, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(parent2_id, mock_server_->committed_ids()[1]);
+ EXPECT_EQ(child2_id, mock_server_->committed_ids()[2]);
+ {
+ ReadTransaction rtrans(dir, __FILE__, __LINE__);
+ PathChar path[] = { '1', *kPathSeparator, 'A', 0};
+ Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
+ ASSERT_TRUE(entry_1A.good());
+ Entry item_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
+ ASSERT_FALSE(item_parent2.good());
+ Entry item_child2(&rtrans, syncable::GET_BY_ID, child2_id);
+ EXPECT_EQ(entry_1A.Get(syncable::ID), item_child2.Get(syncable::PARENT_ID));
+ EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+ }
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ parent.Put(syncable::BASE_VERSION, 1);
+ }
+ int64 meta_handle_a, meta_handle_b;
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, ids_.FromNumber(-101));
+ meta_handle_a = parent.Get(syncable::META_HANDLE);
+ MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(-101),
+ PSTR("B"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ child.Put(syncable::ID, ids_.FromNumber(-102));
+ meta_handle_b = child.Get(syncable::META_HANDLE);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(3, session.unsynced_count());
+ ASSERT_EQ(3, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(ids_.FromNumber(-101), mock_server_->committed_ids()[1]);
+ EXPECT_EQ(ids_.FromNumber(-102), mock_server_->committed_ids()[2]);
+ {
+ ReadTransaction rtrans(dir, __FILE__, __LINE__);
+ PathChar path[] = { '1', *kPathSeparator, 'A', 0};
+ Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
+ ASSERT_TRUE(entry_1A.good());
+ Entry entry_id_minus_101(&rtrans, syncable::GET_BY_ID,
+ ids_.FromNumber(-101));
+ ASSERT_FALSE(entry_id_minus_101.good());
+ Entry entry_b(&rtrans, syncable::GET_BY_HANDLE, meta_handle_b);
+ EXPECT_EQ(entry_1A.Get(syncable::ID), entry_b.Get(syncable::PARENT_ID));
+ EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+ }
+}
+
+TEST_F(SyncerTest, UpdateWithZeroLengthName) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // One illegal update
+ mock_server_->AddUpdateDirectory(1, 0, "", 1, 10);
+ // And one legal one that we're going to delete.
+ mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10);
+ syncer_->SyncShare();
+ // Delete the legal one. The new update has a null name.
+ mock_server_->AddUpdateDirectory(2, 0, "", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare();
+}
+
+#ifdef OS_WINDOWS
+TEST_F(SyncerTest, NameSanitizationWithClientRename) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("okay"));
+ ASSERT_TRUE(e.good());
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
+ syncer_->SyncShare();
+ {
+ WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("prn~1"));
+ ASSERT_TRUE(e.good());
+ e.PutName(syncable::Name(PSTR("printer")));
+ e.Put(syncable::IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare();
+ {
+ vector<CommitMessage*>::const_reverse_iterator it =
+ mock_server_->commit_messages().rbegin();
+ ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
+ const sync_pb::SyncEntity *const *s = (*it)->entries().data();
+ int s_len = (*it)->entries_size();
+ ASSERT_EQ(1, s_len);
+ ASSERT_EQ("printer", (*s)[0].name());
+ }
+}
+
+TEST_F(SyncerTest, NameSanitizationWithCascade) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "prn~1", 1, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("prn~1"));
+ ASSERT_TRUE(e.good());
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
+ syncer_->SyncShare();
+ {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("prn~2"));
+ ASSERT_TRUE(e.good());
+ }
+ mock_server_->AddUpdateDirectory(3, 0, "prn~2", 1, 30);
+ syncer_->SyncShare();
+ {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("prn~3"));
+ ASSERT_TRUE(e.good());
+ }
+}
+
+TEST_F(SyncerTest, GetStuckWithConflictingSanitizedNames) {
+ // We should get stuck here because we get two server updates with exactly the
+ // same name.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "foo:", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
+ SyncRepeatedlyToTriggerStuckSignal(state_.get());
+ EXPECT_TRUE(SyncerStuck(state_.get()));
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, MergeFolderWithSanitizedNameMatches) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ parent.Put(UNSANITIZED_NAME, PSTR("Folder:"));
+ }
+ mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, trans.root_id(), &children);
+ EXPECT_EQ(1, children.size());
+ Directory::UnappliedUpdateMetaHandles unapplied;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+ EXPECT_EQ(0, unapplied.size());
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(0, unsynced.size());
+ syncer_events_.clear();
+ }
+}
+
+// These two tests are the same as the two above, but they introduce case
+// changes.
+TEST_F(SyncerTest, GetStuckWithSanitizedNamesThatDifferOnlyByCase) {
+ // We should get stuck here because we get two server updates with exactly the
+ // same name.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "FOO:", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
+ SyncRepeatedlyToTriggerStuckSignal(state_.get());
+ EXPECT_TRUE(SyncerStuck(state_.get()));
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, MergeFolderWithSanitizedNameThatDiffersOnlyByCase) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("FOLDER"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ parent.Put(UNSANITIZED_NAME, PSTR("FOLDER:"));
+ }
+ mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ syncer_->SyncShare(); // Good gracious, these tests are not so good.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, trans.root_id(), &children);
+ EXPECT_EQ(1, children.size());
+ Directory::UnappliedUpdateMetaHandles unapplied;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+ EXPECT_EQ(0, unapplied.size());
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(0, unsynced.size());
+ syncer_events_.clear();
+ }
+}
+#else // Mac / Linux ...
+
+TEST_F(SyncerTest, NameSanitizationWithClientRename) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("okay"));
+ ASSERT_TRUE(e.good());
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "a/b", 1, 20);
+ syncer_->SyncShare();
+ {
+ WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ PSTR("a:b"));
+ ASSERT_TRUE(e.good());
+ e.PutName(syncable::Name(PSTR("ab")));
+ e.Put(syncable::IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare();
+ {
+ vector<CommitMessage*>::const_reverse_iterator it =
+ mock_server_->commit_messages().rbegin();
+ ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
+ const sync_pb::SyncEntity *const *s = (*it)->entries().data();
+ int s_len = (*it)->entries_size();
+ ASSERT_EQ(1, s_len);
+ ASSERT_EQ("ab", (*s)[0].name());
+ }
+}
+#endif
+
+namespace {
+void VerifyExistsWithNameInRoot(syncable::Directory *dir,
+ const PathString &name,
+ const string &entry,
+ int line) {
+ ReadTransaction tr(dir, __FILE__, __LINE__);
+ Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+ name);
+ EXPECT_TRUE(e.good()) << "failed on call from " << entry << ":" << line;
+}
+} // namespace
+
+TEST_F(SyncerTest, ExtendedAttributeWithNullCharacter) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ int xattr_count = 2;
+ PathString xattr_keys[] = { PSTR("key"), PSTR("key2") };
+ syncable::Blob xattr_values[2];
+ char* value[] = { "value", "val\0ue" };
+ int value_length[] = { 5, 6 };
+ for (int i = 0; i < xattr_count; i++) {
+ for (int j = 0; j < value_length[i]; j++)
+ xattr_values[i].push_back(value[i][j]);
+ }
+ sync_pb::SyncEntity* ent =
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateExtendedAttributes(
+ ent, xattr_keys, xattr_values, xattr_count);
+
+ // Add some other items.
+ mock_server_->AddUpdateBookmark(2, 0, "fred", 2, 10);
+ mock_server_->AddUpdateBookmark(3, 0, "sue", 15, 10);
+
+ syncer_->SyncShare();
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry1(&trans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry1.good());
+ EXPECT_EQ(1, entry1.Get(syncable::BASE_VERSION));
+ EXPECT_EQ(1, entry1.Get(syncable::SERVER_VERSION));
+ set<ExtendedAttribute> client_extended_attributes;
+ entry1.GetAllExtendedAttributes(&trans, &client_extended_attributes);
+ EXPECT_EQ(xattr_count, client_extended_attributes.size());
+ for (int i = 0; i < xattr_count; i++) {
+ ExtendedAttributeKey key(entry1.Get(syncable::META_HANDLE), xattr_keys[i]);
+ ExtendedAttribute expected_xattr(&trans, syncable::GET_BY_HANDLE, key);
+ EXPECT_TRUE(expected_xattr.good());
+ for (int j = 0; j < value_length[i]; ++j) {
+ EXPECT_EQ(xattr_values[i][j],
+ static_cast<char>(expected_xattr.value().at(j)));
+ }
+ }
+ Entry entry2(&trans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry2.good());
+ Entry entry3(&trans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entry3.good());
+}
+
+TEST_F(SyncerTest, TestBasicUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ string id = "some_id";
+ string parent_id = "0";
+ string name = "in_root";
+ int64 version = 10;
+ int64 timestamp = 10;
+ mock_server_->AddUpdateDirectory(id, parent_id, name, version, timestamp);
+
+ syncer_->SyncShare(state_.get());
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_EQ(0, status.stalled_updates());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID,
+ syncable::Id::CreateFromServerId("some_id"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Get(IS_DIR));
+ EXPECT_EQ(entry.Get(SERVER_VERSION), version);
+ EXPECT_EQ(entry.Get(BASE_VERSION), version);
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ EXPECT_FALSE(entry.Get(IS_DEL));
+ }
+}
+
+TEST_F(SyncerTest, IllegalAndLegalUpdates) {
+ Id root = ids_.root();
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Should apply just fine.
+ mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10);
+
+ // Name clash: this is a conflict.
+ mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10);
+
+ // Unknown parent: should never be applied. "-80" is a legal server ID,
+ // because any string sent by the server is a legal server ID in the sync
+ // protocol, but it's not the ID of any item known to the client. This
+ // update should succeed validation, but be stuck in the unapplied state
+ // until an item with the server ID "-80" arrives.
+ mock_server_->AddUpdateDirectory(3, -80, "bad_parent", 10, 10);
+
+ syncer_->SyncShare(state_.get());
+
+ ConflictResolutionView conflict_view(state_.get());
+ SyncerStatus status(NULL, state_.get());
+ // Ids 2 and 3 are expected to be in conflict now.
+ EXPECT_EQ(2, conflict_view.conflicting_updates());
+ EXPECT_EQ(0, status.stalled_updates());
+
+ // These entries will be used in the second set of updates.
+ mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10);
+ mock_server_->AddUpdateDirectory(5, 0, "circular1", 10, 10);
+ mock_server_->AddUpdateDirectory(6, 5, "circular2", 10, 10);
+ mock_server_->AddUpdateDirectory(9, 3, "bad_parent_child", 10, 10);
+ mock_server_->AddUpdateDirectory(100, 9, "bad_parent_child2", 10, 10);
+ mock_server_->AddUpdateDirectory(10, 0, "dir_to_bookmark", 10, 10);
+
+ syncer_->SyncShare(state_.get());
+ // The three items with an unresolved parent should be unapplied (3, 9, 100).
+ // The name clash should also still be in conflict.
+ EXPECT_EQ(4, conflict_view.conflicting_updates());
+ EXPECT_EQ(0, status.stalled_updates());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(name_clash.good());
+ EXPECT_TRUE(name_clash.Get(IS_UNAPPLIED_UPDATE));
+
+ Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(bad_parent.good());
+ EXPECT_TRUE(name_clash.Get(IS_UNAPPLIED_UPDATE))
+ << "child of unknown parent should be in conflict";
+
+ Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
+ ASSERT_TRUE(bad_parent_child.good());
+ EXPECT_TRUE(bad_parent_child.Get(IS_UNAPPLIED_UPDATE))
+ << "grandchild of unknown parent should be in conflict";
+
+ Entry bad_parent_child2(&trans, GET_BY_ID, ids_.FromNumber(100));
+ ASSERT_TRUE(bad_parent_child2.good());
+ EXPECT_TRUE(bad_parent_child2.Get(IS_UNAPPLIED_UPDATE))
+ << "great-grandchild of unknown parent should be in conflict";
+ }
+
+ // Updating 1 should unblock the clashing item 2.
+ mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20);
+
+ // Moving 5 under 6 will create a cycle: a conflict.
+ mock_server_->AddUpdateDirectory(5, 6, "circular3", 20, 20);
+
+ // Flip the is_dir bit: should fail verify & be dropped.
+ mock_server_->AddUpdateBookmark(10, 0, "dir_to_bookmark", 20, 20);
+ syncer_->SyncShare(state_.get());
+
+ // Version number older than last known: should fail verify & be dropped.
+ mock_server_->AddUpdateDirectory(4, 0, "old_version", 10, 10);
+ syncer_->SyncShare(state_.get());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
+ ASSERT_TRUE(still_a_dir.good());
+ EXPECT_FALSE(still_a_dir.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(10, still_a_dir.Get(BASE_VERSION));
+ EXPECT_EQ(10, still_a_dir.Get(SERVER_VERSION));
+ EXPECT_TRUE(still_a_dir.Get(IS_DIR));
+
+ Entry rename(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("new_name"));
+ ASSERT_TRUE(rename.good());
+ EXPECT_FALSE(rename.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(ids_.FromNumber(1), rename.Get(ID));
+ EXPECT_EQ(20, rename.Get(BASE_VERSION));
+
+ Entry unblocked(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("in_root"));
+ ASSERT_TRUE(unblocked.good());
+ EXPECT_FALSE(unblocked.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(ids_.FromNumber(2), unblocked.Get(ID));
+ EXPECT_EQ(10, unblocked.Get(BASE_VERSION));
+
+ Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(ignored_old_version.good());
+ EXPECT_EQ(ignored_old_version.Get(NAME), PSTR("newer_version"));
+ EXPECT_FALSE(ignored_old_version.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(20, ignored_old_version.Get(BASE_VERSION));
+
+ Entry circular_parent_issue(&trans, GET_BY_ID, ids_.FromNumber(5));
+ ASSERT_TRUE(circular_parent_issue.good());
+ EXPECT_TRUE(circular_parent_issue.Get(IS_UNAPPLIED_UPDATE))
+ << "circular move should be in conflict";
+ EXPECT_EQ(circular_parent_issue.Get(PARENT_ID), root_id_);
+ EXPECT_EQ(circular_parent_issue.Get(SERVER_PARENT_ID), ids_.FromNumber(6));
+ EXPECT_EQ(10, circular_parent_issue.Get(BASE_VERSION));
+
+ Entry circular_parent_target(&trans, GET_BY_ID, ids_.FromNumber(6));
+ ASSERT_TRUE(circular_parent_target.good());
+ EXPECT_FALSE(circular_parent_target.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(circular_parent_issue.Get(ID),
+ circular_parent_target.Get(PARENT_ID));
+ EXPECT_EQ(10, circular_parent_target.Get(BASE_VERSION));
+ }
+
+ EXPECT_EQ(0, syncer_events_.size());
+ EXPECT_EQ(4, conflict_view.conflicting_updates());
+}
+
+TEST_F(SyncerTest, CommitTimeRename) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create a folder and an entry
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+
+ // Mix in a directory creation too for later
+ mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
+ mock_server_->SetCommitTimeRename("renamed_");
+ syncer_->SyncShare();
+
+ // Verify it was correctly renamed
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry_folder(&trans, GET_BY_PATH, PSTR("renamed_Folder"));
+ ASSERT_TRUE(entry_folder.good());
+
+ Entry entry_new(&trans, GET_BY_PATH,
+ PSTR("renamed_Folder") + PathString(kPathSeparator)
+ + PSTR("renamed_new_entry"));
+ ASSERT_TRUE(entry_new.good());
+
+ // And that the unrelated directory creation worked without a rename
+ Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
+ EXPECT_TRUE(new_dir.good());
+ }
+}
+
+
+TEST_F(SyncerTest, CommitTimeRenameI18N) {
+ // This is utf-8 for the diacritized Internationalization
+ const char* i18nString = "\xc3\x8e\xc3\xb1\x74\xc3\xa9\x72\xc3\xb1"
+ "\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1\xc3\xa5\x6c\xc3\xae"
+ "\xc2\x9e\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1";
+
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create a folder and entry
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+
+ // Mix in a directory creation too for later
+ mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
+ mock_server_->SetCommitTimeRename(i18nString);
+ syncer_->SyncShare();
+
+ // Verify it was correctly renamed
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ PathString expectedFolder;
+ AppendUTF8ToPathString(i18nString, &expectedFolder);
+ AppendUTF8ToPathString("Folder", &expectedFolder);
+ Entry entry_folder(&trans, GET_BY_PATH, expectedFolder);
+ ASSERT_TRUE(entry_folder.good());
+ PathString expected = expectedFolder + PathString(kPathSeparator);
+ AppendUTF8ToPathString(i18nString, &expected);
+ AppendUTF8ToPathString("new_entry", &expected);
+
+ Entry entry_new(&trans, GET_BY_PATH, expected);
+ ASSERT_TRUE(entry_new.good());
+
+ // And that the unrelated directory creation worked without a rename
+ Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
+ EXPECT_TRUE(new_dir.good());
+ }
+}
+
+TEST_F(SyncerTest, CommitTimeRenameCollision) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create a folder to collide with
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry collider(&trans, CREATE, root_id_, PSTR("renamed_Folder"));
+ ASSERT_TRUE(collider.good());
+ collider.Put(IS_DIR, true);
+ collider.Put(IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare(); // Now we have a folder.
+
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(folder.good());
+ folder.Put(IS_DIR, true);
+ folder.Put(IS_UNSYNCED, true);
+ }
+
+ mock_server_->set_next_new_id(30000);
+ mock_server_->SetCommitTimeRename("renamed_");
+ syncer_->SyncShare(); // Should collide and rename aside.
+ // This case will only occur if we got a commit time rename aside
+ // and the server attempts to rename to an entry that we know about, but it
+ // does not.
+
+ // Verify it was correctly renamed; one of them should have a sanitized name.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry collider_folder(&trans, GET_BY_PARENTID_AND_NAME, root_id_,
+ PSTR("renamed_Folder"));
+ EXPECT_EQ(collider_folder.Get(UNSANITIZED_NAME), PSTR(""));
+ ASSERT_TRUE(collider_folder.good());
+
+ // ID is generated by next_new_id_ and server mock prepending of strings.
+ Entry entry_folder(&trans, GET_BY_ID,
+ syncable::Id::CreateFromServerId("mock_server:30000"));
+ ASSERT_TRUE(entry_folder.good());
+ // A little arbitrary but nothing we can do about that.
+ EXPECT_EQ(entry_folder.Get(NAME), PSTR("renamed_Folder~1"));
+ EXPECT_EQ(entry_folder.Get(UNSANITIZED_NAME), PSTR("renamed_Folder"));
+ }
+}
+
+
+// A commit with a lost response produces an update that has to be reunited with
+// its parent.
+TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create a folder in the root.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(IS_UNSYNCED, true);
+ }
+
+ // Verify it and pull the ID out of the folder
+ syncable::Id folder_id;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("new_folder"));
+ ASSERT_TRUE(entry.good());
+ folder_id = entry.Get(ID);
+ ASSERT_TRUE(!folder_id.ServerKnows());
+ }
+
+ // Create an entry in the newly created folder.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, folder_id, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+
+ // Verify it and pull the ID out of the entry
+ syncable::Id entry_id;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
+ PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ entry_id = entry.Get(ID);
+ EXPECT_TRUE(!entry_id.ServerKnows());
+ VerifyTestDataInEntry(&trans, &entry);
+ }
+
+ // Now, to emulate a commit response failure, we just don't commit it.
+ int64 new_version = 150; // any larger value
+ int64 timestamp = 20; // arbitrary value.
+ int64 size = 20; // arbitrary.
+ syncable::Id new_folder_id =
+ syncable::Id::CreateFromServerId("folder_server_id");
+
+ // the following update should cause the folder to both apply the update, as
+ // well as reassociate the id
+ mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
+ "new_folder", new_version, timestamp);
+ mock_server_->SetLastUpdateOriginatorFields(
+ dir->cache_guid(), folder_id.GetServerId());
+
+ // We don't want it accidentally committed, just the update applied.
+ mock_server_->set_conflict_all_commits(true);
+
+ // Alright! Apply that update!
+ syncer_->SyncShare();
+ {
+ // The folder's ID should have been updated.
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry folder(&trans, GET_BY_PATH, PSTR("new_folder"));
+ ASSERT_TRUE(folder.good());
+ EXPECT_EQ(new_version, folder.Get(BASE_VERSION));
+ EXPECT_EQ(new_folder_id, folder.Get(ID));
+ EXPECT_TRUE(folder.Get(ID).ServerKnows());
+
+ // We changed the id of the parent, old lookups should fail.
+ Entry bad_entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
+ PSTR("new_entry"));
+ EXPECT_FALSE(bad_entry.good());
+
+ // The child's parent should have changed as well.
+ Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, new_folder_id,
+ PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(!entry.Get(ID).ServerKnows());
+ VerifyTestDataInEntry(&trans, &entry);
+ }
+}
+
+// A commit with a lost response produces an update that has to be reunited with
+// its parent.
+TEST_F(SyncerTest, CommitReuniteUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create an entry in the root.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ // Verify it and pull the ID out
+ syncable::Id entry_id;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ entry_id = entry.Get(ID);
+ EXPECT_TRUE(!entry_id.ServerKnows());
+ VerifyTestDataInEntry(&trans, &entry);
+ }
+
+ // Now, to emulate a commit response failure, we just don't commit it.
+ int64 new_version = 150; // any larger value
+ int64 timestamp = 20; // arbitrary value.
+ syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+ // Generate an update from the server with a relevant ID reassignment.
+ mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+ "new_entry", new_version, timestamp);
+ mock_server_->SetLastUpdateOriginatorFields(
+ dir->cache_guid(), entry_id.GetServerId());
+
+ // We don't want it accidentally committed, just the update applied.
+ mock_server_->set_conflict_all_commits(true);
+
+ // Alright! Apply that update!
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(new_version, entry.Get(BASE_VERSION));
+ EXPECT_EQ(new_entry_id, entry.Get(ID));
+ }
+}
+
+// A commit with a lost response must work even if the local entry
+// was deleted before the update is applied. We should not duplicate the local
+// entry in this case, but just create another one alongside.
+// We may wish to examine this behavior in the future as it can create hanging
+// uploads that never finish, that must be cleaned up on the server side
+// after some time.
+TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ // Create a entry in the root.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ // Verify it and pull the ID out
+ syncable::Id entry_id;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ entry_id = entry.Get(ID);
+ EXPECT_TRUE(!entry_id.ServerKnows());
+ VerifyTestDataInEntry(&trans, &entry);
+ }
+
+ // Now, to emulate a commit response failure, we just don't commit it.
+ int64 new_version = 150; // any larger value
+ int64 timestamp = 20; // arbitrary value.
+ int64 size = 20; // arbitrary.
+ syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+ // Generate an update from the server with a relevant ID reassignment.
+ mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+ "new_entry", new_version, timestamp);
+ mock_server_->SetLastUpdateOriginatorFields(
+ dir->cache_guid(),
+ entry_id.GetServerId());
+
+ // We don't want it accidentally committed, just the update applied.
+ mock_server_->set_conflict_all_commits(true);
+
+ // Purposefully delete the entry now before the update application finishes.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::IS_DEL, true);
+ }
+
+ // Just don't CHECK fail in sync, have the update split.
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_DEL));
+
+ Entry old_entry(&trans, GET_BY_ID, entry_id);
+ ASSERT_TRUE(old_entry.good());
+ EXPECT_TRUE(old_entry.Get(IS_DEL));
+ }
+}
+
+// TODO(chron): Add more unsanitized name tests
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A/A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B/B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(IS_UNAPPLIED_UPDATE, true);
+ A.Put(SERVER_VERSION, 20);
+
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNAPPLIED_UPDATE, true);
+ B.Put(SERVER_VERSION, 20);
+ }
+ LoopSyncShare(syncer_);
+ syncer_events_.clear();
+ mock_server_->set_conflict_all_commits(false);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ EXPECT_EQ(A.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(A.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(A.Get(SERVER_VERSION), 20);
+
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_EQ(B.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(B.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(B.Get(SERVER_VERSION), 20);
+ }
+}
+
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesNormalNames) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(IS_UNAPPLIED_UPDATE, true);
+ A.Put(SERVER_VERSION, 20);
+
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNAPPLIED_UPDATE, true);
+ B.Put(SERVER_VERSION, 20);
+ }
+ LoopSyncShare(syncer_);
+ syncer_events_.clear();
+ mock_server_->set_conflict_all_commits(false);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ EXPECT_EQ(A.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(A.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(A.Get(SERVER_VERSION), 20);
+
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_EQ(B.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(B.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(B.Get(SERVER_VERSION), 20);
+ }
+}
+
+TEST_F(SyncerTest, ReverseFolderOrderingTest) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(4, 3, "ggchild", 10, 10);
+ mock_server_->AddUpdateDirectory(3, 2, "gchild", 10, 10);
+ mock_server_->AddUpdateDirectory(5, 4, "gggchild", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "child", 10, 10);
+ mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10);
+ LoopSyncShare(syncer_);
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry child(&trans, syncable::GET_BY_PARENTID_AND_NAME, ids_.FromNumber(4),
+ PSTR("gggchild"));
+ ASSERT_TRUE(child.good());
+}
+
+bool CreateFolderInBob(Directory* dir) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("bob"));
+ MutableEntry entry2(&trans, syncable::CREATE, bob.Get(syncable::ID),
+ PSTR("bob"));
+ CHECK(entry2.good());
+ entry2.Put(syncable::IS_DIR, true);
+ entry2.Put(syncable::IS_UNSYNCED, true);
+ return true;
+}
+
+TEST_F(SyncerTest, EntryCreatedInNewFolderMidSync) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, syncable::CREATE, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ }
+ mock_server_->SetMidCommitCallbackFunction(CreateFolderInBob);
+ syncer_->SyncShare(BUILD_COMMIT_REQUEST, SYNCER_END);
+ EXPECT_EQ(1, mock_server_->committed_ids().size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ PathChar path[] = {*kPathSeparator, 'b', 'o', 'b', 0};
+ Entry entry(&trans, syncable::GET_BY_PATH, path);
+ ASSERT_TRUE(entry.good());
+ PathChar path2[] = {*kPathSeparator, 'b', 'o', 'b',
+ *kPathSeparator, 'b', 'o', 'b', 0};
+ Entry entry2(&trans, syncable::GET_BY_PATH, path2);
+ ASSERT_TRUE(entry2.good());
+ }
+}
+
+bool TouchFredAndGingerInRoot(Directory* dir) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("fred"));
+ CHECK(fred.good());
+ // Equivalent to touching the entry
+ fred.Put(syncable::IS_UNSYNCED, true);
+ fred.Put(syncable::SYNCING, false);
+ MutableEntry ginger(&trans, syncable::GET_BY_PARENTID_AND_NAME,
+ trans.root_id(), PSTR("ginger"));
+ CHECK(ginger.good());
+ ginger.Put(syncable::IS_UNSYNCED, true);
+ ginger.Put(syncable::SYNCING, false);
+ return true;
+}
+
+TEST_F(SyncerTest, NegativeIDInUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(-10, 0, "bad", 40, 40);
+ syncer_->SyncShare();
+ // The negative id would make us CHECK!
+}
+
+TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ // Create an item.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred_match(&trans, CREATE, trans.root_id(),
+ PSTR("fred_match"));
+ ASSERT_TRUE(fred_match.good());
+ WriteTestDataToEntry(&trans, &fred_match);
+ }
+ // Commit it.
+ syncer_->SyncShare();
+ EXPECT_EQ(1, mock_server_->committed_ids().size());
+ mock_server_->set_conflict_all_commits(true);
+ syncable::Id fred_match_id;
+ {
+ // Now receive a change from outside.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred_match(&trans, GET_BY_PATH, PSTR("fred_match"));
+ ASSERT_TRUE(fred_match.good());
+ EXPECT_TRUE(fred_match.Get(ID).ServerKnows());
+ fred_match_id = fred_match.Get(ID);
+ mock_server_->AddUpdateBookmark(fred_match_id, trans.root_id(),
+ "fred_match", 40, 40);
+ }
+ // Run the syncer.
+ for (int i = 0 ; i < 30 ; ++i) {
+ syncer_->SyncShare();
+ }
+}
+
+TEST_F(SyncerTest, NameClashWithResolverInconsistentUpdates) {
+ // I'm unsure what the client should really do when the scenario in this old
+ // test occurs. The set of updates we've received are not consistent.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ const char* base_name = "name_clash_with_resolver";
+ const char* full_name = "name_clash_with_resolver.htm";
+ PathChar* base_name_p = PSTR("name_clash_with_resolver");
+ mock_server_->AddUpdateBookmark(1, 0, full_name, 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare();
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare();
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+
+ EXPECT_EQ(base_name_p, id1name.substr(0, strlen(base_name)));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_LE(id1name.length(), 200ul);
+ EXPECT_EQ(PSTR("name_clash_with_resolver.htm"), id2.Get(NAME));
+ }
+}
+
+TEST_F(SyncerTest, NameClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ const char* base_name = "name_clash_with_resolver";
+ const char* full_name = "name_clash_with_resolver.htm";
+ PathChar* base_name_p = PSTR("name_clash_with_resolver");
+ PathChar* full_name_p = PSTR("name_clash_with_resolver.htm");
+ mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(NAME, full_name_p);
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
+ // We do NOT use LoopSyncShare here because of the way that
+ // mock_server_->conflict_n_commits works.
+ // It will only conflict the first n commits, so if we let the syncer loop,
+ // the second commit of the update will succeed even though it shouldn't.
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ EXPECT_EQ(0, syncer_events_.size());
+ syncer_events_.clear();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+
+ EXPECT_EQ(base_name_p, id1name.substr(0, strlen(base_name)));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_LE(id1name.length(), 200ul);
+ EXPECT_EQ(full_name_p, id2.Get(NAME));
+ }
+}
+
+TEST_F(SyncerTest, VeryLongNameClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ string name;
+ PathString name_w;
+ name.resize(250, 'X');
+ name_w.resize(250, 'X');
+ name.append(".htm");
+ name_w.append(PSTR(".htm"));
+ mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(NAME, name_w);
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, name, 10, 10);
+ mock_server_->set_conflict_n_commits(1);
+ // We do NOT use LoopSyncShare here because of the way that
+ // mock_server_->conflict_n_commits works.
+ // It will only conflict the first n commits, so if we let the syncer loop,
+ // the second commit of the update will succeed even though it shouldn't.
+ syncer_->SyncShare(state_.get());
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_EQ(name_w, id2.Get(NAME));
+ }
+}
+
+TEST_F(SyncerTest, NameClashWithResolverAndDotStartedName) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, ".bob.htm", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(NAME, PSTR(".htm"));
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(2, 0, ".htm", 10, 10);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(0, 4));
+ EXPECT_EQ(PSTR(".htm"), id2.Get(NAME));
+ }
+}
+
+TEST_F(SyncerTest, ThreeNamesClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(3, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(4, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ ASSERT_TRUE(id4.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+ ASSERT_GE(id1name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id1name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id1.Get(NAME));
+ PathString id2name = id2.Get(NAME);
+ ASSERT_GE(id2name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id2name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id2name.substr(id2name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id2.Get(NAME));
+ PathString id3name = id3.Get(NAME);
+ ASSERT_GE(id3name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id3name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id3name.substr(id3name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id3.Get(NAME));
+ EXPECT_EQ(PSTR("in_root.htm"), id4.Get(NAME));
+ }
+}
+
+/**
+ * In the event that we have a double changed entry, that is
+ * changed on both the client and the server, the conflict resolver
+ * should just drop one of them and accept the other.
+ */
+TEST_F(SyncerTest, DoublyChangedWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ parent.Put(syncable::BASE_VERSION, 5);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("Pete.htm"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 10);
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+ mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Pete.htm", 11, 10);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ syncable::Directory::ChildHandles children;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ dir->GetChildHandles(&trans, parent_id_, &children);
+ // We expect the conflict resolver to just clobber the entry.
+ Entry child(&trans, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(child.good());
+ EXPECT_TRUE(child.Get(syncable::IS_UNSYNCED));
+ EXPECT_FALSE(child.Get(syncable::IS_UNAPPLIED_UPDATE));
+ }
+
+ // Only one entry, since we just overwrite one.
+ EXPECT_EQ(1, children.size());
+ syncer_events_.clear();
+}
+
+// We got this repro case when someone was editing entries
+// while sync was occuring. The entry had changed out underneath
+// the user.
+TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ int64 test_time = 123456;
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, syncable::CREATE, root_id_, PSTR("Pete"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(ID).ServerKnows());
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::MTIME, test_time);
+ }
+ syncer_->SyncShare();
+ syncable::Id id;
+ int64 version;
+ int64 server_position_in_parent;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("Pete"));
+ ASSERT_TRUE(entry.good());
+ id = entry.Get(ID);
+ EXPECT_TRUE(id.ServerKnows());
+ version = entry.Get(BASE_VERSION);
+ server_position_in_parent = entry.Get(SERVER_POSITION_IN_PARENT);
+ }
+ mock_server_->AddUpdateDirectory(id, root_id_, "Pete", version, 10);
+ mock_server_->SetLastUpdatePosition(server_position_in_parent);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, syncable::GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(entry.Get(MTIME), test_time);
+ }
+}
+
+TEST_F(SyncerTest, ParentAndChildBothMatch) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("test.htm"));
+ ASSERT_TRUE(child.good());
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+ mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 10, 10);
+ mock_server_->AddUpdateBookmark(child_id_, parent_id_, "test.htm", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, root_id_, &children);
+ EXPECT_EQ(1, children.size());
+ dir->GetChildHandles(&trans, parent_id_, &children);
+ EXPECT_EQ(1, children.size());
+ Directory::UnappliedUpdateMetaHandles unapplied;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+ EXPECT_EQ(0, unapplied.size());
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(0, unsynced.size());
+ syncer_events_.clear();
+ }
+}
+
+TEST_F(SyncerTest, CommittingNewDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare();
+ EXPECT_EQ(0, mock_server_->committed_ids().size());
+}
+
+// Original problem synopsis:
+// Check failed: entry->Get(BASE_VERSION) <= entry->Get(SERVER_VERSION)
+// Client creates entry, client finishes committing entry. Between
+// commit and getting update back, we delete the entry.
+// We get the update for the entry, but the local one was modified
+// so we store the entry but don't apply it. IS_UNAPPLIED_UPDATE is set.
+// We commit deletion and get a new version number.
+// We apply unapplied updates again before we get the update about the deletion.
+// This means we have an unapplied update where server_version < base_version.
+TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
+ // This test is a little fake
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ entry.Put(ID, ids_.FromNumber(20));
+ entry.Put(BASE_VERSION, 1);
+ entry.Put(SERVER_VERSION, 1);
+ entry.Put(SERVER_PARENT_ID, ids_.FromNumber(9999)); // bad parent
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_UNAPPLIED_UPDATE, true);
+ entry.Put(IS_DEL, false);
+ }
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_EQ(0, status.conflicting_updates());
+ syncer_events_.clear();
+}
+
+// Original problem synopsis:
+// Illegal parent
+// Unexpected error during sync if we:
+// make a new folder bob
+// wait for sync
+// make a new folder fred
+// move bob into fred
+// remove bob
+// remove fred
+// if no syncing occured midway, bob will have an illegal parent
+TEST_F(SyncerTest, DeletingEntryInFolder) {
+ // This test is a little fake
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("existing"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare(state_.get());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry newfolder(&trans, CREATE, trans.root_id(), PSTR("new"));
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_DIR, true);
+ newfolder.Put(IS_UNSYNCED, true);
+
+ MutableEntry existing(&trans, GET_BY_PATH, PSTR("existing"));
+ ASSERT_TRUE(existing.good());
+ existing.Put(PARENT_ID, newfolder.Get(ID));
+ existing.Put(IS_UNSYNCED, true);
+ EXPECT_TRUE(existing.Get(ID).ServerKnows());
+
+ newfolder.Put(IS_DEL, true);
+ existing.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare(state_.get());
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_EQ(0, status.error_commits());
+ EXPECT_EQ(0, status.conflicting_commits());
+ EXPECT_EQ(0, status.BlockedItemsSize());
+}
+
+// TODO(sync): Is this test useful anymore?
+TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry newfolder(&trans, CREATE, ids_.FromNumber(1), PSTR("local"));
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare(SYNCER_BEGIN, APPLY_UPDATES);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry_by_path(&trans, syncable::GET_BY_PATH,
+ PathString(PSTR("bob")) + kPathSeparator + PSTR("local"));
+ ASSERT_TRUE(entry_by_path.good());
+ }
+}
+
+TEST_F(SyncerTest, FolderSwapUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(4096, 0, "alice", 1, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("bob"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("fred"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("alice"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ }
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(4096, 0, "bob", 2, 20);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("bob"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("fred"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("alice"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+TEST_F(SyncerTest, DISABLED_FolderSwapCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ ASSERT_EQ(2, mock_server_->commit_messages().size());
+ CommitMessage* m0 = mock_server_->commit_messages()[0];
+ CommitMessage* m1 = mock_server_->commit_messages()[1];
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+TEST_F(SyncerTest, DISABLED_DualFolderSwapCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ MutableEntry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ ASSERT_TRUE(id4.good());
+ EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_FALSE(id3.Put(NAME, PSTR("greg")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id4.Put(NAME, PSTR("sue")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("greg")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ id3.Put(IS_UNSYNCED, true);
+ id4.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ ASSERT_EQ(4, mock_server_->commit_messages().size());
+ CommitMessage* m0 = mock_server_->commit_messages()[0];
+ CommitMessage* m1 = mock_server_->commit_messages()[1];
+ CommitMessage* m2 = mock_server_->commit_messages()[2];
+ CommitMessage* m3 = mock_server_->commit_messages()[3];
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("greg"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+ Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id4.good());
+ EXPECT_EQ(PSTR("sue"), id4.Get(NAME));
+ EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+ EXPECT_FALSE(id4.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+TEST_F(SyncerTest, DISABLED_TripleFolderRotateCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ EXPECT_FALSE(id1.Put(NAME, PSTR("sue")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("sue")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ id3.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ ASSERT_EQ(2, mock_server_->commit_messages().size());
+ CommitMessage* m0 = mock_server_->commit_messages()[0];
+ CommitMessage* m1 = mock_server_->commit_messages()[1];
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("sue"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("fred"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+TEST_F(SyncerTest, DISABLED_ServerAndClientSwap) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(3, 0, "greg", 2, 20);
+ mock_server_->AddUpdateDirectory(4, 0, "sue", 2, 20);
+ syncer_->SyncShare();
+ ASSERT_EQ(2, mock_server_->commit_messages().size());
+ CommitMessage* m0 = mock_server_->commit_messages()[0];
+ CommitMessage* m1 = mock_server_->commit_messages()[1];
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("greg"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+ Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id4.good());
+ EXPECT_EQ(PSTR("sue"), id4.Get(NAME));
+ EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+ EXPECT_FALSE(id4.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, CommitManyItemsInOneGo) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ uint32 max_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * max_batches;
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = StringPrintf("%d", i);
+ PathString name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ }
+ }
+ uint32 num_loops = 0;
+ while (syncer_->SyncShare()) {
+ num_loops++;
+ ASSERT_LT(num_loops, max_batches * 2);
+ }
+ EXPECT_GE(mock_server_->commit_messages().size(), max_batches);
+}
+
+TEST_F(SyncerTest, HugeConflict) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ PathString name = PSTR("f");
+ int item_count = 30; // We should be able to do 300 or 3000 w/o issue.
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ syncable::Id last_id = trans.root_id();
+ for (int i = 0; i < item_count ; i++) {
+ MutableEntry e(&trans, CREATE, last_id, name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ last_id = e.Get(ID);
+ }
+ }
+ syncer_->SyncShare();
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, root_id_, name);
+ syncable::Id in_root = e.Get(ID);
+ syncable::Id last_id = e.Get(ID);
+ for (int i = 0; i < item_count - 1 ; i++) {
+ MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, last_id, name);
+ ASSERT_TRUE(e.good());
+ mock_server_->AddUpdateDirectory(in_root, root_id_, "BOB", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ if (0 == i)
+ e.Put(IS_UNSYNCED, true);
+ last_id = e.Get(ID);
+ }
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ CHECK(dir.good());
+}
+
+TEST_F(SyncerTest, CaseChangeNameClashConflict) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(e.good());
+ e.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(1, 0, "BOB", 2, 20);
+ syncer_->SyncShare(); // USED TO CAUSE AN ASSERT
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, UnsyncedItemAndUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(2, 0, "bob", 2, 20);
+ syncer_->SyncShare(); // USED TO CAUSE AN ASSERT
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, FolderMergeWithChildNameClash) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ syncable::Id local_folder_id, root_id;
+ mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder2", 10, 10);
+ mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Bookmark", 10, 10);
+ syncer_->SyncShare();
+ int64 local_folder_handle;
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ local_folder_id = parent.Get(ID);
+ local_folder_handle = parent.Get(META_HANDLE);
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bookmark"));
+ ASSERT_TRUE(child.good());
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+ mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, root_id_, &children);
+ ASSERT_EQ(2, children.size());
+ Entry parent(&trans, GET_BY_ID, parent_id_);
+ ASSERT_TRUE(parent.good());
+ EXPECT_EQ(parent.Get(NAME), PSTR("Folder"));
+ if (local_folder_handle == children[0]) {
+ EXPECT_EQ(children[1], parent.Get(META_HANDLE));
+ } else {
+ EXPECT_EQ(children[0], parent.Get(META_HANDLE));
+ EXPECT_EQ(children[1], local_folder_handle);
+ }
+ dir->GetChildHandles(&trans, local_folder_id, &children);
+ EXPECT_EQ(1, children.size());
+ dir->GetChildHandles(&trans, parent_id_, &children);
+ EXPECT_EQ(1, children.size());
+ Directory::UnappliedUpdateMetaHandles unapplied;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+ EXPECT_EQ(0, unapplied.size());
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(2, unsynced.size());
+ }
+ mock_server_->set_conflict_all_commits(false);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(0, unsynced.size());
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10);
+ syncer_->SyncShare();
+ int64 local_folder_handle;
+ syncable::Id local_folder_id;
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry new_entry(&wtrans, CREATE, wtrans.root_id(), PSTR("Bar.htm"));
+ ASSERT_TRUE(new_entry.good());
+ local_folder_id = new_entry.Get(ID);
+ local_folder_handle = new_entry.Get(META_HANDLE);
+ new_entry.Put(IS_UNSYNCED, true);
+ MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(old.good());
+ WriteTestDataToEntry(&wtrans, &old);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_events_.clear();
+}
+
+// Circular links should be resolved by the server.
+TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
+ // we don't currently resolve this. This test ensures we don't
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ ASSERT_TRUE(A.Put(PARENT_ID, ids_.FromNumber(2)));
+ ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "A", 20, 20);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_events_.clear();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_EQ(A.Get(NAME), PSTR("B"));
+ EXPECT_EQ(B.Get(NAME), PSTR("B"));
+ }
+}
+
+TEST_F(SyncerTest, ConflictSetClassificationError) {
+ // This code used to cause a CHECK failure because we incorrectly thought
+ // a set was only unapplied updates.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(IS_UNAPPLIED_UPDATE, true);
+ A.Put(SERVER_NAME, PSTR("B"));
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNAPPLIED_UPDATE, true);
+ B.Put(SERVER_NAME, PSTR("A"));
+ }
+ syncer_->SyncShare();
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, SwapEntryNames) {
+ // Simple transaction test
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNSYNCED, true);
+ ASSERT_TRUE(A.Put(NAME, PSTR("C")));
+ ASSERT_TRUE(B.Put(NAME, PSTR("A")));
+ ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+ }
+ syncer_->SyncShare();
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, DualDeletionWithNewItemNameClash) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateBookmark(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ WriteTestDataToEntry(&trans, &B);
+ B.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, "A", 11, 11);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_FALSE(B.Get(IS_UNSYNCED));
+ EXPECT_FALSE(B.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, FixDirectoryLoopConflict) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ WriteTestDataToEntry(&trans, &bob);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(bob.good());
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(ID).ServerKnows());
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_DEL));
+ }
+ syncer_events_.clear();
+}
+
+// This test is disabled because we actually enforce the opposite behavior in:
+// ConflictResolverMergesLocalDeleteAndServerUpdate for bookmarks.
+TEST_F(SyncerTest, DISABLED_ResolveWeDeletedTheyWrote) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(bob.good());
+ EXPECT_EQ(bob.Get(ID), ids_.FromNumber(1));
+ EXPECT_FALSE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_DEL));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, ServerDeletingFolderWeHaveMovedSomethingInto) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+ ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(ncarter): This test is bogus, but it actually seems to hit an
+// interesting case the 4th time SyncShare is called.
+TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare(state_.get());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ WriteTestDataToEntry(&trans, &bob);
+ }
+ syncer_->SyncShare(state_.get());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ EXPECT_FALSE(bob.Get(IS_UNSYNCED));
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_events_.clear();
+ // These SyncShares would cause a CHECK because we'd think we were stuck.
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+ ASSERT_TRUE(fred.good());
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+ ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+namespace {
+
+int move_bob_count;
+
+bool MoveBobIntoID2(Directory* dir) {
+ int first_count = move_bob_count;
+ if (--move_bob_count > 0)
+ return false;
+ int second_count = move_bob_count;
+ if (move_bob_count == 0) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ Entry alice(&trans, GET_BY_ID, TestIdFactory::FromNumber(2));
+ CHECK(alice.good());
+ CHECK(!alice.Get(IS_DEL));
+ MutableEntry bob(&trans, GET_BY_ID, TestIdFactory::FromNumber(1));
+ CHECK(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, alice.Get(ID));
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+TEST_F(SyncerTest,
+ WeMovedSomethingIntoAFolderServerHasDeletedAndWeRenamed) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(fred.good());
+ fred.Put(IS_UNSYNCED, true);
+ fred.Put(NAME, PSTR("Alice"));
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ // This test is a little brittle. We want to move the item into the folder
+ // such that we think we're dealing with a simple conflict, but in reality
+ // it's actually a conflict set.
+ move_bob_count = 2;
+ mock_server_->SetMidCommitCallbackFunction(MoveBobIntoID2);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry alice(&trans, GET_BY_PATH, PSTR("Alice"));
+ ASSERT_TRUE(alice.good());
+ EXPECT_TRUE(alice.Get(IS_UNSYNCED));
+ EXPECT_FALSE(alice.Get(ID).ServerKnows());
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
+ EXPECT_EQ(alice.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+
+TEST_F(SyncerTest,
+ WeMovedADirIntoAndCreatedAnEntryInAFolderServerHasDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ syncable::Id new_item_id;
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ MutableEntry new_item(&trans, CREATE, ids_.FromNumber(2), PSTR("new_item"));
+ WriteTestDataToEntry(&trans, &new_item);
+ new_item_id = new_item.Get(ID);
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+ ASSERT_TRUE(fred.good());
+ PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
+ 'n', 'e', 'w', '_', 'i', 't', 'e', 'm', 0};
+ Entry new_item(&trans, GET_BY_PATH, path);
+ EXPECT_TRUE(new_item.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, ServerMovedSomethingIntoAFolderWeHaveDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(fred.good());
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(fred.Get(PARENT_ID), bob.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, ServerMovedAFolderIntoAFolderWeHaveDeletedAndMovedIntoIt) {
+ // This test combines circular folders and deleted parents.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(2));
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_DEL));
+ EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+ EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, NewServerItemInAFolderWeHaveDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(fred.good());
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_EQ(fred.Get(PARENT_ID), bob.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ joe.Put(IS_UNSYNCED, true);
+ joe.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(fred.good());
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+ EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
+ EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted2) {
+ // The difference here is that the hierarchy's not in the root. We have
+ // another entry that shouldn't be touched.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ joe.Put(IS_UNSYNCED, true);
+ joe.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(fred.good());
+ Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(susan.good());
+ EXPECT_FALSE(susan.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+ EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
+ EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), susan.Get(ID));
+ EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+namespace {
+
+int countown_till_delete = 0;
+
+void DeleteSusanInRoot(Directory* dir) {
+ ASSERT_GT(countown_till_delete, 0);
+ if (0 != --countown_till_delete)
+ return;
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry susan(&trans, GET_BY_PATH, PSTR("susan"));
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, susan.Get(ID), &children);
+ ASSERT_EQ(0, children.size());
+ susan.Put(IS_DEL, true);
+ susan.Put(IS_UNSYNCED, true);
+}
+
+} // namespace
+
+TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3) {
+ // Same as 2, except we deleted the folder the set is in between set building
+ // and conflict resolution.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(IS_DEL, true);
+ MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ joe.Put(IS_UNSYNCED, true);
+ joe.Put(IS_DEL, true);
+ }
+ mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ countown_till_delete = 2;
+ syncer_->pre_conflict_resolution_function_ = DeleteSusanInRoot;
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(fred.good());
+ Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(susan.good());
+ EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(susan.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+ }
+ EXPECT_EQ(0, countown_till_delete);
+ syncer_->pre_conflict_resolution_function_ = 0;
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(joe.good());
+ Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(fred.good());
+ Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(susan.good());
+ EXPECT_TRUE(susan.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+ EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
+ EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), susan.Get(ID));
+ EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(3)); // Move into alice.
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+ ASSERT_TRUE(fred.good());
+ PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
+ 'a', 'l', 'i', 'c', 'e', 0};
+ Entry alice(&trans, GET_BY_PATH, path);
+ ASSERT_TRUE(alice.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(alice.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_FALSE(alice.Get(ID).ServerKnows());
+ EXPECT_EQ(alice.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
+ EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted2) {
+ // The difference here is that the hierarchy's not in the root. We have
+ // another entry that shouldn't be touched.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 4, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(3)); // Move into alice.
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ PathChar path[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
+ 'f', 'r', 'e', 'd', 0};
+ Entry fred(&trans, GET_BY_PATH, path);
+ ASSERT_TRUE(fred.good());
+ PathChar path2[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
+ 'f', 'r', 'e', 'd', *kPathSeparator,
+ 'a', 'l', 'i', 'c', 'e', 0};
+ Entry alice(&trans, GET_BY_PATH, path2);
+ ASSERT_TRUE(alice.good());
+ Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(susan.good());
+ Entry susan_by_path(&trans, GET_BY_PATH, PSTR("susan"));
+ ASSERT_TRUE(susan.good());
+ EXPECT_FALSE(susan.Get(IS_UNSYNCED));
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(alice.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_FALSE(alice.Get(ID).ServerKnows());
+ EXPECT_EQ(alice.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
+ EXPECT_EQ(fred.Get(PARENT_ID), susan.Get(ID));
+ EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
+// This test is to reproduce a check failure. Sometimes we would get a
+// bad ID back when creating an entry.
+TEST_F(SyncerTest, DuplicateIDReturn) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(folder.good());
+ folder.Put(IS_UNSYNCED, true);
+ folder.Put(IS_DIR, true);
+ MutableEntry folder2(&trans, CREATE, trans.root_id(), PSTR("fred"));
+ ASSERT_TRUE(folder2.good());
+ folder2.Put(IS_UNSYNCED, false);
+ folder2.Put(IS_DIR, true);
+ folder2.Put(BASE_VERSION, 3);
+ folder2.Put(ID, syncable::Id::CreateFromServerId("mock_server:10000"));
+ }
+ mock_server_->set_next_new_id(10000);
+ EXPECT_EQ(1, dir->unsynced_entity_count());
+ syncer_->SyncShare(); // we get back a bad id in here (should never happen).
+ EXPECT_EQ(1, dir->unsynced_entity_count());
+ syncer_->SyncShare(); // another bad id in here.
+ EXPECT_EQ(0, dir->unsynced_entity_count());
+ syncer_events_.clear();
+}
+
+// This test is not very useful anymore. It used to trigger
+// a more interesting condition.
+TEST_F(SyncerTest, SimpleConflictOnAnEntry) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ WriteTestDataToEntry(&trans, &bob);
+ }
+ syncer_->SyncShare();
+ syncable::Id bobid;
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_PATH, PSTR("bob"));
+ ASSERT_TRUE(bob.good());
+ EXPECT_FALSE(bob.Get(IS_UNSYNCED));
+ bob.Put(IS_UNSYNCED, true);
+ bobid = bob.Get(ID);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "jim", 2, 20);
+ mock_server_->set_conflict_all_commits(true);
+ SyncRepeatedlyToTriggerConflictResolution(state_.get());
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ // This is valid, because the parent could have gone away a long time ago.
+ bob.Put(PARENT_ID, ids_.FromNumber(54));
+ bob.Put(IS_DEL, true);
+ bob.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateDirectory(2, 1, "fred", 1, 10);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+}
+
+TEST_F(SyncerTest, ConflictResolverMergeOverwritesLocalEntry) {
+ // This test would die because it would rename
+ // a entry to a name that was taken in the namespace
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+
+ ConflictSet conflict_set;
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
+ local_deleted.Put(ID, ids_.FromNumber(1));
+ local_deleted.Put(BASE_VERSION, 1);
+ local_deleted.Put(IS_DEL, true);
+ local_deleted.Put(IS_UNSYNCED, true);
+
+ MutableEntry in_the_way(&trans, CREATE, trans.root_id(), PSTR("name"));
+ in_the_way.Put(ID, ids_.FromNumber(2));
+ in_the_way.Put(BASE_VERSION, 1);
+
+ MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, ids_.FromNumber(3));
+ update.Put(BASE_VERSION, 1);
+ update.Put(SERVER_NAME, PSTR("name"));
+ update.Put(PARENT_ID, ids_.FromNumber(0));
+ update.Put(IS_UNAPPLIED_UPDATE, true);
+
+ conflict_set.push_back(ids_.FromNumber(1));
+ conflict_set.push_back(ids_.FromNumber(3));
+ }
+ {
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ syncer_->conflict_resolver()->ProcessConflictSet(&trans, &conflict_set, 50,
+ &session);
+ }
+}
+
+TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
+ local_deleted.Put(ID, ids_.FromNumber(1));
+ local_deleted.Put(BASE_VERSION, 1);
+ local_deleted.Put(IS_DEL, true);
+ local_deleted.Put(IS_DIR, false);
+ local_deleted.Put(IS_UNSYNCED, true);
+ local_deleted.Put(IS_BOOKMARK_OBJECT, true);
+ }
+
+ mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
+
+ // We don't care about actually committing, just the resolution
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_EQ(local_deleted.Get(BASE_VERSION), 10);
+ EXPECT_EQ(local_deleted.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(local_deleted.Get(IS_UNSYNCED), true);
+ EXPECT_EQ(local_deleted.Get(IS_DEL), true);
+ EXPECT_EQ(local_deleted.Get(IS_DIR), false);
+ }
+}
+
+// See what happens if the IS_DIR bit gets flipped. This can cause us
+// all kinds of disasters.
+TEST_F(SyncerTest, UpdateFlipsTheFolderBit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+
+ // Local object: a deleted directory (container), revision 1, unsynced.
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
+ local_deleted.Put(ID, ids_.FromNumber(1));
+ local_deleted.Put(BASE_VERSION, 1);
+ local_deleted.Put(IS_DEL, true);
+ local_deleted.Put(IS_DIR, true);
+ local_deleted.Put(IS_UNSYNCED, true);
+ }
+
+ // Server update: entry-type object (not a container), revision 10.
+ mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
+
+ // Don't attempt to commit
+ mock_server_->set_conflict_all_commits(true);
+
+ // The syncer should not attempt to apply the invalid update.
+ syncer_->SyncShare();
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_EQ(local_deleted.Get(BASE_VERSION), 1);
+ EXPECT_EQ(local_deleted.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(local_deleted.Get(IS_UNSYNCED), true);
+ EXPECT_EQ(local_deleted.Get(IS_DEL), true);
+ EXPECT_EQ(local_deleted.Get(IS_DIR), true);
+ }
+}
+
+TEST(SyncerSyncProcessState, MergeSetsTest) {
+ TestIdFactory id_factory;
+ syncable::Id id[7];
+ for (int i = 1; i < 7; i++) {
+ id[i] = id_factory.NewServerId();
+ }
+ SyncProcessState c;
+ c.MergeSets(id[1], id[2]);
+ c.MergeSets(id[2], id[3]);
+ c.MergeSets(id[4], id[5]);
+ c.MergeSets(id[5], id[6]);
+ EXPECT_EQ(6, c.IdToConflictSetSize());
+ for (int i = 1; i < 7; i++) {
+ EXPECT_TRUE(NULL != c.IdToConflictSetGet(id[i]));
+ EXPECT_EQ(c.IdToConflictSetGet(id[(i & ~3) + 1]),
+ c.IdToConflictSetGet(id[i]));
+ }
+ c.MergeSets(id[1], id[6]);
+ for (int i = 1; i < 7; i++) {
+ EXPECT_TRUE(NULL != c.IdToConflictSetGet(id[i]));
+ EXPECT_EQ(c.IdToConflictSetGet(id[1]), c.IdToConflictSetGet(id[i]));
+ }
+
+ // Check dupes don't cause double sets
+ SyncProcessState identical_set;
+ identical_set.MergeSets(id[1], id[1]);
+ EXPECT_EQ(identical_set.IdToConflictSetSize(), 1);
+ EXPECT_EQ(identical_set.IdToConflictSetGet(id[1])->size(), 1);
+}
+
+// Bug Synopsis:
+// Merge conflict resolution will merge a new local entry
+// with another entry that needs updates, resulting in CHECK.
+TEST_F(SyncerTest, MergingExistingItems) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("Copy of base"));
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
+ SyncRepeatedlyToTriggerConflictResolution(state_.get());
+}
+
+// In this test a long changelog contains a child at the start of the changelog
+// and a parent at the end. While these updates are in progress the client would
+// appear stuck.
+TEST_F(SyncerTest, LongChangelistCreatesFakeOrphanedEntries) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ const int DEPTH = 400;
+ // First we an item in a folder in the root. However the folder won't come
+ // till much later.
+ mock_server_->AddUpdateDirectory(99999, 1, "stuck", 1, 1);
+ mock_server_->SetNewestTimestamp(DEPTH);
+ syncer_->SyncShare(state_.get());
+
+ // Very long changelist. We should never be stuck.
+ for (int i = 0; i < DEPTH; i++) {
+ mock_server_->SetNewTimestamp(i);
+ mock_server_->SetNewestTimestamp(DEPTH);
+ syncer_->SyncShare(state_.get());
+ EXPECT_FALSE(SyncerStuck(state_.get()));
+ }
+ // And finally the folder.
+ mock_server_->AddUpdateDirectory(1, 0, "folder", 1, 1);
+ mock_server_->SetNewestTimestamp(DEPTH);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ // Check that everything's as expected after the commit.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("folder"));
+ ASSERT_TRUE(entry.good());
+ Entry child(&trans, GET_BY_PARENTID_AND_NAME, entry.Get(ID), PSTR("stuck"));
+ EXPECT_TRUE(child.good());
+ }
+}
+
+TEST_F(SyncerTest, DontMergeTwoExistingItems) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
+ mock_server_->AddUpdateBookmark(2, 0, "base2", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Put(NAME, PSTR("Copy of base")));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
+ SyncRepeatedlyToTriggerConflictResolution(state_.get());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ EXPECT_FALSE(entry1.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry1.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry1.Get(IS_DEL));
+ Entry entry2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ EXPECT_FALSE(entry2.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(entry2.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry2.Get(IS_DEL));
+ EXPECT_NE(entry1.Get(NAME), entry2.Get(NAME));
+ }
+}
+
+TEST_F(SyncerTest, TestUndeleteUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 1, 2);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 2, 3);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ }
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare();
+ // This used to be rejected as it's an undeletion.
+ // Now, it results in moving the delete path aside.
+ mock_server_->AddUpdateDirectory(2, 1, "bar", 3, 5);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ EXPECT_TRUE(entry.Get(IS_UNAPPLIED_UPDATE));
+ }
+}
+
+TEST_F(SyncerTest, TestMoveSanitizedNamedFolder) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
+ mock_server_->AddUpdateDirectory(2, 0, ":::", 1, 2);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(entry.Put(PARENT_ID, ids_.FromNumber(1)));
+ EXPECT_TRUE(entry.Put(IS_UNSYNCED, true));
+ }
+ syncer_->SyncShare();
+ // We use the same sync ts as before so our times match up.
+ mock_server_->AddUpdateDirectory(2, 1, ":::", 2, 2);
+ syncer_->SyncShare();
+}
+
+TEST_F(SyncerTest, QuicklyMergeDualCreatedHierarchy) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ int depth = 10;
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ syncable::Id parent = root_id_;
+ for (int i = 0 ; i < depth ; ++i) {
+ MutableEntry entry(&trans, CREATE, parent, PSTR("folder"));
+ entry.Put(IS_DIR, true);
+ entry.Put(IS_UNSYNCED, true);
+ parent = entry.Get(ID);
+ }
+ }
+ for (int i = 0 ; i < depth ; ++i) {
+ mock_server_->AddUpdateDirectory(i + 1, i, "folder", 1, 1);
+ }
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_LT(status.consecutive_problem_commits(), 5);
+ EXPECT_EQ(0, dir->unsynced_entity_count());
+}
+
+TEST(SortedCollectionsIntersect, SortedCollectionsIntersectTest) {
+ int negative[] = {-3, -2, -1};
+ int straddle[] = {-1, 0, 1};
+ int positive[] = {1, 2, 3};
+ EXPECT_TRUE(SortedCollectionsIntersect(negative, negative + 3,
+ straddle, straddle + 3));
+ EXPECT_FALSE(SortedCollectionsIntersect(negative, negative + 3,
+ positive, positive + 3));
+ EXPECT_TRUE(SortedCollectionsIntersect(straddle, straddle + 3,
+ positive, positive + 3));
+ EXPECT_FALSE(SortedCollectionsIntersect(straddle + 2, straddle + 3,
+ positive, positive));
+ EXPECT_FALSE(SortedCollectionsIntersect(straddle, straddle + 3,
+ positive + 1, positive + 1));
+ EXPECT_TRUE(SortedCollectionsIntersect(straddle, straddle + 3,
+ positive, positive + 1));
+}
+
+// Don't crash when this occurs.
+TEST_F(SyncerTest, UpdateWhereParentIsNotAFolder) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, "B", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "BookmarkParent", 10, 10);
+ // Used to cause a CHECK
+ syncer_->SyncShare();
+ {
+ ReadTransaction rtrans(dir, __FILE__, __LINE__);
+ Entry good_entry(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(good_entry.good());
+ EXPECT_FALSE(good_entry.Get(IS_UNAPPLIED_UPDATE));
+ Entry bad_parent(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(bad_parent.good());
+ EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE));
+ }
+}
+
+const char kRootId[] = "0";
+
+TEST_F(SyncerTest, DirectoryUpdateTest) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory("in_root_id", kRootId,
+ "in_root_name", 2, 2);
+ mock_server_->AddUpdateDirectory("in_in_root_id", "in_root_id",
+ "in_in_root_name", 3, 3);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ // Entry will have been dropped.
+ Entry by_path(&trans, GET_BY_PATH, PSTR("in_root_name"));
+ EXPECT_TRUE(by_path.good());
+ Entry by_path2(&trans, GET_BY_PATH, PSTR("in_root_name") +
+ PathString(kPathSeparator) +
+ PSTR("in_in_root_name"));
+ EXPECT_TRUE(by_path2.good());
+ }
+}
+
+TEST_F(SyncerTest, DirectoryCommitTest) {
+ syncable::Id in_root, in_dir;
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, root_id_, PSTR("foo"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ in_root = parent.Get(syncable::ID);
+ MutableEntry child(&wtrans, syncable::CREATE, parent.Get(ID), PSTR("bar"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ in_dir = parent.Get(syncable::ID);
+ }
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry by_path(&trans, GET_BY_PATH, PSTR("foo"));
+ ASSERT_TRUE(by_path.good());
+ EXPECT_NE(by_path.Get(syncable::ID), in_root);
+ Entry by_path2(&trans, GET_BY_PATH, PSTR("foo") +
+ PathString(kPathSeparator) +
+ PSTR("bar"));
+ ASSERT_TRUE(by_path2.good());
+ EXPECT_NE(by_path2.Get(syncable::ID), in_dir);
+ }
+}
+
+namespace {
+
+void CheckEntryVersion(syncable::DirectoryManager* dirmgr, PathString name) {
+ ScopedDirLookup dir(dirmgr, name);
+ ASSERT_TRUE(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PSTR("foo"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(entry.Get(BASE_VERSION), 1);
+}
+
+} // namespace
+
+TEST_F(SyncerTest, ConflictSetSizeReducedToOne) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(2, 0, "in_root", 1, 1);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry oentry(&trans, GET_BY_PATH, PSTR("in_root"));
+ ASSERT_TRUE(oentry.good());
+ oentry.Put(NAME, PSTR("old_in_root"));
+ WriteTestDataToEntry(&trans, &oentry);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("in_root"));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ // This SyncShare call used to result in a CHECK failure.
+ syncer_->SyncShare();
+ syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, TestClientCommand) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ using sync_pb::ClientCommand;
+
+ ClientCommand* command = mock_server_->GetNextClientCommand();
+ command->set_set_sync_poll_interval(8);
+ command->set_set_sync_long_poll_interval(800);
+ mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
+ syncer_->SyncShare();
+
+ EXPECT_TRUE(last_client_command_.has_set_sync_poll_interval());
+ EXPECT_TRUE(last_client_command_.has_set_sync_long_poll_interval());
+ EXPECT_EQ(8, last_client_command_.set_sync_poll_interval());
+ EXPECT_EQ(800, last_client_command_.set_sync_long_poll_interval());
+
+ command = mock_server_->GetNextClientCommand();
+ command->set_set_sync_poll_interval(180);
+ command->set_set_sync_long_poll_interval(190);
+ mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
+ syncer_->SyncShare();
+
+ EXPECT_TRUE(last_client_command_.has_set_sync_poll_interval());
+ EXPECT_TRUE(last_client_command_.has_set_sync_long_poll_interval());
+ EXPECT_EQ(180, last_client_command_.set_sync_poll_interval());
+ EXPECT_EQ(190, last_client_command_.set_sync_long_poll_interval());
+}
+
+TEST_F(SyncerTest, EnsureWeSendUpOldParent) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "folder_one", 1, 1);
+ mock_server_->AddUpdateDirectory(2, 0, "folder_two", 1, 1);
+ syncer_->SyncShare();
+ {
+ // A moved entry should send an old parent.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_PATH, PSTR("folder_one"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(PARENT_ID, ids_.FromNumber(2));
+ entry.Put(IS_UNSYNCED, true);
+ // A new entry should send no parent.
+ MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
+ create.Put(IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare();
+ const sync_pb::CommitMessage& commit = mock_server_->last_sent_commit();
+ ASSERT_EQ(2, commit.entries_size());
+ EXPECT_EQ(commit.entries(0).parent_id_string(), "2");
+ EXPECT_EQ(commit.entries(0).old_parent_id(), "0");
+ EXPECT_FALSE(commit.entries(1).has_old_parent_id());
+}
+
+TEST_F(SyncerTest, Test64BitVersionSupport) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ int64 really_big_int = std::numeric_limits<int64>::max() - 12;
+ const PathString name(PSTR("ringo's dang orang ran rings around my o-ring"));
+
+ // Try writing max int64 to the version fields of a meta entry.
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(), name);
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::BASE_VERSION, really_big_int);
+ entry.Put(syncable::SERVER_VERSION, really_big_int);
+ entry.Put(syncable::ID, syncable::Id::CreateFromServerId("ID"));
+ }
+ // Now read it back out and make sure the value is max int64.
+ ReadTransaction rtrans(dir, __FILE__, __LINE__);
+ Entry entry(&rtrans, syncable::GET_BY_PATH, name);
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(really_big_int, entry.Get(syncable::BASE_VERSION));
+}
+
+TEST_F(SyncerTest, TestDSStoreDirectorySyncsNormally) {
+ syncable::Id item_id = parent_id_;
+ mock_server_->AddUpdateDirectory(item_id,
+ root_id_, ".DS_Store", 1, 1);
+ syncer_->SyncShare();
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry ds_dir(&trans, GET_BY_PATH, PSTR(".DS_Store"));
+ ASSERT_TRUE(ds_dir.good());
+}
+
+TEST_F(SyncerTest, TestSimpleUndelete) {
+ Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ // let there be an entry from the server.
+ mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
+ syncer_->SyncShare();
+ // check it out and delete it
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry.Get(IS_DEL));
+ // delete it locally
+ entry.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare();
+ // Confirm we see IS_DEL and not SERVER_IS_DEL.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ }
+ syncer_->SyncShare();
+ // Update from server confirming deletion
+ mock_server_->AddUpdateBookmark(id, root, "foo", 2, 11);
+ mock_server_->SetLastUpdateDeleted();
+ syncer_->SyncShare();
+ // IS_DEL AND SERVER_IS_DEL now both true.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ EXPECT_TRUE(entry.Get(SERVER_IS_DEL));
+ }
+ // Undelete from server
+ mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
+ syncer_->SyncShare();
+ // IS_DEL and SERVER_IS_DEL now both false.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ }
+}
+
+TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
+ Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ // let there be a entry, from the server.
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
+ syncer_->SyncShare();
+ // check it out and delete it
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry.Get(IS_DEL));
+ // delete it locally
+ entry.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare();
+ // Confirm we see IS_DEL and not SERVER_IS_DEL.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ }
+ syncer_->SyncShare();
+ // Say we do not get an update from server confirming deletion.
+ // Undelete from server
+ mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
+ syncer_->SyncShare();
+ // IS_DEL and SERVER_IS_DEL now both false.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entry.Get(IS_DEL));
+ EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+ }
+}
+
+TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
+ Id id1 = ids_.MakeServer("first"), id2 = ids_.MakeServer("second");
+ Id root = ids_.root();
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ // duplicate! expect path clashing!
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(id1, root, "foo", 1, 10);
+ mock_server_->AddUpdateBookmark(id2, root, "foo", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateBookmark(id2, root, "foo2", 1, 10);
+ syncer_->SyncShare(); // Now just don't explode.
+}
+
+TEST_F(SyncerTest, CopySyncProcessState) {
+ scoped_ptr<SyncProcessState> b;
+ {
+ SyncProcessState a;
+ a.MergeSets(ids_.FromNumber(1), ids_.FromNumber(2));
+ a.MergeSets(ids_.FromNumber(2), ids_.FromNumber(3));
+ a.MergeSets(ids_.FromNumber(4), ids_.FromNumber(5));
+ EXPECT_EQ(a.ConflictSetsSize(), 2);
+ {
+ SyncProcessState b = a;
+ b = b;
+ EXPECT_EQ(b.ConflictSetsSize(), 2);
+ }
+ EXPECT_EQ(a.ConflictSetsSize(), 2);
+ a.MergeSets(ids_.FromNumber(3), ids_.FromNumber(4));
+ EXPECT_EQ(a.ConflictSetsSize(), 1);
+ b.reset(new SyncProcessState(a));
+ }
+ EXPECT_EQ(b->ConflictSetsSize(), 1);
+}
+
+TEST_F(SyncerTest, SingletonTagUpdates) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ // As a hurdle, introduce an item whose name is the same as the
+ // tag value we'll use later.
+ int64 hurdle_handle = CreateUnsyncedDirectory(PSTR("bob"), "id_bob");
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+ ASSERT_TRUE(hurdle.good());
+ ASSERT_TRUE(!hurdle.Get(IS_DEL));
+ ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+ ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+
+ // Try to lookup by the tagname. These should fail.
+ Entry tag_alpha(&trans, GET_BY_TAG, PSTR("alpha"));
+ EXPECT_FALSE(tag_alpha.good());
+ Entry tag_bob(&trans, GET_BY_TAG, PSTR("bob"));
+ EXPECT_FALSE(tag_bob.good());
+ }
+
+ // Now download some tagged items as updates.
+ mock_server_->AddUpdateDirectory(1, 0, "update1", 1, 10);
+ mock_server_->SetLastUpdateSingletonTag("alpha");
+ mock_server_->AddUpdateDirectory(2, 0, "update2", 2, 20);
+ mock_server_->SetLastUpdateSingletonTag("bob");
+ syncer_->SyncShare();
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ // The new items should be applied as new entries, and we should be able
+ // to look them up by their tag values.
+ Entry tag_alpha(&trans, GET_BY_TAG, PSTR("alpha"));
+ ASSERT_TRUE(tag_alpha.good());
+ ASSERT_TRUE(!tag_alpha.Get(IS_DEL));
+ ASSERT_TRUE(tag_alpha.Get(SINGLETON_TAG) == PSTR("alpha"));
+ ASSERT_TRUE(tag_alpha.GetName().value() == PSTR("update1"));
+ Entry tag_bob(&trans, GET_BY_TAG, PSTR("bob"));
+ ASSERT_TRUE(tag_bob.good());
+ ASSERT_TRUE(!tag_bob.Get(IS_DEL));
+ ASSERT_TRUE(tag_bob.Get(SINGLETON_TAG) == PSTR("bob"));
+ ASSERT_TRUE(tag_bob.GetName().value() == PSTR("update2"));
+ // The old item should be unchanged.
+ Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+ ASSERT_TRUE(hurdle.good());
+ ASSERT_TRUE(!hurdle.Get(IS_DEL));
+ ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+ ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+ }
+}
+
+namespace {
+
+class SyncerPositionUpdateTest : public SyncerTest {
+ public:
+ SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
+
+ protected:
+ void ExpectLocalItemsInServerOrder() {
+ if (position_map_.empty())
+ return;
+
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Id prev_id;
+ DCHECK(prev_id.IsRoot());
+ PosMap::iterator next = position_map_.begin();
+ for (PosMap::iterator i = next++; i != position_map_.end(); ++i) {
+ Id id = i->second;
+ Entry entry_with_id(&trans, GET_BY_ID, id);
+ EXPECT_TRUE(entry_with_id.good());
+ EXPECT_EQ(entry_with_id.Get(PREV_ID), prev_id);
+ EXPECT_EQ(entry_with_id.Get(SERVER_POSITION_IN_PARENT), i->first);
+ if (next == position_map_.end()) {
+ EXPECT_TRUE(entry_with_id.Get(NEXT_ID).IsRoot());
+ } else {
+ EXPECT_EQ(entry_with_id.Get(NEXT_ID), next->second);
+ next++;
+ }
+ prev_id = id;
+ }
+ }
+
+ void AddRootItemWithPosition(int64 position) {
+ string id = string("ServerId") + Int64ToString(next_update_id_++);
+ string name = "my name is my id -- " + id;
+ int revision = next_revision_++;
+ mock_server_->AddUpdateDirectory(id, kRootId, name, revision, revision);
+ mock_server_->SetLastUpdatePosition(position);
+ position_map_.insert(
+ PosMap::value_type(position, Id::CreateFromServerId(id)));
+ }
+ private:
+ typedef multimap<int64, Id> PosMap;
+ PosMap position_map_;
+ int next_update_id_;
+ int next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(SyncerPositionUpdateTest);
+};
+
+} // namespace
+
+TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
+ // Add a bunch of items in increasing order, starting with just
+ // positive position values.
+ AddRootItemWithPosition(100);
+ AddRootItemWithPosition(199);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(400);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, InOrderNegative) {
+ // Test negative position values, but in increasing order.
+ AddRootItemWithPosition(-400);
+ AddRootItemWithPosition(-201);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(100);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
+ // Test when items are sent in the reverse order.
+ AddRootItemWithPosition(400);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(100);
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(-201);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(-400);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+}
+
+TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
+ // Mix it all up, interleaving position values,
+ // and try multiple batches of updates.
+ AddRootItemWithPosition(400);
+ AddRootItemWithPosition(201);
+ AddRootItemWithPosition(-400);
+ AddRootItemWithPosition(100);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+
+ AddRootItemWithPosition(-150);
+ AddRootItemWithPosition(-200);
+ AddRootItemWithPosition(200);
+ AddRootItemWithPosition(-201);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+
+ AddRootItemWithPosition(-144);
+
+ syncer_->SyncShare();
+ ExpectLocalItemsInServerOrder();
+}
+
+namespace {
+
+class SyncerPositionTiebreakingTest : public SyncerTest {
+ public:
+ SyncerPositionTiebreakingTest()
+ : low_id_(Id::CreateFromServerId("A")),
+ mid_id_(Id::CreateFromServerId("M")),
+ high_id_(Id::CreateFromServerId("Z")),
+ next_revision_(1) {
+ DCHECK(low_id_ < mid_id_);
+ DCHECK(mid_id_ < high_id_);
+ DCHECK(low_id_ < high_id_);
+ }
+
+ // Adds the item by its Id, using a constant value for the position
+ // so that the syncer has to resolve the order some other way.
+ void Add(const Id& id) {
+ int revision = next_revision_++;
+ mock_server_->AddUpdateDirectory(id.GetServerId(), kRootId,
+ id.GetServerId(), revision, revision);
+ // The update position doesn't vary.
+ mock_server_->SetLastUpdatePosition(90210);
+ }
+
+ void ExpectLocalOrderIsByServerId() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Id null_id;
+ Entry low(&trans, GET_BY_ID, low_id_);
+ Entry mid(&trans, GET_BY_ID, mid_id_);
+ Entry high(&trans, GET_BY_ID, high_id_);
+ EXPECT_TRUE(low.good());
+ EXPECT_TRUE(mid.good());
+ EXPECT_TRUE(high.good());
+ EXPECT_EQ(low.Get(PREV_ID), null_id);
+ EXPECT_EQ(mid.Get(PREV_ID), low_id_);
+ EXPECT_EQ(high.Get(PREV_ID), mid_id_);
+ EXPECT_EQ(high.Get(NEXT_ID), null_id);
+ EXPECT_EQ(mid.Get(NEXT_ID), high_id_);
+ EXPECT_EQ(low.Get(NEXT_ID), mid_id_);
+ }
+
+ protected:
+ // When there's a tiebreak on the numeric position, it's supposed to be
+ // broken by string comparison of the ids. These ids are in increasing
+ // order.
+ const Id low_id_;
+ const Id mid_id_;
+ const Id high_id_;
+
+ private:
+ int next_revision_;
+ DISALLOW_COPY_AND_ASSIGN(SyncerPositionTiebreakingTest);
+};
+
+} // namespace
+
+TEST_F(SyncerPositionTiebreakingTest, LowMidHigh) {
+ Add(low_id_);
+ Add(mid_id_);
+ Add(high_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, LowHighMid) {
+ Add(low_id_);
+ Add(high_id_);
+ Add(mid_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, HighMidLow) {
+ Add(high_id_);
+ Add(mid_id_);
+ Add(low_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, HighLowMid) {
+ Add(high_id_);
+ Add(low_id_);
+ Add(mid_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, MidHighLow) {
+ Add(mid_id_);
+ Add(high_id_);
+ Add(low_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
+ Add(mid_id_);
+ Add(low_id_);
+ Add(high_id_);
+ syncer_->SyncShare();
+ ExpectLocalOrderIsByServerId();
+}
+
+const SyncerTest::CommitOrderingTest
+SyncerTest::CommitOrderingTest::LAST_COMMIT_ITEM = {-1, TestIdFactory::root()};
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
new file mode 100644
index 0000000..75f7b82
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -0,0 +1,845 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_util.h"
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using syncable::BASE_VERSION;
+using syncable::BOOKMARK_FAVICON;
+using syncable::BOOKMARK_URL;
+using syncable::Blob;
+using syncable::CHANGES_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::CTIME;
+using syncable::ComparePathNames;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::ExtendedAttributeKey;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::GET_BY_PARENTID_AND_DBNAME;
+using syncable::ID;
+using syncable::IS_BOOKMARK_OBJECT;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::Id;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::MutableEntry;
+using syncable::MutableExtendedAttribute;
+using syncable::NEXT_ID;
+using syncable::Name;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::ReadTransaction;
+using syncable::SERVER_BOOKMARK_FAVICON;
+using syncable::SERVER_BOOKMARK_URL;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_BOOKMARK_OBJECT;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SINGLETON_TAG;
+using syncable::SYNCER;
+using syncable::SyncName;
+using syncable::UNSANITIZED_NAME;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using std::string;
+using std::vector;
+
+// TODO(ncarter): Remove unique-in-parent title support and name conflicts.
+// static
+syncable::Id SyncerUtil::GetNameConflictingItemId(
+ syncable::BaseTransaction* trans,
+ const syncable::Id& parent_id,
+ const PathString& server_name) {
+
+ Entry same_path(trans, GET_BY_PARENTID_AND_DBNAME, parent_id, server_name);
+ if (same_path.good() && !same_path.GetName().HasBeenSanitized())
+ return same_path.Get(ID);
+ Name doctored_name(server_name);
+ doctored_name.db_value().MakeOSLegal();
+ if (!doctored_name.HasBeenSanitized())
+ return syncable::kNullId;
+ Directory::ChildHandles children;
+ trans->directory()->GetChildHandles(trans, parent_id, &children);
+ Directory::ChildHandles::iterator i = children.begin();
+ while (i != children.end()) {
+ Entry child_entry(trans, GET_BY_HANDLE, *i++);
+ CHECK(child_entry.good());
+ if (0 == ComparePathNames(child_entry.Get(UNSANITIZED_NAME), server_name))
+ return child_entry.Get(ID);
+ }
+ return syncable::kNullId;
+}
+
+// Returns the number of unsynced entries.
+// static
+int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
+ vector<int64> *handles) {
+ trans->directory()->GetUnsyncedMetaHandles(trans, handles);
+ LOG_IF(INFO, handles->size() > 0)
+ << "Have " << handles->size() << " unsynced items.";
+ return handles->size();
+}
+
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id,
+ syncable::Directory::ChildHandles* children) {
+ syncable::Id old_id = entry->Get(ID);
+ if (!entry->Put(ID, new_id)) {
+ Entry old_entry(trans, GET_BY_ID, new_id);
+ CHECK(old_entry.good());
+ LOG(FATAL) << "Attempt to change ID to " << new_id
+ << " conflicts with existing entry.\n\n"
+ << *entry << "\n\n" << old_entry;
+ }
+ if (entry->Get(IS_DIR)) {
+ // Get all child entries of the old id
+ trans->directory()->GetChildHandles(trans, old_id, children);
+ Directory::ChildHandles::iterator i = children->begin();
+ while (i != children->end()) {
+ MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+ CHECK(child_entry.good());
+ CHECK(child_entry.Put(PARENT_ID, new_id));
+ }
+ }
+ // Update Id references on the previous and next nodes in the sibling
+ // order. Do this by reinserting into the linked list; the first
+ // step in PutPredecessor is to Unlink from the existing order, which
+ // will overwrite the stale Id value from the adjacent nodes.
+ if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
+ entry->Get(PREV_ID) == old_id) {
+ // We just need a shallow update to |entry|'s fields since it is already
+ // self looped.
+ entry->Put(NEXT_ID, new_id);
+ entry->Put(PREV_ID, new_id);
+ } else {
+ entry->PutPredecessor(entry->Get(PREV_ID));
+ }
+}
+
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id) {
+ syncable::Directory::ChildHandles children;
+ ChangeEntryIDAndUpdateChildren(trans, entry, new_id, &children);
+}
+
+// static
+void SyncerUtil::AttemptReuniteLostCommitResponses(
+ syncable::WriteTransaction* trans,
+ const SyncEntity& server_entry,
+ const string& client_id) {
+ // If a commit succeeds, but the response does not come back fast enough
+ // then the syncer might assume that it was never committed.
+ // The server will track the client that sent up the original commit and
+ // return this in a get updates response. When this matches a local
+ // uncommitted item, we must mutate our local item and version to pick up
+ // the committed version of the same item whose commit response was lost.
+ // There is however still a race condition if the server has not
+ // completed the commit by the time the syncer tries to get updates
+ // again. To mitigate this, we need to have the server time out in
+ // a reasonable span, our commit batches have to be small enough
+ // to process within our HTTP response "assumed alive" time.
+
+ // We need to check if we have a that didn't get its server
+ // id updated correctly. The server sends down a client ID
+ // and a local (negative) id. If we have a entry by that
+ // description, we should update the ID and version to the
+ // server side ones to avoid multiple commits to the same name.
+ if (server_entry.has_originator_cache_guid() &&
+ server_entry.originator_cache_guid() == client_id) {
+ syncable::Id server_id = syncable::Id::CreateFromClientString(
+ server_entry.originator_client_item_id());
+ CHECK(!server_id.ServerKnows());
+ syncable::MutableEntry local_entry(trans, GET_BY_ID, server_id);
+
+ // If it exists, then our local client lost a commit response.
+ if (local_entry.good() && !local_entry.Get(IS_DEL)) {
+ int64 old_version = local_entry.Get(BASE_VERSION);
+ int64 new_version = server_entry.version();
+ CHECK(old_version <= 0);
+ CHECK(new_version > 0);
+ // Otherwise setting the base version could cause a consistency failure.
+ // An entry should never be version 0 and SYNCED.
+ CHECK(local_entry.Get(IS_UNSYNCED));
+
+ // just a quick sanity check
+ CHECK(!local_entry.Get(ID).ServerKnows());
+
+ LOG(INFO) << "Reuniting lost commit response IDs" <<
+ " server id: " << server_entry.id() << " local id: " <<
+ local_entry.Get(ID) << " new version: " << new_version;
+
+ local_entry.Put(BASE_VERSION, new_version);
+
+ ChangeEntryIDAndUpdateChildren(trans, &local_entry, server_entry.id());
+
+ // We need to continue normal processing on this update after we
+ // reunited its ID.
+ }
+ // !local_entry.Good() means we don't have a left behind entry for this
+ // ID. We successfully committed before. In the future we should get rid
+ // of this system and just have client side generated IDs as a whole.
+ }
+}
+
+// static
+UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ SyncerSession* const session) {
+
+ syncable::Id conflicting_id;
+ UpdateAttemptResponse result =
+ AttemptToUpdateEntryWithoutMerge(trans, entry, session,
+ &conflicting_id);
+ if (result != NAME_CONFLICT) {
+ return result;
+ }
+ syncable::MutableEntry same_path(trans, syncable::GET_BY_ID, conflicting_id);
+ CHECK(same_path.good());
+
+ ConflictResolver* resolver = session->resolver();
+
+ if (resolver &&
+ resolver->AttemptItemMerge(trans, &same_path, entry)) {
+ return SUCCESS;
+ }
+ LOG(INFO) << "Not updating item, path collision. Update:\n" << *entry
+ << "\nSame Path:\n" << same_path;
+ return CONFLICT;
+}
+
+// static
+UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ SyncerSession* const session, syncable::Id* const conflicting_id) {
+
+ CHECK(entry->good());
+ if (!entry->Get(IS_UNAPPLIED_UPDATE))
+ return SUCCESS; // No work to do
+ syncable::Id id = entry->Get(ID);
+
+ if (entry->Get(IS_UNSYNCED)) {
+ LOG(INFO) << "Skipping update, returning conflict for: " << id
+ << " ; it's unsynced.";
+ return CONFLICT;
+ }
+ if (!entry->Get(SERVER_IS_DEL)) {
+ syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
+ Entry parent(trans, GET_BY_ID, new_parent);
+ // A note on non-directory parents:
+ // We catch most unfixable tree invariant errors at update receipt time,
+ // however we deal with this case here because we may receive the child
+ // first then the illegal parent. Instead of dealing with it twice in
+ // different ways we deal with it once here to reduce the amount of code and
+ // potential errors.
+ if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
+ return CONFLICT;
+ }
+ if (entry->Get(PARENT_ID) != new_parent) {
+ if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
+ LOG(INFO) << "Not updating item " << id << ", illegal new parent "
+ "(would cause loop).";
+ return CONFLICT;
+ }
+ }
+ PathString server_name = entry->Get(SERVER_NAME);
+ syncable::Id conflict_id =
+ SyncerUtil::GetNameConflictingItemId(trans,
+ entry->Get(SERVER_PARENT_ID),
+ server_name);
+ if (conflict_id != syncable::kNullId && conflict_id != id) {
+ if (conflicting_id)
+ *conflicting_id = conflict_id;
+ return NAME_CONFLICT;
+ }
+ } else if (entry->Get(IS_DIR)) {
+ Directory::ChildHandles handles;
+ trans->directory()->GetChildHandles(trans, id, &handles);
+ if (!handles.empty()) {
+ // If we have still-existing children, then we need to deal with
+ // them before we can process this change.
+ LOG(INFO) << "Not deleting directory; it's not empty " << *entry;
+ return CONFLICT;
+ }
+ }
+
+ int64 old_version = entry->Get(BASE_VERSION);
+ SyncerUtil::UpdateLocalDataFromServerData(trans, entry);
+
+ return SUCCESS;
+}
+
+// Pass in name and checksum because of UTF8 conversion.
+// static
+void SyncerUtil::UpdateServerFieldsFromUpdate(
+ MutableEntry* local_entry,
+ const SyncEntity& server_entry,
+ const SyncName& name) {
+ if (server_entry.deleted()) {
+ // The server returns very lightweight replies for deletions, so
+ // we don't clobber a bunch of fields on delete.
+ local_entry->Put(SERVER_IS_DEL, true);
+ local_entry->Put(SERVER_VERSION,
+ std::max(local_entry->Get(SERVER_VERSION),
+ local_entry->Get(BASE_VERSION)) + 1L);
+ local_entry->Put(IS_UNAPPLIED_UPDATE, true);
+ return;
+ }
+
+ CHECK(local_entry->Get(ID) == server_entry.id())
+ << "ID Changing not supported here";
+ local_entry->Put(SERVER_PARENT_ID, server_entry.parent_id());
+ local_entry->PutServerName(name);
+ local_entry->Put(SERVER_VERSION, server_entry.version());
+ local_entry->Put(SERVER_CTIME,
+ ServerTimeToClientTime(server_entry.ctime()));
+ local_entry->Put(SERVER_MTIME,
+ ServerTimeToClientTime(server_entry.mtime()));
+ local_entry->Put(SERVER_IS_BOOKMARK_OBJECT, server_entry.has_bookmarkdata());
+ local_entry->Put(SERVER_IS_DIR, server_entry.IsFolder());
+ if (server_entry.has_singleton_tag()) {
+ PathString tag;
+ AppendUTF8ToPathString(server_entry.singleton_tag(), &tag);
+ local_entry->Put(SINGLETON_TAG, tag);
+ }
+ if (server_entry.has_bookmarkdata() && !server_entry.deleted()) {
+ const SyncEntity::BookmarkData& bookmark = server_entry.bookmarkdata();
+ if (bookmark.has_bookmark_url()) {
+ PathString url;
+ AppendUTF8ToPathString(bookmark.bookmark_url(), &url);
+ local_entry->Put(SERVER_BOOKMARK_URL, url);
+ }
+ if (bookmark.has_bookmark_favicon()) {
+ Blob favicon_blob;
+ SyncerProtoUtil::CopyProtoBytesIntoBlob(bookmark.bookmark_favicon(),
+ &favicon_blob);
+ local_entry->Put(SERVER_BOOKMARK_FAVICON, favicon_blob);
+ }
+ }
+ if (server_entry.has_position_in_parent()) {
+ local_entry->Put(SERVER_POSITION_IN_PARENT,
+ server_entry.position_in_parent());
+ }
+
+ local_entry->Put(SERVER_IS_DEL, server_entry.deleted());
+ // We only mark the entry as unapplied if its version is greater than
+ // the local data. If we're processing the update that corresponds to one of
+ // our commit we don't apply it as time differences may occur.
+ if (server_entry.version() > local_entry->Get(BASE_VERSION)) {
+ local_entry->Put(IS_UNAPPLIED_UPDATE, true);
+ }
+ ApplyExtendedAttributes(local_entry, server_entry);
+}
+
+// static
+void SyncerUtil::ApplyExtendedAttributes(
+ syncable::MutableEntry* local_entry,
+ const SyncEntity& server_entry) {
+ local_entry->DeleteAllExtendedAttributes(local_entry->trans());
+ if (server_entry.has_extended_attributes()) {
+ const sync_pb::ExtendedAttributes & extended_attributes =
+ server_entry.extended_attributes();
+ for (int i = 0; i < extended_attributes.extendedattribute_size(); i++) {
+ PathString pathstring_key;
+ AppendUTF8ToPathString(
+ extended_attributes.extendedattribute(i).key(), &pathstring_key);
+ ExtendedAttributeKey key(local_entry->Get(META_HANDLE), pathstring_key);
+ MutableExtendedAttribute local_attribute(local_entry->trans(),
+ CREATE, key);
+ SyncerProtoUtil::CopyProtoBytesIntoBlob(
+ extended_attributes.extendedattribute(i).value(),
+ local_attribute.mutable_value());
+ }
+ }
+}
+
+// Creates a new Entry iff no Entry exists with the given id.
+// static
+void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
+ const syncable::Id& id) {
+ syncable::MutableEntry entry(trans, syncable::GET_BY_ID, id);
+ if (!entry.good()) {
+ syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
+ id);
+ }
+}
+
+// static
+bool SyncerUtil::ServerAndLocalOrdersMatch(syncable::Entry* entry) {
+ // Find the closest up-to-date local sibling by walking the linked list.
+ syncable::Id local_up_to_date_predecessor = entry->Get(PREV_ID);
+ while (!local_up_to_date_predecessor.IsRoot()) {
+ Entry local_prev(entry->trans(), GET_BY_ID, local_up_to_date_predecessor);
+ if (!local_prev.good() || local_prev.Get(IS_DEL))
+ return false;
+ if (!local_prev.Get(IS_UNAPPLIED_UPDATE) && !local_prev.Get(IS_UNSYNCED))
+ break;
+ local_up_to_date_predecessor = local_prev.Get(PREV_ID);
+ }
+ // Now find the closest up-to-date sibling in the server order.
+
+ syncable::Id server_up_to_date_predecessor =
+ ComputePrevIdFromServerPosition(entry->trans(), entry,
+ entry->Get(SERVER_PARENT_ID));
+ return server_up_to_date_predecessor == local_up_to_date_predecessor;
+}
+
+// static
+bool SyncerUtil::ServerAndLocalEntriesMatch(syncable::Entry* entry) {
+ if (!ClientAndServerTimeMatch(
+ entry->Get(CTIME), ClientTimeToServerTime(entry->Get(SERVER_CTIME)))) {
+ LOG(WARNING) << "Client and server time mismatch";
+ return false;
+ }
+ if (entry->Get(IS_DEL) && entry->Get(SERVER_IS_DEL))
+ return true;
+ // Name should exactly match here.
+ if (!entry->SyncNameMatchesServerName()) {
+ LOG(WARNING) << "Unsanitized name mismatch";
+ return false;
+ }
+
+ if (entry->Get(PARENT_ID) != entry->Get(SERVER_PARENT_ID) ||
+ entry->Get(IS_DIR) != entry->Get(SERVER_IS_DIR) ||
+ entry->Get(IS_DEL) != entry->Get(SERVER_IS_DEL)) {
+ LOG(WARNING) << "Metabit mismatch";
+ return false;
+ }
+
+ if (!ServerAndLocalOrdersMatch(entry)) {
+ LOG(WARNING) << "Server/local ordering mismatch";
+ return false;
+ }
+
+ if (entry->Get(IS_BOOKMARK_OBJECT)) {
+ if (!entry->Get(IS_DIR)) {
+ if (entry->Get(BOOKMARK_URL) != entry->Get(SERVER_BOOKMARK_URL)) {
+ LOG(WARNING) << "Bookmark URL mismatch";
+ return false;
+ }
+ }
+ }
+ if (entry->Get(IS_DIR))
+ return true;
+ // For historical reasons, a folder's MTIME changes when its contents change.
+ // TODO(ncarter): Remove the special casing of MTIME.
+ bool time_match = ClientAndServerTimeMatch(entry->Get(MTIME),
+ ClientTimeToServerTime(entry->Get(SERVER_MTIME)));
+ if (!time_match) {
+ LOG(WARNING) << "Time mismatch";
+ }
+ return time_match;
+}
+
+// static
+void SyncerUtil::SplitServerInformationIntoNewEntry(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry) {
+ syncable::Id id = entry->Get(ID);
+ ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
+ entry->Put(BASE_VERSION, 0);
+
+ MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
+ CopyServerFields(entry, &new_entry);
+ ClearServerData(entry);
+
+ LOG(INFO) << "Splitting server information, local entry: " << *entry <<
+ " server entry: " << new_entry;
+}
+
+// This function is called on an entry when we can update the user-facing data
+// from the server data.
+// static
+void SyncerUtil::UpdateLocalDataFromServerData(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry) {
+ CHECK(!entry->Get(IS_UNSYNCED));
+ CHECK(entry->Get(IS_UNAPPLIED_UPDATE));
+ LOG(INFO) << "Updating entry : " << *entry;
+ entry->Put(IS_BOOKMARK_OBJECT, entry->Get(SERVER_IS_BOOKMARK_OBJECT));
+ // This strange dance around the IS_DEL flag
+ // avoids problems when setting the name.
+ if (entry->Get(SERVER_IS_DEL)) {
+ entry->Put(IS_DEL, true);
+ } else {
+ Name name = Name::FromSyncName(entry->GetServerName());
+ name.db_value().MakeOSLegal();
+ bool was_doctored_before_made_noncolliding = name.HasBeenSanitized();
+ name.db_value().MakeNoncollidingForEntry(trans,
+ entry->Get(SERVER_PARENT_ID),
+ entry);
+ bool was_doctored = name.HasBeenSanitized();
+ if (was_doctored) {
+ // If we're changing the name of entry, either its name
+ // should be illegal, or some other entry should have an unsanitized
+ // name. There's should be a CHECK in every code path.
+ Entry blocking_entry(trans, GET_BY_PARENTID_AND_DBNAME,
+ entry->Get(SERVER_PARENT_ID),
+ name.value());
+ if (blocking_entry.good())
+ CHECK(blocking_entry.GetName().HasBeenSanitized());
+ else
+ CHECK(was_doctored_before_made_noncolliding);
+ }
+ CHECK(entry->PutParentIdAndName(entry->Get(SERVER_PARENT_ID), name))
+ << "Name Clash in UpdateLocalDataFromServerData: "
+ << *entry;
+ CHECK(entry->Put(IS_DEL, false));
+ Id new_predecessor = ComputePrevIdFromServerPosition(trans, entry,
+ entry->Get(SERVER_PARENT_ID));
+ CHECK(entry->PutPredecessor(new_predecessor))
+ << " Illegal predecessor after converting from server position.";
+ }
+
+ entry->Put(CTIME, entry->Get(SERVER_CTIME));
+ entry->Put(MTIME, entry->Get(SERVER_MTIME));
+ entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
+ entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
+ entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
+ entry->Put(BOOKMARK_URL, entry->Get(SERVER_BOOKMARK_URL));
+ entry->Put(BOOKMARK_FAVICON, entry->Get(SERVER_BOOKMARK_FAVICON));
+ entry->Put(IS_UNAPPLIED_UPDATE, false);
+}
+
+// static
+VerifyCommitResult SyncerUtil::ValidateCommitEntry(
+ syncable::MutableEntry* entry) {
+ syncable::Id id = entry->Get(ID);
+ if (id == entry->Get(PARENT_ID)) {
+ CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
+ // If the root becomes unsynced it can cause us problems.
+ LOG(ERROR) << "Root item became unsynced " << *entry;
+ return VERIFY_UNSYNCABLE;
+ }
+ if (entry->IsRoot()) {
+ LOG(ERROR) << "Permanent item became unsynced " << *entry;
+ return VERIFY_UNSYNCABLE;
+ }
+ if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
+ // drop deleted uncommitted entries.
+ return VERIFY_UNSYNCABLE;
+ }
+ return VERIFY_OK;
+}
+
+// static
+bool SyncerUtil::AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ vector<syncable::Id>* commit_ids) {
+
+ if (!inserted_items->insert(item->Get(META_HANDLE)).second)
+ return false;
+ commit_ids->push_back(item->Get(ID));
+ if (item->Get(IS_DEL))
+ return true; // Deleted items have no predecessors.
+
+ Id prev_id = item->Get(PREV_ID);
+ while (!prev_id.IsRoot()) {
+ Entry prev(trans, GET_BY_ID, prev_id);
+ CHECK(prev.good()) << "Bad id when walking predecessors.";
+ if (!prev.Get(inclusion_filter))
+ break;
+ if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
+ break;
+ commit_ids->push_back(prev_id);
+ prev_id = prev.Get(PREV_ID);
+ }
+ return true;
+}
+
+// static
+void SyncerUtil::AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ vector<syncable::Id>* commit_ids) {
+
+ vector<syncable::Id>::size_type initial_size = commit_ids->size();
+ if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
+ commit_ids))
+ return;
+ // Reverse what we added to get the correct order.
+ std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
+}
+
+// TODO(ncarter): This is redundant to some code in GetCommitIdsCommand. Unify
+// them.
+// static
+void SyncerUtil::AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::MetahandleSet* inserted_items,
+ vector<syncable::Id>* commit_ids,
+ syncable::Id parent_id) {
+ vector<syncable::Id>::size_type intial_commit_ids_size = commit_ids->size();
+ // Climb the tree adding entries leaf -> root.
+ while (!parent_id.ServerKnows()) {
+ Entry parent(trans, GET_BY_ID, parent_id);
+ CHECK(parent.good()) << "Bad user-only parent in item path.";
+ if (!AddItemThenPredecessors(trans, &parent, IS_UNSYNCED, inserted_items,
+ commit_ids))
+ break; // Parent was already present in |inserted_items|.
+ parent_id = parent.Get(PARENT_ID);
+ }
+ // Reverse what we added to get the correct order.
+ std::reverse(commit_ids->begin() + intial_commit_ids_size, commit_ids->end());
+}
+
+// static
+void SyncerUtil::MarkDeletedChildrenSynced(
+ const syncable::ScopedDirLookup &dir,
+ std::set<syncable::Id>* deleted_folders) {
+ // There's two options here.
+ // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
+ // of the deleted folders.
+ // 2. Take each folder and do a tree walk of all entries underneath it.
+ // #2 has a lower big O cost, but writing code to limit the time spent inside
+ // the transaction during each step is simpler with 1. Changing this decision
+ // may be sensible if this code shows up in profiling.
+ if (deleted_folders->empty())
+ return;
+ Directory::UnsyncedMetaHandles handles;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ dir->GetUnsyncedMetaHandles(&trans, &handles);
+ }
+ if (handles.empty())
+ return;
+ Directory::UnsyncedMetaHandles::iterator it;
+ for (it = handles.begin() ; it != handles.end() ; ++it) {
+ // Single transaction / entry we deal with.
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_HANDLE, *it);
+ if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
+ continue;
+ syncable::Id id = entry.Get(PARENT_ID);
+ while (id != trans.root_id()) {
+ if (deleted_folders->find(id) != deleted_folders->end()) {
+ // We've synced the deletion of this deleted entries parent
+ entry.Put(IS_UNSYNCED, false);
+ break;
+ }
+ Entry parent(&trans, GET_BY_ID, id);
+ if (!parent.good() || !parent.Get(IS_DEL))
+ break;
+ id = parent.Get(PARENT_ID);
+ }
+ }
+}
+
+// static
+VerifyResult SyncerUtil::VerifyNewEntry(
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id,
+ const bool deleted) {
+ if (same_id->good()) {
+ // Not a new entry.
+ return VERIFY_UNDECIDED;
+ }
+ if (deleted) {
+ // Deletion of an item we've never seen can be ignored.
+ return VERIFY_SKIP;
+ }
+
+ return VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; check here for updates that break
+// consistency rules.
+// static
+VerifyResult SyncerUtil::VerifyUpdateConsistency(
+ syncable::WriteTransaction* trans,
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id,
+ const bool deleted,
+ const bool is_directory,
+ const bool has_bookmark_data) {
+
+ CHECK(same_id->good());
+
+ // If the entry is a delete, we don't really need to worry at this stage.
+ if (deleted)
+ return VERIFY_SUCCESS;
+
+ if (same_id->Get(SERVER_VERSION) > 0) {
+ // Then we've had an update for this entry before.
+ if (is_directory != same_id->Get(SERVER_IS_DIR) ||
+ has_bookmark_data != same_id->Get(SERVER_IS_BOOKMARK_OBJECT)) {
+ if (same_id->Get(IS_DEL)) { // if we've deleted the item, we don't care.
+ return VERIFY_SKIP;
+ } else {
+ LOG(ERROR) << "Server update doesn't agree with previous updates. ";
+ LOG(ERROR) << " Entry: " << *same_id;
+ LOG(ERROR) << " Update: " << SyncEntityDebugString(entry);
+ return VERIFY_FAIL;
+ }
+ }
+
+ if (!deleted &&
+ (same_id->Get(SERVER_IS_DEL) ||
+ (!same_id->Get(IS_UNSYNCED) && same_id->Get(IS_DEL) &&
+ same_id->Get(BASE_VERSION) > 0))) {
+ // An undelete. The latter case in the above condition is for
+ // when the server does not give us an update following the
+ // commit of a delete, before undeleting. Undeletion is possible
+ // in the server's storage backend, so it's possible on the client,
+ // though not expected to be something that is commonly possible.
+ VerifyResult result =
+ SyncerUtil::VerifyUndelete(trans, entry, same_id);
+ if (VERIFY_UNDECIDED != result)
+ return result;
+ }
+ }
+ if (same_id->Get(BASE_VERSION) > 0) {
+ // We've committed this entry in the past.
+ if (is_directory != same_id->Get(IS_DIR) ||
+ has_bookmark_data != same_id->Get(IS_BOOKMARK_OBJECT)) {
+ LOG(ERROR) << "Server update doesn't agree with committed item. ";
+ LOG(ERROR) << " Entry: " << *same_id;
+ LOG(ERROR) << " Update: " << SyncEntityDebugString(entry);
+ return VERIFY_FAIL;
+ }
+ if (same_id->Get(BASE_VERSION) == entry.version() &&
+ !same_id->Get(IS_UNSYNCED) &&
+ !SyncerProtoUtil::Compare(*same_id, entry)) {
+ // TODO(sync): This constraint needs to be relaxed. For now it's OK to
+ // fail the verification and deal with it when we ApplyUpdates.
+ LOG(ERROR) << "Server update doesn't match local data with same "
+ "version. A bug should be filed. Entry: " << *same_id <<
+ "Update: " << SyncEntityDebugString(entry);
+ return VERIFY_FAIL;
+ }
+ if (same_id->Get(SERVER_VERSION) > entry.version()) {
+ LOG(WARNING) << "We've already seen a more recent update from the server";
+ LOG(WARNING) << " Entry: " << *same_id;
+ LOG(WARNING) << " Update: " << SyncEntityDebugString(entry);
+ return VERIFY_SKIP;
+ }
+ }
+ return VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; verify an update that seems to be
+// expressing an 'undelete'
+// static
+VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id) {
+ CHECK(same_id->good());
+ LOG(INFO) << "Server update is attempting undelete. " << *same_id
+ << "Update:" << SyncEntityDebugString(entry);
+ // Move the old one aside and start over. It's too tricky to
+ // get the old one back into a state that would pass
+ // CheckTreeInvariants().
+ if (same_id->Get(IS_DEL)) {
+ same_id->Put(ID, trans->directory()->NextId());
+ same_id->Put(BASE_VERSION, CHANGES_VERSION);
+ same_id->Put(SERVER_VERSION, 0);
+ return VERIFY_SUCCESS;
+ }
+ if (entry.version() < same_id->Get(SERVER_VERSION)) {
+ LOG(WARNING) << "Update older than current server version for" <<
+ *same_id << "Update:" << SyncEntityDebugString(entry);
+ return VERIFY_SUCCESS; // Expected in new sync protocol.
+ }
+ return VERIFY_UNDECIDED;
+}
+
+// static
+syncable::Id SyncerUtil::ComputePrevIdFromServerPosition(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* update_item,
+ const syncable::Id& parent_id) {
+ const int64 position_in_parent = update_item->Get(SERVER_POSITION_IN_PARENT);
+
+ // TODO(ncarter): This computation is linear in the number of children, but
+ // we could make it logarithmic if we kept an index on server position.
+ syncable::Id closest_sibling;
+ syncable::Id next_id = trans->directory()->GetFirstChildId(trans, parent_id);
+ while (!next_id.IsRoot()) {
+ syncable::Entry candidate(trans, GET_BY_ID, next_id);
+ if (!candidate.good()) {
+ LOG(WARNING) << "Should not happen";
+ return closest_sibling;
+ }
+ next_id = candidate.Get(NEXT_ID);
+
+ // Defensively prevent self-comparison.
+ if (candidate.Get(META_HANDLE) == update_item->Get(META_HANDLE)) {
+ continue;
+ }
+
+ // Ignore unapplied updates -- they might not even be server-siblings.
+ if (candidate.Get(IS_UNAPPLIED_UPDATE)) {
+ continue;
+ }
+
+ // Unsynced items don't have a valid server position.
+ if (!candidate.Get(IS_UNSYNCED)) {
+ // If |candidate| is after |update_entry| according to the server
+ // ordering, then we're done. ID is the tiebreaker.
+ if ((candidate.Get(SERVER_POSITION_IN_PARENT) > position_in_parent) ||
+ (candidate.Get(SERVER_POSITION_IN_PARENT) == position_in_parent) &&
+ (candidate.Get(ID) > update_item->Get(ID))) {
+ return closest_sibling;
+ }
+ }
+
+ // We can't trust the SERVER_ fields of unsynced items, but they are
+ // potentially legitimate local predecessors. In the case where
+ // |update_item| and an unsynced item wind up in the same insertion
+ // position, we need to choose how to order them. The following check puts
+ // the unapplied update first; removing it would put the unsynced item(s)
+ // first.
+ if (candidate.Get(IS_UNSYNCED)) {
+ continue;
+ }
+
+ // |update_entry| is considered to be somewhere after |candidate|, so
+ // store it as the upper bound.
+ closest_sibling = candidate.Get(ID);
+ }
+
+ return closest_sibling;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_util.h b/chrome/browser/sync/engine/syncer_util.h
new file mode 100644
index 0000000..91e0c814
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_util.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utility functions manipulating syncable::Entries, intended for use by
+// the syncer.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+class SyncEntity;
+
+
+class SyncerUtil {
+ public:
+ // TODO(ncarter): Remove unique-in-parent title support and name conflicts.
+ static syncable::Id GetNameConflictingItemId(
+ syncable::BaseTransaction* trans,
+ const syncable::Id& parent_id,
+ const PathString& server_name);
+
+ static void ChangeEntryIDAndUpdateChildren(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id,
+ syncable::Directory::ChildHandles* children);
+
+ // Returns the number of unsynced entries.
+ static int GetUnsyncedEntries(syncable::BaseTransaction* trans,
+ std::vector<int64> *handles);
+
+ static void ChangeEntryIDAndUpdateChildren(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry,
+ const syncable::Id& new_id);
+
+ static void AttemptReuniteLostCommitResponses(
+ syncable::WriteTransaction* trans,
+ const SyncEntity& server_entry,
+ const std::string& client_id);
+
+ static UpdateAttemptResponse AttemptToUpdateEntry(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ SyncerSession* const session);
+
+ static UpdateAttemptResponse AttemptToUpdateEntryWithoutMerge(
+ syncable::WriteTransaction* const trans,
+ syncable::MutableEntry* const entry,
+ SyncerSession* const session, syncable::Id* const conflicting_id);
+
+ // Pass in name to avoid redundant UTF8 conversion.
+ static void UpdateServerFieldsFromUpdate(
+ syncable::MutableEntry* local_entry,
+ const SyncEntity& server_entry,
+ const syncable::SyncName& name);
+
+ static void ApplyExtendedAttributes(
+ syncable::MutableEntry* local_entry,
+ const SyncEntity& server_entry);
+
+ // Creates a new Entry iff no Entry exists with the given id.
+ static void CreateNewEntry(syncable::WriteTransaction *trans,
+ const syncable::Id& id);
+
+ static bool ServerAndLocalEntriesMatch(syncable::Entry* entry);
+
+ static void SplitServerInformationIntoNewEntry(
+ syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+
+ // This function is called on an entry when we can update the user-facing data
+ // from the server data.
+ static void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+
+ static VerifyCommitResult ValidateCommitEntry(syncable::MutableEntry* entry);
+
+ static VerifyResult VerifyNewEntry(const SyncEntity& entry,
+ syncable::MutableEntry* same_id,
+ const bool deleted);
+
+ // Assumes we have an existing entry; check here for updates that break
+ // consistency rules.
+ static VerifyResult VerifyUpdateConsistency(syncable::WriteTransaction* trans,
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id,
+ const bool deleted,
+ const bool is_directory,
+ const bool is_bookmark);
+
+ // Assumes we have an existing entry; verify an update that seems to be
+ // expressing an 'undelete'
+ static VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id);
+
+ // Compute a local predecessor position for |update_item|. The position
+ // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
+ // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
+ // children of |parent_id|.
+ static syncable::Id ComputePrevIdFromServerPosition(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* update_item,
+ const syncable::Id& parent_id);
+
+ // Append |item|, followed by a chain of its predecessors selected by
+ // |inclusion_filter|, to the |commit_ids| vector and tag them as included by
+ // storing in the set |inserted_items|. |inclusion_filter| (typically one of
+ // IS_UNAPPLIED_UPDATE or IS_UNSYNCED) selects which type of predecessors to
+ // include. Returns true if |item| was added, and false if it was already in
+ // the list.
+ //
+ // Use AddPredecessorsThenItem instead of this method if you want the
+ // item to be the last, rather than first, item appended.
+ static bool AddItemThenPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids);
+
+ // Exactly like AddItemThenPredecessors, except items are appended in the
+ // reverse (and generally more useful) order: a chain of predecessors from
+ // far to near, and finally the item.
+ static void AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids);
+
+ static void AddUncommittedParentsAndTheirPredecessors(
+ syncable::BaseTransaction* trans,
+ syncable::MetahandleSet* inserted_items,
+ std::vector<syncable::Id>* commit_ids,
+ syncable::Id parent_id);
+
+ static void MarkDeletedChildrenSynced(
+ const syncable::ScopedDirLookup &dir,
+ std::set<syncable::Id>* deleted_folders);
+
+ // Examine the up-to-date predecessors of this item according to the server
+ // position, and then again according to the local position. Return true
+ // if they match. For an up-to-date item, this should be the case.
+ static bool ServerAndLocalOrdersMatch(syncable::Entry* entry);
+
+ private:
+ // Private ctor/dtor since this class shouldn't be instantiated.
+ SyncerUtil() {}
+ virtual ~SyncerUtil() {}
+ DISALLOW_COPY_AND_ASSIGN(SyncerUtil);
+};
+
+#ifndef OS_WINDOWS
+
+// time.h on Linux and Mac both return seconds since the epoch, this should
+// be converted to milliseconds.
+inline int64 ServerTimeToClientTime(int64 server_time) {
+ return server_time / GG_LONGLONG(1000);
+}
+
+inline int64 ClientTimeToServerTime(int64 client_time) {
+ return client_time * GG_LONGLONG(1000);
+}
+
+// As we truncate server times on the client for posix and on the server for
+// windows we need two ClientAndServerTimeMatch fucntions.
+inline bool ClientAndServerTimeMatch(int64 client_time, int64 server_time) {
+ // Compare at the coarser timescale (client)
+ return client_time == ServerTimeToClientTime(server_time);
+}
+#else
+// The sync server uses Java Times (ms since 1970)
+// and the client uses FILETIMEs (ns since 1601) so we need to convert
+// between the timescales.
+inline int64 ServerTimeToClientTime(int64 server_time) {
+ return server_time * GG_LONGLONG(10000) + GG_LONGLONG(116444736000000000);
+}
+
+inline int64 ClientTimeToServerTime(int64 client_time) {
+ return (client_time - GG_LONGLONG(116444736000000000)) / GG_LONGLONG(10000);
+}
+
+inline bool ClientAndServerTimeMatch(int64 client_time, int64 server_time) {
+ // Compare at the coarser timescale (server)
+ return ClientTimeToServerTime(client_time) == server_time;
+}
+#endif
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
diff --git a/chrome/browser/sync/engine/syncproto.h b/chrome/browser/sync/engine/syncproto.h
new file mode 100644
index 0000000..fe05a75
--- /dev/null
+++ b/chrome/browser/sync/engine/syncproto.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Wrappers to help us work with ids and protobuffers.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
+
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+
+template<class Base>
+class IdWrapper : public Base {
+ public:
+ syncable::Id id() const {
+ return syncable::Id::CreateFromServerId(Base::id_string());
+ }
+ void set_id(const syncable::Id& id) {
+ Base::set_id_string(id.GetServerId());
+ }
+};
+
+// These wrapper classes contain no data, so their super
+// classes can be cast to them directly.
+class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
+ public:
+ void set_parent_id(const syncable::Id& id) {
+ set_parent_id_string(id.GetServerId());
+ }
+ syncable::Id parent_id() const {
+ return syncable::Id::CreateFromServerId(parent_id_string());
+ }
+ void set_old_parent_id(const syncable::Id& id) {
+ IdWrapper<sync_pb::SyncEntity>::set_old_parent_id(
+ id.GetServerId());
+ }
+ syncable::Id old_parent_id() const {
+ return syncable::Id::CreateFromServerId(
+ sync_pb::SyncEntity::old_parent_id());
+ }
+ // Binary predicate helper to determine whether an Entity represents a folder
+ // or non-folder object. Use this instead of checking these properties
+ // directly, because the addition of bookmarks to the protobuf schema
+ // makes the check slightly more tricky.
+ bool IsFolder() const {
+ return (!has_bookmarkdata() || bookmarkdata().bookmark_folder());
+ }
+};
+
+class CommitResponse_EntryResponse
+ : public IdWrapper<sync_pb::CommitResponse_EntryResponse> {
+};
+
+class ClientToServerMessage : public sync_pb::ClientToServerMessage {
+ public:
+ ClientToServerMessage() {
+ set_protocol_version(protocol_version());
+ }
+};
+
+typedef sync_pb::CommitMessage CommitMessage;
+typedef sync_pb::ClientToServerResponse ClientToServerResponse;
+typedef sync_pb::CommitResponse CommitResponse;
+typedef sync_pb::GetUpdatesResponse GetUpdatesResponse;
+typedef sync_pb::GetUpdatesMessage GetUpdatesMessage;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
diff --git a/chrome/browser/sync/engine/syncproto_unittest.cc b/chrome/browser/sync/engine/syncproto_unittest.cc
new file mode 100644
index 0000000..951b852
--- /dev/null
+++ b/chrome/browser/sync/engine/syncproto_unittest.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+class SyncProtoTest : public testing::Test {
+};
+
+TEST_F(SyncProtoTest, ProtocolVersionPresent) {
+ ClientToServerMessage csm;
+ EXPECT_TRUE(csm.has_protocol_version());
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/update_applicator.cc b/chrome/browser/sync/engine/update_applicator.cc
new file mode 100644
index 0000000..17e6b36
--- /dev/null
+++ b/chrome/browser/sync/engine/update_applicator.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/update_applicator.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+UpdateApplicator::UpdateApplicator(SyncerSession* session,
+ const vi64iter& begin,
+ const vi64iter& end)
+ : session_(session), begin_(begin), end_(end), pointer_(begin),
+ progress_(false) {
+ size_t item_count = end - begin;
+ LOG(INFO) << "UpdateApplicator created for " << item_count << " items.";
+ successful_ids_.reserve(item_count);
+ }
+
+// returns true if there's more to do.
+bool UpdateApplicator::AttemptOneApplication(
+ syncable::WriteTransaction* trans) {
+ // If there are no updates left to consider, we're done.
+ if (end_ == begin_)
+ return false;
+ if (pointer_ == end_) {
+ if (!progress_)
+ return false;
+
+ LOG(INFO) << "UpdateApplicator doing additional pass.";
+ pointer_ = begin_;
+ progress_ = false;
+
+ // Clear the tracked failures to avoid double-counting.
+ conflicting_ids_.clear();
+ blocked_ids_.clear();
+ }
+ syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *pointer_);
+ UpdateAttemptResponse updateResponse =
+ SyncerUtil::AttemptToUpdateEntry(trans, &entry, session_);
+ switch (updateResponse) {
+ case SUCCESS:
+ --end_;
+ *pointer_ = *end_;
+ progress_ = true;
+ successful_ids_.push_back(entry.Get(syncable::ID));
+ break;
+ case CONFLICT:
+ pointer_++;
+ conflicting_ids_.push_back(entry.Get(syncable::ID));
+ break;
+ case BLOCKED:
+ pointer_++;
+ blocked_ids_.push_back(entry.Get(syncable::ID));
+ break;
+ }
+ LOG(INFO) << "Apply Status for " << entry.Get(syncable::META_HANDLE)
+ << " is " << updateResponse;
+
+ return true;
+}
+
+bool UpdateApplicator::AllUpdatesApplied() const {
+ return conflicting_ids_.empty() && blocked_ids_.empty() &&
+ begin_ == end_;
+}
+
+void UpdateApplicator::SaveProgressIntoSessionState() {
+ DCHECK(begin_ == end_ || ((pointer_ == end_) && !progress_))
+ << "SaveProgress called before updates exhausted.";
+
+ vector<syncable::Id>::const_iterator i;
+ for (i = conflicting_ids_.begin(); i != conflicting_ids_.end(); ++i) {
+ session_->EraseBlockedItem(*i);
+ session_->AddCommitConflict(*i);
+ session_->AddAppliedUpdate(CONFLICT, *i);
+ }
+ for (i = blocked_ids_.begin(); i != blocked_ids_.end(); ++i) {
+ session_->AddBlockedItem(*i);
+ session_->EraseCommitConflict(*i);
+ session_->AddAppliedUpdate(BLOCKED, *i);
+ }
+ for (i = successful_ids_.begin(); i != successful_ids_.end(); ++i) {
+ session_->EraseCommitConflict(*i);
+ session_->EraseBlockedItem(*i);
+ session_->AddAppliedUpdate(SUCCESS, *i);
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/update_applicator.h b/chrome/browser/sync/engine/update_applicator.h
new file mode 100644
index 0000000..3d500171
--- /dev/null
+++ b/chrome/browser/sync/engine/update_applicator.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// An UpdateApplicator is used to iterate over a number of unapplied
+// updates, applying them to the client using the given syncer session.
+//
+// UpdateApplicator might resemble an iterator, but it actually keeps retrying
+// failed updates until no remaining updates can be successfully applied.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
+
+#include <vector>
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace syncable {
+class Id;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class SyncerSession;
+
+class UpdateApplicator {
+ public:
+ typedef std::vector<int64>::iterator vi64iter;
+
+ UpdateApplicator(SyncerSession* session, const vi64iter& begin,
+ const vi64iter& end);
+ // returns true if there's more we can do.
+ bool AttemptOneApplication(syncable::WriteTransaction* trans);
+ // return true if we've applied all updates.
+ bool AllUpdatesApplied() const;
+
+ // This class does not automatically save its progress into the
+ // SyncerSession -- to get that to happen, call this method after
+ // update application is finished (i.e., when AttemptOneAllocation
+ // stops returning true).
+ void SaveProgressIntoSessionState();
+
+ private:
+ SyncerSession* const session_;
+ vi64iter const begin_;
+ vi64iter end_;
+ vi64iter pointer_;
+ bool progress_;
+
+ // Track the result of the various items.
+ std::vector<syncable::Id> conflicting_ids_;
+ std::vector<syncable::Id> blocked_ids_;
+ std::vector<syncable::Id> successful_ids_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
new file mode 100644
index 0000000..dee544d
--- /dev/null
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -0,0 +1,102 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "chrome/browser/sync/engine/verify_updates_command.h"
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+using syncable::WriteTransaction;
+
+using syncable::GET_BY_ID;
+using syncable::SYNCER;
+
+VerifyUpdatesCommand::VerifyUpdatesCommand() {}
+VerifyUpdatesCommand::~VerifyUpdatesCommand() {}
+
+void VerifyUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+ LOG(INFO) << "Beginning Update Verification";
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ GetUpdatesResponse updates = session->update_response().get_updates();
+ int update_count = updates.entries().size();
+
+ LOG(INFO) << update_count << " entries to verify";
+ for (int i = 0; i < update_count; i++) {
+ const SyncEntity entry =
+ *reinterpret_cast<const SyncEntity *>(&(updates.entries(i)));
+ // Needs to be done separately in order to make sure the update processing
+ // still happens like normal. We should really just use one type of
+ // ID in fact, there isn't actually a need for server_knows and not IDs.
+ SyncerUtil::AttemptReuniteLostCommitResponses(&trans, entry,
+ trans.directory()->cache_guid());
+ VerifyResult result = VerifyUpdate(&trans, entry);
+ session->AddVerifyResult(result, entry);
+ }
+}
+
+VerifyResult VerifyUpdatesCommand::VerifyUpdate(
+ syncable::WriteTransaction* trans, const SyncEntity& entry) {
+ syncable::Id id = entry.id();
+
+ const bool deleted = entry.has_deleted() && entry.deleted();
+ const bool is_directory = entry.IsFolder();
+ const bool is_bookmark = entry.has_bookmarkdata();
+
+ if (!id.ServerKnows()) {
+ LOG(ERROR) << "Illegal negative id in received updates";
+ return VERIFY_FAIL;
+ }
+ if (!entry.parent_id().ServerKnows()) {
+ LOG(ERROR) << "Illegal parent id in received updates";
+ return VERIFY_FAIL;
+ }
+ {
+ SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
+ if ((name.value().empty() || name.non_unique_value().empty()) &&
+ !deleted) {
+ LOG(ERROR) << "Zero length name in non-deleted update";
+ return VERIFY_FAIL;
+ }
+ }
+
+ syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+ VerifyResult result = VERIFY_UNDECIDED;
+ result = SyncerUtil::VerifyNewEntry(entry, &same_id, deleted);
+
+ if (VERIFY_UNDECIDED == result) {
+ if (deleted)
+ result = VERIFY_SUCCESS;
+ }
+
+ // If we have an existing entry, we check here for updates that break
+ // consistency rules.
+ if (VERIFY_UNDECIDED == result) {
+ result = SyncerUtil::VerifyUpdateConsistency(trans, entry, &same_id,
+ deleted, is_directory, is_bookmark);
+ }
+
+ if (VERIFY_UNDECIDED == result)
+ return VERIFY_SUCCESS; // No news is good news.
+ else
+ return result; // This might be VERIFY_SUCCESS as well
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/verify_updates_command.h b/chrome/browser/sync/engine/verify_updates_command.h
new file mode 100644
index 0000000..c7970e9
--- /dev/null
+++ b/chrome/browser/sync/engine/verify_updates_command.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+
+#include "base/basictypes.h"
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+}
+
+namespace browser_sync {
+
+// Verifies the response from a GetUpdates request. All invalid updates
+// will be noted in the SyncerSession after this command is executed.
+class VerifyUpdatesCommand : public SyncerCommand {
+ public:
+ VerifyUpdatesCommand();
+ virtual ~VerifyUpdatesCommand();
+ virtual void ExecuteImpl(SyncerSession *session);
+
+ VerifyResult VerifyUpdate(syncable::WriteTransaction* trans,
+ const SyncEntity& entry);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VerifyUpdatesCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/notifier/base/async_dns_lookup.cc b/chrome/browser/sync/notifier/base/async_dns_lookup.cc
new file mode 100644
index 0000000..0d3ce87
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/async_dns_lookup.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/async_dns_lookup.h"
+
+#ifdef POSIX
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netdb.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#endif // POSIX
+
+#include <vector>
+
+#include "chrome/browser/sync/notifier/base/nethelpers.h"
+#include "chrome/browser/sync/notifier/gaia_auth/inet_aton.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/socketaddress.h"
+#include "talk/base/thread.h"
+
+enum { MSG_TIMEOUT = talk_base::SignalThread::ST_MSG_FIRST_AVAILABLE };
+
+#ifndef WIN32
+const int WSAHOST_NOT_FOUND = 11001; // follows the format in winsock2.h
+#endif // WIN32
+
+namespace notifier {
+
+AsyncDNSLookup::AsyncDNSLookup(const talk_base::SocketAddress& server)
+ : server_(new talk_base::SocketAddress(server)),
+ error_(0) {
+ // Timeout after 5 seconds.
+ talk_base::Thread::Current()->PostDelayed(5000, this, MSG_TIMEOUT);
+}
+
+AsyncDNSLookup::~AsyncDNSLookup() {
+}
+
+void AsyncDNSLookup::DoWork() {
+ std::string hostname(server_->IPAsString());
+
+ in_addr addr;
+ if (inet_aton(hostname.c_str(), &addr)) {
+ talk_base::CritScope scope(&cs_);
+ ip_list_.push_back(talk_base::NetworkToHost32(addr.s_addr));
+ } else {
+ LOG_F(LS_VERBOSE) << "(" << hostname << ")";
+ hostent ent;
+ char buffer[8192];
+ int errcode = 0;
+ hostent* host = SafeGetHostByName(hostname.c_str(), &ent,
+ buffer, sizeof(buffer),
+ &errcode);
+ talk_base::Thread::Current()->Clear(this, MSG_TIMEOUT);
+ if (host) {
+ talk_base::CritScope scope(&cs_);
+
+ // Check to see if this already timed out.
+ if (error_ == 0) {
+ for (int index = 0; true; ++index) {
+ uint32* addr = reinterpret_cast<uint32*>(host->h_addr_list[index]);
+ if (addr == 0) { // 0 = end of list
+ break;
+ }
+ uint32 ip = talk_base::NetworkToHost32(*addr);
+ LOG_F(LS_VERBOSE) << "(" << hostname << ") resolved to: "
+ << talk_base::SocketAddress::IPToString(ip);
+ ip_list_.push_back(ip);
+ }
+ // Maintain the invariant that either the list is not empty
+ // or the error is non zero when we are done with processing
+ // the dnslookup.
+ if (ip_list_.empty() && error_ == 0) {
+ error_ = WSAHOST_NOT_FOUND;
+ }
+ }
+ FreeHostEnt(host);
+ } else {
+ { // Scoping for the critical section.
+ talk_base::CritScope scope(&cs_);
+
+ // Check to see if this already timed out.
+ if (error_ == 0) {
+ error_ = errcode;
+ }
+ }
+ LOG_F(LS_ERROR) << "(" << hostname << ") error: " << error_;
+ }
+ }
+}
+
+void AsyncDNSLookup::OnMessage(talk_base::Message* message) {
+ ASSERT(message);
+ if (message->message_id == MSG_TIMEOUT) {
+ OnTimeout();
+ } else {
+ talk_base::SignalThread::OnMessage(message);
+ }
+}
+
+void AsyncDNSLookup::OnTimeout() {
+ // Allow the scope for the critical section to be the whole
+ // method, just to be sure that the worker thread can't exit
+ // while we are doing SignalWorkDone (because that could possibly
+ // cause the class to be deleted).
+ talk_base::CritScope scope(&cs_);
+
+ // Check to see if the ip list was already filled (or errored out).
+ if (!ip_list_.empty() || error_ != 0) {
+ return;
+ }
+
+ // Worker thread is taking too long so timeout.
+ error_ = WSAHOST_NOT_FOUND;
+
+ // Rely on the caller to do the Release/Destroy.
+ //
+ // Doing this signal while holding cs_ won't cause a deadlock because
+ // the AsyncDNSLookup::DoWork thread doesn't have any locks at this point,
+ // and it is the only thread being held up by this.
+ SignalWorkDone(this);
+
+ // Ensure that no more "WorkDone" signaling is done.
+ // Don't call Release or Destroy since that was already done
+ // by the callback.
+ SignalWorkDone.disconnect_all();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/async_dns_lookup.h b/chrome/browser/sync/notifier/base/async_dns_lookup.h
new file mode 100644
index 0000000..123d311
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/async_dns_lookup.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_DNS_LOOKUP_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_DNS_LOOKUP_H_
+
+#include <vector>
+
+#include "base/scoped_ptr.h"
+#include "talk/base/signalthread.h"
+
+namespace talk_base {
+class SocketAddress;
+class Task;
+}
+
+namespace notifier {
+
+class AsyncDNSLookup : public talk_base::SignalThread {
+ public:
+ explicit AsyncDNSLookup(const talk_base::SocketAddress& server);
+ virtual ~AsyncDNSLookup();
+
+ const int error() const {
+ return error_;
+ }
+
+ const std::vector<uint32>& ip_list() const {
+ return ip_list_;
+ }
+
+ protected:
+ // SignalThread Interface
+ virtual void DoWork();
+ virtual void OnMessage(talk_base::Message* message);
+
+ private:
+ void OnTimeout();
+
+ scoped_ptr<talk_base::SocketAddress> server_;
+ talk_base::CriticalSection cs_;
+ int error_;
+ std::vector<uint32> ip_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncDNSLookup);
+};
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_DNS_LOOKUP_H_
diff --git a/chrome/browser/sync/notifier/base/async_network_alive.h b/chrome/browser/sync/notifier/base/async_network_alive.h
new file mode 100644
index 0000000..330348d
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/async_network_alive.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_NETWORK_ALIVE_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_NETWORK_ALIVE_H_
+
+#include "talk/base/signalthread.h"
+
+namespace notifier {
+
+// System specific info needed for changes
+class PlatformNetworkInfo;
+
+class AsyncNetworkAlive : public talk_base::SignalThread {
+ public:
+ static AsyncNetworkAlive* Create();
+
+ virtual ~AsyncNetworkAlive() {}
+
+ bool alive() const {
+ return alive_;
+ }
+
+ bool error() const {
+ return error_;
+ }
+
+ void SetWaitForNetworkChange(PlatformNetworkInfo* previous_info) {
+ network_info_ = previous_info;
+ }
+
+ PlatformNetworkInfo* ReleaseInfo() {
+ PlatformNetworkInfo* info = network_info_;
+ network_info_ = NULL;
+ return info;
+ }
+
+ protected:
+ AsyncNetworkAlive() : network_info_(NULL), alive_(false), error_(false) {
+ }
+
+ protected:
+ PlatformNetworkInfo* network_info_;
+ bool alive_;
+ bool error_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncNetworkAlive);
+};
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_ASYNC_NETWORK_ALIVE_H_
diff --git a/chrome/browser/sync/notifier/base/fastalloc.h b/chrome/browser/sync/notifier/base/fastalloc.h
new file mode 100644
index 0000000..ed19a53
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/fastalloc.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_FASTALLOC_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_FASTALLOC_H_
+
+#include <assert.h>
+
+namespace notifier {
+
+template<class T, size_t Size>
+class FastAlloc {
+ public:
+ FastAlloc() : buffer_(NULL), size_(0) { };
+ ~FastAlloc() { freeBuffer(); }
+ T* get_buffer(size_t size) {
+ if (size_ != 0) {
+ // We only allow one call to get_buffer. This makes the logic here
+ // simpler, and the user has to worry less about someone else calling
+ // get_buffer again on the same FastAlloc object and invalidating the
+ // memory they were using.
+ assert(false && "get_buffer may one be called once");
+ return NULL;
+ }
+
+ if (size <= Size) {
+ buffer_ = internal_buffer_;
+ } else {
+ buffer_ = new T[size];
+ }
+
+ if (buffer_ != NULL) {
+ size_ = size;
+ }
+ return buffer_;
+ }
+
+ private:
+ void freeBuffer() {
+#ifdef DEBUG
+ memset(buffer_, 0xCC, size_ * sizeof(T));
+#endif
+
+ if (buffer_ != NULL && buffer_ != internal_buffer_) {
+ delete[] buffer_;
+ }
+ buffer_ = NULL;
+ size_ = 0;
+ }
+
+ T* buffer_;
+ T internal_buffer_[Size];
+ size_t size_;
+};
+
+}
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_FASTALLOC_H_
diff --git a/chrome/browser/sync/notifier/base/linux/network_status_detector_task_linux.cc b/chrome/browser/sync/notifier/base/linux/network_status_detector_task_linux.cc
new file mode 100644
index 0000000..e232bcb
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/linux/network_status_detector_task_linux.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task.h"
+
+namespace notifier {
+
+NetworkStatusDetectorTask* NetworkStatusDetectorTask::Create(
+ talk_base::Task* parent) {
+ // TODO(sync): No implementation for linux
+ return NULL;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/linux/time_linux.cc b/chrome/browser/sync/notifier/base/linux/time_linux.cc
new file mode 100644
index 0000000..ea2acf5
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/linux/time_linux.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Time functions
+
+#include "chrome/browser/sync/notifier/base/posix/time_posix.cc"
diff --git a/chrome/browser/sync/notifier/base/nethelpers.cc b/chrome/browser/sync/notifier/base/nethelpers.cc
new file mode 100644
index 0000000..23fc8d2
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/nethelpers.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/nethelpers.h"
+
+namespace notifier {
+
+hostent* SafeGetHostByName(const char* hostname, hostent* host,
+ char* buffer, size_t buffer_len,
+ int* herrno) {
+ hostent* result = NULL;
+#if WIN32
+ result = gethostbyname(hostname);
+ if (!result) {
+ *herrno = WSAGetLastError();
+ }
+#elif OS_LINUX
+ gethostbyname_r(hostname, host, buffer, buffer_len, &result, herrno);
+#elif OSX
+ result = getipnodebyname(hostname, AF_INET, AI_DEFAULT, herrno);
+#else
+#error "I don't know how to do gethostbyname safely on your system."
+#endif
+ return result;
+}
+
+// This function should mirror the above function, and free any resources
+// allocated by the above.
+void FreeHostEnt(hostent* host) {
+#if WIN32
+ // No need to free anything, struct returned is static memory.
+#elif OS_LINUX
+ // No need to free anything, we pass in a pointer to a struct.
+#elif OSX
+ freehostent(host);
+#else
+#error "I don't know how to free a hostent on your system."
+#endif
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/nethelpers.h b/chrome/browser/sync/notifier/base/nethelpers.h
new file mode 100644
index 0000000..d2b9fd4
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/nethelpers.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETHELPERS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETHELPERS_H_
+
+#ifdef POSIX
+#include <netdb.h>
+#include <cstddef>
+#elif WIN32
+#include <winsock2.h>
+#endif
+
+namespace notifier {
+
+hostent* SafeGetHostByName(const char* hostname, hostent* host,
+ char* buffer, size_t buffer_len,
+ int* herrno);
+
+void FreeHostEnt(hostent* host);
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETHELPERS_H_
diff --git a/chrome/browser/sync/notifier/base/network_status_detector_task.cc b/chrome/browser/sync/notifier/base/network_status_detector_task.cc
new file mode 100644
index 0000000..f9acd88
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/network_status_detector_task.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task.h"
+
+namespace notifier {
+
+void NetworkStatusDetectorTask::DetectNetworkState() {
+ // If the detection has been finished, then just broadcast the current
+ // state. Otherwise, allow the signal to be sent when the initial
+ // detection is finished.
+ if (initial_detection_done_) {
+ SignalNetworkStateDetected(is_alive_, is_alive_);
+ }
+}
+
+void NetworkStatusDetectorTask::SetNetworkAlive(bool is_alive) {
+ bool was_alive = is_alive_;
+ is_alive_ = is_alive;
+
+ if (!initial_detection_done_ || was_alive != is_alive_) {
+ initial_detection_done_ = true;
+
+ // Tell everyone about the network state change.
+ SignalNetworkStateDetected(was_alive, is_alive_);
+ }
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/network_status_detector_task.h b/chrome/browser/sync/notifier/base/network_status_detector_task.h
new file mode 100644
index 0000000..4cf190e
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/network_status_detector_task.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_H_
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/task.h"
+
+namespace notifier {
+class AsyncNetworkAlive;
+
+// Detects the current network state and any changes to that.
+class NetworkStatusDetectorTask : public talk_base::Task,
+ public sigslot::has_slots<> {
+ public:
+ // Create an instance of (a subclass of) this class.
+ static NetworkStatusDetectorTask* Create(talk_base::Task* parent);
+
+ // Determines the current network state and
+ // then calls SignalNetworkStateDetected.
+ void DetectNetworkState();
+
+ // Fires whenever the network state is detected.
+ // SignalNetworkStateDetected(was_alive, is_alive);
+ sigslot::signal2<bool, bool> SignalNetworkStateDetected;
+
+ protected:
+ explicit NetworkStatusDetectorTask(talk_base::Task* parent)
+ : talk_base::Task(parent),
+ initial_detection_done_(false),
+ is_alive_(false) {
+ }
+
+ virtual ~NetworkStatusDetectorTask() { }
+
+ virtual int ProcessStart() = 0;
+
+ // Stay around until aborted.
+ virtual int ProcessResponse() {
+ return STATE_BLOCKED;
+ }
+
+ void SetNetworkAlive(bool is_alive);
+
+ private:
+ bool initial_detection_done_;
+ bool is_alive_;
+
+ DISALLOW_COPY_AND_ASSIGN(NetworkStatusDetectorTask);
+};
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_H_
diff --git a/chrome/browser/sync/notifier/base/network_status_detector_task_mt.cc b/chrome/browser/sync/notifier/base/network_status_detector_task_mt.cc
new file mode 100644
index 0000000..d4e406c
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/network_status_detector_task_mt.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task_mt.h"
+
+#include "chrome/browser/sync/notifier/base/async_network_alive.h"
+#include "chrome/browser/sync/notifier/base/signal_thread_task.h"
+
+#include "talk/base/common.h"
+
+namespace notifier {
+
+void NetworkStatusDetectorTaskMT::OnNetworkAliveDone(
+ AsyncNetworkAlive* network_alive) {
+ ASSERT(network_alive);
+ SetNetworkAlive(network_alive->alive());
+ // If we got an error from detecting the network alive state,
+ // then stop retrying the detection.
+ if (network_alive->error()) {
+ return;
+ }
+ StartAsyncDetection(network_alive->ReleaseInfo());
+}
+
+void NetworkStatusDetectorTaskMT::StartAsyncDetection(
+ PlatformNetworkInfo* previous_info) {
+ // Use the AsyncNetworkAlive to determine the network state (and
+ // changes in the network state).
+ AsyncNetworkAlive* network_alive = AsyncNetworkAlive::Create();
+
+ if (previous_info) {
+ network_alive->SetWaitForNetworkChange(previous_info);
+ }
+ SignalThreadTask<AsyncNetworkAlive>* task =
+ new SignalThreadTask<AsyncNetworkAlive>(this, &network_alive);
+ task->SignalWorkDone.connect(
+ this, &NetworkStatusDetectorTaskMT::OnNetworkAliveDone);
+ task->Start();
+}
+
+NetworkStatusDetectorTask* NetworkStatusDetectorTask::Create(
+ talk_base::Task* parent) {
+ ASSERT(parent);
+ return new NetworkStatusDetectorTaskMT(parent);
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/network_status_detector_task_mt.h b/chrome/browser/sync/notifier/base/network_status_detector_task_mt.h
new file mode 100644
index 0000000..e1812f2
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/network_status_detector_task_mt.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_MT_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_MT_H_
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task.h"
+
+namespace notifier {
+
+class AsyncNetworkAlive;
+class PlatformNetworkInfo;
+
+class NetworkStatusDetectorTaskMT : public NetworkStatusDetectorTask {
+ public:
+ explicit NetworkStatusDetectorTaskMT(talk_base::Task* parent)
+ : NetworkStatusDetectorTask(parent) {
+ }
+
+ protected:
+ virtual int ProcessStart() {
+ StartAsyncDetection(NULL);
+ return STATE_RESPONSE;
+ }
+
+ private:
+ void OnNetworkAliveDone(AsyncNetworkAlive* network_alive);
+ void StartAsyncDetection(PlatformNetworkInfo* network_info);
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_NETWORK_STATUS_DETECTOR_TASK_MT_H_
diff --git a/chrome/browser/sync/notifier/base/posix/time_posix.cc b/chrome/browser/sync/notifier/base/posix/time_posix.cc
new file mode 100644
index 0000000..849f802
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/posix/time_posix.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "chrome/browser/sync/notifier/base/time.h"
+
+namespace notifier {
+
+time64 GetCurrent100NSTime() {
+ struct timeval tv;
+ struct timezone tz;
+
+ gettimeofday(&tv, &tz);
+
+ time64 retval = tv.tv_sec * kSecsTo100ns;
+ retval += tv.tv_usec * kMicrosecsTo100ns;
+ retval += kStart100NsTimeToEpoch;
+ return retval;
+}
+
+time64 TmToTime64(const struct tm& tm) {
+ struct tm tm_temp;
+ memcpy(&tm_temp, &tm, sizeof(struct tm));
+ time_t t = timegm(&tm_temp);
+ return t * kSecsTo100ns;
+}
+
+bool Time64ToTm(time64 t, struct tm* tm) {
+ assert(tm != NULL);
+ time_t secs = t / kSecsTo100ns;
+ gmtime_r(&secs, tm);
+ return true;
+}
+
+bool UtcTimeToLocalTime(struct tm* tm) {
+ assert(tm != NULL);
+ time_t t = timegm(tm);
+ localtime_r(&t, tm);
+ return true;
+}
+
+bool LocalTimeToUtcTime(struct tm* tm) {
+ assert(tm != NULL);
+ time_t t = mktime(tm);
+ gmtime_r(&t, tm);
+ return true;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/signal_thread_task.h b/chrome/browser/sync/notifier/base/signal_thread_task.h
new file mode 100644
index 0000000..93059d8
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/signal_thread_task.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_SIGNAL_THREAD_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_SIGNAL_THREAD_TASK_H_
+
+#include "talk/base/common.h"
+#include "talk/base/signalthread.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/task.h"
+
+namespace notifier {
+
+template<class T>
+class SignalThreadTask : public talk_base::Task,
+ public sigslot::has_slots<> {
+ public:
+ // Takes ownership of signal_thread.
+ SignalThreadTask(talk_base::Task* task_parent, T** signal_thread)
+ : talk_base::Task(task_parent),
+ signal_thread_(NULL),
+ finished_(false) {
+ SetSignalThread(signal_thread);
+ }
+
+ virtual ~SignalThreadTask() {
+ ClearSignalThread();
+ }
+
+ virtual void Stop() {
+ Task::Stop();
+ ClearSignalThread();
+ }
+
+ virtual int ProcessStart() {
+ ASSERT(GetState() == talk_base::Task::STATE_START);
+ signal_thread_->SignalWorkDone.connect(
+ this,
+ &SignalThreadTask<T>::OnWorkDone);
+ signal_thread_->Start();
+ return talk_base::Task::STATE_RESPONSE;
+ }
+
+ int ProcessResponse() {
+ if (!finished_) {
+ return talk_base::Task::STATE_BLOCKED;
+ }
+ SignalWorkDone(signal_thread_);
+ ClearSignalThread();
+ return talk_base::Task::STATE_DONE;
+ }
+
+ sigslot::signal1<T*> SignalWorkDone;
+
+ private:
+ // Takes ownership of signal_thread
+ void SetSignalThread(T** signal_thread) {
+ ASSERT(!signal_thread_ && signal_thread && *signal_thread);
+ // Verify that no one is listening to the signal thread
+ // for work done. They should be using this class instead.
+ ASSERT((*signal_thread)->SignalWorkDone.is_empty());
+
+ signal_thread_ = *signal_thread;
+
+ // Helps callers not to use signal thread after this point
+ // since this class has taken ownership (and avoid the
+ // error of doing signal_thread->Start()).
+ *signal_thread = NULL;
+ }
+
+ void OnWorkDone(talk_base::SignalThread* signal_thread) {
+ ASSERT(signal_thread == signal_thread_);
+ finished_ = true;
+ Wake();
+ }
+
+ void ClearSignalThread() {
+ if (signal_thread_) {
+ signal_thread_->Destroy();
+ signal_thread_ = NULL;
+ }
+ }
+
+ T* signal_thread_;
+ bool finished_;
+ DISALLOW_COPY_AND_ASSIGN(SignalThreadTask);
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_SIGNAL_THREAD_TASK_H_
diff --git a/chrome/browser/sync/notifier/base/static_assert.h b/chrome/browser/sync/notifier/base/static_assert.h
new file mode 100644
index 0000000..78e6ac3
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/static_assert.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_STATIC_ASSERT_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_STATIC_ASSERT_H_
+
+template <bool> struct STATIC_ASSERTION_FAILURE;
+
+template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
+
+template<int> struct static_assert_test{};
+
+#define STATIC_ASSERT(B) \
+typedef static_assert_test<\
+ sizeof(STATIC_ASSERTION_FAILURE< (bool)( B ) >)>\
+ static_assert_typedef_ ## __LINE__
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_STATIC_ASSERT_H_
diff --git a/chrome/browser/sync/notifier/base/string.cc b/chrome/browser/sync/notifier/base/string.cc
new file mode 100644
index 0000000..c3ef54d
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/string.cc
@@ -0,0 +1,403 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef OS_MACOSX
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include <float.h>
+#include <string.h>
+
+#include "base/format_macros.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/notifier/base/string.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+
+using base::snprintf;
+
+namespace notifier {
+
+std::string HtmlEncode(const std::string& src) {
+ size_t max_length = src.length() * 6 + 1;
+ std::string dest;
+ dest.resize(max_length);
+ size_t new_size = talk_base::html_encode(&dest[0], max_length,
+ src.data(), src.length());
+ dest.resize(new_size);
+ return dest;
+}
+
+std::string HtmlDecode(const std::string& src) {
+ size_t max_length = src.length() + 1;
+ std::string dest;
+ dest.resize(max_length);
+ size_t new_size = talk_base::html_decode(&dest[0], max_length,
+ src.data(), src.length());
+ dest.resize(new_size);
+ return dest;
+}
+
+std::string UrlEncode(const std::string& src) {
+ size_t max_length = src.length() * 6 + 1;
+ std::string dest;
+ dest.resize(max_length);
+ size_t new_size = talk_base::url_encode(&dest[0], max_length,
+ src.data(), src.length());
+ dest.resize(new_size);
+ return dest;
+}
+
+std::string UrlDecode(const std::string& src) {
+ size_t max_length = src.length() + 1;
+ std::string dest;
+ dest.resize(max_length);
+ size_t new_size = talk_base::url_decode(&dest[0], max_length,
+ src.data(), src.length());
+ dest.resize(new_size);
+ return dest;
+}
+
+int CharToHexValue(char hex) {
+ if (hex >= '0' && hex <= '9') {
+ return hex - '0';
+ } else if (hex >= 'A' && hex <= 'F') {
+ return hex - 'A' + 10;
+ } else if (hex >= 'a' && hex <= 'f') {
+ return hex - 'a' + 10;
+ } else {
+ return -1;
+ }
+}
+
+// Template function to convert a string to an int/int64
+// If strict is true, check for the validity and overflow
+template<typename T>
+bool ParseStringToIntTemplate(const char* str,
+ T* value,
+ bool strict,
+ T min_value) {
+ ASSERT(str);
+ ASSERT(value);
+
+ // Skip spaces
+ while (*str == ' ') {
+ ++str;
+ }
+
+ // Process sign
+ int c = static_cast<int>(*str++); // current char
+ int possible_sign = c; // save sign indication
+ if (c == '-' || c == '+') {
+ c = static_cast<int>(*str++);
+ }
+
+ // Process numbers
+ T total = 0;
+ while (c && (c = CharToDigit(static_cast<char>(c))) != -1) {
+ // Check for overflow
+ if (strict && (total < min_value / 10 ||
+ (total == min_value / 10 &&
+ c > ((-(min_value + 10)) % 10)))) {
+ return false;
+ }
+
+ // Accumulate digit
+ // Note that we accumulate in the negative direction so that we will not
+ // blow away with the largest negative number
+ total = 10 * total - c;
+
+ // Get next char
+ c = static_cast<int>(*str++);
+ }
+
+ // Fail if encountering non-numeric character
+ if (strict && c == -1) {
+ return false;
+ }
+
+ // Negate the number if needed
+ if (possible_sign == '-') {
+ *value = total;
+ } else {
+ // Check for overflow
+ if (strict && total == min_value) {
+ return false;
+ }
+
+ *value = -total;
+ }
+
+ return true;
+}
+
+// Convert a string to an int
+// If strict is true, check for the validity and overflow
+bool ParseStringToInt(const char* str, int* value, bool strict) {
+ return ParseStringToIntTemplate<int>(str, value, strict, kint32min);
+}
+
+// Convert a string to an int
+// This version does not check for the validity and overflow
+int StringToInt(const char* str) {
+ int value = 0;
+ ParseStringToInt(str, &value, false);
+ return value;
+}
+
+// Convert a string to an unsigned int.
+// If strict is true, check for the validity and overflow
+bool ParseStringToUint(const char* str, uint32* value, bool strict) {
+ ASSERT(str);
+ ASSERT(value);
+
+ int64 int64_value;
+ if (!ParseStringToInt64(str, &int64_value, strict)) {
+ return false;
+ }
+ if (int64_value < 0 || int64_value > kuint32max) {
+ return false;
+ }
+
+ *value = static_cast<uint32>(int64_value);
+ return true;
+}
+
+// Convert a string to an int
+// This version does not check for the validity and overflow
+uint32 StringToUint(const char* str) {
+ uint32 value = 0;
+ ParseStringToUint(str, &value, false);
+ return value;
+}
+
+// Convert a string to an int64
+// If strict is true, check for the validity and overflow
+bool ParseStringToInt64(const char* str, int64* value, bool strict) {
+ return ParseStringToIntTemplate<int64>(str, value, strict, kint64min);
+}
+
+// Convert a string to an int64
+// This version does not check for the validity and overflow
+int64 StringToInt64(const char* str) {
+ int64 value = 0;
+ ParseStringToInt64(str, &value, false);
+ return value;
+}
+
+// Convert a string to a double
+// If strict is true, check for the validity and overflow
+bool ParseStringToDouble(const char* str, double* value, bool strict) {
+ ASSERT(str);
+ ASSERT(value);
+
+ // Skip spaces
+ while (*str == ' ') {
+ ++str;
+ }
+
+ // Process sign
+ int c = static_cast<int>(*str++); // current char
+ int sign = c; // save sign indication
+ if (c == '-' || c == '+') {
+ c = static_cast<int>(*str++);
+ }
+
+ // Process numbers before "."
+ double total = 0.0;
+ while (c && (c != '.') && (c = CharToDigit(static_cast<char>(c))) != -1) {
+ // Check for overflow
+ if (strict && total >= DBL_MAX / 10) {
+ return false;
+ }
+
+ // Accumulate digit
+ total = 10.0 * total + c;
+
+ // Get next char
+ c = static_cast<int>(*str++);
+ }
+
+ // Process "."
+ if (c == '.') {
+ c = static_cast<int>(*str++);
+ } else {
+ // Fail if encountering non-numeric character
+ if (strict && c == -1) {
+ return false;
+ }
+ }
+
+ // Process numbers after "."
+ double power = 1.0;
+ while ((c = CharToDigit(static_cast<char>(c))) != -1) {
+ // Check for overflow
+ if (strict && total >= DBL_MAX / 10) {
+ return false;
+ }
+
+ // Accumulate digit
+ total = 10.0 * total + c;
+ power *= 10.0;
+
+ // Get next char
+ c = static_cast<int>(*str++);
+ }
+
+ // Get the final number
+ *value = total / power;
+ if (sign == '-') {
+ *value = -(*value);
+ }
+
+ return true;
+}
+
+// Convert a string to a double
+// This version does not check for the validity and overflow
+double StringToDouble(const char* str) {
+ double value = 0;
+ ParseStringToDouble(str, &value, false);
+ return value;
+}
+
+// Convert a float to a string
+std::string FloatToString(float f) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%f", f);
+ return std::string(buf);
+}
+
+std::string DoubleToString(double d) {
+ char buf[160];
+ snprintf(buf, sizeof(buf), "%.17g", d);
+ return std::string(buf);
+}
+
+std::string UIntToString(uint32 i) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%lu", i);
+ return std::string(buf);
+}
+
+// Convert an int to a string
+std::string IntToString(int i) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%d", i);
+ return std::string(buf);
+}
+
+// Convert an int64 to a string
+std::string Int64ToString(int64 i64) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%" PRId64 "d", i64);
+ return std::string(buf);
+}
+
+std::string UInt64ToString(uint64 i64) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%" PRId64 "u", i64);
+ return std::string(buf);
+}
+
+std::string Int64ToHexString(int64 i64) {
+ char buf[80];
+ snprintf(buf, sizeof(buf), "%" PRId64 "x", i64);
+ return std::string(buf);
+}
+
+// Parse a single "delim" delimited string from "*source"
+// Modify *source to point after the delimiter.
+// If no delimiter is present after the string, set *source to NULL.
+//
+// Mainly a stringified wrapper around strpbrk()
+std::string SplitOneStringToken(const char** source, const char* delim) {
+ ASSERT(source);
+ ASSERT(delim);
+
+ if (!*source) {
+ return std::string();
+ }
+ const char* begin = *source;
+ *source = strpbrk(*source, delim);
+ if (*source) {
+ return std::string(begin, (*source)++);
+ } else {
+ return std::string(begin);
+ }
+}
+
+std::string LowerWithUnderToPascalCase(const char* lower_with_under) {
+ ASSERT(lower_with_under);
+
+ std::string pascal_case;
+ bool make_upper = true;
+ for (; *lower_with_under != '\0'; lower_with_under++) {
+ char current_char = *lower_with_under;
+ if (current_char == '_') {
+ ASSERT(!make_upper);
+ make_upper = true;
+ continue;
+ }
+ if (make_upper) {
+ current_char = toupper(current_char);
+ make_upper = false;
+ }
+ pascal_case.append(1, current_char);
+ }
+ return pascal_case;
+}
+
+std::string PascalCaseToLowerWithUnder(const char* pascal_case) {
+ ASSERT(pascal_case);
+
+ std::string lower_with_under;
+ bool previous_was_upper = true;
+ for(; *pascal_case != '\0'; pascal_case++) {
+ char current_char = *pascal_case;
+ if (isupper(current_char)) {
+ // DNSName should be dns_name
+ if ((islower(pascal_case[1]) && !lower_with_under.empty()) ||
+ !previous_was_upper) {
+ lower_with_under.append(1, '_');
+ }
+ current_char = tolower(current_char);
+ } else if (previous_was_upper) {
+ previous_was_upper = false;
+ }
+ lower_with_under.append(1, current_char);
+ }
+ return lower_with_under;
+}
+void StringReplace(std::string* s,
+ const char* old_sub,
+ const char* new_sub,
+ bool replace_all) {
+ ASSERT(s);
+
+ // If old_sub is empty, nothing to do
+ if (!old_sub || !*old_sub) {
+ return;
+ }
+
+ int old_sub_size = strlen(old_sub);
+ std::string res;
+ std::string::size_type start_pos = 0;
+
+ do {
+ std::string::size_type pos = s->find(old_sub, start_pos);
+ if (pos == std::string::npos) {
+ break;
+ }
+ res.append(*s, start_pos, pos - start_pos);
+ res.append(new_sub);
+ start_pos = pos + old_sub_size; // start searching again after the "old"
+ } while (replace_all);
+ res.append(*s, start_pos, s->length() - start_pos);
+
+ *s = res;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/string.h b/chrome/browser/sync/notifier/base/string.h
new file mode 100644
index 0000000..725cc66
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/string.h
@@ -0,0 +1,381 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_STRING_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_STRING_H_
+
+#ifdef COMPILER_MSVC
+#include <xhash>
+#elif defined(__GNUC__)
+#include <ext/hash_map>
+#endif
+
+#include <ctype.h>
+#include <string>
+
+#include "chrome/browser/sync/notifier/base/fastalloc.h"
+#include "talk/base/basictypes.h"
+
+namespace notifier {
+
+// Does html encoding of strings.
+std::string HtmlEncode(const std::string& src);
+
+// Does html decoding of strings.
+std::string HtmlDecode(const std::string& src);
+
+// Does utl encoding of strings.
+std::string UrlEncode(const std::string& src);
+
+// Does url decoding of strings.
+std::string UrlDecode(const std::string& src);
+
+// Convert a character to a digit
+// if the character is not a digit return -1 (same as CRT)
+inline int CharToDigit(char c) {
+ return ((c) >= '0' && (c) <= '9' ? (c) - '0' : -1);
+}
+
+int CharToHexValue(char hex);
+
+// ----------------------------------------------------------------------
+// ParseStringToInt()
+// ParseStringToUint()
+// ParseStringToInt64()
+// ParseStringToDouble()
+// Convert a string to an int/int64/double
+// If strict is true, check for the validity and overflow
+// ----------------------------------------------------------------------
+
+bool ParseStringToInt(const char* str, int* value, bool strict);
+
+bool ParseStringToUint(const char* str, uint32* value, bool strict);
+
+bool ParseStringToInt64(const char* str, int64* value, bool strict);
+
+bool ParseStringToDouble(const char* str, double* value, bool strict);
+
+// ----------------------------------------------------------------------
+// StringToInt()
+// StringToUint()
+// StringToInt64()
+// StringToDouble()
+// Convert a string to an int/int64/double
+// Note that these functions do not check for the validity or overflow
+// ----------------------------------------------------------------------
+
+int StringToInt(const char* str);
+
+uint32 StringToUint(const char* str);
+
+int64 StringToInt64(const char* str);
+
+double StringToDouble(const char* str);
+
+// ----------------------------------------------------------------------
+// FloatToString()
+// DoubleToString()
+// IntToString()
+// UIntToString()
+// Int64ToString()
+// UInt64ToString()
+// Convert various types to their string representation. These
+// all do the obvious, trivial thing.
+// ----------------------------------------------------------------------
+
+std::string FloatToString(float f);
+std::string DoubleToString(double d);
+
+std::string IntToString(int i);
+std::string UIntToString(uint32 i);
+
+std::string Int64ToString(int64 i64);
+std::string UInt64ToString(uint64 i64);
+
+std::string Int64ToHexString(int64 i64);
+
+// ----------------------------------------------------------------------
+// StringStartsWith()
+// StringEndsWith()
+// Check if a string starts or ends with a pattern
+// ----------------------------------------------------------------------
+
+inline bool StringStartsWith(const std::string& s, const char* p) {
+ return s.find(p) == 0;
+}
+
+inline bool StringEndsWith(const std::string& s, const char* p) {
+ return s.rfind(p) == (s.length() - strlen(p));
+}
+
+// ----------------------------------------------------------------------
+// MakeStringEndWith()
+// If the string does not end with a pattern, make it end with it
+// ----------------------------------------------------------------------
+
+inline std::string MakeStringEndWith(const std::string& s, const char* p) {
+ if (StringEndsWith(s, p)) {
+ return s;
+ } else {
+ std::string ns(s);
+ ns += p;
+ return ns;
+ }
+}
+
+// Convert a lower_case_string to LowerCaseString
+std::string LowerWithUnderToPascalCase(const char* lower_with_under);
+
+// Convert a PascalCaseString to pascal_case_string
+std::string PascalCaseToLowerWithUnder(const char* pascal_case);
+
+// ----------------------------------------------------------------------
+// LowerString()
+// LowerStringToBuf()
+// Convert the characters in "s" to lowercase.
+// Changes contents of "s". LowerStringToBuf copies at most
+// "n" characters (including the terminating '\0') from "s"
+// to another buffer.
+// ----------------------------------------------------------------------
+
+inline void LowerString(char* s) {
+ for (; *s; ++s) {
+ *s = tolower(*s);
+ }
+}
+
+inline void LowerString(std::string* s) {
+ std::string::iterator end = s->end();
+ for (std::string::iterator i = s->begin(); i != end; ++i) {
+ *i = tolower(*i);
+ }
+}
+
+inline void LowerStringToBuf(const char* s, char* buf, int n) {
+ for (int i = 0; i < n - 1; ++i) {
+ char c = s[i];
+ buf[i] = tolower(c);
+ if (c == '\0') {
+ return;
+ }
+ }
+ buf[n - 1] = '\0';
+}
+
+// ----------------------------------------------------------------------
+// UpperString()
+// UpperStringToBuf()
+// Convert the characters in "s" to uppercase.
+// UpperString changes "s". UpperStringToBuf copies at most
+// "n" characters (including the terminating '\0') from "s"
+// to another buffer.
+// ----------------------------------------------------------------------
+
+inline void UpperString(char* s) {
+ for (; *s; ++s) {
+ *s = toupper(*s);
+ }
+}
+
+inline void UpperString(std::string* s) {
+ for (std::string::iterator iter = s->begin(); iter != s->end(); ++iter) {
+ *iter = toupper(*iter);
+ }
+}
+
+inline void UpperStringToBuf(const char* s, char* buf, int n) {
+ for (int i = 0; i < n - 1; ++i) {
+ char c = s[i];
+ buf[i] = toupper(c);
+ if (c == '\0') {
+ return;
+ }
+ }
+ buf[n - 1] = '\0';
+}
+
+// ----------------------------------------------------------------------
+// TrimStringLeft
+// Removes any occurrences of the characters in 'remove' from the start
+// of the string. Returns the number of chars trimmed.
+// ----------------------------------------------------------------------
+inline int TrimStringLeft(std::string* s, const char* remove) {
+ int i = 0;
+ for (; i < static_cast<int>(s->size()) && strchr(remove, (*s)[i]); ++i);
+ if (i > 0) s->erase(0, i);
+ return i;
+}
+
+// ----------------------------------------------------------------------
+// TrimStringRight
+// Removes any occurrences of the characters in 'remove' from the end
+// of the string. Returns the number of chars trimmed.
+// ----------------------------------------------------------------------
+inline int TrimStringRight(std::string* s, const char* remove) {
+ int size = static_cast<int>(s->size());
+ int i = size;
+ for (; i > 0 && strchr(remove, (*s)[i - 1]); --i);
+ if (i < size) {
+ s->erase(i);
+ }
+ return size - i;
+}
+
+// ----------------------------------------------------------------------
+// TrimString
+// Removes any occurrences of the characters in 'remove' from either
+// end of the string.
+// ----------------------------------------------------------------------
+inline int TrimString(std::string* s, const char* remove) {
+ return TrimStringRight(s, remove) + TrimStringLeft(s, remove);
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+// Replace the "old" pattern with the "new" pattern in a string. If
+// replace_all is false, it only replaces the first instance of "old."
+// ----------------------------------------------------------------------
+
+void StringReplace(std::string* s,
+ const char* old_sub,
+ const char* new_sub,
+ bool replace_all);
+
+inline size_t HashString(const std::string &value) {
+#ifdef COMPILER_MSVC
+ return stdext::hash_value(value);
+#elif defined(__GNUC__)
+ __gnu_cxx::hash<const char*> h;
+ return h(value.c_str());
+#else
+ // Compile time error because we don't return a value
+#endif
+}
+
+// ----------------------------------------------------------------------
+// SplitOneStringToken()
+// Parse a single "delim" delimited string from "*source"
+// Modify *source to point after the delimiter.
+// If no delimiter is present after the string, set *source to NULL.
+//
+// If the start of *source is a delimiter, return an empty string.
+// If *source is NULL, return an empty string.
+// ----------------------------------------------------------------------
+std::string SplitOneStringToken(const char** source, const char* delim);
+
+//----------------------------------------------------------------------
+// CharTraits provides wrappers with common function names for char/wchar_t
+// specific CRT functions
+//----------------------------------------------------------------------
+
+template <class CharT> struct CharTraits {
+};
+
+template <>
+struct CharTraits<char> {
+ static inline size_t length(const char* s) {
+ return strlen(s);
+ }
+ static inline bool copy(char* dst, size_t dst_size, const char* s) {
+ if (s == NULL || dst == NULL)
+ return false;
+ else
+ return copy_num(dst, dst_size, s, strlen(s));
+ }
+ static inline bool copy_num(char* dst, size_t dst_size, const char* s,
+ size_t s_len) {
+ if (dst_size < (s_len + 1))
+ return false;
+ memcpy(dst, s, s_len);
+ dst[s_len] = '\0';
+ return true;
+ }
+};
+
+template <>
+struct CharTraits<wchar_t> {
+ static inline size_t length(const wchar_t* s) {
+ return wcslen(s);
+ }
+ static inline bool copy(wchar_t* dst, size_t dst_size, const wchar_t* s) {
+ if (s == NULL || dst == NULL)
+ return false;
+ else
+ return copy_num(dst, dst_size, s, wcslen(s));
+ }
+ static inline bool copy_num(wchar_t* dst, size_t dst_size, const wchar_t* s,
+ size_t s_len) {
+ if (dst_size < (s_len + 1)) {
+ return false;
+ }
+ memcpy(dst, s, s_len * sizeof(wchar_t));
+ dst[s_len] = '\0';
+ return true;
+ }
+};
+
+//----------------------------------------------------------------------
+// This class manages a fixed-size, null-terminated string buffer. It is
+// meant to be allocated on the stack, and it makes no use of the heap
+// internally. In most cases you'll just want to use a std::(w)string, but
+// when you need to avoid the heap, you can use this class instead.
+//
+// Methods are provided to read the null-terminated buffer and to append
+// data to the buffer, and once the buffer fills-up, it simply discards any
+// extra append calls.
+//----------------------------------------------------------------------
+
+template <class CharT, int MaxSize>
+class FixedString {
+ public:
+ typedef CharTraits<CharT> char_traits;
+
+ FixedString() : index_(0), truncated_(false) {
+ buf_[0] = CharT(0);
+ }
+
+ ~FixedString() {
+ memset(buf_, 0xCC, sizeof(buf_));
+ }
+
+ // Returns true if the Append ever failed.
+ bool was_truncated() const { return truncated_; }
+
+ // Returns the number of characters in the string, excluding the null
+ // terminator.
+ size_t size() const { return index_; }
+
+ // Returns the null-terminated string.
+ const CharT* get() const { return buf_; }
+ CharT* get() { return buf_; }
+
+ // Append an array of characters. The operation is bounds checked, and if
+ // there is insufficient room, then the was_truncated() flag is set to true.
+ void Append(const CharT* s, size_t n) {
+ if (char_traits::copy_num(buf_ + index_, arraysize(buf_) - index_, s, n)) {
+ index_ += n;
+ } else {
+ truncated_ = true;
+ }
+ }
+
+ // Append a null-terminated string.
+ void Append(const CharT* s) {
+ Append(s, char_traits::length(s));
+ }
+
+ // Append a single character.
+ void Append(CharT c) {
+ Append(&c, 1);
+ }
+
+ private:
+ CharT buf_[MaxSize];
+ size_t index_;
+ bool truncated_;
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_STRING_H_
diff --git a/chrome/browser/sync/notifier/base/string_unittest.cc b/chrome/browser/sync/notifier/base/string_unittest.cc
new file mode 100644
index 0000000..954315a
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/string_unittest.cc
@@ -0,0 +1,362 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/string.h"
+#include "notifier/testing/notifier/unittest.h"
+
+namespace notifier {
+
+TEST_NOTIFIER_F(StringTest);
+
+TEST_F(StringTest, StringToInt) {
+ ASSERT_EQ(StringToInt("625"), 625);
+ ASSERT_EQ(StringToInt("6"), 6);
+ ASSERT_EQ(StringToInt("0"), 0);
+ ASSERT_EQ(StringToInt(" 122"), 122);
+ ASSERT_EQ(StringToInt("a"), 0);
+ ASSERT_EQ(StringToInt(" a"), 0);
+ ASSERT_EQ(StringToInt("2147483647"), 2147483647);
+ ASSERT_EQ(StringToInt("-2147483648"),
+ static_cast<int>(0x80000000)); // Hex constant avoids gcc warning.
+
+ int value = 0;
+ ASSERT_FALSE(ParseStringToInt("62.5", &value, true));
+ ASSERT_FALSE(ParseStringToInt("625e", &value, true));
+ ASSERT_FALSE(ParseStringToInt("2147483648", &value, true));
+ ASSERT_FALSE(ParseStringToInt("-2147483649", &value, true));
+ ASSERT_FALSE(ParseStringToInt("-4857004031", &value, true));
+}
+
+TEST_F(StringTest, StringToUint) {
+ ASSERT_EQ(StringToUint("625"), 625);
+ ASSERT_EQ(StringToUint("6"), 6);
+ ASSERT_EQ(StringToUint("0"), 0);
+ ASSERT_EQ(StringToUint(" 122"), 122);
+ ASSERT_EQ(StringToUint("a"), 0);
+ ASSERT_EQ(StringToUint(" a"), 0);
+ ASSERT_EQ(StringToUint("4294967295"), static_cast<uint32>(0xffffffff));
+
+ uint32 value = 0;
+ ASSERT_FALSE(ParseStringToUint("62.5", &value, true));
+ ASSERT_FALSE(ParseStringToUint("625e", &value, true));
+ ASSERT_FALSE(ParseStringToUint("4294967296", &value, true));
+ ASSERT_FALSE(ParseStringToUint("-1", &value, true));
+}
+
+TEST_F(StringTest, StringToInt64) {
+ ASSERT_EQ(StringToInt64("119600064000000000"),
+ INT64_C(119600064000000000));
+ ASSERT_EQ(StringToInt64(" 119600064000000000"),
+ INT64_C(119600064000000000));
+ ASSERT_EQ(StringToInt64("625"), 625);
+ ASSERT_EQ(StringToInt64("6"), 6);
+ ASSERT_EQ(StringToInt64("0"), 0);
+ ASSERT_EQ(StringToInt64(" 122"), 122);
+ ASSERT_EQ(StringToInt64("a"), 0);
+ ASSERT_EQ(StringToInt64(" a"), 0);
+ ASSERT_EQ(StringToInt64("9223372036854775807"), INT64_C(9223372036854775807));
+ ASSERT_EQ(StringToInt64("-9223372036854775808I64"),
+ static_cast<int64>(INT64_C(0x8000000000000000)));
+
+ int64 value = 0;
+ ASSERT_FALSE(ParseStringToInt64("62.5", &value, true));
+ ASSERT_FALSE(ParseStringToInt64("625e", &value, true));
+ ASSERT_FALSE(ParseStringToInt64("9223372036854775808", &value, true));
+ ASSERT_FALSE(ParseStringToInt64("-9223372036854775809", &value, true));
+}
+
+TEST_F(StringTest, StringToDouble) {
+ ASSERT_DOUBLE_EQ(StringToDouble("625"), 625);
+ ASSERT_DOUBLE_EQ(StringToDouble("-625"), -625);
+ ASSERT_DOUBLE_EQ(StringToDouble("-6.25"), -6.25);
+ ASSERT_DOUBLE_EQ(StringToDouble("6.25"), 6.25);
+ ASSERT_DOUBLE_EQ(StringToDouble("0.00"), 0);
+ ASSERT_DOUBLE_EQ(StringToDouble(" 55.1"), 55.1);
+ ASSERT_DOUBLE_EQ(StringToDouble(" 55.001"), 55.001);
+ ASSERT_DOUBLE_EQ(StringToDouble(" 1.001"), 1.001);
+
+ double value = 0.0;
+ ASSERT_FALSE(ParseStringToDouble("62*5", &value, true));
+}
+
+TEST_F(StringTest, Int64ToHexString) {
+ ASSERT_STREQ("1a8e79fe1d58000",
+ Int64ToHexString(INT64_C(119600064000000000)).c_str());
+ ASSERT_STREQ("271", Int64ToHexString(625).c_str());
+ ASSERT_STREQ("0", Int64ToHexString(0).c_str());
+}
+
+TEST_F(StringTest, StringStartsWith) {
+ { std::string s(""); ASSERT_TRUE(StringStartsWith(s, "")); }
+ { std::string s("abc"); ASSERT_TRUE(StringStartsWith(s, "ab")); }
+ { std::string s("abc"); ASSERT_FALSE(StringStartsWith(s, "bc")); }
+}
+
+TEST_F(StringTest, StringEndsWith) {
+ { std::string s(""); ASSERT_TRUE(StringEndsWith(s, "")); }
+ { std::string s("abc"); ASSERT_TRUE(StringEndsWith(s, "bc")); }
+ { std::string s("abc"); ASSERT_FALSE(StringEndsWith(s, "ab")); }
+}
+
+TEST_F(StringTest, MakeStringEndWith) {
+ {
+ std::string s("");
+ std::string t(MakeStringEndWith(s, ""));
+ ASSERT_STREQ(t.c_str(), "");
+ }
+ {
+ std::string s("abc");
+ std::string t(MakeStringEndWith(s, "def"));
+ ASSERT_STREQ(t.c_str(), "abcdef");
+ }
+ {
+ std::string s("abc");
+ std::string t(MakeStringEndWith(s, "bc"));
+ ASSERT_STREQ(t.c_str(), "abc");
+ }
+}
+
+TEST_F(StringTest, LowerString) {
+ { std::string s(""); LowerString(&s); ASSERT_STREQ(s.c_str(), ""); }
+ { std::string s("a"); LowerString(&s); ASSERT_STREQ(s.c_str(), "a"); }
+ { std::string s("A"); LowerString(&s); ASSERT_STREQ(s.c_str(), "a"); }
+ { std::string s("abc"); LowerString(&s); ASSERT_STREQ(s.c_str(), "abc"); }
+ { std::string s("ABC"); LowerString(&s); ASSERT_STREQ(s.c_str(), "abc"); }
+}
+
+TEST_F(StringTest, UpperString) {
+ { std::string s(""); UpperString(&s); ASSERT_STREQ(s.c_str(), ""); }
+ { std::string s("A"); UpperString(&s); ASSERT_STREQ(s.c_str(), "A"); }
+ { std::string s("a"); UpperString(&s); ASSERT_STREQ(s.c_str(), "A"); }
+ { std::string s("ABC"); UpperString(&s); ASSERT_STREQ(s.c_str(), "ABC"); }
+ { std::string s("abc"); UpperString(&s); ASSERT_STREQ(s.c_str(), "ABC"); }
+}
+
+TEST_F(StringTest, TrimString) {
+ const char* white = " \n\t";
+ std::string s, c;
+
+ // TrimStringLeft
+ s = ""; // empty
+ c = "";
+ ASSERT_EQ(TrimStringLeft(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t"; // all bad
+ c = "";
+ ASSERT_EQ(TrimStringLeft(&s, white), 3);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = "dog"; // nothing bad
+ c = "dog";
+ ASSERT_EQ(TrimStringLeft(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " dog "; // some bad
+ c = "dog ";
+ ASSERT_EQ(TrimStringLeft(&s, white), 1);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t\t I love my little dog \n\t ";
+ c = "I love my little dog \n\t ";
+ ASSERT_EQ(TrimStringLeft(&s, white), 5);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ // TrimStringRight
+ s = "";
+ c = "";
+ ASSERT_EQ(TrimStringRight(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t";
+ c = "";
+ ASSERT_EQ(TrimStringRight(&s, white), 3);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = "dog";
+ c = "dog";
+ ASSERT_EQ(TrimStringRight(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " dog ";
+ c = " dog";
+ ASSERT_EQ(TrimStringRight(&s, white), 1);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t\t I love my little dog \n\t ";
+ c = " \n\t\t I love my little dog";
+ ASSERT_EQ(TrimStringRight(&s, white), 4);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ // TrimString
+ s = "";
+ c = "";
+ ASSERT_EQ(TrimString(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t";
+ c = "";
+ ASSERT_EQ(TrimString(&s, white), 3);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = "dog";
+ c = "dog";
+ ASSERT_EQ(TrimString(&s, white), 0);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " dog ";
+ c = "dog";
+ ASSERT_EQ(TrimString(&s, white), 2);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+
+ s = " \n\t\t I love my little dog \n\t ";
+ c = "I love my little dog";
+ ASSERT_EQ(TrimString(&s, white), 9);
+ ASSERT_STREQ(s.c_str(), c.c_str());
+}
+
+TEST_F(StringTest, SplitOneStringToken) {
+ const char* teststrings[] = {
+ "alongword",
+ "alongword ",
+ "alongword ",
+ "alongword anotherword",
+ " alongword",
+ "",
+ };
+ const char* source = NULL;
+
+ source = teststrings[0];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "alongword");
+ ASSERT_STREQ(source, NULL);
+
+ source = teststrings[1];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "alongword");
+ ASSERT_STREQ(source, teststrings[1] + strlen("alongword") + 1);
+
+ source = teststrings[2];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "alongword");
+ ASSERT_STREQ(source, teststrings[2] + strlen("alongword") + 1);
+
+ source = teststrings[3];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "alongword");
+ ASSERT_STREQ(source, teststrings[3] + strlen("alongword") + 1);
+
+ source = teststrings[4];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "");
+ ASSERT_STREQ(source, teststrings[4] + 1);
+
+ source = teststrings[5];
+ ASSERT_STREQ(SplitOneStringToken(&source, " ").c_str(), "");
+ ASSERT_STREQ(source, NULL);
+}
+
+TEST_F(StringTest, FixedString) {
+ // Test basic operation.
+ const wchar_t kData[] = L"hello world";
+ FixedString<wchar_t, 40> buf;
+
+ buf.Append(kData);
+ EXPECT_EQ(arraysize(kData)-1, buf.size());
+ EXPECT_EQ(0, wcscmp(kData, buf.get()));
+
+ buf.Append(' ');
+ buf.Append(kData);
+ const wchar_t kExpected[] = L"hello world hello world";
+ EXPECT_EQ(arraysize(kExpected)-1, buf.size());
+ EXPECT_EQ(0, wcscmp(kExpected, buf.get()));
+ EXPECT_EQ(false, buf.was_truncated());
+
+ // Test overflow.
+ FixedString<wchar_t, 5> buf2;
+ buf2.Append(L"hello world");
+ EXPECT_EQ(static_cast<size_t>(0), buf2.size());
+ EXPECT_EQ(0, buf2.get()[0]);
+ EXPECT_EQ(true, buf2.was_truncated());
+}
+
+TEST_F(StringTest, LowerToPascalCase) {
+ EXPECT_STREQ("", LowerWithUnderToPascalCase("").c_str());
+ EXPECT_STREQ("A", LowerWithUnderToPascalCase("a").c_str());
+ EXPECT_STREQ("TestS", LowerWithUnderToPascalCase("test_s").c_str());
+ EXPECT_STREQ("XQ", LowerWithUnderToPascalCase("x_q").c_str());
+ EXPECT_STREQ("XQDNS", LowerWithUnderToPascalCase("x_qDNS").c_str());
+}
+
+TEST_F(StringTest, PascalCaseToLower) {
+ EXPECT_STREQ("", PascalCaseToLowerWithUnder("").c_str());
+ EXPECT_STREQ("a", PascalCaseToLowerWithUnder("A").c_str());
+ EXPECT_STREQ("test_s", PascalCaseToLowerWithUnder("TestS").c_str());
+ EXPECT_STREQ("xq", PascalCaseToLowerWithUnder("XQ").c_str());
+ EXPECT_STREQ("dns_name", PascalCaseToLowerWithUnder("DNSName").c_str());
+ EXPECT_STREQ("xqdns", PascalCaseToLowerWithUnder("XQDNS").c_str());
+ EXPECT_STREQ("xqdn_sa", PascalCaseToLowerWithUnder("XQDNSa").c_str());
+ EXPECT_STREQ("dns1", PascalCaseToLowerWithUnder("DNS1").c_str());
+}
+
+TEST_F(StringTest, HtmlEncode) {
+ EXPECT_STREQ("dns", HtmlEncode("dns").c_str());
+ EXPECT_STREQ("&amp;", HtmlEncode("&").c_str());
+ EXPECT_STREQ("&amp;amp;", HtmlEncode("&amp;").c_str());
+ EXPECT_STREQ("&lt;!&gt;", HtmlEncode("<!>").c_str());
+}
+
+TEST_F(StringTest, HtmlDecode) {
+ EXPECT_STREQ("dns", HtmlDecode("dns").c_str());
+ EXPECT_STREQ("&", HtmlDecode("&amp;").c_str());
+ EXPECT_STREQ("&amp;", HtmlDecode("&amp;amp;").c_str());
+ EXPECT_STREQ("<!>", HtmlDecode("&lt;!&gt;").c_str());
+}
+
+TEST_F(StringTest, UrlEncode) {
+ EXPECT_STREQ("%26", UrlEncode("&").c_str());
+ EXPECT_STREQ("%3f%20", UrlEncode("? ").c_str());
+ EXPECT_STREQ("as%20dfdsa", UrlEncode("as dfdsa").c_str());
+ EXPECT_STREQ("%3c!%3e", UrlEncode("<!>").c_str());
+ EXPECT_STREQ("!%23!", UrlEncode("!#!").c_str());
+ EXPECT_STREQ("!!", UrlEncode("!!").c_str());
+}
+
+TEST_F(StringTest, UrlDecode) {
+ EXPECT_STREQ("&", UrlDecode("%26").c_str());
+ EXPECT_STREQ("? ", UrlDecode("%3f%20").c_str());
+ EXPECT_STREQ("as dfdsa", UrlDecode("as%20dfdsa").c_str());
+ EXPECT_STREQ("<!>", UrlDecode("%3c!%3e").c_str());
+ EXPECT_STREQ("&amp;", UrlDecode("&amp;").c_str());
+}
+
+TEST_F(StringTest, StringReplace) {
+ // Test StringReplace core functionality.
+ std::string s = "<attribute name=abcd/>";
+ StringReplace(&s, "=", " = ", false);
+ EXPECT_STREQ(s.c_str(), "<attribute name = abcd/>");
+
+ // Test for negative case.
+ s = "<attribute name=abcd/>";
+ StringReplace(&s, "-", "=", false);
+ EXPECT_STREQ(s.c_str(), "<attribute name=abcd/>");
+
+ // Test StringReplace core functionality with replace_all flag set.
+ s = "<attribute name==abcd/>";
+ StringReplace(&s, "=", " = ", true);
+ EXPECT_STREQ(s.c_str(), "<attribute name = = abcd/>");
+
+ // Input is an empty string.
+ s = "";
+ StringReplace(&s, "=", " = ", false);
+ EXPECT_STREQ(s.c_str(), "");
+
+ // Input is an empty string and this is a request for repeated
+ // string replaces.
+ s = "";
+ StringReplace(&s, "=", " = ", true);
+ EXPECT_STREQ(s.c_str(), "");
+
+ // Input and string to replace is an empty string.
+ s = "";
+ StringReplace(&s, "", " = ", false);
+ EXPECT_STREQ(s.c_str(), "");
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/task_pump.cc b/chrome/browser/sync/notifier/base/task_pump.cc
new file mode 100644
index 0000000..7e99fc1
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/task_pump.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/task_pump.h"
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "talk/base/common.h"
+#include "talk/base/thread.h"
+
+namespace notifier {
+
+// Don't add any messages because there are cleared and thrown away.
+enum { MSG_WAKE_UP = 1, MSG_TIMED_WAKE_UP };
+
+TaskPump::TaskPump() : timeout_change_count_(0), posted_(false) {
+}
+
+void TaskPump::OnMessage(talk_base::Message* msg) {
+ posted_ = false;
+ int initial_count = timeout_change_count_;
+
+ // If a task timed out, ensure that it is not blocked, so it will be deleted.
+ // This may result in a WakeTasks if a task is timed out.
+ PollTasks();
+
+ // Run tasks and handle timeouts.
+ RunTasks();
+}
+
+void TaskPump::WakeTasks() {
+ if (!posted_) {
+ // Do the requested wake up
+ talk_base::Thread::Current()->Post(this, MSG_WAKE_UP);
+ posted_ = true;
+ }
+}
+
+int64 TaskPump::CurrentTime() {
+ return GetCurrent100NSTime();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/task_pump.h b/chrome/browser/sync/notifier/base/task_pump.h
new file mode 100644
index 0000000..b6c00ec
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/task_pump.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_TASK_PUMP_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_TASK_PUMP_H_
+
+#include "talk/base/messagequeue.h"
+#include "talk/base/taskrunner.h"
+
+namespace notifier {
+
+class TaskPump : public talk_base::MessageHandler,
+ public talk_base::TaskRunner {
+ public:
+ TaskPump();
+
+ // MessageHandler interface.
+ virtual void OnMessage(talk_base::Message* msg);
+
+ // TaskRunner interface
+ virtual void WakeTasks();
+ virtual int64 CurrentTime();
+
+ private:
+ int timeout_change_count_;
+ bool posted_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskPump);
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_TASK_PUMP_H_
diff --git a/chrome/browser/sync/notifier/base/time.cc b/chrome/browser/sync/notifier/base/time.cc
new file mode 100644
index 0000000..ed3c414
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/time.cc
@@ -0,0 +1,360 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/time.h"
+
+#include <string>
+#include <time.h>
+
+#include "chrome/browser/sync/notifier/base/string.h"
+#include "chrome/browser/sync/notifier/base/utils.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+
+namespace notifier {
+
+// Get the current time represented in 100NS granularity since epoch
+// (Jan 1, 1970)
+time64 GetCurrent100NSTimeSinceEpoch() {
+ return GetCurrent100NSTime() - kStart100NsTimeToEpoch;
+}
+
+char* GetLocalTimeAsString() {
+ time64 long_time = GetCurrent100NSTime();
+ struct tm now;
+ Time64ToTm(long_time, &now);
+ char* time_string = asctime(&now);
+ if (time_string) {
+ int time_len = strlen(time_string);
+ if (time_len > 0) {
+ time_string[time_len - 1] = 0; // trim off terminating \n
+ }
+ }
+ return time_string;
+}
+
+// Parses RFC 822 Date/Time format
+// 5. DATE AND TIME SPECIFICATION
+// 5.1. SYNTAX
+//
+// date-time = [ day "," ] date time ; dd mm yy
+// ; hh:mm:ss zzz
+// day = "Mon" / "Tue" / "Wed" / "Thu"
+// / "Fri" / "Sat" / "Sun"
+//
+// date = 1*2DIGIT month 2DIGIT ; day month year
+// ; e.g. 20 Jun 82
+//
+// month = "Jan" / "Feb" / "Mar" / "Apr"
+// / "May" / "Jun" / "Jul" / "Aug"
+// / "Sep" / "Oct" / "Nov" / "Dec"
+//
+// time = hour zone ; ANSI and Military
+//
+// hour = 2DIGIT ":" 2DIGIT [":" 2DIGIT]
+// ; 00:00:00 - 23:59:59
+//
+// zone = "UT" / "GMT" ; Universal Time
+// ; North American : UT
+// / "EST" / "EDT" ; Eastern: - 5/ - 4
+// / "CST" / "CDT" ; Central: - 6/ - 5
+// / "MST" / "MDT" ; Mountain: - 7/ - 6
+// / "PST" / "PDT" ; Pacific: - 8/ - 7
+// / 1ALPHA ; Military: Z = UT;
+// ; A:-1; (J not used)
+// ; M:-12; N:+1; Y:+12
+// / ( ("+" / "-") 4DIGIT ) ; Local differential
+// ; hours+min. (HHMM)
+// Return local time if ret_local_time == true, return UTC time otherwise
+const int kNumOfDays = 7;
+const int kNumOfMonth = 12;
+// Note: RFC822 does not include '-' as a separator, but Http Cookies use
+// it in the date field, like this: Wdy, DD-Mon-YYYY HH:MM:SS GMT
+// This differs from RFC822 only by those dashes. It is legacy quirk from
+// old Netscape cookie specification. So it makes sense to expand this
+// parser rather then add another one.
+// See http://wp.netscape.com/newsref/std/cookie_spec.html
+const char kRFC822_DateDelimiters[] = " ,:-";
+
+const char kRFC822_TimeDelimiter[] = ": ";
+const char* kRFC822_Day[] = {
+ "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"
+};
+const char* kRFC822_Month[] = {
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+struct TimeZoneInfo {
+ const char* zone_name;
+ int hour_dif;
+};
+
+const TimeZoneInfo kRFC822_TimeZone[] = {
+ { "UT", 0 },
+ { "GMT", 0 },
+ { "EST", -5 },
+ { "EDT", -4 },
+ { "CST", -6 },
+ { "CDT", -5 },
+ { "MST", -7 },
+ { "MDT", -6 },
+ { "PST", -8 },
+ { "PDT", -7 },
+ { "A", -1 }, // Military time zones
+ { "B", -2 },
+ { "C", -3 },
+ { "D", -4 },
+ { "E", -5 },
+ { "F", -6 },
+ { "G", -7 },
+ { "H", -8 },
+ { "I", -9 },
+ { "K", -10 },
+ { "L", -11 },
+ { "M", -12 },
+ { "N", 1 },
+ { "O", 2 },
+ { "P", 3 },
+ { "Q", 4 },
+ { "R", 5 },
+ { "S", 6 },
+ { "T", 7 },
+ { "U", 8 },
+ { "V", 9 },
+ { "W", 10 },
+ { "X", 11 },
+ { "Y", 12 },
+ { "Z", 0 },
+};
+
+bool ParseRFC822DateTime(const char* str, struct tm* time,
+ bool ret_local_time) {
+ ASSERT(str && *str);
+ ASSERT(time);
+
+ std::string str_date(str);
+ std::string str_token;
+ const char* str_curr = str_date.c_str();
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_DateDelimiters);
+ if (str_token == "") {
+ return false;
+ }
+
+ for (int i = 0; i < kNumOfDays; ++i) {
+ if (str_token == kRFC822_Day[i]) {
+ // Skip spaces after ','
+ while (*str_curr == ' ' && *str_curr != '\0') {
+ str_curr++;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_DateDelimiters);
+ if (str_token == "") {
+ return false;
+ }
+ break;
+ }
+ }
+
+ int day = 0;
+ if (!ParseStringToInt(str_token.c_str(), &day, true) || day < 0 || day > 31) {
+ return false;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_DateDelimiters);
+ if (str_token == "") {
+ return false;
+ }
+
+ int month = -1;
+ for (int i = 0; i < kNumOfMonth; ++i) {
+ if (str_token == kRFC822_Month[i]) {
+ month = i; // month is 0 based number
+ break;
+ }
+ }
+ if (month == -1) { // month not found
+ return false;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_DateDelimiters);
+ if (str_token == "") {
+ return false;
+ }
+
+ int year = 0;
+ if (!ParseStringToInt(str_token.c_str(), &year, true)) {
+ return false;
+ }
+ if (year < 100) { // two digit year format, convert to 1950 - 2050 range
+ if (year < 50) {
+ year += 2000;
+ } else {
+ year += 1900;
+ }
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_TimeDelimiter);
+ if (str_token == "") {
+ return false;
+ }
+
+ int hour = 0;
+ if (!ParseStringToInt(str_token.c_str(), &hour, true) ||
+ hour < 0 || hour > 23) {
+ return false;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_TimeDelimiter);
+ if (str_token == "") {
+ return false;
+ }
+
+ int minute = 0;
+ if (!ParseStringToInt(str_token.c_str(), &minute, true) ||
+ minute < 0 || minute > 59) {
+ return false;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_TimeDelimiter);
+ if (str_token == "") {
+ return false;
+ }
+
+ int second = 0;
+ // distingushed between XX:XX and XX:XX:XX time formats
+ if (str_token.size() == 2 && isdigit(str_token[0]) && isdigit(str_token[1])) {
+ second = 0;
+ if (!ParseStringToInt(str_token.c_str(), &second, true) ||
+ second < 0 || second > 59) {
+ return false;
+ }
+
+ str_token = SplitOneStringToken(&str_curr, kRFC822_TimeDelimiter);
+ if (str_token == "") {
+ return false;
+ }
+ }
+
+ int bias = 0;
+ if (str_token[0] == '+' || str_token[0] == '-' || isdigit(str_token[0])) {
+ // numeric format
+ int zone = 0;
+ if (!ParseStringToInt(str_token.c_str(), &zone, true)) {
+ return false;
+ }
+
+ // zone is in HHMM format, need to convert to the number of minutes
+ bias = (zone / 100) * 60 + (zone % 100);
+ } else { // text format
+ for (size_t i = 0; i < sizeof(kRFC822_TimeZone) / sizeof(TimeZoneInfo);
+ ++i) {
+ if (str_token == kRFC822_TimeZone[i].zone_name) {
+ bias = kRFC822_TimeZone[i].hour_dif * 60;
+ break;
+ }
+ }
+ }
+
+ SetZero(*time);
+ time->tm_year = year - 1900;
+ time->tm_mon = month;
+ time->tm_mday = day;
+ time->tm_hour = hour;
+ time->tm_min = minute;
+ time->tm_sec = second;
+
+ time64 time_64 = TmToTime64(*time);
+ time_64 = time_64 - bias * kMinsTo100ns;
+
+ if (!Time64ToTm(time_64, time)) {
+ return false;
+ }
+
+ if (ret_local_time) {
+ if (!UtcTimeToLocalTime(time)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Parse a string to time span
+//
+// A TimeSpan value can be represented as
+// [d.]hh:mm:ss
+//
+// d = days (optional)
+// hh = hours as measured on a 24-hour clock
+// mm = minutes
+// ss = seconds
+bool ParseStringToTimeSpan(const char* str, time64* time_span) {
+ ASSERT(str);
+ ASSERT(time_span);
+
+ const char kColonDelimitor[] = ":";
+ const char kDotDelimitor = '.';
+
+ std::string str_span(str);
+ time64 span = 0;
+
+ int idx = str_span.find(kDotDelimitor);
+ if (idx != -1) {
+ std::string str_day = str_span.substr(0, idx);
+ int day = 0;
+ if (!ParseStringToInt(str_day.c_str(), &day, true) ||
+ day < 0 || day > 365) {
+ return false;
+ }
+ span = day;
+
+ str_span = str_span.substr(idx + 1);
+ }
+
+ const char* str_curr = str_span.c_str();
+ std::string str_token;
+
+ str_token = SplitOneStringToken(&str_curr, kColonDelimitor);
+ if (str_token == "") {
+ return false;
+ }
+
+ int hour = 0;
+ if (!ParseStringToInt(str_token.c_str(), &hour, true) ||
+ hour < 0 || hour > 23) {
+ return false;
+ }
+ span = span * 24 + hour;
+
+ str_token = SplitOneStringToken(&str_curr, kColonDelimitor);
+ if (str_token == "") {
+ return false;
+ }
+
+ int minute = 0;
+ if (!ParseStringToInt(str_token.c_str(), &minute, true) ||
+ minute < 0 || minute > 59) {
+ return false;
+ }
+ span = span * 60 + minute;
+
+ str_token = SplitOneStringToken(&str_curr, kColonDelimitor);
+ if (str_token == "") {
+ return false;
+ }
+
+ int second = 0;
+ if (!ParseStringToInt(str_token.c_str(), &second, true) ||
+ second < 0 || second > 59) {
+ return false;
+ }
+
+ *time_span = (span * 60 + second) * kSecsTo100ns;
+
+ return true;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/time.h b/chrome/browser/sync/notifier/base/time.h
new file mode 100644
index 0000000..9bdb6f6
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/time.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIME_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIME_H_
+
+#include <time.h>
+
+#include "talk/base/basictypes.h"
+
+typedef uint64 time64;
+
+#define kMicrosecsTo100ns (static_cast<time64>(10))
+#define kMillisecsTo100ns (static_cast<time64>(10000))
+#define kSecsTo100ns (1000 * kMillisecsTo100ns)
+#define kMinsTo100ns (60 * kSecsTo100ns)
+#define kHoursTo100ns (60 * kMinsTo100ns)
+#define kDaysTo100ns (24 * kHoursTo100ns)
+const time64 kMaxTime100ns = UINT64_C(9223372036854775807);
+
+// Time difference in 100NS granularity between platform-dependent starting
+// time and Jan 1, 1970.
+#ifdef WIN32
+// On Windows time64 is seconds since Jan 1, 1601.
+#define kStart100NsTimeToEpoch (116444736000000000uI64) // Jan 1, 1970 in time64
+#else
+// On Unix time64 is seconds since Jan 1, 1970.
+#define kStart100NsTimeToEpoch (0) // Jan 1, 1970 in time64
+#endif
+
+// Time difference in 100NS granularity between platform-dependent starting
+// time and Jan 1, 1980.
+#define kStart100NsTimeTo1980 \
+ kStart100NsTimeToEpoch + UINT64_C(3155328000000000)
+
+#define kTimeGranularity (kDaysTo100ns)
+
+namespace notifier {
+
+// Get the current time represented in 100NS granularity
+// Different platform might return the value since different starting time.
+// Win32 platform returns the value since Jan 1, 1601.
+time64 GetCurrent100NSTime();
+
+// Get the current time represented in 100NS granularity since epoch
+// (Jan 1, 1970).
+time64 GetCurrent100NSTimeSinceEpoch();
+
+// Convert from struct tm to time64.
+time64 TmToTime64(const struct tm& tm);
+
+// Convert from time64 to struct tm.
+bool Time64ToTm(time64 t, struct tm* tm);
+
+// Convert from UTC time to local time.
+bool UtcTimeToLocalTime(struct tm* tm);
+
+// Convert from local time to UTC time.
+bool LocalTimeToUtcTime(struct tm* tm);
+
+// Returns the local time as a string suitable for logging
+// Note: This is *not* threadsafe, so only call it from the main thread.
+char* GetLocalTimeAsString();
+
+// Parses RFC 822 Date/Time format
+// 5. DATE AND TIME SPECIFICATION
+// 5.1. SYNTAX
+//
+// date-time = [ day "," ] date time ; dd mm yy
+// ; hh:mm:ss zzz
+// day = "Mon" / "Tue" / "Wed" / "Thu"
+// / "Fri" / "Sat" / "Sun"
+//
+// date = 1*2DIGIT month 2DIGIT ; day month year
+// ; e.g. 20 Jun 82
+//
+// month = "Jan" / "Feb" / "Mar" / "Apr"
+// / "May" / "Jun" / "Jul" / "Aug"
+// / "Sep" / "Oct" / "Nov" / "Dec"
+//
+// time = hour zone ; ANSI and Military
+//
+// hour = 2DIGIT ":" 2DIGIT [":" 2DIGIT]
+// ; 00:00:00 - 23:59:59
+//
+// zone = "UT" / "GMT" ; Universal Time
+// ; North American : UT
+// / "EST" / "EDT" ; Eastern: - 5/ - 4
+// / "CST" / "CDT" ; Central: - 6/ - 5
+// / "MST" / "MDT" ; Mountain: - 7/ - 6
+// / "PST" / "PDT" ; Pacific: - 8/ - 7
+// / 1ALPHA ; Military: Z = UT;
+// ; A:-1; (J not used)
+// ; M:-12; N:+1; Y:+12
+// / ( ("+" / "-") 4DIGIT ) ; Local differential
+// ; hours+min. (HHMM)
+// Return local time if ret_local_time == true, return UTC time otherwise
+bool ParseRFC822DateTime(const char* str, struct tm* time, bool ret_local_time);
+
+// Parse a string to time span.
+//
+// A TimeSpan value can be represented as
+// [d.]hh:mm:ss
+//
+// d = days (optional)
+// hh = hours as measured on a 24-hour clock
+// mm = minutes
+// ss = seconds
+bool ParseStringToTimeSpan(const char* str, time64* time_span);
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIME_H_
diff --git a/chrome/browser/sync/notifier/base/time_unittest.cc b/chrome/browser/sync/notifier/base/time_unittest.cc
new file mode 100644
index 0000000..0a34b0a
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/time_unittest.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "notifier/testing/notifier/unittest.h"
+
+namespace notifier {
+
+TEST_NOTIFIER_F(TimeTest);
+
+TEST_F(TimeTest, ParseRFC822DateTime) {
+ struct tm t = {0};
+
+ EXPECT_TRUE(ParseRFC822DateTime("Mon, 16 May 2005 15:44:18 -0700",
+ &t, false));
+ EXPECT_EQ(t.tm_year, 2005 - 1900);
+ EXPECT_EQ(t.tm_mon, 4);
+ EXPECT_EQ(t.tm_mday, 16);
+ EXPECT_EQ(t.tm_hour, 22);
+ EXPECT_EQ(t.tm_min, 44);
+ EXPECT_EQ(t.tm_sec, 18);
+
+ EXPECT_TRUE(ParseRFC822DateTime("Mon, 16 May 2005 15:44:18 -0700", &t, true));
+ EXPECT_EQ(t.tm_year, 2005 - 1900);
+ EXPECT_EQ(t.tm_mon, 4);
+ EXPECT_EQ(t.tm_mday, 16);
+ EXPECT_TRUE(t.tm_hour == 15 || t.tm_hour == 14); // daylight saving time
+ EXPECT_EQ(t.tm_min, 44);
+ EXPECT_EQ(t.tm_sec , 18);
+
+ EXPECT_TRUE(ParseRFC822DateTime("Tue, 17 May 2005 02:56:18 +0400",
+ &t, false));
+ EXPECT_EQ(t.tm_year, 2005 - 1900);
+ EXPECT_EQ(t.tm_mon, 4);
+ EXPECT_EQ(t.tm_mday, 16);
+ EXPECT_EQ(t.tm_hour, 22);
+ EXPECT_EQ(t.tm_min, 56);
+ EXPECT_EQ(t.tm_sec , 18);
+
+ EXPECT_TRUE(ParseRFC822DateTime("Tue, 17 May 2005 02:56:18 +0400", &t, true));
+ EXPECT_EQ(t.tm_year, 2005 - 1900);
+ EXPECT_EQ(t.tm_mon, 4);
+ EXPECT_EQ(t.tm_mday, 16);
+ EXPECT_TRUE(t.tm_hour == 15 || t.tm_hour == 14); // daylight saving time
+ EXPECT_EQ(t.tm_min, 56);
+ EXPECT_EQ(t.tm_sec, 18);
+}
+
+TEST_F(TimeTest, ParseStringToTimeSpan) {
+ time64 time_span = 0;
+
+ EXPECT_TRUE(ParseStringToTimeSpan("0:0:4", &time_span));
+ EXPECT_EQ(time_span, 4 * kSecsTo100ns);
+
+ EXPECT_TRUE(ParseStringToTimeSpan("0:3:4", &time_span));
+ EXPECT_EQ(time_span, (3 * 60 + 4) * kSecsTo100ns);
+
+ EXPECT_TRUE(ParseStringToTimeSpan("2:3:4", &time_span));
+ EXPECT_EQ(time_span, (2 * 3600 + 3 * 60 + 4) * kSecsTo100ns);
+
+ EXPECT_TRUE(ParseStringToTimeSpan("1.2:3:4", &time_span));
+ EXPECT_EQ(time_span, (1 * 86400 + 2 * 60 * 60 + 3 * 60 + 4) * kSecsTo100ns);
+
+ EXPECT_FALSE(ParseStringToTimeSpan("2:invalid:4", &time_span));
+}
+
+TEST_F(TimeTest, UseLocalTimeAsString) {
+ // Just call it to ensure that it doesn't assert.
+ GetLocalTimeAsString();
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/timer.cc b/chrome/browser/sync/notifier/base/timer.cc
new file mode 100644
index 0000000..7fa20b4
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/timer.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/base/timer.h"
+
+namespace notifier {
+
+Timer::Timer(talk_base::Task* parent, int timeout_seconds, bool repeat)
+ : Task(parent),
+ repeat_(repeat) {
+
+ set_timeout_seconds(timeout_seconds);
+ Start();
+ ResumeTimeout();
+}
+
+Timer::~Timer() {
+}
+
+int Timer::OnTimeout() {
+ if (!repeat_) {
+ return STATE_DONE;
+ }
+ ResetTimeout();
+ return STATE_BLOCKED;
+}
+
+int Timer::ProcessStart() {
+ return STATE_BLOCKED;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/timer.h b/chrome/browser/sync/notifier/base/timer.h
new file mode 100644
index 0000000..dd68c73
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/timer.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIMER_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIMER_H_
+
+#include "talk/base/task.h"
+
+namespace notifier {
+
+class Timer : private talk_base::Task {
+ public:
+ Timer(talk_base::Task* parent, int timeout_seconds, bool repeat);
+ ~Timer();
+
+ // Call Abort() to stop the timer.
+ using talk_base::Task::Abort;
+
+ // Call to find out when the timer is set to go off
+ // Returns int64
+ using talk_base::Task::get_timeout_time;
+
+ // Call to set the timeout interval.
+ using talk_base::Task::set_timeout_seconds;
+
+ using talk_base::Task::SignalTimeout;
+
+ private:
+ virtual int OnTimeout();
+ virtual int ProcessStart();
+
+ bool repeat_;
+
+ DISALLOW_COPY_AND_ASSIGN(Timer);
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_TIMER_H_
diff --git a/chrome/browser/sync/notifier/base/utils.h b/chrome/browser/sync/notifier/base/utils.h
new file mode 100644
index 0000000..2105233
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/utils.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utility functions
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_BASE_UTILS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_BASE_UTILS_H_
+
+#include <map>
+#include <string>
+
+#include "chrome/browser/sync/notifier/base/static_assert.h"
+
+// return error if the first argument evaluates to false
+#define RET_IF_FALSE(x) do { if (!(x)) return false; } while (false)
+
+// Protocol constants
+const char kHttpProto[] = "http://";
+const char kHttpsProto[] = "https://";
+
+// Initialize a POD to zero.
+// Using this function requires discipline. Don't use for types that have a
+// v-table or virtual bases.
+template <typename T>
+inline void SetZero(T& p) {
+ // Guard against the easy mistake of
+ // foo(int *p) { SetZero(p); } instead of
+ // SetZero(*p);
+ // which it should be.
+ STATIC_ASSERT(sizeof(T) != sizeof(void*));
+
+ // A POD (plain old data) object has one of these data types:
+ // a fundamental type, union, struct, array,
+ // or class--with no constructor. PODs don't have virtual functions or
+ // virtual bases.
+
+ // Test to see if the type has constructors.
+ union CtorTest {
+ T t;
+ int i;
+ };
+
+ // TODO(sync) There might be a way to test if the type has virtuals
+ // For now, if we zero a type with virtuals by mistake, it is going to crash
+ // predictable at run-time when the virtuals are called.
+ memset(&p, 0, sizeof(T));
+}
+
+// Used to delete each element in a vector<T*>/deque<T*>
+// (and then empty the sequence).
+template <class T>
+void CleanupSequence(T* items) {
+ for (typename T::iterator it(items->begin()); it != items->end(); ++it) {
+ delete (*it);
+ }
+ items->clear();
+}
+
+// Typically used to clean up values used in a hash_map
+// that had Type* as values.
+//
+// WARNING: This function assumes that T::clear will not access the values
+// (or the keys if they are the same as the values). This is true
+// for hash_map.
+template <class T>
+void CleanupMap(T* items) {
+ // This is likely slower than a for loop, but we have to do it this way. In
+ // some of the maps we use, deleting it->second causes it->first to be deleted
+ // as well, and that seems to send the iterator in a tizzy.
+ typename T::iterator it = items->begin();
+ while (it != items->end()) {
+ items->erase(it->first);
+ delete it->second;
+ it = items->begin();
+ }
+}
+
+// Get the value of an element in the map with the specified name
+template <class T>
+void GetMapElement(const std::map<const std::string, const T>& m,
+ const char* name,
+ T* value) {
+ typename std::map<const std::string, const T>::const_iterator iter(
+ m.find(name));
+ if (iter != m.end()) {
+ *value = iter->second;
+ }
+}
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_BASE_UTILS_H_
diff --git a/chrome/browser/sync/notifier/base/win32/async_network_alive_win32.cc b/chrome/browser/sync/notifier/base/win32/async_network_alive_win32.cc
new file mode 100644
index 0000000..b344817
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/win32/async_network_alive_win32.cc
@@ -0,0 +1,233 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <winsock2.h>
+
+#include "chrome/browser/sync/notifier/base/async_network_alive.h"
+#include "chrome/browser/sync/notifier/base/utils.h"
+#include "talk/base/criticalsection.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/common.h"
+#include "third_party/smartany/scoped_any.h"
+
+namespace notifier {
+class PlatformNetworkInfo {
+ public:
+ PlatformNetworkInfo() : ws_handle_(NULL), event_handle_(NULL) {
+ }
+
+ ~PlatformNetworkInfo() {
+ Close();
+ }
+
+ void Close() {
+ talk_base::CritScope crit_scope(&crit_sect_);
+ if (ws_handle_) {
+ if (event_handle_) // unblock any waiting for network changes
+ SetEvent(get(event_handle_));
+ // finishes the iteration.
+ VERIFY(WSALookupServiceEnd(ws_handle_) == 0);
+ ws_handle_ = NULL;
+ LOG_F(LS_INFO) << "WSACleanup 1";
+ ::WSACleanup();
+ }
+ }
+
+ bool IsAlive(bool* error) {
+ ASSERT(error);
+ *error = false;
+
+ // If IsAlive was previously called, we need a new handle.
+ // Why? If we use the same handle, we only get diffs on what changed
+ // which isn't what we want.
+ Close();
+ int result = Initialize();
+ if (result != 0) {
+ LOG_F(LS_ERROR) << "failed:" << result;
+ // Default to alive on error.
+ *error = true;
+ return true;
+ }
+
+ bool alive = false;
+
+ // Retrieve network info and move to next one. In this function, we only
+ // need to know whether or not there is network connection.
+ // allocate 256 bytes for name, it should be enough for most cases.
+ // If the name is longer, it is OK as we will check the code returned and
+ // set correct network status.
+ char result_buffer[sizeof(WSAQUERYSET) + 256] = {0};
+ bool flush_previous_result = false;
+ do {
+ DWORD control_flags = LUP_RETURN_NAME;
+ if (flush_previous_result) {
+ control_flags |= LUP_FLUSHPREVIOUS;
+ }
+ DWORD length = sizeof(result_buffer);
+ reinterpret_cast<WSAQUERYSET*>(&result_buffer[0])->dwSize =
+ sizeof(WSAQUERYSET);
+ // ws_handle_ may be NULL (if exiting), but the call will simply fail
+ int result = ::WSALookupServiceNext(
+ ws_handle_,
+ control_flags,
+ &length,
+ reinterpret_cast<WSAQUERYSET*>(&result_buffer[0]));
+
+ if (result == 0) {
+ // get at least one connection, return "connected".
+ alive = true;
+ } else {
+ ASSERT(result == SOCKET_ERROR);
+ result = ::WSAGetLastError();
+ if (result == WSA_E_NO_MORE || result == WSAENOMORE) {
+ break;
+ }
+
+ // Error code WSAEFAULT means there is a network connection but the
+ // result_buffer size is too small to contain the results. The
+ // variable "length" returned from WSALookupServiceNext is the minimum
+ // number of bytes required. We do not need to retrieve detail info.
+ // Return "alive" in this case.
+ if (result == WSAEFAULT) {
+ alive = true;
+ flush_previous_result = true;
+ } else {
+ LOG_F(LS_WARNING) << "failed:" << result;
+ *error = true;
+ break;
+ }
+ }
+ } while (true);
+ LOG_F(LS_INFO) << "alive: " << alive;
+ return alive;
+ }
+
+ bool WaitForChange() {
+ // IsAlive must be called first.
+ int junk1 = 0, junk2 = 0;
+ DWORD bytes_returned = 0;
+ int result = SOCKET_ERROR;
+ {
+ talk_base::CritScope crit_scope(&crit_sect_);
+ if (!ws_handle_)
+ return false;
+ ASSERT(!event_handle_);
+ reset(event_handle_, ::CreateEvent(NULL, FALSE, FALSE, NULL));
+ if (!event_handle_) {
+ LOG_F(LS_WARNING) << "failed to CreateEvent";
+ return false;
+ }
+ WSAOVERLAPPED overlapped = {0};
+ overlapped.hEvent = get(event_handle_);
+ WSACOMPLETION completion;
+ ::SetZero(completion);
+ completion.Type = NSP_NOTIFY_EVENT;
+ completion.Parameters.Event.lpOverlapped = &overlapped;
+
+ LOG_F(LS_INFO) << "calling WSANSPIoctl";
+ // Do a non-blocking request for change notification. event_handle_
+ // will get signaled when there is a change, so we wait on it later.
+ // It can also be signaled by Close() in order allow clean termination.
+ result = ::WSANSPIoctl(ws_handle_,
+ SIO_NSP_NOTIFY_CHANGE,
+ &junk1,
+ 0,
+ &junk2,
+ 0,
+ &bytes_returned,
+ &completion);
+ }
+ if (NO_ERROR != result) {
+ result = ::WSAGetLastError();
+ if (WSA_IO_PENDING != result) {
+ LOG_F(LS_WARNING) << "failed: " << result;
+ reset(event_handle_);
+ return false;
+ }
+ }
+ LOG_F(LS_INFO) << "waiting";
+ WaitForSingleObject(get(event_handle_), INFINITE);
+ reset(event_handle_);
+ LOG_F(LS_INFO) << "changed";
+ return true;
+ }
+
+ private:
+ int Initialize() {
+ WSADATA wsa_data;
+ LOG_F(LS_INFO) << "calling WSAStartup";
+ int result = ::WSAStartup(MAKEWORD(2, 2), &wsa_data);
+ if (result != ERROR_SUCCESS) {
+ LOG_F(LS_ERROR) << "failed:" << result;
+ return result;
+ }
+
+ WSAQUERYSET query_set = {0};
+ query_set.dwSize = sizeof(WSAQUERYSET);
+ query_set.dwNameSpace = NS_NLA;
+ // Initiate a client query to iterate through the
+ // currently connected networks.
+ if (0 != ::WSALookupServiceBegin(&query_set, LUP_RETURN_ALL,
+ &ws_handle_)) {
+ result = ::WSAGetLastError();
+ LOG_F(LS_INFO) << "WSACleanup 2";
+ ::WSACleanup();
+ ASSERT(ws_handle_ == NULL);
+ ws_handle_ = NULL;
+ return result;
+ }
+ return 0;
+ }
+ talk_base::CriticalSection crit_sect_;
+ HANDLE ws_handle_;
+ scoped_event event_handle_;
+ DISALLOW_COPY_AND_ASSIGN(PlatformNetworkInfo);
+};
+
+class AsyncNetworkAliveWin32 : public AsyncNetworkAlive {
+ public:
+ AsyncNetworkAliveWin32() {
+ }
+
+ virtual ~AsyncNetworkAliveWin32() {
+ if (network_info_) {
+ delete network_info_;
+ network_info_ = NULL;
+ }
+ }
+
+ protected:
+ // SignalThread Interface
+ virtual void DoWork() {
+ if (!network_info_) {
+ network_info_ = new PlatformNetworkInfo();
+ } else {
+ // Since network_info is set, it means that
+ // we are suppose to wait for network state changes.
+ if (!network_info_->WaitForChange()) {
+ // The wait was aborted so we must be shutting down.
+ alive_ = false;
+ error_ = true;
+ return;
+ }
+ }
+ alive_ = network_info_->IsAlive(&error_);
+ }
+
+ virtual void OnWorkStop() {
+ if (network_info_) {
+ network_info_->Close();
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsyncNetworkAliveWin32);
+};
+
+AsyncNetworkAlive* AsyncNetworkAlive::Create() {
+ return new AsyncNetworkAliveWin32();
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/base/win32/time_win32.cc b/chrome/browser/sync/notifier/base/win32/time_win32.cc
new file mode 100644
index 0000000..34a53fe
--- /dev/null
+++ b/chrome/browser/sync/notifier/base/win32/time_win32.cc
@@ -0,0 +1,158 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Time functions
+
+#include <time.h>
+#include <windows.h>
+
+#include "chrome/browser/sync/notifier/base/time.h"
+
+#include "chrome/browser/sync/notifier/base/utils.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+
+namespace notifier {
+
+time64 FileTimeToTime64(const FILETIME& file_time) {
+ return static_cast<time64>(file_time.dwHighDateTime) << 32 |
+ file_time.dwLowDateTime;
+}
+
+void Time64ToFileTime(const time64& time, FILETIME* ft) {
+ ASSERT(ft);
+
+ ft->dwHighDateTime = static_cast<DWORD>(time >> 32);
+ ft->dwLowDateTime = static_cast<DWORD>(time & 0xffffffff);
+}
+
+void TmTimeToSystemTime(const struct tm& tm, SYSTEMTIME* sys_time) {
+ ASSERT(sys_time);
+
+ SetZero(*sys_time);
+ // tm's year is 1900 based, systemtime's year is absolute
+ sys_time->wYear = tm.tm_year + 1900;
+ // tm's month is 0 based, but systemtime's month is 1 based
+ sys_time->wMonth = tm.tm_mon + 1;
+ sys_time->wDay = tm.tm_mday;
+ sys_time->wDayOfWeek = tm.tm_wday;
+ sys_time->wHour = tm.tm_hour;
+ sys_time->wMinute = tm.tm_min;
+ sys_time->wSecond = tm.tm_sec;
+}
+
+void SystemTimeToTmTime(const SYSTEMTIME& sys_time, struct tm* tm) {
+ ASSERT(tm);
+
+ SetZero(*tm);
+ // tm's year is 1900 based, systemtime's year is absolute
+ tm->tm_year = sys_time.wYear - 1900;
+ // tm's month is 0 based, but systemtime's month is 1 based
+ tm->tm_mon = sys_time.wMonth - 1;
+ tm->tm_mday = sys_time.wDay;
+ tm->tm_wday = sys_time.wDayOfWeek;
+ tm->tm_hour = sys_time.wHour;
+ tm->tm_min = sys_time.wMinute;
+ tm->tm_sec = sys_time.wSecond;
+}
+
+time64 GetCurrent100NSTime() {
+ // In order to get the 100ns time we shouldn't use SystemTime
+ // as it's granularity is 1 ms. Below is the correct implementation.
+ // On the other hand the system clock granularity is 15 ms, so we
+ // are not gaining much by having the timestamp in nano-sec
+ // If we decise to go with ms, divide "time64 time" by 10000
+
+ FILETIME file_time;
+ ::GetSystemTimeAsFileTime(&file_time);
+
+ time64 time = FileTimeToTime64(file_time);
+ return time;
+}
+
+time64 TmToTime64(const struct tm& tm) {
+ SYSTEMTIME sys_time;
+ TmTimeToSystemTime(tm, &sys_time);
+
+ FILETIME file_time;
+ SetZero(file_time);
+ if (!::SystemTimeToFileTime(&sys_time, &file_time)) {
+ return 0;
+ }
+
+ return FileTimeToTime64(file_time);
+}
+
+bool Time64ToTm(time64 t, struct tm* tm) {
+ ASSERT(tm);
+
+ FILETIME file_time;
+ SetZero(file_time);
+ Time64ToFileTime(t, &file_time);
+
+ SYSTEMTIME sys_time;
+ SetZero(sys_time);
+ if (!::FileTimeToSystemTime(&file_time, &sys_time)) {
+ return false;
+ }
+
+ SystemTimeToTmTime(sys_time, tm);
+
+ return true;
+}
+
+bool UtcTimeToLocalTime(struct tm* tm) {
+ ASSERT(tm);
+
+ SYSTEMTIME utc_time;
+ TmTimeToSystemTime(*tm, &utc_time);
+
+ TIME_ZONE_INFORMATION time_zone;
+ if (::GetTimeZoneInformation(&time_zone) == TIME_ZONE_ID_INVALID) {
+ return false;
+ }
+
+ SYSTEMTIME local_time;
+ if (!::SystemTimeToTzSpecificLocalTime(&time_zone, &utc_time, &local_time)) {
+ return false;
+ }
+
+ SystemTimeToTmTime(local_time, tm);
+
+ return true;
+}
+
+bool LocalTimeToUtcTime(struct tm* tm) {
+ ASSERT(tm);
+
+ SYSTEMTIME local_time;
+ TmTimeToSystemTime(*tm, &local_time);
+
+ // Get the bias, which when added to local, gives UTC
+ TIME_ZONE_INFORMATION time_zone;
+ if (::GetTimeZoneInformation(&time_zone) == TIME_ZONE_ID_INVALID) {
+ return false;
+ }
+
+ // By negating the biases, we can get translation from UTC to local
+ time_zone.Bias *= -1;
+ time_zone.DaylightBias *= -1;
+ time_zone.StandardBias *= -1; // this is 0 but negating for completness
+
+ // We'll tell SystemTimeToTzSpecificLocalTime that the local time is actually
+ // UTC. With the negated bias, the "local" time that the API returns will
+ // actually be UTC. Casting the const off because
+ // SystemTimeToTzSpecificLocalTime's definition requires it, although the
+ // value is not modified.
+ SYSTEMTIME utc_time;
+ if (!::SystemTimeToTzSpecificLocalTime(&time_zone, &local_time, &utc_time)) {
+ return false;
+ }
+
+ SystemTimeToTmTime(utc_time, tm);
+
+ return true;
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/auth_task.cc b/chrome/browser/sync/notifier/communicator/auth_task.cc
new file mode 100644
index 0000000..11eba2d
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/auth_task.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/auth_task.h"
+
+#include "chrome/browser/sync/notifier/gaia_auth/gaiaauth.h"
+#include "chrome/browser/sync/notifier/communicator/login.h"
+#include "chrome/browser/sync/notifier/communicator/login_settings.h"
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "talk/base/common.h"
+#include "talk/base/urlencode.h"
+#include "talk/xmpp/xmppclient.h"
+
+namespace notifier {
+const char kTalkGadgetAuthPath[] = "/auth";
+
+AuthTask::AuthTask(talk_base::Task* parent, Login* login, const char* url)
+ : talk_base::Task(parent),
+ login_(login),
+ url_(url),
+ use_gaia_redirect_(true) {
+ ASSERT(login && !url_.empty());
+}
+
+int AuthTask::ProcessStart() {
+ auth_.reset(new buzz::GaiaAuth(GetUserAgentString(),
+ GetProductSignature()));
+ auth_->SignalAuthDone.connect(this, &AuthTask::OnAuthDone);
+ auth_->StartTokenAuth(login_->xmpp_client()->jid().BareJid(),
+ login_->login_settings().user_settings().pass(),
+ use_gaia_redirect_ ? "gaia" : service_);
+ return STATE_RESPONSE;
+}
+
+int AuthTask::ProcessResponse() {
+ ASSERT(auth_.get());
+ if (!auth_->IsAuthDone()) {
+ return STATE_BLOCKED;
+ }
+ if (!auth_->IsAuthorized()) {
+ SignalAuthError(!auth_->HadError());
+ return STATE_ERROR;
+ }
+
+ std::string uber_url;
+ if (use_gaia_redirect_) {
+ uber_url = auth_->CreateAuthenticatedUrl(url_, service_);
+ } else {
+ uber_url = redir_auth_prefix_ + auth_->GetAuthCookie();
+ uber_url += redir_continue_;
+ uber_url += UrlEncodeString(url_);
+ }
+
+ if (uber_url == "") {
+ SignalAuthError(true);
+ return STATE_ERROR;
+ }
+
+ SignalAuthDone(uber_url);
+ return STATE_DONE;
+}
+
+
+void AuthTask::OnAuthDone() {
+ Wake();
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/auth_task.h b/chrome/browser/sync/notifier/communicator/auth_task.h
new file mode 100644
index 0000000..b5141f8
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/auth_task.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTH_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTH_TASK_H_
+
+#include <string>
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/task.h"
+
+namespace buzz {
+class GaiaAuth;
+}
+
+namespace notifier {
+class Login;
+
+// Create an authenticated talk url from an unauthenticated url
+class AuthTask : public talk_base::Task, public sigslot::has_slots<> {
+ public:
+ AuthTask(talk_base::Task* parent, Login* login, const char* url);
+
+ // An abort method which doesn't take any parameters.
+ // (talk_base::Task::Abort() has a default parameter.)
+ //
+ // The primary purpose of this method is to allow a
+ // signal to be hooked up to abort this task.
+ void Abort() {
+ talk_base::Task::Abort();
+ }
+
+ void set_service(const char* service) {
+ service_ = service;
+ }
+
+ void set_use_gaia_redirect(bool use_gaia_redirect) {
+ use_gaia_redirect_ = use_gaia_redirect;
+ }
+
+ void set_redir_auth_prefix(const char* redir_auth_prefix) {
+ redir_auth_prefix_ = redir_auth_prefix;
+ }
+
+ void set_redir_continue(const char* redir_continue) {
+ redir_continue_ = redir_continue;
+ }
+
+ sigslot::signal1<const std::string&> SignalAuthDone;
+ sigslot::signal1<bool> SignalAuthError;
+
+ protected:
+ virtual int ProcessStart();
+ virtual int ProcessResponse();
+
+ private:
+ void OnAuthDone();
+
+ scoped_ptr<buzz::GaiaAuth> auth_;
+ Login* login_;
+ std::string service_;
+ std::string url_;
+
+ // the following members are used for cases where we don't want to
+ // redirect through gaia, but rather via the end-site's mechanism
+ // (We need this for orkut)
+ bool use_gaia_redirect_;
+ std::string redir_auth_prefix_;
+ std::string redir_continue_;
+
+ DISALLOW_COPY_AND_ASSIGN(AuthTask);
+};
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTH_TASK_H_
diff --git a/chrome/browser/sync/notifier/communicator/auto_reconnect.cc b/chrome/browser/sync/notifier/communicator/auto_reconnect.cc
new file mode 100644
index 0000000..eadfe46
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/auto_reconnect.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/auto_reconnect.h"
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task.h"
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "chrome/browser/sync/notifier/base/timer.h"
+#include "talk/base/common.h"
+
+namespace notifier {
+const int kResetReconnectInfoDelaySec = 2;
+
+AutoReconnect::AutoReconnect(talk_base::Task* parent,
+ NetworkStatusDetectorTask* network_status)
+ : reconnect_interval_ns_(0),
+ reconnect_timer_(NULL),
+ delayed_reset_timer_(NULL),
+ parent_(parent),
+ is_idle_(false) {
+ SetupReconnectInterval();
+ if (network_status) {
+ network_status->SignalNetworkStateDetected.connect(
+ this, &AutoReconnect::OnNetworkStateDetected);
+ }
+}
+
+void AutoReconnect::OnNetworkStateDetected(bool was_alive, bool is_alive) {
+ if (is_retrying() && !was_alive && is_alive) {
+ // Reconnect in 1 to 9 seconds (vary the time a little to try to avoid
+ // spikey behavior on network hiccups).
+ StartReconnectTimerWithInterval((rand() % 9 + 1) * kSecsTo100ns);
+ }
+}
+
+int AutoReconnect::seconds_until() const {
+ if (!is_retrying() || !reconnect_timer_->get_timeout_time()) {
+ return 0;
+ }
+ int64 time_until_100ns =
+ reconnect_timer_->get_timeout_time() - GetCurrent100NSTime();
+ if (time_until_100ns < 0) {
+ return 0;
+ }
+
+ // Do a ceiling on the value (to avoid returning before its time)
+ return (time_until_100ns + kSecsTo100ns - 1) / kSecsTo100ns;
+}
+
+void AutoReconnect::StartReconnectTimer() {
+ StartReconnectTimerWithInterval(reconnect_interval_ns_);
+}
+
+void AutoReconnect::StartReconnectTimerWithInterval(time64 interval_ns) {
+ // Don't call StopReconnectTimer because we don't
+ // want other classes to detect that the intermediate state of
+ // the timer being stopped. (We're avoiding the call to SignalTimerStartStop
+ // while reconnect_timer_ is NULL.)
+ if (reconnect_timer_) {
+ reconnect_timer_->Abort();
+ reconnect_timer_ = NULL;
+ }
+ reconnect_timer_ = new Timer(parent_,
+ static_cast<int>(interval_ns / kSecsTo100ns),
+ false); // repeat
+ reconnect_timer_->SignalTimeout.connect(this,
+ &AutoReconnect::DoReconnect);
+ SignalTimerStartStop();
+}
+
+void AutoReconnect::DoReconnect() {
+ reconnect_timer_ = NULL;
+
+ // if timed out again, double autoreconnect time up to 30 minutes
+ reconnect_interval_ns_ *= 2;
+ if (reconnect_interval_ns_ > 30 * kMinsTo100ns) {
+ reconnect_interval_ns_ = 30 * kMinsTo100ns;
+ }
+ SignalStartConnection();
+}
+
+void AutoReconnect::StopReconnectTimer() {
+ if (reconnect_timer_) {
+ reconnect_timer_->Abort();
+ reconnect_timer_ = NULL;
+ SignalTimerStartStop();
+ }
+}
+
+void AutoReconnect::StopDelayedResetTimer() {
+ if (delayed_reset_timer_) {
+ delayed_reset_timer_->Abort();
+ delayed_reset_timer_ = NULL;
+ }
+}
+
+void AutoReconnect::ResetState() {
+ StopDelayedResetTimer();
+ StopReconnectTimer();
+ SetupReconnectInterval();
+}
+
+void AutoReconnect::SetupReconnectInterval() {
+ if (is_idle_) {
+ // If we were idle, start the timer over again (120 - 360 seconds).
+ reconnect_interval_ns_ = (rand() % 240 + 120) * kSecsTo100ns;
+ } else {
+ // If we weren't idle, try the connection 5 - 25 seconds later.
+ reconnect_interval_ns_ = (rand() % 20 + 5) * kSecsTo100ns;
+ }
+}
+
+void AutoReconnect::OnPowerSuspend(bool suspended) {
+ if (suspended) {
+ // When the computer comes back on, ensure that the reconnect
+ // happens quickly (5 - 25 seconds).
+ reconnect_interval_ns_ = (rand() % 20 + 5) * kSecsTo100ns;
+ }
+}
+
+void AutoReconnect::OnClientStateChange(Login::ConnectionState state) {
+ // On any state change, stop the reset timer.
+ StopDelayedResetTimer();
+ switch (state) {
+ case Login::STATE_RETRYING:
+ // do nothing
+ break;
+
+ case Login::STATE_CLOSED:
+ // When the user has been logged out and no auto-reconnect
+ // is happening, then the autoreconnect intervals should be
+ // reset.
+ ResetState();
+ break;
+
+ case Login::STATE_OPENING:
+ StopReconnectTimer();
+ break;
+
+ case Login::STATE_OPENED:
+ // Reset autoreconnect timeout sequence after being connected
+ // for a bit of time. This helps in the case that we are
+ // connecting briefly and then getting disconnect like when
+ // an account hits an abuse limit.
+ StopReconnectTimer();
+ delayed_reset_timer_ = new Timer(parent_,
+ kResetReconnectInfoDelaySec,
+ false); // repeat
+ delayed_reset_timer_->SignalTimeout.connect(this,
+ &AutoReconnect::ResetState);
+ break;
+ }
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/auto_reconnect.h b/chrome/browser/sync/notifier/communicator/auto_reconnect.h
new file mode 100644
index 0000000..f4ee4ec
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/auto_reconnect.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTO_RECONNECT_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTO_RECONNECT_H_
+#include <string>
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "chrome/browser/sync/notifier/communicator/login.h"
+#include "talk/base/sigslot.h"
+
+namespace talk_base {
+class Task;
+}
+
+namespace notifier {
+class NetworkStatusDetectorTask;
+class Timer;
+
+class AutoReconnect : public sigslot::has_slots<> {
+ public:
+ AutoReconnect(talk_base::Task* parent,
+ NetworkStatusDetectorTask* network_status);
+ void StartReconnectTimer();
+ void StopReconnectTimer();
+ void OnClientStateChange(Login::ConnectionState state);
+
+ // Callback when power is suspended
+ void OnPowerSuspend(bool suspended);
+
+ void set_idle(bool idle) {
+ is_idle_ = idle;
+ }
+
+ // Returns true if the auto-retry is to be done (pending a countdown)
+ bool is_retrying() const {
+ return reconnect_timer_ != NULL;
+ }
+
+ int seconds_until() const;
+ sigslot::signal0<> SignalTimerStartStop;
+ sigslot::signal0<> SignalStartConnection;
+ private:
+ void StartReconnectTimerWithInterval(time64 interval_ns);
+ void DoReconnect();
+ void ResetState();
+ void SetupReconnectInterval();
+ void StopDelayedResetTimer();
+
+ void OnNetworkStateDetected(bool was_alive, bool is_alive);
+
+ time64 reconnect_interval_ns_;
+ Timer* reconnect_timer_;
+ Timer* delayed_reset_timer_;
+ talk_base::Task* parent_;
+
+ bool is_idle_;
+ DISALLOW_COPY_AND_ASSIGN(AutoReconnect);
+};
+
+// Wait 2 seconds until after we actually connect to
+// reset reconnect related items.
+//
+// The reason for this delay is to avoid the situation in which buzz
+// is trying to block the client due to abuse and the client responses
+// by going into rapid reconnect mode, which makes the problem more severe.
+extern const int kResetReconnectInfoDelaySec;
+
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_AUTO_RECONNECT_H_
diff --git a/chrome/browser/sync/notifier/communicator/connection_options.cc b/chrome/browser/sync/notifier/communicator/connection_options.cc
new file mode 100644
index 0000000..2d49bb6
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/connection_options.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+
+namespace notifier {
+
+ConnectionOptions::ConnectionOptions()
+ : autodetect_proxy_(true),
+ auto_reconnect_(true),
+ proxy_port_(0),
+ use_proxy_auth_(0),
+ allow_unverified_certs_(false) {
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/connection_options.h b/chrome/browser/sync/notifier/communicator/connection_options.h
new file mode 100644
index 0000000..6b559f0
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/connection_options.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_OPTIONS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_OPTIONS_H_
+
+#include <string>
+
+#include "talk/base/cryptstring.h"
+#include "talk/base/helpers.h"
+
+namespace notifier {
+
+class ConnectionOptions {
+ public:
+ ConnectionOptions();
+
+ bool autodetect_proxy() const { return autodetect_proxy_; }
+ bool auto_reconnect() const { return auto_reconnect_; }
+ const std::string& proxy_host() const { return proxy_host_; }
+ int proxy_port() const { return proxy_port_; }
+ bool use_proxy_auth() const { return use_proxy_auth_; }
+ const std::string& auth_user() const { return auth_user_; }
+ const talk_base::CryptString& auth_pass() const { return auth_pass_; }
+ bool allow_unverified_certs() const { return allow_unverified_certs_; }
+
+ void set_autodetect_proxy(bool f) { autodetect_proxy_ = f; }
+ void set_auto_reconnect(bool f) { auto_reconnect_ = f; }
+ void set_proxy_host(const std::string& val) { proxy_host_ = val; }
+ void set_proxy_port(int val) { proxy_port_ = val; }
+ void set_use_proxy_auth(bool f) { use_proxy_auth_ = f; }
+ void set_auth_user(const std::string& val) { auth_user_ = val; }
+ void set_auth_pass(const talk_base::CryptString& val) { auth_pass_ = val; }
+
+ // Setting this to true opens a security hole, so it is
+ // *highly* recommended that you don't do this.
+ void set_allow_unverified_certs(bool allow_unverified_certs) {
+ allow_unverified_certs_ = allow_unverified_certs;
+ }
+
+ private:
+ bool autodetect_proxy_;
+ bool auto_reconnect_;
+ std::string proxy_host_;
+ int proxy_port_;
+ bool use_proxy_auth_;
+ std::string auth_user_;
+ talk_base::CryptString auth_pass_;
+ bool allow_unverified_certs_;
+ // allow the copy constructor and operator=
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_OPTIONS_H_
diff --git a/chrome/browser/sync/notifier/communicator/connection_settings.cc b/chrome/browser/sync/notifier/communicator/connection_settings.cc
new file mode 100644
index 0000000..320a396
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/connection_settings.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/notifier/communicator/connection_settings.h"
+#include "talk/base/helpers.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace notifier {
+
+class RandomGenerator {
+ public:
+ int operator()(int ceiling) {
+ return static_cast<int>(cricket::CreateRandomId() % ceiling);
+ }
+};
+
+void ConnectionSettings::FillXmppClientSettings(
+ buzz::XmppClientSettings* xcs) const {
+ assert(xcs);
+ xcs->set_protocol(protocol_);
+ xcs->set_server(server_);
+ xcs->set_proxy(proxy_.type);
+ if (proxy_.type != talk_base::PROXY_NONE) {
+ xcs->set_proxy_host(proxy_.address.IPAsString());
+ xcs->set_proxy_port(proxy_.address.port());
+ }
+ if ((proxy_.type != talk_base::PROXY_NONE) && !proxy_.username.empty()) {
+ xcs->set_use_proxy_auth(true);
+ xcs->set_proxy_user(proxy_.username);
+ xcs->set_proxy_pass(proxy_.password);
+ } else {
+ xcs->set_use_proxy_auth(false);
+ }
+}
+
+void ConnectionSettingsList::AddPermutations(const std::string& hostname,
+ const std::vector<uint32>& iplist,
+ int16 port,
+ bool special_port_magic,
+ bool proxy_only) {
+ // randomize the list. This ensures the iplist isn't always
+ // evaluated in the order returned by DNS
+ std::vector<uint32> iplist_random = iplist;
+ RandomGenerator rg;
+ std::random_shuffle(iplist_random.begin(), iplist_random.end(), rg);
+
+ // Put generated addresses in a new deque, then append on the list_, since
+ // there are order dependencies and AddPermutations() may be called more
+ // than once.
+ std::deque<ConnectionSettings> list_temp;
+
+ // Permute addresses for this server. In some cases we haven't resolved the
+ // to ip addresses.
+ talk_base::SocketAddress server(hostname, port, false);
+ if (iplist_random.empty()) {
+ // We couldn't pre-resolve the hostname, so let's hope it will resolve
+ // further down the pipeline (by a proxy, for example).
+ PermuteForAddress(server, special_port_magic, proxy_only, &list_temp);
+ } else {
+ // Generate a set of possibilities for each server address.
+ // Don't do permute duplicates.
+ for (size_t index = 0; index < iplist_random.size(); ++index) {
+ if (std::find(iplist_seen_.begin(), iplist_seen_.end(),
+ iplist_random[index]) != iplist_seen_.end()) {
+ continue;
+ }
+ iplist_seen_.push_back(iplist_random[index]);
+ server.SetResolvedIP(iplist_random[index]);
+ PermuteForAddress(server, special_port_magic, proxy_only, &list_temp);
+ }
+ }
+
+ // Add this list to the instance list
+ while (list_temp.size() != 0) {
+ list_.push_back(list_temp[0]);
+ list_temp.pop_front();
+ }
+}
+
+
+void ConnectionSettingsList::PermuteForAddress(
+ const talk_base::SocketAddress& server,
+ bool special_port_magic,
+ bool proxy_only,
+ std::deque<ConnectionSettings>* list_temp) {
+ assert(list_temp);
+ *(template_.mutable_server()) = server;
+
+ // Use all of the original settings
+ list_temp->push_back(template_);
+
+ // Try alternate port
+ if (special_port_magic) {
+ ConnectionSettings settings(template_);
+ settings.set_protocol(cricket::PROTO_SSLTCP);
+ settings.mutable_server()->SetPort(443);
+ // HTTPS proxies usually require port 443, so try it first
+ if ((template_.proxy().type == talk_base::PROXY_HTTPS) ||
+ (template_.proxy().type == talk_base::PROXY_UNKNOWN)) {
+ list_temp->push_front(settings);
+ } else {
+ list_temp->push_back(settings);
+ }
+ }
+
+ if (!proxy_only) {
+ // Try without the proxy
+ if (template_.proxy().type != talk_base::PROXY_NONE) {
+ ConnectionSettings settings(template_);
+ settings.mutable_proxy()->type = talk_base::PROXY_NONE;
+ list_temp->push_back(settings);
+
+ if (special_port_magic) {
+ settings.set_protocol(cricket::PROTO_SSLTCP);
+ settings.mutable_server()->SetPort(443);
+ list_temp->push_back(settings);
+ }
+ }
+ }
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/connection_settings.h b/chrome/browser/sync/notifier/communicator/connection_settings.h
new file mode 100644
index 0000000..d83b1fc
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/connection_settings.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_SETTINGS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_SETTINGS_H_
+#include <deque>
+#include <string>
+#include <vector>
+
+#include "talk/p2p/base/port.h"
+
+namespace buzz {
+ class XmppClientSettings;
+}
+
+namespace notifier {
+
+class ConnectionSettings {
+ public:
+ ConnectionSettings() : protocol_(cricket::PROTO_TCP) {}
+
+ cricket::ProtocolType protocol() { return protocol_; }
+ const talk_base::SocketAddress& server() const { return server_; }
+ const talk_base::ProxyInfo& proxy() const { return proxy_; }
+
+ void set_protocol(cricket::ProtocolType protocol) { protocol_ = protocol; }
+ talk_base::SocketAddress* mutable_server() { return &server_; }
+ talk_base::ProxyInfo* mutable_proxy() { return &proxy_; }
+
+ void FillXmppClientSettings(buzz::XmppClientSettings* xcs) const;
+
+ private:
+ cricket::ProtocolType protocol_; // PROTO_TCP, PROTO_SSLTCP, etc.
+ talk_base::SocketAddress server_; // Server
+ talk_base::ProxyInfo proxy_; // Proxy info
+ // need copy constructor due to use in stl deque
+};
+
+class ConnectionSettingsList {
+ public:
+ ConnectionSettingsList() {}
+
+ void SetProxy(const talk_base::ProxyInfo& proxy) {
+ *(template_.mutable_proxy()) = proxy;
+ }
+
+ const talk_base::ProxyInfo& proxy() const {
+ return template_.proxy();
+ }
+
+ int GetCount() { return list_.size(); }
+ ConnectionSettings* GetSettings(size_t index) { return &list_[index]; }
+
+ void ClearPermutations() {
+ list_.clear();
+ iplist_seen_.clear();
+ }
+
+ void AddPermutations(const std::string& hostname,
+ const std::vector<uint32>& iplist,
+ int16 port,
+ bool special_port_magic,
+ bool proxy_only);
+ private:
+ void PermuteForAddress(const talk_base::SocketAddress& server,
+ bool special_port_magic,
+ bool proxy_only,
+ std::deque<ConnectionSettings>* list_temp);
+
+ ConnectionSettings template_;
+ std::deque<ConnectionSettings> list_;
+ std::vector<uint32> iplist_seen_;
+ DISALLOW_COPY_AND_ASSIGN(ConnectionSettingsList);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONNECTION_SETTINGS_H_
diff --git a/chrome/browser/sync/notifier/communicator/const_communicator.h b/chrome/browser/sync/notifier/communicator/const_communicator.h
new file mode 100644
index 0000000..79bb92e
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/const_communicator.h
@@ -0,0 +1,11 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONST_COMMUNICATOR_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONST_COMMUNICATOR_H_
+namespace notifier {
+// The default port for jabber/xmpp communications
+const int kDefaultXmppPort = 5222;
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_CONST_COMMUNICATOR_H_
diff --git a/chrome/browser/sync/notifier/communicator/login.cc b/chrome/browser/sync/notifier/communicator/login.cc
new file mode 100644
index 0000000..5614dba
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login.cc
@@ -0,0 +1,361 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/login.h"
+
+#include "chrome/browser/sync/notifier/base/network_status_detector_task.h"
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "chrome/browser/sync/notifier/base/timer.h"
+#include "chrome/browser/sync/notifier/communicator/auto_reconnect.h"
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+#include "chrome/browser/sync/notifier/communicator/login_settings.h"
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "chrome/browser/sync/notifier/communicator/single_login_attempt.h"
+#include "talk/base/common.h"
+#include "talk/base/firewallsocketserver.h"
+#include "talk/base/logging.h"
+#include "talk/base/taskrunner.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/asyncsocket.h"
+#include "talk/xmpp/prexmppauth.h"
+#include "talk/xmpp/xmppclient.h"
+#include "talk/xmpp/xmppclientsettings.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace notifier {
+
+// redirect valid for 5 minutes
+static const time64 kRedirectTimeoutNs = 5 * kMinsTo100ns;
+
+// Disconnect if network stays down for more than 10 seconds.
+static const int kDisconnectionDelaySecs = 10;
+
+Login::Login(talk_base::Task* parent,
+ const buzz::XmppClientSettings& user_settings,
+ const ConnectionOptions& options,
+ std::string lang,
+ ServerInformation* server_list,
+ int server_count,
+ NetworkStatusDetectorTask* network_status,
+ talk_base::FirewallManager* firewall,
+ bool no_gaia_auth,
+ bool proxy_only,
+ bool previous_login_successful)
+ : login_settings_(new LoginSettings(user_settings,
+ options,
+ lang,
+ server_list,
+ server_count,
+ firewall,
+ no_gaia_auth,
+ proxy_only)),
+ single_attempt_(NULL),
+ successful_connection_(previous_login_successful),
+ parent_(parent),
+ state_(STATE_OPENING),
+ redirect_time_ns_(0),
+ redirect_port_(0),
+ unexpected_disconnect_occurred_(false),
+ reset_unexpected_timer_(NULL),
+ google_host_(user_settings.host()),
+ google_user_(user_settings.user()),
+ disconnect_timer_(NULL) {
+ if (!network_status) {
+ network_status = NetworkStatusDetectorTask::Create(parent_);
+ if (network_status) {
+ // On linux we don't have an implementation of NetworkStatusDetectorTask.
+ network_status->Start();
+ }
+ }
+ network_status->SignalNetworkStateDetected.connect(
+ this, &Login::OnNetworkStateDetected);
+ auto_reconnect_.reset(new AutoReconnect(parent_, network_status));
+ auto_reconnect_->SignalStartConnection.connect(this,
+ &Login::StartConnection);
+ auto_reconnect_->SignalTimerStartStop.connect(
+ this,
+ &Login::OnAutoReconnectTimerChange);
+ SignalClientStateChange.connect(auto_reconnect_.get(),
+ &AutoReconnect::OnClientStateChange);
+ SignalIdleChange.connect(auto_reconnect_.get(),
+ &AutoReconnect::set_idle);
+ SignalPowerSuspended.connect(auto_reconnect_.get(),
+ &AutoReconnect::OnPowerSuspend);
+}
+
+// defined so that the destructors are executed here (and
+// the corresponding classes don't need to be included in
+// the header file)
+Login::~Login() {
+ if (single_attempt_) {
+ single_attempt_->Abort();
+ single_attempt_ = NULL;
+ }
+}
+
+void Login::StartConnection() {
+ // If there is a server redirect, use it.
+ if (GetCurrent100NSTime() < redirect_time_ns_ + kRedirectTimeoutNs) {
+ // Override server/port with redirect values
+ talk_base::SocketAddress server_override;
+ server_override.SetIP(redirect_server_, false);
+ ASSERT(redirect_port_ != 0);
+ server_override.SetPort(redirect_port_);
+ login_settings_->set_server_override(server_override);
+ } else {
+ login_settings_->clear_server_override();
+ }
+
+ if (single_attempt_) {
+ single_attempt_->Abort();
+ single_attempt_ = NULL;
+ }
+ single_attempt_ = new SingleLoginAttempt(parent_,
+ login_settings_.get(),
+ successful_connection_);
+
+ // Do the signaling hook-ups.
+ single_attempt_->SignalLoginFailure.connect(this, &Login::OnLoginFailure);
+ single_attempt_->SignalRedirect.connect(this, &Login::OnRedirect);
+ single_attempt_->SignalClientStateChange.connect(
+ this,
+ &Login::OnClientStateChange) ;
+ single_attempt_->SignalUnexpectedDisconnect.connect(
+ this,
+ &Login::OnUnexpectedDisconnect);
+ single_attempt_->SignalLogoff.connect(
+ this,
+ &Login::OnLogoff);
+ single_attempt_->SignalNeedAutoReconnect.connect(
+ this,
+ &Login::DoAutoReconnect);
+ SignalLogInput.repeat(single_attempt_->SignalLogInput);
+ SignalLogOutput.repeat(single_attempt_->SignalLogOutput);
+
+ single_attempt_->Start();
+}
+
+const std::string& Login::google_host() const {
+ return google_host_;
+}
+
+const std::string& Login::google_user() const {
+ return google_user_;
+}
+
+const talk_base::ProxyInfo& Login::proxy() const {
+ return proxy_info_;
+}
+
+void Login::OnLoginFailure(const LoginFailure& failure) {
+ auto_reconnect_->StopReconnectTimer();
+ HandleClientStateChange(STATE_CLOSED);
+ SignalLoginFailure(failure);
+}
+
+void Login::OnLogoff() {
+ HandleClientStateChange(STATE_CLOSED);
+}
+
+void Login::OnClientStateChange(buzz::XmppEngine::State state) {
+ ConnectionState new_state = STATE_CLOSED;
+
+ switch (state) {
+ case buzz::XmppEngine::STATE_NONE:
+ case buzz::XmppEngine::STATE_CLOSED:
+ // Ignore the closed state (because
+ // we may be trying the next dns entry).
+ //
+ // But we go to this state for other
+ // signals when there is no retry happening.
+ new_state = state_;
+ break;
+
+ case buzz::XmppEngine::STATE_START:
+ case buzz::XmppEngine::STATE_OPENING:
+ new_state = STATE_OPENING;
+ break;
+
+ case buzz::XmppEngine::STATE_OPEN:
+ new_state = STATE_OPENED;
+ break;
+
+ default:
+ ASSERT(false);
+ break;
+ }
+ HandleClientStateChange(new_state);
+}
+
+void Login::HandleClientStateChange(ConnectionState new_state) {
+ // Do we need to transition between the retrying and closed states?
+ if (auto_reconnect_->is_retrying()) {
+ if (new_state == STATE_CLOSED) {
+ new_state = STATE_RETRYING;
+ }
+ } else {
+ if (new_state == STATE_RETRYING) {
+ new_state = STATE_CLOSED;
+ }
+ }
+
+ if (new_state != state_) {
+ state_ = new_state;
+ if (reset_unexpected_timer_) {
+ reset_unexpected_timer_->Abort();
+ reset_unexpected_timer_ = NULL;
+ }
+
+ if (state_ == STATE_OPENED) {
+ successful_connection_ = true;
+
+ google_host_ = single_attempt_->xmpp_client()->jid().domain();
+ google_user_ = single_attempt_->xmpp_client()->jid().node();
+ proxy_info_ = single_attempt_->proxy();
+
+ reset_unexpected_timer_ = new Timer(parent_,
+ kResetReconnectInfoDelaySec,
+ false); // repeat
+ reset_unexpected_timer_->SignalTimeout.connect(
+ this,
+ &Login::ResetUnexpectedDisconnect);
+ }
+ SignalClientStateChange(state_);
+ }
+}
+
+void Login::OnAutoReconnectTimerChange() {
+ if (!single_attempt_ || !single_attempt_->xmpp_client()) {
+ HandleClientStateChange(STATE_CLOSED);
+ return;
+ }
+ OnClientStateChange(single_attempt_->xmpp_client()->GetState());
+}
+
+buzz::XmppClient* Login::xmpp_client() {
+ if (!single_attempt_) {
+ return NULL;
+ }
+ return single_attempt_->xmpp_client();
+}
+
+int Login::seconds_until_reconnect() const {
+ return auto_reconnect_->seconds_until();
+}
+
+void Login::UseNextConnection() {
+ if (!single_attempt_) {
+ // Just in case, there is an obscure case that causes
+ // this to get called when there is no single_attempt_.
+ return;
+ }
+ single_attempt_->UseNextConnection();
+}
+
+void Login::UseCurrentConnection() {
+ if (!single_attempt_) {
+ // Just in case, there is an obscure case that causes
+ // this to get called when there is no single_attempt_.
+ return;
+ }
+ single_attempt_->UseCurrentConnection();
+}
+
+void Login::OnRedirect(const std::string& redirect_server, int redirect_port) {
+ ASSERT(redirect_port_ != 0);
+
+ redirect_time_ns_ = GetCurrent100NSTime();
+ redirect_server_ = redirect_server;
+ redirect_port_ = redirect_port;
+
+ // Drop the current connection, and start the login process again
+ StartConnection();
+}
+
+void Login::OnUnexpectedDisconnect() {
+ if (reset_unexpected_timer_) {
+ reset_unexpected_timer_->Abort();
+ reset_unexpected_timer_ = NULL;
+ }
+
+ // Start the login process again
+ if (unexpected_disconnect_occurred_) {
+ // If we already have received an unexpected disconnect recently,
+ // then our account may have be jailed due to abuse, so we shouldn't
+ // make the situation worse by trying really hard to reconnect.
+ // Instead, we'll do the autoreconnect route, which has exponential
+ // back-off.
+ DoAutoReconnect();
+ return;
+ }
+ StartConnection();
+ unexpected_disconnect_occurred_ = true;
+}
+
+void Login::ResetUnexpectedDisconnect() {
+ reset_unexpected_timer_ = NULL;
+ unexpected_disconnect_occurred_ = false;
+}
+
+void Login::DoAutoReconnect() {
+ bool allow_auto_reconnect =
+ login_settings_->connection_options().auto_reconnect();
+ // Start the reconnect time before aborting the connection
+ // to ensure that AutoReconnect::is_retrying() is true, so
+ // that the Login doesn't transition to the CLOSED state
+ // (which would cause the reconnection timer to reset
+ // and not double).
+ if (allow_auto_reconnect) {
+ auto_reconnect_->StartReconnectTimer();
+ }
+
+ if (single_attempt_) {
+ single_attempt_->Abort();
+ single_attempt_ = NULL;
+ }
+
+ if (!allow_auto_reconnect) {
+ HandleClientStateChange(STATE_CLOSED);
+ return;
+ }
+}
+
+void Login::OnNetworkStateDetected(bool was_alive, bool is_alive) {
+ if (was_alive && !is_alive) {
+ // Our network connection just went down.
+ // Setup a timer to disconnect. Don't disconnect immediately to avoid
+ // constant connection/disconnection due to flaky network interfaces.
+ ASSERT(disconnect_timer_ == NULL);
+ disconnect_timer_ = new Timer(parent_, kDisconnectionDelaySecs, false);
+ disconnect_timer_->SignalTimeout.connect(this,
+ &Login::OnDisconnectTimeout);
+ } else if (!was_alive && is_alive) {
+ // Our connection has come back up. If we have a disconnect timer going,
+ // abort it so we don't disconnect.
+ if (disconnect_timer_) {
+ disconnect_timer_->Abort();
+ // It will free itself.
+ disconnect_timer_ = NULL;
+ }
+ }
+}
+
+void Login::OnDisconnectTimeout() {
+ disconnect_timer_ = NULL;
+
+ if (state_ != STATE_OPENED) {
+ return;
+ }
+
+ if (single_attempt_) {
+ single_attempt_->Abort();
+ single_attempt_ = NULL;
+ }
+
+ StartConnection();
+}
+
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/login.h b/chrome/browser/sync/notifier/communicator/login.h
new file mode 100644
index 0000000..480f52b
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_H_
+#include <string>
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "chrome/browser/sync/notifier/gaia_auth/sigslotrepeater.h"
+#include "talk/base/proxyinfo.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace buzz {
+class CaptchaChallenge;
+class XmppClient;
+class XmppEngine;
+class XmppClientSettings;
+} // namespace buzz
+
+namespace talk_base {
+class FirewallManager;
+struct ProxyInfo;
+class Task;
+} // namespace talk_base
+
+namespace notifier {
+class AutoReconnect;
+class ConnectionOptions;
+class LoginFailure;
+class LoginSettings;
+class NetworkStatusDetectorTask;
+struct ServerInformation;
+class SingleLoginAttempt;
+class Timer;
+
+// Does the login, keeps it alive (with refreshing cookies
+// and reattempting login when disconnected), figures out
+// what actions to take on the various errors that may occur.
+class Login : public sigslot::has_slots<> {
+ public:
+ // network_status and firewall may be NULL
+ Login(talk_base::Task* parent,
+ const buzz::XmppClientSettings& user_settings,
+ const ConnectionOptions& options,
+ std::string lang,
+ ServerInformation* server_list,
+ int server_count,
+ NetworkStatusDetectorTask* network_status,
+ talk_base::FirewallManager* firewall,
+ bool no_gaia_auth,
+ bool proxy_only,
+ bool previous_login_successful);
+ ~Login();
+
+ enum ConnectionState {
+ STATE_CLOSED,
+ // Same as the closed state but indicates that a countdown is happening
+ // for auto-retrying the connection.
+ STATE_RETRYING,
+ STATE_OPENING,
+ STATE_OPENED,
+ };
+
+ ConnectionState connection_state() const {
+ return state_;
+ }
+
+ void StartConnection();
+ void UseNextConnection();
+ void UseCurrentConnection();
+ buzz::XmppClient* xmpp_client();
+
+ // Start the auto-reconnect. It may not do the auto-reconnect
+ // if auto-reconnect is turned off.
+ void DoAutoReconnect();
+
+ const LoginSettings& login_settings() const {
+ return *(login_settings_.get());
+ }
+
+ // Returns the best guess at the host responsible for
+ // the account (which we use to determine if it is
+ // a dasher account or not).
+ //
+ // After login this may return a more accurate answer,
+ // which accounts for open sign-up accounts.
+ const std::string& google_host() const;
+
+ // Analogous to google_host but for the user account.
+ // ("fred" in "fred@gmail.com")
+ const std::string& google_user() const;
+
+ // Returns the proxy that is being used to connect (or
+ // the default proxy information if all attempted
+ // connections failed).
+ //
+ // Do not call until StartConnection has been called.
+ const talk_base::ProxyInfo& proxy() const;
+
+ int seconds_until_reconnect() const;
+
+ // SignalClientStateChange(ConnectionState new_state);
+ sigslot::signal1<ConnectionState> SignalClientStateChange;
+
+ sigslot::signal1<const LoginFailure&> SignalLoginFailure;
+ sigslot::repeater2<const char*, int> SignalLogInput;
+ sigslot::repeater2<const char*, int> SignalLogOutput;
+ sigslot::repeater1<bool> SignalIdleChange;
+
+ // The creator should hook this up to a signal that indicates when the power
+ // is being suspended.
+ sigslot::repeater1<bool> SignalPowerSuspended;
+
+ private:
+ void OnRedirect(const std::string& redirect_server, int redirect_port);
+ void OnUnexpectedDisconnect();
+ void OnClientStateChange(buzz::XmppEngine::State state);
+ void OnLoginFailure(const LoginFailure& failure);
+ void OnLogoff();
+ void OnAutoReconnectTimerChange();
+
+ void HandleClientStateChange(ConnectionState new_state);
+ void ResetUnexpectedDisconnect();
+
+ void OnNetworkStateDetected(bool was_alive, bool is_alive);
+ void OnDisconnectTimeout();
+
+ scoped_ptr<LoginSettings> login_settings_;
+ scoped_ptr<AutoReconnect> auto_reconnect_;
+ SingleLoginAttempt* single_attempt_;
+ bool successful_connection_;
+ talk_base::Task* parent_;
+
+ ConnectionState state_;
+
+ // server redirect information
+ time64 redirect_time_ns_;
+ std::string redirect_server_;
+ int redirect_port_;
+ bool unexpected_disconnect_occurred_;
+ Timer* reset_unexpected_timer_;
+ std::string google_host_;
+ std::string google_user_;
+ talk_base::ProxyInfo proxy_info_;
+
+ Timer* disconnect_timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(Login);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_H_
diff --git a/chrome/browser/sync/notifier/communicator/login_failure.cc b/chrome/browser/sync/notifier/communicator/login_failure.cc
new file mode 100644
index 0000000..6f29d87
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login_failure.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/login_failure.h"
+
+#include "talk/xmpp/prexmppauth.h"
+
+namespace notifier {
+
+LoginFailure::LoginFailure(LoginError error)
+ : error_(error),
+ xmpp_error_(buzz::XmppEngine::ERROR_NONE),
+ subcode_(0) {
+}
+
+LoginFailure::LoginFailure(LoginError error,
+ buzz::XmppEngine::Error xmpp_error,
+ int subcode)
+ : error_(error),
+ xmpp_error_(xmpp_error),
+ subcode_(subcode) {
+}
+
+LoginFailure::LoginFailure(LoginError error,
+ buzz::XmppEngine::Error xmpp_error,
+ int subcode,
+ const buzz::CaptchaChallenge& captcha)
+ : error_(error),
+ xmpp_error_(xmpp_error),
+ subcode_(subcode),
+ captcha_(new buzz::CaptchaChallenge(captcha)) {
+}
+
+buzz::XmppEngine::Error LoginFailure::xmpp_error() const {
+ ASSERT(error_ == XMPP_ERROR);
+ return xmpp_error_;
+}
+
+const buzz::CaptchaChallenge& LoginFailure::captcha() const {
+ ASSERT(xmpp_error_ == buzz::XmppEngine::ERROR_UNAUTHORIZED ||
+ xmpp_error_ == buzz::XmppEngine::ERROR_MISSING_USERNAME);
+ return *captcha_.get();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/login_failure.h b/chrome/browser/sync/notifier/communicator/login_failure.h
new file mode 100644
index 0000000..cbddc00
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login_failure.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_FAILURE_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_FAILURE_H_
+
+#include "talk/base/common.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace buzz {
+class CaptchaChallenge;
+}
+
+namespace notifier {
+
+class LoginFailure {
+ public:
+ enum LoginError {
+ // Check the xmpp_error for more information
+ XMPP_ERROR,
+
+ // If the certificate has expired, it usually means that the
+ // computer's clock isn't set correctly.
+ CERTIFICATE_EXPIRED_ERROR,
+
+ // Apparently, there is a proxy that needs authentication information.
+ PROXY_AUTHENTICATION_ERROR,
+ };
+
+ LoginFailure(LoginError error);
+ LoginFailure(LoginError error,
+ buzz::XmppEngine::Error xmpp_error,
+ int subcode);
+ LoginFailure(LoginError error,
+ buzz::XmppEngine::Error xmpp_error,
+ int subcode,
+ const buzz::CaptchaChallenge& captcha);
+
+ // Used as the first level of error information
+ LoginError error() const {
+ return error_;
+ }
+
+ // Returns the XmppEngine only. Valid if and only if error() == XMPP_ERROR
+ //
+ // Handler should mimic logic from PhoneWindow::ShowConnectionError
+ // (except that the DiagnoseConnectionError has already been done).
+ buzz::XmppEngine::Error xmpp_error() const;
+
+ // Returns the captcha challenge. Valid if and only if
+ // xmpp_error is buzz::XmppEngine::ERROR_UNAUTHORIZED or
+ // buzz::XmppEngine::ERROR_MISSING_USERNAME
+ //
+ // See PhoneWindow::HandleConnectionPasswordError for how to handle this
+ // (after the if (..) { LoginAccountAndConnectionSetting(); ...} because
+ // that is done by SingleLoginAttempt.
+ const buzz::CaptchaChallenge& captcha() const;
+
+ private:
+ LoginError error_;
+ buzz::XmppEngine::Error xmpp_error_;
+ int subcode_;
+ scoped_ptr<buzz::CaptchaChallenge> captcha_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoginFailure);
+};
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_FAILURE_H_
diff --git a/chrome/browser/sync/notifier/communicator/login_settings.cc b/chrome/browser/sync/notifier/communicator/login_settings.cc
new file mode 100644
index 0000000..2983dd8
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login_settings.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/login_settings.h"
+
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+#include "chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h"
+#include "talk/base/common.h"
+#include "talk/base/socketaddress.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace notifier {
+
+LoginSettings::LoginSettings(const buzz::XmppClientSettings& user_settings,
+ const ConnectionOptions& options,
+ std::string lang,
+ ServerInformation* server_list,
+ int server_count,
+ talk_base::FirewallManager* firewall,
+ bool no_gaia_auth,
+ bool proxy_only)
+ : proxy_only_(proxy_only),
+ no_gaia_auth_(no_gaia_auth),
+ firewall_(firewall),
+ lang_(lang),
+ server_list_(new ServerInformation[server_count]),
+ server_count_(server_count),
+ user_settings_(new buzz::XmppClientSettings(user_settings)),
+ connection_options_(new ConnectionOptions(options)) {
+ // Note: firewall may be NULL
+ ASSERT(server_list != 0);
+ ASSERT(server_count > 0);
+ for (int i = 0; i < server_count_; ++i) {
+ server_list_[i] = server_list[i];
+ }
+}
+
+// defined so that the destructors are executed here (and
+// the corresponding classes don't need to be included in
+// the header file)
+LoginSettings::~LoginSettings() {
+}
+
+void LoginSettings::set_server_override(
+ const talk_base::SocketAddress& server) {
+ server_override_.reset(new ServerInformation());
+ server_override_->server = server;
+ server_override_->special_port_magic = server_list_[0].special_port_magic;
+}
+
+void LoginSettings::clear_server_override() {
+ server_override_.reset();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/login_settings.h b/chrome/browser/sync/notifier/communicator/login_settings.h
new file mode 100644
index 0000000..3e9b971
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/login_settings.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_SETTINGS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_SETTINGS_H_
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h"
+#include "talk/base/scoped_ptr.h"
+
+namespace buzz {
+class XmppClientSettings;
+}
+
+namespace talk_base {
+class FirewallManager;
+class SocketAddress;
+}
+
+namespace notifier {
+class ConnectionOptions;
+struct ServerInformation;
+
+class LoginSettings {
+ public:
+ LoginSettings(const buzz::XmppClientSettings& user_settings,
+ const ConnectionOptions& options,
+ std::string lang,
+ ServerInformation* server_list,
+ int server_count,
+ talk_base::FirewallManager* firewall,
+ bool no_gaia_auth,
+ bool proxy_only);
+
+ ~LoginSettings();
+
+ // Note: firewall() may return NULL.
+ //
+ // Could be a const method, but it allows
+ // modification of part (FirewallManager) of its state.
+ talk_base::FirewallManager* firewall() {
+ return firewall_;
+ }
+
+ bool no_gaia_auth() const {
+ return no_gaia_auth_;
+ }
+
+ bool proxy_only() const {
+ return proxy_only_;
+ }
+
+ const std::string& lang() const {
+ return lang_;
+ }
+
+ const ServerInformation* server_list() const {
+ return server_override_.get() ? server_override_.get() : server_list_.get();
+ }
+
+ int server_count() const {
+ return server_override_.get() ? 1 : server_count_;
+ }
+
+ const buzz::XmppClientSettings& user_settings() const {
+ return *user_settings_.get();
+ }
+
+ buzz::XmppClientSettings* modifiable_user_settings() {
+ return user_settings_.get();
+ }
+
+ const ConnectionOptions& connection_options() const {
+ return *connection_options_.get();
+ }
+
+ void set_server_override(const talk_base::SocketAddress& server);
+ void clear_server_override();
+
+ private:
+ bool proxy_only_;
+ bool no_gaia_auth_;
+ talk_base::FirewallManager* firewall_;
+ std::string lang_;
+
+ talk_base::scoped_array<ServerInformation> server_list_;
+ int server_count_;
+ // Used to handle redirects
+ scoped_ptr<ServerInformation> server_override_;
+
+ scoped_ptr<buzz::XmppClientSettings> user_settings_;
+ scoped_ptr<ConnectionOptions> connection_options_;
+ DISALLOW_COPY_AND_ASSIGN(LoginSettings);
+};
+} // namespace notifier
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_LOGIN_SETTINGS_H_
diff --git a/chrome/browser/sync/notifier/communicator/mailbox.cc b/chrome/browser/sync/notifier/communicator/mailbox.cc
new file mode 100644
index 0000000..22b6690
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/mailbox.cc
@@ -0,0 +1,682 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/mailbox.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <stack>
+#include <vector>
+
+#include "chrome/browser/sync/notifier/base/string.h"
+#include "chrome/browser/sync/notifier/base/utils.h"
+#include "chrome/browser/sync/notifier/communicator/xml_parse_helpers.h"
+#include "talk/base/basictypes.h"
+#include "talk/base/common.h"
+#include "talk/base/stringutils.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/constants.h"
+
+namespace notifier {
+
+// Labels are a list of strings seperated by a '|' character.
+// The '|' character is escaped with a backslash ('\\') and the
+// backslash is also escaped with a backslash.
+static void ParseLabelSet(const std::string& text,
+ MessageThread::StringSet* labels) {
+ const char* input_cur = text.c_str();
+ const char* input_end = input_cur + text.size();
+ char* result = new char[text.size() + 1];
+ char* next_write = result;
+
+ while(input_cur < input_end) {
+ if (*input_cur == '|') {
+ if (next_write != result) {
+ *next_write = '\0';
+ labels->insert(std::string(result));
+ next_write = result;
+ }
+ input_cur++;
+ continue;
+ }
+
+ if (*input_cur == '\\') {
+ // skip a character in the input and break if we are at the end
+ input_cur++;
+ if (input_cur >= input_end)
+ break;
+ }
+ *next_write = *input_cur;
+ next_write++;
+ input_cur++;
+ }
+
+ if (next_write != result) {
+ *next_write = '\0';
+ labels->insert(std::string(result));
+ }
+
+ delete [] result;
+}
+
+// -----------------------------------------------------------------------------
+
+std::string MailAddress::safe_name() const {
+ if (!name().empty()) {
+ return name();
+ }
+
+ if (!address().empty()) {
+ size_t at = address().find('@');
+ if (at == std::string::npos) {
+ return address();
+ }
+
+ if (at != 0) {
+ return address().substr(0, at);
+ }
+ }
+
+ return std::string("(unknown)");
+}
+
+// -----------------------------------------------------------------------------
+MessageThread::~MessageThread() {
+ Clear();
+}
+
+void MessageThread::Clear() {
+ delete labels_;
+ labels_ = NULL;
+
+ delete senders_;
+ senders_ = NULL;
+}
+
+MessageThread& MessageThread::operator=(const MessageThread& r) {
+ if (&r != this) {
+ Clear();
+ // Copy everything
+ r.AssertValid();
+ thread_id_ = r.thread_id_;
+ date64_ = r.date64_;
+ message_count_ = r.message_count_;
+ personal_level_ = r.personal_level_;
+ subject_ = r.subject_;
+ snippet_ = r.snippet_;
+
+ if (r.labels_)
+ labels_ = new StringSet(*r.labels_);
+ else
+ labels_ = new StringSet;
+ if (r.senders_)
+ senders_ = new MailSenderList(*r.senders_);
+ else
+ senders_ = new MailSenderList;
+ }
+ AssertValid();
+ return *this;
+}
+
+MessageThread* MessageThread::CreateFromXML(
+ const buzz::XmlElement* src) {
+ MessageThread* info = new MessageThread();
+ if (!info || !info->InitFromXml(src)) {
+ delete info;
+ return NULL;
+ }
+ return info;
+}
+
+// Init from a chunk of XML
+bool MessageThread::InitFromXml(const buzz::XmlElement* src) {
+ labels_ = new StringSet;
+ senders_ = new MailSenderList;
+
+ if (src->Name() != buzz::kQnMailThreadInfo)
+ return false;
+
+ if (!ParseInt64Attr(src, buzz::kQnMailTid, &thread_id_))
+ return false;
+ if (!ParseInt64Attr(src, buzz::kQnMailDate, &date64_))
+ return false;
+ if (!ParseIntAttr(src, buzz::kQnMailMessages, &message_count_))
+ return false;
+ if (!ParseIntAttr(src, buzz::kQnMailParticipation, &personal_level_))
+ return false;
+
+ const buzz::XmlElement* senders = src->FirstNamed(buzz::kQnMailSenders);
+ if (!senders)
+ return false;
+ for (const buzz::XmlElement* child = senders->FirstElement();
+ child != NULL;
+ child = child->NextElement()) {
+ if (child->Name() != buzz::kQnMailSender)
+ continue;
+ std::string address;
+ if (!ParseStringAttr(child, buzz::kQnMailAddress, &address))
+ continue;
+ std::string name;
+ ParseStringAttr(child, buzz::kQnMailName, &name);
+ bool originator = false;
+ ParseBoolAttr(child, buzz::kQnMailOriginator, &originator);
+ bool unread = false;
+ ParseBoolAttr(child, buzz::kQnMailUnread, &unread);
+
+ senders_->push_back(MailSender(name, address, unread, originator));
+ }
+
+ const buzz::XmlElement* labels = src->FirstNamed(buzz::kQnMailLabels);
+ if (!labels)
+ return false;
+ ParseLabelSet(labels->BodyText(), labels_);
+
+ const buzz::XmlElement* subject = src->FirstNamed(buzz::kQnMailSubject);
+ if (!subject)
+ return false;
+ subject_ = subject->BodyText();
+
+ const buzz::XmlElement* snippet = src->FirstNamed(buzz::kQnMailSnippet);
+ if (!snippet)
+ return false;
+ snippet_ = snippet->BodyText();
+
+ AssertValid();
+ return true;
+}
+
+bool MessageThread::starred() const {
+ return (labels_->find("^t") != labels_->end());
+}
+
+bool MessageThread::unread() const {
+ return (labels_->find("^u") != labels_->end());
+}
+
+#ifdef _DEBUG
+// non-debug version is inline and empty
+void MessageThread::AssertValid() const {
+ assert(thread_id_ != 0);
+ assert(senders_ != NULL);
+ // In some (odd) cases, gmail may send email with no sender.
+ // assert(!senders_->empty());
+ assert(message_count_ > 0);
+ assert(labels_ != NULL);
+}
+#endif
+
+
+
+MailBox* MailBox::CreateFromXML(const buzz::XmlElement* src) {
+ MailBox* mail_box = new MailBox();
+ if (!mail_box || !mail_box->InitFromXml(src)) {
+ delete mail_box;
+ return NULL;
+ }
+ return mail_box;
+}
+
+// Init from a chunk of XML
+bool MailBox::InitFromXml(const buzz::XmlElement* src)
+{
+ if (src->Name() != buzz::kQnMailBox)
+ return false;
+
+ if (!ParseIntAttr(src, buzz::kQnMailTotalMatched, &mailbox_size_))
+ return false;
+
+ estimate_ = false;
+ ParseBoolAttr(src, buzz::kQnMailTotalEstimate, &estimate_);
+
+ first_index_ = 0;
+ ParseIntAttr(src, buzz::kQnMailFirstIndex, &first_index_);
+
+ result_time_ = 0;
+ ParseInt64Attr(src, buzz::kQnMailResultTime, &result_time_);
+
+ highest_thread_id_ = 0;
+
+ const buzz::XmlElement* thread_element = src->FirstNamed(buzz::kQnMailThreadInfo);
+ while (thread_element) {
+ MessageThread* thread = MessageThread::CreateFromXML(thread_element);
+ if (thread) {
+ if (thread->thread_id() > highest_thread_id_)
+ highest_thread_id_ = thread->thread_id();
+ threads_.push_back(MessageThreadPointer(thread));
+ }
+ thread_element = thread_element->NextNamed(buzz::kQnMailThreadInfo);
+ }
+ return true;
+}
+
+const size_t kMaxShortnameLength = 12;
+
+// Tip: If you extend this list of chars, do not include '-'
+const char name_delim[] = " ,.:;\'\"()[]{}<>*@";
+
+class SenderFormatter {
+ public:
+ // sender should not be deleted while this class is in use.
+ SenderFormatter(const MailSender& sender,
+ const std::string& me_address)
+ : sender_(sender),
+ visible_(false),
+ short_format_(true),
+ space_(kMaxShortnameLength) {
+ me_ = talk_base::ascicmp(me_address.c_str(),
+ sender.address().c_str()) == 0;
+ }
+
+ bool visible() const {
+ return visible_;
+ }
+
+ bool is_unread() const {
+ return sender_.unread();
+ }
+
+ const std::string name() const {
+ return name_;
+ }
+
+ void set_short_format(bool short_format) {
+ short_format_ = short_format;
+ UpdateName();
+ }
+
+ void set_visible(bool visible) {
+ visible_ = visible;
+ UpdateName();
+ }
+
+ void set_space(size_t space) {
+ space_ = space;
+ UpdateName();
+ }
+
+ private:
+ // Attempt to shorten to the first word in a person's name
+ // We could revisit and do better at international punctuation,
+ // but this is what cricket did, and it should be removed
+ // soon when gmail does the notification instead of us
+ // forming it on the client.
+ static void ShortenName(std::string* name) {
+ size_t start = name->find_first_not_of(name_delim);
+ if (start != std::string::npos && start > 0) {
+ name->erase(0, start);
+ }
+ start = name->find_first_of(name_delim);
+ if (start != std::string::npos) {
+ name->erase(start);
+ }
+ }
+
+ void UpdateName() {
+ // Update the name if is going to be used.
+ if (!visible_) {
+ return;
+ }
+
+ if (me_) {
+ name_ = "me";
+ return;
+ }
+
+ if (sender_.name().empty() && sender_.address().empty()) {
+ name_ = "";
+ return;
+ }
+
+ name_ = sender_.name();
+ // Handle the case of no name or a name looks like an email address.
+ // When mail is sent to "Quality@example.com" <quality-team@example.com>,
+ // we shouldn't show "Quality@example.com" as the name.
+ // Instead use the email address (without the @...)
+ if (name_.empty() || name_.find_first_of("@") != std::string::npos) {
+ name_ = sender_.address();
+ size_t at_index = name_.find_first_of("@");
+ if (at_index != std::string::npos) {
+ name_.erase(at_index);
+ }
+ } else if (short_format_) {
+ ShortenName(&name_);
+ }
+
+ if (name_.empty()) {
+ name_ = "(unknown)";
+ }
+
+ // Abbreviate if too long.
+ if (name_.size() > space_) {
+ name_.replace(space_ - 1, std::string::npos, ".");
+ }
+ }
+
+ const MailSender& sender_;
+ std::string name_;
+ bool visible_;
+ bool short_format_;
+ size_t space_;
+ bool me_;
+ DISALLOW_COPY_AND_ASSIGN(SenderFormatter);
+};
+
+const char kNormalSeparator[] = ",&nbsp;";
+const char kEllidedSeparator[] = "&nbsp;..";
+
+std::string FormatName(const std::string& name, bool bold) {
+ std::string formatted_name;
+ if (bold) {
+ formatted_name.append("<b>");
+ }
+ formatted_name.append(HtmlEncode(name));
+ if (bold) {
+ formatted_name.append("</b>");
+ }
+ return formatted_name;
+}
+
+class SenderFormatterList {
+ public:
+ // sender_list must not be deleted while this class is being used.
+ SenderFormatterList(const MailSenderList& sender_list,
+ const std::string& me_address)
+ : state_(INITIAL_STATE),
+ are_any_read_(false),
+ index_(-1),
+ first_unread_index_(-1) {
+ // Add all read messages.
+ const MailSender* originator = NULL;
+ bool any_unread = false;
+ for (size_t i = 0; i < sender_list.size(); ++i) {
+ if (sender_list[i].originator()) {
+ originator = &sender_list[i];
+ }
+ if (sender_list[i].unread()) {
+ any_unread = true;
+ continue;
+ }
+ are_any_read_ = true;
+ if (!sender_list[i].originator()) {
+ senders_.push_back(new SenderFormatter(sender_list[i],
+ me_address));
+ }
+ }
+
+ // There may not be an orignator (if there no senders).
+ if (originator) {
+ senders_.insert(senders_.begin(), new SenderFormatter(*originator,
+ me_address));
+ }
+
+ // Add all unread messages.
+ if (any_unread) {
+ // It should be rare, but there may be cases when all of the
+ // senders appear to have read the message.
+ first_unread_index_ = is_first_unread() ? 0 : senders_.size();
+ for (size_t i = 0; i < sender_list.size(); ++i) {
+ if (!sender_list[i].unread()) {
+ continue;
+ }
+ // Don't add the originator if it is already at the
+ // start of the "unread" list.
+ if (sender_list[i].originator() && is_first_unread()) {
+ continue;
+ }
+ senders_.push_back(new SenderFormatter(sender_list[i],
+ me_address));
+ }
+ }
+ }
+
+ ~SenderFormatterList() {
+ CleanupSequence(&senders_);
+ }
+
+ std::string GetHtml(int space) {
+ index_ = -1;
+ state_ = INITIAL_STATE;
+ while (!added_.empty()) {
+ added_.pop();
+ }
+
+ // If there is only one sender, let it take up all of the space.
+ if (senders_.size() == 1) {
+ senders_[0]->set_space(space);
+ senders_[0]->set_short_format(false);
+ }
+
+ int length = 1;
+ // Add as many senders as we can in the given space.
+ // Computes the visible length at each iteration,
+ // but does not construct the actual html.
+ while (length < space && AddNextSender()) {
+ int new_length = ConstructHtml(is_first_unread(), NULL);
+ // Remove names to avoid truncating
+ // * if there will be at least 2 left or
+ // * if the spacing <= 2 characters per sender and there
+ // is at least one left.
+ if ((new_length > space &&
+ (visible_count() > 2 ||
+ (ComputeSpacePerSender(space) <= 2 && visible_count() > 1)))) {
+ RemoveLastAddedSender();
+ break;
+ }
+ length = new_length;
+ }
+
+ if (length > space) {
+ int max = ComputeSpacePerSender(space);
+ for (size_t i = 0; i < senders_.size(); ++i) {
+ if (senders_[i]->visible()) {
+ senders_[i]->set_space(max);
+ }
+ }
+ }
+
+ // Now construct the actual html
+ std::string html_list;
+ length = ConstructHtml(is_first_unread(), &html_list);
+ if (length > space) {
+ LOG(LS_WARNING) << "LENGTH: " << length << " exceeds "
+ << space << " " << html_list;
+ }
+ return html_list;
+ }
+
+ private:
+ int ComputeSpacePerSender(int space) const {
+ // Why the "- 2"? To allow for the " .. " which may occur
+ // after the names, and no matter what always allow at least
+ // 2 characters per sender.
+ return talk_base::_max<int>(space / visible_count() - 2, 2);
+ }
+
+ // Finds the next sender that should be added to the "from" list
+ // and sets it to visible.
+ //
+ // This method may be called until it returns false or
+ // until RemoveLastAddedSender is called.
+ bool AddNextSender() {
+ // The progression is:
+ // 1. Add the person who started the thread, which is the first message.
+ // 2. Add the first unread message (unless it has already been added).
+ // 3. Add the last message (unless it has already been added).
+ // 4. Add the message that is just before the last message processed
+ // (unless it has already been added).
+ // If there is no message (i.e. at index -1), return false.
+ //
+ // Typically, this method is called until it returns false or
+ // all of the space available is used.
+ switch (state_) {
+ case INITIAL_STATE:
+ state_ = FIRST_MESSAGE;
+ index_ = 0;
+ // If the server behaves odd and doesn't send us any senders,
+ // do something graceful.
+ if (senders_.size() == 0) {
+ return false;
+ }
+ break;
+
+ case FIRST_MESSAGE:
+ if (first_unread_index_ >= 0) {
+ state_ = FIRST_UNREAD_MESSAGE;
+ index_ = first_unread_index_;
+ break;
+ }
+ // fall through
+ case FIRST_UNREAD_MESSAGE:
+ state_ = LAST_MESSAGE;
+ index_ = senders_.size() - 1;
+ break;
+
+ case LAST_MESSAGE:
+ case PREVIOUS_MESSAGE:
+ state_ = PREVIOUS_MESSAGE;
+ index_--;
+ break;
+
+ case REMOVED_MESSAGE:
+ default:
+ ASSERT(false);
+ return false;
+ }
+
+ if (index_ < 0) {
+ return false;
+ }
+
+ if (!senders_[index_]->visible()) {
+ added_.push(index_);
+ senders_[index_]->set_visible(true);
+ }
+ return true;
+ }
+
+ // Makes the last added sender not visible.
+ //
+ // May be called while visible_count() > 0.
+ void RemoveLastAddedSender() {
+ state_ = REMOVED_MESSAGE;
+ int index = added_.top();
+ added_.pop();
+ senders_[index]->set_visible(false);
+ }
+
+ // Constructs the html of the SenderList and returns the length of the
+ // visible text.
+ //
+ // The algorithm simply walks down the list of Senders, appending
+ // the html for each visible sender, and adding ellipsis or commas
+ // in between, whichever is appropriate.
+ //
+ // html Filled with html. Maybe NULL if the html doesn't
+ // need to be constructed yet (useful for simply
+ // determining the length of the visible text).
+ //
+ // returns The approximate visible length of the html.
+ int ConstructHtml(bool first_is_unread,
+ std::string* html) const {
+ if (senders_.empty()) {
+ return 0;
+ }
+
+ int length = 0;
+
+ // The first is always visible
+ const SenderFormatter* sender = senders_[0];
+ const std::string& originator_name = sender->name();
+ length += originator_name.length();
+ if (html) {
+ html->append(FormatName(originator_name, first_is_unread));
+ }
+
+ bool elided = false;
+ const char* between = "";
+ for (size_t i = 1; i < senders_.size(); i++) {
+ sender = senders_[i];
+
+ if (sender->visible()) {
+ // Handle the separator
+ between = elided ? "&nbsp;" : kNormalSeparator;
+ // Ignore the , for length because it is so narrow,
+ // so in both cases above the space is the only things
+ // that counts for spaces.
+ length++;
+
+ // Handle the name
+ const std::string name = sender->name();
+ length += name.size();
+
+ // Construct the html
+ if (html) {
+ html->append(between);
+ html->append(FormatName(name, sender->is_unread()));
+ }
+ elided = false;
+ } else if (!elided) {
+ between = kEllidedSeparator;
+ length += 2; // ".." is narrow
+ if (html) {
+ html->append(between);
+ }
+ elided = true;
+ }
+ }
+ return length;
+ }
+
+ bool is_first_unread() const {
+ return !are_any_read_;
+ }
+
+ size_t visible_count() const {
+ return added_.size();
+ }
+
+ enum MessageState {
+ INITIAL_STATE,
+ FIRST_MESSAGE,
+ FIRST_UNREAD_MESSAGE,
+ LAST_MESSAGE,
+ PREVIOUS_MESSAGE,
+ REMOVED_MESSAGE,
+ };
+
+ // What state we were in during the last "stateful" function call.
+ MessageState state_;
+ bool are_any_read_;
+ std::vector<SenderFormatter*> senders_;
+ std::stack<int> added_;
+ int index_;
+ int first_unread_index_;
+ DISALLOW_COPY_AND_ASSIGN(SenderFormatterList);
+};
+
+
+std::string GetSenderHtml(const MailSenderList& sender_list,
+ int message_count,
+ const std::string& me_address,
+ int space) {
+ // There has to be at least 9 spaces to show something reasonable.
+ ASSERT(space >= 10);
+ std::string count_html;
+ if (message_count > 1) {
+ std::string count(IntToString(message_count));
+ space -= (count.size());
+ count_html.append("&nbsp;(");
+ count_html.append(count);
+ count_html.append(")");
+ // Reduce space due to parenthesis and &nbsp;.
+ space -= 2;
+ }
+
+ SenderFormatterList senders(sender_list, me_address);
+ std::string html_list(senders.GetHtml(space));
+ html_list.append(count_html);
+ return html_list;
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/mailbox.h b/chrome/browser/sync/notifier/communicator/mailbox.h
new file mode 100644
index 0000000..009de73
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/mailbox.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_MAILBOX_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_MAILBOX_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/linked_ptr.h"
+
+namespace buzz {
+class XmlElement;
+}
+
+namespace notifier {
+// -----------------------------------------------------------------------------
+class MailAddress {
+ public:
+ MailAddress(const std::string& name, const std::string& address)
+ : name_(name),
+ address_(address) {
+ }
+ const std::string& name() const { return name_; }
+ const std::string& address() const { return address_; }
+ std::string safe_name() const; // will return *something*
+ private:
+ std::string name_;
+ std::string address_;
+};
+
+// -----------------------------------------------------------------------------
+class MailSender : public MailAddress {
+ public:
+ MailSender(const std::string& name, const std::string& address, bool unread,
+ bool originator)
+ : MailAddress(name, address),
+ unread_(unread),
+ originator_(originator) {
+ }
+
+ MailSender(const MailSender& r)
+ : MailAddress(r.name(), r.address()) {
+ unread_ = r.unread_;
+ originator_ = r.originator_;
+ }
+
+ bool unread() const { return unread_; }
+ bool originator() const { return originator_; }
+
+ private:
+ bool unread_;
+ bool originator_;
+};
+
+typedef std::vector<MailSender> MailSenderList;
+
+// -----------------------------------------------------------------------------
+// MessageThread: everything there is to know about a mail thread.
+class MessageThread {
+ public:
+ MessageThread(const MessageThread& r) {
+ labels_ = NULL;
+ senders_ = NULL;
+ *this = r;
+ }
+
+ ~MessageThread();
+
+ // Try to parse the XML to create a MessageThreadInfo. If NULL is returned
+ // then we either ran out of memory or there was an error in parsing the
+ // XML
+ static MessageThread* CreateFromXML(const buzz::XmlElement* src);
+
+ MessageThread& operator=(const MessageThread& r);
+
+ // SameThreadAs : name is self evident
+ bool SameThreadAs(const MessageThread& r) {
+ AssertValid();
+ r.AssertValid();
+ return (thread_id_ == r.thread_id_);
+ }
+
+ // SameAs : true if thread has same id and messages
+ // Assumes that messages don't disappear from threads.
+ bool SameAs(const MessageThread& r) {
+ AssertValid();
+ r.AssertValid();
+ return SameThreadAs(r) &&
+ message_count_ == r.message_count_;
+ }
+
+ typedef std::set<std::string> StringSet;
+
+ int64 thread_id() const { return thread_id_; }
+ const StringSet* labels() const { return labels_; }
+ int64 date64() const { return date64_; }
+ MailSenderList* senders() const { return senders_; }
+ int personal_level() const { return personal_level_; }
+ int message_count() const { return message_count_; }
+ const std::string& subject() const { return subject_; }
+ const std::string& snippet() const { return snippet_; }
+ bool starred() const;
+ bool unread() const;
+
+#ifdef _DEBUG
+ void AssertValid() const;
+#else
+ inline void AssertValid() const {}
+#endif
+
+ private:
+ void Clear();
+
+ private:
+ MessageThread() : senders_(NULL), labels_(NULL) {}
+ bool InitFromXml(const buzz::XmlElement* src);
+
+ int64 thread_id_;
+ int64 date64_;
+ int message_count_;
+ int personal_level_;
+ std::string subject_;
+ std::string snippet_;
+ MailSenderList* senders_;
+ StringSet* labels_;
+};
+
+typedef talk_base::linked_ptr<MessageThread> MessageThreadPointer;
+typedef std::vector<MessageThreadPointer> MessageThreadVector;
+
+// -----------------------------------------------------------------------------
+class MailBox {
+ public:
+ static MailBox* CreateFromXML(const buzz::XmlElement* src);
+
+ const MessageThreadVector& threads() const { return threads_; }
+ int mailbox_size() const { return mailbox_size_; }
+ int first_index() const { return first_index_; }
+ bool estimate() const { return estimate_; }
+ int64 result_time() const { return result_time_; }
+ int64 highest_thread_id() const { return highest_thread_id_; }
+
+ private:
+ MailBox() {}
+ bool InitFromXml(const buzz::XmlElement* src);
+
+ MessageThreadVector threads_;
+
+ int mailbox_size_;
+ int first_index_;
+ bool estimate_;
+ int64 result_time_;
+ int64 highest_thread_id_;
+};
+
+std::string GetSenderHtml(const MailSenderList& sender_list,
+ int message_count,
+ const std::string& me_address,
+ int space);
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_MAILBOX_H_
diff --git a/chrome/browser/sync/notifier/communicator/mailbox_unittest.cc b/chrome/browser/sync/notifier/communicator/mailbox_unittest.cc
new file mode 100644
index 0000000..1d498d1
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/mailbox_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/mailbox.h"
+#include "notifier/testing/notifier/unittest.h"
+
+namespace notifier {
+TEST_NOTIFIER_F(MailBoxTest);
+
+TEST_F(MailBoxTest, SingleSenderHtml) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(MailSender("Alex Smith", "a@a.com", true, true));
+ std::string sender_html = GetSenderHtml(sender_list, 1, me_address, 25);
+ ASSERT_STREQ("<b>Alex Smith</b>", sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, TruncatedSingleSenderHtml) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(MailSender(
+ "Alex Smith AReallyLongLastNameThatWillBeTruncated",
+ "a@a.com",
+ true,
+ true));
+ std::string sender_html = GetSenderHtml(sender_list, 1, me_address, 25);
+ ASSERT_STREQ("<b>Alex Smith AReallyLongLa.</b>", sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SingleSenderManyTimesHtml) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(MailSender("Alex Smith", "a@a.com", true, true));
+ std::string sender_html = GetSenderHtml(sender_list, 10, me_address, 25);
+ ASSERT_STREQ("<b>Alex Smith</b>&nbsp;(10)", sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SenderWithMeHtml) {
+ std::string me_address("randOm@comPany.Com");
+ MailSenderList sender_list;
+ sender_list.push_back(
+ MailSender("Alex Smith", "alex@jones.com", false, false));
+ sender_list.push_back(
+ MailSender("Your Name Goes Here", "raNdom@coMpany.cOm", true, true));
+ std::string sender_html = GetSenderHtml(sender_list, 5, me_address, 25);
+ ASSERT_STREQ("me,&nbsp;Alex,&nbsp;<b>me</b>&nbsp;(5)", sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SenderHtmlWithAllUnread) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(
+ MailSender("Alex Smith", "alex@jones.com", true, false));
+ sender_list.push_back(MailSender(
+ "Your Name Goes Here",
+ "foo@company.com",
+ true,
+ true));
+ sender_list.push_back(MailSender("", "bob@davis.com", true, false));
+ std::string sender_html = GetSenderHtml(sender_list, 100, me_address, 25);
+ ASSERT_STREQ("<b>Your</b>,&nbsp;<b>Alex</b>,&nbsp;<b>bob</b>&nbsp;(100)",
+ sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SenderHtmlWithTruncatedNames) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(MailSender(
+ "ReallyLongName Smith",
+ "alex@jones.com",
+ true,
+ false));
+ sender_list.push_back(MailSender(
+ "AnotherVeryLongFirstNameIsHere",
+ "foo@company.com",
+ true,
+ true));
+ std::string sender_html = GetSenderHtml(sender_list, 2, me_address, 25);
+ ASSERT_STREQ("<b>AnotherV.</b>,&nbsp;<b>ReallyLo.</b>&nbsp;(2)",
+ sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SenderWithTwoSendersShowing) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(
+ MailSender("ALongishName Smith", "alex@jones.com", false, false));
+ sender_list.push_back(
+ MailSender("AnotherBigName", "no@company.com", true, false));
+ sender_list.push_back(
+ MailSender("Person1", "no1@company.com", true, false));
+ sender_list.push_back(
+ MailSender("Person2", "no2@company.com", false, true));
+ std::string sender_html = GetSenderHtml(sender_list, 6, me_address, 25);
+ ASSERT_STREQ("Person2&nbsp;..&nbsp;<b>AnotherB.</b>&nbsp;..&nbsp;(6)",
+ sender_html.c_str());
+}
+
+TEST_F(MailBoxTest, SenderWithThreeSendersShowing) {
+ std::string me_address("random@company.com");
+ MailSenderList sender_list;
+ sender_list.push_back(
+ MailSender("Joe Smith", "alex@jones.com", false, false));
+ sender_list.push_back(
+ MailSender("Bob Other", "no@company.com", true, false));
+ sender_list.push_back(
+ MailSender("Person0", "no0@company.com", true, false));
+ sender_list.push_back(
+ MailSender("Person1", "no1@company.com", true, false));
+ sender_list.push_back(
+ MailSender("ted", "ted@company.com", false, true));
+ std::string sender_html = GetSenderHtml(sender_list, 6, me_address, 25);
+ ASSERT_STREQ(
+ "ted&nbsp;..&nbsp;<b>Bob</b>&nbsp;..&nbsp;<b>Person1</b>&nbsp;(6)",
+ sender_html.c_str());
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/product_info.cc b/chrome/browser/sync/notifier/communicator/product_info.cc
new file mode 100644
index 0000000..c1deafb
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/product_info.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+namespace notifier {
+std::string GetUserAgentString() {
+ return kXmppProductName;
+}
+
+std::string GetProductSignature() {
+ return kXmppProductName;
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/product_info.h b/chrome/browser/sync/notifier/communicator/product_info.h
new file mode 100644
index 0000000..1da60b0
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/product_info.h
@@ -0,0 +1,14 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_PRODUCT_INFO_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_PRODUCT_INFO_H_
+#include <string>
+
+namespace notifier {
+std::string GetUserAgentString();
+std::string GetProductSignature();
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_PRODUCT_INFO_H_
diff --git a/chrome/browser/sync/notifier/communicator/single_login_attempt.cc b/chrome/browser/sync/notifier/communicator/single_login_attempt.cc
new file mode 100644
index 0000000..68fe272
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/single_login_attempt.cc
@@ -0,0 +1,562 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/single_login_attempt.h"
+
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+#include "chrome/browser/sync/notifier/communicator/connection_settings.h"
+#include "chrome/browser/sync/notifier/communicator/const_communicator.h"
+#include "chrome/browser/sync/notifier/communicator/login_failure.h"
+#include "chrome/browser/sync/notifier/communicator/login_settings.h"
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h"
+#include "chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h"
+#include "chrome/browser/sync/notifier/gaia_auth/gaiaauth.h"
+#include "talk/base/asynchttprequest.h"
+#include "talk/base/firewallsocketserver.h"
+#include "talk/base/signalthread.h"
+#include "talk/base/taskrunner.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/constants.h"
+#include "talk/xmpp/prexmppauth.h"
+#include "talk/xmpp/xmppclient.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace notifier {
+static void FillProxyInfo(const buzz::XmppClientSettings& xcs,
+ talk_base::ProxyInfo* proxy) {
+ ASSERT(proxy != NULL);
+ proxy->type = xcs.proxy();
+ proxy->address.SetIP(xcs.proxy_host());
+ proxy->address.SetPort(xcs.proxy_port());
+ if (xcs.use_proxy_auth()) {
+ proxy->username = xcs.proxy_user();
+ proxy->password = xcs.proxy_pass();
+ }
+}
+
+static void GetClientErrorInformation(
+ buzz::XmppClient* client,
+ buzz::XmppEngine::Error* error,
+ int* subcode,
+ buzz::XmlElement** stream_error,
+ buzz::CaptchaChallenge* captcha_challenge) {
+ ASSERT(client != NULL);
+ ASSERT(error && subcode && stream_error && captcha_challenge);
+
+ *error = client->GetError(subcode);
+ *captcha_challenge = client->GetCaptchaChallenge();
+
+ *stream_error = NULL;
+ if (*error == buzz::XmppEngine::ERROR_STREAM) {
+ const buzz::XmlElement* error_element = client->GetStreamError();
+ if (error_element) {
+ *stream_error = new buzz::XmlElement(*error_element);
+ }
+ }
+}
+
+SingleLoginAttempt::SingleLoginAttempt(talk_base::Task* parent,
+ LoginSettings* login_settings,
+ bool successful_connection)
+ : talk_base::Task(parent),
+ state_(buzz::XmppEngine::STATE_NONE),
+ code_(buzz::XmppEngine::ERROR_NONE),
+ subcode_(0),
+ need_authentication_(false),
+ certificate_expired_(false),
+ cookie_refreshed_(false),
+ successful_connection_(successful_connection),
+ login_settings_(login_settings),
+ client_(NULL) {
+ connection_generator_.reset(new XmppConnectionGenerator(
+ this,
+ &login_settings_->connection_options(),
+ login_settings_->proxy_only(),
+ login_settings_->server_list(),
+ login_settings_->server_count()));
+
+ connection_generator_->SignalExhaustedSettings.connect(
+ this,
+ &SingleLoginAttempt::OnAttemptedAllConnections);
+ connection_generator_->SignalNewSettings.connect(
+ this,
+ &SingleLoginAttempt::DoLogin);
+}
+
+SingleLoginAttempt::~SingleLoginAttempt() {
+ // If this assertion goes off, it means that "Stop()" didn't get
+ // called like it should have been.
+ ASSERT(client_ == NULL);
+}
+
+bool SingleLoginAttempt::auto_reconnect() const {
+ return login_settings_->connection_options().auto_reconnect();
+}
+
+const talk_base::ProxyInfo& SingleLoginAttempt::proxy() const {
+ ASSERT(connection_generator_.get());
+ return connection_generator_->proxy();
+}
+
+int SingleLoginAttempt::ProcessStart() {
+ ASSERT(GetState() == talk_base::Task::STATE_START);
+ connection_generator_->StartGenerating();
+
+ // After being started, this class is callback driven and does
+ // signaling from those callbacks (with checks to see if it is
+ // done if it may be called back from something that isn't a child task).
+ return talk_base::Task::STATE_BLOCKED;
+}
+
+void SingleLoginAttempt::Stop() {
+ ClearClient();
+ talk_base::Task::Stop();
+
+ // No more signals should happen after being stopped.
+ // (This is needed because some of these signals
+ // happen due to other components doing signaling which
+ // may continue running even though this task is stopped.)
+ SignalUnexpectedDisconnect.disconnect_all();
+ SignalRedirect.disconnect_all();
+ SignalLoginFailure.disconnect_all();
+ SignalNeedAutoReconnect.disconnect_all();
+ SignalClientStateChange.disconnect_all();
+}
+
+void SingleLoginAttempt::OnAttemptedAllConnections(
+ bool successfully_resolved_dns,
+ int first_dns_error) {
+
+ // Maybe we needed proxy authentication?
+ if (need_authentication_) {
+ LoginFailure failure(LoginFailure::PROXY_AUTHENTICATION_ERROR);
+ SignalLoginFailure(failure);
+ return;
+ }
+
+ if (certificate_expired_) {
+ LoginFailure failure(LoginFailure::CERTIFICATE_EXPIRED_ERROR);
+ SignalLoginFailure(failure);
+ return;
+ }
+
+ if (!successfully_resolved_dns) {
+ code_ = buzz::XmppEngine::ERROR_SOCKET;
+ subcode_ = first_dns_error;
+ }
+
+ LOG(INFO) << "Connection failed with error " << code_;
+
+ // We were connected and we had a problem
+ if (successful_connection_ && auto_reconnect()) {
+ SignalNeedAutoReconnect();
+ // expect to be deleted at this point
+ return;
+ }
+
+ DiagnoseConnectionError();
+}
+
+void SingleLoginAttempt::UseNextConnection() {
+ ASSERT(connection_generator_.get() != NULL);
+ ClearClient();
+ connection_generator_->UseNextConnection();
+}
+
+void SingleLoginAttempt::UseCurrentConnection() {
+ ASSERT(connection_generator_.get() != NULL);
+ ClearClient();
+ connection_generator_->UseCurrentConnection();
+}
+
+void SingleLoginAttempt::DoLogin(
+ const ConnectionSettings& connection_settings) {
+ if (client_) {
+ return;
+ }
+
+ buzz::XmppClientSettings client_settings;
+ // set the user settings portion
+ *static_cast<buzz::XmppClientSettings*>(&client_settings) =
+ login_settings_->user_settings();
+ // fill in the rest of the client settings
+ connection_settings.FillXmppClientSettings(&client_settings);
+
+ client_ = new buzz::XmppClient(this);
+ SignalLogInput.repeat(client_->SignalLogInput);
+ SignalLogOutput.repeat(client_->SignalLogOutput);
+
+ // listen for connection progress
+ client_->SignalStateChange.connect(this,
+ &SingleLoginAttempt::OnClientStateChange);
+
+ // transition to "start"
+ OnClientStateChange(buzz::XmppEngine::STATE_START);
+ // start connecting
+ client_->Connect(client_settings, login_settings_->lang(),
+ CreateSocket(client_settings),
+ CreatePreXmppAuth(client_settings));
+ client_->Start();
+}
+
+void SingleLoginAttempt::OnAuthenticationError() {
+ // We can check this flag later if all connection options fail
+ need_authentication_ = true;
+}
+
+void SingleLoginAttempt::OnCertificateExpired() {
+ // We can check this flag later if all connection options fail
+ certificate_expired_ = true;
+}
+
+
+buzz::AsyncSocket* SingleLoginAttempt::CreateSocket(
+ const buzz::XmppClientSettings& xcs) {
+ bool allow_unverified_certs =
+ login_settings_->connection_options().allow_unverified_certs();
+ XmppSocketAdapter* adapter = new XmppSocketAdapter(xcs,
+ allow_unverified_certs);
+ adapter->SignalAuthenticationError.connect(
+ this,
+ &SingleLoginAttempt::OnAuthenticationError);
+ if (login_settings_->firewall()) {
+ adapter->set_firewall(true);
+ }
+ return adapter;
+}
+
+buzz::PreXmppAuth* SingleLoginAttempt::CreatePreXmppAuth(
+ const buzz::XmppClientSettings& xcs) {
+ if (login_settings_->no_gaia_auth())
+ return NULL;
+
+ // For GMail, use Gaia preauthentication over HTTP
+ buzz::GaiaAuth* auth = new buzz::GaiaAuth(GetUserAgentString(),
+ GetProductSignature());
+ auth->SignalAuthenticationError.connect(
+ this,
+ &SingleLoginAttempt::OnAuthenticationError);
+ auth->SignalCertificateExpired.connect(
+ this,
+ &SingleLoginAttempt::OnCertificateExpired);
+ auth->SignalFreshAuthCookie.connect(
+ this,
+ &SingleLoginAttempt::OnFreshAuthCookie);
+ auth->set_token_service(xcs.token_service());
+
+ talk_base::ProxyInfo proxy;
+ FillProxyInfo(xcs, &proxy);
+ auth->set_proxy(proxy);
+ auth->set_firewall(login_settings_->firewall());
+ return auth;
+}
+
+void SingleLoginAttempt::OnFreshAuthCookie(const std::string& auth_cookie) {
+ // Remember this is a fresh cookie
+ cookie_refreshed_ = true;
+
+ // TODO(sync): do the cookie logic (part of which is in the #if 0 below)
+
+ // The following code is what PhoneWindow does for the equivalent method
+#if 0
+ // Save cookie
+ AccountInfo current(account_history_.current());
+ current.set_auth_cookie(auth_cookie);
+ account_history_.set_current(current);
+
+ // Calc next time to refresh cookie, between 5 and 10 days
+ // The cookie has 14 days of life; this gives at least 4 days of retries
+ // before the current cookie expires, maximizing the chance of
+ // having a valid cookie next time the connection servers go down.
+ FTULL now;
+
+ // NOTE: The following line is win32. Address this when implementing this
+ // code (doing "the cookie logic")
+ GetSystemTimeAsFileTime(&(now.ft));
+ ULONGLONG five_days = (ULONGLONG)10000 * 1000 * 60 * 60 * 24 * 5; // 5 days
+ ULONGLONG random = (ULONGLONG)10000 * // get to 100 ns units
+ ((rand() % (5 * 24 * 60)) * (60 * 1000) + // random min. in 5 day period
+ (rand() % 1000) * 60); // random 1/1000th of a minute
+ next_cookie_refresh_ = now.ull + five_days + random; // 5-10 days
+#endif
+}
+
+void SingleLoginAttempt::DiagnoseConnectionError() {
+ switch (code_) {
+ case buzz::XmppEngine::ERROR_MISSING_USERNAME:
+ case buzz::XmppEngine::ERROR_NETWORK_TIMEOUT:
+ case buzz::XmppEngine::ERROR_DOCUMENT_CLOSED:
+ case buzz::XmppEngine::ERROR_BIND:
+ case buzz::XmppEngine::ERROR_AUTH:
+ case buzz::XmppEngine::ERROR_TLS:
+ case buzz::XmppEngine::ERROR_UNAUTHORIZED:
+ case buzz::XmppEngine::ERROR_VERSION:
+ case buzz::XmppEngine::ERROR_STREAM:
+ case buzz::XmppEngine::ERROR_XML:
+ case buzz::XmppEngine::ERROR_NONE:
+ default: {
+ LoginFailure failure(LoginFailure::XMPP_ERROR, code_, subcode_);
+ SignalLoginFailure(failure);
+ return;
+ }
+
+ // The following errors require diagnosistics:
+ // * spurious close of connection
+ // * socket errors after auth
+ case buzz::XmppEngine::ERROR_CONNECTION_CLOSED:
+ case buzz::XmppEngine::ERROR_SOCKET:
+ break;
+ }
+
+ talk_base::AsyncHttpRequest *http_request =
+ new talk_base::AsyncHttpRequest(GetUserAgentString());
+ http_request->set_host("www.google.com");
+ http_request->set_port(80);
+ http_request->set_secure(false);
+ http_request->request().path = "/";
+ http_request->request().verb = talk_base::HV_GET;
+
+ talk_base::ProxyInfo proxy;
+ ASSERT(connection_generator_.get() != NULL);
+ if (connection_generator_.get()) {
+ proxy = connection_generator_->proxy();
+ }
+ http_request->set_proxy(proxy);
+ http_request->set_firewall(login_settings_->firewall());
+
+ http_request->SignalWorkDone.connect(this,
+ &SingleLoginAttempt::OnHttpTestDone);
+ http_request->Start();
+ http_request->Release();
+}
+
+void SingleLoginAttempt::OnHttpTestDone(talk_base::SignalThread* thread) {
+ ASSERT(thread != NULL);
+
+ talk_base::AsyncHttpRequest* request =
+ static_cast<talk_base::AsyncHttpRequest*>(thread);
+
+ if (request->response().scode == 200) {
+ // We were able to do an HTTP GET of www.google.com:80
+
+ //
+ // The original error should be reported
+ //
+ LoginFailure failure(LoginFailure::XMPP_ERROR, code_, subcode_);
+ SignalLoginFailure(failure);
+ return;
+ }
+
+ // Otherwise lets transmute the error into ERROR_SOCKET, and put
+ // the subcode as an indicator of what we think the problem
+ // might be.
+
+#if 0
+ // TODO(sync): determine if notifier has an analogous situation
+
+ //
+ // We weren't able to do an HTTP GET of www.google.com:80
+ //
+ GAutoupdater::Version version_logged_in(g_options.version_logged_in());
+ GAutoupdater::Version version_installed(GetProductVersion().c_str());
+ if (version_logged_in < version_installed) {
+ //
+ // Google Talk has been updated and can no longer connect
+ // to the Google Talk Service. Your firewall is probably
+ // not allowing the new version of Google Talk to connect
+ // to the internet. Please adjust your firewall settings
+ // to allow the new version of Google Talk to connect to
+ // the internet.
+ //
+ // We'll use the "error=1" to help figure this out for now
+ //
+ LoginFailure failure(LoginFailure::XMPP_ERROR,
+ buzz::XmppEngine::ERROR_SOCKET,
+ 1);
+ SignalLoginFailure(failure);
+ return;
+ }
+#endif
+
+ //
+ // Any other checking we can add here?
+ //
+
+ //
+ // Google Talk is unable to use your internet connection. Either your
+ // network isn't configured or Google Talk is being blocked by
+ // a local firewall.
+ //
+ // We'll use the "error=0" to help figure this out for now
+ //
+ LoginFailure failure(LoginFailure::XMPP_ERROR,
+ buzz::XmppEngine::ERROR_SOCKET,
+ 0);
+ SignalLoginFailure(failure);
+}
+
+void SingleLoginAttempt::OnClientStateChange(buzz::XmppEngine::State state) {
+ if (state_ == state)
+ return;
+
+ buzz::XmppEngine::State previous_state = state_;
+ state_ = state;
+
+ switch (state) {
+ case buzz::XmppEngine::STATE_NONE:
+ case buzz::XmppEngine::STATE_START:
+ case buzz::XmppEngine::STATE_OPENING:
+ // do nothing
+ break;
+ case buzz::XmppEngine::STATE_OPEN:
+ successful_connection_ = true;
+ break;
+ case buzz::XmppEngine::STATE_CLOSED:
+ OnClientStateChangeClosed(previous_state);
+ break;
+ }
+ SignalClientStateChange(state);
+ if (state_ == buzz::XmppEngine::STATE_CLOSED) {
+ OnClientStateChange(buzz::XmppEngine::STATE_NONE);
+ }
+}
+
+void SingleLoginAttempt::ClearClient() {
+ if (client_ != NULL) {
+ client_->Disconnect();
+
+ // If this assertion goes off, it means that the disconnect didn't occur
+ // properly. See SingleLoginAttempt::OnClientStateChange,
+ // case XmppEngine::STATE_CLOSED
+ ASSERT(client_ == NULL);
+ }
+}
+
+void SingleLoginAttempt::OnClientStateChangeClosed(
+ buzz::XmppEngine::State previous_state) {
+ buzz::XmppEngine::Error error = buzz::XmppEngine::ERROR_NONE;
+ int error_subcode = 0;
+ buzz::CaptchaChallenge captcha_challenge;
+ buzz::XmlElement* stream_error_ptr;
+ GetClientErrorInformation(client_,
+ &error,
+ &error_subcode,
+ &stream_error_ptr,
+ &captcha_challenge);
+ scoped_ptr<buzz::XmlElement> stream_error(stream_error_ptr);
+
+ client_->SignalStateChange.disconnect(this);
+ client_ = NULL;
+
+ if (error == buzz::XmppEngine::ERROR_NONE) {
+ SignalLogoff();
+ return;
+ } else if (previous_state == buzz::XmppEngine::STATE_OPEN) {
+ // Handler should attempt reconnect
+ SignalUnexpectedDisconnect();
+ return;
+ } else {
+ HandleConnectionError(error, error_subcode, stream_error.get(),
+ captcha_challenge);
+ }
+}
+
+void SingleLoginAttempt::HandleConnectionPasswordError(
+ const buzz::CaptchaChallenge& captcha_challenge) {
+ LOG(LS_VERBOSE) << "SingleLoginAttempt::HandleConnectionPasswordError";
+
+ // Clear the auth cookie
+ std::string current_auth_cookie =
+ login_settings_->user_settings().auth_cookie();
+ login_settings_->modifiable_user_settings()->set_auth_cookie("");
+ // If there was an auth cookie and it was the same as the last
+ // auth cookie, then it is a stale cookie. Retry login.
+ if (!current_auth_cookie.empty() && !cookie_refreshed_) {
+ UseCurrentConnection();
+ return;
+ }
+
+ LoginFailure failure(LoginFailure::XMPP_ERROR, code_, subcode_,
+ captcha_challenge);
+ SignalLoginFailure(failure);
+}
+
+void SingleLoginAttempt::HandleConnectionError(
+ buzz::XmppEngine::Error code,
+ int subcode,
+ const buzz::XmlElement* stream_error,
+ const buzz::CaptchaChallenge& captcha_challenge) {
+ LOG_F(LS_VERBOSE) << "(" << code << ", " << subcode << ")";
+
+ // Save off the error code information, so we can use it
+ // to tell the user what went wrong if all else fails
+ code_ = code;
+ subcode_ = subcode;
+ if ((code_ == buzz::XmppEngine::ERROR_UNAUTHORIZED) ||
+ (code_ == buzz::XmppEngine::ERROR_MISSING_USERNAME)) {
+ // There was a problem with credentials (username/password)
+ HandleConnectionPasswordError(captcha_challenge);
+ return;
+ }
+
+ // Unexpected disconnect,
+ // Unreachable host,
+ // Or internal server binding error -
+ // All these are temporary problems, so continue reconnecting
+
+ // GaiaAuth signals this directly via SignalCertificateExpired, but
+ // SChannelAdapter propagates the error through SocketWindow as a socket
+ // error.
+ if (code_ == buzz::XmppEngine::ERROR_SOCKET &&
+ subcode_ == SEC_E_CERT_EXPIRED) {
+ certificate_expired_ = true;
+ }
+
+ login_settings_->modifiable_user_settings()->set_resource("");
+
+ // Look for stream::error server redirection stanza "see-other-host"
+ if (stream_error) {
+ const buzz::XmlElement* other =
+ stream_error->FirstNamed(buzz::QN_XSTREAM_SEE_OTHER_HOST);
+ if (other) {
+ const buzz::XmlElement* text =
+ stream_error->FirstNamed(buzz::QN_XSTREAM_TEXT);
+ if (text) {
+ // Yep, its a "stream:error" with "see-other-host" text, let's
+ // parse out the server:port, and then reconnect with that.
+ const std::string& redirect = text->BodyText();
+ unsigned int colon = redirect.find(":");
+ int redirect_port = kDefaultXmppPort;
+ std::string redirect_server;
+ if (colon == std::string::npos) {
+ redirect_server = redirect;
+ } else {
+ redirect_server = redirect.substr(0, colon);
+ const std::string& port_text = redirect.substr(colon + 1);
+ std::istringstream ist(port_text);
+ ist >> redirect_port;
+ }
+ // we never allow a redirect to port 0
+ if (redirect_port == 0) {
+ redirect_port = kDefaultXmppPort;
+ }
+ SignalRedirect(redirect_server, redirect_port);
+ // may be deleted at this point
+ return;
+ }
+ }
+ }
+
+ ASSERT(connection_generator_.get() != NULL);
+ if (!connection_generator_.get()) {
+ return;
+ }
+
+ // Iterate to the next possible connection (still trying to connect)
+ UseNextConnection();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/single_login_attempt.h b/chrome/browser/sync/notifier/communicator/single_login_attempt.h
new file mode 100644
index 0000000..ec265ea
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/single_login_attempt.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_SINGLE_LOGIN_ATTEMPT_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_SINGLE_LOGIN_ATTEMPT_H_
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/login.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/task.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace buzz {
+class AsyncSocket;
+class CaptchaChallenge;
+class PreXmppAuth;
+class XmppClient;
+class XmppClientSettings;
+class XmppClientSettings;
+}
+
+namespace talk_base {
+class FirewallManager;
+struct ProxyInfo;
+class SignalThread;
+class Task;
+}
+
+namespace notifier {
+class ConnectionSettings;
+class LoginFailure;
+class LoginSettings;
+struct ServerInformation;
+class XmppConnectionGenerator;
+
+// Handles all of the aspects of a single login attempt
+// (across multiple ip addresses) and maintainence. By containing
+// this within one class, when another login attempt is made,
+// this class will be disposed and all of the signalling for the
+// previous login attempt will be cleaned up immediately.
+//
+// This is a task to allow for cleaning this up when a signal
+// is being fired. Technically, delete this during the firing of
+// a signal could work but it is fragile.
+class SingleLoginAttempt : public talk_base::Task, public sigslot::has_slots<> {
+ public:
+ SingleLoginAttempt(talk_base::Task* parent,
+ LoginSettings* login_settings,
+ bool successful_connection);
+ ~SingleLoginAttempt();
+ virtual int ProcessStart();
+ void UseNextConnection();
+ void UseCurrentConnection();
+
+ buzz::XmppClient* xmpp_client() {
+ return client_;
+ }
+
+ // Returns the proxy that is being used to connect (or
+ // the default proxy information if all attempted
+ // connections failed).
+ const talk_base::ProxyInfo& proxy() const;
+
+ // Typically handled by creating a new SingleLoginAttempt
+ // and doing StartConnection
+ sigslot::signal0<> SignalUnexpectedDisconnect;
+
+ // Typically handled by setting storing the redirect for 5 seconds,
+ // and setting it into LoginSettings, then creating a new SingleLoginAttempt,
+ // and doing StartConnection.
+ //
+ // SignalRedirect(const std::string& redirect_server, int redirect_port);
+ sigslot::signal2<const std::string&, int> SignalRedirect;
+
+ sigslot::signal0<> SignalNeedAutoReconnect;
+
+ // SignalClientStateChange(buzz::XmppEngine::State new_state);
+ sigslot::signal1<buzz::XmppEngine::State> SignalClientStateChange;
+
+ // See the LoginFailure for how to handle this.
+ sigslot::signal1<const LoginFailure&> SignalLoginFailure;
+
+ // Sent when there is a graceful log-off (state goes to closed
+ // with no error)
+ sigslot::signal0<> SignalLogoff;
+
+ sigslot::repeater2<const char*, int> SignalLogInput;
+ sigslot::repeater2<const char*, int> SignalLogOutput;
+
+ protected:
+ virtual void Stop();
+
+ private:
+ void DoLogin(const ConnectionSettings& connection_settings);
+ buzz::AsyncSocket* CreateSocket(const buzz::XmppClientSettings& xcs);
+ buzz::PreXmppAuth* CreatePreXmppAuth(const buzz::XmppClientSettings& xcs);
+
+ // cleans up any xmpp client state to get ready for a new one
+ void ClearClient();
+
+ void HandleConnectionError(
+ buzz::XmppEngine::Error code,
+ int subcode,
+ const buzz::XmlElement* stream_error,
+ const buzz::CaptchaChallenge& captcha_challenge);
+ void HandleConnectionPasswordError(
+ const buzz::CaptchaChallenge& captcha_challenge);
+
+ void DiagnoseConnectionError();
+ void OnHttpTestDone(talk_base::SignalThread* thread);
+
+ void OnAuthenticationError();
+ void OnCertificateExpired();
+ void OnFreshAuthCookie(const std::string& auth_cookie);
+ void OnClientStateChange(buzz::XmppEngine::State state);
+ void OnClientStateChangeClosed(buzz::XmppEngine::State previous_state);
+ void OnAttemptedAllConnections(bool successfully_resolved_dns,
+ int first_dns_error);
+
+ bool auto_reconnect() const;
+
+ buzz::XmppEngine::State state_;
+ buzz::XmppEngine::Error code_;
+ int subcode_;
+ bool need_authentication_;
+ bool certificate_expired_;
+ bool cookie_refreshed_;
+ bool successful_connection_;
+ LoginSettings* login_settings_;
+ buzz::XmppClient* client_;
+ scoped_ptr<XmppConnectionGenerator> connection_generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(SingleLoginAttempt);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_SINGLE_LOGIN_ATTEMPT_H_
diff --git a/chrome/browser/sync/notifier/communicator/talk_auth_task.cc b/chrome/browser/sync/notifier/communicator/talk_auth_task.cc
new file mode 100644
index 0000000..a69609c7
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/talk_auth_task.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/talk_auth_task.h"
+
+#include "chrome/browser/sync/notifier/communicator/login.h"
+#include "chrome/browser/sync/notifier/communicator/login_settings.h"
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "chrome/browser/sync/notifier/gaia_auth/gaiaauth.h"
+#include "talk/base/common.h"
+#include "talk/base/urlencode.h"
+#include "talk/xmpp/xmppclient.h"
+
+namespace notifier {
+const char kTalkGadgetAuthPath[] = "/auth";
+
+TalkAuthTask::TalkAuthTask(talk_base::Task* parent,
+ Login* login,
+ const char* url)
+ : talk_base::Task(parent),
+ login_(login),
+ url_(url) {
+ ASSERT(login && !url_.empty());
+}
+
+int TalkAuthTask::ProcessStart() {
+ auth_.reset(new buzz::GaiaAuth(GetUserAgentString(),
+ GetProductSignature()));
+ auth_->SignalAuthDone.connect(
+ this,
+ &TalkAuthTask::OnAuthDone);
+ auth_->StartAuth(login_->xmpp_client()->jid().BareJid(),
+ login_->login_settings().user_settings().pass(),
+ "talk");
+ return STATE_RESPONSE;
+}
+
+int TalkAuthTask::ProcessResponse() {
+ ASSERT(auth_.get());
+ if (!auth_->IsAuthDone()) {
+ return STATE_BLOCKED;
+ }
+ SignalAuthDone(*this);
+ return STATE_DONE;
+}
+
+
+void TalkAuthTask::OnAuthDone() {
+ Wake();
+}
+
+bool TalkAuthTask::HadError() const {
+ return auth_->HadError();
+}
+
+std::string TalkAuthTask::GetAuthenticatedUrl(
+ const char* talk_base_url) const {
+ ASSERT(talk_base_url && *talk_base_url && !auth_->HadError());
+
+ std::string auth_url(talk_base_url);
+ auth_url.append(kTalkGadgetAuthPath);
+ auth_url.append("?silent=true&redirect=true&host=");
+ auth_url.append(UrlEncodeString(url_));
+ auth_url.append("&auth=");
+ auth_url.append(auth_->GetAuth());
+ return auth_url;
+}
+
+std::string TalkAuthTask::GetSID() const {
+ return auth_->GetSID();
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/talk_auth_task.h b/chrome/browser/sync/notifier/communicator/talk_auth_task.h
new file mode 100644
index 0000000..2f690a37
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/talk_auth_task.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_TALK_AUTH_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_TALK_AUTH_TASK_H_
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/task.h"
+
+namespace buzz {
+class GaiaAuth;
+}
+
+namespace notifier {
+class Login;
+
+// Create an authenticated talk url from an unauthenticated url
+class TalkAuthTask : public talk_base::Task, public sigslot::has_slots<> {
+ public:
+ TalkAuthTask(talk_base::Task* parent,
+ Login* login,
+ const char* url);
+
+ // An abort method which doesn't take any parameters.
+ // (talk_base::Task::Abort() has a default parameter.)
+ //
+ // The primary purpose of this method is to allow a
+ // signal to be hooked up to abort this task.
+ void Abort() {
+ talk_base::Task::Abort();
+ }
+
+ const std::string& url() {
+ return url_;
+ }
+
+ std::string GetAuthenticatedUrl(const char* talk_base_url) const;
+ std::string GetSID() const;
+
+ sigslot::signal1<const TalkAuthTask&> SignalAuthDone;
+
+ bool HadError() const;
+
+ // TODO(sync): add captcha support
+
+ protected:
+ virtual int ProcessStart();
+ virtual int ProcessResponse();
+
+ private:
+ void OnAuthDone();
+
+ scoped_ptr<buzz::GaiaAuth> auth_;
+ Login* login_;
+ std::string url_;
+ DISALLOW_COPY_AND_ASSIGN(TalkAuthTask);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_TALK_AUTH_TASK_H_
diff --git a/chrome/browser/sync/notifier/communicator/xml_parse_helpers-inl.h b/chrome/browser/sync/notifier/communicator/xml_parse_helpers-inl.h
new file mode 100644
index 0000000..b400218
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xml_parse_helpers-inl.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_INL_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_INL_H_
+
+#include <sstream>
+
+#include "chrome/browser/sync/notifier/communicator/xml_parse_helpers.h"
+#include "talk/xmllite/xmlelement.h"
+
+namespace notifier {
+
+template<class T>
+void SetAttr(buzz::XmlElement* xml, const buzz::QName& name, const T& data) {
+ std::ostringstream ost;
+ ost << data;
+ xml->SetAttr(name, ost.str());
+}
+
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_INL_H_
diff --git a/chrome/browser/sync/notifier/communicator/xml_parse_helpers.cc b/chrome/browser/sync/notifier/communicator/xml_parse_helpers.cc
new file mode 100644
index 0000000..b05f439
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xml_parse_helpers.cc
@@ -0,0 +1,185 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/xml_parse_helpers.h"
+#include "chrome/browser/sync/notifier/communicator/xml_parse_helpers-inl.h"
+
+#include <string>
+
+#include "chrome/browser/sync/notifier/base/string.h"
+#include "talk/base/basicdefs.h"
+#include "talk/base/stream.h"
+#include "talk/xmllite/xmlbuilder.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmllite/xmlparser.h"
+#include "talk/xmllite/xmlprinter.h"
+#include "talk/xmpp/jid.h"
+
+namespace notifier {
+
+buzz::XmlElement* ReadXmlFromStream(talk_base::StreamInterface* stream) {
+ buzz::XmlBuilder builder;
+ buzz::XmlParser parser(&builder);
+
+ const int kBufferSize = 4 * 1024;
+ char buf[kBufferSize];
+
+ talk_base::StreamResult result = talk_base::SR_SUCCESS;
+ while(true) {
+ size_t read = 0;
+
+ // Read a chunk
+ result = stream->Read(buf, kBufferSize, &read, NULL);
+ if (result != talk_base::SR_SUCCESS)
+ break;
+
+ // Pass it to the parser
+ parser.Parse(buf, read, false);
+ }
+
+ if (result == talk_base::SR_EOS) {
+ parser.Parse(NULL, 0, true);
+ return builder.CreateElement();
+ }
+
+ return NULL;
+}
+
+bool ParseInt64Attr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, int64* result) {
+ if (!element->HasAttr(attribute))
+ return false;
+ std::string text = element->Attr(attribute);
+ char* error = NULL;
+#ifdef POSIX
+ *result = atoll(text.c_str());
+#else
+ *result = _strtoi64(text.c_str(), &error, 10);
+#endif
+ return text.c_str() != error;
+}
+
+bool ParseIntAttr(const buzz::XmlElement* element, const buzz::QName& attribute,
+ int* result) {
+ if (!element->HasAttr(attribute))
+ return false;
+ std::string text = element->Attr(attribute);
+ char* error = NULL;
+ *result = static_cast<int>(strtol(text.c_str(), &error, 10));
+ return text.c_str() != error;
+}
+
+bool ParseBoolAttr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, bool* result) {
+ int int_value = 0;
+ if (!ParseIntAttr(element, attribute, &int_value))
+ return false;
+ *result = int_value != 0;
+ return true;
+}
+
+bool ParseStringAttr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, std::string* result) {
+ if (!element->HasAttr(attribute))
+ return false;
+ *result = element->Attr(attribute);
+ return true;
+}
+
+void WriteXmlToStream(talk_base::StreamInterface* stream,
+ const buzz::XmlElement* xml) {
+ // Save it all to a string and then write that string out to disk.
+ //
+ // This is probably really inefficient in multiple ways. We probably
+ // have an entire string copy of the XML in memory twice -- once in the
+ // stream and once in the string. There is probably a way to get the data
+ // directly out of the stream but I don't have the time to decode the stream
+ // classes right now.
+ std::ostringstream s;
+ buzz::XmlPrinter::PrintXml(&s, xml);
+ std::string output_string = s.str();
+ stream->WriteAll(output_string.data(), output_string.length(), NULL, NULL);
+}
+
+bool SetInt64Attr(buzz::XmlElement* element, const buzz::QName& attribute,
+ int64 value) {
+ if (!element->HasAttr(attribute))
+ return false;
+ element->AddAttr(attribute, Int64ToString(value).c_str());
+ return true;
+}
+
+bool SetIntAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ int value) {
+ if (!element->HasAttr(attribute))
+ return false;
+ element->AddAttr(attribute, IntToString(value).c_str());
+ return true;
+}
+
+bool SetBoolAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ bool value) {
+ int int_value = 0;
+ if (value) {
+ int_value = 1;
+ }
+ return SetIntAttr(element, attribute, int_value);
+}
+
+bool SetStringAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ const std::string& value) {
+ if (!element->HasAttr(attribute))
+ return false;
+ element->AddAttr(attribute, value);
+ return true;
+}
+
+
+// XmlStream
+
+XmlStream::XmlStream()
+ : state_(talk_base::SS_OPEN),
+ builder_(new buzz::XmlBuilder()),
+ parser_(new buzz::XmlParser(builder_.get())) {
+}
+
+XmlStream::~XmlStream() {
+}
+
+buzz::XmlElement* XmlStream::CreateElement() {
+ if (talk_base::SS_OPEN == state_) {
+ Close();
+ }
+ return builder_->CreateElement();
+}
+
+talk_base::StreamResult XmlStream::Read(void* buffer, size_t buffer_len,
+ size_t* read, int* error) {
+ if (error)
+ *error = -1;
+ return talk_base::SR_ERROR;
+}
+
+talk_base::StreamResult XmlStream::Write(const void* data, size_t data_len,
+ size_t* written, int* error) {
+ if (talk_base::SS_OPEN != state_) {
+ if (error)
+ *error = -1;
+ return talk_base::SR_ERROR;
+ }
+ parser_->Parse(static_cast<const char*>(data), data_len, false);
+ if (written)
+ *written = data_len;
+ return talk_base::SR_SUCCESS;
+}
+
+void XmlStream::Close() {
+ if (talk_base::SS_OPEN != state_)
+ return;
+
+ parser_->Parse(NULL, 0, true);
+ state_ = talk_base::SS_CLOSED;
+}
+
+} // namespace buzz
diff --git a/chrome/browser/sync/notifier/communicator/xml_parse_helpers.h b/chrome/browser/sync/notifier/communicator/xml_parse_helpers.h
new file mode 100644
index 0000000..0c918bd
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xml_parse_helpers.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_H_
+
+#include <string>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+
+namespace buzz {
+class XmlBuilder;
+class XmlElement;
+class XmlParser;
+class QName;
+}
+
+namespace notifier {
+buzz::XmlElement* ReadXmlFromStream(talk_base::StreamInterface* stream);
+bool ParseInt64Attr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, int64* result);
+bool ParseIntAttr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, int* result);
+bool ParseBoolAttr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, bool* result);
+bool ParseStringAttr(const buzz::XmlElement* element,
+ const buzz::QName& attribute, std::string* result);
+
+void WriteXmlToStream(talk_base::StreamInterface* stream,
+ const buzz::XmlElement* xml);
+bool SetInt64Attr(buzz::XmlElement* element, const buzz::QName& attribute,
+ int64 result);
+bool SetIntAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ int result);
+bool SetBoolAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ bool result);
+bool SetStringAttr(buzz::XmlElement* element, const buzz::QName& attribute,
+ const std::string& result);
+
+template<class T>
+void SetAttr(buzz::XmlElement* xml, const buzz::QName& name, const T& data);
+
+///////////////////////////////////////////////////////////////////////////////
+// XmlStream
+///////////////////////////////////////////////////////////////////////////////
+
+class XmlStream : public talk_base::StreamInterface {
+ public:
+ XmlStream();
+ virtual ~XmlStream();
+
+ buzz::XmlElement* CreateElement();
+
+ virtual talk_base::StreamState GetState() const { return state_; }
+
+ virtual talk_base::StreamResult Read(void* buffer, size_t buffer_len,
+ size_t* read, int* error);
+ virtual talk_base::StreamResult Write(const void* data, size_t data_len,
+ size_t* written, int* error);
+ virtual void Close();
+
+ private:
+ talk_base::StreamState state_;
+ scoped_ptr<buzz::XmlBuilder> builder_;
+ scoped_ptr<buzz::XmlParser> parser_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace buzz
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XML_PARSE_HELPERS_H_
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.cc b/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.cc
new file mode 100644
index 0000000..f3a5f4c
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.cc
@@ -0,0 +1,210 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// XmppConnectionGenerator does the following algorithm:
+// proxy = ResolveProxyInformation(connection_options)
+// for server in server_list
+// get dns_addresses for server
+// connection_list = (dns_addresses X connection methods X proxy).shuffle()
+// for connection in connection_list
+// yield connection
+
+#include "chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h"
+
+#include <vector>
+
+#include "chrome/browser/sync/notifier/base/async_dns_lookup.h"
+#include "chrome/browser/sync/notifier/base/signal_thread_task.h"
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+#include "chrome/browser/sync/notifier/communicator/connection_settings.h"
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "talk/base/autodetectproxy.h"
+#include "talk/base/httpcommon.h"
+#include "talk/base/logging.h"
+#include "talk/base/task.h"
+#include "talk/base/thread.h"
+#include "talk/xmpp/prexmppauth.h"
+#include "talk/xmpp/xmppclientsettings.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace notifier {
+
+XmppConnectionGenerator::XmppConnectionGenerator(
+ talk_base::Task* parent,
+ const ConnectionOptions* options,
+ bool proxy_only,
+ const ServerInformation* server_list,
+ int server_count)
+ : settings_list_(new ConnectionSettingsList()),
+ settings_index_(0),
+ server_list_(new ServerInformation[server_count]),
+ server_count_(server_count),
+ server_index_(-1),
+ proxy_only_(proxy_only),
+ successfully_resolved_dns_(false),
+ first_dns_error_(0),
+ options_(options),
+ parent_(parent) {
+ assert(parent);
+ assert(options);
+ assert(server_count_ > 0);
+ for (int i = 0; i < server_count_; ++i) {
+ server_list_[i] = server_list[i];
+ }
+}
+
+XmppConnectionGenerator::~XmppConnectionGenerator() {
+ LOG(LS_VERBOSE) << "XmppConnectionGenerator::~XmppConnectionGenerator";
+}
+
+const talk_base::ProxyInfo& XmppConnectionGenerator::proxy() const {
+ assert(settings_list_.get());
+ if (settings_index_ >= settings_list_->GetCount()) {
+ return settings_list_->proxy();
+ }
+
+ ConnectionSettings* settings = settings_list_->GetSettings(settings_index_);
+ return settings->proxy();
+}
+
+// Starts resolving proxy information
+void XmppConnectionGenerator::StartGenerating() {
+ LOG(LS_VERBOSE) << "XmppConnectionGenerator::StartGenerating";
+
+ talk_base::AutoDetectProxy* proxy_detect =
+ new talk_base::AutoDetectProxy(GetUserAgentString());
+
+ if (options_->autodetect_proxy()) {
+ // Pretend the xmpp server is https, when detecting whether a proxy is
+ // required to connect.
+ talk_base::Url<char> host_url("/",
+ server_list_[0].server.IPAsString().c_str(),
+ server_list_[0].server.port());
+ host_url.set_secure(true);
+ proxy_detect->set_server_url(host_url.url());
+ } else if (options_->proxy_host().length()) {
+ talk_base::SocketAddress proxy(options_->proxy_host(),
+ options_->proxy_port());
+ proxy_detect->set_proxy(proxy);
+ }
+ proxy_detect->set_auth_info(options_->use_proxy_auth(),
+ options_->auth_user(),
+ talk_base::CryptString(options_->auth_pass()));
+
+ SignalThreadTask<talk_base::AutoDetectProxy>* wrapper_task =
+ new SignalThreadTask<talk_base::AutoDetectProxy>(parent_, &proxy_detect);
+ wrapper_task->SignalWorkDone.connect(
+ this,
+ &XmppConnectionGenerator::OnProxyDetect);
+ wrapper_task->Start();
+}
+
+void XmppConnectionGenerator::OnProxyDetect(
+ talk_base::AutoDetectProxy* proxy_detect) {
+ LOG(LS_VERBOSE) << "XmppConnectionGenerator::OnProxyDetect";
+
+ ASSERT(settings_list_.get());
+ ASSERT(proxy_detect);
+ settings_list_->SetProxy(proxy_detect->proxy());
+
+ // Start iterating through the connections (which
+ // are generated on demand).
+ UseNextConnection();
+}
+
+void XmppConnectionGenerator::UseNextConnection() {
+ // Trying to connect
+
+ // Iterate to the next possible connection
+ settings_index_++;
+ if (settings_index_ < settings_list_->GetCount()) {
+ // We have more connection settings in the settings_list_ to try, kick
+ // off the next one.
+ UseCurrentConnection();
+ return;
+ }
+
+ // Iterate to the next possible server
+ server_index_++;
+ if (server_index_ < server_count_) {
+ AsyncDNSLookup* dns_lookup = new AsyncDNSLookup(
+ server_list_[server_index_].server);
+ SignalThreadTask<AsyncDNSLookup>* wrapper_task =
+ new SignalThreadTask<AsyncDNSLookup>(parent_, &dns_lookup);
+ wrapper_task->SignalWorkDone.connect(
+ this,
+ &XmppConnectionGenerator::OnServerDNSResolved);
+ wrapper_task->Start();
+ return;
+ }
+
+ // All out of possibilities
+ HandleExhaustedConnections();
+}
+
+void XmppConnectionGenerator::OnServerDNSResolved(
+ AsyncDNSLookup* dns_lookup) {
+ LOG(LS_VERBOSE) << "XmppConnectionGenerator::OnServerDNSResolved";
+
+ // Print logging info
+ LOG(LS_VERBOSE) << " server: " <<
+ server_list_[server_index_].server.ToString() <<
+ " error: " << dns_lookup->error();
+ if (first_dns_error_ == 0 && dns_lookup->error() != 0) {
+ first_dns_error_ = dns_lookup->error();
+ }
+
+ if (!successfully_resolved_dns_ && dns_lookup->ip_list().size() > 0) {
+ successfully_resolved_dns_ = true;
+ }
+
+ for (int i = 0; i < static_cast<int>(dns_lookup->ip_list().size()); ++i) {
+ LOG(LS_VERBOSE)
+ << " ip " << i << " : "
+ << talk_base::SocketAddress::IPToString(dns_lookup->ip_list()[i]);
+ }
+
+ // Build the ip list
+ assert(settings_list_.get());
+ settings_index_ = -1;
+ settings_list_->ClearPermutations();
+ settings_list_->AddPermutations(
+ server_list_[server_index_].server.IPAsString(),
+ dns_lookup->ip_list(),
+ server_list_[server_index_].server.port(),
+ server_list_[server_index_].special_port_magic,
+ proxy_only_);
+
+ UseNextConnection();
+}
+
+static const char * const PROTO_NAMES[cricket::PROTO_LAST+1] = {
+ "udp", "tcp", "ssltcp"
+};
+
+static const char* ProtocolToString(cricket::ProtocolType proto) {
+ return PROTO_NAMES[proto];
+}
+
+void XmppConnectionGenerator::UseCurrentConnection() {
+ LOG(LS_VERBOSE) << "XmppConnectionGenerator::UseCurrentConnection";
+
+ ConnectionSettings* settings = settings_list_->GetSettings(settings_index_);
+ LOG(LS_INFO) << "*** Attempting "
+ << ProtocolToString(settings->protocol()) << " connection to "
+ << settings->server().IPAsString() << ":"
+ << settings->server().port()
+ << " (via " << ProxyToString(settings->proxy().type)
+ << " proxy @ " << settings->proxy().address.IPAsString() << ":"
+ << settings->proxy().address.port() << ")";
+
+ SignalNewSettings(*settings);
+}
+
+void XmppConnectionGenerator::HandleExhaustedConnections() {
+ LOG_F(LS_VERBOSE) << "(" << buzz::XmppEngine::ERROR_SOCKET
+ << ", " << first_dns_error_ << ")";
+ SignalExhaustedSettings(successfully_resolved_dns_, first_dns_error_);
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h b/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h
new file mode 100644
index 0000000..03a7a8f
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_CONNECTION_GENERATOR_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_CONNECTION_GENERATOR_H_
+#include <vector>
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/socketaddress.h"
+
+namespace talk_base {
+class AutoDetectProxy;
+struct ProxyInfo;
+class SignalThread;
+class Task;
+}
+
+namespace notifier {
+class AsyncDNSLookup;
+class ConnectionOptions;
+class ConnectionSettings;
+class ConnectionSettingsList;
+
+struct ServerInformation {
+ talk_base::SocketAddress server;
+ bool special_port_magic;
+};
+
+// Resolves dns names and iterates through the various ip address
+// and transport combinations.
+class XmppConnectionGenerator : public sigslot::has_slots<> {
+ public:
+ // parent is the parent for any tasks needed during this operation
+ // proxy_only indicates if true connections are only attempted using the proxy
+ // server_list is the list of connections to attempt in priority order
+ // server_count is the number of items in the server list
+ XmppConnectionGenerator(talk_base::Task* parent,
+ const ConnectionOptions* options,
+ bool proxy_only,
+ const ServerInformation* server_list,
+ int server_count);
+ ~XmppConnectionGenerator();
+
+ // Only call this once. Create a new XmppConnectionGenerator and
+ // delete the current one if the process needs to start again.
+ void StartGenerating();
+
+ void UseNextConnection();
+ void UseCurrentConnection();
+
+ const talk_base::ProxyInfo& proxy() const;
+
+ sigslot::signal1<const ConnectionSettings&> SignalNewSettings;
+
+ // SignalExhaustedSettings(bool successfully_resolved_dns,
+ // int first_dns_error);
+ sigslot::signal2<bool, int> SignalExhaustedSettings;
+
+ private:
+ void OnProxyDetect(talk_base::AutoDetectProxy* proxy_detect);
+ void OnServerDNSResolved(AsyncDNSLookup* dns_lookup);
+ void HandleExhaustedConnections();
+
+ talk_base::scoped_ptr<ConnectionSettingsList> settings_list_;
+ int settings_index_; // the setting that is currently being used
+ talk_base::scoped_array<ServerInformation> server_list_;
+ int server_count_;
+ int server_index_; // the server that is current being used
+ bool proxy_only_;
+ bool successfully_resolved_dns_;
+ int first_dns_error_;
+ const ConnectionOptions* options_;
+
+ talk_base::Task* parent_;
+ DISALLOW_COPY_AND_ASSIGN(XmppConnectionGenerator);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_CONNECTION_GENERATOR_H_
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_log.cc b/chrome/browser/sync/notifier/communicator/xmpp_log.cc
new file mode 100644
index 0000000..30b0036
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_log.cc
@@ -0,0 +1,111 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if LOGGING
+
+#include "chrome/browser/sync/notifier/communicator/xmpp_log.h"
+
+#include <iomanip>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/notifier/base/time.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+
+namespace notifier {
+
+static bool IsAuthTag(const char* str, size_t len) {
+ // Beware that str is not NULL terminated
+ if (str[0] == '<' &&
+ str[1] == 'a' &&
+ str[2] == 'u' &&
+ str[3] == 't' &&
+ str[4] == 'h' &&
+ str[5] <= ' ') {
+ std::string tag(str, len);
+ if (tag.find("mechanism") != std::string::npos)
+ return true;
+ }
+ return false;
+}
+
+static bool IsChatText(const char* str, size_t len) {
+ // Beware that str is not NULL terminated
+ if (str[0] == '<' &&
+ str[1] == 'm' &&
+ str[2] == 'e' &&
+ str[3] == 's' &&
+ str[4] == 's' &&
+ str[5] == 'a' &&
+ str[6] == 'g' &&
+ str[7] == 'e' &&
+ str[8] <= ' ') {
+ std::string tag(str, len);
+ if (tag.find("chat") != std::string::npos)
+ return true;
+ }
+ return false;
+}
+
+void XmppLog::XmppPrint(bool output) {
+ std::vector<char>* buffer = output ?
+ &xmpp_output_buffer_ : &xmpp_input_buffer_;
+ const bool log_chat = LOG_CHECK_LEVEL(LS_SENSITIVE);
+ if (buffer->size()) {
+ char* time_string = GetLocalTimeAsString();
+ LOG(INFO) << (output ? "SEND >>>>>>>>>>>>>>>>>>>>>>>>>" :
+ "RECV <<<<<<<<<<<<<<<<<<<<<<<<<")
+ << " : " << time_string;
+
+ int start = 0;
+ int nest = 3;
+ for (int i = 0; i < static_cast<int>(buffer->size()); ++i) {
+ if ((*buffer)[i] == '>') {
+ bool indent;
+ if ((i > 0) && ((*buffer)[i - 1] == '/')) {
+ indent = false;
+ } else if ((start + 1 < static_cast<int>(buffer->size())) &&
+ ((*buffer)[start + 1] == '/')) {
+ indent = false;
+ nest -= 2;
+ } else {
+ indent = true;
+ }
+
+ // Output a tag
+ LOG(INFO) << std::setw(nest) << " "
+ << std::string(&((*buffer)[start]), i + 1 - start);
+
+ if (indent)
+ nest += 2;
+
+ // Note if it's a PLAIN auth tag
+ if (IsAuthTag(&((*buffer)[start]), i + 1 - start)) {
+ censor_password_ = true;
+ } else if (!log_chat && IsChatText(&((*buffer)[start]),
+ i + 1 - start)) {
+ censor_password_ = true;
+ }
+
+ start = i + 1;
+ }
+
+ if ((*buffer)[i] == '<' && start < i) {
+ if (censor_password_) {
+ LOG(INFO) << std::setw(nest) << " " << "## TEXT REMOVED ##";
+ censor_password_ = false;
+ } else {
+ LOG(INFO) << std::setw(nest) << " "
+ << std::string(&((*buffer)[start]), i - start);
+ }
+ start = i;
+ }
+ }
+ buffer->erase(buffer->begin(), buffer->begin() + start);
+ }
+}
+} // namespace notifier
+
+#endif // if LOGGING
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_log.h b/chrome/browser/sync/notifier/communicator/xmpp_log.h
new file mode 100644
index 0000000..a6d12bd
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_log.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_LOG_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_LOG_H_
+
+#if LOGGING
+
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/sigslot.h"
+
+namespace notifier {
+
+// Log the xmpp input and output.
+class XmppLog : public sigslot::has_slots<> {
+ public:
+ XmppLog() : censor_password_(false) {
+ }
+
+ void Input(const char* data, int len) {
+ xmpp_input_buffer_.insert(xmpp_input_buffer_.end(), data, data + len);
+ XmppPrint(false);
+ }
+
+ void Output(const char* data, int len) {
+ xmpp_output_buffer_.insert(xmpp_output_buffer_.end(), data, data + len);
+ XmppPrint(true);
+ }
+
+ private:
+ void XmppPrint(bool output);
+
+ std::vector<char> xmpp_input_buffer_;
+ std::vector<char> xmpp_output_buffer_;
+ bool censor_password_;
+ DISALLOW_COPY_AND_ASSIGN(XmppLog);
+};
+} // namespace notifier
+
+#endif // if LOGGING
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_LOG_H_
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.cc b/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.cc
new file mode 100644
index 0000000..9bd65db
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.cc
@@ -0,0 +1,437 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h"
+
+#include <iomanip>
+#include <string>
+
+#include "chrome/browser/sync/notifier/communicator/product_info.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/firewallsocketserver.h"
+#include "talk/base/logging.h"
+#include "talk/base/socketadapters.h"
+#include "talk/base/ssladapter.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace notifier {
+
+XmppSocketAdapter::XmppSocketAdapter(const buzz::XmppClientSettings& xcs,
+ bool allow_unverified_certs)
+ : state_(STATE_CLOSED),
+ error_(ERROR_NONE),
+ wsa_error_(0),
+ socket_(NULL),
+ protocol_(xcs.protocol()),
+ firewall_(false),
+ write_buffer_(NULL),
+ write_buffer_length_(0),
+ write_buffer_capacity_(0),
+ allow_unverified_certs_(allow_unverified_certs) {
+ proxy_.type = xcs.proxy();
+ proxy_.address.SetIP(xcs.proxy_host(), false);
+ proxy_.address.SetPort(xcs.proxy_port());
+ proxy_.username = xcs.proxy_user();
+ proxy_.password = xcs.proxy_pass();
+}
+
+XmppSocketAdapter::~XmppSocketAdapter() {
+ FreeState();
+
+ // Clean up any previous socket - cannot delete socket on close because
+ // close happens during the child socket's stack callback.
+ if (socket_) {
+ delete socket_;
+ socket_ = NULL;
+ }
+}
+
+bool XmppSocketAdapter::FreeState() {
+ int code = 0;
+
+ // Clean up the socket
+ if (socket_ && !(state_ == STATE_CLOSED || state_ == STATE_CLOSING)) {
+ code = socket_->Close();
+ }
+
+ delete[] write_buffer_;
+ write_buffer_ = NULL;
+ write_buffer_length_ = 0;
+ write_buffer_capacity_ = 0;
+
+ if (code) {
+ SetWSAError(code);
+ return false;
+ }
+ return true;
+}
+
+bool XmppSocketAdapter::Connect(const talk_base::SocketAddress& addr) {
+ if (state_ != STATE_CLOSED) {
+ SetError(ERROR_WRONGSTATE);
+ return false;
+ }
+
+ LOG(LS_INFO) << "XmppSocketAdapter::Connect(" << addr.ToString() << ")";
+
+ // Clean up any previous socket - cannot delete socket on close because
+ // close happens during the child socket's stack callback.
+ if (socket_) {
+ delete socket_;
+ socket_ = NULL;
+ }
+
+ talk_base::AsyncSocket* socket =
+ talk_base::Thread::Current()->socketserver()
+ ->CreateAsyncSocket(SOCK_STREAM);
+ if (!socket) {
+ SetWSAError(WSA_NOT_ENOUGH_MEMORY);
+ return false;
+ }
+
+ if (firewall_) {
+ // TODO(sync): Change this to make WSAAsyncSockets support current thread
+ // socket server
+ talk_base::FirewallSocketServer* fw =
+ static_cast<talk_base::FirewallSocketServer*>(
+ talk_base::Thread::Current()->socketserver());
+ socket = fw->WrapSocket(socket, SOCK_STREAM);
+ }
+
+ if (proxy_.type) {
+ talk_base::AsyncSocket* proxy_socket = 0;
+ if (proxy_.type == talk_base::PROXY_SOCKS5) {
+ proxy_socket = new talk_base::AsyncSocksProxySocket(
+ socket, proxy_.address, proxy_.username, proxy_.password);
+ } else {
+ // Note: we are trying unknown proxies as HTTPS currently
+ proxy_socket = new talk_base::AsyncHttpsProxySocket(socket,
+ GetUserAgentString(), proxy_.address,
+ proxy_.username, proxy_.password);
+ }
+ if (!proxy_socket) {
+ SetWSAError(WSA_NOT_ENOUGH_MEMORY);
+ delete socket;
+ return false;
+ }
+ socket = proxy_socket; // for our purposes the proxy is now the socket
+ }
+
+// #if defined(PRODUCTION)
+ if (protocol_ == cricket::PROTO_SSLTCP) {
+ talk_base::AsyncSocket *fake_ssl_socket =
+ new talk_base::AsyncSSLSocket(socket);
+ if (!fake_ssl_socket) {
+ SetWSAError(WSA_NOT_ENOUGH_MEMORY);
+ delete socket;
+ return false;
+ }
+ socket = fake_ssl_socket; // for our purposes the SSL socket is the socket
+ }
+// #endif // PRODUCTION
+
+#if defined(FEATURE_ENABLE_SSL)
+ talk_base::SSLAdapter* ssl = talk_base::SSLAdapter::Create(socket);
+ socket = ssl;
+#endif
+
+// #if !defined(PRODUCTION)
+// if (protocol_ == cricket::PROTO_SSLTCP) {
+// ssl->set_ignore_bad_cert(true);
+// ssl->StartSSL(addr.hostname().c_str(), true);
+// }
+// #endif // PRODUCTION
+
+ socket->SignalReadEvent.connect(this, &XmppSocketAdapter::OnReadEvent);
+ socket->SignalWriteEvent.connect(this, &XmppSocketAdapter::OnWriteEvent);
+ socket->SignalConnectEvent.connect(this, &XmppSocketAdapter::OnConnectEvent);
+ socket->SignalCloseEvent.connect(this, &XmppSocketAdapter::OnCloseEvent);
+
+ // The linux implementation of socket::Connect
+ // returns an error when the connect didn't complete
+ // yet. This can be distinguished from a failure
+ // because socket::IsBlocking is true. Perhaps,
+ // the linux implementation should be made to
+ // behave like the windows version which doesn't do this,
+ // but it seems to be a pattern with these methods
+ // that they return an error if the operation didn't
+ // complete in a sync fashion and one has to check IsBlocking
+ // to tell if was a "real" error.
+ if (socket->Connect(addr) == SOCKET_ERROR && !socket->IsBlocking()) {
+ SetWSAError(socket->GetError());
+ delete socket;
+ return false;
+ }
+
+ socket_ = socket;
+ state_ = STATE_CONNECTING;
+ return true;
+}
+
+bool XmppSocketAdapter::Read(char* data, size_t len, size_t* len_read) {
+ if (len_read)
+ *len_read = 0;
+
+ if (state_ <= STATE_CLOSING) {
+ SetError(ERROR_WRONGSTATE);
+ return false;
+ }
+
+ ASSERT(socket_ != NULL);
+
+ if (IsOpen()) {
+ int result = socket_->Recv(data, len);
+ if (result < 0) {
+ if (!socket_->IsBlocking()) {
+ SetWSAError(socket_->GetError());
+ return false;
+ }
+
+ result = 0;
+ }
+
+ if (len_read)
+ *len_read = result;
+ }
+
+ return true;
+}
+
+bool XmppSocketAdapter::Write(const char* data, size_t len) {
+ if (state_ <= STATE_CLOSING) {
+ // There may be data in a buffer that gets lost. Too bad!
+ SetError(ERROR_WRONGSTATE);
+ return false;
+ }
+
+ ASSERT(socket_ != NULL);
+
+ size_t sent = 0;
+
+ // try an immediate write when there is no buffer
+ // and we aren't in SSL mode or opening the connection
+ if (write_buffer_length_ == 0 && IsOpen()) {
+ int result = socket_->Send(data, len);
+ if (result < 0) {
+ if (!socket_->IsBlocking()) {
+ SetWSAError(socket_->GetError());
+ return false;
+ }
+ result = 0;
+ }
+
+ sent = static_cast<size_t>(result);
+ }
+
+ // Buffer what we didn't send
+ if (sent < len) {
+ QueueWriteData(data + sent, len - sent);
+ }
+
+ // Service the socket right away to push the written data out in SSL mode
+ return HandleWritable();
+}
+
+bool XmppSocketAdapter::Close() {
+ if (state_ == STATE_CLOSING) {
+ return false; // avoid recursion, but not unexpected
+ }
+ if (state_ == STATE_CLOSED) {
+ // in theory should not be trying to re-InternalClose.
+ SetError(ERROR_WRONGSTATE);
+ return false;
+ }
+
+ // todo: deal with flushing close
+ // (flush, don't do reads, clean ssl)
+
+ // If we've gotten to the point where we really do have a socket underneath
+ // then close it. It should call us back to tell us it is closed, and
+ // NotifyClose will be called. We indicate "closing" state so that we
+ // do not recusively try to keep closing the socket.
+ if (socket_) {
+ state_ = STATE_CLOSING;
+ socket_->Close();
+ }
+
+ // If we didn't get the callback, then we better make sure we signal
+ // closed.
+ if (state_ != STATE_CLOSED) {
+ // The socket was closed manually, not directly due to error.
+ if (error_ != ERROR_NONE) {
+ LOG(LS_INFO) << "XmppSocketAdapter::Close - previous Error: " << error_
+ << " WSAError: " << wsa_error_;
+ error_ = ERROR_NONE;
+ wsa_error_ = 0;
+ }
+ NotifyClose();
+ }
+ return true;
+}
+
+void XmppSocketAdapter::NotifyClose() {
+ if (state_ == STATE_CLOSED) {
+ SetError(ERROR_WRONGSTATE);
+ } else {
+ LOG(LS_INFO) << "XmppSocketAdapter::NotifyClose - Error: " << error_
+ << " WSAError: " << wsa_error_;
+ state_ = STATE_CLOSED;
+ SignalClosed();
+ FreeState();
+ }
+}
+
+void XmppSocketAdapter::OnConnectEvent(talk_base::AsyncSocket *socket) {
+ if (state_ == STATE_CONNECTING) {
+ state_ = STATE_OPEN;
+ LOG(LS_INFO) << "XmppSocketAdapter::OnConnectEvent - STATE_OPEN";
+ SignalConnected();
+#if defined(FEATURE_ENABLE_SSL)
+ } else if (state_ == STATE_TLS_CONNECTING) {
+ state_ = STATE_TLS_OPEN;
+ LOG(LS_INFO) << "XmppSocketAdapter::OnConnectEvent - STATE_TLS_OPEN";
+ SignalSSLConnected();
+ if (write_buffer_length_ > 0) {
+ HandleWritable();
+ }
+#endif // defined(FEATURE_ENABLE_SSL)
+ } else {
+ LOG(LS_INFO) << "XmppSocketAdapter::OnConnectEvent - state is " << state_;
+ ASSERT(false);
+ }
+}
+
+void XmppSocketAdapter::OnReadEvent(talk_base::AsyncSocket *socket) {
+ HandleReadable();
+}
+
+void XmppSocketAdapter::OnWriteEvent(talk_base::AsyncSocket *socket) {
+ HandleWritable();
+}
+
+void XmppSocketAdapter::OnCloseEvent(talk_base::AsyncSocket *socket,
+ int error) {
+ LOG(LS_INFO) << "XmppSocketAdapter::OnCloseEvent(" << error << ")";
+ SetWSAError(error);
+ if (error == SOCKET_EACCES) {
+ SignalAuthenticationError(); // proxy needs authentication
+ }
+ NotifyClose();
+}
+
+#if defined(FEATURE_ENABLE_SSL)
+bool XmppSocketAdapter::StartTls(const std::string& verify_host_name) {
+ if (state_ != STATE_OPEN) {
+ SetError(ERROR_WRONGSTATE);
+ return false;
+ }
+
+ state_ = STATE_TLS_CONNECTING;
+
+ ASSERT(write_buffer_length_ == 0);
+
+ talk_base::SSLAdapter* ssl_adapter =
+ static_cast<talk_base::SSLAdapter*>(socket_);
+
+ if (allow_unverified_certs_) {
+ ssl_adapter->set_ignore_bad_cert(true);
+ }
+
+ if (ssl_adapter->StartSSL(verify_host_name.c_str(), false) != 0) {
+ state_ = STATE_OPEN;
+ SetError(ERROR_SSL);
+ return false;
+ }
+
+ return true;
+}
+#endif // defined(FEATURE_ENABLE_SSL)
+
+void XmppSocketAdapter::QueueWriteData(const char* data, size_t len) {
+ // expand buffer if needed
+ if (write_buffer_length_ + len > write_buffer_capacity_) {
+ size_t new_capacity = 1024;
+ while (new_capacity < write_buffer_length_ + len) {
+ new_capacity = new_capacity * 2;
+ }
+ char* new_buffer = new char[new_capacity];
+ ASSERT(write_buffer_length_ <= 64000);
+ memcpy(new_buffer, write_buffer_, write_buffer_length_);
+ delete[] write_buffer_;
+ write_buffer_ = new_buffer;
+ write_buffer_capacity_ = new_capacity;
+ }
+
+ // copy data into the end of buffer
+ memcpy(write_buffer_ + write_buffer_length_, data, len);
+ write_buffer_length_ += len;
+}
+
+void XmppSocketAdapter::FlushWriteQueue(Error* error, int* wsa_error) {
+ ASSERT(error && wsa_error);
+
+ size_t flushed = 0;
+ while (flushed < write_buffer_length_) {
+ int sent = socket_->Send(write_buffer_ + flushed,
+ static_cast<int>(write_buffer_length_ - flushed));
+ if (sent < 0) {
+ if (!socket_->IsBlocking()) {
+ *error = ERROR_WINSOCK;
+ *wsa_error = socket_->GetError();
+ }
+ break;
+ }
+ flushed += static_cast<size_t>(sent);
+ }
+
+ // remove flushed memory
+ write_buffer_length_ -= flushed;
+ memmove(write_buffer_, write_buffer_ + flushed, write_buffer_length_);
+
+ // when everything is flushed, deallocate the buffer if it's gotten big
+ if (write_buffer_length_ == 0) {
+ if (write_buffer_capacity_ > 8192) {
+ delete[] write_buffer_;
+ write_buffer_ = NULL;
+ write_buffer_capacity_ = 0;
+ }
+ }
+}
+
+void XmppSocketAdapter::SetError(Error error) {
+ if (error_ == ERROR_NONE) {
+ error_ = error;
+ }
+}
+
+void XmppSocketAdapter::SetWSAError(int error) {
+ if (error_ == ERROR_NONE && error != 0) {
+ error_ = ERROR_WINSOCK;
+ wsa_error_ = error;
+ }
+}
+
+bool XmppSocketAdapter::HandleReadable() {
+ if (!IsOpen())
+ return false;
+
+ SignalRead();
+ return true;
+}
+
+bool XmppSocketAdapter::HandleWritable() {
+ if (!IsOpen())
+ return false;
+
+ Error error = ERROR_NONE;
+ int wsa_error = 0;
+ FlushWriteQueue(&error, &wsa_error);
+ if (error != ERROR_NONE) {
+ Close();
+ return false;
+ }
+ return true;
+}
+} // namespace notifier
diff --git a/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h b/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h
new file mode 100644
index 0000000..7e42988
--- /dev/null
+++ b/chrome/browser/sync/notifier/communicator/xmpp_socket_adapter.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_SOCKET_ADAPTER_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_SOCKET_ADAPTER_H_
+#include <string>
+
+#include "talk/base/asyncsocket.h"
+#include "talk/xmpp/asyncsocket.h"
+#include "talk/xmpp/xmppclientsettings.h"
+#include "talk/xmpp/xmppengine.h"
+
+#ifndef _WIN32
+// Additional errors used by us from Win32 headers
+#define SEC_E_CERT_EXPIRED static_cast<int>(0x80090328L)
+#define WSA_NOT_ENOUGH_MEMORY ENOMEM
+#endif
+
+namespace notifier {
+
+class XmppSocketAdapter : public buzz::AsyncSocket,
+ public sigslot::has_slots<> {
+ public:
+ XmppSocketAdapter(const buzz::XmppClientSettings& xcs,
+ bool allow_unverified_certs);
+ virtual ~XmppSocketAdapter();
+
+ virtual State state() { return state_; }
+ virtual Error error() { return error_; }
+ virtual int GetError() { return wsa_error_; }
+
+ void set_firewall(bool firewall) { firewall_ = firewall; }
+
+ virtual bool Connect(const talk_base::SocketAddress& addr);
+ virtual bool Read(char* data, size_t len, size_t* len_read);
+ virtual bool Write(const char* data, size_t len);
+ virtual bool Close();
+
+#if defined(FEATURE_ENABLE_SSL)
+ bool StartTls(const std::string& domainname);
+ bool IsOpen() const { return state_ == STATE_OPEN
+ || state_ == STATE_TLS_OPEN; }
+#else
+ bool IsOpen() const { return state_ == STATE_OPEN; }
+#endif
+
+ sigslot::signal0<> SignalAuthenticationError;
+
+ private:
+ // return false if the socket is closed
+ bool HandleReadable();
+ bool HandleWritable();
+
+ State state_;
+ Error error_;
+ int wsa_error_;
+
+ talk_base::AsyncSocket* socket_;
+ cricket::ProtocolType protocol_;
+ talk_base::ProxyInfo proxy_;
+ bool firewall_;
+ char* write_buffer_;
+ size_t write_buffer_length_;
+ size_t write_buffer_capacity_;
+ bool allow_unverified_certs_;
+
+ bool FreeState();
+ void NotifyClose();
+
+ void OnReadEvent(talk_base::AsyncSocket* socket);
+ void OnWriteEvent(talk_base::AsyncSocket* socket);
+ void OnConnectEvent(talk_base::AsyncSocket* socket);
+ void OnCloseEvent(talk_base::AsyncSocket* socket, int error);
+
+ void QueueWriteData(const char* data, size_t len);
+ void FlushWriteQueue(Error* error, int* wsa_error);
+
+ void SetError(Error error);
+ void SetWSAError(int error);
+ DISALLOW_COPY_AND_ASSIGN(XmppSocketAdapter);
+};
+} // namespace notifier
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_COMMUNICATOR_XMPP_SOCKET_ADAPTER_H_
diff --git a/chrome/browser/sync/notifier/gaia_auth/gaiaauth.cc b/chrome/browser/sync/notifier/gaia_auth/gaiaauth.cc
new file mode 100644
index 0000000..3bc6550
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/gaiaauth.cc
@@ -0,0 +1,442 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "chrome/browser/sync/notifier/gaia_auth/gaiaauth.h"
+#include "talk/base/asynchttprequest.h"
+#include "talk/base/firewallsocketserver.h"
+#include "talk/base/httpclient.h"
+#include "talk/base/logging.h"
+#include "talk/base/physicalsocketserver.h"
+#include "talk/base/signalthread.h"
+#include "talk/base/socketadapters.h"
+#include "talk/base/socketpool.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/urlencode.h"
+#include "talk/xmpp/saslcookiemechanism.h"
+#include "talk/xmpp/saslplainmechanism.h"
+
+namespace buzz {
+
+static const int kGaiaAuthTimeoutMs = 30 * 1000; // 30 sec
+
+// Warning, this is externed.
+GaiaServer buzz::g_gaia_server;
+
+///////////////////////////////////////////////////////////////////////////////
+// GaiaAuth::WorkerThread
+///////////////////////////////////////////////////////////////////////////////
+
+// GaiaAuth is NOT invoked during SASL authenticatioin, but
+// it is invoked even before XMPP login begins. As a PreXmppAuth
+// object, it is driven by XmppClient before the XMPP socket is
+// opened. The job of GaiaAuth is to goes out using HTTPS
+// POST to grab cookies from GAIA.
+
+// It is used by XmppClient.
+// It grabs a SaslAuthenticator which knows how to play the cookie login.
+
+class GaiaAuth::WorkerThread : public talk_base::SignalThread {
+ public:
+ WorkerThread(const std::string& username,
+ const talk_base::CryptString& pass,
+ const std::string& token,
+ const std::string& service,
+ const std::string& user_agent,
+ const std::string& signature,
+ bool obtain_auth,
+ const std::string& token_service) :
+ username_(username),
+ pass_(pass),
+ service_(service),
+ firewall_(0),
+ done_(false),
+ success_(false),
+ error_(true),
+ error_code_(0),
+ proxy_auth_required_(false),
+ certificate_expired_(false),
+ auth_token_(token),
+ fresh_auth_token_(false),
+ obtain_auth_(obtain_auth),
+ agent_(user_agent),
+ signature_(signature),
+ token_service_(token_service) {}
+
+ void set_proxy(const talk_base::ProxyInfo& proxy) { proxy_ = proxy; }
+ void set_firewall(talk_base::FirewallManager * firewall) {
+ firewall_ = firewall;
+ }
+ void set_captcha_answer(const CaptchaAnswer& captcha_answer) {
+ captcha_answer_ = captcha_answer;
+ }
+
+ virtual void DoWork() {
+ LOG(INFO) << "GaiaAuth Begin";
+ // Maybe we already have an auth token, then there is nothing to do.
+ if (!auth_token_.empty()) {
+ LOG(INFO) << "Reusing auth token:" << auth_token_;
+ success_ = true;
+ error_ = false;
+ } else {
+ talk_base::PhysicalSocketServer physical;
+ talk_base::SocketServer * ss = &physical;
+ if (firewall_) {
+ ss = new talk_base::FirewallSocketServer(ss, firewall_);
+ }
+
+ talk_base::SslSocketFactory factory(ss, agent_);
+ factory.SetProxy(proxy_);
+ if (g_gaia_server.use_ssl()) {
+ factory.SetIgnoreBadCert(true);
+ factory.UseSSL(g_gaia_server.hostname().c_str());
+ }
+ factory.SetLogging(talk_base::LS_VERBOSE, "GaiaAuth");
+
+ talk_base::ReuseSocketPool pool(&factory);
+ talk_base::HttpClient http(agent_, &pool);
+
+ talk_base::HttpMonitor monitor(ss);
+ monitor.Connect(&http);
+
+ // If we do not already have a SID, let's get one using our password.
+ if (sid_.empty() || (auth_.empty() && obtain_auth_)) {
+ GaiaRequestSid(&http, username_, pass_, signature_,
+ obtain_auth_ ? service_ : "", captcha_answer_,
+ g_gaia_server);
+ ss->Wait(kGaiaAuthTimeoutMs, true);
+
+ error_code_ = monitor.error(); // save off the error code
+
+ if (!monitor.done()) {
+ LOG(INFO) << "GaiaAuth request timed out";
+ goto Cleanup;
+ } else if (monitor.error()) {
+ LOG(INFO) << "GaiaAuth request error: " << monitor.error();
+ if (monitor.error() == talk_base::HE_AUTH) {
+ success_ = false;
+ proxy_auth_required_ = true;
+ } else if (monitor.error() == talk_base::HE_CERTIFICATE_EXPIRED) {
+ success_ = false;
+ certificate_expired_ = true;
+ }
+ goto Cleanup;
+ } else {
+ std::string captcha_token, captcha_url;
+ switch (GaiaParseSidResponse(http, g_gaia_server,
+ &captcha_token, &captcha_url,
+ &sid_, &lsid_, &auth_)) {
+ case GR_ERROR:
+ goto Cleanup;
+
+ case GR_UNAUTHORIZED:
+ if (!captcha_url.empty()) {
+ captcha_challenge_ = buzz::CaptchaChallenge(captcha_token,
+ captcha_url);
+ }
+ // We had no "error" - we were just unauthorized
+ error_ = false;
+ error_code_ = 0;
+ goto Cleanup;
+
+ case GR_SUCCESS:
+ break;
+ }
+ }
+ }
+
+ // If all we need is a SID, then we are done now.
+ if (service_.empty() || obtain_auth_) {
+ success_ = true;
+ error_ = false;
+ error_code_ = 0;
+ goto Cleanup;
+ }
+
+ monitor.reset();
+ GaiaRequestAuthToken(&http, sid_, lsid_, service_, g_gaia_server);
+ ss->Wait(kGaiaAuthTimeoutMs, true);
+
+ error_code_ = monitor.error(); // save off the error code
+
+ if (!monitor.done()) {
+ LOG(INFO) << "GaiaAuth request timed out";
+ } else if (monitor.error()) {
+ LOG(INFO) << "GaiaAuth request error: " << monitor.error();
+ if (monitor.error() == talk_base::HE_AUTH) {
+ success_ = false;
+ proxy_auth_required_ = true;
+ } else if (monitor.error() == talk_base::HE_CERTIFICATE_EXPIRED) {
+ success_ = false;
+ certificate_expired_ = true;
+ }
+ } else {
+ if (GR_SUCCESS == GaiaParseAuthTokenResponse(http, &auth_token_)) {
+ fresh_auth_token_ = true;
+ success_ = true;
+ error_ = false;
+ error_code_ = 0;
+ }
+ }
+ }
+
+ // done authenticating
+
+ Cleanup:
+ done_ = true;
+ }
+
+ bool IsDone() const { return done_; }
+ bool Succeeded() const { return success_; }
+ bool HadError() const { return error_; }
+ int GetError() const { return error_code_; }
+ bool ProxyAuthRequired() const { return proxy_auth_required_; }
+ bool CertificateExpired() const { return certificate_expired_; }
+ const buzz::CaptchaChallenge& GetCaptchaChallenge() {
+ return captcha_challenge_;
+ }
+ bool fresh_auth_token() const { return fresh_auth_token_; }
+
+ talk_base::CryptString GetPassword() const { return pass_; }
+ std::string GetSID() const { return sid_; }
+ std::string GetAuth() const { return auth_; }
+ std::string GetToken() const { return auth_token_; }
+ std::string GetUsername() const { return username_; }
+ std::string GetTokenService() const { return token_service_; }
+
+ private:
+ std::string username_;
+ talk_base::CryptString pass_;
+ std::string service_;
+ talk_base::ProxyInfo proxy_;
+ talk_base::FirewallManager * firewall_;
+ bool done_;
+ bool success_;
+ bool error_;
+ int error_code_;
+ bool proxy_auth_required_;
+ bool certificate_expired_;
+ std::string sid_;
+ std::string lsid_;
+ std::string auth_;
+ std::string auth_token_;
+ buzz::CaptchaChallenge captcha_challenge_;
+ CaptchaAnswer captcha_answer_;
+ bool fresh_auth_token_;
+ bool obtain_auth_;
+ std::string agent_;
+ std::string signature_;
+ std::string token_service_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// GaiaAuth
+///////////////////////////////////////////////////////////////////////////////
+
+GaiaAuth::GaiaAuth(const std::string &user_agent, const std::string &sig)
+ : agent_(user_agent), signature_(sig),
+ firewall_(0), worker_(NULL), done_(false) {
+}
+
+GaiaAuth::~GaiaAuth() {
+ if (worker_) {
+ worker_->Release();
+ worker_ = NULL;
+ }
+}
+
+void GaiaAuth::StartPreXmppAuth(const buzz::Jid& jid,
+ const talk_base::SocketAddress& server,
+ const talk_base::CryptString& pass,
+ const std::string & auth_cookie) {
+ InternalStartGaiaAuth(jid, server, pass, auth_cookie, "mail", false);
+}
+
+void GaiaAuth::StartTokenAuth(const buzz::Jid& jid,
+ const talk_base::CryptString& pass,
+ const std::string& service) {
+ InternalStartGaiaAuth(jid, talk_base::SocketAddress(),
+ pass, "", service, false);
+}
+
+void GaiaAuth::StartAuth(const buzz::Jid& jid,
+ const talk_base::CryptString& pass,
+ const std::string & service) {
+ InternalStartGaiaAuth(jid, talk_base::SocketAddress(),
+ pass, "", service, true);
+}
+
+void GaiaAuth::StartAuthFromSid(const buzz::Jid& jid,
+ const std::string& sid,
+ const std::string& service) {
+ InternalStartGaiaAuth(jid, talk_base::SocketAddress(),
+ talk_base::CryptString(), sid, service, false);
+}
+
+void GaiaAuth::InternalStartGaiaAuth(const buzz::Jid& jid,
+ const talk_base::SocketAddress& server,
+ const talk_base::CryptString& pass,
+ const std::string& token,
+ const std::string& service,
+ bool obtain_auth) {
+ worker_ = new WorkerThread(jid.Str(), pass, token,
+ service, agent_, signature_,
+ obtain_auth, token_service_);
+ worker_->set_proxy(proxy_);
+ worker_->set_firewall(firewall_);
+ worker_->set_captcha_answer(captcha_answer_);
+ worker_->SignalWorkDone.connect(this, &GaiaAuth::OnAuthDone);
+ worker_->Start();
+}
+
+void GaiaAuth::OnAuthDone(talk_base::SignalThread* worker) {
+ if (!worker_->IsDone())
+ return;
+ done_ = true;
+
+ if (worker_->fresh_auth_token()) {
+ SignalFreshAuthCookie(worker_->GetToken());
+ }
+ if (worker_->ProxyAuthRequired()) {
+ SignalAuthenticationError();
+ }
+ if (worker_->CertificateExpired()) {
+ SignalCertificateExpired();
+ }
+ SignalAuthDone();
+}
+
+std::string GaiaAuth::ChooseBestSaslMechanism(
+ const std::vector<std::string> & mechanisms, bool encrypted) {
+ if (!done_)
+ return "";
+
+ std::vector<std::string>::const_iterator it;
+
+ // a token is the weakest auth - 15s, service-limited, so prefer it.
+ it = std::find(mechanisms.begin(), mechanisms.end(), "X-GOOGLE-TOKEN");
+ if (it != mechanisms.end())
+ return "X-GOOGLE-TOKEN";
+
+ // a cookie is the next weakest - 14 days
+ it = std::find(mechanisms.begin(), mechanisms.end(), "X-GOOGLE-COOKIE");
+ if (it != mechanisms.end())
+ return "X-GOOGLE-COOKIE";
+
+ // never pass @google.com passwords without encryption!!
+ if (!encrypted &&
+ buzz::Jid(worker_->GetUsername()).domain() == "google.com") {
+ return "";
+ }
+
+ // as a last resort, use plain authentication
+ if (buzz::Jid(worker_->GetUsername()).domain() != "google.com") {
+ it = std::find(mechanisms.begin(), mechanisms.end(), "PLAIN");
+ if (it != mechanisms.end())
+ return "PLAIN";
+ }
+
+ // No good mechanism found
+ return "";
+}
+
+buzz::SaslMechanism* GaiaAuth::CreateSaslMechanism(
+ const std::string& mechanism) {
+
+ if (!done_) {
+ return NULL;
+ }
+
+ if (mechanism == "X-GOOGLE-TOKEN") {
+ return new buzz::SaslCookieMechanism(
+ mechanism,
+ worker_->GetUsername(),
+ worker_->GetToken(),
+ worker_->GetTokenService());
+ }
+
+ if (mechanism == "X-GOOGLE-COOKIE") {
+ return new buzz::SaslCookieMechanism(
+ "X-GOOGLE-COOKIE",
+ worker_->GetUsername(),
+ worker_->GetSID(),
+ worker_->GetTokenService());
+ }
+
+ if (mechanism == "PLAIN") {
+ return new buzz::SaslPlainMechanism(buzz::Jid(worker_->GetUsername()),
+ worker_->GetPassword());
+ }
+
+ // oh well - none of the above
+ return NULL;
+}
+
+std::string GaiaAuth::CreateAuthenticatedUrl(
+ const std::string & continue_url, const std::string & service) {
+ if (!done_ || worker_->GetToken().empty())
+ return "";
+
+ std::string url;
+ // Note that http_prefix always ends with a "/"
+ url += g_gaia_server.http_prefix()
+ + "accounts/TokenAuth?auth="
+ + worker_->GetToken(); // Do not URL encode - GAIA doesn't like that
+ url += "&service=" + service;
+ url += "&continue=" + UrlEncodeString(continue_url);
+ url += "&source=" + signature_;
+ return url;
+}
+
+std::string GaiaAuth::GetAuthCookie() {
+ assert(IsAuthDone() && IsAuthorized());
+ if (!done_ || !worker_->Succeeded()) {
+ return "";
+ }
+ return worker_->GetToken();
+}
+
+std::string GaiaAuth::GetAuth() {
+ assert(IsAuthDone() && IsAuthorized());
+ if (!done_ || !worker_->Succeeded()) {
+ return "";
+ }
+ return worker_->GetAuth();
+}
+
+std::string GaiaAuth::GetSID() {
+ assert(IsAuthDone() && IsAuthorized());
+ if (!done_ || !worker_->Succeeded()) {
+ return "";
+ }
+ return worker_->GetSID();
+}
+
+bool GaiaAuth::IsAuthDone() {
+ return done_;
+}
+
+bool GaiaAuth::IsAuthorized() {
+ return done_ && worker_ != NULL && worker_->Succeeded();
+}
+
+bool GaiaAuth::HadError() {
+ return done_ && worker_ != NULL && worker_->HadError();
+}
+
+int GaiaAuth::GetError() {
+ if (done_ && worker_ != NULL) {
+ return worker_->GetError();
+ }
+ return 0;
+}
+
+buzz::CaptchaChallenge GaiaAuth::GetCaptchaChallenge() {
+ if (!done_ || worker_->Succeeded()) {
+ return buzz::CaptchaChallenge();
+ }
+ return worker_->GetCaptchaChallenge();
+}
+} // namespace buzz
diff --git a/chrome/browser/sync/notifier/gaia_auth/gaiaauth.h b/chrome/browser/sync/notifier/gaia_auth/gaiaauth.h
new file mode 100644
index 0000000..8919bbc
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/gaiaauth.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Gaia auth code for XMPP notifier support. This should be merged with
+// the other gaia auth file when we have time.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAAUTH_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAAUTH_H_
+
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/notifier/gaia_auth/gaiahelper.h"
+#include "talk/base/cryptstring.h"
+#include "talk/base/messagequeue.h"
+#include "talk/base/proxyinfo.h"
+#include "talk/xmpp/prexmppauth.h"
+
+namespace talk_base {
+class FirewallManager;
+class SignalThread;
+}
+
+namespace buzz {
+
+///////////////////////////////////////////////////////////////////////////////
+// GaiaAuth
+///////////////////////////////////////////////////////////////////////////////
+
+class GaiaAuth : public PreXmppAuth, public sigslot::has_slots<> {
+ public:
+ GaiaAuth(const std::string& user_agent, const std::string& signature);
+ virtual ~GaiaAuth();
+
+ void set_proxy(const talk_base::ProxyInfo& proxy) {
+ proxy_ = proxy;
+ }
+ void set_firewall(talk_base::FirewallManager* firewall) {
+ firewall_ = firewall;
+ }
+ void set_captcha_answer(const CaptchaAnswer& captcha_answer) {
+ captcha_answer_ = captcha_answer;
+ }
+
+ // From inside XMPP login, this is called
+ virtual void StartPreXmppAuth(const buzz::Jid& jid,
+ const talk_base::SocketAddress& server,
+ const talk_base::CryptString& pass,
+ const std::string& auth_cookie);
+
+ void StartTokenAuth(const buzz::Jid& jid,
+ const talk_base::CryptString& pass,
+ const std::string& service);
+
+ // This is used when calling GetAuth()
+ void StartAuth(const buzz::Jid& jid,
+ const talk_base::CryptString& pass,
+ const std::string& service);
+
+ // This is used when bootstrapping from a download page
+ void StartAuthFromSid(const buzz::Jid& jid,
+ const std::string& sid,
+ const std::string& service);
+
+ virtual bool IsAuthDone();
+ virtual bool IsAuthorized();
+ virtual bool HadError();
+ virtual int GetError();
+ virtual buzz::CaptchaChallenge GetCaptchaChallenge();
+ // Returns the auth token that can be sent in an url param to gaia in order
+ // to generate an auth cookie.
+ virtual std::string GetAuthCookie();
+
+ // Returns the auth cookie for gaia.
+ std::string GetAuth();
+ std::string GetSID();
+
+ // Sets / gets the token service to use.
+ std::string token_service() const { return token_service_; }
+ void set_token_service(const std::string& token_service) {
+ token_service_ = token_service;
+ }
+
+ virtual std::string ChooseBestSaslMechanism(
+ const std::vector<std::string>& mechanisms, bool encrypted);
+ virtual buzz::SaslMechanism* CreateSaslMechanism(
+ const std::string& mechanism);
+
+ std::string CreateAuthenticatedUrl(const std::string& continue_url,
+ const std::string& service);
+
+ sigslot::signal0<> SignalAuthenticationError;
+ sigslot::signal0<> SignalCertificateExpired;
+ sigslot::signal1<const std::string&> SignalFreshAuthCookie;
+
+ private:
+ void OnAuthDone(talk_base::SignalThread* worker);
+
+ void InternalStartGaiaAuth(const buzz::Jid& jid,
+ const talk_base::SocketAddress& server,
+ const talk_base::CryptString& pass,
+ const std::string& sid,
+ const std::string& service,
+ bool obtain_auth);
+
+ std::string agent_;
+ std::string signature_;
+ talk_base::ProxyInfo proxy_;
+ talk_base::FirewallManager* firewall_;
+ class WorkerThread;
+ WorkerThread* worker_;
+ bool done_;
+
+ CaptchaAnswer captcha_answer_;
+ std::string token_service_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Globals
+///////////////////////////////////////////////////////////////////////////////
+
+extern GaiaServer g_gaia_server;
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace buzz
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAAUTH_H_
diff --git a/chrome/browser/sync/notifier/gaia_auth/gaiahelper.cc b/chrome/browser/sync/notifier/gaia_auth/gaiahelper.cc
new file mode 100644
index 0000000..3e0683c
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/gaiahelper.cc
@@ -0,0 +1,236 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/gaia_auth/gaiahelper.h"
+#include "talk/base/common.h"
+#include "talk/base/cryptstring.h"
+#include "talk/base/httpclient.h"
+#include "talk/base/httpcommon-inl.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/urlencode.h"
+#include "talk/xmpp/constants.h"
+#include "talk/xmpp/jid.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+std::string GetValueForKey(const std::string & key, const std::string & nvp) {
+ size_t start_of_line = 0;
+ size_t end_of_line = 0;
+ for (;;) { // for each line
+ start_of_line = nvp.find_first_not_of("\r\n", end_of_line);
+ if (start_of_line == std::string::npos)
+ break;
+ end_of_line = nvp.find_first_of("\r\n", start_of_line);
+ if (end_of_line == std::string::npos) {
+ end_of_line = nvp.length();
+ }
+ size_t equals = nvp.find('=', start_of_line);
+ if (equals >= end_of_line ||
+ equals == std::string::npos ||
+ equals - start_of_line != key.length()) {
+ continue;
+ }
+
+ if (nvp.find(key, start_of_line) == start_of_line) {
+ return std::string(nvp, equals + 1, end_of_line - equals - 1);
+ }
+ }
+ return "";
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace buzz {
+
+GaiaServer::GaiaServer()
+ : hostname_("www.google.com"),
+ port_(443),
+ use_ssl_(true) {
+}
+
+bool GaiaServer::SetServer(const char* url) {
+ talk_base::Url<char> parsed(url);
+ hostname_ = parsed.server();
+ port_ = parsed.port();
+ use_ssl_ = parsed.secure();
+ return true; // parsed.valid();
+}
+
+bool GaiaServer::SetDebugServer(const char* server) {
+ const char* colon = strchr(server, ':');
+ if (colon) {
+ hostname_ = std::string(server, colon - server);
+ port_ = atoi(colon+1);
+ use_ssl_ = false;
+ return true;
+ }
+ return false;
+}
+
+std::string GaiaServer::http_prefix() const {
+ talk_base::Url<char> parsed("", hostname_, port_);
+ parsed.set_secure(use_ssl_);
+ return parsed.url();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GaiaRequestSid(talk_base::HttpClient* client,
+ const std::string& username,
+ const talk_base::CryptString& password,
+ const std::string& signature,
+ const std::string& service,
+ const CaptchaAnswer& captcha_answer,
+ const GaiaServer& gaia_server) {
+ buzz::Jid jid(username);
+ std::string usable_name = username;
+ if (jid.domain() == buzz::STR_DEFAULT_DOMAIN) {
+ // The default domain (default.talk.google.com) is not usable
+ // for Gaia auth. But both gmail.com and googlemain.com will
+ // work, because the gaia server doesn't check to make sure the
+ // appropriate one is being used. So we just slam on gmail.com
+ usable_name = jid.node() + "@" + buzz::STR_GMAIL_COM;
+ }
+
+ std::string post_data;
+ post_data += "Email=" + UrlEncodeString(usable_name);
+ post_data += "&Passwd=" + password.UrlEncode();
+ post_data += "&PersistentCookie=false";
+ post_data += "&source=" + signature;
+ // TODO(chron): This behavior is not the same as in the other gaia auth
+ // loader. We should make it the same. Probably GOOGLE is enough, we don't
+ // want to auth against hosted accounts.
+ post_data += "&accountType=HOSTED_OR_GOOGLE";
+ post_data += "&skipvpage=true";
+ if (!service.empty()) {
+ post_data += "&service=" + service;
+ }
+
+ if (!captcha_answer.captcha_token().empty()) {
+ post_data += "&logintoken=" + captcha_answer.captcha_token();
+ post_data += "&logincaptcha="
+ + UrlEncodeString(captcha_answer.captcha_answer());
+ }
+
+ client->reset();
+ client->set_server(talk_base::SocketAddress(gaia_server.hostname(),
+ gaia_server.port(), false));
+ client->request().verb = talk_base::HV_POST;
+ client->request().path = "/accounts/ClientAuth";
+ client->request().setContent("application/x-www-form-urlencoded",
+ new talk_base::MemoryStream(post_data.data(), post_data.size()));
+ client->response().document.reset(new talk_base::MemoryStream);
+ client->start();
+ return true;
+}
+
+GaiaResponse GaiaParseSidResponse(const talk_base::HttpClient& client,
+ const GaiaServer& gaia_server,
+ std::string* captcha_token,
+ std::string* captcha_url,
+ std::string* sid,
+ std::string* lsid,
+ std::string* auth) {
+ uint32 status_code = client.response().scode;
+ const talk_base::MemoryStream* stream =
+ static_cast<const talk_base::MemoryStream*>(
+ client.response().document.get());
+ size_t length;
+ stream->GetPosition(&length);
+ std::string response;
+ if (length > 0) {
+ response.assign(stream->GetBuffer(), length);
+ }
+
+ LOG(LS_INFO) << "GaiaAuth request to " << client.request().path;
+ LOG(LS_INFO) << "GaiaAuth Status Code: " << status_code;
+ LOG(LS_INFO) << response;
+
+ if (status_code == talk_base::HC_FORBIDDEN) {
+ // The error URL may be the relative path to the captcha jpg.
+ std::string image_url = GetValueForKey("CaptchaUrl", response);
+ if (!image_url.empty()) {
+ // We should activate this "full url code" once we have a better ways
+ // to crack the URL for later download. Right now we are too
+ // dependent on what Gaia returns.
+#if 0
+ if (image_url.find("http://") != 0 &&
+ image_url.find("https://") != 0) {
+ if (image_url.find("/") == 0) {
+ *captcha_url = gaia_server.http_prefix() + image_url;
+ } else {
+ *captcha_url = Utf8(gaia_server.http_prefix()).AsString()
+ + "/accounts/" + image_url;
+ }
+ }
+#else
+ *captcha_url = "/accounts/" + image_url;
+#endif
+
+ *captcha_token = GetValueForKey("CaptchaToken", response);
+ }
+ return GR_UNAUTHORIZED;
+ }
+
+ if (status_code != talk_base::HC_OK) {
+ return GR_ERROR;
+ }
+
+ *sid = GetValueForKey("SID", response);
+ *lsid = GetValueForKey("LSID", response);
+ if (auth) {
+ *auth = GetValueForKey("Auth", response);
+ }
+ if (sid->empty() || lsid->empty()) {
+ return GR_ERROR;
+ }
+
+ return GR_SUCCESS;
+}
+
+bool GaiaRequestAuthToken(talk_base::HttpClient* client,
+ const std::string& sid,
+ const std::string& lsid,
+ const std::string& service,
+ const GaiaServer& gaia_server) {
+ std::string post_data;
+ post_data += "SID=" + UrlEncodeString(sid);
+ post_data += "&LSID=" + UrlEncodeString(lsid);
+ post_data += "&service=" + service;
+ post_data += "&Session=true"; // creates two week cookie
+
+ client->reset();
+ client->set_server(talk_base::SocketAddress(gaia_server.hostname(),
+ gaia_server.port(), false));
+ client->request().verb = talk_base::HV_POST;
+ client->request().path = "/accounts/IssueAuthToken";
+ client->request().setContent("application/x-www-form-urlencoded",
+ new talk_base::MemoryStream(post_data.data(), post_data.size()));
+ client->response().document.reset(new talk_base::MemoryStream);
+ client->start();
+ return true;
+}
+
+GaiaResponse GaiaParseAuthTokenResponse(const talk_base::HttpClient& client,
+ std::string* auth_token) {
+ if (client.response().scode != talk_base::HC_OK) {
+ return GR_ERROR;
+ }
+
+ const talk_base::MemoryStream* stream =
+ static_cast<const talk_base::MemoryStream*>(
+ client.response().document.get());
+ size_t length;
+ stream->GetPosition(&length);
+ while ((length > 0) && isspace(stream->GetBuffer()[length-1]))
+ --length;
+ auth_token->assign(stream->GetBuffer(), length);
+ return auth_token->empty() ? GR_ERROR : GR_SUCCESS;
+}
+
+} // namespace buzz
diff --git a/chrome/browser/sync/notifier/gaia_auth/gaiahelper.h b/chrome/browser/sync/notifier/gaia_auth/gaiahelper.h
new file mode 100644
index 0000000..e0303d0
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/gaiahelper.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAHELPER_H__
+#define CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAHELPER_H__
+
+#include <string>
+
+namespace talk_base {
+class CryptString;
+class HttpClient;
+}
+
+namespace buzz {
+
+///////////////////////////////////////////////////////////////////////////////
+
+class CaptchaAnswer {
+ public:
+ CaptchaAnswer() {}
+ CaptchaAnswer(const std::string& token, const std::string& answer)
+ : captcha_token_(token), captcha_answer_(answer) {
+ }
+ const std::string& captcha_token() const { return captcha_token_; }
+ const std::string& captcha_answer() const { return captcha_answer_; }
+
+ private:
+ std::string captcha_token_;
+ std::string captcha_answer_;
+};
+
+class GaiaServer {
+ public:
+ GaiaServer();
+
+ bool SetServer(const char* url); // protocol://server:port
+ bool SetDebugServer(const char* server); // server:port
+
+ const std::string& hostname() const { return hostname_; }
+ int port() const { return port_; }
+ bool use_ssl() const { return use_ssl_; }
+
+ std::string http_prefix() const;
+
+ private:
+ std::string hostname_;
+ int port_;
+ bool use_ssl_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Gaia Authentication Helper Functions
+///////////////////////////////////////////////////////////////////////////////
+
+enum GaiaResponse { GR_ERROR, GR_UNAUTHORIZED, GR_SUCCESS };
+
+bool GaiaRequestSid(talk_base::HttpClient* client,
+ const std::string& username,
+ const talk_base::CryptString& password,
+ const std::string& signature,
+ const std::string& service,
+ const CaptchaAnswer& captcha_answer,
+ const GaiaServer& gaia_server);
+
+GaiaResponse GaiaParseSidResponse(const talk_base::HttpClient& client,
+ const GaiaServer& gaia_server,
+ std::string* captcha_token,
+ std::string* captcha_url,
+ std::string* sid,
+ std::string* lsid,
+ std::string* auth);
+
+bool GaiaRequestAuthToken(talk_base::HttpClient* client,
+ const std::string& sid,
+ const std::string& lsid,
+ const std::string& service,
+ const GaiaServer& gaia_server);
+
+GaiaResponse GaiaParseAuthTokenResponse(const talk_base::HttpClient& client,
+ std::string* auth_token);
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace buzz
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_GAIAHELPER_H__
diff --git a/chrome/browser/sync/notifier/gaia_auth/inet_aton.h b/chrome/browser/sync/notifier/gaia_auth/inet_aton.h
new file mode 100644
index 0000000..a10d6cf
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/inet_aton.h
@@ -0,0 +1,14 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Define inet_aton alone so it's easier to include.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_
+
+#ifdef WIN32
+int inet_aton(const char * cp, struct in_addr* inp);
+#endif
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_
diff --git a/chrome/browser/sync/notifier/gaia_auth/sigslotrepeater.h b/chrome/browser/sync/notifier/gaia_auth/sigslotrepeater.h
new file mode 100644
index 0000000..3e223b9
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/sigslotrepeater.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_SIGSLOTREPEATER_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_SIGSLOTREPEATER_H_
+
+// Repeaters are both signals and slots, which are designed as intermediate
+// pass-throughs for signals and slots which don't know about each other (for
+// modularity or encapsulation). This eliminates the need to declare a signal
+// handler whose sole purpose is to fire another signal. The repeater connects
+// to the originating signal using the 'repeat' method. When the repeated
+// signal fires, the repeater will also fire.
+
+#include "talk/base/sigslot.h"
+
+namespace sigslot {
+
+ template<class mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+ class repeater0 : public signal0<mt_policy>,
+ public has_slots<mt_policy> {
+ public:
+ typedef signal0<mt_policy> base_type;
+ typedef repeater0<mt_policy> this_type;
+
+ repeater0() { }
+ explicit repeater0(const this_type& s) : base_type(s) { }
+
+ void reemit() { signal0<mt_policy>::emit(); }
+ void repeat(base_type &s) { s.connect(this, &this_type::reemit); }
+ };
+
+ template<class arg1_type, class mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+ class repeater1 : public signal1<arg1_type, mt_policy>,
+ public has_slots<mt_policy>
+ {
+ public:
+ typedef signal1<arg1_type, mt_policy> base_type;
+ typedef repeater1<arg1_type, mt_policy> this_type;
+
+ repeater1() { }
+ repeater1(const this_type& s) : base_type(s) { }
+
+ void reemit(arg1_type a1) { signal1<arg1_type, mt_policy>::emit(a1); }
+ void repeat(base_type& s) { s.connect(this, &this_type::reemit); }
+ };
+
+ template<class arg1_type, class arg2_type,
+ class mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+ class repeater2 : public signal2<arg1_type, arg2_type, mt_policy>,
+ public has_slots<mt_policy>
+ {
+ public:
+ typedef signal2<arg1_type, arg2_type, mt_policy> base_type;
+ typedef repeater2<arg1_type, arg2_type, mt_policy> this_type;
+
+ repeater2() { }
+ repeater2(const this_type& s) : base_type(s) { }
+
+ void reemit(arg1_type a1, arg2_type a2) {
+ signal2<arg1_type, arg2_type, mt_policy>::emit(a1, a2);
+ }
+ void repeat(base_type& s) { s.connect(this, &this_type::reemit); }
+ };
+
+ template<class arg1_type, class arg2_type, class arg3_type,
+ class mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+ class repeater3 : public signal3<arg1_type, arg2_type, arg3_type, mt_policy>,
+ public has_slots<mt_policy>
+ {
+ public:
+ typedef signal3<arg1_type, arg2_type, arg3_type, mt_policy> base_type;
+ typedef repeater3<arg1_type, arg2_type, arg3_type, mt_policy> this_type;
+
+ repeater3() { }
+ repeater3(const this_type& s) : base_type(s) { }
+
+ void reemit(arg1_type a1, arg2_type a2, arg3_type a3) {
+ signal3<arg1_type, arg2_type, arg3_type, mt_policy>::emit(a1, a2, a3);
+ }
+ void repeat(base_type& s) { s.connect(this, &this_type::reemit); }
+ };
+
+} // namespace sigslot
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_GAIA_AUTH_INET_ATON_H_SIGSLOTREPEATER_H_
diff --git a/chrome/browser/sync/notifier/gaia_auth/win32window.cc b/chrome/browser/sync/notifier/gaia_auth/win32window.cc
new file mode 100644
index 0000000..f1eb8bf
--- /dev/null
+++ b/chrome/browser/sync/notifier/gaia_auth/win32window.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Originally from libjingle. Minor alterations to compile it in Chrome.
+
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/win32window.h"
+
+namespace talk_base {
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Window
+///////////////////////////////////////////////////////////////////////////////
+
+static const wchar_t kWindowBaseClassName[] = L"WindowBaseClass";
+HINSTANCE instance_ = GetModuleHandle(NULL);
+ATOM window_class_ = 0;
+
+Win32Window::Win32Window() : wnd_(NULL) {
+}
+
+Win32Window::~Win32Window() {
+ ASSERT(NULL == wnd_);
+}
+
+bool Win32Window::Create(HWND parent, const wchar_t* title, DWORD style,
+ DWORD exstyle, int x, int y, int cx, int cy) {
+ if (wnd_) {
+ // Window already exists.
+ return false;
+ }
+
+ if (!window_class_) {
+ // Class not registered, register it.
+ WNDCLASSEX wcex;
+ memset(&wcex, 0, sizeof(wcex));
+ wcex.cbSize = sizeof(wcex);
+ wcex.hInstance = instance_;
+ wcex.lpfnWndProc = &Win32Window::WndProc;
+ wcex.lpszClassName = kWindowBaseClassName;
+ window_class_ = ::RegisterClassEx(&wcex);
+ if (!window_class_) {
+ LOG_GLE(LS_ERROR) << "RegisterClassEx failed";
+ return false;
+ }
+ }
+ wnd_ = ::CreateWindowEx(exstyle, kWindowBaseClassName, title, style,
+ x, y, cx, cy, parent, NULL, instance_, this);
+ return (NULL != wnd_);
+}
+
+void Win32Window::Destroy() {
+ VERIFY(::DestroyWindow(wnd_) != FALSE);
+}
+
+#if 0
+void Win32Window::SetInstance(HINSTANCE instance) {
+ instance_ = instance;
+}
+
+void Win32Window::Shutdown() {
+ if (window_class_) {
+ ::UnregisterClass(MAKEINTATOM(window_class_), instance_);
+ window_class_ = 0;
+ }
+}
+#endif
+
+bool Win32Window::OnMessage(UINT uMsg, WPARAM wParam, LPARAM lParam,
+ LRESULT& result) {
+ switch (uMsg) {
+ case WM_CLOSE:
+ if (!OnClose()) {
+ result = 0;
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+LRESULT Win32Window::WndProc(HWND hwnd, UINT uMsg,
+ WPARAM wParam, LPARAM lParam) {
+ Win32Window* that = reinterpret_cast<Win32Window*>(
+ ::GetWindowLongPtr(hwnd, GWL_USERDATA));
+ if (!that && (WM_CREATE == uMsg)) {
+ CREATESTRUCT* cs = reinterpret_cast<CREATESTRUCT*>(lParam);
+ that = static_cast<Win32Window*>(cs->lpCreateParams);
+ that->wnd_ = hwnd;
+ ::SetWindowLongPtr(hwnd, GWL_USERDATA, reinterpret_cast<LONG_PTR>(that));
+ }
+ if (that) {
+ LRESULT result;
+ bool handled = that->OnMessage(uMsg, wParam, lParam, result);
+ if (WM_DESTROY == uMsg) {
+ for (HWND child = ::GetWindow(hwnd, GW_CHILD); child;
+ child = ::GetWindow(child, GW_HWNDNEXT)) {
+ LOG(LS_INFO) << "Child window: " << static_cast<void*>(child);
+ }
+ }
+ if (WM_NCDESTROY == uMsg) {
+ ::SetWindowLongPtr(hwnd, GWL_USERDATA, NULL);
+ that->wnd_ = NULL;
+ that->OnDestroyed();
+ }
+ if (handled) {
+ return result;
+ }
+ }
+ return ::DefWindowProc(hwnd, uMsg, wParam, lParam);
+}
+
+} // namespace talk_base
diff --git a/chrome/browser/sync/notifier/listener/listen_task.cc b/chrome/browser/sync/notifier/listener/listen_task.cc
new file mode 100644
index 0000000..ff43df1
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/listen_task.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/listener/listen_task.h"
+
+#include "base/logging.h"
+#include "talk/base/task.h"
+#include "talk/xmllite/qname.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/constants.h"
+#include "talk/xmpp/xmppclient.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace browser_sync {
+
+ListenTask::ListenTask(Task* parent)
+ : buzz::XmppTask(parent, buzz::XmppEngine::HL_TYPE) {
+}
+
+ListenTask::~ListenTask() {
+}
+
+int ListenTask::ProcessStart() {
+ LOG(INFO) << "P2P: Listener task started.";
+ return STATE_RESPONSE;
+}
+
+int ListenTask::ProcessResponse() {
+ LOG(INFO) << "P2P: Listener response received.";
+ const buzz::XmlElement* stanza = NextStanza();
+ if (stanza == NULL) {
+ return STATE_BLOCKED;
+ }
+ // Acknowledge receipt of the notificaiton to the buzz server.
+ scoped_ptr<buzz::XmlElement> response_stanza(MakeIqResult(stanza));
+ SendStanza(response_stanza.get());
+
+ // Inform listeners that a notification has been received.
+ SignalUpdateAvailable();
+ return STATE_RESPONSE;
+}
+
+bool ListenTask::HandleStanza(const buzz::XmlElement* stanza) {
+ if (IsValidNotification(stanza)) {
+ QueueStanza(stanza);
+ return true;
+ }
+ return false;
+}
+
+bool ListenTask::IsValidNotification(const buzz::XmlElement* stanza) {
+ static const std::string kNSNotifier("google:notifier");
+ static const buzz::QName kQnNotifierGetAll(true, kNSNotifier, "getAll");
+ // An update notificaiton has the following form.
+ // <cli:iq from="{bare_jid}" to="{full_jid}"
+ // id="#" type="set" xmlns:cli="jabber:client">
+ // <not:getAll xmlns:not="google:notifier">
+ // <Timestamp long="#" xmlns=""/>
+ // </not:getAll>
+ // </cli:iq>
+ if (MatchRequestIq(stanza, buzz::STR_SET, kQnNotifierGetAll) &&
+ !stricmp(stanza->Attr(buzz::QN_TO).c_str(),
+ GetClient()->jid().Str().c_str()) &&
+ !stricmp(stanza->Attr(buzz::QN_FROM).c_str(),
+ GetClient()->jid().BareJid().Str().c_str())) {
+ return true;
+ }
+ return false;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/notifier/listener/listen_task.h b/chrome/browser/sync/notifier/listener/listen_task.h
new file mode 100644
index 0000000..ab1da3e
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/listen_task.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class listens for notifications from the talk service, and signals when
+// they arrive. It checks all incoming stanza's to see if they look like
+// notifications, and filters out those which are not valid.
+//
+// The task is deleted automatically by the buzz::XmppClient. This occurs in the
+// destructor of TaskRunner, which is a superclass of buzz::XmppClient.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_LISTEN_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_LISTEN_TASK_H_
+
+#include "talk/xmpp/xmpptask.h"
+
+namespace buzz {
+class buzz::XmlElement;
+class Jid;
+}
+
+namespace browser_sync {
+
+class ListenTask : public buzz::XmppTask {
+ public:
+ explicit ListenTask(Task* parent);
+ virtual ~ListenTask();
+
+ // Overriden from buzz::XmppTask.
+ virtual int ProcessStart();
+ virtual int ProcessResponse();
+ virtual bool HandleStanza(const buzz::XmlElement* stanza);
+
+ // Signal callback upon receipt of a notification.
+ sigslot::signal0<> SignalUpdateAvailable;
+
+ private:
+ // Decide whether a notification should start a sync. We only validate that
+ // this notification came from our own Jid().
+ bool IsValidNotification(const buzz::XmlElement* stanza);
+
+ DISALLOW_COPY_AND_ASSIGN(ListenTask);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_LISTEN_TASK_H_
diff --git a/chrome/browser/sync/notifier/listener/listener_unittest.cc b/chrome/browser/sync/notifier/listener/listener_unittest.cc
new file mode 100644
index 0000000..26697d6
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/listener_unittest.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/chrome/browser/sync/notifier/listener/mediator_thread.h b/chrome/browser/sync/notifier/listener/mediator_thread.h
new file mode 100644
index 0000000..7626b41
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/mediator_thread.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// These methods should post messages to a queue which a different thread will
+// later come back and read from.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_H_
+
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace browser_sync {
+
+class MediatorThread {
+ public:
+ enum MediatorMessage {
+ MSG_LOGGED_IN,
+ MSG_LOGGED_OUT,
+ MSG_SUBSCRIPTION_SUCCESS,
+ MSG_SUBSCRIPTION_FAILURE,
+ MSG_NOTIFICATION_RECEIVED,
+ MSG_NOTIFICATION_SENT
+ };
+
+ MediatorThread() {}
+ virtual ~MediatorThread() {}
+
+ virtual void Login(const buzz::XmppClientSettings& settings) = 0;
+ virtual void Logout() = 0;
+ virtual void Start() = 0;
+ virtual void SubscribeForUpdates() = 0;
+ virtual void ListenForUpdates() = 0;
+ virtual void SendNotification() = 0;
+
+ // Connect to this for messages about talk events.
+ sigslot::signal1<MediatorMessage> SignalStateChange;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_H_
diff --git a/chrome/browser/sync/notifier/listener/mediator_thread_impl.cc b/chrome/browser/sync/notifier/listener/mediator_thread_impl.cc
new file mode 100644
index 0000000..04a536d
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/mediator_thread_impl.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "chrome/browser/sync/notifier/listener/mediator_thread_impl.h"
+
+#include "base/logging.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/notifier/base/async_dns_lookup.h"
+#include "chrome/browser/sync/notifier/base/task_pump.h"
+#include "chrome/browser/sync/notifier/communicator/connection_options.h"
+#include "chrome/browser/sync/notifier/communicator/const_communicator.h"
+#include "chrome/browser/sync/notifier/communicator/xmpp_connection_generator.h"
+#include "chrome/browser/sync/notifier/listener/listen_task.h"
+#include "chrome/browser/sync/notifier/listener/send_update_task.h"
+#include "chrome/browser/sync/notifier/listener/subscribe_task.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "talk/base/thread.h"
+#ifdef WIN32
+#include "talk/base/win32socketserver.h"
+#endif
+#include "talk/xmpp/xmppclient.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+using std::string;
+
+namespace browser_sync {
+
+MediatorThreadImpl::MediatorThreadImpl() {
+}
+
+MediatorThreadImpl::~MediatorThreadImpl() {
+}
+
+void MediatorThreadImpl::Start() {
+ talk_base::Thread::Start();
+}
+
+void MediatorThreadImpl::Run() {
+ NameCurrentThreadForDebugging("SyncEngine_MediatorThread");
+ // For win32, this sets up the win32socketserver.
+ // Note that it needs to dispatch windows messages
+ // since that is what the win32 socket server uses.
+#ifdef WIN32
+ scoped_ptr<talk_base::SocketServer> socket_server(
+ new talk_base::Win32SocketServer(this));
+ talk_base::SocketServer* old_socket_server = socketserver();
+ set_socketserver(socket_server.get());
+
+ // Since we just changed the socket server,
+ // ensure that any queued up messages are processed.
+ socket_server->WakeUp();
+ ::MSG message;
+ while (::GetMessage(&message, NULL, 0, 0)) {
+ ::TranslateMessage(&message);
+ ::DispatchMessage(&message);
+ if (IsStopping()) {
+ break;
+ }
+ }
+#endif
+
+ ProcessMessages(talk_base::kForever);
+
+#ifdef WIN32
+ set_socketserver(old_socket_server);
+ socket_server.reset();
+#endif
+}
+
+void MediatorThreadImpl::Login(const buzz::XmppClientSettings& settings) {
+ Post(this, CMD_LOGIN, new LoginData(settings));
+}
+
+void MediatorThreadImpl::Logout() {
+ Post(this, CMD_DISCONNECT);
+ Stop();
+}
+
+void MediatorThreadImpl::ListenForUpdates() {
+ Post(this, CMD_LISTEN_FOR_UPDATES);
+}
+
+void MediatorThreadImpl::SubscribeForUpdates() {
+ Post(this, CMD_SUBSCRIBE_FOR_UPDATES);
+}
+
+void MediatorThreadImpl::SendNotification() {
+ Post(this, CMD_SEND_NOTIFICATION);
+}
+
+void MediatorThreadImpl::ProcessMessages(int milliseconds) {
+ talk_base::Thread::ProcessMessages(milliseconds);
+}
+
+void MediatorThreadImpl::OnMessage(talk_base::Message* msg) {
+ scoped_ptr<LoginData> data;
+ switch (msg->message_id) {
+ case CMD_LOGIN:
+ DCHECK(msg->pdata);
+ data.reset(reinterpret_cast<LoginData*>(msg->pdata));
+ DoLogin(data.get());
+ break;
+ case CMD_DISCONNECT:
+ DoDisconnect();
+ break;
+ case CMD_LISTEN_FOR_UPDATES:
+ DoListenForUpdates();
+ break;
+ case CMD_SEND_NOTIFICATION:
+ DoSendNotification();
+ break;
+ case CMD_SUBSCRIBE_FOR_UPDATES:
+ DoSubscribeForUpdates();
+ break;
+ default:
+ LOG(ERROR) << "P2P: Someone passed a bad message to the thread.";
+ break;
+ }
+}
+
+void MediatorThreadImpl::DoLogin(LoginData* login_data) {
+ LOG(INFO) << "P2P: Thread logging into talk network.";
+ buzz::XmppClientSettings& user_settings = login_data->user_settings;
+
+ // Set our service id.
+ user_settings.set_token_service(SYNC_SERVICE_NAME);
+
+ // Start a new pump for the login.
+ login_.reset();
+ pump_.reset(new notifier::TaskPump());
+
+ notifier::ServerInformation server_list[2];
+ int server_list_count = 2;
+
+ // The default servers know how to serve over port 443 (that's the magic)
+ server_list[0].server = talk_base::SocketAddress("talk.google.com",
+ notifier::kDefaultXmppPort,
+ true); // Use DNS
+ server_list[0].special_port_magic = true;
+ server_list[1].server = talk_base::SocketAddress("talkx.l.google.com",
+ notifier::kDefaultXmppPort,
+ true); // Use DNS
+ server_list[1].special_port_magic = true;
+
+ // Autodetect proxy is on by default.
+ notifier::ConnectionOptions options;
+
+ // Language is not used in the stanza so we default to |en|.
+ std::string lang = "en";
+ login_.reset(new notifier::Login(pump_.get(),
+ user_settings,
+ options,
+ lang,
+ server_list,
+ server_list_count,
+ // NetworkStatusDetectionTask will be
+ // created for you if NULL is passed in.
+ // It helps shorten the autoreconnect
+ // time after going offline and coming
+ // back online.
+ NULL,
+ // talk_base::FirewallManager* is NULL.
+ NULL,
+ false,
+ // Both the proxy and a non-proxy route
+ // will be attempted.
+ false,
+ // |previous_login_successful| is true
+ // because we have already done a
+ // successful gaia login at this point
+ // through another mechanism.
+ true));
+
+ login_->SignalClientStateChange.connect(
+ this, &MediatorThreadImpl::OnClientStateChangeMessage);
+ login_->SignalLoginFailure.connect(
+ this, &MediatorThreadImpl::OnLoginFailureMessage);
+ login_->StartConnection();
+}
+
+void MediatorThreadImpl::OnInputDebug(const char* msg, int length) {
+ string output(msg, length);
+ LOG(INFO) << "P2P: OnInputDebug:" << output << ".";
+}
+
+void MediatorThreadImpl::OnOutputDebug(const char* msg, int length) {
+ string output(msg, length);
+ LOG(INFO) << "P2P: OnOutputDebug:" << output << ".";
+}
+
+void MediatorThreadImpl::DoDisconnect() {
+ LOG(INFO) << "P2P: Thread logging out of talk network.";
+ login_.reset();
+ // Delete the old pump while on the thread to ensure that
+ // everything is cleaned-up in a predicatable manner.
+ pump_.reset();
+}
+
+void MediatorThreadImpl::DoSubscribeForUpdates() {
+ SubscribeTask* subscription = new SubscribeTask(xmpp_client());
+ subscription->SignalStatusUpdate.connect(
+ this,
+ &MediatorThreadImpl::OnSubscriptionStateChange);
+ subscription->Start();
+}
+
+void MediatorThreadImpl::DoListenForUpdates() {
+ ListenTask* listener = new ListenTask(xmpp_client());
+ listener->SignalUpdateAvailable.connect(
+ this,
+ &MediatorThreadImpl::OnUpdateListenerMessage);
+ listener->Start();
+}
+
+void MediatorThreadImpl::DoSendNotification() {
+ SendUpdateTask* task = new SendUpdateTask(xmpp_client());
+ task->SignalStatusUpdate.connect(
+ this,
+ &MediatorThreadImpl::OnUpdateNotificationSent);
+ task->Start();
+}
+
+void MediatorThreadImpl::OnUpdateListenerMessage() {
+ SignalStateChange(MSG_NOTIFICATION_RECEIVED);
+}
+
+void MediatorThreadImpl::OnUpdateNotificationSent(bool success) {
+ if (success) {
+ SignalStateChange(MSG_NOTIFICATION_SENT);
+ }
+}
+
+void MediatorThreadImpl::OnLoginFailureMessage(
+ const notifier::LoginFailure& failure) {
+ SignalStateChange(MSG_LOGGED_OUT);
+}
+
+void MediatorThreadImpl::OnClientStateChangeMessage(
+ notifier::Login::ConnectionState state) {
+ switch (state) {
+ case notifier::Login::STATE_CLOSED:
+ SignalStateChange(MSG_LOGGED_OUT);
+ break;
+ case notifier::Login::STATE_RETRYING:
+ case notifier::Login::STATE_OPENING:
+ LOG(INFO) << "P2P: Thread trying to connect.";
+ // Maybe first time logon, maybe intermediate network disruption
+ // Assume the server went down, and lost our subscription for updates.
+ SignalStateChange(MSG_SUBSCRIPTION_FAILURE);
+ break;
+ case notifier::Login::STATE_OPENED:
+ SignalStateChange(MSG_LOGGED_IN);
+ break;
+ default:
+ LOG(WARNING) << "P2P: Unknown client state change.";
+ break;
+ }
+}
+
+void MediatorThreadImpl::OnSubscriptionStateChange(bool success) {
+ if (success) {
+ SignalStateChange(MSG_SUBSCRIPTION_SUCCESS);
+ } else {
+ SignalStateChange(MSG_SUBSCRIPTION_FAILURE);
+ }
+}
+
+buzz::XmppClient* MediatorThreadImpl::xmpp_client() {
+ if (!login_.get()) {
+ return NULL;
+ }
+ return login_->xmpp_client();
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/notifier/listener/mediator_thread_impl.h b/chrome/browser/sync/notifier/listener/mediator_thread_impl.h
new file mode 100644
index 0000000..684952c
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/mediator_thread_impl.h
@@ -0,0 +1,120 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This object runs on a thread and knows how to interpret messages sent by
+// the talk mediator. The mediator posts messages to a queue which the thread
+// polls (in a super class).
+//
+// Example usage:
+// MediatorThread m = new MediatorThreadImpl(pass in stuff);
+// m.start(); // Start the thread
+// // Once the thread is started, you can do server stuff
+// m.Login(loginInformation);
+// // events happen, the mediator finds out through its pump
+// // more messages are dispatched to the thread
+// // eventually we want to log out
+// m.Logout();
+// delete m; // Also stops the thread
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_IMPL_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_IMPL_H_
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/notifier/communicator/login.h"
+#include "chrome/browser/sync/notifier/communicator/login_failure.h"
+#include "chrome/browser/sync/notifier/listener/mediator_thread.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/thread.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace notifier {
+class TaskPump;
+} // namespace notifier
+
+namespace buzz {
+class buzz::XmppClient;
+} // namespace buzz
+
+namespace talk_base {
+class talk_base::SocketServer;
+} // namespace talk_base
+
+namespace browser_sync {
+
+enum MEDIATOR_CMD {
+ CMD_LOGIN,
+ CMD_DISCONNECT,
+ CMD_LISTEN_FOR_UPDATES,
+ CMD_SEND_NOTIFICATION,
+ CMD_SUBSCRIBE_FOR_UPDATES
+};
+
+// Used to pass authentication information from the mediator to the thread
+// Use new to allocate it on the heap, the thread will delete it for you.
+
+struct LoginData : public talk_base::MessageData {
+ explicit LoginData(const buzz::XmppClientSettings& settings)
+ : user_settings(settings) {
+ }
+ virtual ~LoginData() {}
+
+ buzz::XmppClientSettings user_settings;
+};
+
+class MediatorThreadImpl
+ : public MediatorThread,
+ public sigslot::has_slots<>,
+ public talk_base::MessageHandler,
+ public talk_base::Thread {
+ public:
+ MediatorThreadImpl();
+ virtual ~MediatorThreadImpl();
+
+ // Start the thread
+ virtual void Start();
+ virtual void Run();
+
+ // These are called from outside threads, by the talk mediator object.
+ // They add messages to a queue which we poll in this thread.
+ void Login(const buzz::XmppClientSettings& settings);
+ void Logout();
+ void ListenForUpdates();
+ void SubscribeForUpdates();
+ void SendNotification();
+ void LogStanzas();
+
+ private:
+ // Called from within the thread on internal events.
+ void ProcessMessages(int cms);
+ void OnMessage(talk_base::Message* msg);
+ void DoLogin(LoginData* login_data);
+ void DoDisconnect();
+ void DoSubscribeForUpdates();
+ void DoListenForUpdates();
+ void DoSendNotification();
+ void DoStanzaLogging();
+
+ // These handle messages indicating an event happened in the outside world.
+ void OnUpdateListenerMessage();
+ void OnUpdateNotificationSent(bool success);
+ void OnLoginFailureMessage(const notifier::LoginFailure& failure);
+ void OnClientStateChangeMessage(notifier::Login::ConnectionState state);
+ void OnSubscriptionStateChange(bool success);
+ void OnInputDebug(const char* msg, int length);
+ void OnOutputDebug(const char* msg, int length);
+
+ buzz::XmppClient* xmpp_client();
+
+ // All buzz::XmppClients are owned by their parent. The root parent is the
+ // SingleLoginTask created by the notifier::Login object. This in turn is
+ // owned by the TaskPump. They are destroyed either when processing is
+ // complete or the pump shuts down.
+ scoped_ptr<notifier::TaskPump> pump_;
+ scoped_ptr<notifier::Login> login_;
+ DISALLOW_COPY_AND_ASSIGN(MediatorThreadImpl);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_IMPL_H_
diff --git a/chrome/browser/sync/notifier/listener/mediator_thread_mock.h b/chrome/browser/sync/notifier/listener/mediator_thread_mock.h
new file mode 100644
index 0000000..dea8a8e
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/mediator_thread_mock.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is mock for delicious testing.
+// It's very primitive, and it would have been better to use gmock, except
+// that gmock is only for linux.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_MOCK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_MOCK_H_
+
+#include "chrome/browser/sync/notifier/listener/mediator_thread.h"
+#include "talk/xmpp/xmppclientsettings.h"
+
+namespace browser_sync {
+
+class MockMediatorThread : public MediatorThread {
+ public:
+ MockMediatorThread() {
+ Reset();
+ }
+ ~MockMediatorThread() {}
+
+ void Reset() {
+ login_calls = 0;
+ logout_calls = 0;
+ start_calls = 0;
+ subscribe_calls = 0;
+ listen_calls = 0;
+ send_calls = 0;
+ }
+
+ // Overridden from MediatorThread
+ void Login(const buzz::XmppClientSettings& settings) {
+ login_calls++;
+ }
+
+ void Logout() {
+ logout_calls++;
+ }
+
+ void Start() {
+ start_calls++;
+ }
+
+ virtual void SubscribeForUpdates() {
+ subscribe_calls++;
+ }
+
+ virtual void ListenForUpdates() {
+ listen_calls++;
+ }
+
+ virtual void SendNotification() {
+ send_calls++;
+ }
+
+ // Callback control
+ void ChangeState(MediatorThread::MediatorMessage message) {
+ SignalStateChange(message);
+ }
+
+ // Intneral State
+ int login_calls;
+ int logout_calls;
+ int start_calls;
+ int subscribe_calls;
+ int listen_calls;
+ int send_calls;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_MEDIATOR_THREAD_MOCK_H_
diff --git a/chrome/browser/sync/notifier/listener/send_update_task.cc b/chrome/browser/sync/notifier/listener/send_update_task.cc
new file mode 100644
index 0000000..79fffed
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/send_update_task.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/listener/send_update_task.h"
+
+#include "base/logging.h"
+#include "base/scoped_ptr.h"
+#include "talk/xmllite/qname.h"
+#include "talk/xmpp/constants.h"
+#include "talk/xmpp/xmppclient.h"
+
+namespace browser_sync {
+
+SendUpdateTask::SendUpdateTask(Task* parent)
+ : XmppTask(parent, buzz::XmppEngine::HL_SINGLE) { // Watch for one reply.
+}
+
+SendUpdateTask::~SendUpdateTask() {
+}
+
+bool SendUpdateTask::HandleStanza(const buzz::XmlElement* stanza) {
+ if (!MatchResponseIq(stanza, GetClient()->jid().BareJid(), task_id()))
+ return false;
+ QueueStanza(stanza);
+ return true;
+}
+
+int SendUpdateTask::ProcessStart() {
+ LOG(INFO) << "P2P: Notification task started.";
+ scoped_ptr<buzz::XmlElement> stanza(NewUpdateMessage());
+ if (SendStanza(stanza.get()) != buzz::XMPP_RETURN_OK) {
+ // TODO(brg) : Retry on error.
+ SignalStatusUpdate(false);
+ return STATE_DONE;
+ }
+ return STATE_RESPONSE;
+}
+
+int SendUpdateTask::ProcessResponse() {
+ LOG(INFO) << "P2P: Notification response received.";
+ const buzz::XmlElement* stanza = NextStanza();
+ if (stanza == NULL) {
+ return STATE_BLOCKED;
+ }
+ if (stanza->HasAttr(buzz::QN_TYPE) &&
+ stanza->Attr(buzz::QN_TYPE) == buzz::STR_RESULT) {
+ // Notify listeners of success.
+ SignalStatusUpdate(true);
+ return STATE_DONE;
+ }
+
+ // An error response was received.
+ // TODO(brg) : Error handling.
+ SignalStatusUpdate(false);
+ return STATE_DONE;
+}
+
+buzz::XmlElement* SendUpdateTask::NewUpdateMessage() {
+ static const std::string kNSNotifier = "google:notifier";
+ static const buzz::QName kQnNotifierSet(true, kNSNotifier, "set");
+ static const buzz::QName kQnId(true, buzz::STR_EMPTY, "Id");
+ static const buzz::QName kQnServiceUrl(true, buzz::STR_EMPTY, "ServiceUrl");
+ static const buzz::QName kQnData(true, buzz::STR_EMPTY, "data");
+ static const buzz::QName kQnServiceId(true, buzz::STR_EMPTY, "ServiceId");
+
+ // Create our update stanza. In the future this may include the revision id,
+ // but at the moment simply does a p2p ping. The message is constructed as:
+ // <iq type='get' from='{fullJid}' to='{bareJid}' id='{#}'>
+ // <set xmlns="google:notifier">
+ // <Id xmlns="">
+ // <ServiceUrl xmlns="" data="google:notifier"/>
+ // <ServiceId xmlns="" data="notification"/>
+ // </Id>
+ // </set>
+ // </iq>
+ buzz::XmlElement* stanza = MakeIq(buzz::STR_GET, GetClient()->jid().BareJid(),
+ task_id());
+
+ buzz::XmlElement* notifier_set = new buzz::XmlElement(kQnNotifierSet, true);
+ stanza->AddElement(notifier_set);
+
+ buzz::XmlElement* id = new buzz::XmlElement(kQnId, true);
+ notifier_set->AddElement(id);
+
+ buzz::XmlElement* service_url = new buzz::XmlElement(kQnServiceUrl, true);
+ service_url->AddAttr(kQnData, kNSNotifier);
+ id->AddElement(service_url);
+
+ buzz::XmlElement* service_id = new buzz::XmlElement(kQnServiceId, true);
+ service_id->AddAttr(kQnData, "notification");
+ id->AddElement(service_id);
+ return stanza;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/notifier/listener/send_update_task.h b/chrome/browser/sync/notifier/listener/send_update_task.h
new file mode 100644
index 0000000..056703e
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/send_update_task.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Methods for sending the update stanza to notify peers via xmpp.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SEND_UPDATE_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SEND_UPDATE_TASK_H_
+
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/xmpptask.h"
+
+namespace browser_sync {
+
+class SendUpdateTask : public buzz::XmppTask {
+ public:
+ explicit SendUpdateTask(Task* parent);
+ virtual ~SendUpdateTask();
+
+ // Overridden from buzz::XmppTask
+ virtual int ProcessStart();
+ virtual int ProcessResponse();
+ virtual bool HandleStanza(const buzz::XmlElement* stanza);
+
+ // Signal callback upon subscription success.
+ sigslot::signal1<bool> SignalStatusUpdate;
+
+ private:
+ // Allocates and constructs an buzz::XmlElement containing the update stanza.
+ buzz::XmlElement* NewUpdateMessage();
+
+ DISALLOW_COPY_AND_ASSIGN(SendUpdateTask);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SEND_UPDATE_TASK_H_
diff --git a/chrome/browser/sync/notifier/listener/subscribe_task.cc b/chrome/browser/sync/notifier/listener/subscribe_task.cc
new file mode 100644
index 0000000..8d8a3ea
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/subscribe_task.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/listener/subscribe_task.h"
+
+#include <string>
+
+#include "base/logging.h"
+#include "talk/base/task.h"
+#include "talk/xmllite/qname.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/constants.h"
+#include "talk/xmpp/xmppclient.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace browser_sync {
+
+SubscribeTask::SubscribeTask(Task* parent)
+ : XmppTask(parent, buzz::XmppEngine::HL_SINGLE) {
+}
+
+SubscribeTask::~SubscribeTask() {
+}
+
+bool SubscribeTask::HandleStanza(const buzz::XmlElement* stanza) {
+ if (!MatchResponseIq(stanza, GetClient()->jid().BareJid(), task_id()))
+ return false;
+ QueueStanza(stanza);
+ return true;
+}
+
+int SubscribeTask::ProcessStart() {
+ LOG(INFO) << "P2P: Subscription task started.";
+ scoped_ptr<buzz::XmlElement> iq_stanza(NewSubscriptionMessage());
+
+ if (SendStanza(iq_stanza.get()) != buzz::XMPP_RETURN_OK) {
+ SignalStatusUpdate(false);
+ return STATE_DONE;
+ }
+ return STATE_RESPONSE;
+}
+
+int SubscribeTask::ProcessResponse() {
+ LOG(INFO) << "P2P: Subscription response received.";
+ const buzz::XmlElement* stanza = NextStanza();
+ if (stanza == NULL) {
+ return STATE_BLOCKED;
+ }
+ // We've receieved a response to our subscription request.
+ if (stanza->HasAttr(buzz::QN_TYPE) &&
+ stanza->Attr(buzz::QN_TYPE) == buzz::STR_RESULT) {
+ SignalStatusUpdate(true);
+ return STATE_DONE;
+ }
+ // An error response was received.
+ // TODO(brg) : Error handling.
+ SignalStatusUpdate(false);
+ return STATE_DONE;
+}
+
+buzz::XmlElement* SubscribeTask::NewSubscriptionMessage() {
+ static const buzz::QName kQnNotifierGetAll(true, "google:notifier", "getAll");
+ static const buzz::QName kQnNotifierClientActive(true, buzz::STR_EMPTY,
+ "ClientActive");
+ static const buzz::QName kQnBool(true, buzz::STR_EMPTY, "bool");
+ static const std::string kTrueString("true");
+
+ // Create the subscription stanza using the notificaitons protocol.
+ // <iq type='get' from='{fullJid}' to='{bareJid}' id='{#}'>
+ // <gn:getAll xmlns:gn='google:notifier' xmlns=''>
+ // <ClientActive bool='true'/>
+ // </gn:getAll>
+ // </iq>
+ buzz::XmlElement* get_all_request =
+ MakeIq(buzz::STR_GET, GetClient()->jid().BareJid(), task_id());
+
+ buzz::XmlElement* notifier_get =
+ new buzz::XmlElement(kQnNotifierGetAll, true);
+ get_all_request->AddElement(notifier_get);
+
+ buzz::XmlElement* client_active =
+ new buzz::XmlElement(kQnNotifierClientActive, true);
+ client_active->AddAttr(kQnBool, kTrueString);
+ notifier_get->AddElement(client_active);
+
+ return get_all_request;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/notifier/listener/subscribe_task.h b/chrome/browser/sync/notifier/listener/subscribe_task.h
new file mode 100644
index 0000000..4b96f38
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/subscribe_task.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class handles subscribing to talk notifications. It does the getAll
+// iq stanza which establishes the endpoint and directs future notifications to
+// be pushed.
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SUBSCRIBE_TASK_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SUBSCRIBE_TASK_H_
+
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmpp/xmpptask.h"
+
+namespace browser_sync {
+
+class SubscribeTask : public buzz::XmppTask {
+ public:
+ explicit SubscribeTask(Task* parent);
+ virtual ~SubscribeTask();
+
+ // Overridden from XmppTask.
+ virtual int ProcessStart();
+ virtual int ProcessResponse();
+ virtual bool HandleStanza(const buzz::XmlElement* stanza);
+
+ // Signal callback upon subscription success.
+ sigslot::signal1<bool> SignalStatusUpdate;
+
+ private:
+ // Assembles an Xmpp stanza which can be sent to subscribe to notifications.
+ buzz::XmlElement* NewSubscriptionMessage();
+
+ DISALLOW_COPY_AND_ASSIGN(SubscribeTask);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_SUBSCRIBE_TASK_H_
diff --git a/chrome/browser/sync/notifier/listener/talk_mediator.h b/chrome/browser/sync/notifier/listener/talk_mediator.h
new file mode 100644
index 0000000..c651c10
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/talk_mediator.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Interface to the code which handles talk logic. Used to initialize SSL
+// before the underlying talk login occurs
+// Example usage:
+//
+// TalkMediator mediator();
+// mediator.SetCredentials("email", "pass", false);
+// mediator.WatchAuthWatcher(auth_watcher_);
+// AuthWatcher eventually sends AUTH_SUCCEEDED which triggers:
+// mediator.Login();
+// ...
+// mediator.Logout();
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_H_
+
+#include <string>
+
+namespace browser_sync {
+class AuthWatcher;
+class SyncerThread;
+
+struct TalkMediatorEvent {
+ enum WhatHappened {
+ LOGIN_SUCCEEDED,
+ LOGOUT_SUCCEEDED,
+ SUBSCRIPTIONS_ON,
+ SUBSCRIPTIONS_OFF,
+ NOTIFICATION_RECEIVED,
+ NOTIFICATION_SENT,
+ TALKMEDIATOR_DESTROYED,
+ };
+
+ // Required by EventChannel
+ typedef TalkMediatorEvent EventType;
+
+ static inline bool IsChannelShutdownEvent(const TalkMediatorEvent& event) {
+ return event.what_happened == TALKMEDIATOR_DESTROYED;
+ }
+
+ WhatHappened what_happened;
+};
+
+typedef EventChannel<TalkMediatorEvent, PThreadMutex> TalkMediatorChannel;
+
+class TalkMediator {
+ public:
+ TalkMediator() {}
+ virtual ~TalkMediator() {}
+
+ // The following methods are for authorizaiton of the xmpp client.
+ virtual void WatchAuthWatcher(browser_sync::AuthWatcher* auth_watcher) = 0;
+ virtual bool SetAuthToken(const std::string& email,
+ const std::string& token) = 0;
+ virtual bool Login() = 0;
+ virtual bool Logout() = 0;
+
+ // Method for the owner of this object to notify peers that an update
+ // has occurred.
+ virtual bool SendNotification() = 0;
+
+ // Channel by which talk mediator events are signaled.
+ virtual TalkMediatorChannel* channel() const = 0;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_H_
diff --git a/chrome/browser/sync/notifier/listener/talk_mediator_impl.cc b/chrome/browser/sync/notifier/listener/talk_mediator_impl.cc
new file mode 100644
index 0000000..9d83d67
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/talk_mediator_impl.cc
@@ -0,0 +1,275 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+
+#include "base/logging.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "chrome/browser/sync/notifier/listener/mediator_thread_impl.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "talk/base/cryptstring.h"
+#include "talk/base/ssladapter.h"
+#include "talk/xmpp/xmppclientsettings.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace browser_sync {
+
+// Before any authorization event from TalkMediatorImpl, we need to initialize
+// the SSL library.
+class SslInitializationSingleton {
+ public:
+ virtual ~SslInitializationSingleton() {
+ talk_base::CleanupSSL();
+ };
+
+ void RegisterClient() {}
+
+ static SslInitializationSingleton* GetInstance() {
+ MutexLock lock(&mutex_);
+ if (!instance_.get()) {
+ instance_.reset(new SslInitializationSingleton());
+ }
+ return instance_.get();
+ }
+
+ private:
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+ SslInitializationSingleton() {
+ talk_base::InitializeSSL();
+ };
+
+ // The single instance of this class.
+ static PThreadMutex mutex_;
+ static scoped_ptr<SslInitializationSingleton> instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SslInitializationSingleton);
+};
+
+// Declaration of class scoped static variables.
+PThreadMutex SslInitializationSingleton::mutex_;
+scoped_ptr<SslInitializationSingleton> SslInitializationSingleton::instance_;
+
+TalkMediatorImpl::TalkMediatorImpl()
+ : mediator_thread_(new MediatorThreadImpl()) {
+ // Ensure the SSL library is initialized.
+ SslInitializationSingleton::GetInstance()->RegisterClient();
+
+ // Construct the callback channel with the shutdown event.
+ TalkMediatorInitialization(false);
+}
+
+TalkMediatorImpl::TalkMediatorImpl(MediatorThread *thread)
+ : mediator_thread_(thread) {
+ // When testing we do not initialize the SSL library.
+ TalkMediatorInitialization(true);
+}
+
+void TalkMediatorImpl::TalkMediatorInitialization(bool should_connect) {
+ TalkMediatorEvent done = { TalkMediatorEvent::TALKMEDIATOR_DESTROYED };
+ channel_.reset(new TalkMediatorChannel(done));
+ if (should_connect) {
+ mediator_thread_->SignalStateChange.connect(
+ this,
+ &TalkMediatorImpl::MediatorThreadMessageHandler);
+ state_.connected = 1;
+ }
+ mediator_thread_->Start();
+ state_.started = 1;
+}
+
+TalkMediatorImpl::~TalkMediatorImpl() {
+ if (state_.started) {
+ Logout();
+ }
+}
+
+void TalkMediatorImpl::AuthWatcherEventHandler(
+ const AuthWatcherEvent& auth_event) {
+ MutexLock lock(&mutex_);
+ switch (auth_event.what_happened) {
+ case AuthWatcherEvent::AUTHWATCHER_DESTROYED:
+ case AuthWatcherEvent::GAIA_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_CONNECTION_FAILED:
+ // We have failed to connect to the buzz server, and we maintain a
+ // decreased polling interval and stay in a flaky connection mode.
+ // Note that the failure is on the authwatcher's side and can not be
+ // resolved without manual retry.
+ break;
+ case AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START:
+ // TODO(brg) : We are restarting the authentication attempt. We need to
+ // insure this code path is stable.
+ break;
+ case AuthWatcherEvent::AUTH_SUCCEEDED:
+ if (!state_.logged_in) {
+ DoLogin();
+ }
+ break;
+ default:
+ // Do nothing
+ break;
+ }
+}
+
+void TalkMediatorImpl::WatchAuthWatcher(AuthWatcher* watcher) {
+ auth_hookup_.reset(NewEventListenerHookup(
+ watcher->channel(),
+ this,
+ &TalkMediatorImpl::AuthWatcherEventHandler));
+}
+
+bool TalkMediatorImpl::Login() {
+ MutexLock lock(&mutex_);
+ return DoLogin();
+}
+
+bool TalkMediatorImpl::DoLogin() {
+ // Connect to the mediator thread and start it processing messages.
+ if (!state_.connected) {
+ mediator_thread_->SignalStateChange.connect(
+ this,
+ &TalkMediatorImpl::MediatorThreadMessageHandler);
+ state_.connected = 1;
+ }
+ if (state_.initialized && !state_.logged_in) {
+ mediator_thread_->Login(xmpp_settings_);
+ state_.logged_in = 1;
+ return true;
+ }
+ return false;
+}
+
+bool TalkMediatorImpl::Logout() {
+ MutexLock lock(&mutex_);
+ // We do not want to be called back during logout since we may be closing.
+ if (state_.connected) {
+ mediator_thread_->SignalStateChange.disconnect(this);
+ state_.connected = 0;
+ }
+ if (state_.started) {
+ mediator_thread_->Logout();
+ state_.started = 0;
+ state_.logged_in = 0;
+ state_.subscribed = 0;
+ return true;
+ }
+ return false;
+}
+
+bool TalkMediatorImpl::SendNotification() {
+ MutexLock lock(&mutex_);
+ if (state_.logged_in && state_.subscribed) {
+ mediator_thread_->SendNotification();
+ return true;
+ }
+ return false;
+}
+
+TalkMediatorChannel* TalkMediatorImpl::channel() const {
+ return channel_.get();
+}
+
+bool TalkMediatorImpl::SetAuthToken(const std::string& email,
+ const std::string& token) {
+ MutexLock lock(&mutex_);
+
+ // Verify that we can create a JID from the email provided.
+ buzz::Jid jid = buzz::Jid(email);
+ if (jid.node().empty() || !jid.IsValid()) {
+ return false;
+ }
+
+ // Construct the XmppSettings object for login to buzz.
+ xmpp_settings_.set_user(jid.node());
+ xmpp_settings_.set_resource("chrome-sync");
+ xmpp_settings_.set_host(jid.domain());
+ xmpp_settings_.set_use_tls(true);
+ xmpp_settings_.set_auth_cookie(token);
+
+ state_.initialized = 1;
+ return true;
+}
+
+void TalkMediatorImpl::MediatorThreadMessageHandler(
+ MediatorThread::MediatorMessage message) {
+ LOG(INFO) << "P2P: MediatorThread has passed a message";
+ switch (message) {
+ case MediatorThread::MSG_LOGGED_IN:
+ OnLogin();
+ break;
+ case MediatorThread::MSG_LOGGED_OUT:
+ OnLogout();
+ break;
+ case MediatorThread::MSG_SUBSCRIPTION_SUCCESS:
+ OnSubscriptionSuccess();
+ break;
+ case MediatorThread::MSG_SUBSCRIPTION_FAILURE:
+ OnSubscriptionFailure();
+ break;
+ case MediatorThread::MSG_NOTIFICATION_RECEIVED:
+ OnNotificationReceived();
+ break;
+ case MediatorThread::MSG_NOTIFICATION_SENT:
+ OnNotificationSent();
+ break;
+ default:
+ LOG(WARNING) << "P2P: Unknown message returned from mediator thread.";
+ break;
+ }
+}
+
+void TalkMediatorImpl::OnLogin() {
+ LOG(INFO) << "P2P: Logged in.";
+ MutexLock lock(&mutex_);
+ // ListenForUpdates enables the ListenTask. This is done before
+ // SubscribeForUpdates.
+ mediator_thread_->ListenForUpdates();
+ mediator_thread_->SubscribeForUpdates();
+ TalkMediatorEvent event = { TalkMediatorEvent::LOGIN_SUCCEEDED };
+ channel_->NotifyListeners(event);
+}
+
+void TalkMediatorImpl::OnLogout() {
+ LOG(INFO) << "P2P: Logged off.";
+ OnSubscriptionFailure();
+ MutexLock lock(&mutex_);
+ state_.logged_in = 0;
+ TalkMediatorEvent event = { TalkMediatorEvent::LOGOUT_SUCCEEDED };
+ channel_->NotifyListeners(event);
+}
+
+void TalkMediatorImpl::OnSubscriptionSuccess() {
+ LOG(INFO) << "P2P: Update subscription active.";
+ MutexLock lock(&mutex_);
+ state_.subscribed = 1;
+ TalkMediatorEvent event = { TalkMediatorEvent::SUBSCRIPTIONS_ON };
+ channel_->NotifyListeners(event);
+}
+
+void TalkMediatorImpl::OnSubscriptionFailure() {
+ LOG(INFO) << "P2P: Update subscription failure.";
+ MutexLock lock(&mutex_);
+ state_.subscribed = 0;
+ TalkMediatorEvent event = { TalkMediatorEvent::SUBSCRIPTIONS_OFF };
+ channel_->NotifyListeners(event);
+}
+
+void TalkMediatorImpl::OnNotificationReceived() {
+ LOG(INFO) << "P2P: Updates are available on the server.";
+ MutexLock lock(&mutex_);
+ TalkMediatorEvent event = { TalkMediatorEvent::NOTIFICATION_RECEIVED };
+ channel_->NotifyListeners(event);
+}
+
+void TalkMediatorImpl::OnNotificationSent() {
+ LOG(INFO) <<
+ "P2P: Peers were notified that updates are available on the server.";
+ MutexLock lock(&mutex_);
+ TalkMediatorEvent event = { TalkMediatorEvent::NOTIFICATION_SENT };
+ channel_->NotifyListeners(event);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/notifier/listener/talk_mediator_impl.h b/chrome/browser/sync/notifier/listener/talk_mediator_impl.h
new file mode 100644
index 0000000..33bb94a
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/talk_mediator_impl.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class is the interface between talk code and the client code proper
+// It will manage all aspects of the connection and call back into the client
+// when it needs attention (for instance if updates are available for syncing).
+
+#ifndef CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_IMPL_H_
+#define CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_IMPL_H_
+
+#include <string>
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/notifier/listener/mediator_thread.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "talk/xmpp/xmppclientsettings.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+class EventListenerHookup;
+
+namespace browser_sync {
+class AuthWatcher;
+struct AuthWatcherEvent;
+class SyncerThread;
+
+class TalkMediatorImpl
+ : public TalkMediator,
+ public sigslot::has_slots<> {
+ public:
+ TalkMediatorImpl();
+ explicit TalkMediatorImpl(MediatorThread* thread);
+ virtual ~TalkMediatorImpl();
+
+ // Overriden from TalkMediator.
+ virtual void WatchAuthWatcher(AuthWatcher* auth_watcher);
+ virtual bool SetAuthToken(const std::string& email,
+ const std::string& token);
+ virtual bool Login();
+ virtual bool Logout();
+
+ virtual bool SendNotification();
+
+ TalkMediatorChannel* channel() const;
+
+ private:
+ struct TalkMediatorState {
+ TalkMediatorState()
+ : started(0), connected(0), initialized(0), logged_in(0),
+ subscribed(0) {
+ }
+
+ unsigned int started : 1; // Background thread has started.
+ unsigned int connected : 1; // Connected to the mediator thread signal.
+ unsigned int initialized : 1; // Initialized with login information.
+ unsigned int logged_in : 1; // Logged in the mediator's authenticator.
+ unsigned int subscribed : 1; // Subscribed to the xmpp receiving channel.
+ };
+
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+ // Completes common initialization between the constructors. Set should
+ // connect to true if the talk mediator should connect to the controlled
+ // mediator thread's SignalStateChange object.
+ void TalkMediatorInitialization(bool should_connect);
+
+ // Called from the authwatcher after authentication completes. Signals this
+ // class to push listening and subscription events to the mediator thread.
+ void AuthWatcherEventHandler(const AuthWatcherEvent& auth_event);
+
+ // Callback for the mediator thread.
+ void MediatorThreadMessageHandler(MediatorThread::MediatorMessage message);
+
+ // Responses to messages from the MediatorThread.
+ void OnNotificationReceived();
+ void OnNotificationSent();
+ void OnLogin();
+ void OnLogout();
+ void OnSubscriptionFailure();
+ void OnSubscriptionSuccess();
+
+ // Does the actual login funcationality, called from Login() and the
+ // AuthWatcher event handler.
+ bool DoLogin();
+
+ // Mutex for synchronizing event access. This class listens to two event
+ // sources, Authwatcher and MediatorThread. It can also be called by through
+ // the TalkMediatorInteface. All these access points are serialized by
+ // this mutex.
+ PThreadMutex mutex_;
+
+ // Internal state.
+ TalkMediatorState state_;
+
+ // Cached and verfied from the SetAuthToken method.
+ buzz::XmppClientSettings xmpp_settings_;
+
+ // Interface to listen to authentication events.
+ scoped_ptr<EventListenerHookup> auth_hookup_;
+
+ // The worker thread through which talk events are posted and received.
+ scoped_ptr<MediatorThread> mediator_thread_;
+
+ // Channel through which to broadcast events.
+ scoped_ptr<TalkMediatorChannel> channel_;
+
+ FRIEND_TEST(TalkMediatorImplTest, SetAuthTokenWithBadInput);
+ FRIEND_TEST(TalkMediatorImplTest, SetAuthTokenWithGoodInput);
+ FRIEND_TEST(TalkMediatorImplTest, SendNotification);
+ FRIEND_TEST(TalkMediatorImplTest, MediatorThreadCallbacks);
+ DISALLOW_COPY_AND_ASSIGN(TalkMediatorImpl);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_NOTIFIER_LISTENER_TALK_MEDIATOR_IMPL_H_
diff --git a/chrome/browser/sync/notifier/listener/talk_mediator_unittest.cc b/chrome/browser/sync/notifier/listener/talk_mediator_unittest.cc
new file mode 100644
index 0000000..6d26cdb
--- /dev/null
+++ b/chrome/browser/sync/notifier/listener/talk_mediator_unittest.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "chrome/browser/sync/notifier/listener/mediator_thread_mock.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "talk/xmpp/xmppengine.h"
+
+namespace browser_sync {
+
+class TalkMediatorImplTest : public testing::Test {
+ public:
+ void HandleTalkMediatorEvent(
+ const browser_sync::TalkMediatorEvent& event) {
+ last_message_ = event.what_happened;
+ }
+
+ protected:
+ TalkMediatorImplTest() {}
+ ~TalkMediatorImplTest() {}
+
+ virtual void SetUp() {
+ last_message_ = -1;
+ }
+
+ virtual void TearDown() {
+ }
+
+ int last_message_;
+};
+
+TEST_F(TalkMediatorImplTest, ConstructionOfTheClass) {
+ // Constructing a single talk mediator enables SSL through the singleton.
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl());
+ talk1.reset(NULL);
+}
+
+TEST_F(TalkMediatorImplTest, SetAuthTokenWithBadInput) {
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_FALSE(talk1->SetAuthToken("@missinguser.com", ""));
+ ASSERT_EQ(talk1->state_.initialized, 0);
+
+ scoped_ptr<TalkMediatorImpl> talk2(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_FALSE(talk2->SetAuthToken("", "1234567890"));
+ ASSERT_EQ(talk2->state_.initialized, 0);
+
+ scoped_ptr<TalkMediatorImpl> talk3(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_FALSE(talk3->SetAuthToken("missingdomain", "abcde"));
+ ASSERT_EQ(talk3->state_.initialized, 0);
+}
+
+TEST_F(TalkMediatorImplTest, SetAuthTokenWithGoodInput) {
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_TRUE(talk1->SetAuthToken("chromium@gmail.com", "token"));
+ ASSERT_EQ(talk1->state_.initialized, 1);
+
+ scoped_ptr<TalkMediatorImpl> talk2(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_TRUE(talk2->SetAuthToken("chromium@mail.google.com", "token"));
+ ASSERT_EQ(talk2->state_.initialized, 1);
+
+ scoped_ptr<TalkMediatorImpl> talk3(new TalkMediatorImpl(
+ new MockMediatorThread()));
+ ASSERT_TRUE(talk3->SetAuthToken("chromium@chromium.org", "token"));
+ ASSERT_EQ(talk3->state_.initialized, 1);
+}
+
+TEST_F(TalkMediatorImplTest, LoginWiring) {
+ // The TalkMediatorImpl owns the mock.
+ MockMediatorThread* mock = new MockMediatorThread();
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl(mock));
+
+ // Login checks states for initialization.
+ ASSERT_EQ(talk1->Login(), false);
+ ASSERT_EQ(mock->login_calls, 0);
+
+ ASSERT_EQ(talk1->SetAuthToken("chromium@gmail.com", "token"), true);
+ ASSERT_EQ(talk1->Login(), true);
+ ASSERT_EQ(mock->login_calls, 1);
+
+ // Successive calls to login will fail. One needs to create a new talk
+ // mediator object.
+ ASSERT_EQ(talk1->Login(), false);
+ ASSERT_EQ(mock->login_calls, 1);
+
+ ASSERT_EQ(talk1->Logout(), true);
+ ASSERT_EQ(mock->logout_calls, 1);
+
+ // Successive logout calls do nothing.
+ ASSERT_EQ(talk1->Logout(), false);
+ ASSERT_EQ(mock->logout_calls, 1);
+}
+
+TEST_F(TalkMediatorImplTest, SendNotification) {
+ // The TalkMediatorImpl owns the mock.
+ MockMediatorThread* mock = new MockMediatorThread();
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl(mock));
+
+ // Failure due to not being logged in.
+ ASSERT_EQ(talk1->SendNotification(), false);
+ ASSERT_EQ(mock->send_calls, 0);
+
+ ASSERT_EQ(talk1->SetAuthToken("chromium@gmail.com", "token"), true);
+ ASSERT_EQ(talk1->Login(), true);
+ ASSERT_EQ(mock->login_calls, 1);
+
+ // Failure due to not being subscribed.
+ ASSERT_EQ(talk1->SendNotification(), false);
+ ASSERT_EQ(mock->send_calls, 0);
+
+ // Fake subscription
+ talk1->OnSubscriptionSuccess();
+ ASSERT_EQ(talk1->state_.subscribed, 1);
+ ASSERT_EQ(talk1->SendNotification(), true);
+ ASSERT_EQ(mock->send_calls, 1);
+ ASSERT_EQ(talk1->SendNotification(), true);
+ ASSERT_EQ(mock->send_calls, 2);
+
+ ASSERT_EQ(talk1->Logout(), true);
+ ASSERT_EQ(mock->logout_calls, 1);
+
+ // Failure due to being logged out.
+ ASSERT_EQ(talk1->SendNotification(), false);
+ ASSERT_EQ(mock->send_calls, 2);
+}
+
+TEST_F(TalkMediatorImplTest, MediatorThreadCallbacks) {
+ // The TalkMediatorImpl owns the mock.
+ MockMediatorThread* mock = new MockMediatorThread();
+ scoped_ptr<TalkMediatorImpl> talk1(new TalkMediatorImpl(mock));
+
+ scoped_ptr<EventListenerHookup> callback(NewEventListenerHookup(
+ talk1->channel(), this, &TalkMediatorImplTest::HandleTalkMediatorEvent));
+
+ ASSERT_EQ(talk1->SetAuthToken("chromium@gmail.com", "token"), true);
+ ASSERT_EQ(talk1->Login(), true);
+ ASSERT_EQ(mock->login_calls, 1);
+
+ mock->ChangeState(MediatorThread::MSG_LOGGED_IN);
+ ASSERT_EQ(last_message_, TalkMediatorEvent::LOGIN_SUCCEEDED);
+
+ // The message triggers calls to listen and subscribe.
+ ASSERT_EQ(mock->listen_calls, 1);
+ ASSERT_EQ(mock->subscribe_calls, 1);
+ ASSERT_EQ(talk1->state_.subscribed, 0);
+
+ mock->ChangeState(MediatorThread::MSG_SUBSCRIPTION_SUCCESS);
+ ASSERT_EQ(last_message_, TalkMediatorEvent::SUBSCRIPTIONS_ON);
+ ASSERT_EQ(talk1->state_.subscribed, 1);
+
+ // After subscription success is receieved, the talk mediator will allow
+ // sending of notifications.
+ ASSERT_EQ(talk1->SendNotification(), true);
+ ASSERT_EQ(mock->send_calls, 1);
+
+ // |MSG_NOTIFICATION_RECEIVED| from the MediatorThread triggers a callback
+ // of type |NOTIFICATION_RECEIVED|.
+ mock->ChangeState(MediatorThread::MSG_NOTIFICATION_RECEIVED);
+ ASSERT_EQ(last_message_, TalkMediatorEvent::NOTIFICATION_RECEIVED);
+
+ // A |TALKMEDIATOR_DESTROYED| message is received during tear down.
+ talk1.reset();
+ ASSERT_EQ(last_message_, TalkMediatorEvent::TALKMEDIATOR_DESTROYED);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/protocol/proto2_to_oproto.py b/chrome/browser/sync/protocol/proto2_to_oproto.py
new file mode 100644
index 0000000..eb14ab9
--- /dev/null
+++ b/chrome/browser/sync/protocol/proto2_to_oproto.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Strip a .proto of options not supported by open-source protobuf tools. """
+
+import re
+import sys
+
+if __name__ == '__main__':
+ if len(sys.argv) != 3:
+ print "Usage: %s input_file output_file " % sys.argv[0]
+ sys.exit(1)
+
+ input_file = sys.argv[1]
+ output_file = sys.argv[2]
+
+ protobuf = open(input_file).read()
+
+ # Comment out lines like "option java_api_version = 1;"
+ protobuf = re.sub("(option .*api_version.*\=.*)", r"// \1", protobuf)
+
+ # Comment out lines like "option java_java5_enums = false;"
+ protobuf = re.sub("(option .*java_java5_enums.*\=.*)", r"// \1", protobuf)
+
+ # Comment out the java package.
+ protobuf = re.sub("(option .*java_package.*\=.*)", r"// \1", protobuf)
+
+ open(output_file, "w").write(protobuf)
+
diff --git a/chrome/browser/sync/protocol/service_constants.h b/chrome/browser/sync/protocol/service_constants.h
new file mode 100644
index 0000000..0d6c4b1
--- /dev/null
+++ b/chrome/browser/sync/protocol/service_constants.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Product-specific constants.
+
+#include "chrome/browser/sync/util/sync_types.h"
+
+#ifndef CHROME_BROWSER_SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
+#define CHROME_BROWSER_SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
+
+// These fixed service names are used to obtain auth cookies for the
+// corresponding services. It might be interesting to make these updateable
+// as well as have the ability to add new ones.
+#define SYNC_SERVICE_NAME "chromiumsync"
+
+#define DEFAULT_SIGNIN_DOMAIN "gmail.com"
+
+#define PRODUCT_NAME_STRING_NARROW "Chromium Browser Sync"
+
+#define PRODUCT_NAME_STRING PSTR(PRODUCT_NAME_STRING_NARROW)
+#define PRODUCT_NAME_STRING_WIDE L##PRODUCT_NAME_STRING
+
+#endif // CHROME_BROWSER_SYNC_PROTOCOL_SERVICE_CONSTANTS_H_
diff --git a/chrome/browser/sync/protocol/sync.proto b/chrome/browser/sync/protocol/sync.proto
new file mode 100644
index 0000000..0381b329
--- /dev/null
+++ b/chrome/browser/sync/protocol/sync.proto
@@ -0,0 +1,344 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Sync protocol for communication between sync client and server.
+
+syntax = "proto2";
+
+option java_api_version = 1;
+option cc_api_version = 1;
+option py_api_version = 1;
+option java_java5_enums = false;
+
+package sync_pb;
+
+// Used to store and send extended attributes, which are arbitrary
+// key value pairs.
+message ExtendedAttributes {
+ repeated group ExtendedAttribute = 1 {
+ required string key = 2;
+ required bytes value = 3;
+ }
+}
+
+// Used for inspecting how long we spent performing operations in different
+// backends. All times must be in millis.
+message ProfilingData {
+ optional int64 meta_data_write_time = 1;
+ optional int64 file_data_write_time = 2;
+ optional int64 user_lookup_time = 3;
+ optional int64 meta_data_read_time = 4;
+ optional int64 file_data_read_time = 5;
+ optional int64 total_request_time = 6;
+}
+
+message SyncEntity {
+ // This item's identifier. In a commit of a new item, this will be a
+ // client-generated ID. If the commit succeeds, the server will generate
+ // a globally unique ID and return it to the committing client in the
+ // CommitResponse.EntryResponse. In the context of a GetUpdatesResponse,
+ // |id_string| is always the server generated ID. The original
+ // client-generated ID is preserved in the |originator_client_id| field.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional string id_string = 1;
+
+ // An id referencing this item's parent in the hierarchy. In a
+ // CommitMessage, it is accepted for this to be a client-generated temporary
+ // ID if there was a new created item with that ID appearing earlier
+ // in the message. In all other situations, it is a server ID.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional string parent_id_string = 2;
+
+ // old_parent_id is only set in commits and indicates the old server
+ // parent(s) to remove. When omitted, the old parent is the same as
+ // the new.
+ // Present only in CommitMessage.
+ optional string old_parent_id = 3;
+
+ // The version of this item -- a monotonically increasing value that is
+ // maintained by for each item. If zero in a CommitMessage, the server
+ // will interpret this entity as a newly-created item and generate a
+ // new server ID and an initial version number. If nonzero in a
+ // CommitMessage, this item is treated as an update to an existing item, and
+ // the server will use |id_string| to locate the item. Then, if the item's
+ // current version on the server does not match |version|, the commit will
+ // fail for that item. The server will not update it, and will return
+ // a result code of CONFLICT. In a GetUpdatesResponse, |version| is
+ // always positive and indentifies the revision of the item data being sent
+ // to the client.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ required int64 version = 4;
+
+ // Last modification time (in java time milliseconds)
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional int64 mtime = 5;
+
+ // Creation time.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional int64 ctime = 6;
+
+ // A unique-in-the parent name for this item.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ required string name = 7;
+
+ // non_unique_name holds the base name stored serverside, which is different
+ // from |name| when |name| has been suffixed in a way to make it unique
+ // among its siblings. In a GetUpdatesResponse, |non_unique_name| will
+ // be supplied in addition to |name|, and the client may choose which
+ // field to use depending on its needs. In a CommitMessage,
+ // |non_unique_name| takes precedence over the |name| value if both are
+ // supplied.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional string non_unique_name = 8;
+
+ // A value from a monotonically increasing sequence that indicates when
+ // this item was last updated on the server. This is now equivalent
+ // to version. This is now deprecated in favor of version.
+ // Present only in GetUpdatesResponse.
+ optional int64 sync_timestamp = 9;
+
+ // If present, singleton_tag identifies this item as being a uniquely
+ // instanced item. The server ensures that there is never more
+ // than one entity in a user's store with the same singleton_tag value.
+ // This value is used to identify and find e.g. the "Google Chrome" settings
+ // folder without relying on it existing at a particular path, or having
+ // a particular name, in the data store.
+ // Present only in GetUpdatesResponse.
+ optional string singleton_tag = 10;
+
+ // If this group is present, it implies that this SyncEntity corresponds to
+ // a bookmark or a bookmark folder.
+ //
+ // TODO(idana): for now, we put the bookmarks related information explicitly
+ // in the protocol definition. When we start syncing more data types, it is
+ // probably going to be better if we represent the different types as
+ // extended attributes.
+ optional group BookmarkData = 11 {
+ // We use a required field to differentiate between a bookmark and a
+ // bookmark folder.
+ // Present in both GetUpdatesMessage and CommitMessage.
+ required bool bookmark_folder = 12;
+
+ // For bookmark objects, contains the bookmark's URL.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional string bookmark_url = 13;
+
+ // For bookmark objects, contains the bookmark's favicon. The favicon is
+ // represented as a 16X16 PNG image.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional bytes bookmark_favicon = 14;
+ }
+
+ // Supplies a numeric position for this item, relative to other items with
+ // the same parent. This value is only meaningful in server-to-client
+ // contexts; to specify a position in a client-to-server commit context,
+ // use |insert_after_item_id|.
+ // Present only in GetUpdatesResponse.
+ optional int64 position_in_parent = 15;
+
+ // Contains the ID of the element (under the same parent) after which this
+ // element resides. An empty string indicates that the element is the first
+ // element in the parent. This value is used during commits to specify
+ // a relative position for a position change. In the context of
+ // a GetUpdatesMessage, |position_in_parent| is used instead to
+ // communicate position.
+ // Present only in CommitMessage.
+ optional string insert_after_item_id = 16;
+
+ // Arbitrary key/value pairs associated with this item.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional ExtendedAttributes extended_attributes = 17;
+
+ // If true, indicates that this item has been (or should be) deleted.
+ // Present in both GetUpdatesResponse and CommitMessage.
+ optional bool deleted = 18 [default = false];
+
+ // A GUID that identifies the the sync client who initially committed
+ // this entity. This value corresponds to |cache_guid| in CommitMessage.
+ // This field, along with |originator_client_item_id|, can be used to
+ // reunite the original with its official committed version in the case
+ // where a client does not receive or process the commit response for
+ // some reason.
+ // Present only in GetUpdatesResponse.
+ optional string originator_cache_guid = 19;
+
+ // The local item id of this entry from the client that initially
+ // committed this entity. Typically a negative integer.
+ // Present only in GetUpdatesResponse.
+ optional string originator_client_item_id = 20;
+};
+
+message CommitMessage {
+ repeated SyncEntity entries = 1;
+
+ // A GUID that identifies the committing sync client. This value will be
+ // returned as originator_cache_guid for any new items.
+ optional string cache_guid = 2;
+};
+
+message GetUpdatesCallerInfo {
+ enum GET_UPDATES_SOURCE {
+ UNKNOWN = 0; // The source was not set by the caller.
+ FIRST_UPDATE = 1; // First update from an instance of Chrome.
+ LOCAL = 2; // The source of the update was a local change.
+ NOTIFICATION = 3; // The source of the update was a p2p notification.
+ PERIODIC = 4; // The source of the update was periodic polling.
+ SYNC_CYCLE_CONTINUATION = 5; // The source of the update was a
+ } // continuation of a previous update.
+
+ required GET_UPDATES_SOURCE source = 1;
+
+ // True only if notifications were enabled for this GetUpdateMessage.
+ optional bool notifications_enabled = 2;
+};
+
+message GetUpdatesMessage {
+ required int64 from_timestamp = 1;
+
+ // Indicates the reason for the GetUpdatesMessage.
+ optional GetUpdatesCallerInfo caller_info = 2;
+};
+
+message AuthenticateMessage {
+ required string auth_token = 1;
+};
+
+message ClientToServerMessage {
+ required string share = 1;
+ optional int32 protocol_version = 2 [default = 20];
+ enum CONTENTS {
+ COMMIT = 1;
+ GET_UPDATES = 2;
+ AUTHENTICATE = 3;
+ }
+
+ required CONTENTS message_contents = 3;
+ optional CommitMessage commit = 4;
+ optional GetUpdatesMessage get_updates = 5;
+ optional AuthenticateMessage authenticate = 6;
+
+ optional string store_birthday = 7; // Opaque store ID; if it changes, duck!
+ // The client sets this if it detects a sync issue. The server will tell it
+ // if it should perform a refresh.
+ optional bool sync_problem_detected = 8 [default = false];
+};
+
+message CommitResponse {
+ enum RESPONSE_TYPE {
+ SUCCESS = 1;
+ CONFLICT = 2; // You're out of date; update and check your data
+ // TODO(ncarter): What's the difference between RETRY and TRANSIENT_ERROR?
+ RETRY = 3; // Someone has a conflicting, non-expired session open
+ INVALID_MESSAGE = 4; // What the client sent was invalid, and trying again
+ // won't help.
+ OVER_QUOTA = 5; // This operation would put you, or you are, over quota
+ TRANSIENT_ERROR = 6; // Something went wrong; try again in a bit
+ }
+ repeated group EntryResponse = 1 {
+ required RESPONSE_TYPE response_type = 2;
+
+ // Sync servers may also return a new ID for an existing item, indicating
+ // a new entry's been created to hold the data the client's sending up.
+ optional string id_string = 3;
+
+ // should be filled if our parent was assigned a new ID.
+ optional string parent_id_string = 4;
+
+ // This value is the same as the position_in_parent value returned within
+ // the SyncEntity message in GetUpdatesResponse. It is returned if the
+ // item was assigned a new position.
+ optional int64 position_in_parent = 5;
+
+ // The item's current version.
+ optional int64 version = 6;
+
+ // Allows the server to move-aside an entry as it's being committed.
+ // This name is the same as the name field returned within the SyncEntity
+ // message in GetUpdatesResponse.
+ optional string name = 7;
+
+ // This name is the same as the non_unique_name field returned within the
+ // SyncEntity message in GetUpdatesResponse.
+ optional string non_unique_name = 8;
+
+ optional string error_message = 9;
+
+ }
+};
+
+message GetUpdatesResponse {
+ repeated SyncEntity entries = 1;
+ // If there are more changes on the server that weren't processed during this
+ // GetUpdates request, the client should send another GetUpdates request and
+ // use new_timestamp as the from_timestamp value within GetUpdatesMessage.
+ optional int64 new_timestamp = 2;
+ // The newest timestamp in the share. Used to give UI feedback on progress.
+ optional int64 newest_timestamp = 3;
+};
+
+// A user-identifying struct. For a given Google account the email and display
+// name can change, but obfuscated_id should be constant.
+// The obfuscated id is optional because at least one planned use of the proto
+// (sharing) does not require it.
+message UserIdentification {
+ required string email = 1; // the user's full primary email address.
+ optional string display_name = 2; // the user's display name.
+ optional string obfuscated_id = 3; // an obfuscated, opaque user id.
+};
+
+message AuthenticateResponse {
+ // Optional only for backward compatibility.
+ optional UserIdentification user = 1;
+};
+
+message ThrottleParameters {
+ // Deprecated. Remove this from the server side.
+ required int32 min_measure_payload_size = 1;
+ required double target_utilization = 2;
+ required double measure_interval_max = 3;
+ required double measure_interval_min = 4;
+ required double observation_window = 5;
+};
+
+// A command from the server instructing the client to update settings or
+// perform some operation.
+message ClientCommand {
+ // Time to wait before sending any requests to the server.
+ optional int32 set_sync_poll_interval = 1; // in seconds
+ optional int32 set_sync_long_poll_interval = 2; // in seconds
+
+ optional int32 max_commit_batch_size = 3;
+};
+
+message ClientToServerResponse {
+ optional CommitResponse commit = 1;
+ optional GetUpdatesResponse get_updates = 2;
+ optional AuthenticateResponse authenticate = 3;
+
+ enum ERROR_TYPE {
+ SUCCESS = 0;
+ ACCESS_DENIED = 1; // Returned when the user doesn't have access to
+ // store (instead of HTTP 401).
+ NOT_MY_BIRTHDAY = 2; // Returned when the server and client disagree on
+ // the store birthday.
+ THROTTLED = 3; // Returned when the store has exceeded the allowed
+ // bandwidth utilization.
+ AUTH_EXPIRED = 4; // Auth token or cookie has expired.
+ USER_NOT_ACTIVATED = 5; // User doesn't have the Chrome bit set on that
+ // Google Account.
+ AUTH_INVALID = 6; // Auth token or cookie is otherwise invalid.
+ }
+ optional ERROR_TYPE error_code = 4 [default = SUCCESS];
+ optional string error_message = 5;
+
+ // Opaque store ID; if it changes, the contents of the client's cache
+ // is meaningless to this server. This happens most typically when
+ // you switch from one storage backend instance (say, a test instance)
+ // to another (say, the official instance).
+ optional string store_birthday = 6;
+
+ optional ClientCommand client_command = 7;
+ optional ProfilingData profiling_data = 8;
+};
+
diff --git a/chrome/browser/sync/syncable/blob.h b/chrome/browser/sync/syncable/blob.h
new file mode 100644
index 0000000..0d7f33a
--- /dev/null
+++ b/chrome/browser/sync/syncable/blob.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
+
+#include <vector>
+
+namespace syncable {
+
+typedef std::vector<uint8> Blob;
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
diff --git a/chrome/browser/sync/syncable/dir_open_result.h b/chrome/browser/sync/syncable/dir_open_result.h
new file mode 100644
index 0000000..e122319
--- /dev/null
+++ b/chrome/browser/sync/syncable/dir_open_result.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+
+namespace syncable {
+enum DirOpenResult { OPENED, // success.
+ FAILED_NEWER_VERSION, // DB version is too new.
+ FAILED_MAKE_REPOSITORY, // Couldn't create subdir.
+ FAILED_OPEN_DATABASE, // sqlite_open() failed.
+ FAILED_DISK_FULL, // The disk is full.
+};
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
new file mode 100644
index 0000000..2f13f93
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -0,0 +1,657 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+
+#ifdef OS_MACOSX
+#include <CoreFoundation/CoreFoundation.h>
+#elif defined(OS_LINUX)
+#include <glib.h>
+#endif
+
+#include <string>
+
+#include "base/hash_tables.h"
+#include "base/logging.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable_columns.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+// If sizeof(time_t) != sizeof(int32) we need to alter or expand the sqlite
+// datatype.
+COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32);
+
+using std::string;
+
+namespace syncable {
+
+// This just has to be big enough to hold an UPDATE or
+// INSERT statement that modifies all the columns in the entry table.
+static const string::size_type kUpdateStatementBufferSize = 2048;
+
+// Increment this version whenever updating DB tables.
+static const int32 kCurrentDBVersion = 67;
+
+// TODO(sync): remove
+static void PathNameMatch16(sqlite3_context *context, int argc,
+ sqlite3_value **argv) {
+ const PathString pathspec(reinterpret_cast<const PathChar*>
+ (sqlite3_value_text16(argv[0])), sqlite3_value_bytes16(argv[0]) / 2);
+
+ const void* name_text = sqlite3_value_text16(argv[1]);
+ int name_bytes = sqlite3_value_bytes16(argv[1]);
+ // If the text is null, we need to avoid the PathString constructor.
+ if (name_text != NULL) {
+ // Have to copy to append a terminating 0 anyway.
+ const PathString name(reinterpret_cast<const PathChar*>
+ (sqlite3_value_text16(argv[1])),
+ sqlite3_value_bytes16(argv[1]) / 2);
+ sqlite3_result_int(context, PathNameMatch(name, pathspec));
+ } else {
+ sqlite3_result_int(context, PathNameMatch(PathString(), pathspec));
+ }
+}
+
+// Sqlite allows setting of the escape character in an ESCAPE clause and
+// this character is passed in as a third character to the like function.
+// See: http://www.sqlite.org/lang_expr.html
+static void PathNameMatch16WithEscape(sqlite3_context *context,
+ int argc, sqlite3_value **argv) {
+ // Never seen this called, but just in case.
+ LOG(FATAL) << "PathNameMatch16WithEscape() not implemented";
+}
+
+static void RegisterPathNameCollate(sqlite3* dbhandle) {
+#ifdef OS_WINDOWS
+ const int collate = SQLITE_UTF16;
+#else
+ const int collate = SQLITE_UTF8;
+#endif
+ CHECK(SQLITE_OK == sqlite3_create_collation(dbhandle, "PATHNAME", collate,
+ NULL, &ComparePathNames16));
+}
+
+// Replace the LIKE operator with our own implementation that
+// does file spec matching like "*.txt".
+static void RegisterPathNameMatch(sqlite3* dbhandle) {
+ // We only register this on Windows. We use the normal sqlite
+ // matching function on mac/linux.
+ // note that the function PathNameMatch() does a simple ==
+ // comparison on mac, so that would have to be fixed if
+ // we really wanted to use PathNameMatch on mac/linux w/ the
+ // same pattern strings as we do on windows.
+#ifdef OS_WINDOWS
+ CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
+ 2, SQLITE_ANY, NULL, &PathNameMatch16, NULL, NULL));
+ CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
+ 3, SQLITE_ANY, NULL, &PathNameMatch16WithEscape, NULL, NULL));
+#endif // OS_WINDOWS
+}
+
+static inline bool IsSqliteErrorOurFault(int result) {
+ switch (result) {
+ case SQLITE_MISMATCH:
+ case SQLITE_CONSTRAINT:
+ case SQLITE_MISUSE:
+ case SQLITE_RANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+namespace {
+// This small helper class reduces the amount of code in the table upgrade code
+// below and also CHECKs as soon as there's an issue.
+class StatementExecutor {
+ public:
+ explicit StatementExecutor(sqlite3* dbhandle) : dbhandle_(dbhandle) {
+ result_ = SQLITE_DONE;
+ }
+ int Exec(const char* query) {
+ if (SQLITE_DONE != result_)
+ return result_;
+ result_ = ::Exec(dbhandle_, query);
+ CHECK(!IsSqliteErrorOurFault(result_)) << query;
+ return result_;
+ }
+ template <typename T1>
+ int Exec(const char* query, T1 arg1) {
+ if (SQLITE_DONE != result_)
+ return result_;
+ result_ = ::Exec(dbhandle_, query, arg1);
+ CHECK(!IsSqliteErrorOurFault(result_)) << query;
+ return result_;
+ }
+ int result() {
+ return result_;
+ }
+ void set_result(int result) {
+ result_ = result;
+ CHECK(!IsSqliteErrorOurFault(result_)) << result_;
+ }
+ bool healthy() const {
+ return SQLITE_DONE == result_;
+ }
+ private:
+ sqlite3* dbhandle_;
+ int result_;
+ DISALLOW_COPY_AND_ASSIGN(StatementExecutor);
+};
+
+} // namespace
+
+static string GenerateCacheGUID() {
+ return Generate128BitRandomHexString();
+}
+
+// Iterate over the fields of |entry| and bind dirty ones to |statement| for
+// updating. Returns the number of args bound.
+static int BindDirtyFields(const EntryKernel& entry, sqlite3_stmt* statement) {
+ int index = 1;
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<Int64Field>(i)), index++);
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<IdField>(i)), index++);
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<BitField>(i)), index++);
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<StringField>(i)), index++);
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<BlobField>(i)), index++);
+ }
+ return index - 1;
+}
+
+// The caller owns the returned EntryKernel*.
+static EntryKernel* UnpackEntry(sqlite3_stmt* statement) {
+ EntryKernel* result = NULL;
+ int query_result = sqlite3_step(statement);
+ if (SQLITE_ROW == query_result) {
+ result = new EntryKernel;
+ CHECK(sqlite3_column_count(statement) == static_cast<int>(FIELD_COUNT));
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ result->ref(static_cast<Int64Field>(i)) =
+ sqlite3_column_int64(statement, i);
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<IdField>(i)));
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ result->ref(static_cast<BitField>(i)) =
+ (0 != sqlite3_column_int(statement, i));
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<StringField>(i)));
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<BlobField>(i)));
+ }
+ ZeroFields(result, i);
+ } else {
+ CHECK(SQLITE_DONE == query_result);
+ result = NULL;
+ }
+ return result;
+}
+
+static bool StepDone(sqlite3_stmt* statement, const char* failed_call) {
+ int result = sqlite3_step(statement);
+ if (SQLITE_DONE == result && SQLITE_OK == (result = sqlite3_reset(statement)))
+ return true;
+ // Some error code.
+ LOG(WARNING) << failed_call << " failed with result " << result;
+ CHECK(!IsSqliteErrorOurFault(result));
+ return false;
+}
+
+static string ComposeCreateTableColumnSpecs(const ColumnSpec* begin,
+ const ColumnSpec* end) {
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ char separator = '(';
+ for (const ColumnSpec* column = begin; column != end; ++column) {
+ query.push_back(separator);
+ separator = ',';
+ query.append(column->name);
+ query.push_back(' ');
+ query.append(column->spec);
+ }
+ query.push_back(')');
+ return query;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// DirectoryBackingStore implementation.
+
+DirectoryBackingStore::DirectoryBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath)
+ : dir_name_(dir_name), backing_filepath_(backing_filepath),
+ load_dbhandle_(NULL), save_dbhandle_(NULL) {
+}
+
+DirectoryBackingStore::~DirectoryBackingStore() {
+ if (NULL != load_dbhandle_) {
+ sqlite3_close(load_dbhandle_);
+ load_dbhandle_ = NULL;
+ }
+ if (NULL != save_dbhandle_) {
+ sqlite3_close(save_dbhandle_);
+ save_dbhandle_ = NULL;
+ }
+}
+
+bool DirectoryBackingStore::OpenAndConfigureHandleHelper(
+ sqlite3** handle) const {
+ if (SQLITE_OK == SqliteOpen(backing_filepath_.c_str(), handle)) {
+ sqlite3_busy_timeout(*handle, kDirectoryBackingStoreBusyTimeoutMs);
+ RegisterPathNameCollate(*handle);
+ RegisterPathNameMatch(*handle);
+ return true;
+ }
+ return false;
+}
+
+DirOpenResult DirectoryBackingStore::Load(MetahandlesIndex* entry_bucket,
+ ExtendedAttributes* xattrs_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) {
+ DCHECK(load_dbhandle_ == NULL);
+ if (!OpenAndConfigureHandleHelper(&load_dbhandle_))
+ return FAILED_OPEN_DATABASE;
+
+ DirOpenResult result = InitializeTables();
+ if (OPENED != result)
+ return result;
+
+ DropDeletedEntries();
+ LoadEntries(entry_bucket);
+ LoadExtendedAttributes(xattrs_bucket);
+ LoadInfo(kernel_load_info);
+
+ sqlite3_close(load_dbhandle_);
+ load_dbhandle_ = NULL; // No longer used.
+
+ return OPENED;
+}
+
+bool DirectoryBackingStore::SaveChanges(
+ const Directory::SaveChangesSnapshot& snapshot) {
+ bool disk_full = false;
+ sqlite3* dbhandle = LazyGetSaveHandle();
+ {
+ {
+ ScopedStatement begin(PrepareQuery(dbhandle,
+ "BEGIN EXCLUSIVE TRANSACTION"));
+ if (!StepDone(begin.get(), "BEGIN")) {
+ disk_full = true;
+ goto DoneDBTransaction;
+ }
+ }
+
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ !disk_full && i != snapshot.dirty_metas.end(); ++i) {
+ DCHECK(i->dirty.any());
+ disk_full = !SaveEntryToDB(*i);
+ }
+
+ for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ !disk_full && i != snapshot.dirty_xattrs.end(); ++i) {
+ DCHECK(i->second.dirty);
+ if (i->second.is_deleted) {
+ disk_full = !DeleteExtendedAttributeFromDB(i);
+ } else {
+ disk_full = !SaveExtendedAttributeToDB(i);
+ }
+ }
+
+ if (!disk_full && (Directory::KERNEL_SHARE_INFO_DIRTY ==
+ snapshot.kernel_info_status)) {
+ const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
+ ScopedStatement update(PrepareQuery(dbhandle, "UPDATE share_info "
+ "SET last_sync_timestamp = ?, initial_sync_ended = ?, "
+ "store_birthday = ?, "
+ "next_id = ?",
+ info.last_sync_timestamp,
+ info.initial_sync_ended,
+ info.store_birthday,
+ info.next_id));
+ disk_full = !(StepDone(update.get(), "UPDATE share_info")
+ && 1 == sqlite3_changes(dbhandle));
+ }
+ if (disk_full) {
+ ExecOrDie(dbhandle, "ROLLBACK TRANSACTION");
+ } else {
+ ScopedStatement end_transaction(PrepareQuery(dbhandle,
+ "COMMIT TRANSACTION"));
+ disk_full = !StepDone(end_transaction.get(), "COMMIT TRANSACTION");
+ }
+ }
+
+ DoneDBTransaction:
+ return !disk_full;
+}
+
+DirOpenResult DirectoryBackingStore::InitializeTables() {
+ StatementExecutor se(load_dbhandle_);
+ if (SQLITE_DONE != se.Exec("BEGIN EXCLUSIVE TRANSACTION")) {
+ return FAILED_DISK_FULL;
+ }
+ int version_on_disk = 0;
+
+ if (DoesTableExist(load_dbhandle_, "share_version")) {
+ ScopedStatement version_query(
+ PrepareQuery(load_dbhandle_, "SELECT data from share_version"));
+ int query_result = sqlite3_step(version_query.get());
+ if (SQLITE_ROW == query_result) {
+ version_on_disk = sqlite3_column_int(version_query.get(), 0);
+ }
+ version_query.reset(NULL);
+ }
+ if (version_on_disk != kCurrentDBVersion) {
+ if (version_on_disk > kCurrentDBVersion) {
+ ExecOrDie(load_dbhandle_, "END TRANSACTION");
+ return FAILED_NEWER_VERSION;
+ }
+ LOG(INFO) << "Old/null sync database, version " << version_on_disk;
+ // Delete the existing database (if any), and create a freshone.
+ if (se.healthy()) {
+ DropAllTables();
+ se.set_result(CreateTables());
+ }
+ }
+ if (SQLITE_DONE == se.result()) {
+ {
+ ScopedStatement statement(PrepareQuery(load_dbhandle_,
+ "SELECT db_create_version, db_create_time FROM share_info"));
+ CHECK(SQLITE_ROW == sqlite3_step(statement.get()));
+ PathString db_create_version;
+ int db_create_time;
+ GetColumn(statement.get(), 0, &db_create_version);
+ GetColumn(statement.get(), 1, &db_create_time);
+ statement.reset(0);
+ LOG(INFO) << "DB created at " << db_create_time << " by version " <<
+ db_create_version;
+ }
+ // COMMIT TRANSACTION rolls back on failure.
+ if (SQLITE_DONE == Exec(load_dbhandle_, "COMMIT TRANSACTION"))
+ return OPENED;
+ } else {
+ ExecOrDie(load_dbhandle_, "ROLLBACK TRANSACTION");
+ }
+ return FAILED_DISK_FULL;
+}
+
+void DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
+ string select;
+ select.reserve(kUpdateStatementBufferSize);
+ select.append("SELECT");
+ const char* joiner = " ";
+ // Be explicit in SELECT order to match up with UnpackEntry.
+ for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
+ select.append(joiner);
+ select.append(ColumnName(i));
+ joiner = ", ";
+ }
+ select.append(" FROM metas ");
+ ScopedStatement statement(PrepareQuery(load_dbhandle_, select.c_str()));
+ base::hash_set<int> handles;
+ while (EntryKernel* kernel = UnpackEntry(statement.get())) {
+ DCHECK(handles.insert(kernel->ref(META_HANDLE)).second); // Only in debug.
+ entry_bucket->insert(kernel);
+ }
+}
+
+void DirectoryBackingStore::LoadExtendedAttributes(
+ ExtendedAttributes* xattrs_bucket) {
+ ScopedStatement statement(PrepareQuery(load_dbhandle_,
+ "SELECT metahandle, key, value FROM extended_attributes"));
+ int step_result = sqlite3_step(statement.get());
+ while (SQLITE_ROW == step_result) {
+ int64 metahandle;
+ PathString path_string_key;
+ ExtendedAttributeValue val;
+ val.is_deleted = false;
+ GetColumn(statement.get(), 0, &metahandle);
+ GetColumn(statement.get(), 1, &path_string_key);
+ GetColumn(statement.get(), 2, &(val.value));
+ ExtendedAttributeKey key(metahandle, path_string_key);
+ xattrs_bucket->insert(std::make_pair(key, val));
+ step_result = sqlite3_step(statement.get());
+ }
+ CHECK(SQLITE_DONE == step_result);
+}
+
+void DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
+ ScopedStatement query(PrepareQuery(load_dbhandle_,
+ "SELECT last_sync_timestamp, initial_sync_ended, "
+ "store_birthday, next_id, cache_guid "
+ "FROM share_info"));
+ CHECK(SQLITE_ROW == sqlite3_step(query.get()));
+ GetColumn(query.get(), 0, &info->kernel_info.last_sync_timestamp);
+ GetColumn(query.get(), 1, &info->kernel_info.initial_sync_ended);
+ GetColumn(query.get(), 2, &info->kernel_info.store_birthday);
+ GetColumn(query.get(), 3, &info->kernel_info.next_id);
+ GetColumn(query.get(), 4, &info->cache_guid);
+ query.reset(PrepareQuery(load_dbhandle_,
+ "SELECT MAX(metahandle) FROM metas"));
+ CHECK(SQLITE_ROW == sqlite3_step(query.get()));
+ GetColumn(query.get(), 0, &info->max_metahandle);
+}
+
+bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
+ return entry.ref(IS_NEW) ? SaveNewEntryToDB(entry) : UpdateEntryToDB(entry);
+}
+
+bool DirectoryBackingStore::SaveNewEntryToDB(const EntryKernel& entry) {
+ DCHECK(save_dbhandle_);
+ // TODO(timsteele): Should use INSERT OR REPLACE and eliminate one of
+ // the SaveNew / UpdateEntry code paths.
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ query.append("INSERT INTO metas ");
+ string values;
+ values.reserve(kUpdateStatementBufferSize);
+ values.append("VALUES ");
+ const char* separator = "( ";
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i]) {
+ query.append(separator);
+ values.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ values.append("?");
+ }
+ }
+ query.append(" ) ");
+ values.append(" )");
+ query.append(values);
+ ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
+ BindDirtyFields(entry, statement.get());
+ return StepDone(statement.get(), "SaveNewEntryToDB()") &&
+ 1 == sqlite3_changes(save_dbhandle_);
+}
+
+bool DirectoryBackingStore::UpdateEntryToDB(const EntryKernel& entry) {
+ DCHECK(save_dbhandle_);
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ query.append("UPDATE metas ");
+ const char* separator = "SET ";
+ int i;
+ for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i]) {
+ query.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ query.append(" = ? ");
+ }
+ }
+ query.append("WHERE metahandle = ?");
+ ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
+ const int var_count = BindDirtyFields(entry, statement.get());
+ BindArg(statement.get(), entry.ref(META_HANDLE), var_count + 1);
+ return StepDone(statement.get(), "UpdateEntryToDB()") &&
+ 1 == sqlite3_changes(save_dbhandle_);
+}
+
+bool DirectoryBackingStore::SaveExtendedAttributeToDB(
+ ExtendedAttributes::const_iterator i) {
+ DCHECK(save_dbhandle_);
+ ScopedStatement insert(PrepareQuery(save_dbhandle_,
+ "INSERT INTO extended_attributes "
+ "(metahandle, key, value) "
+ "values ( ?, ?, ? )",
+ i->first.metahandle, i->first.key, i->second.value));
+ return StepDone(insert.get(), "SaveExtendedAttributeToDB()")
+ && 1 == sqlite3_changes(LazyGetSaveHandle());
+}
+
+bool DirectoryBackingStore::DeleteExtendedAttributeFromDB(
+ ExtendedAttributes::const_iterator i) {
+ DCHECK(save_dbhandle_);
+ ScopedStatement delete_attribute(PrepareQuery(save_dbhandle_,
+ "DELETE FROM extended_attributes "
+ "WHERE metahandle = ? AND key = ? ",
+ i->first.metahandle, i->first.key));
+ if (!StepDone(delete_attribute.get(), "DeleteExtendedAttributeFromDB()")) {
+ LOG(ERROR) << "DeleteExtendedAttributeFromDB(),StepDone() failed "
+ << "for metahandle: " << i->first.metahandle << " key: "
+ << i->first.key;
+ return false;
+ }
+ // The attribute may have never been saved to the database if it was
+ // created and then immediately deleted. So don't check that we
+ // deleted exactly 1 row.
+ return true;
+}
+
+void DirectoryBackingStore::DropDeletedEntries() {
+ static const char delete_extended_attributes[] =
+ "DELETE FROM extended_attributes WHERE metahandle IN "
+ "(SELECT metahandle from death_row)";
+ static const char delete_metas[] = "DELETE FROM metas WHERE metahandle IN "
+ "(SELECT metahandle from death_row)";
+ // Put all statements into a transaction for better performance
+ ExecOrDie(load_dbhandle_, "BEGIN TRANSACTION");
+ ExecOrDie(load_dbhandle_, "CREATE TEMP TABLE death_row (metahandle BIGINT)");
+ ExecOrDie(load_dbhandle_, "INSERT INTO death_row "
+ "SELECT metahandle from metas WHERE is_del > 0 "
+ " AND is_unsynced < 1"
+ " AND is_unapplied_update < 1");
+ StatementExecutor x(load_dbhandle_);
+ x.Exec(delete_extended_attributes);
+ x.Exec(delete_metas);
+ ExecOrDie(load_dbhandle_, "DROP TABLE death_row");
+ ExecOrDie(load_dbhandle_, "COMMIT TRANSACTION");
+}
+
+void DirectoryBackingStore::SafeDropTable(const char* table_name) {
+ string query = "DROP TABLE IF EXISTS ";
+ query.append(table_name);
+ const char* tail;
+ sqlite3_stmt* statement = NULL;
+ if (SQLITE_OK == sqlite3_prepare(load_dbhandle_, query.data(),
+ query.size(), &statement, &tail)) {
+ CHECK(SQLITE_DONE == sqlite3_step(statement));
+ }
+ sqlite3_finalize(statement);
+}
+
+int DirectoryBackingStore::CreateExtendedAttributeTable() {
+ SafeDropTable("extended_attributes");
+ LOG(INFO) << "CreateExtendedAttributeTable";
+ return Exec(load_dbhandle_, "CREATE TABLE extended_attributes("
+ "metahandle bigint, "
+ "key varchar(127), "
+ "value blob, "
+ "PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE)");
+}
+
+void DirectoryBackingStore::DropAllTables() {
+ SafeDropTable("metas");
+ SafeDropTable("share_info");
+ SafeDropTable("share_version");
+ SafeDropTable("extended_attributes");
+}
+
+int DirectoryBackingStore::CreateTables() {
+ LOG(INFO) << "First run, creating tables";
+ // Create two little tables share_version and share_info
+ int result = Exec(load_dbhandle_, "CREATE TABLE share_version ("
+ "id VARCHAR(128) primary key, data INT)");
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO share_version VALUES(?, ?)",
+ dir_name_, kCurrentDBVersion);
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "CREATE TABLE share_info ("
+ "id VARCHAR(128) primary key, "
+ "last_sync_timestamp INT, "
+ "name VARCHAR(128), "
+ // Gets set if the syncer ever gets updates from the
+ // server and the server returns 0. Lets us detect the
+ // end of the initial sync.
+ "initial_sync_ended BIT default 0, "
+ "store_birthday VARCHAR(256), "
+ "db_create_version VARCHAR(128), "
+ "db_create_time int, "
+ "next_id bigint default -2, "
+ "cache_guid VARCHAR(32))");
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO share_info VALUES"
+ "(?, " // id
+ "0, " // last_sync_timestamp
+ "?, " // name
+ "?, " // initial_sync_ended
+ "?, " // store_birthday
+ "?, " // db_create_version
+ "?, " // db_create_time
+ "-2, " // next_id
+ "?)", // cache_guid
+ dir_name_, // id
+ dir_name_, // name
+ false, // initial_sync_ended
+ "", // store_birthday
+ SYNC_ENGINE_VERSION_STRING, // db_create_version
+ static_cast<int32>(time(0)), // db_create_time
+ GenerateCacheGUID()); // cache_guid
+ // Create the big metas table.
+ string query = "CREATE TABLE metas " + ComposeCreateTableColumnSpecs
+ (g_metas_columns, g_metas_columns + ARRAYSIZE(g_metas_columns));
+ result = SQLITE_DONE != result ? result : Exec(load_dbhandle_, query.c_str());
+ // Insert the entry for the root into the metas table.
+ const int64 now = Now();
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO metas "
+ "( id, metahandle, is_dir, ctime, mtime) "
+ "VALUES ( \"r\", 1, 1, ?, ?)",
+ now, now);
+ result = SQLITE_DONE != result ? result : CreateExtendedAttributeTable();
+ return result;
+}
+
+sqlite3* DirectoryBackingStore::LazyGetSaveHandle() {
+ if (!save_dbhandle_ && !OpenAndConfigureHandleHelper(&save_dbhandle_)) {
+ DCHECK(FALSE) << "Unable to open handle for saving";
+ return NULL;
+ }
+ return save_dbhandle_;
+}
+
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
new file mode 100644
index 0000000..a0bf8b1
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+
+#include <set>
+
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+extern "C" {
+struct sqlite3;
+struct sqlite3_stmt;
+}
+
+namespace syncable {
+
+struct ColumnSpec;
+typedef Directory::MetahandlesIndex MetahandlesIndex;
+
+// Provides sqlite3-based persistence for a syncable::Directory object. You can
+// load all the persisted data to prime a syncable::Directory on startup by
+// invoking Load. The only other thing you (or more correctly, a Directory)
+// can do here is save any changes that have occurred since calling Load, which
+// can be done periodically as often as desired*
+//
+// * If you only ever use a DirectoryBackingStore (DBS) from a single thread
+// then you can stop reading now. This is implemented using sqlite3, which
+// requires that each thread accesses a DB via a handle (sqlite3*) opened by
+// sqlite_open for that thread and only that thread. To avoid complicated TLS
+// logic to swap handles in-and-out as different threads try to get a hold of a
+// DBS, the DBS does two things:
+// 1. Uses a separate handle for Load()ing which is closed as soon as loading
+// finishes, and
+// 2. Requires that SaveChanges *only* be called from a single thread, and that
+// thread *must* be the thread that owns / is responsible for destroying
+// the DBS.
+// This way, any thread may open a Directory (which today can be either the
+// AuthWatcherThread or SyncCoreThread) and Load its DBS. The first time
+// SaveChanges is called a new sqlite3 handle is created, and it will get closed
+// when the DBS is destroyed, which is the reason for the requirement that the
+// thread that "uses" the DBS is the thread that destroys it.
+class DirectoryBackingStore {
+ public:
+ DirectoryBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath);
+
+ virtual ~DirectoryBackingStore();
+
+ // Loads and drops all currently persisted meta entries into
+ // |entry_bucket|, all currently persisted xattrs in |xattrs_bucket|,
+ // and loads appropriate persisted kernel info in |info_bucket|.
+ // NOTE: On success (return value of OPENED), the buckets are populated with
+ // newly allocated items, meaning ownership is bestowed upon the caller.
+ DirOpenResult Load(MetahandlesIndex* entry_bucket,
+ ExtendedAttributes* xattrs_bucket,
+ Directory::KernelLoadInfo* kernel_load_info);
+
+ // Updates the on-disk store with the input |snapshot| as a database
+ // transaction. Does NOT open any syncable transactions as this would cause
+ // opening transactions elsewhere to block on synchronous I/O.
+ // DO NOT CALL THIS FROM MORE THAN ONE THREAD EVER. Also, whichever thread
+ // calls SaveChanges *must* be the thread that owns/destroys |this|.
+ virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot);
+
+ private:
+ // General Directory initialization and load helpers.
+ DirOpenResult InitializeTables();
+ // Returns an sqlite return code, usually SQLITE_DONE.
+ int CreateTables();
+ int CreateExtendedAttributeTable();
+ // We don't need to load any synced and applied deleted entries, we can
+ // in fact just purge them forever on startup.
+ void DropDeletedEntries();
+ // Drops a table if it exists, harmless if the table did not already exist.
+ void SafeDropTable(const char* table_name);
+
+ // Load helpers for entries and attributes.
+ void LoadEntries(MetahandlesIndex* entry_bucket);
+ void LoadExtendedAttributes(ExtendedAttributes* xattrs_bucket);
+ void LoadInfo(Directory::KernelLoadInfo* info);
+
+ // Save/update helpers for entries. Return false if sqlite commit fails.
+ bool SaveEntryToDB(const EntryKernel& entry);
+ bool SaveNewEntryToDB(const EntryKernel& entry);
+ bool UpdateEntryToDB(const EntryKernel& entry);
+
+ // Save/update helpers for attributes. Return false if sqlite commit fails.
+ bool SaveExtendedAttributeToDB(ExtendedAttributes::const_iterator i);
+ bool DeleteExtendedAttributeFromDB(ExtendedAttributes::const_iterator i);
+
+ // Creates a new sqlite3 handle to the backing database. Sets sqlite operation
+ // timeout preferences and registers our overridden sqlite3 operators for
+ // said handle. Returns true on success, false if the sqlite open operation
+ // did not succeed.
+ bool OpenAndConfigureHandleHelper(sqlite3** handle) const;
+
+ // Lazy creation of save_dbhandle_ for use by SaveChanges code path.
+ sqlite3* LazyGetSaveHandle();
+
+ // Drop all tables in preparation for reinitialization.
+ void DropAllTables();
+
+ // The handle to our sqlite on-disk store for initialization and loading, and
+ // for saving changes periodically via SaveChanges, respectively.
+ // TODO(timsteele): We should only have one handle here. The reason we need
+ // two at the moment is because the DB can be opened by either the AuthWatcher
+ // or SyncCore threads, but SaveChanges is always called by the latter. We
+ // need to change initialization so the DB is only accessed from one thread.
+ sqlite3* load_dbhandle_;
+ sqlite3* save_dbhandle_;
+
+ PathString dir_name_;
+ PathString backing_filepath_;
+
+ DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
diff --git a/chrome/browser/sync/syncable/directory_event.h b/chrome/browser/sync/syncable/directory_event.h
new file mode 100644
index 0000000..638ce6a
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_event.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
+
+namespace syncable {
+
+// This kind of Event is emitted when the state of a Directory object
+// changes somehow, such as the directory being opened or closed.
+// Don't confuse it with a DirectoryChangeEvent, which is what happens
+// when one or more of the Entry contents of a Directory have been updated.
+enum DirectoryEvent {
+ DIRECTORY_CLOSED,
+ DIRECTORY_DESTROYED,
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
diff --git a/chrome/browser/sync/syncable/directory_manager.cc b/chrome/browser/sync/syncable/directory_manager.cc
new file mode 100644
index 0000000..b044d49
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_manager.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+#include <map>
+#include <set>
+#include <iterator>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+namespace syncable {
+
+static const PSTR_CHAR kSyncDataDatabaseFilename[] = PSTR("SyncData.sqlite3");
+
+DirectoryManagerEvent DirectoryManagerShutdownEvent() {
+ DirectoryManagerEvent event;
+ event.what_happened = DirectoryManagerEvent::SHUTDOWN;
+ return event;
+}
+
+// static
+const PathString DirectoryManager::GetSyncDataDatabaseFilename() {
+ return PathString(kSyncDataDatabaseFilename);
+}
+
+const PathString DirectoryManager::GetSyncDataDatabasePath() const {
+ PathString path(root_path_);
+ path.append(kSyncDataDatabaseFilename);
+ return path;
+}
+
+DirectoryManager::DirectoryManager(const PathString& path)
+ : root_path_(AppendSlash(path)),
+ channel_(new Channel(DirectoryManagerShutdownEvent())),
+ managed_directory_(NULL) {
+ CHECK(0 == pthread_mutex_init(&mutex_, NULL));
+}
+
+DirectoryManager::~DirectoryManager() {
+ DCHECK_EQ(managed_directory_, static_cast<Directory*>(NULL))
+ << "Dir " << managed_directory_->name() << " not closed!";
+ pthread_mutex_lock(&mutex_);
+ delete channel_;
+ pthread_mutex_unlock(&mutex_);
+ CHECK(0 == pthread_mutex_destroy(&mutex_));
+}
+
+bool DirectoryManager::Open(const PathString& name) {
+ bool was_open = false;
+ const DirOpenResult result = OpenImpl(name,
+ GetSyncDataDatabasePath(), &was_open);
+ if (!was_open) {
+ DirectoryManagerEvent event;
+ event.dirname = name;
+ if (syncable::OPENED == result) {
+ event.what_happened = DirectoryManagerEvent::OPENED;
+ } else {
+ event.what_happened = DirectoryManagerEvent::OPEN_FAILED;
+ event.error = result;
+ }
+ channel_->NotifyListeners(event);
+ }
+ return syncable::OPENED == result;
+}
+
+// Opens a directory. Returns false on error.
+DirOpenResult DirectoryManager::OpenImpl(const PathString& name,
+ const PathString& path,
+ bool* was_open) {
+ pthread_mutex_lock(&mutex_);
+ // Check to see if it's already open.
+ bool opened = false;
+ if (managed_directory_) {
+ DCHECK_EQ(ComparePathNames(name, managed_directory_->name()), 0)
+ << "Can't open more than one directory.";
+ opened = *was_open = true;
+ }
+ pthread_mutex_unlock(&mutex_);
+ if (opened)
+ return syncable::OPENED;
+ // Otherwise, open it.
+
+ Directory* dir = new Directory;
+ const DirOpenResult result = dir->Open(path, name);
+ if (syncable::OPENED == result) {
+ pthread_mutex_lock(&mutex_);
+ managed_directory_ = dir;
+ pthread_mutex_unlock(&mutex_);
+ } else {
+ delete dir;
+ }
+ return result;
+}
+
+// Marks a directory as closed. It might take a while until all the
+// file handles and resources are freed by other threads.
+void DirectoryManager::Close(const PathString& name) {
+ // Erase from mounted and opened directory lists.
+ pthread_mutex_lock(&mutex_);
+
+ if (!managed_directory_ ||
+ ComparePathNames(name, managed_directory_->name()) != 0) {
+ // It wasn't open;
+ pthread_mutex_unlock(&mutex_);
+ return;
+ }
+ pthread_mutex_unlock(&mutex_);
+
+ // Notify listeners.
+ managed_directory_->channel()->NotifyListeners(DIRECTORY_CLOSED);
+ DirectoryManagerEvent event = { DirectoryManagerEvent::CLOSED, name };
+ channel_->NotifyListeners(event);
+
+ delete managed_directory_;
+ managed_directory_ = NULL;
+}
+
+// Marks all directories as closed. It might take a while until all the
+// file handles and resources are freed by other threads.
+void DirectoryManager::CloseAllDirectories() {
+ if (managed_directory_)
+ Close(managed_directory_->name());
+}
+
+void DirectoryManager::FinalSaveChangesForAll() {
+ pthread_mutex_lock(&mutex_);
+ if (managed_directory_)
+ managed_directory_->SaveChanges();
+ pthread_mutex_unlock(&mutex_);
+}
+
+void DirectoryManager::GetOpenDirectories(DirNames* result) {
+ result->clear();
+ pthread_mutex_lock(&mutex_);
+ if (managed_directory_)
+ result->push_back(managed_directory_->name());
+ pthread_mutex_unlock(&mutex_);
+}
+
+ScopedDirLookup::ScopedDirLookup(DirectoryManager* dirman,
+ const PathString& name) : dirman_(dirman) {
+ dir_ = dirman->managed_directory_ &&
+ (ComparePathNames(name, dirman->managed_directory_->name()) == 0) ?
+ dirman->managed_directory_ : NULL;
+ good_ = dir_;
+ good_checked_ = false;
+}
+
+ScopedDirLookup::~ScopedDirLookup() { }
+
+Directory* ScopedDirLookup::operator -> () const {
+ CHECK(good_checked_);
+ DCHECK(good_);
+ return dir_;
+}
+
+ScopedDirLookup::operator Directory* () const {
+ CHECK(good_checked_);
+ DCHECK(good_);
+ return dir_;
+}
+
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/directory_manager.h b/chrome/browser/sync/syncable/directory_manager.h
new file mode 100644
index 0000000..f937539
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_manager.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This used to do a lot of TLS-based management of multiple Directory objects.
+// We now can access Directory objects from any thread for general purpose
+// operations and we only ever have one Directory, so this class isn't doing
+// anything too fancy besides keeping calling and access conventions the same
+// for now.
+// TODO(timsteele): We can probably nuke this entire class and use raw
+// Directory objects everywhere.
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
+
+#include <pthread.h>
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/path_name_cmp.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace sync_api { class BaseTransaction; }
+
+namespace syncable {
+
+struct DirectoryManagerEvent {
+ enum {
+ OPEN_FAILED,
+ OPENED,
+ CLOSED,
+ CLOSED_ALL,
+ SHUTDOWN,
+ } what_happened;
+ PathString dirname;
+ DirOpenResult error; // Only for OPEN_FAILED.
+ typedef DirectoryManagerEvent EventType;
+ static inline bool IsChannelShutdownEvent(const EventType& event) {
+ return SHUTDOWN == event.what_happened;
+ }
+};
+
+DirectoryManagerEvent DirectoryManagerShutdownEvent();
+
+class DirectoryManager {
+ public:
+ typedef EventChannel<DirectoryManagerEvent> Channel;
+
+ // root_path specifies where db is stored.
+ explicit DirectoryManager(const PathString& root_path);
+ ~DirectoryManager();
+
+ static const PathString GetSyncDataDatabaseFilename();
+ const PathString GetSyncDataDatabasePath() const;
+
+ // Opens a directory. Returns false on error.
+ // Name parameter is the the user's login,
+ // MUST already have been converted to a common case.
+ bool Open(const PathString& name);
+
+ // Marks a directory as closed. It might take a while until all the
+ // file handles and resources are freed by other threads.
+ void Close(const PathString& name);
+
+ // Marks all directories as closed. It might take a while until all the
+ // file handles and resources are freed by other threads.
+ void CloseAllDirectories();
+
+ // Should be called at App exit.
+ void FinalSaveChangesForAll();
+
+ // Gets the list of currently open directory names.
+ typedef std::vector<PathString> DirNames;
+ void GetOpenDirectories(DirNames* result);
+
+ Channel* channel() const { return channel_; }
+
+ protected:
+ DirOpenResult OpenImpl(const PathString& name, const PathString& path,
+ bool* was_open);
+
+ // Helpers for friend class ScopedDirLookup:
+ friend class ScopedDirLookup;
+
+ const PathString root_path_;
+ // protects managed_directory_
+ mutable pthread_mutex_t mutex_;
+ Directory* managed_directory_;
+
+ Channel* const channel_;
+
+ private:
+
+ DISALLOW_COPY_AND_ASSIGN(DirectoryManager);
+};
+
+
+class ScopedDirLookup {
+ public:
+ ScopedDirLookup(DirectoryManager* dirman, const PathString& name);
+ ~ScopedDirLookup();
+
+ inline bool good() {
+ good_checked_ = true;
+ return good_;
+ }
+ Directory* operator -> () const;
+ operator Directory* () const;
+
+ protected: // Don't allow creation on heap, except by sync API wrapper.
+ friend class sync_api::BaseTransaction;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ Directory* dir_;
+ bool good_;
+ // Ensure that the programmer checks good before using the ScopedDirLookup
+ // This member should can be removed if it ever shows up in profiling
+ bool good_checked_;
+ DirectoryManager* const dirman_;
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
diff --git a/chrome/browser/sync/syncable/path_name_cmp.h b/chrome/browser/sync/syncable/path_name_cmp.h
new file mode 100644
index 0000000..1478a52
--- /dev/null
+++ b/chrome/browser/sync/syncable/path_name_cmp.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
+
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+
+struct LessPathNames {
+ bool operator() (const PathString&, const PathString&) const;
+};
+
+int ComparePathNames(const PathString& a, const PathString& b);
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
diff --git a/chrome/browser/sync/syncable/syncable-inl.h b/chrome/browser/sync/syncable/syncable-inl.h
new file mode 100644
index 0000000..81723bf
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable-inl.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
+
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "chrome/browser/sync/util/row_iterator.h"
+
+namespace syncable {
+
+template <typename FieldType, FieldType field_index>
+class LessField {
+ public:
+ inline bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ return a->ref(field_index) < b->ref(field_index);
+ }
+};
+
+struct IdRowTraits {
+ typedef syncable::Id RowType;
+ void Extract(sqlite3_stmt* statement, syncable::Id* id) const {
+ GetColumn(statement, 0, id);
+ }
+};
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
new file mode 100644
index 0000000..b997a5b
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -0,0 +1,2002 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#ifdef OS_MACOSX
+#include <CoreFoundation/CoreFoundation.h>
+#elif defined(OS_LINUX)
+#include <glib.h>
+#elif defined(OS_WINDOWS)
+#include <shlwapi.h> // for PathMatchSpec
+#endif
+
+#include <algorithm>
+#include <functional>
+#include <iomanip>
+#include <iterator>
+#include <set>
+#include <string>
+
+#include "base/hash_tables.h"
+#include "base/logging.h"
+#include "base/perftimer.h"
+#include "base/scoped_ptr.h"
+#include "base/time.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/syncable/syncable_columns.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/fast_dump.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+namespace {
+enum InvariantCheckLevel {
+ OFF = 0,
+ VERIFY_IN_MEMORY = 1,
+ FULL_DB_VERIFICATION = 2
+};
+
+static const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
+
+// Max number of milliseconds to spend checking syncable entry invariants
+static const int kInvariantCheckMaxMs = 50;
+} // namespace
+
+// if sizeof(time_t) != sizeof(int32) we need to alter or expand the sqlite
+// datatype.
+COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32);
+
+using browser_sync::FastDump;
+using browser_sync::SyncerUtil;
+using std::string;
+
+
+namespace syncable {
+
+int64 Now() {
+#ifdef OS_WINDOWS
+ FILETIME filetime;
+ SYSTEMTIME systime;
+ GetSystemTime(&systime);
+ SystemTimeToFileTime(&systime, &filetime);
+ // MSDN recommends converting via memcpy like this.
+ LARGE_INTEGER n;
+ memcpy(&n, &filetime, sizeof(filetime));
+ return n.QuadPart;
+#elif (defined(OS_LINUX) || defined(OS_MACOSX))
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return static_cast<int64>(tv.tv_sec);
+#else
+#error NEED OS SPECIFIC Now() implementation
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Compare functions and hashes for the indices.
+
+// Callback for sqlite3
+int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
+ const void* b) {
+#ifdef OS_WINDOWS
+ DCHECK_EQ(0, a_bytes % 2);
+ DCHECK_EQ(0, b_bytes % 2);
+ int result = CompareString(LOCALE_INVARIANT, NORM_IGNORECASE,
+ static_cast<const PathChar*>(a), a_bytes / 2,
+ static_cast<const PathChar*>(b), b_bytes / 2);
+ CHECK(0 != result) << "Error comparing strings: " << GetLastError();
+ return result - 2; // Convert to -1, 0, 1
+#elif defined(OS_LINUX)
+// misnomer for Linux. These are already utf8 bit strings.
+ gchar *case_folded_a;
+ gchar *case_folded_b;
+ GError *err = NULL;
+ case_folded_a = g_utf8_casefold(reinterpret_cast<const gchar*>(a), a_bytes);
+ CHECK(case_folded_a != NULL) << "g_utf8_casefold failed";
+ case_folded_b = g_utf8_casefold(reinterpret_cast<const gchar*>(b), b_bytes);
+ CHECK(case_folded_b != NULL) << "g_utf8_casefold failed";
+ gint result = g_utf8_collate(case_folded_a, case_folded_b);
+ g_free(case_folded_a);
+ g_free(case_folded_b);
+ if (result < 0) return -1;
+ if (result > 0) return 1;
+ return 0;
+#elif defined(OS_MACOSX)
+ CFStringRef a_str;
+ CFStringRef b_str;
+ a_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(a),
+ a_bytes, kCFStringEncodingUTF8, FALSE);
+ b_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(b),
+ b_bytes, kCFStringEncodingUTF8, FALSE);
+ CFComparisonResult res;
+ res = CFStringCompare(a_str, b_str, kCFCompareCaseInsensitive);
+ CFRelease(a_str);
+ CFRelease(b_str);
+ return res;
+#else
+#error no ComparePathNames16() for your OS
+#endif
+}
+
+template <Int64Field field_index>
+class SameField {
+ public:
+ inline bool operator()(const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ return a->ref(field_index) == b->ref(field_index);
+ }
+};
+
+template <Int64Field field_index>
+class HashField {
+ public:
+ inline size_t operator()(const syncable::EntryKernel* a) const {
+ return hasher_(a->ref(field_index));
+ }
+ base::hash_set<int64> hasher_;
+};
+
+// TODO(ncarter): Rename!
+int ComparePathNames(const PathString& a, const PathString& b) {
+ const size_t val_size = sizeof(PathString::value_type);
+ return ComparePathNames16(NULL, a.size() * val_size, a.data(),
+ b.size() * val_size, b.data());
+}
+
+class LessParentIdAndNames {
+ public:
+ bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ if (a->ref(PARENT_ID) != b->ref(PARENT_ID))
+ return a->ref(PARENT_ID) < b->ref(PARENT_ID);
+ return ComparePathNames(a->ref(NAME), b->ref(NAME)) < 0;
+ }
+};
+
+bool LessPathNames::operator() (const PathString& a,
+ const PathString& b) const {
+ return ComparePathNames(a, b) < 0;
+}
+
+// static
+Name Name::FromEntryKernel(EntryKernel* kernel) {
+ PathString& sync_name_ref = kernel->ref(UNSANITIZED_NAME).empty() ?
+ kernel->ref(NAME) : kernel->ref(UNSANITIZED_NAME);
+ return Name(kernel->ref(NAME), sync_name_ref, kernel->ref(NON_UNIQUE_NAME));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Directory
+
+static const DirectoryChangeEvent kShutdownChangesEvent =
+ { DirectoryChangeEvent::SHUTDOWN, 0, 0 };
+
+void DestroyThreadNodeKey(void* vnode) {
+ ThreadNode* const node = reinterpret_cast<ThreadNode*>(vnode);
+ CHECK(!node->in_list)
+ << "\nThread exited while holding the transaction mutex!\n" << *node;
+ delete node;
+}
+
+Directory::Kernel::Kernel(const PathString& db_path,
+ const PathString& name,
+ const KernelLoadInfo& info)
+: db_path(db_path),
+ refcount(1),
+ name_(name),
+ metahandles_index(new Directory::MetahandlesIndex),
+ ids_index(new Directory::IdsIndex),
+ parent_id_and_names_index(new Directory::ParentIdAndNamesIndex),
+ extended_attributes(new ExtendedAttributes),
+ unapplied_update_metahandles(new MetahandleSet),
+ unsynced_metahandles(new MetahandleSet),
+ channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
+ changes_channel(new Directory::ChangesChannel(kShutdownChangesEvent)),
+ last_sync_timestamp_(info.kernel_info.last_sync_timestamp),
+ initial_sync_ended_(info.kernel_info.initial_sync_ended),
+ store_birthday_(info.kernel_info.store_birthday),
+ next_id(info.kernel_info.next_id),
+ cache_guid_(info.cache_guid),
+ next_metahandle(info.max_metahandle + 1) {
+ info_status_ = Directory::KERNEL_SHARE_INFO_VALID;
+ CHECK(0 == pthread_mutex_init(&mutex, NULL));
+ CHECK(0 == pthread_key_create(&thread_node_key, &DestroyThreadNodeKey));
+}
+
+inline void DeleteEntry(EntryKernel* kernel) {
+ delete kernel;
+}
+
+void Directory::Kernel::AddRef() {
+ base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
+}
+
+void Directory::Kernel::Release() {
+ if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
+ delete this;
+}
+
+Directory::Kernel::~Kernel() {
+ CHECK(0 == refcount);
+ delete channel;
+ delete changes_channel;
+ CHECK(0 == pthread_mutex_destroy(&mutex));
+ pthread_key_delete(thread_node_key);
+ delete unsynced_metahandles;
+ delete unapplied_update_metahandles;
+ delete extended_attributes;
+ delete parent_id_and_names_index;
+ delete ids_index;
+ for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry);
+ delete metahandles_index;
+}
+
+Directory::Directory() : kernel_(NULL), store_(NULL) {
+}
+
+Directory::~Directory() {
+ Close();
+}
+
+BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec) {
+#ifdef OS_WINDOWS
+ // NB If we go Vista only this is easier:
+ // http://msdn2.microsoft.com/en-us/library/ms628611.aspx
+
+ // PathMatchSpec strips spaces from the start of pathspec, so we compare those
+ // ourselves.
+ const PathChar* pathname_ptr = pathname.c_str();
+ const PathChar* pathspec_ptr = pathspec.c_str();
+
+ while (*pathname_ptr == ' ' && *pathspec_ptr == ' ')
+ ++pathname_ptr, ++pathspec_ptr;
+
+ // if we have more inital spaces in the pathspec than in the pathname then the
+ // result from PathMatchSpec will be erronous
+ if (*pathspec_ptr == ' ')
+ return FALSE;
+
+ // PathMatchSpec also gets "confused" when there are ';' characters in name or
+ // in spec. So, if we match (f.i.) ";" with ";" PathMatchSpec will return
+ // FALSE (which is wrong). Luckily for us, we can easily fix this by
+ // substituting ';' with ':' which is illegal character in file name and
+ // we're not going to see it there. With ':' in path name and spec
+ // PathMatchSpec works fine.
+ if ((NULL == wcschr(pathname_ptr, L';')) &&
+ (NULL == wcschr(pathspec_ptr, L';'))) {
+ // No ';' in file name and in spec. Just pass it as it is.
+ return ::PathMatchSpec(pathname_ptr, pathspec_ptr);
+ }
+
+ // We need to subst ';' with ':' in both, name and spec
+ PathString name_subst(pathname_ptr);
+ PathString spec_subst(pathspec_ptr);
+
+ PathString::size_type index = name_subst.find(L';');
+ while (PathString::npos != index) {
+ name_subst[index] = L':';
+ index = name_subst.find(L';', index + 1);
+ }
+
+ index = spec_subst.find(L';');
+ while (PathString::npos != index) {
+ spec_subst[index] = L':';
+ index = spec_subst.find(L';', index + 1);
+ }
+
+ return ::PathMatchSpec(name_subst.c_str(), spec_subst.c_str());
+#else
+ return 0 == ComparePathNames(pathname, pathspec);
+#endif
+}
+
+DirOpenResult Directory::Open(const PathString& file_path,
+ const PathString& name) {
+ const DirOpenResult result = OpenImpl(file_path, name);
+ if (OPENED != result)
+ Close();
+ return result;
+}
+
+void Directory::InitializeIndices() {
+ MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+ for (; it != kernel_->metahandles_index->end(); ++it) {
+ EntryKernel* entry = *it;
+ if (!entry->ref(IS_DEL))
+ kernel_->parent_id_and_names_index->insert(entry);
+ kernel_->ids_index->insert(entry);
+ if (entry->ref(IS_UNSYNCED))
+ kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
+ if (entry->ref(IS_UNAPPLIED_UPDATE))
+ kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE));
+ }
+}
+
+DirectoryBackingStore* Directory::CreateBackingStore(
+ const PathString& dir_name, const PathString& backing_filepath) {
+ return new DirectoryBackingStore(dir_name, backing_filepath);
+}
+
+DirOpenResult Directory::OpenImpl(const PathString& file_path,
+ const PathString& name) {
+ DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
+ const PathString db_path = ::GetFullPath(file_path);
+ store_ = CreateBackingStore(name, db_path);
+
+ KernelLoadInfo info;
+ // Temporary indicies before kernel_ initialized in case Load fails. We 0(1)
+ // swap these later.
+ MetahandlesIndex metas_bucket;
+ ExtendedAttributes xattrs_bucket;
+ DirOpenResult result = store_->Load(&metas_bucket, &xattrs_bucket, &info);
+ if (OPENED != result)
+ return result;
+
+ kernel_ = new Kernel(db_path, name, info);
+ kernel_->metahandles_index->swap(metas_bucket);
+ kernel_->extended_attributes->swap(xattrs_bucket);
+ InitializeIndices();
+ return OPENED;
+}
+
+void Directory::Close() {
+ if (store_)
+ delete store_;
+ store_ = NULL;
+ if (kernel_) {
+ bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
+ DCHECK(del) << "Kernel should only have a single ref";
+ if (del)
+ delete kernel_;
+ kernel_ = NULL;
+ }
+}
+
+EntryKernel* Directory::GetEntryById(const Id& id) {
+ ScopedKernelLock lock(this);
+ return GetEntryById(id, &lock);
+}
+
+EntryKernel* Directory::GetEntryById(const Id& id,
+ ScopedKernelLock* const lock) {
+ DCHECK(kernel_);
+ // First look up in memory
+ kernel_->needle.ref(ID) = id;
+ IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
+ if (id_found != kernel_->ids_index->end()) {
+ // Found it in memory. Easy.
+ return *id_found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByTag(const PathString& tag) {
+ ScopedKernelLock lock(this);
+ DCHECK(kernel_);
+ // We don't currently keep a separate index for the tags. Since tags
+ // only exist for server created items that are the first items
+ // to be created in a store, they should have small metahandles.
+ // So, we just iterate over the items in sorted metahandle order,
+ // looking for a match.
+ MetahandlesIndex& set = *kernel_->metahandles_index;
+ for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
+ if ((*i)->ref(SINGLETON_TAG) == tag) {
+ return *i;
+ }
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByHandle(const int64 metahandle) {
+ ScopedKernelLock lock(this);
+ return GetEntryByHandle(metahandle, &lock);
+}
+
+EntryKernel* Directory::GetEntryByHandle(const int64 metahandle,
+ ScopedKernelLock* lock) {
+ // Look up in memory
+ kernel_->needle.ref(META_HANDLE) = metahandle;
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ if (found != kernel_->metahandles_index->end()) {
+ // Found it in memory. Easy.
+ return *found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetChildWithName(const Id& parent_id,
+ const PathString& name) {
+ ScopedKernelLock lock(this);
+ return GetChildWithName(parent_id, name, &lock);
+}
+
+// Will return child entry if the folder is opened,
+// otherwise it will return NULL.
+EntryKernel* Directory::GetChildWithName(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock) {
+ PathString dbname = name;
+ EntryKernel* parent = GetEntryById(parent_id, lock);
+ if (parent == NULL)
+ return NULL;
+ return GetChildWithNameImpl(parent_id, dbname, lock);
+}
+
+// Will return child entry even when the folder is not
+// opened. This is used by syncer to apply update when folder is closed.
+EntryKernel* Directory::GetChildWithDBName(const Id& parent_id,
+ const PathString& name) {
+ ScopedKernelLock lock(this);
+ return GetChildWithNameImpl(parent_id, name, &lock);
+}
+
+EntryKernel* Directory::GetChildWithNameImpl(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock) {
+ // First look up in memory:
+ kernel_->needle.ref(NAME) = name;
+ kernel_->needle.ref(PARENT_ID) = parent_id;
+ ParentIdAndNamesIndex::iterator found =
+ kernel_->parent_id_and_names_index->find(&kernel_->needle);
+ if (found != kernel_->parent_id_and_names_index->end()) {
+ // Found it in memory. Easy.
+ return *found;
+ }
+ return NULL;
+}
+
+// An interface to specify the details of which children
+// GetChildHandles() is looking for.
+struct PathMatcher {
+ explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { }
+ virtual ~PathMatcher() { }
+ enum MatchType {
+ NO_MATCH,
+ MATCH,
+ // Means we found the only entry we're looking for in
+ // memory so we don't need to check the DB.
+ EXACT_MATCH
+ };
+ virtual MatchType PathMatches(const PathString& path) = 0;
+ typedef Directory::ParentIdAndNamesIndex Index;
+ virtual Index::iterator lower_bound(Index* index) = 0;
+ virtual Index::iterator upper_bound(Index* index) = 0;
+ const Id parent_id_;
+ EntryKernel needle_;
+};
+
+// Matches all children.
+struct AllPathsMatcher : public PathMatcher {
+ explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
+ }
+ virtual MatchType PathMatches(const PathString& path) {
+ return MATCH;
+ }
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ return index->lower_bound(&needle_);
+ }
+
+ virtual Index::iterator upper_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ Index::iterator i = index->upper_bound(&needle_),
+ end = index->end();
+ while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
+ ++i;
+ return i;
+ }
+};
+
+// Matches an exact filename only; no wildcards.
+struct ExactPathMatcher : public PathMatcher {
+ ExactPathMatcher(const PathString& pathspec, const Id& parent_id)
+ : PathMatcher(parent_id), pathspec_(pathspec) {
+ }
+ virtual MatchType PathMatches(const PathString& path) {
+ return 0 == ComparePathNames(path, pathspec_) ? EXACT_MATCH : NO_MATCH;
+ }
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = pathspec_;
+ return index->lower_bound(&needle_);
+ }
+ virtual Index::iterator upper_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = pathspec_;
+ return index->upper_bound(&needle_);
+ }
+ const PathString pathspec_;
+};
+
+// Matches a pathspec with wildcards.
+struct PartialPathMatcher : public PathMatcher {
+ PartialPathMatcher(const PathString& pathspec,
+ PathString::size_type wildcard, const Id& parent_id)
+ : PathMatcher(parent_id), pathspec_(pathspec) {
+ if (0 == wildcard)
+ return;
+ lesser_.assign(pathspec_.data(), wildcard);
+ greater_.assign(pathspec_.data(), wildcard);
+ // Increment the last letter of greater so we can then less than
+ // compare to it.
+ PathString::size_type i = greater_.size() - 1;
+ do {
+ if (greater_[i] == std::numeric_limits<PathString::value_type>::max()) {
+ greater_.resize(i); // Try the preceding character.
+ if (0 == i--)
+ break;
+ } else {
+ greater_[i] += 1;
+ }
+ // Yes, there are cases where incrementing a character
+ // actually decreases its position in the sort. Example: 9 -> :
+ } while (ComparePathNames(lesser_, greater_) >= 0);
+ }
+
+ virtual MatchType PathMatches(const PathString& path) {
+ return PathNameMatch(path, pathspec_) ? MATCH : NO_MATCH;
+ }
+
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = lesser_;
+ return index->lower_bound(&needle_);
+ }
+ virtual Index::iterator upper_bound(Index* index) {
+ if (greater_.empty()) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ Index::iterator i = index->upper_bound(&needle_),
+ end = index->end();
+ while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
+ ++i;
+ return i;
+ } else {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = greater_;
+ return index->lower_bound(&needle_);
+ }
+ }
+
+ const PathString pathspec_;
+ PathString lesser_;
+ PathString greater_;
+};
+
+
+void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
+ Directory::ChildHandles* result) {
+ AllPathsMatcher matcher(parent_id);
+ return GetChildHandlesImpl(trans, parent_id, &matcher, result);
+}
+
+void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
+ PathMatcher* matcher,
+ Directory::ChildHandles* result) {
+ CHECK(this == trans->directory());
+ result->clear();
+ {
+ ScopedKernelLock lock(this);
+ ParentIdAndNamesIndex* const index =
+ kernel_->parent_id_and_names_index;
+ typedef ParentIdAndNamesIndex::iterator iterator;
+ for (iterator i = matcher->lower_bound(index),
+ end = matcher->upper_bound(index); i != end; ++i) {
+ // root's parent_id is NULL in the db but 0 in memory, so
+ // have avoid listing the root as its own child.
+ if ((*i)->ref(ID) == (*i)->ref(PARENT_ID))
+ continue;
+ PathMatcher::MatchType match = matcher->PathMatches((*i)->ref(NAME));
+ if (PathMatcher::NO_MATCH == match)
+ continue;
+ result->push_back((*i)->ref(META_HANDLE));
+ if (PathMatcher::EXACT_MATCH == match)
+ return;
+ }
+ }
+}
+
+EntryKernel* Directory::GetRootEntry() {
+ return GetEntryById(Id());
+}
+
+EntryKernel* Directory::GetEntryByPath(const PathString& path) {
+ CHECK(kernel_);
+ EntryKernel* result = GetRootEntry();
+ CHECK(result) << "There should always be a root node.";
+ for (PathSegmentIterator<PathString> i(path), end;
+ i != end && NULL != result; ++i) {
+ result = GetChildWithName(result->ref(ID), *i);
+ }
+ return result;
+}
+
+void ZeroFields(EntryKernel* entry, int first_field) {
+ int i = first_field;
+ // Note that bitset<> constructor sets all bits to zero, and strings
+ // initialize to empty.
+ for ( ; i < INT64_FIELDS_END; ++i)
+ entry->ref(static_cast<Int64Field>(i)) = 0;
+ for ( ; i < ID_FIELDS_END; ++i)
+ entry->ref(static_cast<IdField>(i)).Clear();
+ for ( ; i < BIT_FIELDS_END; ++i)
+ entry->ref(static_cast<BitField>(i)) = false;
+ if (i < BLOB_FIELDS_END)
+ i = BLOB_FIELDS_END;
+}
+
+void Directory::InsertEntry(EntryKernel* entry) {
+ ScopedKernelLock lock(this);
+ InsertEntry(entry, &lock);
+}
+
+void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
+ DCHECK(NULL != lock);
+ CHECK(NULL != entry);
+ static const char error[] = "Entry already in memory index.";
+ CHECK(kernel_->metahandles_index->insert(entry).second) << error;
+ if (!entry->ref(IS_DEL))
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second) << error;
+ CHECK(kernel_->ids_index->insert(entry).second) << error;
+}
+
+bool Directory::Undelete(EntryKernel* const entry) {
+ DCHECK(entry->ref(IS_DEL));
+ ScopedKernelLock lock(this);
+ if (NULL != GetChildWithName(entry->ref(PARENT_ID), entry->ref(NAME), &lock))
+ return false; // Would have duplicated existing entry.
+ entry->ref(IS_DEL) = false;
+ entry->dirty[IS_DEL] = true;
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
+ return true;
+}
+
+bool Directory::Delete(EntryKernel* const entry) {
+ DCHECK(!entry->ref(IS_DEL));
+ entry->ref(IS_DEL) = true;
+ entry->dirty[IS_DEL] = true;
+ ScopedKernelLock lock(this);
+ CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
+ return true;
+}
+
+bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
+ ScopedKernelLock lock(this);
+ if (NULL != GetEntryById(new_id, &lock))
+ return false;
+ CHECK(1 == kernel_->ids_index->erase(entry));
+ entry->ref(ID) = new_id;
+ CHECK(kernel_->ids_index->insert(entry).second);
+ return true;
+}
+
+bool Directory::ReindexParentIdAndName(EntryKernel* const entry,
+ const Id& new_parent_id,
+ const PathString& new_name) {
+ ScopedKernelLock lock(this);
+ PathString new_indexed_name = new_name;
+ if (entry->ref(IS_DEL)) {
+ entry->ref(PARENT_ID) = new_parent_id;
+ entry->ref(NAME) = new_indexed_name;
+ return true;
+ }
+
+ // check for a case changing rename
+ if (entry->ref(PARENT_ID) == new_parent_id &&
+ 0 == ComparePathNames(entry->ref(NAME), new_indexed_name)) {
+ entry->ref(NAME) = new_indexed_name;
+ } else {
+ if (NULL != GetChildWithName(new_parent_id, new_indexed_name, &lock))
+ return false;
+ CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
+ entry->ref(PARENT_ID) = new_parent_id;
+ entry->ref(NAME) = new_indexed_name;
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
+ }
+ return true;
+}
+
+// static
+bool Directory::SafeToPurgeFromMemory(const EntryKernel* const entry) {
+ return entry->ref(IS_DEL) && !entry->dirty.any() && !entry->ref(SYNCING) &&
+ !entry->ref(IS_UNAPPLIED_UPDATE) && !entry->ref(IS_UNSYNCED);
+}
+
+void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
+ ReadTransaction trans(this, __FILE__, __LINE__);
+ ScopedKernelLock lock(this);
+ // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
+ // clear dirty flags.
+ for (MetahandlesIndex::iterator i = kernel_->metahandles_index->begin();
+ i != kernel_->metahandles_index->end(); ++i) {
+ EntryKernel* entry = *i;
+ if (!entry->dirty.any())
+ continue;
+ snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
+ entry->dirty.reset();
+ // TODO(timsteele): The previous *windows only* SaveChanges code path seems
+ // to have a bug in that the IS_NEW bit is not rolled back if the entire DB
+ // transaction is rolled back, due to the "recent" windows optimization of
+ // using a ReadTransaction rather than a WriteTransaction in SaveChanges.
+ // This bit is only used to decide whether we should sqlite INSERT or
+ // UPDATE, and if we are INSERTing we make sure to dirty all the fields so
+ // as to overwrite the database default values. For now, this is rectified
+ // by flipping the bit to false here (note that the snapshot will contain
+ // the "original" value), and then resetting it on failure in
+ // HandleSaveChangesFailure, where "failure" is defined as "the DB
+ // "transaction was rolled back". This is safe because the only user of this
+ // bit is in fact SaveChanges, which enforces mutually exclusive access by
+ // way of save_changes_mutex_. The TODO is to consider abolishing this bit
+ // in favor of using a sqlite INSERT OR REPLACE, which could(would?) imply
+ // that all bits need to be written rather than just the dirty ones in
+ // the BindArg helper function.
+ entry->ref(IS_NEW) = false;
+ }
+
+ // Do the same for extended attributes.
+ for (ExtendedAttributes::iterator i = kernel_->extended_attributes->begin();
+ i != kernel_->extended_attributes->end(); ++i) {
+ if (!i->second.dirty)
+ continue;
+ snapshot->dirty_xattrs[i->first] = i->second;
+ i->second.dirty = false;
+ }
+
+ // Fill kernel_info_status and kernel_info.
+ PersistedKernelInfo& info = snapshot->kernel_info;
+ info.initial_sync_ended = kernel_->initial_sync_ended_;
+ info.last_sync_timestamp = kernel_->last_sync_timestamp_;
+ // To avoid duplicates when the process crashes, we record the next_id to be
+ // greater magnitude than could possibly be reached before the next save
+ // changes. In other words, it's effectively impossible for the user to
+ // generate 65536 new bookmarks in 3 seconds.
+ info.next_id = kernel_->next_id - 65536;
+ info.store_birthday = kernel_->store_birthday_;
+ snapshot->kernel_info_status = kernel_->info_status_;
+ // This one we reset on failure.
+ kernel_->info_status_ = KERNEL_SHARE_INFO_VALID;
+}
+
+bool Directory::SaveChanges() {
+ bool success = false;
+ DCHECK(store_);
+ PThreadScopedLock<PThreadMutex> lock(&kernel_->save_changes_mutex);
+ // Snapshot and save.
+ SaveChangesSnapshot snapshot;
+ TakeSnapshotForSaveChanges(&snapshot);
+ success = store_->SaveChanges(snapshot);
+
+ // Handle success or failure.
+ if (success)
+ VacuumAfterSaveChanges(snapshot);
+ else
+ HandleSaveChangesFailure(snapshot);
+ return success;
+}
+
+void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
+ // Need a write transaction as we are about to permanently purge entries.
+ WriteTransaction trans(this, VACUUM_AFTER_SAVE, __FILE__, __LINE__);
+ ScopedKernelLock lock(this);
+ kernel_->flushed_metahandles_.Push(0); // Begin flush marker
+ // Now drop everything we can out of memory.
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ i != snapshot.dirty_metas.end(); ++i) {
+ kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
+ NULL : *found);
+ if (entry && SafeToPurgeFromMemory(entry)) {
+ // We now drop deleted metahandles that are up to date on both the client
+ // and the server.
+ size_t num_erased = 0;
+ kernel_->flushed_metahandles_.Push(entry->ref(META_HANDLE));
+ num_erased = kernel_->ids_index->erase(entry);
+ DCHECK_EQ(1, num_erased);
+ num_erased = kernel_->metahandles_index->erase(entry);
+ DCHECK_EQ(1, num_erased);
+ delete entry;
+ }
+ }
+
+ ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ while (i != snapshot.dirty_xattrs.end()) {
+ ExtendedAttributeKey key(i->first.metahandle, i->first.key);
+ ExtendedAttributes::iterator found =
+ kernel_->extended_attributes->find(key);
+ if (found == kernel_->extended_attributes->end() ||
+ found->second.dirty || !i->second.is_deleted) {
+ ++i;
+ } else {
+ kernel_->extended_attributes->erase(found);
+ }
+ }
+}
+
+void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
+ ScopedKernelLock lock(this);
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+
+ // Because we cleared dirty bits on the real entries when taking the snapshot,
+ // we should make sure the fact that the snapshot was not persisted gets
+ // reflected in the entries. Not doing this would mean if no other changes
+ // occur to the same fields of the entries in dirty_metas some changes could
+ // end up being lost, if they also failed to be committed to the server.
+ // Setting the bits ensures that SaveChanges will at least try again later.
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ i != snapshot.dirty_metas.end(); ++i) {
+ kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ if (found != kernel_->metahandles_index->end()) {
+ (*found)->dirty |= i->dirty;
+ (*found)->ref(IS_NEW) = i->ref(IS_NEW);
+ }
+ }
+
+ for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ i != snapshot.dirty_xattrs.end(); ++i) {
+ ExtendedAttributeKey key(i->first.metahandle, i->first.key);
+ ExtendedAttributes::iterator found =
+ kernel_->extended_attributes->find(key);
+ if (found != kernel_->extended_attributes->end())
+ found->second.dirty = true;
+ }
+}
+
+int64 Directory::last_sync_timestamp() const {
+ ScopedKernelLock lock(this);
+ return kernel_->last_sync_timestamp_;
+}
+
+void Directory::set_last_sync_timestamp(int64 timestamp) {
+ ScopedKernelLock lock(this);
+ if (kernel_->last_sync_timestamp_ == timestamp)
+ return;
+ kernel_->last_sync_timestamp_ = timestamp;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+bool Directory::initial_sync_ended() const {
+ ScopedKernelLock lock(this);
+ return kernel_->initial_sync_ended_;
+}
+
+void Directory::set_initial_sync_ended(bool x) {
+ ScopedKernelLock lock(this);
+ if (kernel_->initial_sync_ended_ == x)
+ return;
+ kernel_->initial_sync_ended_ = x;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+string Directory::store_birthday() const {
+ ScopedKernelLock lock(this);
+ return kernel_->store_birthday_;
+}
+
+void Directory::set_store_birthday(string store_birthday) {
+ ScopedKernelLock lock(this);
+ if (kernel_->store_birthday_ == store_birthday)
+ return;
+ kernel_->store_birthday_ = store_birthday;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+string Directory::cache_guid() const {
+ // No need to lock since nothing ever writes to it after load.
+ return kernel_->cache_guid_;
+}
+
+void Directory::GetAllMetaHandles(BaseTransaction* trans,
+ MetahandleSet* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ MetahandlesIndex::iterator i;
+ for (i = kernel_->metahandles_index->begin();
+ i != kernel_->metahandles_index->end();
+ ++i) {
+ result->insert((*i)->ref(META_HANDLE));
+ }
+}
+
+void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
+ UnsyncedMetaHandles* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ copy(kernel_->unsynced_metahandles->begin(),
+ kernel_->unsynced_metahandles->end(), back_inserter(*result));
+}
+
+void Directory::GetAllExtendedAttributes(BaseTransaction* trans,
+ int64 metahandle,
+ std::set<ExtendedAttribute>* result) {
+ AttributeKeySet keys;
+ GetExtendedAttributesList(trans, metahandle, &keys);
+ AttributeKeySet::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ ExtendedAttributeKey key(metahandle, *iter);
+ ExtendedAttribute extended_attribute(trans, GET_BY_HANDLE, key);
+ CHECK(extended_attribute.good());
+ result->insert(extended_attribute);
+ }
+}
+
+void Directory::GetExtendedAttributesList(BaseTransaction* trans,
+ int64 metahandle, AttributeKeySet* result) {
+ ExtendedAttributes::iterator iter;
+ for (iter = kernel_->extended_attributes->begin();
+ iter != kernel_->extended_attributes->end(); ++iter) {
+ if (iter->first.metahandle == metahandle) {
+ if (!iter->second.is_deleted)
+ result->insert(iter->first.key);
+ }
+ }
+}
+
+void Directory::DeleteAllExtendedAttributes(WriteTransaction* trans,
+ int64 metahandle) {
+ AttributeKeySet keys;
+ GetExtendedAttributesList(trans, metahandle, &keys);
+ AttributeKeySet::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ ExtendedAttributeKey key(metahandle, *iter);
+ MutableExtendedAttribute attribute(trans, GET_BY_HANDLE, key);
+ // This flags the attribute for deletion during SaveChanges. At that time
+ // any deleted attributes are purged from disk and memory.
+ attribute.delete_attribute();
+ }
+}
+
+int64 Directory::unsynced_entity_count() const {
+ ScopedKernelLock lock(this);
+ return kernel_->unsynced_metahandles->size();
+}
+
+void Directory::GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
+ UnappliedUpdateMetaHandles* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ copy(kernel_->unapplied_update_metahandles->begin(),
+ kernel_->unapplied_update_metahandles->end(),
+ back_inserter(*result));
+}
+
+
+class IdFilter {
+ public:
+ virtual ~IdFilter() { }
+ virtual bool ShouldConsider(const Id& id) const = 0;
+};
+
+
+class FullScanFilter : public IdFilter {
+ public:
+ virtual bool ShouldConsider(const Id& id) const {
+ return true;
+ }
+};
+
+class SomeIdsFilter : public IdFilter {
+ public:
+ virtual bool ShouldConsider(const Id& id) const {
+ return binary_search(ids_.begin(), ids_.end(), id);
+ }
+ std::vector<Id> ids_;
+};
+
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const OriginalEntries* originals) {
+ MetahandleSet handles;
+ SomeIdsFilter filter;
+ filter.ids_.reserve(originals->size());
+ for (OriginalEntries::const_iterator i = originals->begin(),
+ end = originals->end(); i != end; ++i) {
+ Entry e(trans, GET_BY_HANDLE, i->ref(META_HANDLE));
+ CHECK(e.good());
+ filter.ids_.push_back(e.Get(ID));
+ handles.insert(i->ref(META_HANDLE));
+ }
+ std::sort(filter.ids_.begin(), filter.ids_.end());
+ CheckTreeInvariants(trans, handles, filter);
+}
+
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ bool full_scan) {
+ // TODO(timsteele): This is called every time a WriteTransaction finishes.
+ // The performance hit is substantial given that we now examine every single
+ // syncable entry. Need to redesign this.
+ MetahandleSet handles;
+ GetAllMetaHandles(trans, &handles);
+ if (full_scan) {
+ FullScanFilter fullfilter;
+ CheckTreeInvariants(trans, handles, fullfilter);
+ } else {
+ SomeIdsFilter filter;
+ MetahandleSet::iterator i;
+ for (i = handles.begin() ; i != handles.end() ; ++i) {
+ Entry e(trans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ filter.ids_.push_back(e.Get(ID));
+ }
+ sort(filter.ids_.begin(), filter.ids_.end());
+ CheckTreeInvariants(trans, handles, filter);
+ }
+}
+
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const MetahandleSet& handles,
+ const IdFilter& idfilter) {
+ int64 max_ms = kInvariantCheckMaxMs;
+ if (max_ms < 0)
+ max_ms = std::numeric_limits<int64>::max();
+ PerfTimer check_timer;
+ MetahandleSet::const_iterator i;
+ int entries_done = 0;
+ for (i = handles.begin() ; i != handles.end() ; ++i) {
+ int64 metahandle = *i;
+ Entry e(trans, GET_BY_HANDLE, metahandle);
+ CHECK(e.good());
+ syncable::Id id = e.Get(ID);
+ syncable::Id parentid = e.Get(PARENT_ID);
+
+ if (id.IsRoot()) {
+ CHECK(e.Get(IS_DIR)) << e;
+ CHECK(parentid.IsRoot()) << e;
+ CHECK(!e.Get(IS_UNSYNCED)) << e;
+ ++entries_done;
+ continue;
+ }
+ if (!e.Get(IS_DEL)) {
+ CHECK(id != parentid) << e;
+ CHECK(!e.Get(NAME).empty()) << e;
+ int safety_count = handles.size() + 1;
+ while (!parentid.IsRoot()) {
+ if (!idfilter.ShouldConsider(parentid))
+ break;
+ Entry parent(trans, GET_BY_ID, parentid);
+ CHECK(parent.good()) << e;
+ CHECK(parent.Get(IS_DIR)) << parent << e;
+ CHECK(!parent.Get(IS_DEL)) << parent << e;
+ CHECK(handles.end() != handles.find(parent.Get(META_HANDLE)))
+ << e << parent;
+ parentid = parent.Get(PARENT_ID);
+ CHECK(--safety_count >= 0) << e << parent;
+ }
+ }
+ int64 base_version = e.Get(BASE_VERSION);
+ int64 server_version = e.Get(SERVER_VERSION);
+ if (CHANGES_VERSION == base_version || 0 == base_version) {
+ if (e.Get(IS_UNAPPLIED_UPDATE)) {
+ // Unapplied new item.
+ CHECK(e.Get(IS_DEL)) << e;
+ CHECK(id.ServerKnows()) << e;
+ } else {
+ // Uncommitted item.
+ if (!e.Get(IS_DEL)) {
+ CHECK(e.Get(IS_UNSYNCED)) << e;
+ }
+ CHECK(0 == server_version) << e;
+ CHECK(!id.ServerKnows()) << e;
+ }
+ } else {
+ CHECK(id.ServerKnows());
+ }
+ ++entries_done;
+ int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
+ if (elapsed_ms > max_ms) {
+ LOG(INFO) << "Cutting Invariant check short after " << elapsed_ms << "ms."
+ " Processed " << entries_done << "/" << handles.size() << " entries";
+ return;
+ }
+ }
+ // I did intend to add a check here to ensure no entries had been pulled into
+ // memory by this function, but we can't guard against another ReadTransaction
+ // pulling entries into RAM
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// ScopedKernelLocks
+
+ScopedKernelLock::ScopedKernelLock(const Directory* dir)
+ : dir_(const_cast<Directory*>(dir)) {
+ // Swap out the dbhandle to enforce the "No IO while holding kernel
+ // lock" rule.
+ // HA!! Yeah right. What about your pre-cached queries :P
+ pthread_mutex_lock(&dir->kernel_->mutex);
+}
+ScopedKernelLock::~ScopedKernelLock() {
+ pthread_mutex_unlock(&dir_->kernel_->mutex);
+}
+
+ScopedKernelUnlock::ScopedKernelUnlock(ScopedKernelLock* lock)
+ : lock_(lock) {
+ pthread_mutex_unlock(&lock->dir_->kernel_->mutex);
+}
+ScopedKernelUnlock::~ScopedKernelUnlock() {
+ pthread_mutex_lock(&lock_->dir_->kernel_->mutex);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Transactions
+#if defined LOG_ALL || !defined NDEBUG
+static const bool kLoggingInfo = true;
+#else
+static const bool kLoggingInfo = false;
+#endif
+
+ThreadNode* BaseTransaction::MakeThreadNode() {
+ ThreadNode* node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dirkernel_->thread_node_key));
+ if (NULL == node) {
+ node = new ThreadNode;
+ node->id = GetCurrentThreadId();
+ pthread_setspecific(dirkernel_->thread_node_key, node);
+ } else if (node->in_list) {
+ logging::LogMessage(source_file_, line_, logging::LOG_FATAL).stream()
+ << " Recursive Lock attempt by thread id " << node->id << "." << std::endl
+ << "Already entered transaction at " << node->file << ":" << node->line;
+ }
+ node->file = source_file_;
+ node->line = line_;
+ node->wait_started = base::TimeTicks::Now();
+ return node;
+}
+
+void BaseTransaction::Lock(ThreadCounts* const thread_counts,
+ ThreadNode* node, TransactionClass tclass) {
+ ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
+ // Increment the waiters count.
+ node->tclass = tclass;
+ thread_counts->waiting += 1;
+ node->Insert(&thread_counts->waiting_headtail);
+
+ // Block until we can own the reader/writer lock
+ bool ready = 1 == thread_counts->waiting;
+ while (true) {
+ if (ready) {
+ if (0 == thread_counts->active) {
+ // We can take the lock because there is no contention.
+ break;
+ } else if (READ == tclass
+ && READ == thread_counts->active_headtail.next->tclass) {
+ // We can take the lock because reads can run simultaneously.
+ break;
+ }
+ }
+ // Wait to be woken up and check again.
+ node->wake_up = false;
+ do {
+ CHECK(0 == pthread_cond_wait(&node->condvar.condvar_,
+ &dirkernel_->transaction_mutex.mutex_));
+ } while (!node->wake_up);
+ ready = true;
+ }
+
+ // Move from the list of waiters to the list of active.
+ thread_counts->waiting -= 1;
+ thread_counts->active += 1;
+ CHECK(WRITE != tclass || 1 == thread_counts->active);
+ node->Remove();
+ node->Insert(&thread_counts->active_headtail);
+ if (WRITE == tclass)
+ node->current_write_trans = static_cast<WriteTransaction*>(this);
+}
+
+void BaseTransaction::AfterLock(ThreadNode* node) {
+ time_acquired_ = base::TimeTicks::Now();
+
+ const base::TimeDelta elapsed = time_acquired_ - node->wait_started;
+ if (kLoggingInfo && elapsed.InMilliseconds() > 200) {
+ logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
+ << name_ << " transaction waited "
+ << elapsed.InSecondsF() << " seconds.";
+ }
+}
+
+void BaseTransaction::Init(ThreadCounts* const thread_counts,
+ TransactionClass tclass) {
+ ThreadNode* const node = MakeThreadNode();
+ Lock(thread_counts, node, tclass);
+ AfterLock(node);
+}
+
+BaseTransaction::BaseTransaction(Directory* directory, const char* name,
+ const char* source_file, int line)
+ : directory_(directory), dirkernel_(directory->kernel_), name_(name),
+ source_file_(source_file), line_(line) {
+}
+
+void BaseTransaction::UnlockAndLog(ThreadCounts* const thread_counts,
+ OriginalEntries* originals_arg) {
+ scoped_ptr<OriginalEntries> originals(originals_arg);
+ const base::TimeDelta elapsed = base::TimeTicks::Now() - time_acquired_;
+ if (kLoggingInfo && elapsed.InMilliseconds() > 50) {
+ logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
+ << name_ << " transaction completed in " << elapsed.InSecondsF()
+ << " seconds.";
+ }
+
+ {
+ ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
+ // Let go of the reader/writer lock
+ thread_counts->active -= 1;
+ ThreadNode* const node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dirkernel_->thread_node_key));
+ CHECK(node != NULL);
+ node->Remove();
+ node->current_write_trans = NULL;
+ if (0 == thread_counts->active) {
+ // Wake up a waiting thread, FIFO
+ if (dirkernel_->thread_counts.waiting > 0) {
+ ThreadNode* const headtail =
+ &dirkernel_->thread_counts.waiting_headtail;
+ ThreadNode* node = headtail->next;
+ node->wake_up = true;
+ CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
+ if (READ == node->tclass) do {
+ // Wake up all consecutive readers.
+ node = node->next;
+ if (node == headtail)
+ break;
+ if (READ != node->tclass)
+ break;
+ node->wake_up = true;
+ CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
+ } while (true);
+ }
+ }
+ if (NULL == originals.get() || originals->empty())
+ return;
+ dirkernel_->changes_channel_mutex.Lock();
+ // Tell listeners to calculate changes while we still have the mutex.
+ DirectoryChangeEvent event = { DirectoryChangeEvent::CALCULATE_CHANGES,
+ originals.get(), this, writer_ };
+ dirkernel_->changes_channel->NotifyListeners(event);
+ }
+ DirectoryChangeEvent event = { DirectoryChangeEvent::TRANSACTION_COMPLETE,
+ NULL, NULL, INVALID };
+ dirkernel_->changes_channel->NotifyListeners(event);
+ dirkernel_->changes_channel_mutex.Unlock();
+}
+
+ReadTransaction::ReadTransaction(Directory* directory, const char* file,
+ int line)
+ : BaseTransaction(directory, "Read", file, line) {
+ Init(&dirkernel_->thread_counts, READ);
+ writer_ = INVALID;
+}
+
+ReadTransaction::ReadTransaction(const ScopedDirLookup& scoped_dir,
+ const char* file, int line)
+ : BaseTransaction(scoped_dir.operator -> (), "Read", file, line) {
+ Init(&dirkernel_->thread_counts, READ);
+ writer_ = INVALID;
+}
+
+ReadTransaction::~ReadTransaction() {
+ UnlockAndLog(&dirkernel_->thread_counts, NULL);
+}
+
+WriteTransaction::WriteTransaction(Directory* directory, WriterTag writer,
+ const char* file, int line)
+ : BaseTransaction(directory, "Write", file, line), skip_destructor_(false),
+ originals_(new OriginalEntries) {
+ Init(&dirkernel_->thread_counts, WRITE);
+ writer_ = writer;
+}
+
+WriteTransaction::WriteTransaction(const ScopedDirLookup& scoped_dir,
+ WriterTag writer, const char* file, int line)
+ : BaseTransaction(scoped_dir.operator -> (), "Write", file, line),
+ skip_destructor_(false), originals_(new OriginalEntries) {
+ Init(&dirkernel_->thread_counts, WRITE);
+ writer_ = writer;
+}
+
+WriteTransaction::WriteTransaction(Directory* directory, const char* name,
+ WriterTag writer,
+ const char* file, int line,
+ bool skip_destructor,
+ OriginalEntries* originals)
+ : BaseTransaction(directory, name, file, line),
+ skip_destructor_(skip_destructor), originals_(originals) {
+ writer_ = writer;
+}
+
+void WriteTransaction::SaveOriginal(EntryKernel* entry) {
+ if (NULL == entry)
+ return;
+ OriginalEntries::iterator i = originals_->lower_bound(*entry);
+ if (i == originals_->end() ||
+ i->ref(META_HANDLE) != entry->ref(META_HANDLE)) {
+ originals_->insert(i, *entry);
+ }
+}
+
+WriteTransaction::~WriteTransaction() {
+ if (skip_destructor_)
+ return;
+ if (OFF != kInvariantCheckLevel) {
+ const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
+ if (full_scan)
+ directory()->CheckTreeInvariants(this, full_scan);
+ else
+ directory()->CheckTreeInvariants(this, originals_);
+ }
+ UnlockAndLog(&dirkernel_->thread_counts, originals_);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Entry
+
+Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryById(id);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByTag, const PathString& tag)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByTag(tag);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByHandle(metahandle);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByPath, const PathString& path)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByPath(path);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& parentid,
+ const PathString& name)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetChildWithName(parentid, name);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& parentid,
+ const PathString& name)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetChildWithDBName(parentid, name);
+}
+
+
+Directory* Entry::dir() const {
+ return basetrans_->directory();
+}
+
+PathString Entry::Get(StringField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+}
+
+void Entry::GetAllExtendedAttributes(BaseTransaction* trans,
+ std::set<ExtendedAttribute> *result) {
+ dir()->GetAllExtendedAttributes(trans, kernel_->ref(META_HANDLE), result);
+}
+
+void Entry::GetExtendedAttributesList(BaseTransaction* trans,
+ AttributeKeySet* result) {
+ dir()->GetExtendedAttributesList(trans, kernel_->ref(META_HANDLE), result);
+}
+
+void Entry::DeleteAllExtendedAttributes(WriteTransaction *trans) {
+ dir()->DeleteAllExtendedAttributes(trans, kernel_->ref(META_HANDLE));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// MutableEntry
+
+MutableEntry::MutableEntry(WriteTransaction* trans, Create,
+ const Id& parent_id, const PathString& name)
+ : Entry(trans) {
+ if (NULL != trans->directory()->GetChildWithName(parent_id, name)) {
+ kernel_ = NULL; // would have duplicated an existing entry.
+ return;
+ }
+ Init(trans, parent_id, name);
+}
+
+
+void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
+ const PathString& name) {
+ kernel_ = new EntryKernel;
+ ZeroFields(kernel_, BEGIN_FIELDS);
+ kernel_->ref(ID) = trans->directory_->NextId();
+ kernel_->dirty[ID] = true;
+ kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ kernel_->dirty[META_HANDLE] = true;
+ kernel_->ref(PARENT_ID) = parent_id;
+ kernel_->dirty[PARENT_ID] = true;
+ kernel_->ref(NAME) = name;
+ kernel_->dirty[NAME] = true;
+ kernel_->ref(NON_UNIQUE_NAME) = name;
+ kernel_->dirty[NON_UNIQUE_NAME] = true;
+ kernel_->ref(IS_NEW) = true;
+ const int64 now = Now();
+ kernel_->ref(CTIME) = now;
+ kernel_->dirty[CTIME] = true;
+ kernel_->ref(MTIME) = now;
+ kernel_->dirty[MTIME] = true;
+ // We match the database defaults here
+ kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ trans->directory()->InsertEntry(kernel_);
+ // Because this entry is new, it was originally deleted.
+ kernel_->ref(IS_DEL) = true;
+ trans->SaveOriginal(kernel_);
+ kernel_->ref(IS_DEL) = false;
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
+ const Id& id)
+ : Entry(trans) {
+ Entry same_id(trans, GET_BY_ID, id);
+ if (same_id.good()) {
+ kernel_ = NULL; // already have an item with this ID.
+ return;
+ }
+ kernel_ = new EntryKernel;
+ ZeroFields(kernel_, BEGIN_FIELDS);
+ kernel_->ref(ID) = id;
+ kernel_->dirty[ID] = true;
+ kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ kernel_->dirty[META_HANDLE] = true;
+ kernel_->ref(IS_DEL) = true;
+ kernel_->dirty[IS_DEL] = true;
+ kernel_->ref(IS_NEW) = true;
+ // We match the database defaults here
+ kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ trans->directory()->InsertEntry(kernel_);
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
+ : Entry(trans, GET_BY_ID, id) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
+ int64 metahandle)
+ : Entry(trans, GET_BY_HANDLE, metahandle) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByPath,
+ const PathString& path)
+ : Entry(trans, GET_BY_PATH, path) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndName,
+ const Id& parentid, const PathString& name)
+ : Entry(trans, GET_BY_PARENTID_AND_NAME, parentid, name) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
+ const Id& parentid, const PathString& name)
+ : Entry(trans, GET_BY_PARENTID_AND_DBNAME, parentid, name) {
+ trans->SaveOriginal(kernel_);
+}
+
+bool MutableEntry::PutIsDel(bool is_del) {
+ DCHECK(kernel_);
+ if (is_del == kernel_->ref(IS_DEL))
+ return true;
+ if (is_del) {
+ UnlinkFromOrder();
+ if (!dir()->Delete(kernel_))
+ return false;
+ return true;
+ } else {
+ if (!dir()->Undelete(kernel_))
+ return false;
+ PutPredecessor(Id()); // Restores position to the 0th index.
+ return true;
+ }
+}
+
+bool MutableEntry::Put(Int64Field field, const int64& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(IdField field, const Id& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ if (ID == field) {
+ if (!dir()->ReindexId(kernel_, value))
+ return false;
+ } else if (PARENT_ID == field) {
+ if (!dir()->ReindexParentIdAndName(kernel_, value, kernel_->ref(NAME)))
+ return false;
+ } else {
+ kernel_->ref(field) = value;
+ }
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+WriteTransaction* MutableEntry::trans() const {
+ // We are in a mutable entry, so we must be in a write transaction.
+ // Maybe we could keep a pointer to the transaction in MutableEntry.
+ ThreadNode* node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dir()->kernel_->thread_node_key));
+ return node->current_write_trans;
+}
+
+bool MutableEntry::Put(BaseVersion field, int64 value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(StringField field, const PathString& value) {
+ return PutImpl(field, value);
+}
+
+bool MutableEntry::PutImpl(StringField field, const PathString& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ if (NAME == field) {
+ if (!dir()->ReindexParentIdAndName(kernel_, kernel_->ref(PARENT_ID),
+ value))
+ return false;
+ } else {
+ kernel_->ref(field) = value;
+ }
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(IndexedBitField field, bool value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ MetahandleSet* index;
+ if (IS_UNSYNCED == field)
+ index = dir()->kernel_->unsynced_metahandles;
+ else
+ index = dir()->kernel_->unapplied_update_metahandles;
+
+ ScopedKernelLock lock(dir());
+ if (value)
+ CHECK(index->insert(kernel_->ref(META_HANDLE)).second);
+ else
+ CHECK(1 == index->erase(kernel_->ref(META_HANDLE)));
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+// Avoids temporary collision in index when renaming a bookmark
+// to another folder.
+bool MutableEntry::PutParentIdAndName(const Id& parent_id,
+ const Name& name) {
+ DCHECK(kernel_);
+ const bool parent_id_changes = parent_id != kernel_->ref(PARENT_ID);
+ bool db_name_changes = name.db_value() != kernel_->ref(NAME);
+ if (parent_id_changes || db_name_changes) {
+ if (!dir()->ReindexParentIdAndName(kernel_, parent_id,
+ name.db_value()))
+ return false;
+ }
+ Put(UNSANITIZED_NAME, name.GetUnsanitizedName());
+ Put(NON_UNIQUE_NAME, name.non_unique_value());
+ if (db_name_changes)
+ kernel_->dirty[NAME] = true;
+ if (parent_id_changes) {
+ kernel_->dirty[PARENT_ID] = true;
+ PutPredecessor(Id()); // Put in the 0th position.
+ }
+ return true;
+}
+
+void MutableEntry::UnlinkFromOrder() {
+ Id old_previous = Get(PREV_ID);
+ Id old_next = Get(NEXT_ID);
+
+ // Self-looping signifies that this item is not in the order. If
+ // we were to set these to 0, we could get into trouble because
+ // this node might look like the first node in the ordering.
+ Put(NEXT_ID, Get(ID));
+ Put(PREV_ID, Get(ID));
+
+ if (!old_previous.IsRoot()) {
+ if (old_previous == old_next) {
+ // Note previous == next doesn't imply previous == next == Get(ID). We
+ // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
+ // and deleted before receiving the server ID in the commit response.
+ CHECK((old_next == Get(ID)) || !old_next.ServerKnows());
+ return; // Done if we were already self-looped (hence unlinked).
+ }
+ MutableEntry previous_entry(trans(), GET_BY_ID, old_previous);
+ CHECK(previous_entry.good());
+ previous_entry.Put(NEXT_ID, old_next);
+ }
+
+ if (!old_next.IsRoot()) {
+ MutableEntry next_entry(trans(), GET_BY_ID, old_next);
+ CHECK(next_entry.good());
+ next_entry.Put(PREV_ID, old_previous);
+ }
+}
+
+bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
+ // TODO(ncarter): Maybe there should be an independent HAS_POSITION bit?
+ if (!Get(IS_BOOKMARK_OBJECT))
+ return true;
+ UnlinkFromOrder();
+
+ if (Get(IS_DEL)) {
+ DCHECK(predecessor_id.IsNull());
+ return true;
+ }
+
+ // This is classic insert-into-doubly-linked-list from CS 101 and your last
+ // job interview. An "IsRoot" Id signifies the head or tail.
+ Id successor_id;
+ if (!predecessor_id.IsRoot()) {
+ MutableEntry predecessor(trans(), GET_BY_ID, predecessor_id);
+ CHECK(predecessor.good());
+ if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
+ return false;
+ successor_id = predecessor.Get(NEXT_ID);
+ predecessor.Put(NEXT_ID, Get(ID));
+ } else {
+ syncable::Directory* dir = trans()->directory();
+ successor_id = dir->GetFirstChildId(trans(), Get(PARENT_ID));
+ }
+ if (!successor_id.IsRoot()) {
+ MutableEntry successor(trans(), GET_BY_ID, successor_id);
+ CHECK(successor.good());
+ if (successor.Get(PARENT_ID) != Get(PARENT_ID))
+ return false;
+ successor.Put(PREV_ID, Get(ID));
+ }
+ DCHECK(predecessor_id != Get(ID));
+ DCHECK(successor_id != Get(ID));
+ Put(PREV_ID, predecessor_id);
+ Put(NEXT_ID, successor_id);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// High-level functions
+
+int64 Directory::NextMetahandle() {
+ ScopedKernelLock lock(this);
+ int64 metahandle = (kernel_->next_metahandle)++;
+ return metahandle;
+}
+
+// Always returns a client ID that is the string representation of a negative
+// number.
+Id Directory::NextId() {
+ int64 result;
+ {
+ ScopedKernelLock lock(this);
+ result = (kernel_->next_id)--;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+ }
+ DCHECK_LT(result, 0);
+ return Id::CreateFromClientString(Int64ToString(result));
+}
+
+Id Directory::GetChildWithNullIdField(IdField field,
+ BaseTransaction* trans,
+ const Id& parent_id) {
+ // This query is O(number of children), which should be acceptable
+ // when this method is used as the first step in enumerating the children of
+ // a node. But careless use otherwise could potentially result in
+ // O((number of children)^2) performance.
+ ChildHandles child_handles;
+ GetChildHandles(trans, parent_id, &child_handles);
+ ChildHandles::const_iterator it;
+ for (it = child_handles.begin(); it != child_handles.end(); ++it) {
+ Entry e(trans, GET_BY_HANDLE, *it);
+ CHECK(e.good());
+ if (e.Get(field).IsRoot())
+ return e.Get(ID);
+ }
+
+ return Id();
+}
+
+Id Directory::GetFirstChildId(BaseTransaction* trans,
+ const Id& parent_id) {
+ return GetChildWithNullIdField(PREV_ID, trans, parent_id);
+}
+
+Id Directory::GetLastChildId(BaseTransaction* trans,
+ const Id& parent_id) {
+ return GetChildWithNullIdField(NEXT_ID, trans, parent_id);
+}
+
+ExtendedAttribute::ExtendedAttribute(BaseTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key) {
+ Directory::Kernel* const kernel = trans->directory()->kernel_;
+ ScopedKernelLock lock(trans->directory());
+ Init(trans, kernel, &lock, key);
+}
+
+bool ExtendedAttribute::Init(BaseTransaction* trans,
+ Directory::Kernel* const kernel,
+ ScopedKernelLock* lock,
+ const ExtendedAttributeKey& key) {
+ i_ = kernel->extended_attributes->find(key);
+ good_ = kernel->extended_attributes->end() != i_;
+ return good_;
+}
+
+MutableExtendedAttribute::MutableExtendedAttribute(
+ WriteTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key) :
+ ExtendedAttribute(trans, GET_BY_HANDLE, key) {
+}
+
+MutableExtendedAttribute::MutableExtendedAttribute(
+ WriteTransaction* trans, Create, const ExtendedAttributeKey& key) {
+ Directory::Kernel* const kernel = trans->directory()->kernel_;
+ ScopedKernelLock lock(trans->directory());
+ if (!Init(trans, kernel, &lock, key)) {
+ ExtendedAttributeValue val;
+ val.dirty = true;
+ i_ = kernel->extended_attributes->insert(std::make_pair(key, val)).first;
+ good_ = true;
+ }
+}
+
+bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
+ const Id& new_parent_id) {
+ if (entry_id.IsRoot())
+ return false;
+ // we have to ensure that the entry is not an ancestor of the new parent.
+ Id ancestor_id = new_parent_id;
+ while (!ancestor_id.IsRoot()) {
+ if (entry_id == ancestor_id)
+ return false;
+ Entry new_parent(trans, GET_BY_ID, ancestor_id);
+ CHECK(new_parent.good());
+ ancestor_id = new_parent.Get(PARENT_ID);
+ }
+ return true;
+}
+
+// returns -1 if s contains any non [0-9] characters
+static int PathStringToInteger(PathString s) {
+ PathString::const_iterator i = s.begin();
+ for (; i != s.end(); ++i) {
+ if (PathString::npos == PathString(PSTR("0123456789")).find(*i))
+ return -1;
+ }
+ return
+#if !PATHSTRING_IS_STD_STRING
+ _wtoi
+#else
+ atoi
+#endif
+ (s.c_str());
+}
+
+static PathString IntegerToPathString(int i) {
+ const size_t kBufSize = 25;
+ PathChar buf[kBufSize];
+#if !PATHSTRING_IS_STD_STRING
+ const int radix = 10;
+ _itow(i, buf, radix);
+#else
+ snprintf(buf, kBufSize, "%d", i);
+#endif
+ return buf;
+}
+
+// appends ~1 to the end of 's' unless there is already ~#, in which case
+// it just increments the number
+static PathString FixBasenameInCollision(const PathString s) {
+ PathString::size_type last_tilde = s.find_last_of(PSTR('~'));
+ if (PathString::npos == last_tilde) return s + PSTR("~1");
+ if (s.size() == (last_tilde + 1)) return s + PSTR("1");
+ // we have ~, but not necessarily ~# (for some number >= 0). check for that
+ int n;
+ if ((n = PathStringToInteger(s.substr(last_tilde + 1))) != -1) {
+ n++;
+ PathString pre_number = s.substr(0, last_tilde + 1);
+ return pre_number + IntegerToPathString(n);
+ } else {
+ // we have a ~, but not a number following it, so we'll add another
+ // ~ and this time, a number
+ return s + PSTR("~1");
+ }
+}
+
+void DBName::MakeNoncollidingForEntry(BaseTransaction* trans,
+ const Id& parent_id,
+ Entry *e) {
+ const PathString& desired_name = *this;
+ CHECK(!desired_name.empty());
+ PathString::size_type first_dot = desired_name.find_first_of(PSTR('.'));
+ if (PathString::npos == first_dot)
+ first_dot = desired_name.size();
+ PathString basename = desired_name.substr(0, first_dot);
+ PathString dotextension = desired_name.substr(first_dot);
+ CHECK(basename + dotextension == desired_name);
+ for (;;) {
+ // check for collision
+ PathString testname = basename + dotextension;
+ Entry same_path_entry(trans, GET_BY_PARENTID_AND_DBNAME,
+ parent_id, testname);
+ if (!same_path_entry.good() || (e && same_path_entry.Get(ID) == e->Get(ID)))
+ break;
+ // there was a collision, so fix the name
+ basename = FixBasenameInCollision(basename);
+ }
+ // Set our value to the new value. This invalidates desired_name.
+ PathString new_value = basename + dotextension;
+ swap(new_value);
+}
+
+PathString GetFullPath(BaseTransaction* trans, const Entry& e) {
+ PathString result;
+#ifdef STL_MSVC
+ result.reserve(MAX_PATH);
+#endif
+ ReverseAppend(e.Get(NAME), &result);
+ Id id = e.Get(PARENT_ID);
+ while (!id.IsRoot()) {
+ result.push_back(kPathSeparator[0]);
+ Entry ancestor(trans, GET_BY_ID, id);
+ if (!ancestor.good()) {
+ // This can happen if the parent folder got deleted before the entry.
+ LOG(WARNING) << "Cannot get full path of " << e
+ << "\nbecause an ancestor folder has been deleted.";
+ result.clear();
+ return result;
+ }
+ ReverseAppend(ancestor.Get(NAME), &result);
+ id = ancestor.Get(PARENT_ID);
+ }
+ result.push_back(kPathSeparator[0]);
+ reverse(result.begin(), result.end());
+ return result;
+}
+
+const Blob* GetExtendedAttributeValue(const Entry& e,
+ const PathString& attribute_name) {
+ ExtendedAttributeKey key(e.Get(META_HANDLE), attribute_name);
+ ExtendedAttribute extended_attribute(e.trans(), GET_BY_HANDLE, key);
+ if (extended_attribute.good() && !extended_attribute.is_deleted())
+ return &extended_attribute.value();
+ return NULL;
+}
+
+// This function sets only the flags needed to get this entry to sync.
+void MarkForSyncing(syncable::MutableEntry* e) {
+ DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
+ DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
+ e->Put(IS_UNSYNCED, true);
+ e->Put(SYNCING, false);
+}
+
+} // namespace syncable
+
+namespace {
+ class DumpSeparator {
+ } separator;
+ class DumpColon {
+ } colon;
+} // namespace
+
+inline FastDump& operator<<(FastDump& dump, const DumpSeparator&) {
+ dump.out_->sputn(", ", 2);
+ return dump;
+}
+
+inline FastDump& operator<<(FastDump& dump, const DumpColon&) {
+ dump.out_->sputn(": ", 2);
+ return dump;
+}
+
+std::ostream& operator<<(std::ostream& stream, const syncable::Entry& entry) {
+ // Using ostreams directly here is dreadfully slow, because a mutex is
+ // acquired for every <<. Users noticed it spiking CPU.
+ using browser_sync::ToUTF8;
+ using syncable::BitField;
+ using syncable::BitTemp;
+ using syncable::BlobField;
+ using syncable::EntryKernel;
+ using syncable::g_metas_columns;
+ using syncable::IdField;
+ using syncable::Int64Field;
+ using syncable::StringField;
+ using syncable::BEGIN_FIELDS;
+ using syncable::BIT_FIELDS_END;
+ using syncable::BIT_TEMPS_BEGIN;
+ using syncable::BIT_TEMPS_END;
+ using syncable::BLOB_FIELDS_END;
+ using syncable::INT64_FIELDS_END;
+ using syncable::ID_FIELDS_END;
+ using syncable::STRING_FIELDS_END;
+
+ int i;
+ FastDump s(&stream);
+ EntryKernel* const kernel = entry.kernel_;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<Int64Field>(i)) << separator;
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<IdField>(i)) << separator;
+ }
+ s << "Flags: ";
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ if (kernel->ref(static_cast<BitField>(i)))
+ s << g_metas_columns[i].name << separator;
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ ToUTF8 field(kernel->ref(static_cast<StringField>(i)));
+ s << g_metas_columns[i].name << colon << field.get_string() << separator;
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<BlobField>(i)) << separator;
+ }
+ s << "TempFlags: ";
+ for ( ; i < BIT_TEMPS_END; ++i) {
+ if (kernel->ref(static_cast<BitTemp>(i)))
+ s << "#" << i - BIT_TEMPS_BEGIN << separator;
+ }
+ return stream;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::Blob& blob) {
+ for (syncable::Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
+ s << std::hex << std::setw(2)
+ << std::setfill('0') << static_cast<unsigned int>(*i);
+ return s << std::dec;
+}
+
+FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) {
+ if (blob.empty())
+ return dump;
+ string buffer(HexEncode(&blob[0], blob.size()));
+ dump.out_->sputn(buffer.c_str(), buffer.size());
+ return dump;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::ThreadNode& node) {
+ s << "thread id: " << std::hex << node.id << "\n"
+ << "file: " << node.file << "\n"
+ << "line: " << std::dec << node.line << "\n"
+ << "wait_started: " << node.wait_started.ToInternalValue();
+ return s;
+}
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
new file mode 100644
index 0000000..d2e8353
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -0,0 +1,1419 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
+
+#include <algorithm>
+#include <bitset>
+#include <iosfwd>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/time.h"
+#include "chrome/browser/sync/syncable/blob.h"
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/directory_event.h"
+#include "chrome/browser/sync/syncable/path_name_cmp.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/compat-pthread.h"
+#include "chrome/browser/sync/util/dbgq.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/row_iterator.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+struct PurgeInfo;
+
+namespace sync_api {
+class ReadTransaction;
+class WriteNode;
+class ReadNode;
+}
+
+namespace syncable {
+class Entry;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::Entry& e);
+
+namespace syncable {
+
+class DirectoryBackingStore;
+
+static const int64 kInvalidMetaHandle = 0;
+
+enum {
+ BEGIN_FIELDS = 0,
+ INT64_FIELDS_BEGIN = BEGIN_FIELDS
+};
+
+enum MetahandleField {
+ // Primary key into the table. Keep this as a handle to the meta entry
+ // across transactions.
+ META_HANDLE = INT64_FIELDS_BEGIN
+};
+
+enum BaseVersion {
+ // After initial upload, the version is controlled by the server, and is
+ // increased whenever the data or metadata changes on the server.
+ BASE_VERSION = META_HANDLE + 1,
+};
+
+enum Int64Field {
+ SERVER_VERSION = BASE_VERSION + 1,
+ MTIME,
+ SERVER_MTIME,
+ CTIME,
+ SERVER_CTIME,
+
+ // A numeric position value that indicates the relative ordering of
+ // this object among its siblings.
+ SERVER_POSITION_IN_PARENT,
+
+ LOCAL_EXTERNAL_ID, // ID of an item in the external local storage that this
+ // entry is associated with. (such as bookmarks.js)
+
+ INT64_FIELDS_END
+};
+
+enum {
+ INT64_FIELDS_COUNT = INT64_FIELDS_END,
+ ID_FIELDS_BEGIN = INT64_FIELDS_END,
+};
+
+enum IdField {
+ // Code in InitializeTables relies on ID being the first IdField value.
+ ID = ID_FIELDS_BEGIN,
+ PARENT_ID,
+ SERVER_PARENT_ID,
+
+ PREV_ID,
+ NEXT_ID,
+ ID_FIELDS_END
+};
+
+enum {
+ ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
+ BIT_FIELDS_BEGIN = ID_FIELDS_END
+};
+
+enum IndexedBitField {
+ IS_UNSYNCED = BIT_FIELDS_BEGIN,
+ IS_UNAPPLIED_UPDATE,
+ INDEXED_BIT_FIELDS_END,
+};
+
+enum IsDelField {
+ IS_DEL = INDEXED_BIT_FIELDS_END,
+};
+
+enum BitField {
+ IS_DIR = IS_DEL + 1,
+ IS_BOOKMARK_OBJECT,
+
+ SERVER_IS_DIR,
+ SERVER_IS_DEL,
+ SERVER_IS_BOOKMARK_OBJECT,
+
+ BIT_FIELDS_END
+};
+
+enum {
+ BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
+ STRING_FIELDS_BEGIN = BIT_FIELDS_END
+};
+
+enum StringField {
+ // The name, transformed so as to be suitable for use as a path-element. It
+ // is unique, and legal for this client.
+ NAME = STRING_FIELDS_BEGIN,
+ // The local name, pre-sanitization. It is not necessarily unique. If this
+ // is empty, it means |NAME| did not require sanitization.
+ UNSANITIZED_NAME,
+ // If NAME/UNSANITIZED_NAME are "Foo (2)", then NON_UNIQUE_NAME may be "Foo".
+ NON_UNIQUE_NAME,
+ // The server version of |NAME|. It is uniquified, but not necessarily
+ // OS-legal.
+ SERVER_NAME,
+ // The server version of |NON_UNIQUE_NAME|. Again, if SERVER_NAME is
+ // like "Foo (2)" due to a commit-time name aside, SERVER_NON_UNIQUE_NAME
+ // may hold the value "Foo".
+ SERVER_NON_UNIQUE_NAME,
+ // For bookmark entries, the URL of the bookmark.
+ BOOKMARK_URL,
+ SERVER_BOOKMARK_URL,
+
+ // A tag string which identifies this node as a particular top-level
+ // permanent object. The tag can be thought of as a unique key that
+ // identifies a singleton instance.
+ SINGLETON_TAG,
+ STRING_FIELDS_END,
+};
+
+enum {
+ STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
+ BLOB_FIELDS_BEGIN = STRING_FIELDS_END
+};
+
+// From looking at the sqlite3 docs, it's not directly stated, but it
+// seems the overhead for storing a NULL blob is very small.
+enum BlobField {
+ // For bookmark entries, the favicon data. These will be NULL for
+ // non-bookmark items.
+ BOOKMARK_FAVICON = BLOB_FIELDS_BEGIN,
+ SERVER_BOOKMARK_FAVICON,
+ BLOB_FIELDS_END,
+};
+
+enum {
+ BLOB_FIELDS_COUNT = BLOB_FIELDS_END - BLOB_FIELDS_BEGIN
+};
+
+enum {
+ FIELD_COUNT = BLOB_FIELDS_END,
+ // Past this point we have temporaries, stored in memory only.
+ BEGIN_TEMPS = BLOB_FIELDS_END,
+ BIT_TEMPS_BEGIN = BEGIN_TEMPS,
+};
+
+enum BitTemp {
+ SYNCING = BIT_TEMPS_BEGIN,
+ IS_NEW, // Means use INSERT instead of UPDATE to save to db.
+ DEPRECATED_DELETE_ON_CLOSE, // Set by redirector, IS_OPEN must also be set.
+ DEPRECATED_CHANGED_SINCE_LAST_OPEN, // Have we been written to since we've
+ // been opened.
+ BIT_TEMPS_END,
+};
+
+enum {
+ BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
+};
+
+class BaseTransaction;
+class WriteTransaction;
+class ReadTransaction;
+class Directory;
+class ScopedDirLookup;
+class ExtendedAttribute;
+
+// Instead of:
+// Entry e = transaction.GetById(id);
+// use:
+// Entry e(transaction, GET_BY_ID, id);
+//
+// Why? The former would require a copy constructor, and it would be difficult
+// to enforce that an entry never outlived its transaction if there were a copy
+// constructor.
+enum GetById {
+ GET_BY_ID
+};
+
+enum GetByTag {
+ GET_BY_TAG
+};
+
+enum GetByHandle {
+ GET_BY_HANDLE
+};
+
+enum GetByPath {
+ GET_BY_PATH
+};
+
+enum GetByParentIdAndName {
+ GET_BY_PARENTID_AND_NAME
+};
+
+// DBName is the name stored in the database.
+enum GetByParentIdAndDBName {
+ GET_BY_PARENTID_AND_DBNAME
+};
+
+enum Create {
+ CREATE
+};
+
+enum CreateNewUpdateItem {
+ CREATE_NEW_UPDATE_ITEM
+};
+
+typedef std::set<PathString> AttributeKeySet;
+
+// DBName is a PathString with additional transformation methods that are
+// useful when trying to derive a unique and legal database name from
+// an unsanitized sync name.
+class DBName : public PathString {
+ public:
+ explicit DBName(const PathString& database_name)
+ : PathString(database_name) { }
+
+ // TODO(ncarter): Remove these codepaths to maintain alternate titles
+ // which are OS legal filenames, Chrome doesn't depend on this like some
+ // other browsers do.
+ void MakeOSLegal() {
+ PathString new_value = MakePathComponentOSLegal(*this);
+ if (!new_value.empty())
+ swap(new_value);
+ }
+
+ // Modify the value of this DBName so that it is not in use by any entry
+ // inside |parent_id|, except maybe |e|. |e| may be NULL if you are trying
+ // to compute a name for an entry which has yet to be created.
+ void MakeNoncollidingForEntry(BaseTransaction* trans,
+ const Id& parent_id,
+ Entry *e);
+};
+
+// SyncName encapsulates a canonical server name. In general, when we need to
+// muck around with a name that the server sends us (e.g. to make it OS legal),
+// we try to preserve the original value in a SyncName,
+// and distill the new local value into a DBName.
+// At other times, we need to apply transforms in the
+// other direction -- that is, to create a server-appropriate SyncName from a
+// user-updated DBName (which is an OS legal name, but not necessarily in the
+// format that the server wants it to be). For that sort of thing, you should
+// initialize a SyncName from the DB name value, and use the methods of
+// SyncName to canonicalize it. At other times, you have a pair of canonical
+// server values -- one (the "value") which is unique in the parent, and another
+// (the "non unique value") which is not unique in the parent -- and you
+// simply want to create a SyncName to hold them as a pair.
+class SyncName {
+ public:
+ // Create a SyncName with the initially specified value.
+ explicit SyncName(const PathString& sync_name)
+ : value_(sync_name), non_unique_value_(sync_name) { }
+
+ // Create a SyncName by specifying a value and a non-unique value. If
+ // you use this constructor, the values you provide should already be
+ // acceptable server names. Don't use the mutation/sanitization methods
+ // on the resulting instance -- mutation won't work if you have distinct
+ // values for the unique and non-unique fields.
+ SyncName(const PathString& unique_sync_name,
+ const PathString& non_unique_sync_name)
+ : value_(unique_sync_name), non_unique_value_(non_unique_sync_name) { }
+
+#ifdef OS_MACOSX
+ // Translate [':' -> '/'] within the sync name. Used on OSX.
+ void ConvertColonsToSlashes() {
+ DCHECK_EQ(value_, non_unique_value_)
+ << "Deriving value_ will overwrite non_unique_value_.";
+ std::string temporary_copy;
+ temporary_copy.reserve(value_.size());
+ StringReplace(value_, ":", "/", true, &temporary_copy);
+ value_.swap(temporary_copy);
+ non_unique_value_ = value_;
+ }
+#endif
+
+ // Transform |value_| so that it's a legal server name.
+ void MakeServerLegal() {
+ DCHECK_EQ(value_, non_unique_value_)
+ << "Deriving value_ will overwrite non_unique_value_.";
+ // Append a trailing space if the value is one of the server's three
+ // forbidden special cases.
+ if (value_.empty() ||
+ value_ == PSTR(".") ||
+ value_ == PSTR("..")) {
+ value_.append(PSTR(" "));
+ non_unique_value_ = value_;
+ }
+ // TODO(ncarter): Handle server's other requirement: truncation to
+ // 256 bytes in Unicode NFC.
+ }
+
+ const PathString& value() const { return value_; }
+ PathString& value() { return value_; }
+ const PathString& non_unique_value() const { return non_unique_value_; }
+ PathString& non_unique_value() { return non_unique_value_; }
+
+ inline bool operator==(const SyncName& right_hand_side) const {
+ return value_ == right_hand_side.value_ &&
+ non_unique_value_ == right_hand_side.non_unique_value_;
+ }
+ inline bool operator!=(const SyncName& right_hand_side) const {
+ return !(*this == right_hand_side);
+ }
+ private:
+ PathString value_;
+ PathString non_unique_value_;
+};
+
+// Name is a SyncName which has an additional DBName that provides a way to
+// interpolate the "unsanitized name" according to the syncable convention.
+//
+// A method might accept a Name as an parameter when the sync and database
+// names need to be set simultaneously:
+//
+// void PutName(const Name& new_name) {
+// Put(NAME, new_name.db_value());
+// Put(UNSANITIZED_NAME, new_name.GetUnsanitizedName());
+// }
+//
+// A code point that is trying to convert between local database names and
+// server sync names can use Name to help with the conversion:
+//
+// SyncName server_name = entry->GetServerName();
+// Name name = Name::FromSyncName(server_name); // Initially, name.value()
+// // and name.db_value() are
+// // equal to
+// // server_name.value().
+// name.db_value().MakeOSLegal(); // Updates name.db_value in-place,
+// // leaving name.value() unchanged.
+// foo->PutName(name);
+//
+class Name : public SyncName {
+ public:
+ // Create a Name with an initially specified db_value and value.
+ Name(const PathString& db_name, const PathString& sync_name)
+ : SyncName(sync_name), db_value_(db_name) { }
+
+ // Create a Name by specifying the db name, sync name, and non-unique
+ // sync name values.
+ Name(const PathString& db_name, const PathString& sync_name,
+ const PathString& non_unique_sync_name)
+ : SyncName(sync_name, non_unique_sync_name), db_value_(db_name) { }
+
+ // Create a Name with all name values initially equal to the the single
+ // specified argument.
+ explicit Name(const PathString& sync_and_db_name)
+ : SyncName(sync_and_db_name), db_value_(sync_and_db_name) { }
+
+ // Create a Name using the local (non-SERVER) fields of an EntryKernel.
+ static Name FromEntryKernel(struct EntryKernel*);
+
+ // Create a Name from a SyncName. db_value is initially sync_name.value().
+ // non_unique_value() and value() are copied from |sync_name|.
+ static Name FromSyncName(const SyncName& sync_name) {
+ return Name(sync_name.value(), sync_name.value(),
+ sync_name.non_unique_value());
+ }
+
+ static Name FromDBNameAndSyncName(const PathString& db_name,
+ const SyncName& sync_name) {
+ return Name(db_name, sync_name.value(), sync_name.non_unique_value());
+ }
+
+ // Get the database name. The non-const version is useful for in-place
+ // mutation.
+ const DBName& db_value() const { return db_value_; }
+ DBName& db_value() { return db_value_; }
+
+ // Do the sync names and database names differ? This indicates that
+ // the sync name has been sanitized, and that GetUnsanitizedName() will
+ // be non-empty.
+ bool HasBeenSanitized() const { return db_value_ != value(); }
+
+ // Compute the value of the unsanitized name from the current sync and db
+ // name values. The unsanitized name is the sync name value, unless the sync
+ // name is the same as the db name value, in which case the unsanitized name
+ // is empty.
+ PathString GetUnsanitizedName() const {
+ return HasBeenSanitized() ? value() : PathString();
+ }
+
+ inline bool operator==(const Name& right_hand_side) const {
+ return this->SyncName::operator==(right_hand_side) &&
+ db_value_ == right_hand_side.db_value_;
+ }
+ inline bool operator!=(const Name& right_hand_side) const {
+ return !(*this == right_hand_side);
+ }
+
+ private:
+ // The database name, which is maintained to be a legal and unique-in-parent
+ // name.
+ DBName db_value_;
+};
+
+// Why the singular enums? So the code compile-time dispatches instead of
+// runtime dispatches as it would with a single enum and an if() statement.
+
+// The EntryKernel class contains the actual data for an entry. It
+// would be a private class, except the number of required friend
+// declarations would bloat the code.
+struct EntryKernel {
+ protected:
+ PathString string_fields[STRING_FIELDS_COUNT];
+ Blob blob_fields[BLOB_FIELDS_COUNT];
+ int64 int64_fields[INT64_FIELDS_COUNT];
+ Id id_fields[ID_FIELDS_COUNT];
+ std::bitset<BIT_FIELDS_COUNT> bit_fields;
+ std::bitset<BIT_TEMPS_COUNT> bit_temps;
+
+ public:
+ std::bitset<FIELD_COUNT> dirty;
+
+ // Contain all this error-prone arithmetic in one place.
+ inline int64& ref(MetahandleField field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline int64& ref(Int64Field field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline Id& ref(IdField field) {
+ return id_fields[field - ID_FIELDS_BEGIN];
+ }
+ inline int64& ref(BaseVersion field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(IndexedBitField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(IsDelField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(BitField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline PathString& ref(StringField field) {
+ return string_fields[field - STRING_FIELDS_BEGIN];
+ }
+ inline Blob& ref(BlobField field) {
+ return blob_fields[field - BLOB_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_TEMPS_COUNT>::reference ref(BitTemp field) {
+ return bit_temps[field - BIT_TEMPS_BEGIN];
+ }
+
+ inline int64 ref(MetahandleField field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline int64 ref(Int64Field field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline const Id& ref(IdField field) const {
+ return id_fields[field - ID_FIELDS_BEGIN];
+ }
+ inline int64 ref(BaseVersion field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline bool ref(IndexedBitField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline bool ref(IsDelField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline bool ref(BitField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline PathString ref(StringField field) const {
+ return string_fields[field - STRING_FIELDS_BEGIN];
+ }
+ inline Blob ref(BlobField field) const {
+ return blob_fields[field - BLOB_FIELDS_BEGIN];
+ }
+ inline bool ref(BitTemp field) const {
+ return bit_temps[field - BIT_TEMPS_BEGIN];
+ }
+};
+
+// A read-only meta entry.
+class Entry {
+ friend class Directory;
+ friend std::ostream& ::operator << (std::ostream& s, const Entry& e);
+
+ public:
+ // After constructing, you must check good() to test whether the Get
+ // succeed.
+ Entry(BaseTransaction* trans, GetByHandle, int64 handle);
+ Entry(BaseTransaction* trans, GetById, const Id& id);
+ Entry(BaseTransaction* trans, GetByTag, const PathString& tag);
+ Entry(BaseTransaction* trans, GetByPath, const PathString& path);
+ Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& id,
+ const PathString& name);
+ Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& id,
+ const PathString& name);
+
+ bool good() const { return 0 != kernel_; }
+
+ BaseTransaction* trans() const { return basetrans_; }
+
+ // Field accessors.
+ inline int64 Get(MetahandleField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline Id Get(IdField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline int64 Get(Int64Field field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline int64 Get(BaseVersion field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(IndexedBitField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(IsDelField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(BitField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ PathString Get(StringField field) const;
+ inline Blob Get(BlobField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(BitTemp field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline Name GetName() const {
+ DCHECK(kernel_);
+ return Name::FromEntryKernel(kernel_);
+ }
+ inline SyncName GetServerName() const {
+ DCHECK(kernel_);
+ return SyncName(kernel_->ref(SERVER_NAME),
+ kernel_->ref(SERVER_NON_UNIQUE_NAME));
+ }
+ inline bool SyncNameMatchesServerName() const {
+ DCHECK(kernel_);
+ SyncName sync_name(GetName());
+ return sync_name == GetServerName();
+ }
+ inline PathString GetSyncNameValue() const {
+ DCHECK(kernel_);
+ // This should always be equal to GetName().sync_name().value(), but
+ // maybe faster.
+ return kernel_->ref(UNSANITIZED_NAME).empty() ? kernel_->ref(NAME) :
+ kernel_->ref(UNSANITIZED_NAME);
+ }
+ inline bool ExistsOnClientBecauseDatabaseNameIsNonEmpty() const {
+ DCHECK(kernel_);
+ return !kernel_->ref(NAME).empty();
+ }
+ inline bool IsRoot() const {
+ DCHECK(kernel_);
+ return kernel_->ref(ID).IsRoot();
+ }
+
+ void GetAllExtendedAttributes(BaseTransaction* trans,
+ std::set<ExtendedAttribute>* result);
+ void GetExtendedAttributesList(BaseTransaction* trans,
+ AttributeKeySet* result);
+ // Flags all extended attributes for deletion on the next SaveChanges.
+ void DeleteAllExtendedAttributes(WriteTransaction *trans);
+
+ Directory* dir() const;
+
+ const EntryKernel GetKernelCopy() const {
+ return *kernel_;
+ }
+
+
+ protected: // Don't allow creation on heap, except by sync API wrappers.
+ friend class sync_api::ReadNode;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ inline Entry(BaseTransaction* trans) : basetrans_(trans) { }
+
+ protected:
+
+ BaseTransaction* const basetrans_;
+
+ EntryKernel* kernel_;
+
+ DISALLOW_COPY_AND_ASSIGN(Entry);
+};
+
+// A mutable meta entry. Changes get committed to the database when the
+// WriteTransaction is destroyed.
+class MutableEntry : public Entry {
+ friend class WriteTransaction;
+ friend class Directory;
+ void Init(WriteTransaction* trans, const Id& parent_id,
+ const PathString& name);
+ public:
+ MutableEntry(WriteTransaction* trans, Create, const Id& parent_id,
+ const PathString& name);
+ MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
+ MutableEntry(WriteTransaction* trans, GetByHandle, int64);
+ MutableEntry(WriteTransaction* trans, GetById, const Id&);
+ MutableEntry(WriteTransaction* trans, GetByPath, const PathString& path);
+ MutableEntry(WriteTransaction* trans, GetByParentIdAndName, const Id&,
+ const PathString& name);
+ MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
+ const Id& parentid, const PathString& name);
+
+ WriteTransaction* trans() const;
+
+ // Field Accessors. Some of them trigger the re-indexing of the entry.
+ // Return true on success, return false on failure, which means
+ // that putting the value would have caused a duplicate in the index.
+ bool Put(Int64Field field, const int64& value);
+ bool Put(IdField field, const Id& value);
+ bool Put(StringField field, const PathString& value);
+ bool Put(BaseVersion field, int64 value);
+ inline bool PutName(const Name& name) {
+ return (Put(NAME, name.db_value()) &&
+ Put(UNSANITIZED_NAME, name.GetUnsanitizedName()) &&
+ Put(NON_UNIQUE_NAME, name.non_unique_value()));
+ }
+ inline bool PutServerName(const SyncName& server_name) {
+ return (Put(SERVER_NAME, server_name.value()) &&
+ Put(SERVER_NON_UNIQUE_NAME, server_name.non_unique_value()));
+ }
+ inline bool Put(BlobField field, const Blob& value) {
+ return PutField(field, value);
+ }
+ inline bool Put(BitField field, bool value) {
+ return PutField(field, value);
+ }
+ inline bool Put(IsDelField field, bool value) {
+ return PutIsDel(value);
+ }
+ bool Put(IndexedBitField field, bool value);
+
+ // Avoids temporary collision in index when renaming a bookmark
+ // into another folder.
+ bool PutParentIdAndName(const Id& parent_id, const Name& name);
+
+ // Sets the position of this item, and updates the entry kernels of the
+ // adjacent siblings so that list invariants are maintained. Returns false
+ // and fails if |predecessor_id| does not identify a sibling. Pass the root
+ // ID to put the node in first position.
+ bool PutPredecessor(const Id& predecessor_id);
+
+ inline bool Put(BitTemp field, bool value) {
+ return PutTemp(field, value);
+ }
+
+ protected:
+
+ template <typename FieldType, typename ValueType>
+ inline bool PutField(FieldType field, const ValueType& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+ }
+
+ template <typename TempType, typename ValueType>
+ inline bool PutTemp(TempType field, const ValueType& value) {
+ DCHECK(kernel_);
+ kernel_->ref(field) = value;
+ return true;
+ }
+
+ bool PutIsDel(bool value);
+
+ private: // Don't allow creation on heap, except by sync API wrappers.
+ friend class sync_api::WriteNode;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ bool PutImpl(StringField field, const PathString& value);
+
+
+ // Adjusts the successor and predecessor entries so that they no longer
+ // refer to this entry.
+ void UnlinkFromOrder();
+
+ protected:
+ MutableEntry();
+
+ DISALLOW_COPY_AND_ASSIGN(MutableEntry);
+};
+
+template <Int64Field field_index>
+class SameField;
+template <Int64Field field_index>
+class HashField;
+class LessParentIdAndNames;
+class LessMultiIncusionTargetAndMetahandle;
+template <typename FieldType, FieldType field_index>
+class LessField;
+class LessEntryMetaHandles {
+ public:
+ inline bool operator()(const syncable::EntryKernel& a,
+ const syncable::EntryKernel& b) const {
+ return a.ref(META_HANDLE) < b.ref(META_HANDLE);
+ }
+};
+typedef std::set<EntryKernel, LessEntryMetaHandles> OriginalEntries;
+
+// a WriteTransaction has a writer tag describing which body of code is doing
+// the write. This is defined up here since DirectoryChangeEvent also contains
+// one.
+enum WriterTag {
+ INVALID, SYNCER, AUTHWATCHER, UNITTEST, VACUUM_AFTER_SAVE, SYNCAPI
+};
+
+// A separate Event type and channel for very frequent changes, caused
+// by anything, not just the user.
+struct DirectoryChangeEvent {
+ enum {
+ // Means listener should go through list of original entries and
+ // calculate what it needs to notify. It should *not* call any
+ // callbacks or attempt to lock anything because a
+ // WriteTransaction is being held until the listener returns.
+ CALCULATE_CHANGES,
+ // Means the WriteTransaction has been released and the listener
+ // can now take action on the changes it calculated.
+ TRANSACTION_COMPLETE,
+ // Channel is closing.
+ SHUTDOWN
+ } todo;
+ // These members are only valid for CALCULATE_CHANGES
+ const OriginalEntries* originals;
+ BaseTransaction* trans;
+ WriterTag writer;
+ typedef DirectoryChangeEvent EventType;
+ static inline bool IsChannelShutdownEvent(const EventType& e) {
+ return SHUTDOWN == e.todo;
+ }
+};
+
+struct ExtendedAttributeKey {
+ int64 metahandle;
+ PathString key;
+ inline bool operator < (const ExtendedAttributeKey& x) const {
+ if (metahandle != x.metahandle)
+ return metahandle < x.metahandle;
+ return key.compare(x.key) < 0;
+ }
+ ExtendedAttributeKey(int64 metahandle, PathString key) :
+ metahandle(metahandle), key(key) { }
+};
+
+struct ExtendedAttributeValue {
+ Blob value;
+ bool is_deleted;
+ bool dirty;
+};
+
+typedef std::map<ExtendedAttributeKey, ExtendedAttributeValue>
+ ExtendedAttributes;
+
+// Used to maintain our per-thread transaction state and to enforce
+// our transaction invariants (e.g. no recursive transactions).
+// Each time a thread enters a transaction by constructing a Read or a
+// WriteTransaction object, a ThreadNode object is pulled from thread
+// local storage, or created and stored in thread-local storage if it
+// doesn't yet exist.
+struct ThreadNode {
+ const char* file;
+ int line;
+ base::TimeTicks wait_started;
+ ThreadId id;
+ ThreadNode* next;
+ ThreadNode* prev;
+
+ // True when this node is in a linked list. Only accessed from
+ // owner thread so no locking necessary.
+ bool in_list;
+ WriteTransaction* current_write_trans;
+ PThreadCondVar condvar; // Mutex is the kernel's transaction mutex.
+ bool wake_up; // flag for condvar.
+ int tclass; // Really a BaseTransaction::TClass, but no forward enums.
+
+ // Linked list operations.
+ inline ThreadNode() : in_list(false), current_write_trans(NULL),
+ wake_up(false) {
+ next = prev = this;
+ }
+ inline ThreadNode* Remove() {
+ in_list = false;
+ prev->next = next;
+ next->prev = prev;
+ return next = prev = this;
+ }
+ inline void Insert(ThreadNode* node) {
+ in_list = true;
+ prev = node->prev;
+ next = node;
+ next->prev = prev->next = this;
+ }
+};
+
+struct ThreadCounts {
+ int waiting;
+ int active;
+ // Also keep a linked list of thread information.
+ ThreadNode waiting_headtail;
+ ThreadNode active_headtail;
+
+ ThreadCounts() : waiting(0), active(0) { }
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadCounts);
+};
+
+typedef PThreadScopedLock<PThreadMutex> ScopedTransactionLock;
+typedef std::set<int64> MetahandleSet;
+
+// A list of metahandles whose metadata should not be purged.
+typedef std::multiset<int64> Pegs;
+
+// The name Directory in this case means the entire directory
+// structure within a single user account.
+//
+// Sqlite is a little goofy, in that each thread must access a database
+// via its own handle. So, a Directory object should only be accessed
+// from a single thread. Use DirectoryManager's Open() method to
+// always get a directory that has been properly initialized on the
+// current thread.
+//
+// The db is protected against concurrent modification by a reader/
+// writer lock, negotiated by the ReadTransaction and WriteTransaction
+// friend classes. The in-memory indices are protected against
+// concurrent modification by the kernel lock.
+//
+// All methods which require the reader/writer lock to be held either
+// are protected and only called from friends in a transaction
+// or are public and take a Transaction* argument.
+//
+// All methods which require the kernel lock to be already held take a
+// ScopeKernelLock* argument.
+//
+// To prevent deadlock, the reader writer transaction lock must always
+// be held before acquiring the kernel lock.
+class ScopedKernelLock;
+class IdFilter;
+class DirectoryManager;
+struct PathMatcher;
+
+class Directory {
+ friend class BaseTransaction;
+ friend class Entry;
+ friend class ExtendedAttribute;
+ friend class MutableEntry;
+ friend class MutableExtendedAttribute;
+ friend class ReadTransaction;
+ friend class ReadTransactionWithoutDB;
+ friend class ScopedKernelLock;
+ friend class ScopedKernelUnlock;
+ friend class WriteTransaction;
+ friend class TestUnsaveableDirectory;
+ public:
+ // Various data that the Directory::Kernel we are backing (persisting data
+ // for) needs saved across runs of the application.
+ struct PersistedKernelInfo {
+ int64 last_sync_timestamp;
+ bool initial_sync_ended;
+ std::string store_birthday;
+ int64 next_id;
+ PersistedKernelInfo() : last_sync_timestamp(0),
+ initial_sync_ended(false),
+ next_id(0) {
+ }
+ };
+
+ // What the Directory needs on initialization to create itself and its Kernel.
+ // Filled by DirectoryBackingStore::Load.
+ struct KernelLoadInfo {
+ PersistedKernelInfo kernel_info;
+ std::string cache_guid; // Created on first initialization, never changes.
+ int64 max_metahandle; // Computed (using sql MAX aggregate) on init.
+ KernelLoadInfo() : max_metahandle(0) {
+ }
+ };
+
+ // The dirty/clean state of kernel fields backed by the share_info table.
+ // This is public so it can be used in SaveChangesSnapshot for persistence.
+ enum KernelShareInfoStatus {
+ KERNEL_SHARE_INFO_INVALID,
+ KERNEL_SHARE_INFO_VALID,
+ KERNEL_SHARE_INFO_DIRTY
+ };
+
+ // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
+ // constructed and forms a consistent snapshot of what needs to be sent to
+ // the backing store.
+ struct SaveChangesSnapshot {
+ KernelShareInfoStatus kernel_info_status;
+ PersistedKernelInfo kernel_info;
+ OriginalEntries dirty_metas;
+ ExtendedAttributes dirty_xattrs;
+ SaveChangesSnapshot() : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
+ }
+ };
+
+ Directory();
+ virtual ~Directory();
+
+ DirOpenResult Open(const PathString& file_path, const PathString& name);
+
+ void Close();
+
+ int64 NextMetahandle();
+ // Always returns a negative id. Positive client ids are generated
+ // by the server only.
+ Id NextId();
+
+ PathString file_path() const { return kernel_->db_path; }
+ bool good() const { return NULL != store_; }
+
+ // The sync timestamp is an index into the list of changes for an account.
+ // It doesn't actually map to any time scale, it's name is an historical
+ // anomaly.
+ int64 last_sync_timestamp() const;
+ void set_last_sync_timestamp(int64 timestamp);
+
+ bool initial_sync_ended() const;
+ void set_initial_sync_ended(bool value);
+
+ PathString name() const { return kernel_->name_; }
+
+ // (Account) Store birthday is opaque to the client,
+ // so we keep it in the format it is in the proto buffer
+ // in case we switch to a binary birthday later.
+ std::string store_birthday() const;
+ void set_store_birthday(std::string store_birthday);
+
+ // Unique to each account / client pair.
+ std::string cache_guid() const;
+
+ protected: // for friends, mainly used by Entry constructors
+ EntryKernel* GetChildWithName(const Id& parent_id, const PathString& name);
+ EntryKernel* GetChildWithDBName(const Id& parent_id, const PathString& name);
+ EntryKernel* GetEntryByHandle(const int64 handle);
+ EntryKernel* GetEntryByHandle(const int64 metahandle, ScopedKernelLock* lock);
+ EntryKernel* GetEntryById(const Id& id);
+ EntryKernel* GetEntryByTag(const PathString& tag);
+ EntryKernel* GetRootEntry();
+ EntryKernel* GetEntryByPath(const PathString& path);
+ bool ReindexId(EntryKernel* const entry, const Id& new_id);
+ bool ReindexParentIdAndName(EntryKernel* const entry, const Id& new_parent_id,
+ const PathString& new_name);
+ // These don't do the semantic checking that the redirector needs.
+ // The semantic checking is implemented higher up.
+ bool Undelete(EntryKernel* const entry);
+ bool Delete(EntryKernel* const entry);
+
+ // Overridden by tests.
+ virtual DirectoryBackingStore* CreateBackingStore(
+ const PathString& dir_name,
+ const PathString& backing_filepath);
+
+ private:
+ // These private versions expect the kernel lock to already be held
+ // before calling.
+ EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
+ EntryKernel* GetChildWithName(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock);
+ EntryKernel* GetChildWithNameImpl(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock);
+
+ DirOpenResult OpenImpl(const PathString& file_path, const PathString& name);
+
+ struct DirectoryEventTraits {
+ typedef DirectoryEvent EventType;
+ static inline bool IsChannelShutdownEvent(const DirectoryEvent& event) {
+ return DIRECTORY_DESTROYED == event;
+ }
+ };
+ public:
+ typedef EventChannel<DirectoryEventTraits, PThreadMutex> Channel;
+ typedef EventChannel<DirectoryChangeEvent, PThreadMutex> ChangesChannel;
+ typedef std::vector<int64> ChildHandles;
+
+ // Returns the child meta handles for given parent id.
+ void GetChildHandles(BaseTransaction*, const Id& parent_id,
+ const PathString& path_spec, ChildHandles* result);
+ void GetChildHandles(BaseTransaction*, const Id& parent_id,
+ ChildHandles* result);
+ void GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
+ PathMatcher* matcher, ChildHandles* result);
+
+ // Find the first or last child in the positional ordering under a parent,
+ // and return its id. Returns a root Id if parent has no children.
+ Id GetFirstChildId(BaseTransaction* trans, const Id& parent_id);
+ Id GetLastChildId(BaseTransaction* trans, const Id& parent_id);
+
+ // SaveChanges works by taking a consistent snapshot of the current Directory
+ // state and indices (by deep copy) under a ReadTransaction, passing this
+ // snapshot to the backing store under no transaction, and finally cleaning
+ // up by either purging entries no longer needed (this part done under a
+ // WriteTransaction) or rolling back dirty and IS_NEW bits. It also uses
+ // internal locking to enforce SaveChanges operations are mutually exclusive.
+ //
+ // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
+ bool SaveChanges();
+
+ // Returns the number of entities with the unsynced bit set.
+ int64 unsynced_entity_count() const;
+
+ // Get GetUnsyncedMetaHandles should only be called after SaveChanges and
+ // before any new entries have been created. The intention is that the
+ // syncer should call it from its PerformSyncQueries member.
+ typedef std::vector<int64> UnsyncedMetaHandles;
+ void GetUnsyncedMetaHandles(BaseTransaction* trans,
+ UnsyncedMetaHandles* result);
+
+ // Get all the metahandles for unapplied updates
+ typedef std::vector<int64> UnappliedUpdateMetaHandles;
+ void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
+ UnappliedUpdateMetaHandles* result);
+
+ void GetAllExtendedAttributes(BaseTransaction* trans, int64 metahandle,
+ std::set<ExtendedAttribute>* result);
+ // Get all extended attribute keys associated with a metahandle
+ void GetExtendedAttributesList(BaseTransaction* trans, int64 metahandle,
+ AttributeKeySet* result);
+ // Flags all extended attributes for deletion on the next SaveChanges.
+ void DeleteAllExtendedAttributes(WriteTransaction*trans, int64 metahandle);
+
+ // Get the channel for post save notification, used by the syncer.
+ inline Channel* channel() const {
+ return kernel_->channel;
+ }
+ inline ChangesChannel* changes_channel() const {
+ return kernel_->changes_channel;
+ }
+
+ // Checks tree metadata consistency.
+ // If full_scan is false, the function will avoid pulling any entries from the
+ // db and scan entries currently in ram.
+ // If full_scan is true, all entries will be pulled from the database.
+ // No return value, CHECKs will be triggered if we're given bad
+ // information.
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ bool full_scan);
+
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const OriginalEntries* originals);
+
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const MetahandleSet& handles,
+ const IdFilter& idfilter);
+
+ private:
+ // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
+ // and unapplied_metahandles from metahandles_index.
+ void InitializeIndices();
+
+ // Constructs a consistent snapshot of the current Directory state and
+ // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
+ // See SaveChanges() for more information.
+ void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);
+
+ // Purges from memory any unused, safe to remove entries that were
+ // successfully deleted on disk as a result of the SaveChanges that processed
+ // |snapshot|. See SaveChanges() for more information.
+ void VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);
+
+ // Rolls back dirty and IS_NEW bits in the event that the SaveChanges that
+ // processed |snapshot| failed, for ex. due to no disk space.
+ void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
+
+ void InsertEntry(EntryKernel* entry, ScopedKernelLock* lock);
+ void InsertEntry(EntryKernel* entry);
+
+ // Used by CheckTreeInvariants
+ void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
+
+ static bool SafeToPurgeFromMemory(const EntryKernel* const entry);
+
+ // Helper method used to implement GetFirstChildId/GetLastChildId.
+ Id GetChildWithNullIdField(IdField field,
+ BaseTransaction* trans,
+ const Id& parent_id);
+
+ Directory& operator = (const Directory&);
+
+ // TODO(sync): If lookups and inserts in these sets become
+ // the bottle-neck, then we can use hash-sets instead. But
+ // that will require using #ifdefs and compiler-specific code,
+ // so use standard sets for now.
+ public:
+ typedef std::set<EntryKernel*, LessField<MetahandleField, META_HANDLE> >
+ MetahandlesIndex;
+ typedef std::set<EntryKernel*, LessField<IdField, ID> > IdsIndex;
+ // All entries in memory must be in both the MetahandlesIndex and
+ // the IdsIndex, but only non-deleted entries will be the
+ // ParentIdAndNamesIndex, because there can be multiple deleted
+ // entries with the same parent id and name.
+ typedef std::set<EntryKernel*, LessParentIdAndNames> ParentIdAndNamesIndex;
+ typedef std::vector<int64> MetahandlesToPurge;
+
+ private:
+
+ struct Kernel {
+ Kernel(const PathString& db_path, const PathString& name,
+ const KernelLoadInfo& info);
+
+ ~Kernel();
+
+ PathString const db_path;
+ // TODO(timsteele): audit use of the member and remove if possible
+ volatile base::subtle::AtomicWord refcount;
+ void AddRef(); // For convenience.
+ void Release();
+
+ // Next 3 members implement the reader/writer lock.
+ PThreadMutex transaction_mutex; // Protects next member.
+ ThreadCounts thread_counts;
+ pthread_key_t thread_node_key;
+
+ // The name of this directory, used as a key into open_files_;
+ PathString const name_;
+
+ // Protects all members below.
+ // The mutex effectively protects all the indices, but not the
+ // entries themselves. So once a pointer to an entry is pulled
+ // from the index, the mutex can be unlocked and entry read or written.
+ //
+ // Never hold the mutex and do anything with the database or any
+ // other buffered IO. Violating this rule will result in deadlock.
+ pthread_mutex_t mutex;
+ MetahandlesIndex* metahandles_index; // Entries indexed by metahandle
+ IdsIndex* ids_index; // Entries indexed by id
+ ParentIdAndNamesIndex* parent_id_and_names_index;
+ // So we don't have to create an EntryKernel every time we want to
+ // look something up in an index. Needle in haystack metaphore.
+ EntryKernel needle;
+ ExtendedAttributes* const extended_attributes;
+
+ // 2 in-memory indices on bits used extremely frequently by the syncer.
+ MetahandleSet* const unapplied_update_metahandles;
+ MetahandleSet* const unsynced_metahandles;
+ // TODO(timsteele): Add a dirty_metahandles index as we now may want to
+ // optimize the SaveChanges work of scanning all entries to find dirty ones
+ // due to the entire entry domain now being in-memory.
+
+ // TODO(ncarter): Figure out what the hell this is, and comment it.
+ Channel* const channel;
+
+ // The changes channel mutex is explicit because it must be locked
+ // while holding the transaction mutex and released after
+ // releasing the transaction mutex.
+ ChangesChannel* const changes_channel;
+ PThreadMutex changes_channel_mutex;
+ KernelShareInfoStatus info_status_;
+ // These 5 members are backed in the share_info table, and
+ // their state is marked by the flag above.
+ // Last sync timestamp fetched from the server.
+ int64 last_sync_timestamp_;
+ // true iff we ever reached the end of the changelog.
+ bool initial_sync_ended_;
+ // The store birthday we were given by the server. Contents are opaque to
+ // the client.
+ std::string store_birthday_;
+ // A unique identifier for this account's cache db, used to generate
+ // unique server IDs. No need to lock, only written at init time.
+ std::string cache_guid_;
+
+ // It doesn't make sense for two threads to run SaveChanges at the same
+ // time; this mutex protects that activity.
+ PThreadMutex save_changes_mutex;
+
+ // The next metahandle and id are protected by kernel mutex.
+ int64 next_metahandle;
+ int64 next_id;
+
+ // Keep a history of recently flushed metahandles for debugging
+ // purposes. Protected by the save_changes_mutex.
+ DebugQueue<int64, 1000> flushed_metahandles_;
+ };
+
+ Kernel* kernel_;
+
+ DirectoryBackingStore* store_;
+};
+
+class ScopedKernelLock {
+ public:
+ explicit ScopedKernelLock(const Directory*);
+ ~ScopedKernelLock();
+
+ Directory* const dir_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
+};
+
+class ScopedKernelUnlock {
+ public:
+ explicit ScopedKernelUnlock(ScopedKernelLock* lock);
+ ~ScopedKernelUnlock();
+ ScopedKernelLock* const lock_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedKernelUnlock);
+};
+
+// Transactions are now processed FIFO (+overlapping reads).
+class BaseTransaction {
+ friend class Entry;
+ public:
+ enum TransactionClass { READ, WRITE };
+
+ protected:
+ explicit BaseTransaction(Directory* directory, const char* name,
+ const char* source_file, int line);
+
+ // The members below are optionally called by descendants.
+ void Lock(ThreadCounts* const thread_counts, ThreadNode* thread_node,
+ TransactionClass tclass);
+ void AfterLock(ThreadNode* thread_node);
+ void UnlockAndLog(ThreadCounts* const thread_counts, OriginalEntries*);
+ void Init(ThreadCounts* const thread_counts, TransactionClass tclass);
+ ThreadNode* MakeThreadNode();
+ public:
+
+ inline Directory* directory() const { return directory_; }
+
+ inline Id root_id() const { return Id(); }
+
+ protected:
+ Directory* const directory_;
+ Directory::Kernel* const dirkernel_; // for brevity
+ const char* const name_;
+ base::TimeTicks time_acquired_;
+ const char* const source_file_;
+ const int line_;
+ WriterTag writer_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class ReadTransaction : public BaseTransaction {
+ public:
+ ReadTransaction(Directory* directory, const char* source_file,
+ int line);
+ ReadTransaction(const ScopedDirLookup& scoped_dir,
+ const char* source_file, int line);
+
+ ~ReadTransaction();
+
+ protected: // Don't allow creation on heap, except by sync API wrapper.
+ friend class sync_api::ReadTransaction;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class WriteTransaction : public BaseTransaction {
+ friend class MutableEntry;
+ public:
+ explicit WriteTransaction(Directory* directory, WriterTag writer,
+ const char* source_file, int line);
+ explicit WriteTransaction(const ScopedDirLookup& directory,
+ WriterTag writer, const char* source_file,
+ int line);
+ virtual ~WriteTransaction();
+
+ void SaveOriginal(EntryKernel* entry);
+
+ protected:
+ // If I had had the foresight to create a BaseWriteTransactionClass,
+ // I would not have needed this pass-through constructor and the
+ // skip_destructor flag.
+ explicit WriteTransaction(Directory* directory,
+ const char* name, WriterTag writer,
+ const char* source_file,
+ int line, bool skip_destructor,
+ OriginalEntries* originals);
+
+ const bool skip_destructor_;
+
+ // Before an entry gets modified, we copy the original into a list
+ // so that we can issue change notifications when the transaction
+ // is done.
+ OriginalEntries* const originals_;
+
+ DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
+};
+
+bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);
+int ComparePathNames(const PathString& a, const PathString& b);
+
+// Exposed in header as this is used as a sqlite3 callback.
+int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
+ const void* b);
+
+int64 Now();
+
+// Does wildcard processing.
+BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec);
+
+PathString GetFullPath(BaseTransaction* trans, const Entry& e);
+
+inline void ReverseAppend(const PathString& s, PathString* target) {
+ target->append(s.rbegin(), s.rend());
+}
+
+class ExtendedAttribute {
+ public:
+ ExtendedAttribute(BaseTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key);
+ int64 metahandle() const { return i_->first.metahandle; }
+ const PathString& key() const { return i_->first.key; }
+ const Blob& value() const { return i_->second.value; }
+ bool is_deleted() const { return i_->second.is_deleted; }
+ bool good() const { return good_; }
+ bool operator < (const ExtendedAttribute& x) const {
+ return i_->first < x.i_->first;
+ }
+ protected:
+ bool Init(BaseTransaction* trans,
+ Directory::Kernel* const kernel,
+ ScopedKernelLock* lock,
+ const ExtendedAttributeKey& key);
+ ExtendedAttribute() { }
+ ExtendedAttributes::iterator i_;
+ bool good_;
+};
+
+class MutableExtendedAttribute : public ExtendedAttribute {
+ public:
+ MutableExtendedAttribute(WriteTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key);
+ MutableExtendedAttribute(WriteTransaction* trans, Create,
+ const ExtendedAttributeKey& key);
+
+ Blob* mutable_value() {
+ i_->second.dirty = true;
+ i_->second.is_deleted = false;
+ return &(i_->second.value);
+ }
+
+ void delete_attribute() {
+ i_->second.dirty = true;
+ i_->second.is_deleted = true;
+ }
+};
+
+// Get an extended attribute from an Entry by name. Returns a pointer
+// to a const Blob containing the attribute data, or NULL if there is
+// no attribute with the given name. The pointer is valid for the
+// duration of the Entry's transaction.
+const Blob* GetExtendedAttributeValue(const Entry& e,
+ const PathString& attribute_name);
+
+// This function sets only the flags needed to get this entry to sync.
+void MarkForSyncing(syncable::MutableEntry* e);
+
+// This is not a reset. It just sets the numeric fields which are not
+// initialized by the constructor to zero.
+void ZeroFields(EntryKernel* entry, int first_field);
+
+} // namespace syncable
+
+std::ostream& operator <<(std::ostream&, const syncable::Blob&);
+
+browser_sync::FastDump& operator <<
+ (browser_sync::FastDump&, const syncable::Blob&);
+
+
+std::ostream& operator <<(std::ostream&, const syncable::ThreadNode&);
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
diff --git a/chrome/browser/sync/syncable/syncable_changes_version.h b/chrome/browser/sync/syncable/syncable_changes_version.h
new file mode 100644
index 0000000..26a5eb8
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_changes_version.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+
+namespace syncable {
+
+// For the most part, the sync engine treats version numbers as opaque values.
+// However, there are parts of our code base that break this abstraction, and
+// depend on the following two invariants:
+//
+// 1. CHANGES_VERSION is less than 0.
+// 2. The server only issues positive version numbers.
+//
+// Breaking these abstractions makes some operations 10 times
+// faster. If either of these invariants change, then those queries
+// must be revisited.
+
+enum {
+ CHANGES_VERSION = -1
+};
+
+#define CHANGES_VERSION_STRING "-1"
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
new file mode 100644
index 0000000..10f7578
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_columns.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+
+namespace syncable {
+
+struct ColumnSpec {
+ const char* name;
+ const char* spec;
+};
+
+// Must be in exact same order as fields in syncable.
+static const ColumnSpec g_metas_columns[] = {
+ //////////////////////////////////////
+ // int64s
+ {"metahandle", "bigint primary key ON CONFLICT FAIL"},
+ {"base_version", "bigint default " CHANGES_VERSION_STRING},
+ {"server_version", "bigint default 0"},
+ // These timestamps are kept in native file timestamp format. It is
+ // up to the syncer to translate to Java time when syncing.
+ {"mtime", "bigint default 0"},
+ {"server_mtime", "bigint default 0"},
+ {"ctime", "bigint default 0"},
+ {"server_ctime", "bigint default 0"},
+ {"server_position_in_parent", "bigint default 0"},
+ // This is the item ID that we store for the embedding application.
+ {"local_external_id", "bigint default 0"},
+ //////////////////////////////////////
+ // Ids
+ {"id", "varchar(255) default \"r\""},
+ {"parent_id", "varchar(255) default \"r\""},
+ {"server_parent_id", "varchar(255) default \"r\""},
+ {"prev_id", "varchar(255) default \"r\""},
+ {"next_id", "varchar(255) default \"r\""},
+ //////////////////////////////////////
+ // bits
+ {"is_unsynced", "bit default 0"},
+ {"is_unapplied_update", "bit default 0"},
+ {"is_del", "bit default 0"},
+ {"is_dir", "bit default 0"},
+ {"is_bookmark_object", "bit default 0"},
+ {"server_is_dir", "bit default 0"},
+ {"server_is_del", "bit default 0"},
+ {"server_is_bookmark_object", "bit default 0"},
+ //////////////////////////////////////
+ // Strings
+ {"name", "varchar(255) COLLATE PATHNAME"},
+ {"unsanitized_name", "varchar(255) COLLATE PATHNAME"},
+ {"non_unique_name", "varchar"},
+ {"server_name", "varchar(255) COLLATE PATHNAME"},
+ {"server_non_unique_name", "varchar"},
+ {"bookmark_url", "varchar"},
+ {"server_bookmark_url", "varchar"},
+ {"singleton_tag", "varchar"},
+ //////////////////////////////////////
+ // Blobs.
+ {"bookmark_favicon", "blob"},
+ {"server_bookmark_favicon", "blob"},
+};
+
+// At least enforce that there are equal number of column names and fields.
+COMPILE_ASSERT(ARRAYSIZE(g_metas_columns) >= FIELD_COUNT, missing_column_name);
+COMPILE_ASSERT(ARRAYSIZE(g_metas_columns) <= FIELD_COUNT, extra_column_names);
+
+static inline const char* ColumnName(int field) {
+ DCHECK(field < BEGIN_TEMPS);
+ return g_metas_columns[field].name;
+}
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
diff --git a/chrome/browser/sync/syncable/syncable_id.cc b/chrome/browser/sync/syncable/syncable_id.cc
new file mode 100644
index 0000000..04d5afc
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+#include <iosfwd>
+
+#include "base/string_util.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+
+using std::ostream;
+using std::string;
+
+namespace syncable {
+const Id kNullId; // Currently == root.
+} // namespace syncable
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const syncable::Id& id,
+ int index) {
+ return BindArg(statement, id.s_.c_str(), index);
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, syncable::Id* id) {
+ GetColumn(statement, index, &id->s_);
+}
+
+ostream& operator << (ostream& out, const syncable::Id& id) {
+ out << id.s_;
+ return out;
+}
+
+using browser_sync::FastDump;
+FastDump& operator << (FastDump& dump, const syncable::Id& id) {
+ dump.out_->sputn(id.s_.data(), id.s_.size());
+ return dump;
+}
+
+namespace syncable {
+
+string Id::AsQueryParam() const {
+ if ('s' == s_[0])
+ return s_.c_str() + 1;
+ return "";
+}
+
+string Id::GetServerId() const {
+ // Currently root is the string "0". We need to decide on a true value.
+ // "" would be convenient here, as the IsRoot call would not be needed.
+ if (IsRoot())
+ return "0";
+ return s_.substr(1);
+}
+
+Id Id::CreateFromServerId(const string& server_id) {
+ Id id;
+ if (server_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("s") + server_id;
+ return id;
+}
+
+Id Id::CreateFromClientString(const string& local_id) {
+ Id id;
+ if (local_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("c") + local_id;
+ return id;
+}
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_id.h b/chrome/browser/sync/syncable/syncable_id.h
new file mode 100644
index 0000000..5f2a28e
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
+
+#include <iosfwd>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "base/hash_tables.h"
+#include "chrome/browser/sync/util/fast_dump.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+extern "C" {
+struct sqlite3;
+struct sqlite3_stmt;
+}
+
+namespace syncable {
+class Id;
+} // namespace syncable
+
+class MockConnectionManager;
+
+sqlite3_stmt* BindArg(sqlite3_stmt*, const syncable::Id&, int index);
+void GetColumn(sqlite3_stmt*, int index, syncable::Id* value);
+std::ostream& operator << (std::ostream& out, const syncable::Id& id);
+browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& out, const syncable::Id& id);
+
+namespace syncable {
+
+// For historical reasons, 3 concepts got everloaded into the Id:
+// 1. A unique, opaque identifier for the object.
+// 2. Flag specifing whether server know about this object.
+// 3. Flag for root.
+//
+// We originally wrapped an integer for this information, but now we use a
+// string. It will have one of three forms:
+// 1. c<client only opaque id> for client items that have not been committed.
+// 2. r for the root item.
+// 3. s<server provided opaque id> for items that the server knows about.
+class Id {
+ friend sqlite3_stmt* ::BindArg(sqlite3_stmt*, const syncable::Id&, int index);
+ friend void ::GetColumn(sqlite3_stmt*, int index, syncable::Id* value);
+ friend std::ostream& ::operator << (std::ostream& out,
+ const syncable::Id& id);
+ friend browser_sync::FastDump& ::operator <<
+ (browser_sync::FastDump& out, const syncable::Id& id);
+ friend class MockConnectionManager;
+ friend class SyncableIdTest;
+ public:
+ // This constructor will be handy even when we move away from
+ // int64s, just for unit tests.
+ inline Id() : s_("r") { }
+ inline Id(const Id& that) {
+ Copy(that);
+ }
+ inline Id& operator = (const Id& that) {
+ Copy(that);
+ return *this;
+ }
+ inline void Copy(const Id& that) {
+ this->s_ = that.s_;
+ }
+ inline bool IsRoot() const {
+ return "r" == s_;
+ }
+ inline bool ServerKnows() const {
+ return s_[0] == 's' || s_ == "r";
+ }
+
+ // TODO(sync): We could use null here, but to ease conversion we use "r".
+ // fix this, this is madness :)
+ inline bool IsNull() const {
+ return IsRoot();
+ }
+ inline void Clear() {
+ s_ = "r";
+ }
+ std::string AsQueryParam() const;
+ // Must never allow id == 0 or id < 0 to compile.
+ inline bool operator == (const Id& that) const {
+ return s_ == that.s_;
+ }
+ inline bool operator != (const Id& that) const {
+ return s_ != that.s_;
+ }
+ inline bool operator < (const Id& that) const {
+ return s_ < that.s_;
+ }
+ inline bool operator > (const Id& that) const {
+ return s_ > that.s_;
+ }
+
+ public:
+ // Three functions used to work with our proto buffers.
+ std::string GetServerId() const;
+ static Id CreateFromServerId(const std::string& server_id);
+ // This should only be used if you get back a reference to a local
+ // id from the server. Returns a client only opaque id.
+ static Id CreateFromClientString(const std::string& local_id);
+ protected:
+ std::string s_;
+};
+
+extern const Id kNullId;
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
diff --git a/chrome/browser/sync/syncable/syncable_id_unittest.cc b/chrome/browser/sync/syncable/syncable_id_unittest.cc
new file mode 100644
index 0000000..b592ad5
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+#include <vector>
+
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::vector;
+
+namespace syncable {
+
+using browser_sync::TestIdFactory;
+
+class SyncableIdTest : public testing::Test { };
+
+TEST(SyncableIdTest, TestIDCreation) {
+ vector<Id> v;
+ v.push_back(TestIdFactory::FromNumber(5));
+ v.push_back(TestIdFactory::FromNumber(1));
+ v.push_back(TestIdFactory::FromNumber(-5));
+ v.push_back(TestIdFactory::MakeLocal("A"));
+ v.push_back(TestIdFactory::MakeLocal("B"));
+ v.push_back(TestIdFactory::MakeServer("A"));
+ v.push_back(TestIdFactory::MakeServer("B"));
+ v.push_back(Id::CreateFromServerId("-5"));
+ v.push_back(Id::CreateFromClientString("A"));
+ v.push_back(Id::CreateFromServerId("A"));
+
+ for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
+ for (vector<Id>::iterator j = v.begin(); j != i; ++j) {
+ ASSERT_NE(*i, *j) << "mis equated two distinct ids";
+ }
+ ASSERT_EQ(*i, *i) << "self-equality failed";
+ Id copy1 = *i;
+ Id copy2 = *i;
+ ASSERT_EQ(copy1, copy2) << "equality after copy failed";
+ }
+}
+
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
new file mode 100644
index 0000000..63bc153
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -0,0 +1,1554 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <limits>
+#include <string>
+
+// TODO(ncarter): Winnow down the OS-specific includes from the test
+// file.
+#if defined(OS_WINDOWS)
+#include <tchar.h>
+#include <atlbase.h>
+#include <process.h>
+#endif // defined(OS_WINDOWS)
+
+#if !defined(OS_WINDOWS)
+#define MAX_PATH PATH_MAX
+#include <strstream>
+#include <ostream>
+#include <stdio.h>
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#include <sys/times.h>
+#endif // !defined(OS_WINDOWS)
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+using browser_sync::TestIdFactory;
+using std::cout;
+using std::endl;
+using std::string;
+
+namespace syncable {
+
+// A lot of these tests were written expecting to be able to read and
+// write object data on entries. However, the design has changed.
+void PutDataAsExtendedAttribute(WriteTransaction *wtrans,
+ MutableEntry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ ExtendedAttributeKey key(e->Get(META_HANDLE), PSTR("DATA"));
+ MutableExtendedAttribute attr(wtrans, CREATE, key);
+ Blob bytes_blob(bytes, bytes + bytes_length);
+ attr.mutable_value()->swap(bytes_blob);
+}
+
+void ExpectDataFromExtendedAttributeEquals(BaseTransaction *trans,
+ Entry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ Blob expected_value(bytes, bytes + bytes_length);
+ ExtendedAttributeKey key(e->Get(META_HANDLE), PSTR("DATA"));
+ ExtendedAttribute attr(trans, GET_BY_HANDLE, key);
+ EXPECT_FALSE(attr.is_deleted());
+ EXPECT_EQ(expected_value, attr.value());
+}
+
+
+TEST(Syncable, General) {
+ remove("SimpleTest.sqlite3");
+ Directory dir;
+ dir.Open(PSTR("SimpleTest.sqlite3"), PSTR("SimpleTest"));
+ bool entry_exists = false;
+ int64 metahandle;
+ const Id id = TestIdFactory::FromNumber(99);
+ // Test simple read operations.
+ {
+ ReadTransaction rtrans(&dir, __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, id);
+ if (e.good()) {
+ entry_exists = true;
+ metahandle = e.Get(META_HANDLE);
+ }
+ Directory::ChildHandles child_handles;
+ dir.GetChildHandles(&rtrans, rtrans.root_id(), &child_handles);
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i)
+ cout << *i << endl;
+
+ Entry e2(&rtrans, GET_BY_PATH, PSTR("/Hello\\World/"));
+ }
+
+ // Test creating a new meta entry.
+ {
+ WriteTransaction wtrans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), PSTR("Jeff"));
+ ASSERT_TRUE(entry_exists ? !me.good() : me.good());
+ if (me.good()) {
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ metahandle = me.Get(META_HANDLE);
+ }
+ }
+
+ // Test writing data to an entity.
+ static const char s[] = "Hello World.";
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH,
+ PathString(kPathSeparator) + PSTR("Jeff"));
+ ASSERT_TRUE(e.good());
+ PutDataAsExtendedAttribute(&trans, &e, s, sizeof(s));
+ }
+
+ // Test reading back the name contents that we just wrote.
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH,
+ PathString(kPathSeparator) + PSTR("Jeff"));
+ ASSERT_TRUE(e.good());
+ ExpectDataFromExtendedAttributeEquals(&trans, &e, s, sizeof(s));
+ }
+
+ // Now delete it.
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, CREATE, trans.root_id(), PSTR("New File"));
+ e.Put(IS_DEL, true);
+ }
+
+ dir.SaveChanges();
+}
+
+TEST(Syncable, NameClassTest) {
+ const PathString foo(PSTR("foo"));
+ const PathString bar(PSTR("bar"));
+
+ Name name1(foo);
+ EXPECT_EQ(name1.value(), foo);
+ EXPECT_EQ(name1.db_value(), foo);
+ EXPECT_FALSE(name1.HasBeenSanitized());
+ EXPECT_TRUE(name1.GetUnsanitizedName().empty());
+
+ Name name2(foo, foo);
+ EXPECT_EQ(name2.value(), foo);
+ EXPECT_EQ(name2.db_value(), foo);
+ EXPECT_FALSE(name2.HasBeenSanitized());
+ EXPECT_TRUE(name2.GetUnsanitizedName().empty());
+
+ Name name3(foo, bar);
+ EXPECT_EQ(name3.value(), bar);
+ EXPECT_EQ(name3.db_value(), foo);
+ EXPECT_TRUE(name3.HasBeenSanitized());
+ EXPECT_EQ(name3.GetUnsanitizedName(), bar);
+
+ EXPECT_TRUE(name1 == name2);
+ EXPECT_FALSE(name1 != name2);
+ EXPECT_FALSE(name2 == name3);
+ EXPECT_TRUE(name2 != name3);
+}
+
+namespace {
+
+// A Directory whose backing store always fails SaveChanges by returning false.
+class TestUnsaveableDirectory : public Directory {
+ public:
+ class UnsaveableBackingStore : public DirectoryBackingStore {
+ public:
+ UnsaveableBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath)
+ : DirectoryBackingStore(dir_name, backing_filepath) { }
+ virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot) {
+ return false;
+ }
+ };
+ virtual DirectoryBackingStore* CreateBackingStore(
+ const PathString& dir_name, const PathString& backing_filepath) {
+ return new UnsaveableBackingStore(dir_name, backing_filepath);
+ }
+};
+
+// Test suite for syncable::Directory.
+class SyncableDirectoryTest : public testing::Test {
+ protected:
+ static const PathString kFilePath;
+ static const PathString kName;
+ static const PathChar *kSqlite3File;
+ static const Id kId;
+
+ // SetUp() is called before each test case is run.
+ // The sqlite3 DB is deleted before each test is run.
+ virtual void SetUp() {
+ PathRemove(PathString(kSqlite3File));
+ dir_.reset(new Directory());
+ ASSERT_TRUE(dir_.get());
+ ASSERT_EQ(OPENED, dir_->Open(kFilePath, kName));
+ ASSERT_TRUE(dir_->good());
+ }
+
+ virtual void TearDown() {
+ // This also closes file handles.
+ dir_->SaveChanges();
+ dir_.reset();
+ PathRemove(PathString(kSqlite3File));
+ }
+
+ scoped_ptr<Directory> dir_;
+
+ // Creates an empty entry and sets the ID field to the default kId.
+ void CreateEntry(const PathString &entryname) {
+ CreateEntry(entryname, kId);
+ }
+
+ // Creates an empty entry and sets the ID field to id.
+ void CreateEntry(const PathString &entryname, const int id) {
+ CreateEntry(entryname, TestIdFactory::FromNumber(id));
+ }
+ void CreateEntry(const PathString &entryname, Id id) {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), entryname);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(IS_UNSYNCED, true);
+ }
+
+ void ValidateEntry(BaseTransaction *trans, int64 id, bool check_name,
+ PathString name, int64 base_version, int64 server_version, bool is_del);
+ void CreateAndCheck(WriteTransaction *trans, int64 parent_id, int64 id,
+ PathString name, PathString server_name, int64 version,
+ bool set_server_fields, bool is_dir, bool add_to_lru, int64 *meta_handle);
+};
+
+const PathString SyncableDirectoryTest::kFilePath(PSTR("Test.sqlite3"));
+const PathChar* SyncableDirectoryTest::kSqlite3File(PSTR("Test.sqlite3"));
+const PathString SyncableDirectoryTest::kName(PSTR("Foo"));
+const Id SyncableDirectoryTest::kId(TestIdFactory::FromNumber(-99));
+
+TEST_F(SyncableDirectoryTest, TestBasicLookupNonExistantID) {
+ ReadTransaction rtrans(dir_.get(), __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, kId);
+ ASSERT_FALSE(e.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
+ CreateEntry(PSTR("rtc"));
+ ReadTransaction rtrans(dir_.get(), __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, kId);
+ ASSERT_TRUE(e.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestBasicCaseSensitivity) {
+ PathString name = PSTR("RYAN");
+ PathString conflicting_name = PSTR("ryan");
+ CreateEntry(name);
+
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), conflicting_name);
+ ASSERT_FALSE(me.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestDelete) {
+ PathString name = PSTR("peanut butter jelly time");
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e1(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e1.good());
+ ASSERT_TRUE(e1.Put(IS_DEL, true));
+ MutableEntry e2(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e2.good());
+ ASSERT_TRUE(e2.Put(IS_DEL, true));
+ MutableEntry e3(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e3.good());
+ ASSERT_TRUE(e3.Put(IS_DEL, true));
+
+ ASSERT_TRUE(e3.Put(IS_DEL, false));
+ ASSERT_FALSE(e1.Put(IS_DEL, false));
+ ASSERT_FALSE(e2.Put(IS_DEL, false));
+ ASSERT_TRUE(e3.Put(IS_DEL, true));
+
+ ASSERT_TRUE(e1.Put(IS_DEL, false));
+ ASSERT_FALSE(e2.Put(IS_DEL, false));
+ ASSERT_FALSE(e3.Put(IS_DEL, false));
+ ASSERT_TRUE(e1.Put(IS_DEL, true));
+}
+
+TEST_F(SyncableDirectoryTest, TestGetFullPathNeverCrashes) {
+ PathString dirname = PSTR("honey"),
+ childname = PSTR("jelly");
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e1(&trans, CREATE, trans.root_id(), dirname);
+ ASSERT_TRUE(e1.good());
+ ASSERT_TRUE(e1.Put(IS_DIR, true));
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), childname);
+ ASSERT_TRUE(e2.good());
+ PathString path = GetFullPath(&trans, e2);
+ ASSERT_FALSE(path.empty());
+ // Give the child a parent that doesn't exist.
+ e2.Put(PARENT_ID, TestIdFactory::FromNumber(42));
+ path = GetFullPath(&trans, e2);
+ ASSERT_TRUE(path.empty());
+ // Done testing, make sure CheckTreeInvariants doesn't choke.
+ e2.Put(PARENT_ID, e1.Get(ID));
+ e2.Put(IS_DEL, true);
+ e1.Put(IS_DEL, true);
+}
+
+TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
+ Directory::UnsyncedMetaHandles handles;
+ int64 handle1, handle2;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("abba"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), PSTR("bread"));
+ ASSERT_TRUE(e2.good());
+ handle2 = e2.Get(META_HANDLE);
+ e2.Put(BASE_VERSION, 1);
+ e2.Put(ID, TestIdFactory::FromNumber(102));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e3.good());
+ e3.Put(IS_UNSYNCED, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle1 == handles[0]);
+
+ MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(e4.good());
+ e4.Put(IS_UNSYNCED, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(2, handles.size());
+ if (handle1 == handles[0]) {
+ ASSERT_TRUE(handle2 == handles[1]);
+ } else {
+ ASSERT_TRUE(handle2 == handles[0]);
+ ASSERT_TRUE(handle1 == handles[1]);
+ }
+
+ MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e5.good());
+ ASSERT_TRUE(e5.Get(IS_UNSYNCED));
+ ASSERT_TRUE(e5.Put(IS_UNSYNCED, false));
+ ASSERT_FALSE(e5.Get(IS_UNSYNCED));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle2 == handles[0]);
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestGetUnappliedUpdates) {
+ Directory::UnappliedUpdateMetaHandles handles;
+ int64 handle1, handle2;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("abba"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(IS_UNAPPLIED_UPDATE, false);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ e1.Put(IS_DIR, true);
+
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), PSTR("bread"));
+ ASSERT_TRUE(e2.good());
+ handle2 = e2.Get(META_HANDLE);
+ e2.Put(IS_UNAPPLIED_UPDATE, false);
+ e2.Put(BASE_VERSION, 1);
+ e2.Put(ID, TestIdFactory::FromNumber(102));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e3.good());
+ e3.Put(IS_UNAPPLIED_UPDATE, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle1 == handles[0]);
+
+ MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(e4.good());
+ e4.Put(IS_UNAPPLIED_UPDATE, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(2, handles.size());
+ if (handle1 == handles[0]) {
+ ASSERT_TRUE(handle2 == handles[1]);
+ } else {
+ ASSERT_TRUE(handle2 == handles[0]);
+ ASSERT_TRUE(handle1 == handles[1]);
+ }
+
+ MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e5.good());
+ e5.Put(IS_UNAPPLIED_UPDATE, false);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle2 == handles[0]);
+ }
+}
+
+
+TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
+ // Try to evoke a check failure...
+ TestIdFactory id_factory;
+ int64 grandchild_handle, twin_handle;
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, id_factory.root(), PSTR("Bob"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(ID, id_factory.NewServerId());
+ parent.Put(BASE_VERSION, 1);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(child.good());
+ child.Put(IS_DIR, true);
+ child.Put(ID, id_factory.NewServerId());
+ child.Put(BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(ID, id_factory.NewServerId());
+ grandchild.Put(BASE_VERSION, 1);
+ ASSERT_TRUE(grandchild.Put(IS_DEL, true));
+ MutableEntry twin(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(twin.good());
+ ASSERT_TRUE(twin.Put(IS_DEL, true));
+ ASSERT_TRUE(grandchild.Put(IS_DEL, false));
+ ASSERT_FALSE(twin.Put(IS_DEL, false));
+ grandchild_handle = grandchild.Get(META_HANDLE);
+ twin_handle = twin.Get(META_HANDLE);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry grandchild(&wtrans, GET_BY_HANDLE, grandchild_handle);
+ grandchild.Put(IS_DEL, true); // Used to CHECK fail here.
+ }
+}
+
+static inline bool IsLegalNewParent(const Entry& a, const Entry& b) {
+ return IsLegalNewParent(a.trans(), a.Get(ID), b.Get(ID));
+}
+
+TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
+ TestIdFactory id_factory;
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ Entry root(&wtrans, GET_BY_ID, id_factory.root());
+ ASSERT_TRUE(root.good());
+ MutableEntry parent(&wtrans, CREATE, root.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(ID, id_factory.NewServerId());
+ parent.Put(BASE_VERSION, 1);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(child.good());
+ child.Put(IS_DIR, true);
+ child.Put(ID, id_factory.NewServerId());
+ child.Put(BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(ID, id_factory.NewServerId());
+ grandchild.Put(BASE_VERSION, 1);
+
+ MutableEntry parent2(&wtrans, CREATE, root.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(parent2.good());
+ parent2.Put(IS_DIR, true);
+ parent2.Put(ID, id_factory.NewServerId());
+ parent2.Put(BASE_VERSION, 1);
+ MutableEntry child2(&wtrans, CREATE, parent2.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(child2.good());
+ child2.Put(IS_DIR, true);
+ child2.Put(ID, id_factory.NewServerId());
+ child2.Put(BASE_VERSION, 1);
+ MutableEntry grandchild2(&wtrans, CREATE, child2.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(grandchild2.good());
+ grandchild2.Put(ID, id_factory.NewServerId());
+ grandchild2.Put(BASE_VERSION, 1);
+ // resulting tree
+ // root
+ // / \
+ // parent parent2
+ // | |
+ // child child2
+ // | |
+ // grandchild grandchild2
+ ASSERT_TRUE(IsLegalNewParent(child, root));
+ ASSERT_TRUE(IsLegalNewParent(child, parent));
+ ASSERT_FALSE(IsLegalNewParent(child, child));
+ ASSERT_FALSE(IsLegalNewParent(child, grandchild));
+ ASSERT_TRUE(IsLegalNewParent(child, parent2));
+ ASSERT_TRUE(IsLegalNewParent(child, grandchild2));
+ ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+ ASSERT_FALSE(IsLegalNewParent(root, grandchild));
+ ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+}
+
+TEST_F(SyncableDirectoryTest, TestFindEntryInFolder) {
+ // Create a subdir and an entry.
+ int64 entry_handle;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
+ ASSERT_TRUE(folder.good());
+ EXPECT_TRUE(folder.Put(IS_DIR, true));
+ EXPECT_TRUE(folder.Put(IS_UNSYNCED, true));
+ MutableEntry entry(&trans, CREATE, folder.Get(ID), PSTR("entry"));
+ ASSERT_TRUE(entry.good());
+ entry_handle = entry.Get(META_HANDLE);
+ entry.Put(IS_UNSYNCED, true);
+ }
+
+ // Make sure we can find the entry in the folder.
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PathString(kPathSeparator) +
+ PSTR("folder") +
+ kPathSeparator + PSTR("entry"));
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(entry.Get(META_HANDLE), entry_handle);
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestGetByParentIdAndName) {
+ PathString name = PSTR("Bob");
+ Id id = TestIdFactory::MakeServer("ID for Bob");
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, CREATE, wtrans.root_id() /*entry id*/, name);
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(ID, id);
+ entry.Put(BASE_VERSION, 1);
+ entry.Put(IS_UNSYNCED, true);
+ }
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, GET_BY_PARENTID_AND_NAME, wtrans.root_id(),
+ name);
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(id, entry.Get(ID));
+ }
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), name);
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(id, entry.Get(ID));
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestParentIDIndexUpdate) {
+ WriteTransaction wt(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&wt, CREATE, wt.root_id(), PSTR("oldname"));
+ folder.Put(NAME, PSTR("newname"));
+ folder.Put(IS_UNSYNCED, true);
+ Entry entry(&wt, GET_BY_PATH, PSTR("newname"));
+ ASSERT_TRUE(entry.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
+ ASSERT_TRUE(folder.good());
+ ASSERT_TRUE(folder.Put(IS_DIR, true));
+ ASSERT_TRUE(folder.Put(IS_DEL, true));
+ Entry gone(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("folder"));
+ ASSERT_FALSE(gone.good());
+ ASSERT_TRUE(folder.PutParentIdAndName(trans.root_id(),
+ Name(PSTR("new_name"))));
+}
+
+TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("CaseChange"));
+ ASSERT_TRUE(folder.good());
+ EXPECT_TRUE(folder.PutParentIdAndName(trans.root_id(),
+ Name(PSTR("CASECHANGE"))));
+ EXPECT_TRUE(folder.Put(IS_DEL, true));
+}
+
+TEST_F(SyncableDirectoryTest, TestShareInfo) {
+ dir_->set_last_sync_timestamp(100);
+ dir_->set_store_birthday("Jan 31st");
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ EXPECT_EQ(100, dir_->last_sync_timestamp());
+ EXPECT_EQ("Jan 31st", dir_->store_birthday());
+ }
+ dir_->set_last_sync_timestamp(200);
+ dir_->set_store_birthday("April 10th");
+ dir_->SaveChanges();
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ EXPECT_EQ(200, dir_->last_sync_timestamp());
+ EXPECT_EQ("April 10th", dir_->store_birthday());
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestSimpleFieldsPreservedDuringSaveChanges) {
+ Id id = TestIdFactory::FromNumber(1);
+ EntryKernel create_pre_save, update_pre_save;
+ EntryKernel create_post_save, update_post_save;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("Create"));
+ MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, id);
+ create.Put(IS_UNSYNCED, true);
+ update.Put(IS_UNAPPLIED_UPDATE, true);
+ create_pre_save = create.GetKernelCopy();
+ update_pre_save = update.GetKernelCopy();
+ }
+ dir_->SaveChanges();
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry create(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("Create"));
+ Entry update(&trans, GET_BY_ID, id);
+ create_post_save = create.GetKernelCopy();
+ update_post_save = update.GetKernelCopy();
+ }
+ int i = BEGIN_FIELDS;
+ for ( ; i < INT64_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((Int64Field)i),
+ create_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((Int64Field)i),
+ update_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ }
+ for ( ; i < ID_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((IdField)i),
+ create_post_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((IdField)i),
+ update_pre_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ }
+ for ( ; i < BIT_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((BitField)i),
+ create_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((BitField)i),
+ update_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ }
+ for ( ; i < STRING_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((StringField)i),
+ create_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((StringField)i),
+ update_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
+ int64 handle1 = 0;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("aguilera"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ dir_.reset(new TestUnsaveableDirectory());
+ ASSERT_TRUE(dir_.get());
+ ASSERT_EQ(OPENED, dir_->Open(kFilePath, kName));
+ ASSERT_TRUE(dir_->good());
+ int64 handle2 = 0;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(aguilera.good());
+ aguilera.Put(NAME, PSTR("christina"));
+ ASSERT_TRUE(aguilera.GetKernelCopy().dirty[NAME]);
+
+ MutableEntry kids_on_block(&trans, CREATE, trans.root_id(), PSTR("kids"));
+ ASSERT_TRUE(kids_on_block.good());
+ handle2 = kids_on_block.Get(META_HANDLE);
+ kids_on_block.Put(BASE_VERSION, 1);
+ kids_on_block.Put(IS_DIR, true);
+ kids_on_block.Put(ID, TestIdFactory::FromNumber(102));
+ EXPECT_TRUE(kids_on_block.Get(IS_NEW));
+ }
+
+ // We are using an unsaveable directory, so this can't succeed. However,
+ // the HandleSaveChangesFailure code path should have been triggered.
+ ASSERT_FALSE(dir_->SaveChanges());
+
+ // Make sure things were rolled back and the world is as it was before call.
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry e1(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e1.good());
+ const EntryKernel& aguilera = e1.GetKernelCopy();
+ Entry kids_on_block(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(kids_on_block.good());
+
+ EXPECT_TRUE(aguilera.dirty[NAME]);
+ EXPECT_TRUE(kids_on_block.Get(IS_NEW));
+ }
+}
+
+
+void SyncableDirectoryTest::ValidateEntry(BaseTransaction *trans, int64 id,
+ bool check_name, PathString name, int64 base_version, int64 server_version,
+ bool is_del) {
+ Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
+ ASSERT_TRUE(e.good());
+ if (check_name)
+ ASSERT_EQ(name, e.Get(NAME));
+ ASSERT_EQ(base_version, e.Get(BASE_VERSION));
+ ASSERT_EQ(server_version, e.Get(SERVER_VERSION));
+ ASSERT_EQ(is_del, e.Get(IS_DEL));
+}
+
+TEST(SyncableDirectoryManager, TestFileRelease) {
+ DirectoryManager dm(PSTR("."));
+ ASSERT_TRUE(dm.Open(PSTR("ScopeTest")));
+ {
+ ScopedDirLookup(&dm, PSTR("ScopeTest"));
+ }
+ dm.Close(PSTR("ScopeTest"));
+ ASSERT_EQ(0, PathRemove(dm.GetSyncDataDatabasePath()));
+}
+
+static void* OpenTestThreadMain(void* arg) {
+ DirectoryManager* const dm = reinterpret_cast<DirectoryManager*>(arg);
+ CHECK(dm->Open(PSTR("Open")));
+ return 0;
+}
+
+TEST(SyncableDirectoryManager, ThreadOpenTest) {
+ DirectoryManager dm(PSTR("."));
+ pthread_t thread;
+ ASSERT_EQ(0, pthread_create(&thread, 0, OpenTestThreadMain, &dm));
+ void* result;
+ ASSERT_EQ(0, pthread_join(thread, &result));
+ {
+ ScopedDirLookup dir(&dm, PSTR("Open"));
+ ASSERT_TRUE(dir.good());
+ }
+ dm.Close(PSTR("Open"));
+ ScopedDirLookup dir(&dm, PSTR("Open"));
+ ASSERT_FALSE(dir.good());
+}
+
+namespace ThreadBug1 {
+ struct Step {
+ PThreadMutex mutex;
+ PThreadCondVar condvar;
+ int number;
+ int64 metahandle;
+ };
+ struct ThreadArg {
+ int role; // 0 or 1, meaning this thread does the odd or event steps.
+ Step* step;
+ DirectoryManager* dirman;
+ };
+
+ void* ThreadMain(void* arg) {
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ const int role = args->role;
+ Step* const step = args->step;
+ DirectoryManager* const dirman = args->dirman;
+ const PathString dirname = PSTR("ThreadBug1");
+ PThreadScopedLock<PThreadMutex> lock(&step->mutex);
+ while (step->number < 3) {
+ while (step->number % 2 != role)
+ pthread_cond_wait(&step->condvar.condvar_, &step->mutex.mutex_);
+ switch (step->number) {
+ case 0:
+ dirman->Open(dirname);
+ break;
+ case 1:
+ {
+ dirman->Close(dirname);
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&trans, CREATE, trans.root_id(), PSTR("Jeff"));
+ step->metahandle = me.Get(META_HANDLE);
+ me.Put(IS_UNSYNCED, true);
+ }
+ break;
+ case 2:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_HANDLE, step->metahandle);
+ CHECK(e.good()); // Failed due to ThreadBug1
+ }
+ dirman->Close(dirname);
+ break;
+ }
+ step->number += 1;
+ pthread_cond_signal(&step->condvar.condvar_);
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectoryManager, ThreadBug1) {
+ using ThreadBug1::Step;
+ using ThreadBug1::ThreadArg;
+ using ThreadBug1::ThreadMain;
+
+ Step step;
+ step.number = 0;
+ DirectoryManager dirman(PSTR("."));
+ ThreadArg arg1 = { 0, &step, &dirman };
+ ThreadArg arg2 = { 1, &step, &dirman };
+ pthread_t thread1, thread2;
+ ASSERT_EQ(0, pthread_create(&thread1, NULL, &ThreadMain, &arg1));
+ ASSERT_EQ(0, pthread_create(&thread2, NULL, &ThreadMain, &arg2));
+ void* retval;
+ ASSERT_EQ(0, pthread_join(thread1, &retval));
+ ASSERT_EQ(0, pthread_join(thread2, &retval));
+}
+
+namespace DirectoryKernelStalenessBug {
+ // The in-memory information would get out of sync because a
+ // directory would be closed and re-opened, and then an old
+ // Directory::Kernel with stale information would get saved to the db.
+ typedef ThreadBug1::Step Step;
+ typedef ThreadBug1::ThreadArg ThreadArg;
+
+ void* ThreadMain(void* arg) {
+ const char test_bytes[] = "test data";
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ const int role = args->role;
+ Step* const step = args->step;
+ DirectoryManager* const dirman = args->dirman;
+ const PathString dirname = PSTR("DirectoryKernelStalenessBug");
+ PThreadScopedLock<PThreadMutex> lock(&step->mutex);
+ while (step->number < 4) {
+ while (step->number % 2 != role)
+ pthread_cond_wait(&step->condvar.condvar_, &step->mutex.mutex_);
+ switch (step->number) {
+ case 0:
+ {
+ // Clean up remnants of earlier test runs.
+ PathRemove(dirman->GetSyncDataDatabasePath());
+ // Test.
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&trans, CREATE, trans.root_id(), PSTR("Jeff"));
+ me.Put(BASE_VERSION, 1);
+ me.Put(ID, TestIdFactory::FromNumber(100));
+ PutDataAsExtendedAttribute(&trans, &me, test_bytes,
+ sizeof(test_bytes));
+ }
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ dir->SaveChanges();
+ }
+ dirman->CloseAllDirectories();
+ break;
+ case 1:
+ {
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ }
+ break;
+ case 2:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ }
+ break;
+ case 3:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_PATH, PSTR("Jeff"));
+ ExpectDataFromExtendedAttributeEquals(&trans, &e, test_bytes,
+ sizeof(test_bytes));
+ }
+ // Same result as CloseAllDirectories, but more code coverage.
+ dirman->Close(dirname);
+ break;
+ }
+ step->number += 1;
+ pthread_cond_signal(&step->condvar.condvar_);
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectoryManager, DirectoryKernelStalenessBug) {
+ using DirectoryKernelStalenessBug::Step;
+ using DirectoryKernelStalenessBug::ThreadArg;
+ using DirectoryKernelStalenessBug::ThreadMain;
+
+ Step step;
+ step.number = 0;
+ DirectoryManager dirman(PSTR("."));
+ ThreadArg arg1 = { 0, &step, &dirman };
+ ThreadArg arg2 = { 1, &step, &dirman };
+ pthread_t thread1, thread2;
+ ASSERT_EQ(0, pthread_create(&thread1, NULL, &ThreadMain, &arg1));
+ ASSERT_EQ(0, pthread_create(&thread2, NULL, &ThreadMain, &arg2));
+ void* retval;
+ ASSERT_EQ(0, pthread_join(thread1, &retval));
+ ASSERT_EQ(0, pthread_join(thread2, &retval));
+}
+
+timespec operator + (const timespec& a, const timespec& b) {
+ const long nanos = a.tv_nsec + b.tv_nsec;
+ static const long nanos_per_second = 1000000000;
+ timespec r = { a.tv_sec + b.tv_sec + (nanos / nanos_per_second),
+ nanos % nanos_per_second };
+ return r;
+}
+
+void SleepMs(int milliseconds) {
+#ifdef OS_WINDOWS
+ Sleep(milliseconds);
+#else
+ usleep(milliseconds * 1000);
+#endif
+}
+
+namespace StressTransaction {
+ struct Globals {
+ DirectoryManager* dirman;
+ PathString dirname;
+ };
+
+ struct ThreadArg {
+ Globals* globals;
+ int thread_number;
+ };
+
+ void* ThreadMain(void* arg) {
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ Globals* const globals = args->globals;
+ ScopedDirLookup dir(globals->dirman, globals->dirname);
+ CHECK(dir.good());
+ int entry_count = 0;
+ PathString path_name;
+ for (int i = 0; i < 20; ++i) {
+ const int rand_action = rand() % 10;
+ if (rand_action < 4 && !path_name.empty()) {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), path_name);
+ SleepMs(rand() % 10);
+ CHECK(e.good());
+ } else {
+ string unique_name = StringPrintf("%d.%d", args->thread_number,
+ entry_count++);
+ path_name.assign(unique_name.begin(), unique_name.end());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, CREATE, trans.root_id(), path_name);
+ CHECK(e.good());
+ SleepMs(rand() % 20);
+ e.Put(IS_UNSYNCED, true);
+ if (e.Put(ID, TestIdFactory::FromNumber(rand())) &&
+ e.Get(ID).ServerKnows() && !e.Get(ID).IsRoot())
+ e.Put(BASE_VERSION, 1);
+ }
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectory, StressTransactions) {
+ using StressTransaction::Globals;
+ using StressTransaction::ThreadArg;
+ using StressTransaction::ThreadMain;
+
+ DirectoryManager dirman(PSTR("."));
+ Globals globals;
+ globals.dirname = PSTR("stress");
+ globals.dirman = &dirman;
+ PathRemove(dirman.GetSyncDataDatabasePath());
+ dirman.Open(globals.dirname);
+ const int kThreadCount = 7;
+ pthread_t threads[kThreadCount];
+ ThreadArg thread_args[kThreadCount];
+ for (int i = 0; i < kThreadCount; ++i) {
+ thread_args[i].thread_number = i;
+ thread_args[i].globals = &globals;
+ ASSERT_EQ(0, pthread_create(threads + i, NULL, &ThreadMain,
+ thread_args + i));
+ }
+ void* retval;
+ for (pthread_t* i = threads; i < threads + kThreadCount; ++i)
+ ASSERT_EQ(0, pthread_join(*i, &retval));
+ dirman.Close(globals.dirname);
+ PathRemove(dirman.GetSyncDataDatabasePath());
+}
+
+static PathString UTF8ToPathStringQuick(const char *str) {
+ PathString ret;
+ CHECK(browser_sync::UTF8ToPathString(str, strlen(str), &ret));
+ return ret;
+}
+
+// returns number of chars used. max possible is 4
+// This algorithm was coded from the table at
+// http://en.wikipedia.org/w/index.php?title=UTF-8&oldid=153391259
+// there are no endian issues.
+static int UTF32ToUTF8(uint32 incode, unsigned char *out) {
+ if (incode <= 0x7f) {
+ out[0] = incode;
+ return 1;
+ }
+ if (incode <= 0x7ff) {
+ out[0] = 0xC0;
+ out[0] |= (incode >> 6);
+ out[1] = 0x80;
+ out[1] |= (incode & 0x3F);
+ return 2;
+ }
+ if (incode <= 0xFFFF) {
+ if ((incode > 0xD7FF) && (incode < 0xE000))
+ return 0;
+ out[0] = 0xE0;
+ out[0] |= (incode >> 12);
+ out[1] = 0x80;
+ out[1] |= (incode >> 6) & 0x3F;
+ out[2] = 0x80;
+ out[2] |= incode & 0x3F;
+ return 3;
+ }
+ if (incode <= 0x10FFFF) {
+ out[0] = 0xF0;
+ out[0] |= incode >> 18;
+ out[1] = 0x80;
+ out[1] |= (incode >> 12) & 0x3F;
+ out[2] = 0x80;
+ out[2] |= (incode >> 6) & 0x3F;
+ out[3] = 0x80;
+ out[3] |= incode & 0x3F;
+ return 4;
+ }
+ return 0;
+}
+
+TEST(Syncable, ComparePathNames) {
+ struct {
+ char a;
+ char b;
+ int expected_result;
+ } tests[] = {
+ { 'A', 'A', 0 },
+ { 'A', 'a', 0 },
+ { 'a', 'A', 0 },
+ { 'a', 'a', 0 },
+ { 'A', 'B', -1 },
+ { 'A', 'b', -1 },
+ { 'a', 'B', -1 },
+ { 'a', 'b', -1 },
+ { 'B', 'A', 1 },
+ { 'B', 'a', 1 },
+ { 'b', 'A', 1 },
+ { 'b', 'a', 1 } };
+ for (int i = 0; i < ARRAYSIZE(tests); ++i) {
+ PathString a(1, tests[i].a);
+ PathString b(1, tests[i].b);
+ const int result = ComparePathNames(a, b);
+ if (result != tests[i].expected_result) {
+ ADD_FAILURE() << "ComparePathNames(" << tests[i].a << ", " << tests[i].b
+ << ") returned " << result << "; expected "
+ << tests[i].expected_result;
+ }
+ }
+
+#ifndef OS_WINDOWS
+ // This table lists (to the best of my knowledge) every pair of characters
+ // in unicode such that:
+ // for all i: tolower(kUpperToLowerMap[i].upper) = kUpperToLowerMap[i].lower
+ // This is then used to test that case-insensitive comparison of each pair
+ // returns 0 (that, that they are equal). After running the test on Mac OS X
+ // with the CFString API for comparision, the failing cases were commented
+ // out.
+ //
+ // Map of upper to lower case characters taken from
+ // ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
+ typedef struct {
+ uint32 upper; // the upper case character
+ uint32 lower; // the lower case character that upper maps to
+ } UpperToLowerMapEntry;
+ static const UpperToLowerMapEntry kUpperToLowerMap[] = {
+ // { UPPER, lower }, { UPPER, lower }, etc...
+ // some of these are commented out because they fail on some OS.
+ { 0x00041, 0x00061 }, { 0x00042, 0x00062 }, { 0x00043, 0x00063 },
+ { 0x00044, 0x00064 }, { 0x00045, 0x00065 }, { 0x00046, 0x00066 },
+ { 0x00047, 0x00067 }, { 0x00048, 0x00068 }, { 0x00049, 0x00069 },
+ { 0x0004A, 0x0006A }, { 0x0004B, 0x0006B }, { 0x0004C, 0x0006C },
+ { 0x0004D, 0x0006D }, { 0x0004E, 0x0006E }, { 0x0004F, 0x0006F },
+ { 0x00050, 0x00070 }, { 0x00051, 0x00071 }, { 0x00052, 0x00072 },
+ { 0x00053, 0x00073 }, { 0x00054, 0x00074 }, { 0x00055, 0x00075 },
+ { 0x00056, 0x00076 }, { 0x00057, 0x00077 }, { 0x00058, 0x00078 },
+ { 0x00059, 0x00079 }, { 0x0005A, 0x0007A }, { 0x000C0, 0x000E0 },
+ { 0x000C1, 0x000E1 }, { 0x000C2, 0x000E2 }, { 0x000C3, 0x000E3 },
+ { 0x000C4, 0x000E4 }, { 0x000C5, 0x000E5 }, { 0x000C6, 0x000E6 },
+ { 0x000C7, 0x000E7 }, { 0x000C8, 0x000E8 }, { 0x000C9, 0x000E9 },
+ { 0x000CA, 0x000EA }, { 0x000CB, 0x000EB }, { 0x000CC, 0x000EC },
+ { 0x000CD, 0x000ED }, { 0x000CE, 0x000EE }, { 0x000CF, 0x000EF },
+ { 0x000D0, 0x000F0 }, { 0x000D1, 0x000F1 }, { 0x000D2, 0x000F2 },
+ { 0x000D3, 0x000F3 }, { 0x000D4, 0x000F4 }, { 0x000D5, 0x000F5 },
+ { 0x000D6, 0x000F6 }, { 0x000D8, 0x000F8 }, { 0x000D9, 0x000F9 },
+ { 0x000DA, 0x000FA }, { 0x000DB, 0x000FB }, { 0x000DC, 0x000FC },
+ { 0x000DD, 0x000FD }, { 0x000DE, 0x000FE },
+ { 0x00100, 0x00101 }, { 0x00102, 0x00103 }, { 0x00104, 0x00105 },
+ { 0x00106, 0x00107 }, { 0x00108, 0x00109 }, { 0x0010A, 0x0010B },
+ { 0x0010C, 0x0010D }, { 0x0010E, 0x0010F }, { 0x00110, 0x00111 },
+ { 0x00112, 0x00113 }, { 0x00114, 0x00115 }, { 0x00116, 0x00117 },
+ { 0x00118, 0x00119 }, { 0x0011A, 0x0011B }, { 0x0011C, 0x0011D },
+ { 0x0011E, 0x0011F }, { 0x00120, 0x00121 }, { 0x00122, 0x00123 },
+ { 0x00124, 0x00125 }, { 0x00126, 0x00127 }, { 0x00128, 0x00129 },
+ { 0x0012A, 0x0012B }, { 0x0012C, 0x0012D }, { 0x0012E, 0x0012F },
+ /*{ 0x00130, 0x00069 },*/ { 0x00132, 0x00133 }, { 0x00134, 0x00135 },
+ { 0x00136, 0x00137 }, { 0x00139, 0x0013A }, { 0x0013B, 0x0013C },
+ { 0x0013D, 0x0013E }, { 0x0013F, 0x00140 }, { 0x00141, 0x00142 },
+ { 0x00143, 0x00144 }, { 0x00145, 0x00146 }, { 0x00147, 0x00148 },
+ { 0x0014A, 0x0014B }, { 0x0014C, 0x0014D }, { 0x0014E, 0x0014F },
+ { 0x00150, 0x00151 }, { 0x00152, 0x00153 }, { 0x00154, 0x00155 },
+ { 0x00156, 0x00157 }, { 0x00158, 0x00159 }, { 0x0015A, 0x0015B },
+ { 0x0015C, 0x0015D }, { 0x0015E, 0x0015F }, { 0x00160, 0x00161 },
+ { 0x00162, 0x00163 }, { 0x00164, 0x00165 }, { 0x00166, 0x00167 },
+ { 0x00168, 0x00169 }, { 0x0016A, 0x0016B }, { 0x0016C, 0x0016D },
+ { 0x0016E, 0x0016F }, { 0x00170, 0x00171 }, { 0x00172, 0x00173 },
+ { 0x00174, 0x00175 }, { 0x00176, 0x00177 }, { 0x00178, 0x000FF },
+ { 0x00179, 0x0017A }, { 0x0017B, 0x0017C }, { 0x0017D, 0x0017E },
+ { 0x00181, 0x00253 }, { 0x00182, 0x00183 }, { 0x00184, 0x00185 },
+ { 0x00186, 0x00254 }, { 0x00187, 0x00188 }, { 0x00189, 0x00256 },
+ { 0x0018A, 0x00257 }, { 0x0018B, 0x0018C }, { 0x0018E, 0x001DD },
+ { 0x0018F, 0x00259 }, { 0x00190, 0x0025B }, { 0x00191, 0x00192 },
+ { 0x00193, 0x00260 }, { 0x00194, 0x00263 }, { 0x00196, 0x00269 },
+ { 0x00197, 0x00268 }, { 0x00198, 0x00199 }, { 0x0019C, 0x0026F },
+ { 0x0019D, 0x00272 }, { 0x0019F, 0x00275 }, { 0x001A0, 0x001A1 },
+ { 0x001A2, 0x001A3 }, { 0x001A4, 0x001A5 }, { 0x001A6, 0x00280 },
+ { 0x001A7, 0x001A8 }, { 0x001A9, 0x00283 }, { 0x001AC, 0x001AD },
+ { 0x001AE, 0x00288 }, { 0x001AF, 0x001B0 }, { 0x001B1, 0x0028A },
+ { 0x001B2, 0x0028B }, { 0x001B3, 0x001B4 }, { 0x001B5, 0x001B6 },
+ { 0x001B7, 0x00292 }, { 0x001B8, 0x001B9 }, { 0x001BC, 0x001BD },
+ { 0x001C4, 0x001C6 }, { 0x001C7, 0x001C9 }, { 0x001CA, 0x001CC },
+ { 0x001CD, 0x001CE }, { 0x001CF, 0x001D0 }, { 0x001D1, 0x001D2 },
+ { 0x001D3, 0x001D4 }, { 0x001D5, 0x001D6 }, { 0x001D7, 0x001D8 },
+ { 0x001D9, 0x001DA }, { 0x001DB, 0x001DC }, { 0x001DE, 0x001DF },
+ { 0x001E0, 0x001E1 }, { 0x001E2, 0x001E3 }, { 0x001E4, 0x001E5 },
+ { 0x001E6, 0x001E7 }, { 0x001E8, 0x001E9 }, { 0x001EA, 0x001EB },
+ { 0x001EC, 0x001ED }, { 0x001EE, 0x001EF }, { 0x001F1, 0x001F3 },
+ { 0x001F4, 0x001F5 }, { 0x001F6, 0x00195 }, { 0x001F7, 0x001BF },
+ { 0x001F8, 0x001F9 }, { 0x001FA, 0x001FB }, { 0x001FC, 0x001FD },
+ { 0x001FE, 0x001FF }, { 0x00200, 0x00201 }, { 0x00202, 0x00203 },
+ { 0x00204, 0x00205 }, { 0x00206, 0x00207 }, { 0x00208, 0x00209 },
+ { 0x0020A, 0x0020B }, { 0x0020C, 0x0020D }, { 0x0020E, 0x0020F },
+ { 0x00210, 0x00211 }, { 0x00212, 0x00213 }, { 0x00214, 0x00215 },
+ { 0x00216, 0x00217 }, { 0x00218, 0x00219 }, { 0x0021A, 0x0021B },
+ { 0x0021C, 0x0021D }, { 0x0021E, 0x0021F }, { 0x00220, 0x0019E },
+ { 0x00222, 0x00223 }, { 0x00224, 0x00225 }, { 0x00226, 0x00227 },
+ { 0x00228, 0x00229 }, { 0x0022A, 0x0022B }, { 0x0022C, 0x0022D },
+ { 0x0022E, 0x0022F }, { 0x00230, 0x00231 }, { 0x00232, 0x00233 },
+ /*{ 0x0023B, 0x0023C }, { 0x0023D, 0x0019A }, { 0x00241, 0x00294 }, */
+ { 0x00386, 0x003AC }, { 0x00388, 0x003AD }, { 0x00389, 0x003AE },
+ { 0x0038A, 0x003AF }, { 0x0038C, 0x003CC }, { 0x0038E, 0x003CD },
+ { 0x0038F, 0x003CE }, { 0x00391, 0x003B1 }, { 0x00392, 0x003B2 },
+ { 0x00393, 0x003B3 }, { 0x00394, 0x003B4 }, { 0x00395, 0x003B5 },
+ { 0x00396, 0x003B6 }, { 0x00397, 0x003B7 }, { 0x00398, 0x003B8 },
+ { 0x00399, 0x003B9 }, { 0x0039A, 0x003BA }, { 0x0039B, 0x003BB },
+ { 0x0039C, 0x003BC }, { 0x0039D, 0x003BD }, { 0x0039E, 0x003BE },
+ { 0x0039F, 0x003BF }, { 0x003A0, 0x003C0 }, { 0x003A1, 0x003C1 },
+ { 0x003A3, 0x003C3 }, { 0x003A4, 0x003C4 }, { 0x003A5, 0x003C5 },
+ { 0x003A6, 0x003C6 }, { 0x003A7, 0x003C7 }, { 0x003A8, 0x003C8 },
+ { 0x003A9, 0x003C9 }, { 0x003AA, 0x003CA }, { 0x003AB, 0x003CB },
+ { 0x003D8, 0x003D9 }, { 0x003DA, 0x003DB }, { 0x003DC, 0x003DD },
+ { 0x003DE, 0x003DF }, { 0x003E0, 0x003E1 }, { 0x003E2, 0x003E3 },
+ { 0x003E4, 0x003E5 }, { 0x003E6, 0x003E7 }, { 0x003E8, 0x003E9 },
+ { 0x003EA, 0x003EB }, { 0x003EC, 0x003ED }, { 0x003EE, 0x003EF },
+ { 0x003F4, 0x003B8 }, { 0x003F7, 0x003F8 }, { 0x003F9, 0x003F2 },
+ { 0x003FA, 0x003FB }, { 0x00400, 0x00450 }, { 0x00401, 0x00451 },
+ { 0x00402, 0x00452 }, { 0x00403, 0x00453 }, { 0x00404, 0x00454 },
+ { 0x00405, 0x00455 }, { 0x00406, 0x00456 }, { 0x00407, 0x00457 },
+ { 0x00408, 0x00458 }, { 0x00409, 0x00459 }, { 0x0040A, 0x0045A },
+ { 0x0040B, 0x0045B }, { 0x0040C, 0x0045C }, { 0x0040D, 0x0045D },
+ { 0x0040E, 0x0045E }, { 0x0040F, 0x0045F }, { 0x00410, 0x00430 },
+ { 0x00411, 0x00431 }, { 0x00412, 0x00432 }, { 0x00413, 0x00433 },
+ { 0x00414, 0x00434 }, { 0x00415, 0x00435 }, { 0x00416, 0x00436 },
+ { 0x00417, 0x00437 }, { 0x00418, 0x00438 }, { 0x00419, 0x00439 },
+ { 0x0041A, 0x0043A }, { 0x0041B, 0x0043B }, { 0x0041C, 0x0043C },
+ { 0x0041D, 0x0043D }, { 0x0041E, 0x0043E }, { 0x0041F, 0x0043F },
+ { 0x00420, 0x00440 }, { 0x00421, 0x00441 }, { 0x00422, 0x00442 },
+ { 0x00423, 0x00443 }, { 0x00424, 0x00444 }, { 0x00425, 0x00445 },
+ { 0x00426, 0x00446 }, { 0x00427, 0x00447 }, { 0x00428, 0x00448 },
+ { 0x00429, 0x00449 }, { 0x0042A, 0x0044A }, { 0x0042B, 0x0044B },
+ { 0x0042C, 0x0044C }, { 0x0042D, 0x0044D }, { 0x0042E, 0x0044E },
+ { 0x0042F, 0x0044F }, { 0x00460, 0x00461 }, { 0x00462, 0x00463 },
+ { 0x00464, 0x00465 }, { 0x00466, 0x00467 }, { 0x00468, 0x00469 },
+ { 0x0046A, 0x0046B }, { 0x0046C, 0x0046D }, { 0x0046E, 0x0046F },
+ { 0x00470, 0x00471 }, { 0x00472, 0x00473 }, { 0x00474, 0x00475 },
+ { 0x00476, 0x00477 }, { 0x00478, 0x00479 }, { 0x0047A, 0x0047B },
+ { 0x0047C, 0x0047D }, { 0x0047E, 0x0047F }, { 0x00480, 0x00481 },
+ { 0x0048A, 0x0048B }, { 0x0048C, 0x0048D }, { 0x0048E, 0x0048F },
+ { 0x00490, 0x00491 }, { 0x00492, 0x00493 }, { 0x00494, 0x00495 },
+ { 0x00496, 0x00497 }, { 0x00498, 0x00499 }, { 0x0049A, 0x0049B },
+ { 0x0049C, 0x0049D }, { 0x0049E, 0x0049F }, { 0x004A0, 0x004A1 },
+ { 0x004A2, 0x004A3 }, { 0x004A4, 0x004A5 }, { 0x004A6, 0x004A7 },
+ { 0x004A8, 0x004A9 }, { 0x004AA, 0x004AB }, { 0x004AC, 0x004AD },
+ { 0x004AE, 0x004AF }, { 0x004B0, 0x004B1 }, { 0x004B2, 0x004B3 },
+ { 0x004B4, 0x004B5 }, { 0x004B6, 0x004B7 }, { 0x004B8, 0x004B9 },
+ { 0x004BA, 0x004BB }, { 0x004BC, 0x004BD }, { 0x004BE, 0x004BF },
+ { 0x004C1, 0x004C2 }, { 0x004C3, 0x004C4 }, { 0x004C5, 0x004C6 },
+ { 0x004C7, 0x004C8 }, { 0x004C9, 0x004CA }, { 0x004CB, 0x004CC },
+ { 0x004CD, 0x004CE }, { 0x004D0, 0x004D1 }, { 0x004D2, 0x004D3 },
+ { 0x004D4, 0x004D5 }, { 0x004D6, 0x004D7 }, { 0x004D8, 0x004D9 },
+ { 0x004DA, 0x004DB }, { 0x004DC, 0x004DD }, { 0x004DE, 0x004DF },
+ { 0x004E0, 0x004E1 }, { 0x004E2, 0x004E3 }, { 0x004E4, 0x004E5 },
+ { 0x004E6, 0x004E7 }, { 0x004E8, 0x004E9 }, { 0x004EA, 0x004EB },
+ { 0x004EC, 0x004ED }, { 0x004EE, 0x004EF }, { 0x004F0, 0x004F1 },
+ { 0x004F2, 0x004F3 }, { 0x004F4, 0x004F5 }, /*{ 0x004F6, 0x004F7 }, */
+ { 0x004F8, 0x004F9 }, { 0x00500, 0x00501 }, { 0x00502, 0x00503 },
+ { 0x00504, 0x00505 }, { 0x00506, 0x00507 }, { 0x00508, 0x00509 },
+ { 0x0050A, 0x0050B }, { 0x0050C, 0x0050D }, { 0x0050E, 0x0050F },
+ { 0x00531, 0x00561 }, { 0x00532, 0x00562 }, { 0x00533, 0x00563 },
+ { 0x00534, 0x00564 }, { 0x00535, 0x00565 }, { 0x00536, 0x00566 },
+ { 0x00537, 0x00567 }, { 0x00538, 0x00568 }, { 0x00539, 0x00569 },
+ { 0x0053A, 0x0056A }, { 0x0053B, 0x0056B }, { 0x0053C, 0x0056C },
+ { 0x0053D, 0x0056D }, { 0x0053E, 0x0056E }, { 0x0053F, 0x0056F },
+ { 0x00540, 0x00570 }, { 0x00541, 0x00571 }, { 0x00542, 0x00572 },
+ { 0x00543, 0x00573 }, { 0x00544, 0x00574 }, { 0x00545, 0x00575 },
+ { 0x00546, 0x00576 }, { 0x00547, 0x00577 }, { 0x00548, 0x00578 },
+ { 0x00549, 0x00579 }, { 0x0054A, 0x0057A }, { 0x0054B, 0x0057B },
+ { 0x0054C, 0x0057C }, { 0x0054D, 0x0057D }, { 0x0054E, 0x0057E },
+ { 0x0054F, 0x0057F }, { 0x00550, 0x00580 }, { 0x00551, 0x00581 },
+ { 0x00552, 0x00582 }, { 0x00553, 0x00583 }, { 0x00554, 0x00584 },
+ { 0x00555, 0x00585 }, { 0x00556, 0x00586 }, /*{ 0x010A0, 0x02D00 },
+ { 0x010A1, 0x02D01 }, { 0x010A2, 0x02D02 }, { 0x010A3, 0x02D03 },
+ { 0x010A4, 0x02D04 }, { 0x010A5, 0x02D05 }, { 0x010A6, 0x02D06 },
+ { 0x010A7, 0x02D07 }, { 0x010A8, 0x02D08 }, { 0x010A9, 0x02D09 },
+ { 0x010AA, 0x02D0A }, { 0x010AB, 0x02D0B }, { 0x010AC, 0x02D0C },
+ { 0x010AD, 0x02D0D }, { 0x010AE, 0x02D0E }, { 0x010AF, 0x02D0F },
+ { 0x010B0, 0x02D10 }, { 0x010B1, 0x02D11 }, { 0x010B2, 0x02D12 },
+ { 0x010B3, 0x02D13 }, { 0x010B4, 0x02D14 }, { 0x010B5, 0x02D15 },
+ { 0x010B6, 0x02D16 }, { 0x010B7, 0x02D17 }, { 0x010B8, 0x02D18 },
+ { 0x010B9, 0x02D19 }, { 0x010BA, 0x02D1A }, { 0x010BB, 0x02D1B },
+ { 0x010BC, 0x02D1C }, { 0x010BD, 0x02D1D }, { 0x010BE, 0x02D1E },
+ { 0x010BF, 0x02D1F }, { 0x010C0, 0x02D20 }, { 0x010C1, 0x02D21 },
+ { 0x010C2, 0x02D22 }, { 0x010C3, 0x02D23 }, { 0x010C4, 0x02D24 },
+ { 0x010C5, 0x02D25 },*/ { 0x01E00, 0x01E01 }, { 0x01E02, 0x01E03 },
+ { 0x01E04, 0x01E05 }, { 0x01E06, 0x01E07 }, { 0x01E08, 0x01E09 },
+ { 0x01E0A, 0x01E0B }, { 0x01E0C, 0x01E0D }, { 0x01E0E, 0x01E0F },
+ { 0x01E10, 0x01E11 }, { 0x01E12, 0x01E13 }, { 0x01E14, 0x01E15 },
+ { 0x01E16, 0x01E17 }, { 0x01E18, 0x01E19 }, { 0x01E1A, 0x01E1B },
+ { 0x01E1C, 0x01E1D }, { 0x01E1E, 0x01E1F }, { 0x01E20, 0x01E21 },
+ { 0x01E22, 0x01E23 }, { 0x01E24, 0x01E25 }, { 0x01E26, 0x01E27 },
+ { 0x01E28, 0x01E29 }, { 0x01E2A, 0x01E2B }, { 0x01E2C, 0x01E2D },
+ { 0x01E2E, 0x01E2F }, { 0x01E30, 0x01E31 }, { 0x01E32, 0x01E33 },
+ { 0x01E34, 0x01E35 }, { 0x01E36, 0x01E37 }, { 0x01E38, 0x01E39 },
+ { 0x01E3A, 0x01E3B }, { 0x01E3C, 0x01E3D }, { 0x01E3E, 0x01E3F },
+ { 0x01E40, 0x01E41 }, { 0x01E42, 0x01E43 }, { 0x01E44, 0x01E45 },
+ { 0x01E46, 0x01E47 }, { 0x01E48, 0x01E49 }, { 0x01E4A, 0x01E4B },
+ { 0x01E4C, 0x01E4D }, { 0x01E4E, 0x01E4F }, { 0x01E50, 0x01E51 },
+ { 0x01E52, 0x01E53 }, { 0x01E54, 0x01E55 }, { 0x01E56, 0x01E57 },
+ { 0x01E58, 0x01E59 }, { 0x01E5A, 0x01E5B }, { 0x01E5C, 0x01E5D },
+ { 0x01E5E, 0x01E5F }, { 0x01E60, 0x01E61 }, { 0x01E62, 0x01E63 },
+ { 0x01E64, 0x01E65 }, { 0x01E66, 0x01E67 }, { 0x01E68, 0x01E69 },
+ { 0x01E6A, 0x01E6B }, { 0x01E6C, 0x01E6D }, { 0x01E6E, 0x01E6F },
+ { 0x01E70, 0x01E71 }, { 0x01E72, 0x01E73 }, { 0x01E74, 0x01E75 },
+ { 0x01E76, 0x01E77 }, { 0x01E78, 0x01E79 }, { 0x01E7A, 0x01E7B },
+ { 0x01E7C, 0x01E7D }, { 0x01E7E, 0x01E7F }, { 0x01E80, 0x01E81 },
+ { 0x01E82, 0x01E83 }, { 0x01E84, 0x01E85 }, { 0x01E86, 0x01E87 },
+ { 0x01E88, 0x01E89 }, { 0x01E8A, 0x01E8B }, { 0x01E8C, 0x01E8D },
+ { 0x01E8E, 0x01E8F }, { 0x01E90, 0x01E91 }, { 0x01E92, 0x01E93 },
+ { 0x01E94, 0x01E95 }, { 0x01EA0, 0x01EA1 }, { 0x01EA2, 0x01EA3 },
+ { 0x01EA4, 0x01EA5 }, { 0x01EA6, 0x01EA7 }, { 0x01EA8, 0x01EA9 },
+ { 0x01EAA, 0x01EAB }, { 0x01EAC, 0x01EAD }, { 0x01EAE, 0x01EAF },
+ { 0x01EB0, 0x01EB1 }, { 0x01EB2, 0x01EB3 }, { 0x01EB4, 0x01EB5 },
+ { 0x01EB6, 0x01EB7 }, { 0x01EB8, 0x01EB9 }, { 0x01EBA, 0x01EBB },
+ { 0x01EBC, 0x01EBD }, { 0x01EBE, 0x01EBF }, { 0x01EC0, 0x01EC1 },
+ { 0x01EC2, 0x01EC3 }, { 0x01EC4, 0x01EC5 }, { 0x01EC6, 0x01EC7 },
+ { 0x01EC8, 0x01EC9 }, { 0x01ECA, 0x01ECB }, { 0x01ECC, 0x01ECD },
+ { 0x01ECE, 0x01ECF }, { 0x01ED0, 0x01ED1 }, { 0x01ED2, 0x01ED3 },
+ { 0x01ED4, 0x01ED5 }, { 0x01ED6, 0x01ED7 }, { 0x01ED8, 0x01ED9 },
+ { 0x01EDA, 0x01EDB }, { 0x01EDC, 0x01EDD }, { 0x01EDE, 0x01EDF },
+ { 0x01EE0, 0x01EE1 }, { 0x01EE2, 0x01EE3 }, { 0x01EE4, 0x01EE5 },
+ { 0x01EE6, 0x01EE7 }, { 0x01EE8, 0x01EE9 }, { 0x01EEA, 0x01EEB },
+ { 0x01EEC, 0x01EED }, { 0x01EEE, 0x01EEF }, { 0x01EF0, 0x01EF1 },
+ { 0x01EF2, 0x01EF3 }, { 0x01EF4, 0x01EF5 }, { 0x01EF6, 0x01EF7 },
+ { 0x01EF8, 0x01EF9 }, { 0x01F08, 0x01F00 }, { 0x01F09, 0x01F01 },
+ { 0x01F0A, 0x01F02 }, { 0x01F0B, 0x01F03 }, { 0x01F0C, 0x01F04 },
+ { 0x01F0D, 0x01F05 }, { 0x01F0E, 0x01F06 }, { 0x01F0F, 0x01F07 },
+ { 0x01F18, 0x01F10 }, { 0x01F19, 0x01F11 }, { 0x01F1A, 0x01F12 },
+ { 0x01F1B, 0x01F13 }, { 0x01F1C, 0x01F14 }, { 0x01F1D, 0x01F15 },
+ { 0x01F28, 0x01F20 }, { 0x01F29, 0x01F21 }, { 0x01F2A, 0x01F22 },
+ { 0x01F2B, 0x01F23 }, { 0x01F2C, 0x01F24 }, { 0x01F2D, 0x01F25 },
+ { 0x01F2E, 0x01F26 }, { 0x01F2F, 0x01F27 }, { 0x01F38, 0x01F30 },
+ { 0x01F39, 0x01F31 }, { 0x01F3A, 0x01F32 }, { 0x01F3B, 0x01F33 },
+ { 0x01F3C, 0x01F34 }, { 0x01F3D, 0x01F35 }, { 0x01F3E, 0x01F36 },
+ { 0x01F3F, 0x01F37 }, { 0x01F48, 0x01F40 }, { 0x01F49, 0x01F41 },
+ { 0x01F4A, 0x01F42 }, { 0x01F4B, 0x01F43 }, { 0x01F4C, 0x01F44 },
+ { 0x01F4D, 0x01F45 }, { 0x01F59, 0x01F51 }, { 0x01F5B, 0x01F53 },
+ { 0x01F5D, 0x01F55 }, { 0x01F5F, 0x01F57 }, { 0x01F68, 0x01F60 },
+ { 0x01F69, 0x01F61 }, { 0x01F6A, 0x01F62 }, { 0x01F6B, 0x01F63 },
+ { 0x01F6C, 0x01F64 }, { 0x01F6D, 0x01F65 }, { 0x01F6E, 0x01F66 },
+ { 0x01F6F, 0x01F67 }, { 0x01F88, 0x01F80 }, { 0x01F89, 0x01F81 },
+ { 0x01F8A, 0x01F82 }, { 0x01F8B, 0x01F83 }, { 0x01F8C, 0x01F84 },
+ { 0x01F8D, 0x01F85 }, { 0x01F8E, 0x01F86 }, { 0x01F8F, 0x01F87 },
+ { 0x01F98, 0x01F90 }, { 0x01F99, 0x01F91 }, { 0x01F9A, 0x01F92 },
+ { 0x01F9B, 0x01F93 }, { 0x01F9C, 0x01F94 }, { 0x01F9D, 0x01F95 },
+ { 0x01F9E, 0x01F96 }, { 0x01F9F, 0x01F97 }, { 0x01FA8, 0x01FA0 },
+ { 0x01FA9, 0x01FA1 }, { 0x01FAA, 0x01FA2 }, { 0x01FAB, 0x01FA3 },
+ { 0x01FAC, 0x01FA4 }, { 0x01FAD, 0x01FA5 }, { 0x01FAE, 0x01FA6 },
+ { 0x01FAF, 0x01FA7 }, { 0x01FB8, 0x01FB0 }, { 0x01FB9, 0x01FB1 },
+ { 0x01FBA, 0x01F70 }, { 0x01FBB, 0x01F71 }, { 0x01FBC, 0x01FB3 },
+ { 0x01FC8, 0x01F72 }, { 0x01FC9, 0x01F73 }, { 0x01FCA, 0x01F74 },
+ { 0x01FCB, 0x01F75 }, { 0x01FCC, 0x01FC3 }, { 0x01FD8, 0x01FD0 },
+ { 0x01FD9, 0x01FD1 }, { 0x01FDA, 0x01F76 }, { 0x01FDB, 0x01F77 },
+ { 0x01FE8, 0x01FE0 }, { 0x01FE9, 0x01FE1 }, { 0x01FEA, 0x01F7A },
+ { 0x01FEB, 0x01F7B }, { 0x01FEC, 0x01FE5 }, { 0x01FF8, 0x01F78 },
+ { 0x01FF9, 0x01F79 }, { 0x01FFA, 0x01F7C }, { 0x01FFB, 0x01F7D },
+ { 0x01FFC, 0x01FF3 }, { 0x02126, 0x003C9 }, { 0x0212A, 0x0006B },
+ { 0x0212B, 0x000E5 }, { 0x02160, 0x02170 }, { 0x02161, 0x02171 },
+ { 0x02162, 0x02172 }, { 0x02163, 0x02173 }, { 0x02164, 0x02174 },
+ { 0x02165, 0x02175 }, { 0x02166, 0x02176 }, { 0x02167, 0x02177 },
+ { 0x02168, 0x02178 }, { 0x02169, 0x02179 }, { 0x0216A, 0x0217A },
+ { 0x0216B, 0x0217B }, { 0x0216C, 0x0217C }, { 0x0216D, 0x0217D },
+ { 0x0216E, 0x0217E }, { 0x0216F, 0x0217F }, { 0x024B6, 0x024D0 },
+ { 0x024B7, 0x024D1 }, { 0x024B8, 0x024D2 }, { 0x024B9, 0x024D3 },
+ { 0x024BA, 0x024D4 }, { 0x024BB, 0x024D5 }, { 0x024BC, 0x024D6 },
+ { 0x024BD, 0x024D7 }, { 0x024BE, 0x024D8 }, { 0x024BF, 0x024D9 },
+ { 0x024C0, 0x024DA }, { 0x024C1, 0x024DB }, { 0x024C2, 0x024DC },
+ { 0x024C3, 0x024DD }, { 0x024C4, 0x024DE }, { 0x024C5, 0x024DF },
+ { 0x024C6, 0x024E0 }, { 0x024C7, 0x024E1 }, { 0x024C8, 0x024E2 },
+ { 0x024C9, 0x024E3 }, { 0x024CA, 0x024E4 }, { 0x024CB, 0x024E5 },
+ { 0x024CC, 0x024E6 }, { 0x024CD, 0x024E7 }, { 0x024CE, 0x024E8 },
+ { 0x024CF, 0x024E9 }, /*{ 0x02C00, 0x02C30 }, { 0x02C01, 0x02C31 },
+ { 0x02C02, 0x02C32 }, { 0x02C03, 0x02C33 }, { 0x02C04, 0x02C34 },
+ { 0x02C05, 0x02C35 }, { 0x02C06, 0x02C36 }, { 0x02C07, 0x02C37 },
+ { 0x02C08, 0x02C38 }, { 0x02C09, 0x02C39 }, { 0x02C0A, 0x02C3A },
+ { 0x02C0B, 0x02C3B }, { 0x02C0C, 0x02C3C }, { 0x02C0D, 0x02C3D },
+ { 0x02C0E, 0x02C3E }, { 0x02C0F, 0x02C3F }, { 0x02C10, 0x02C40 },
+ { 0x02C11, 0x02C41 }, { 0x02C12, 0x02C42 }, { 0x02C13, 0x02C43 },
+ { 0x02C14, 0x02C44 }, { 0x02C15, 0x02C45 }, { 0x02C16, 0x02C46 },
+ { 0x02C17, 0x02C47 }, { 0x02C18, 0x02C48 }, { 0x02C19, 0x02C49 },
+ { 0x02C1A, 0x02C4A }, { 0x02C1B, 0x02C4B }, { 0x02C1C, 0x02C4C },
+ { 0x02C1D, 0x02C4D }, { 0x02C1E, 0x02C4E }, { 0x02C1F, 0x02C4F },
+ { 0x02C20, 0x02C50 }, { 0x02C21, 0x02C51 }, { 0x02C22, 0x02C52 },
+ { 0x02C23, 0x02C53 }, { 0x02C24, 0x02C54 }, { 0x02C25, 0x02C55 },
+ { 0x02C26, 0x02C56 }, { 0x02C27, 0x02C57 }, { 0x02C28, 0x02C58 },
+ { 0x02C29, 0x02C59 }, { 0x02C2A, 0x02C5A }, { 0x02C2B, 0x02C5B },
+ { 0x02C2C, 0x02C5C }, { 0x02C2D, 0x02C5D }, { 0x02C2E, 0x02C5E },
+ { 0x02C80, 0x02C81 }, { 0x02C82, 0x02C83 }, { 0x02C84, 0x02C85 },
+ { 0x02C86, 0x02C87 }, { 0x02C88, 0x02C89 }, { 0x02C8A, 0x02C8B },
+ { 0x02C8C, 0x02C8D }, { 0x02C8E, 0x02C8F }, { 0x02C90, 0x02C91 },
+ { 0x02C92, 0x02C93 }, { 0x02C94, 0x02C95 }, { 0x02C96, 0x02C97 },
+ { 0x02C98, 0x02C99 }, { 0x02C9A, 0x02C9B }, { 0x02C9C, 0x02C9D },
+ { 0x02C9E, 0x02C9F }, { 0x02CA0, 0x02CA1 }, { 0x02CA2, 0x02CA3 },
+ { 0x02CA4, 0x02CA5 }, { 0x02CA6, 0x02CA7 }, { 0x02CA8, 0x02CA9 },
+ { 0x02CAA, 0x02CAB }, { 0x02CAC, 0x02CAD }, { 0x02CAE, 0x02CAF },
+ { 0x02CB0, 0x02CB1 }, { 0x02CB2, 0x02CB3 }, { 0x02CB4, 0x02CB5 },
+ { 0x02CB6, 0x02CB7 }, { 0x02CB8, 0x02CB9 }, { 0x02CBA, 0x02CBB },
+ { 0x02CBC, 0x02CBD }, { 0x02CBE, 0x02CBF }, { 0x02CC0, 0x02CC1 },
+ { 0x02CC2, 0x02CC3 }, { 0x02CC4, 0x02CC5 }, { 0x02CC6, 0x02CC7 },
+ { 0x02CC8, 0x02CC9 }, { 0x02CCA, 0x02CCB }, { 0x02CCC, 0x02CCD },
+ { 0x02CCE, 0x02CCF }, { 0x02CD0, 0x02CD1 }, { 0x02CD2, 0x02CD3 },
+ { 0x02CD4, 0x02CD5 }, { 0x02CD6, 0x02CD7 }, { 0x02CD8, 0x02CD9 },
+ { 0x02CDA, 0x02CDB }, { 0x02CDC, 0x02CDD }, { 0x02CDE, 0x02CDF },
+ { 0x02CE0, 0x02CE1 }, { 0x02CE2, 0x02CE3 },*/ { 0x0FF21, 0x0FF41 },
+ { 0x0FF22, 0x0FF42 }, { 0x0FF23, 0x0FF43 }, { 0x0FF24, 0x0FF44 },
+ { 0x0FF25, 0x0FF45 }, { 0x0FF26, 0x0FF46 }, { 0x0FF27, 0x0FF47 },
+ { 0x0FF28, 0x0FF48 }, { 0x0FF29, 0x0FF49 }, { 0x0FF2A, 0x0FF4A },
+ { 0x0FF2B, 0x0FF4B }, { 0x0FF2C, 0x0FF4C }, { 0x0FF2D, 0x0FF4D },
+ { 0x0FF2E, 0x0FF4E }, { 0x0FF2F, 0x0FF4F }, { 0x0FF30, 0x0FF50 },
+ { 0x0FF31, 0x0FF51 }, { 0x0FF32, 0x0FF52 }, { 0x0FF33, 0x0FF53 },
+ { 0x0FF34, 0x0FF54 }, { 0x0FF35, 0x0FF55 }, { 0x0FF36, 0x0FF56 },
+ { 0x0FF37, 0x0FF57 }, { 0x0FF38, 0x0FF58 }, { 0x0FF39, 0x0FF59 },
+ // the following commented out ones fail on OS X 10.5 Leopard
+ { 0x0FF3A, 0x0FF5A }/*, { 0x10400, 0x10428 }, { 0x10401, 0x10429 },
+ { 0x10402, 0x1042A }, { 0x10403, 0x1042B }, { 0x10404, 0x1042C },
+ { 0x10405, 0x1042D }, { 0x10406, 0x1042E }, { 0x10407, 0x1042F },
+ { 0x10408, 0x10430 }, { 0x10409, 0x10431 }, { 0x1040A, 0x10432 },
+ { 0x1040B, 0x10433 }, { 0x1040C, 0x10434 }, { 0x1040D, 0x10435 },
+ { 0x1040E, 0x10436 }, { 0x1040F, 0x10437 }, { 0x10410, 0x10438 },
+ { 0x10411, 0x10439 }, { 0x10412, 0x1043A }, { 0x10413, 0x1043B },
+ { 0x10414, 0x1043C }, { 0x10415, 0x1043D }, { 0x10416, 0x1043E },
+ { 0x10417, 0x1043F }, { 0x10418, 0x10440 }, { 0x10419, 0x10441 },
+ { 0x1041A, 0x10442 }, { 0x1041B, 0x10443 }, { 0x1041C, 0x10444 },
+ { 0x1041D, 0x10445 }, { 0x1041E, 0x10446 }, { 0x1041F, 0x10447 },
+ { 0x10420, 0x10448 }, { 0x10421, 0x10449 }, { 0x10422, 0x1044A },
+ { 0x10423, 0x1044B }, { 0x10424, 0x1044C }, { 0x10425, 0x1044D },
+ { 0x10426, 0x1044E }, { 0x10427, 0x1044F } */
+ };
+ unsigned char utf8str_upper[5];
+ unsigned char utf8str_lower[5];
+ for (int i = 0; i < ARRAYSIZE(kUpperToLowerMap); i++) {
+ int len;
+ len = UTF32ToUTF8(kUpperToLowerMap[i].upper, utf8str_upper);
+ CHECK_NE(0, len);
+ utf8str_upper[len] = '\0';
+ len = UTF32ToUTF8(kUpperToLowerMap[i].lower, utf8str_lower);
+ CHECK_NE(0, len);
+ utf8str_lower[len] = '\0';
+ int result = ComparePathNames(
+ UTF8ToPathStringQuick(reinterpret_cast<char*>(utf8str_upper)),
+ UTF8ToPathStringQuick(reinterpret_cast<char*>(utf8str_lower)));
+ if (0 != result) {
+ // This ugly strstream works around an issue where using << hex on the
+ // stream for ADD_FAILURE produces "true" and "false" in the output.
+ strstream msg;
+ msg << "ComparePathNames(0x" << hex << kUpperToLowerMap[i].upper
+ << ", 0x" << hex << kUpperToLowerMap[i].lower
+ << ") returned " << dec << result << "; expected 0" << '\0';
+ ADD_FAILURE() << msg.str();
+ }
+ }
+#endif // not defined OS_WINDOWS
+}
+
+#ifdef OS_WINDOWS
+TEST(Syncable, PathNameMatch) {
+ // basic stuff, not too many otherwise we're testing the os.
+ EXPECT_TRUE(PathNameMatch(PSTR("bob"), PSTR("bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("fred")));
+ // Test our ; extension.
+ EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo;b")));
+ EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo*")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co;b")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co*")));
+ // Test our fixes for prepended spaces.
+ EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bo*")));
+ EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR(" bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR(" bob"), PSTR("bob")));
+ // Combo test
+ EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;o*")));
+ EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;ob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("b;ob"), PSTR(" b;ob")));
+ EXPECT_FALSE(PathNameMatch(PSTR(" b;ob"), PSTR("b;ob")));
+ // other whitespace should give no matches.
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("\tbob")));
+}
+#endif // OS_WINDOWS
+
+} // namespace
+
+void FakeSync(MutableEntry* e, const char* fake_id) {
+ e->Put(IS_UNSYNCED, false);
+ e->Put(BASE_VERSION, 2);
+ e->Put(ID, Id::CreateFromServerId(fake_id));
+}
+
+TEST_F(SyncableDirectoryTest, Bug1509232) {
+ const PathString a = PSTR("alpha");
+
+ CreateEntry(a, dir_.get()->NextId());
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH, a);
+ ASSERT_TRUE(e.good());
+ ExtendedAttributeKey key(e.Get(META_HANDLE), PSTR("resourcefork"));
+ MutableExtendedAttribute ext(&trans, CREATE, key);
+ ASSERT_TRUE(ext.good());
+ const char value[] = "stuff";
+ Blob value_blob(value, value + ARRAYSIZE(value));
+ ext.mutable_value()->swap(value_blob);
+ ext.delete_attribute();
+ }
+ // This call to SaveChanges used to CHECK fail.
+ dir_.get()->SaveChanges();
+}
+
+} // namespace syncable
+
+#ifdef OS_WINDOWS
+class LocalModule : public CAtlExeModuleT<LocalModule> { };
+LocalModule module_;
+
+int main(int argc, char* argv[]) {
+ testing::InitGoogleTest(&argc, argv);
+
+ // TODO(chron) Add method to change random seed.
+ const int32 test_random_seed = time(NULL);
+ cout << "Random seed: " << test_random_seed << endl;
+ LOG(INFO) << "Random seed: " << test_random_seed << endl;
+ srand(test_random_seed);
+
+ // Necessary for NewCallback, scoped to main
+ base::AtExitManager at_exit_manager;
+
+ int result = RUN_ALL_TESTS();
+ return result;
+}
+#endif
diff --git a/chrome/browser/sync/util/character_set_converters-linux.cc b/chrome/browser/sync/util/character_set_converters-linux.cc
new file mode 100644
index 0000000..96ad1b6
--- /dev/null
+++ b/chrome/browser/sync/util/character_set_converters-linux.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+#include <string>
+
+using std::string;
+
+namespace browser_sync {
+
+// Converts input_string to UTF8 and appends the result into output_string.
+void AppendPathStringToUTF8(const PathChar *wide, int size,
+ string* output_string) {
+ output_string->append(wide, size);
+}
+
+bool AppendUTF8ToPathString(const char* utf8, size_t size,
+ PathString* output_string) {
+ output_string->append(utf8, size);
+ return true;
+}
+
+void TrimPathStringToValidCharacter(PathString* string) {
+ // Constants from http://en.wikipedia.org/wiki/UTF-8
+ CHECK(string);
+ if (string->empty())
+ return;
+ if (0 == (string->at(string->length() - 1) & 0x080))
+ return;
+ int partial_enc_bytes = 0;
+ for (partial_enc_bytes = 0 ; true ; ++partial_enc_bytes) {
+ if (4 == partial_enc_bytes || partial_enc_bytes == string->length()) {
+ // original string was broken, garbage in, garbage out.
+ return;
+ }
+ PathChar c = string->at(string->length() - 1 - partial_enc_bytes);
+ if ((c & 0x0c0) == 0x080) // utf continuation char;
+ continue;
+ if ((c & 0x0e0) == 0x0e0) // 2-byte encoded char.
+ if (1 == partial_enc_bytes)
+ return;
+ else
+ break;
+ if ((c & 0x0f0) == 0xc0) // 3-byte encoded char.
+ if (2 == partial_enc_bytes)
+ return;
+ else
+ break;
+ if ((c & 0x0f8) == 0x0f0) // 4-byte encoded char.
+ if (3 == partial_enc_bytes)
+ return;
+ else
+ break;
+ }
+ string->resize(string->length() - 1 - partial_enc_bytes);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/util/character_set_converters-win32.cc b/chrome/browser/sync/util/character_set_converters-win32.cc
new file mode 100644
index 0000000..79e9281
--- /dev/null
+++ b/chrome/browser/sync/util/character_set_converters-win32.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+#include <windows.h>
+
+#include <string>
+
+using std::string;
+
+namespace browser_sync {
+
+// Converts input_string to UTF8 and appends the result into to output_string
+void AppendPathStringToUTF8(const PathChar* wide, int size,
+ string* output_string) {
+ CHECK(output_string);
+ if (0 == size)
+ return;
+
+ int needed_space = ::WideCharToMultiByte(CP_UTF8, 0, wide, size, 0, 0, 0, 0);
+ // TODO(sync): This should flag an error when we move to an api that can let
+ // utf-16 -> utf-8 fail.
+ CHECK(0 != needed_space);
+ string::size_type current_size = output_string->size();
+ output_string->resize(current_size + needed_space);
+ CHECK(0 != ::WideCharToMultiByte(CP_UTF8, 0, wide, size,
+ &(*output_string)[current_size], needed_space, 0, 0));
+}
+
+bool AppendUTF8ToPathString(const char* utf8, size_t size,
+ PathString* output_string) {
+ CHECK(output_string);
+ if (0 == size)
+ return true;
+ // TODO(sync): Do we want to force precomposed characters here?
+ int needed_wide_chars = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
+ utf8, size, 0, 0);
+ if (0 == needed_wide_chars) {
+ DWORD err = ::GetLastError();
+ if (MB_ERR_INVALID_CHARS == err)
+ return false;
+ CHECK(0 == err);
+ }
+ PathString::size_type current_length = output_string->size();
+ output_string->resize(current_length + needed_wide_chars);
+ CHECK(0 != ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8, size,
+ &(*output_string)[current_length], needed_wide_chars));
+ return true;
+}
+
+void TrimPathStringToValidCharacter(PathString* string) {
+ CHECK(string);
+ // Constants from http://en.wikipedia.org/wiki/UTF-16
+ if (string->empty())
+ return;
+ if (0x0dc00 == (string->at(string->length() - 1) & 0x0fc00))
+ string->resize(string->length() - 1);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/util/character_set_converters.cc b/chrome/browser/sync/util/character_set_converters.cc
new file mode 100644
index 0000000..a9114cf3
--- /dev/null
+++ b/chrome/browser/sync/util/character_set_converters.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+#include <string>
+
+using std::string;
+
+namespace browser_sync {
+
+void PathStringToUTF8(const PathChar* wide, int size,
+ std::string* output_string) {
+ CHECK(output_string);
+ output_string->clear();
+ AppendPathStringToUTF8(wide, size, output_string);
+}
+
+bool UTF8ToPathString(const char* utf8, size_t size,
+ PathString* output_string) {
+ CHECK(output_string);
+ output_string->clear();
+ return AppendUTF8ToPathString(utf8, size, output_string);
+};
+
+ToUTF8::ToUTF8(const PathChar* wide, size_t size) {
+ PathStringToUTF8(wide, size, &result_);
+}
+
+ToUTF8::ToUTF8(const PathString& wide) {
+ PathStringToUTF8(wide.data(), wide.length(), &result_);
+}
+
+ToUTF8::ToUTF8(const PathChar* wide) {
+ PathStringToUTF8(wide, PathLen(wide), &result_);
+}
+
+ToPathString::ToPathString(const char* utf8, size_t size) {
+ good_ = UTF8ToPathString(utf8, size, &result_);
+ good_checked_ = false;
+}
+
+ToPathString::ToPathString(const std::string& utf8) {
+ good_ = UTF8ToPathString(utf8.data(), utf8.length(), &result_);
+ good_checked_ = false;
+}
+
+ToPathString::ToPathString(const char* utf8) {
+ good_ = UTF8ToPathString(utf8, strlen(utf8), &result_);
+ good_checked_ = false;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/util/character_set_converters.h b/chrome/browser/sync/util/character_set_converters.h
new file mode 100644
index 0000000..3e614c3
--- /dev/null
+++ b/chrome/browser/sync/util/character_set_converters.h
@@ -0,0 +1,236 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_CHARACTER_SET_CONVERTERS_H_
+#define CHROME_BROWSER_SYNC_UTIL_CHARACTER_SET_CONVERTERS_H_
+
+// A pair of classes to convert UTF8 <-> UCS2 character strings.
+//
+// Note that the current implementation is limited to UCS2, whereas the
+// interface is agnostic to the wide encoding used.
+//
+// Also note that UCS2 is different from UTF-16, in that UTF-16 can encode all
+// the code points in the Unicode character set by multi-character encodings,
+// while UCS2 is limited to encoding < 2^16 code points.
+//
+// It appears that Windows support UTF-16, which means we have to be careful
+// what we feed this class.
+//
+// Usage:
+// string utf8;
+// CHECK(browser_sync::Append(wide_string, &utf8));
+// PathString bob;
+// CHECK(browser_sync::Append(utf8, &bob));
+// PathString fred = bob;
+
+#ifdef OS_LINUX
+#include <glib.h>
+#endif
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/string16.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+// Need to cast literals (Linux, OSX)
+#define STRING16_UGLY_DOUBLE_DEFINE_HACK(s) \
+ reinterpret_cast<const char16*>(L##s)
+#define STRING16(s) STRING16_UGLY_DOUBLE_DEFINE_HACK(s)
+
+using std::string;
+
+namespace browser_sync {
+
+// These 2 classes are deprecated. Instead, prefer the Append() functions.
+
+// A class to convert wide -> UTF8.
+class ToUTF8 {
+ public:
+ explicit ToUTF8(const PathChar* wide);
+ ToUTF8(const PathChar* wide, PathString::size_type size);
+ explicit ToUTF8(const PathString& wide);
+
+ // cast operators
+ operator const std::string&() const;
+ operator const char*() const;
+
+ // accessors
+ const std::string& get_string() const;
+ const char* data() const;
+ std::string::size_type byte_length() const;
+
+ private:
+ std::string result_;
+};
+
+// A class to convert UTF8 -> wide.
+class ToPathString {
+ public:
+ explicit ToPathString(const char*);
+ ToPathString(const char*, size_t size);
+ explicit ToPathString(const std::string&);
+
+ // true iff UTF-8 to wide conversion succeeded in constructor.
+ bool good() {
+ good_checked_ = true;
+ return good_;
+ }
+
+ // It's invalid to invoke the accessors or the cast operators unless the
+ // string is good and good() has been invoked at least once.
+
+ // Implicit casts to const PathString& and const PathChar*
+ operator const PathString&() const;
+ operator const PathChar*() const;
+
+ // Accessors
+ const PathString& get_string16() const;
+ const PathChar* data() const;
+ PathString::size_type length() const;
+
+ private:
+ PathString result_;
+
+ // Conversion succeeded.
+ bool good_;
+ // good() has been invoked at least once.
+ bool good_checked_;
+};
+
+// Converts the UCS2 string "wide" to UTF8 encoding and stores the result in
+// output_string.
+void PathStringToUTF8(const PathChar* wide, int size,
+ std::string* output_string);
+
+// Converts UCS2 string wide to UTF8 encoding and appends the result to
+// output_string.
+void AppendPathStringToUTF8(const PathChar* wide, int size,
+ std::string* output_string);
+
+// Converts the UTF8 encoded string "utf8" to UCS16 and stores the result in
+// output_string.
+//
+// Returns true iff conversion was successful, false otherwise.
+bool UTF8ToPathString(const char* utf8, size_t size,
+ PathString* output_string);
+
+// Converts the UTF8 encoded string "utf8" to UCS2 and appends the result in
+// output_string.
+//
+// Returns true iff conversion was successful, false otherwise.
+bool AppendUTF8ToPathString(const char* utf8, size_t size,
+ PathString* output_string);
+
+// Converts the UTF8 encoded string "utf8" to UCS2 and appends the result in
+// output_string.
+//
+// @returns true iff conversion was successful, false otherwise.
+inline bool AppendUTF8ToPathString(const std::string& utf8,
+ PathString* output_string) {
+ return AppendUTF8ToPathString(utf8.data(), utf8.length(), output_string);
+}
+
+// Converts UCS2 string wide to UTF8 encoding and appends the result to
+// output_string.
+inline void AppendPathStringToUTF8(const PathString& wide,
+ std::string* output_string) {
+ return AppendPathStringToUTF8(wide.data(), wide.length(), output_string);
+}
+
+
+inline bool Append(const PathChar* wide, int size,
+ std::string* output_string) {
+ AppendPathStringToUTF8(wide, size, output_string);
+ return true;
+}
+
+inline bool Append(const PathChar* wide, std::string* output_string) {
+ AppendPathStringToUTF8(wide, PathLen(wide), output_string);
+ return true;
+}
+
+inline bool Append(const std::string& utf8, PathString* output_string) {
+ return AppendUTF8ToPathString(utf8.data(), utf8.length(), output_string);
+}
+
+#if !PATHSTRING_IS_STD_STRING
+inline bool Append(const char* utf8, size_t size, PathString* output_string) {
+ return AppendUTF8ToPathString(utf8, size, output_string);
+}
+
+inline bool Append(const char* s, int size, std::string* output_string) {
+ output_string->append(s, size);
+ return true;
+}
+
+inline bool Append(const char* utf8, PathString* output_string) {
+ return AppendUTF8ToPathString(utf8, strlen(utf8), output_string);
+}
+
+inline bool Append(const char* s, std::string* output_string) {
+ output_string->append(s);
+ return true;
+}
+
+inline bool Append(const PathString& wide, std::string* output_string) {
+ return Append(wide.data(), wide.length(), output_string);
+}
+
+inline bool Append(const std::string& s, std::string* output_string) {
+ return Append(s.data(), s.length(), output_string);
+}
+#endif
+
+inline ToUTF8::operator const std::string&() const {
+ return result_;
+}
+
+inline ToUTF8::operator const char*() const {
+ return result_.c_str();
+}
+
+inline const std::string& ToUTF8::get_string() const {
+ return result_;
+}
+
+inline const char* ToUTF8::data() const {
+ return result_.data();
+}
+
+inline std::string::size_type ToUTF8::byte_length() const {
+ return result_.size();
+}
+
+inline ToPathString::operator const PathString&() const {
+ DCHECK(good_ && good_checked_);
+ return result_;
+}
+
+inline ToPathString::operator const PathChar*() const {
+ DCHECK(good_ && good_checked_);
+ return result_.c_str();
+}
+
+inline const PathString& ToPathString::get_string16() const {
+ DCHECK(good_ && good_checked_);
+ return result_;
+}
+
+inline const PathChar* ToPathString::data() const {
+ DCHECK(good_ && good_checked_);
+ return result_.data();
+}
+
+inline PathString::size_type ToPathString::length() const {
+ DCHECK(good_ && good_checked_);
+ return result_.length();
+}
+
+void TrimPathStringToValidCharacter(PathString* string);
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_UTIL_CHARACTER_SET_CONVERTERS_H_
diff --git a/chrome/browser/sync/util/character_set_converters_unittest.cc b/chrome/browser/sync/util/character_set_converters_unittest.cc
new file mode 100644
index 0000000..838bbd1
--- /dev/null
+++ b/chrome/browser/sync/util/character_set_converters_unittest.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using browser_sync::ToPathString;
+using browser_sync::ToUTF8;
+using browser_sync::AppendPathStringToUTF8;
+using browser_sync::AppendUTF8ToPathString;
+using browser_sync::PathStringToUTF8;
+using browser_sync::UTF8ToPathString;
+using std::string;
+
+class CharacterSetConverterTest : public testing::Test {
+};
+
+TEST_F(CharacterSetConverterTest, ASCIIConversionTest) {
+ string ascii = "Test String";
+ PathString wide = PSTR("Test String");
+ ToPathString to_wide(ascii);
+ ASSERT_TRUE(to_wide.good());
+ ToUTF8 to_utf8(wide);
+
+ // Using == as gunit doesn't handle PathString equality tests correctly (it
+ // tries to print the PathString and fails).
+ ASSERT_TRUE(PathString(wide) == to_wide.get_string16());
+ ASSERT_EQ(string(ascii), to_utf8.get_string());
+ ToPathString to_16(ascii);
+ ASSERT_TRUE(to_16.good());
+ ASSERT_TRUE(PathString(wide) == to_16.get_string16());
+#ifdef OS_WINDOWS
+ // On Linux, PathString is already UTF8
+ ASSERT_EQ(string(ascii), static_cast<string>(ToUTF8(wide)));
+#endif
+ // The next line fails the good_checked_ test. It would be a good death test
+ // but they don't work on Windows.
+ // ASSERT_TRUE(wide == ToPathString(utf8).get_string16());
+}
+
+#ifdef OS_WINDOWS
+ // On Linux, PathString is already UTF8
+TEST_F(CharacterSetConverterTest, UnicodeConversionText) {
+ // Source data obtained by running od -b on files saved in utf-8 and unicode
+ // from a text editor.
+ const char* utf8 = "\357\273\277\150\145\154\154\157\040\303\250\303\251"
+ "\302\251\342\202\254\302\243\302\245\302\256\342\204\242";
+// #ifdef IS_LITTLE_ENDIAN
+ const PathChar* wide = reinterpret_cast<const PathChar*>("\377\376\150\000"
+ "\145\000\154\000\154\000\157\000\040\000\350\000\351\000\251\000\254\040"
+ "\243\000\245\000\256\000\042\041");
+// #else
+// // This should work, but on Windows we don't have the endian
+// // macros. Since we only do conversion between 16<->8 on Windows,
+// // it's safe to assume little endian.
+// const PathChar* wide =
+// reinterpret_cast<PathChar*>("\376\377\000\150\000\145\000"
+// "\154\000\154\000\157\000\040\000\350\000\351\000\251\040\254\000\243"
+// "\000\245\000\256\041\042");
+// #endif
+
+ ToPathString to_wide(utf8);
+ ASSERT_TRUE(to_wide.good());
+ ToUTF8 to_utf8(wide);
+
+ // Using == as gunit doesn't handle PathString equality tests correctly (it
+ // tries to print the PathString and fails).
+ ASSERT_TRUE(wide == to_wide.get_string16());
+ ASSERT_EQ(string(utf8), to_utf8.get_string());
+ ToPathString to_16(utf8);
+ ASSERT_TRUE(to_16.good());
+ ASSERT_TRUE(wide == to_16.get_string16());
+ ASSERT_EQ(string(utf8), reinterpret_cast<const string&>(ToUTF8(wide)));
+}
+#endif
+
+TEST_F(CharacterSetConverterTest, AppendUTF8Tests) {
+ PathString one = PSTR("one");
+ PathString two = PSTR("two");
+ PathString three = PSTR("three");
+ string out;
+ AppendPathStringToUTF8(one.data(), one.length(), &out);
+ AppendPathStringToUTF8(two.data(), two.length(), &out);
+ AppendPathStringToUTF8(three.data(), three.length(), &out);
+ ASSERT_EQ(out, "onetwothree");
+ PathString onetwothree = PSTR("onetwothree");
+ PathStringToUTF8(onetwothree.data(), onetwothree.length(), &out);
+ ASSERT_EQ(out, "onetwothree");
+}
+
+TEST_F(CharacterSetConverterTest, AppendPathStringTests) {
+ string one = "one";
+ string two = "two";
+ string three = "three";
+ PathString out;
+ AppendUTF8ToPathString(one.data(), one.length(), &out);
+ AppendUTF8ToPathString(two.data(), two.length(), &out);
+ AppendUTF8ToPathString(three.data(), three.length(), &out);
+ ASSERT_TRUE(out == PathString(PSTR("onetwothree")));
+ string onetwothree = "onetwothree";
+ UTF8ToPathString(onetwothree.data(), onetwothree.length(), &out);
+ ASSERT_TRUE(out == PathString(PSTR("onetwothree")));
+}
+
+#ifdef OS_WINDOWS
+namespace {
+// See http://en.wikipedia.org/wiki/UTF-16 for an explanation of UTF16.
+// For a test case we use the UTF-8 and UTF-16 encoding of char 119070
+// (hex 1D11E), which is musical G clef.
+const unsigned char utf8_test_string[] = {
+ 0xEF, 0xBB, 0xBF, // BOM
+ 0xE6, 0xB0, 0xB4, // water, Chinese (0x6C34)
+ 0x7A, // lower case z
+ 0xF0, 0x9D, 0x84, 0x9E, // musical G clef (0x1D11E)
+ 0x00,
+};
+const PathChar utf16_test_string[] = {
+ 0xFEFF, // BOM
+ 0x6C34, // water, Chinese
+ 0x007A, // lower case z
+ 0xD834, 0xDD1E, // musical G clef (0x1D11E)
+ 0x0000,
+};
+}
+
+TEST_F(CharacterSetConverterTest, UTF16ToUTF8Test) {
+ // Avoid truncation warning.
+ const char* utf8_test_string_pointer =
+ reinterpret_cast<const char*>(utf8_test_string);
+ ASSERT_STREQ(utf8_test_string_pointer, ToUTF8(utf16_test_string));
+}
+
+TEST_F(CharacterSetConverterTest, utf8_test_stringToUTF16Test) {
+ // Avoid truncation warning.
+ const char* utf8_test_string_pointer =
+ reinterpret_cast<const char*>(utf8_test_string);
+ ToPathString converted_utf8(utf8_test_string_pointer);
+ ASSERT_TRUE(converted_utf8.good());
+ ASSERT_EQ(wcscmp(utf16_test_string, converted_utf8), 0);
+}
+
+TEST(NameTruncation, WindowsNameTruncation) {
+ using browser_sync::TrimPathStringToValidCharacter;
+ PathChar array[] = {'1', '2', 0xD950, 0xDF21, '3', '4', 0};
+ PathString message = array;
+ ASSERT_EQ(message.length(), arraysize(array) - 1);
+ int old_length = message.length();
+ while (old_length != 0) {
+ TrimPathStringToValidCharacter(&message);
+ if (old_length == 4)
+ EXPECT_EQ(3, message.length());
+ else
+ EXPECT_EQ(old_length, message.length());
+ message.resize(message.length() - 1);
+ old_length = message.length();
+ }
+ TrimPathStringToValidCharacter(&message);
+}
+#else
+
+// TODO(zork): Add unittests here once we're running these tests on linux.
+
+#endif
diff --git a/chrome/browser/sync/util/closure.h b/chrome/browser/sync/util/closure.h
new file mode 100644
index 0000000..b282bc4
--- /dev/null
+++ b/chrome/browser/sync/util/closure.h
@@ -0,0 +1,12 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_CLOSURE_H_
+#define CHROME_BROWSER_SYNC_UTIL_CLOSURE_H_
+
+#include "base/task.h"
+
+typedef CallbackRunner<Tuple0> Closure;
+
+#endif // CHROME_BROWSER_SYNC_UTIL_CLOSURE_H_
diff --git a/chrome/browser/sync/util/compat-file-posix.cc b/chrome/browser/sync/util/compat-file-posix.cc
new file mode 100644
index 0000000..66582fa
--- /dev/null
+++ b/chrome/browser/sync/util/compat-file-posix.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if ((!defined(OS_LINUX)) && (!defined(OS_MACOSX)))
+#error Compile this file on Mac OS X or Linux only.
+#endif
+
+#include "chrome/browser/sync/util/compat-file.h"
+
+const char* const kPathSeparator = "/";
+
diff --git a/chrome/browser/sync/util/compat-file-win.cc b/chrome/browser/sync/util/compat-file-win.cc
new file mode 100644
index 0000000..d812d68
--- /dev/null
+++ b/chrome/browser/sync/util/compat-file-win.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef OS_WINDOWS
+#error Compile this file on Windows only.
+#endif
+
+#include "chrome/browser/sync/util/compat-file.h"
+
+#include <windows.h>
+
+const wchar_t* const kPathSeparator = L"\\";
+
diff --git a/chrome/browser/sync/util/compat-file.h b/chrome/browser/sync/util/compat-file.h
new file mode 100644
index 0000000..273e3cb
--- /dev/null
+++ b/chrome/browser/sync/util/compat-file.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// File compatability routines. Useful to delete database files with.
+//
+#ifndef CHROME_BROWSER_SYNC_UTIL_COMPAT_FILE_H_
+#define CHROME_BROWSER_SYNC_UTIL_COMPAT_FILE_H_
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include "chrome/browser/sync/util/sync_types.h"
+
+extern const PathChar* const kPathSeparator;
+
+// Path calls for all OSes.
+// Returns 0 on success, non-zero on failure.
+int PathRemove(const PathString& path);
+
+#ifdef OS_WINDOWS
+inline int PathRemove(const PathString& path) {
+ return _wremove(path.c_str());
+}
+#elif (defined(OS_MACOSX) || defined(OS_LINUX))
+inline int PathRemove(const PathString& path) {
+ return unlink(path.c_str());
+}
+#endif
+
+#endif // CHROME_BROWSER_SYNC_UTIL_COMPAT_FILE_H_
diff --git a/chrome/browser/sync/util/compat-pthread.h b/chrome/browser/sync/util/compat-pthread.h
new file mode 100644
index 0000000..e5817af
--- /dev/null
+++ b/chrome/browser/sync/util/compat-pthread.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Pthread compatability routines.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_COMPAT_PTHREAD_H_
+#define CHROME_BROWSER_SYNC_UTIL_COMPAT_PTHREAD_H_
+
+// TODO(timsteele): This file is deprecated. Use PlatformThread.
+#include "base/platform_thread.h"
+
+#define ThreadId PlatformThreadId
+
+#ifndef OS_WINDOWS
+inline ThreadId GetCurrentThreadId() {
+ return PlatformThread::CurrentId();
+}
+#endif // OS_WINDOWS
+
+#if (!defined(OS_WINDOWS) && !defined(OS_MACOSX))
+// TODO(timsteele): What the heck is this?
+inline int sem_post_multiple(sem_t * sem, int number) {
+ int i;
+ int r = 0;
+ for (i = 0; i < number; i++) {
+ r = sem_post(sem);
+ if (r != 0) {
+ LOG_IF(ERROR, i > 0) << "sem_post() failed on iteration #" << i <<
+ " of " << number;
+ return r;
+ }
+ }
+ return 0;
+}
+#endif // (!defined(OS_WINDOWS) && !defined(OS_MACOSX))
+
+#endif // CHROME_BROWSER_SYNC_UTIL_COMPAT_PTHREAD_H_
diff --git a/chrome/browser/sync/util/crypto_helpers.cc b/chrome/browser/sync/util/crypto_helpers.cc
new file mode 100644
index 0000000..c84bfaf
--- /dev/null
+++ b/chrome/browser/sync/util/crypto_helpers.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/crypto_helpers.h"
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+
+using std::string;
+using std::vector;
+
+MD5Calculator::MD5Calculator() {
+ MD5Init(&context_);
+}
+
+void MD5Calculator::AddData(const unsigned char* data, int length) {
+ CHECK(bin_digest_.empty());
+ MD5Update(&context_, data, length);
+}
+
+void MD5Calculator::CalcDigest() {
+ if (bin_digest_.empty()) {
+ MD5Digest digest;
+ MD5Final(&digest, &context_);
+ bin_digest_.assign(digest.a, digest.a + arraysize(digest.a));
+ }
+}
+
+vector<uint8> MD5Calculator::GetDigest() {
+ CalcDigest();
+ return bin_digest_;
+}
+
+PathString MD5Calculator::GetHexDigest() {
+ CalcDigest();
+ string hex = HexEncode(reinterpret_cast<char*>(&bin_digest_.front()),
+ bin_digest_.size());
+ StringToLowerASCII(&hex);
+ return PathString(hex.begin(), hex.end());
+}
+
+void GetRandomBytes(char* output, int output_length) {
+ for (int i = 0; i < output_length; i++) {
+ // TODO(chron): replace this with something less stupid.
+ output[i] = static_cast<char>(base::RandUint64());
+ }
+}
+
+string Generate128BitRandomHexString() {
+ int64 chunk1 = static_cast<int64>(base::RandUint64());
+ int64 chunk2 = static_cast<int64>(base::RandUint64());
+
+ return StringPrintf("%016" PRId64 "x%016" PRId64 "x",
+ chunk1, chunk2);
+}
diff --git a/chrome/browser/sync/util/crypto_helpers.h b/chrome/browser/sync/util/crypto_helpers.h
new file mode 100644
index 0000000..c31a278
--- /dev/null
+++ b/chrome/browser/sync/util/crypto_helpers.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_CRYPTO_HELPERS_H_
+#define CHROME_BROWSER_SYNC_UTIL_CRYPTO_HELPERS_H_
+
+#include <string>
+#include <vector>
+
+// An object to handle calculation of MD5 sums.
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/md5.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+class MD5Calculator {
+ protected:
+ MD5Context context_;
+ std::vector<uint8> bin_digest_;
+
+ void CalcDigest();
+ public:
+ MD5Calculator();
+ ~MD5Calculator() {}
+ void AddData(const uint8* data, int length);
+ void AddData(const char* data, int length) {
+ AddData(reinterpret_cast<const uint8*>(data), length);
+ }
+ PathString GetHexDigest();
+ std::vector<uint8> GetDigest();
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MD5Calculator);
+};
+
+void GetRandomBytes(char* output, int output_length);
+std::string Generate128BitRandomHexString();
+
+#endif // CHROME_BROWSER_SYNC_UTIL_CRYPTO_HELPERS_H_
diff --git a/chrome/browser/sync/util/crypto_helpers_unittest.cc b/chrome/browser/sync/util/crypto_helpers_unittest.cc
new file mode 100644
index 0000000..1d2dd60
--- /dev/null
+++ b/chrome/browser/sync/util/crypto_helpers_unittest.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(ChecksumTest, MD5ChecksumTest) {
+ uint8 buffer[256];
+ for (unsigned int i = 0; i < arraysize(buffer); ++i) {
+ buffer[i] = i;
+ }
+ MD5Calculator md5;
+ md5.AddData(buffer, arraysize(buffer));
+ PathString checksum(PSTR("e2c865db4162bed963bfaa9ef6ac18f0"));
+ ASSERT_EQ(checksum, md5.GetHexDigest());
+}
diff --git a/chrome/browser/sync/util/data_encryption.cc b/chrome/browser/sync/util/data_encryption.cc
new file mode 100644
index 0000000..b835147
--- /dev/null
+++ b/chrome/browser/sync/util/data_encryption.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// NOTE: this file is Winodws specific.
+
+#include "chrome/browser/sync/util/data_encryption.h"
+
+#include <windows.h>
+#include <wincrypt.h>
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+using std::string;
+using std::vector;
+
+vector<uint8> EncryptData(const string& data) {
+ DATA_BLOB unencrypted_data, encrypted_data;
+ unencrypted_data.pbData = (BYTE*)(data.data());
+ unencrypted_data.cbData = data.size();
+
+ if (!CryptProtectData(&unencrypted_data, L"", NULL, NULL, NULL, 0,
+ &encrypted_data))
+ LOG(ERROR) << "Encryption fails: " << data;
+
+ vector<uint8> result(encrypted_data.pbData,
+ encrypted_data.pbData + encrypted_data.cbData);
+ LocalFree(encrypted_data.pbData);
+ return result;
+}
+
+bool DecryptData(const vector<uint8>& in_data, string* out_data) {
+ DATA_BLOB encrypted_data, decrypted_data;
+ encrypted_data.pbData =
+ (in_data.empty() ? NULL : const_cast<BYTE*>(&in_data[0]));
+ encrypted_data.cbData = in_data.size();
+ LPWSTR descrip = L"";
+
+ if (!CryptUnprotectData(&encrypted_data, &descrip, NULL, NULL, NULL, 0,
+ &decrypted_data)) {
+ LOG(ERROR) << "Decryption fails: ";
+ return false;
+ } else {
+ out_data->assign(reinterpret_cast<const char*>(decrypted_data.pbData),
+ decrypted_data.cbData);
+ LocalFree(decrypted_data.pbData);
+ return true;
+ }
+}
diff --git a/chrome/browser/sync/util/data_encryption.h b/chrome/browser/sync/util/data_encryption.h
new file mode 100644
index 0000000..b62a14a
--- /dev/null
+++ b/chrome/browser/sync/util/data_encryption.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_DATA_ENCRYPTION_H_
+#define CHROME_BROWSER_SYNC_UTIL_DATA_ENCRYPTION_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::string;
+using std::vector;
+
+vector<uint8> EncryptData(const string& data);
+bool DecryptData(const vector<uint8>& in_data, string* out_data);
+
+#endif // CHROME_BROWSER_SYNC_UTIL_DATA_ENCRYPTION_H_
diff --git a/chrome/browser/sync/util/data_encryption_unittest.cc b/chrome/browser/sync/util/data_encryption_unittest.cc
new file mode 100644
index 0000000..63bfda5
--- /dev/null
+++ b/chrome/browser/sync/util/data_encryption_unittest.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/data_encryption.h"
+
+#include <string>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+using std::vector;
+
+namespace {
+
+TEST(DataEncryption, TestEncryptDecryptOfSampleString) {
+ vector<uint8> example(EncryptData("example"));
+ ASSERT_FALSE(example.empty());
+ string result;
+ ASSERT_TRUE(DecryptData(example, &result));
+ ASSERT_TRUE(result == "example");
+}
+
+TEST(DataEncryption, TestDecryptFailure) {
+ vector<uint8> example(0, 0);
+ string result;
+ ASSERT_FALSE(DecryptData(example, &result));
+}
+
+} // namespace
diff --git a/chrome/browser/sync/util/dbgq.h b/chrome/browser/sync/util/dbgq.h
new file mode 100644
index 0000000..65ebb5c
--- /dev/null
+++ b/chrome/browser/sync/util/dbgq.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_DBGQ_H_
+#define CHROME_BROWSER_SYNC_UTIL_DBGQ_H_
+
+#include "base/basictypes.h" // for COMPILE_ASSERT
+
+// A circular queue that is designed to be easily inspectable in a debugger. It
+// puts the elements into the array in reverse, so you can just look at the i_
+// pointer for a recent history.
+template <typename T, size_t size>
+class DebugQueue {
+ COMPILE_ASSERT(size > 0, DebugQueue_size_must_be_greater_than_zero);
+ public:
+ DebugQueue() : i_(array_) { }
+ void Push(const T& t) {
+ i_ = (array_ == i_ ? array_ + size - 1 : i_ - 1);
+ *i_ = t;
+ }
+ protected:
+ T* i_; // Points to the newest element in the queue.
+ T array_[size];
+};
+
+#endif // CHROME_BROWSER_SYNC_UTIL_DBGQ_H_
diff --git a/chrome/browser/sync/util/event_sys-inl.h b/chrome/browser/sync/util/event_sys-inl.h
new file mode 100644
index 0000000..c361528
--- /dev/null
+++ b/chrome/browser/sync/util/event_sys-inl.h
@@ -0,0 +1,340 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_INL_H_
+#define CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_INL_H_
+
+#include <map>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/compat-pthread.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+// How to use Channels:
+
+// 0. Assume Bob is the name of the class from which you want to broadcast
+// events.
+// 1. Choose an EventType. This could be an Enum or something more complicated.
+// 2. Create an EventTraits class for your EventType. It must have
+// two members:
+//
+// typedef x EventType;
+// static bool IsChannelShutdownEvent(const EventType& event);
+//
+// 3. Add an EventChannel<MyEventTraits>* instance and event_channel() const;
+// accessor to Bob.
+// Delete the channel ordinarily in Bob's destructor, or whenever you want.
+// 4. To broadcast events, call bob->event_channel()->NotifyListeners(event).
+// 5. Only call NotifyListeners from a single thread at a time.
+
+// How to use Listeners/Hookups:
+
+// 0. Assume you want a class called Lisa to listen to events from Bob.
+// 1. Create an event handler method in Lisa. Its single argument should be of
+// your event type.
+// 2. Add a EventListenerHookup* hookup_ member to Lisa.
+// 3. Initialize the hookup by calling:
+//
+// hookup_ = NewEventListenerHookup(bob->event_channel(),
+// this,
+// &Lisa::HandleEvent);
+//
+// 4. Delete hookup_ in Lisa's destructor, or anytime sooner to stop receiving
+// events.
+
+// An Event Channel is a source, or broadcaster of events. Listeners subscribe
+// by calling the AddListener() method. The owner of the channel calls the
+// NotifyListeners() method.
+//
+// Don't inherit from this class. Just make an event_channel member and an
+// event_channel() accessor.
+
+// No reason why CallbackWaiters has to be templatized.
+class CallbackWaiters {
+ public:
+ CallbackWaiters() : waiter_count_(0), callback_done_(false) {
+ }
+ ~CallbackWaiters() {
+ DCHECK_EQ(0, waiter_count_);
+ }
+ void WaitForCallbackToComplete(PThreadMutex* listeners_mutex) {
+ {
+ PThreadScopedLock<PThreadMutex> lock(&mutex_);
+ waiter_count_ += 1;
+ listeners_mutex->Unlock();
+ while (!callback_done_)
+ pthread_cond_wait(&condvar_.condvar_, &mutex_.mutex_);
+ waiter_count_ -= 1;
+ if (0 != waiter_count_)
+ return;
+ }
+ delete this;
+ }
+
+ void Signal() {
+ PThreadScopedLock<PThreadMutex> lock(&mutex_);
+ callback_done_ = true;
+ pthread_cond_broadcast(&condvar_.condvar_);
+ }
+
+ protected:
+ int waiter_count_;
+ bool callback_done_;
+ PThreadMutex mutex_;
+ PThreadCondVar condvar_;
+};
+
+template <typename EventTraitsType, typename NotifyLock,
+ typename ScopedNotifyLocker>
+class EventChannel {
+ public:
+ typedef EventTraitsType EventTraits;
+ typedef typename EventTraits::EventType EventType;
+ typedef EventListener<EventType> Listener;
+
+ protected:
+ typedef std::map<Listener*, bool> Listeners;
+ typedef PThreadScopedLock<PThreadMutex> ScopedListenersLock;
+
+ public:
+ // The shutdown event gets send in the EventChannel's destructor.
+ explicit EventChannel(const EventType& shutdown_event)
+ : callback_waiters_(NULL), shutdown_event_(shutdown_event),
+ current_listener_callback_(NULL) {
+ }
+
+ ~EventChannel() {
+ // Tell all the listeners that the channel is being deleted.
+ NotifyListeners(shutdown_event_);
+
+ // Make sure all the listeners have been disconnected. Otherwise, they
+ // will try to call RemoveListener() at a later date.
+#ifdef DEBUG
+ ScopedListenersLock lock(&listeners_mutex_);
+ for (typename Listeners::iterator i = listeners_.begin();
+ i != listeners_.end(); ++i) {
+ DCHECK(i->second) << "Listener not disconnected";
+ }
+#endif
+ }
+
+ // Never call this twice for the same listener.
+ //
+ // Thread safe.
+ void AddListener(Listener* listener) {
+ ScopedListenersLock lock(&listeners_mutex_);
+ typename Listeners::iterator found = listeners_.find(listener);
+ if (found == listeners_.end()) {
+ listeners_.insert(std::make_pair(listener,
+ false)); // Not dead yet.
+ } else {
+ DCHECK(found->second) << "Attempted to add the same listener twice.";
+ found->second = false; // Not dead yet.
+ }
+ }
+
+ // If listener's callback is currently executing, this method waits until the
+ // callback completes before returning.
+ //
+ // Thread safe.
+ void RemoveListener(Listener* listener) {
+ bool wait = false;
+ listeners_mutex_.Lock();
+ typename Listeners::iterator found = listeners_.find(listener);
+ if (found != listeners_.end()) {
+ found->second = true; // Mark as dead.
+ wait = (found->first == current_listener_callback_ &&
+ (!pthread_equal(current_listener_callback_thread_id_,
+ pthread_self())));
+ }
+ if (!wait) {
+ listeners_mutex_.Unlock();
+ return;
+ }
+ if (NULL == callback_waiters_)
+ callback_waiters_ = new CallbackWaiters;
+ callback_waiters_->WaitForCallbackToComplete(&listeners_mutex_);
+ }
+
+ // Blocks until all listeners have been notified.
+ //
+ // NOT thread safe. Must only be called by one thread at a time.
+ void NotifyListeners(const EventType& event) {
+ ScopedNotifyLocker lock_notify(&notify_lock_);
+ listeners_mutex_.Lock();
+ DCHECK(NULL == current_listener_callback_);
+ current_listener_callback_thread_id_ = pthread_self();
+ typename Listeners::iterator i = listeners_.begin();
+ while (i != listeners_.end()) {
+ if (i->second) { // Clean out dead listeners
+ listeners_.erase(i++);
+ continue;
+ }
+ current_listener_callback_ = i->first;
+ listeners_mutex_.Unlock();
+
+ i->first->HandleEvent(event);
+
+ listeners_mutex_.Lock();
+ current_listener_callback_ = NULL;
+ if (NULL != callback_waiters_) {
+ callback_waiters_->Signal();
+ callback_waiters_ = NULL;
+ }
+
+ ++i;
+ }
+ listeners_mutex_.Unlock();
+ }
+
+ // A map iterator remains valid until the element it points to gets removed
+ // from the map, so a map is perfect for our needs.
+ //
+ // Map value is a bool, true means the Listener is dead.
+ Listeners listeners_;
+ // NULL means no callback is currently being called.
+ Listener* current_listener_callback_;
+ // Only valid when current_listener is not NULL.
+ // The thread on which the callback is executing.
+ pthread_t current_listener_callback_thread_id_;
+ // Win32 Event that is usually NULL. Only created when another thread calls
+ // Remove while in callback. Owned and closed by the thread calling Remove().
+ CallbackWaiters* callback_waiters_;
+
+ PThreadMutex listeners_mutex_; // Protects all members above.
+ const EventType shutdown_event_;
+ NotifyLock notify_lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(EventChannel);
+};
+
+// An EventListenerHookup hooks up a method in your class to an EventChannel.
+// Deleting the hookup disconnects from the EventChannel.
+//
+// Contains complexity of inheriting from Listener class and managing lifetimes.
+//
+// Create using NewEventListenerHookup() to avoid explicit template arguments.
+class EventListenerHookup {
+ public:
+ virtual ~EventListenerHookup() { }
+};
+
+template <typename EventChannel, typename EventTraits,
+ class Derived>
+class EventListenerHookupImpl : public EventListenerHookup,
+public EventListener<typename EventTraits::EventType> {
+ public:
+ explicit EventListenerHookupImpl(EventChannel* channel)
+ : channel_(channel), deleted_(NULL) {
+ channel->AddListener(this);
+ connected_ = true;
+ }
+
+ ~EventListenerHookupImpl() {
+ if (NULL != deleted_)
+ *deleted_ = true;
+ if (connected_)
+ channel_->RemoveListener(this);
+ }
+
+ typedef typename EventTraits::EventType EventType;
+ virtual void HandleEvent(const EventType& event) {
+ DCHECK(connected_);
+ bool deleted = false;
+ deleted_ = &deleted;
+ static_cast<Derived*>(this)->Callback(event);
+ if (deleted) // The callback (legally) deleted this.
+ return; // The only safe thing to do.
+ deleted_ = NULL;
+ if (EventTraits::IsChannelShutdownEvent(event)) {
+ channel_->RemoveListener(this);
+ connected_ = false;
+ }
+ }
+
+ protected:
+ EventChannel* const channel_;
+ bool connected_;
+ bool* deleted_; // Allows the handler to delete the hookup.
+};
+
+// SimpleHookup just passes the event to the callback message.
+template <typename EventChannel, typename EventTraits,
+ typename CallbackObject, typename CallbackMethod>
+class SimpleHookup
+ : public EventListenerHookupImpl<EventChannel, EventTraits,
+ SimpleHookup<EventChannel,
+ EventTraits,
+ CallbackObject,
+ CallbackMethod> > {
+ public:
+ SimpleHookup(EventChannel* channel, CallbackObject* cbobject,
+ CallbackMethod cbmethod)
+ : EventListenerHookupImpl<EventChannel, EventTraits,
+ SimpleHookup>(channel), cbobject_(cbobject),
+ cbmethod_(cbmethod) { }
+
+ typedef typename EventTraits::EventType EventType;
+ void Callback(const EventType& event) {
+ (cbobject_->*cbmethod_)(event);
+ }
+ CallbackObject* const cbobject_;
+ CallbackMethod const cbmethod_;
+};
+
+// ArgHookup also passes an additional arg to the callback method.
+template <typename EventChannel, typename EventTraits,
+ typename CallbackObject, typename CallbackMethod,
+ typename CallbackArg0>
+class ArgHookup :
+ public EventListenerHookupImpl<EventChannel, EventTraits,
+ ArgHookup<EventChannel, EventTraits,
+ CallbackObject,
+ CallbackMethod,
+ CallbackArg0> > {
+ public:
+ ArgHookup(EventChannel* channel, CallbackObject* cbobject,
+ CallbackMethod cbmethod, CallbackArg0 arg0)
+ : EventListenerHookupImpl<EventChannel, EventTraits,
+ ArgHookup>(channel), cbobject_(cbobject),
+ cbmethod_(cbmethod), arg0_(arg0) { }
+
+ typedef typename EventTraits::EventType EventType;
+ void Callback(const EventType& event) {
+ (cbobject_->*cbmethod_)(arg0_, event);
+ }
+ CallbackObject* const cbobject_;
+ CallbackMethod const cbmethod_;
+ CallbackArg0 const arg0_;
+};
+
+
+template <typename EventChannel, typename CallbackObject,
+ typename CallbackMethod>
+EventListenerHookup* NewEventListenerHookup(EventChannel* channel,
+ CallbackObject* cbobject,
+ CallbackMethod cbmethod) {
+ return new SimpleHookup<EventChannel,
+ typename EventChannel::EventTraits,
+ CallbackObject, CallbackMethod>(channel, cbobject, cbmethod);
+}
+
+template <typename EventChannel, typename CallbackObject,
+ typename CallbackMethod, typename CallbackArg0>
+EventListenerHookup* NewEventListenerHookup(EventChannel* channel,
+ CallbackObject* cbobject,
+ CallbackMethod cbmethod,
+ CallbackArg0 arg0) {
+ return new ArgHookup<EventChannel,
+ typename EventChannel::EventTraits,
+ CallbackObject, CallbackMethod, CallbackArg0>(channel, cbobject,
+ cbmethod, arg0);
+}
+
+#endif // CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_INL_H_
diff --git a/chrome/browser/sync/util/event_sys.h b/chrome/browser/sync/util/event_sys.h
new file mode 100644
index 0000000..5dcf44a
--- /dev/null
+++ b/chrome/browser/sync/util/event_sys.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_H_
+#define CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_H_
+
+#include "chrome/browser/sync/util/pthread_helpers_fwd.h"
+
+// An abstract base class for listening to events.
+//
+// Don't inherit from this class yourself. Using NewEventListenerHookup() is
+// much easier.
+template <typename EventType>
+class EventListener {
+ public:
+ virtual void HandleEvent(const EventType& event) = 0;
+};
+
+// See the -inl.h for details about the following.
+
+template <typename EventTraits, typename NotifyLock = PThreadNoLock,
+ typename ScopedNotifyLocker = PThreadScopedLock<NotifyLock> >
+class EventChannel;
+
+class EventListenerHookup;
+
+template <typename EventChannel, typename CallbackObject,
+ typename CallbackMethod>
+EventListenerHookup* NewEventListenerHookup(EventChannel* channel,
+ CallbackObject* cbobject,
+ CallbackMethod cbmethod);
+
+template <typename EventChannel, typename CallbackObject,
+ typename CallbackMethod, typename CallbackArg0>
+EventListenerHookup* NewEventListenerHookup(EventChannel* channel,
+ CallbackObject* cbobject,
+ CallbackMethod cbmethod,
+ CallbackArg0 arg0);
+
+#endif // CHROME_BROWSER_SYNC_UTIL_EVENT_SYS_H_
diff --git a/chrome/browser/sync/util/event_sys_unittest.cc b/chrome/browser/sync/util/event_sys_unittest.cc
new file mode 100644
index 0000000..5e521b1
--- /dev/null
+++ b/chrome/browser/sync/util/event_sys_unittest.cc
@@ -0,0 +1,271 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iosfwd>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::endl;
+using std::ostream;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+namespace {
+
+class Pair;
+
+struct TestEvent {
+ Pair* source;
+ enum {
+ A_CHANGED, B_CHANGED, PAIR_BEING_DELETED,
+ } what_happened;
+ int old_value;
+};
+
+struct TestEventTraits {
+ typedef TestEvent EventType;
+ static bool IsChannelShutdownEvent(const TestEvent& event) {
+ return TestEvent::PAIR_BEING_DELETED == event.what_happened;
+ }
+};
+
+class Pair {
+ public:
+ typedef EventChannel<TestEventTraits> Channel;
+ explicit Pair(const string& name) : name_(name), a_(0), b_(0) {
+ TestEvent shutdown = { this, TestEvent::PAIR_BEING_DELETED, 0 };
+ event_channel_ = new Channel(shutdown);
+ }
+ ~Pair() {
+ delete event_channel_;
+ }
+ void set_a(int n) {
+ TestEvent event = { this, TestEvent::A_CHANGED, a_ };
+ a_ = n;
+ event_channel_->NotifyListeners(event);
+ }
+ void set_b(int n) {
+ TestEvent event = { this, TestEvent::B_CHANGED, b_ };
+ b_ = n;
+ event_channel_->NotifyListeners(event);
+ }
+ int a() const { return a_; }
+ int b() const { return b_; }
+ const string& name() { return name_; }
+ Channel* event_channel() const { return event_channel_; }
+
+ protected:
+ const string name_;
+ int a_;
+ int b_;
+ Channel* event_channel_;
+};
+
+class EventLogger {
+ public:
+ explicit EventLogger(ostream& out) : out_(out) { }
+ ~EventLogger() {
+ for (Hookups::iterator i = hookups_.begin(); i != hookups_.end(); ++i)
+ delete *i;
+ }
+
+ void Hookup(const string name, Pair::Channel* channel) {
+ hookups_.push_back(NewEventListenerHookup(channel, this,
+ &EventLogger::HandlePairEvent,
+ name));
+ }
+
+ void HandlePairEvent(const string& name, const TestEvent& event) {
+ const char* what_changed;
+ int new_value;
+ Hookups::iterator dead;
+ switch (event.what_happened) {
+ case TestEvent::A_CHANGED:
+ what_changed = "A";
+ new_value = event.source->a();
+ break;
+ case TestEvent::B_CHANGED:
+ what_changed = "B";
+ new_value = event.source->b();
+ break;
+ case TestEvent::PAIR_BEING_DELETED:
+ out_ << name << " heard " << event.source->name() << " being deleted."
+ << endl;
+ return;
+ default:
+ LOG(FATAL) << "Bad event.what_happened: " << event.what_happened;
+ break;
+ }
+ out_ << name << " heard " << event.source->name() << "'s " << what_changed
+ << " change from "
+ << event.old_value << " to " << new_value << endl;
+ }
+
+ typedef vector<EventListenerHookup*> Hookups;
+ Hookups hookups_;
+ ostream& out_;
+};
+
+const char golden_result[] = "Larry heard Sally's B change from 0 to 2\n"
+"Larry heard Sally's A change from 1 to 3\n"
+"Lewis heard Sam's B change from 0 to 5\n"
+"Larry heard Sally's A change from 3 to 6\n"
+"Larry heard Sally being deleted.\n";
+
+TEST(EventSys, Basic) {
+ Pair sally("Sally"), sam("Sam");
+ sally.set_a(1);
+ stringstream log;
+ EventLogger logger(log);
+ logger.Hookup("Larry", sally.event_channel());
+ sally.set_b(2);
+ sally.set_a(3);
+ sam.set_a(4);
+ logger.Hookup("Lewis", sam.event_channel());
+ sam.set_b(5);
+ sally.set_a(6);
+ // Test that disconnect within callback doesn't deadlock.
+ TestEvent event = {&sally, TestEvent::PAIR_BEING_DELETED, 0 };
+ sally.event_channel()->NotifyListeners(event);
+ sally.set_a(7);
+ ASSERT_EQ(log.str(), golden_result);
+}
+
+
+// This goes pretty far beyond the normal use pattern, so don't use
+// ThreadTester as an example of what to do.
+class ThreadTester : public EventListener<TestEvent> {
+ public:
+ explicit ThreadTester(Pair* pair)
+ : pair_(pair), remove_event_bool_(false) {
+ pair_->event_channel()->AddListener(this);
+ }
+ ~ThreadTester() {
+ pair_->event_channel()->RemoveListener(this);
+ for (int i = 0; i < threads_.size(); i++) {
+ CHECK(pthread_join(threads_[i].thread, NULL) == 0);
+ delete threads_[i].completed;
+ }
+ }
+
+ struct ThreadInfo {
+ pthread_t thread;
+ bool *completed;
+ };
+
+ struct ThreadArgs {
+ ThreadTester* self;
+ pthread_cond_t *thread_running_cond;
+ pthread_mutex_t *thread_running_mutex;
+ bool *thread_running;
+ bool *completed;
+ };
+
+ pthread_t Go() {
+ PThreadCondVar thread_running_cond;
+ PThreadMutex thread_running_mutex;
+ ThreadArgs args;
+ ThreadInfo info;
+ info.completed = new bool(false);
+ args.self = this;
+ args.completed = info.completed;
+ args.thread_running_cond = &(thread_running_cond.condvar_);
+ args.thread_running_mutex = &(thread_running_mutex.mutex_);
+ args.thread_running = new bool(false);
+ CHECK(0 ==
+ pthread_create(&info.thread, NULL, ThreadTester::ThreadMain, &args));
+ thread_running_mutex.Lock();
+ while ((*args.thread_running) == false) {
+ pthread_cond_wait(&(thread_running_cond.condvar_),
+ &(thread_running_mutex.mutex_));
+ }
+ thread_running_mutex.Unlock();
+ delete args.thread_running;
+ threads_.push_back(info);
+ return info.thread;
+ }
+
+ static void* ThreadMain(void* arg) {
+ ThreadArgs args = *reinterpret_cast<ThreadArgs*>(arg);
+ pthread_mutex_lock(args.thread_running_mutex);
+ *args.thread_running = true;
+ pthread_cond_signal(args.thread_running_cond);
+ pthread_mutex_unlock(args.thread_running_mutex);
+
+ args.self->remove_event_mutex_.Lock();
+ while (args.self->remove_event_bool_ == false) {
+ pthread_cond_wait(&args.self->remove_event_.condvar_,
+ &args.self->remove_event_mutex_.mutex_);
+ }
+ args.self->remove_event_mutex_.Unlock();
+
+ // Normally, you'd just delete the hookup. This is very bad style, but
+ // necessary for the test.
+ args.self->pair_->event_channel()->RemoveListener(args.self);
+ *args.completed = true;
+ return 0;
+ }
+
+ void HandleEvent(const TestEvent& event) {
+ remove_event_mutex_.Lock();
+ remove_event_bool_ = true;
+ pthread_cond_broadcast(&remove_event_.condvar_);
+ remove_event_mutex_.Unlock();
+
+ // Windows and posix use different functions to sleep.
+#ifdef OS_WINDOWS
+ Sleep(1);
+#else
+ sleep(1);
+#endif
+
+ for (int i = 0; i < threads_.size(); i++) {
+ if (*(threads_[i].completed))
+ LOG(FATAL) << "A test thread exited too early.";
+ }
+ }
+
+ Pair* pair_;
+ PThreadCondVar remove_event_;
+ PThreadMutex remove_event_mutex_;
+ bool remove_event_bool_;
+ vector<ThreadInfo> threads_;
+};
+
+TEST(EventSys, Multithreaded) {
+ Pair sally("Sally");
+ ThreadTester a(&sally);
+ for (int i = 0; i < 3; ++i)
+ a.Go();
+ sally.set_b(99);
+}
+
+class HookupDeleter {
+ public:
+ void HandleEvent(const TestEvent& event) {
+ delete hookup_;
+ hookup_ = NULL;
+ }
+ EventListenerHookup* hookup_;
+};
+
+TEST(EventSys, InHandlerDeletion) {
+ Pair sally("Sally");
+ HookupDeleter deleter;
+ deleter.hookup_ = NewEventListenerHookup(sally.event_channel(),
+ &deleter,
+ &HookupDeleter::HandleEvent);
+ sally.set_a(1);
+ ASSERT_TRUE(NULL == deleter.hookup_);
+}
+
+} // namespace
diff --git a/chrome/browser/sync/util/fast_dump.h b/chrome/browser/sync/util/fast_dump.h
new file mode 100644
index 0000000..9266f0e
--- /dev/null
+++ b/chrome/browser/sync/util/fast_dump.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_FAST_DUMP_H_
+#define CHROME_BROWSER_SYNC_UTIL_FAST_DUMP_H_
+
+#include <ostream>
+#include <streambuf>
+#include <string>
+
+#include "base/string_util.h"
+
+using std::ostream;
+using std::streambuf;
+using std::string;
+
+// This seems totally gratuitous, and it would be if std::ostream was fast, but
+// std::ostream is slow. It's slow because it creates a ostream::sentry (mutex)
+// for every little << operator. When you want to dump a whole lot of stuff
+// under a single mutex lock, use this FastDump class.
+namespace browser_sync {
+class FastDump {
+ public:
+ explicit FastDump(ostream* outs) : sentry_(*outs), out_(outs->rdbuf()) {
+ }
+ ostream::sentry sentry_;
+ streambuf* const out_;
+};
+} // namespace browser_sync
+
+inline browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& dump, int64 n) {
+ string numbuf(Int64ToString(n));
+ const char* number = numbuf.c_str();
+ dump.out_->sputn(number, numbuf.length());
+ return dump;
+}
+
+inline browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& dump, int32 n) {
+ string numbuf(IntToString(n));
+ const char* number = numbuf.c_str();
+ dump.out_->sputn(number, numbuf.length());
+ return dump;
+}
+
+inline browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& dump, const char* s) {
+ dump.out_->sputn(s, strlen(s));
+ return dump;
+}
+
+inline browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& dump, const string& s) {
+ dump.out_->sputn(s.data(), s.size());
+ return dump;
+}
+
+#endif // CHROME_BROWSER_SYNC_UTIL_FAST_DUMP_H_
diff --git a/chrome/browser/sync/util/highres_timer-linux.cc b/chrome/browser/sync/util/highres_timer-linux.cc
new file mode 100644
index 0000000..8fed343
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer-linux.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// High resolution timer functions for use in Linux.
+
+#include "chrome/browser/sync/util/highres_timer.h"
+
+const uint64 MICROS_IN_MILLI = 1000L;
+const uint64 MICROS_IN_HALF_MILLI = 500L;
+const uint64 MICROS_IN_HALF_SECOND = 500000L;
+
+uint64 HighresTimer::GetElapsedMs() const {
+ uint64 end_time = GetCurrentTicks();
+
+ // Scale to ms and round to nearest ms - rounding is important because
+ // otherwise the truncation error may accumulate e.g. in sums.
+ return (uint64(end_time - start_ticks_) + MICROS_IN_HALF_MILLI) /
+ MICROS_IN_MILLI;
+}
+
+uint64 HighresTimer::GetElapsedSec() const {
+ uint64 end_time = GetCurrentTicks();
+
+ // Scale to ms and round to nearest ms - rounding is important because
+ // otherwise the truncation error may accumulate e.g. in sums.
+ return (uint64(end_time - start_ticks_) + MICROS_IN_HALF_SECOND) /
+ MICROS_IN_SECOND;
+}
diff --git a/chrome/browser/sync/util/highres_timer-linux.h b/chrome/browser/sync/util/highres_timer-linux.h
new file mode 100644
index 0000000..01a022d
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer-linux.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// High resolution timer functions for use in Linux.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_LINUX_H_
+#define CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_LINUX_H_
+
+#include "base/basictypes.h"
+
+#include <sys/time.h>
+
+const uint64 MICROS_IN_SECOND = 1000000L;
+
+// A handy class for reliably measuring wall-clock time with decent resolution.
+//
+// We want to measure time with high resolution on Linux. What to do?
+//
+// RDTSC? Sure, but how do you convert it to wall clock time?
+// clock_gettime? It's not in all Linuxes.
+//
+// Let's just use gettimeofday; it's good to the microsecond.
+class HighresTimer {
+ public:
+ // Captures the current start time.
+ HighresTimer();
+
+ // Captures the current tick, can be used to reset a timer for reuse.
+ void Start();
+
+ // Returns the elapsed ticks with full resolution.
+ uint64 GetElapsedTicks() const;
+
+ // Returns the elapsed time in milliseconds, rounded to the nearest
+ // millisecond.
+ uint64 GetElapsedMs() const;
+
+ // Returns the elapsed time in seconds, rounded to the nearest second.
+ uint64 GetElapsedSec() const;
+
+ uint64 start_ticks() const { return start_ticks_; }
+
+ // Returns timer frequency from cache, should be less overhead than
+ // ::QueryPerformanceFrequency.
+ static uint64 GetTimerFrequency();
+ // Returns current ticks.
+ static uint64 GetCurrentTicks();
+
+ private:
+ // Captured start time.
+ uint64 start_ticks_;
+};
+
+inline HighresTimer::HighresTimer() {
+ Start();
+}
+
+inline void HighresTimer::Start() {
+ start_ticks_ = GetCurrentTicks();
+}
+
+inline uint64 HighresTimer::GetTimerFrequency() {
+ // Fixed; one "tick" is one microsecond.
+ return MICROS_IN_SECOND;
+}
+
+inline uint64 HighresTimer::GetCurrentTicks() {
+ timeval tv;
+ gettimeofday(&tv, 0);
+
+ return tv.tv_sec * MICROS_IN_SECOND + tv.tv_usec;
+}
+
+inline uint64 HighresTimer::GetElapsedTicks() const {
+ return start_ticks_ - GetCurrentTicks();
+}
+
+#endif // CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_LINUX_H_
diff --git a/chrome/browser/sync/util/highres_timer-win32.cc b/chrome/browser/sync/util/highres_timer-win32.cc
new file mode 100644
index 0000000..0d7323a
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer-win32.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// High resolution timer functions for use in Windows.
+
+#include "chrome/browser/sync/util/highres_timer.h"
+
+bool HighresTimer::perf_freq_collected_ = false;
+ULONGLONG HighresTimer::perf_freq_ = 0;
+
+ULONGLONG HighresTimer::GetElapsedMs() const {
+ ULONGLONG end_time = GetCurrentTicks();
+
+ // Scale to ms and round to nearest ms - rounding is important because
+ // otherwise the truncation error may accumulate e.g. in sums.
+ //
+ // Given infinite resolution, this expression could be written as:
+ // trunc((end - start (units:freq*sec))/freq (units:sec) *
+ // 1000 (unit:ms) + 1/2 (unit:ms))
+ ULONGLONG freq = GetTimerFrequency();
+ return ((end_time - start_ticks_) * 1000L + freq / 2) / freq;
+}
+
+ULONGLONG HighresTimer::GetElapsedSec() const {
+ ULONGLONG end_time = GetCurrentTicks();
+
+ // Round to nearest ms - rounding is important because otherwise the
+ // truncation error may accumulate e.g. in sums.
+ //
+ // Given infinite resolution, this expression could be written as:
+ // trunc((end - start (units:freq*sec))/freq (unit:sec) + 1/2 (unit:sec))
+ ULONGLONG freq = GetTimerFrequency();
+ return ((end_time - start_ticks_) + freq / 2) / freq;
+}
+
+void HighresTimer::CollectPerfFreq() {
+ LARGE_INTEGER freq;
+
+ // Note that this is racy. It's OK, however, because even concurrent
+ // executions of this are idempotent.
+ if (::QueryPerformanceFrequency(&freq)) {
+ perf_freq_ = freq.QuadPart;
+ perf_freq_collected_ = true;
+ }
+}
diff --git a/chrome/browser/sync/util/highres_timer-win32.h b/chrome/browser/sync/util/highres_timer-win32.h
new file mode 100644
index 0000000..6e87ce9
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer-win32.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// High resolution timer functions for use in Windows.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_WIN32_H_
+#define CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_WIN32_H_
+
+#include <windows.h>
+
+// A handy class for reliably measuring wall-clock time with decent resolution,
+// even on multi-processor machines and on laptops (where RDTSC potentially
+// returns different results on different processors and/or the RDTSC timer
+// clocks at different rates depending on the power state of the CPU,
+// respectively).
+class HighresTimer {
+ public:
+ // Captures the current start time.
+ HighresTimer();
+
+ // Captures the current tick, can be used to reset a timer for reuse.
+ void Start();
+
+ // Returns the elapsed ticks with full resolution.
+ ULONGLONG GetElapsedTicks() const;
+
+ // Returns the elapsed time in milliseconds, rounded to the nearest
+ // millisecond.
+ ULONGLONG GetElapsedMs() const;
+
+ // Returns the elapsed time in seconds, rounded to the nearest second.
+ ULONGLONG GetElapsedSec() const;
+
+ ULONGLONG start_ticks() const { return start_ticks_; }
+
+ // Returns timer frequency from cache, should be less
+ // overhead than ::QueryPerformanceFrequency.
+ static ULONGLONG GetTimerFrequency();
+ // Returns current ticks.
+ static ULONGLONG GetCurrentTicks();
+
+ private:
+ static void CollectPerfFreq();
+
+ // Captured start time.
+ ULONGLONG start_ticks_;
+
+ // Captured performance counter frequency.
+ static bool perf_freq_collected_;
+ static ULONGLONG perf_freq_;
+};
+
+inline HighresTimer::HighresTimer() {
+ Start();
+}
+
+inline void HighresTimer::Start() {
+ start_ticks_ = GetCurrentTicks();
+}
+
+inline ULONGLONG HighresTimer::GetTimerFrequency() {
+ if (!perf_freq_collected_)
+ CollectPerfFreq();
+ return perf_freq_;
+}
+
+inline ULONGLONG HighresTimer::GetCurrentTicks() {
+ LARGE_INTEGER ticks;
+ ::QueryPerformanceCounter(&ticks);
+ return ticks.QuadPart;
+}
+
+inline ULONGLONG HighresTimer::GetElapsedTicks() const {
+ return start_ticks_ - GetCurrentTicks();
+}
+
+#endif // CHROME_BROWSER_SYNC_UTIL_HIGHRES_TIMER_WIN32_H_
diff --git a/chrome/browser/sync/util/highres_timer.h b/chrome/browser/sync/util/highres_timer.h
new file mode 100644
index 0000000..e2bde4e
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// High resolution timer functions defined for each OS.
+
+#if defined(OS_WINDOWS)
+#include "chrome/browser/sync/util/highres_timer-win32.h"
+#elif defined(OS_MACOSX)
+#error "Mac timer functions are missing."
+#else
+#include "chrome/browser/sync/util/highres_timer-linux.h"
+#endif
diff --git a/chrome/browser/sync/util/highres_timer_unittest.cc b/chrome/browser/sync/util/highres_timer_unittest.cc
new file mode 100644
index 0000000..5723e7f8
--- /dev/null
+++ b/chrome/browser/sync/util/highres_timer_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// High resolution timer unit tests.
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/util/highres_timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// These unittests have proven to be flaky on buildbot. While we don't want
+// them breaking the build, we still build them to guard against bitrot.
+// On dev machines during local builds we can enable them.
+TEST(HighresTimer, DISABLED_MillisecondClock) {
+ HighresTimer timer;
+
+ // note: this could fail if we context switch between initializing the timer
+ // and here. Very unlikely however.
+ EXPECT_EQ(0, timer.GetElapsedMs());
+ timer.Start();
+ uint64 half_ms = HighresTimer::GetTimerFrequency() / 2000;
+ // busy wait for half a millisecond
+ while (timer.start_ticks() + half_ms > HighresTimer::GetCurrentTicks()) {
+ // Nothing
+ }
+ EXPECT_EQ(1, timer.GetElapsedMs());
+}
+
+TEST(HighresTimer, DISABLED_SecondClock) {
+ HighresTimer timer;
+
+ EXPECT_EQ(0, timer.GetElapsedSec());
+#ifdef OS_WINDOWS
+ ::Sleep(250);
+#else
+ struct timespec ts1 = {0, 250000000};
+ nanosleep(&ts1, 0);
+#endif
+ EXPECT_EQ(0, timer.GetElapsedSec());
+ EXPECT_LE(230, timer.GetElapsedMs());
+ EXPECT_GE(270, timer.GetElapsedMs());
+#ifdef OS_WINDOWS
+ ::Sleep(251);
+#else
+ struct timespec ts2 = {0, 251000000};
+ nanosleep(&ts2, 0);
+#endif
+ EXPECT_EQ(1, timer.GetElapsedSec());
+}
diff --git a/chrome/browser/sync/util/path_helpers-linux.cc b/chrome/browser/sync/util/path_helpers-linux.cc
new file mode 100644
index 0000000..4f4543d
--- /dev/null
+++ b/chrome/browser/sync/util/path_helpers-linux.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sys/types.h>
+
+#include <glib.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+#ifndef OS_LINUX
+#error Compile this file on Linux only.
+#endif
+
+string LastPathSegment(const string& path) {
+ string str(path);
+ string::size_type final_slash = str.find_last_of('/');
+ if (string::npos != final_slash && final_slash == str.length() - 1
+ && str.length() > 1) {
+ str.erase(final_slash);
+ final_slash = str.find_last_of('/');
+ }
+ if (string::npos == final_slash)
+ return str;
+ str.erase(0, final_slash+1);
+ return str;
+}
+
+PathString GetFullPath(const PathString& path) {
+ // TODO(sync): Not sure what the base of the relative path should be on
+ // linux.
+ return path;
+}
+
+PathString AppendSlash(const PathString& path) {
+ if ((!path.empty()) && (*path.rbegin() != '/')) {
+ return path + '/';
+ }
+ return path;
+}
+
+PathString LowercasePath(const PathString& path) {
+ gchar *ret = g_utf8_strdown(path.c_str(), -1);
+ PathString retstr(ret);
+ g_free(ret);
+ return retstr;
+}
diff --git a/chrome/browser/sync/util/path_helpers-posix.cc b/chrome/browser/sync/util/path_helpers-posix.cc
new file mode 100644
index 0000000..02726db
--- /dev/null
+++ b/chrome/browser/sync/util/path_helpers-posix.cc
@@ -0,0 +1,99 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <pwd.h>
+
+#include <string.h>
+
+#include "base/port.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "strings/strutil.h"
+
+#if ((!defined(OS_LINUX)) && (!defined(OS_MACOSX)))
+#error Compile this file on Mac OS X or Linux only.
+#endif
+
+PathString ExpandTilde(const PathString& path) {
+ if (path.empty())
+ return path;
+ if ('~' != path[0])
+ return path;
+ PathString ret;
+ // TODO(sync): Consider using getpwuid_r.
+ ret.insert(0, getpwuid(getuid())->pw_dir);
+ ret.append(++path.begin(), path.end());
+ return ret;
+}
+
+namespace {
+// TODO(sync): We really should use char[].
+string cache_dir_;
+}
+
+void set_cache_dir(string cache_dir) {
+ CHECK(cache_dir_.empty());
+ cache_dir_ = cache_dir;
+}
+
+string get_cache_dir() {
+ CHECK(!cache_dir_.empty());
+ return cache_dir_;
+}
+
+// On Posix, PathStrings are UTF-8, not UTF-16 as they are on Windows.
+// Thus, this function is different from the Windows version.
+PathString TruncatePathString(const PathString& original, int length) {
+ if (original.size() <= length)
+ return original;
+ if (length <= 0)
+ return original;
+ PathString ret(original.begin(), original.begin() + length);
+ COMPILE_ASSERT(sizeof(PathChar) == sizeof(uint8), PathStringNotUTF8);
+ PathString::reverse_iterator last_char = ret.rbegin();
+
+ // Values taken from
+ // http://en.wikipedia.org/w/index.php?title=UTF-8&oldid=252875566
+ if (0 == (*last_char & 0x80))
+ return ret;
+
+ for (; last_char != ret.rend(); ++last_char) {
+ if (0 == (*last_char & 0x80)) {
+ // got malformed UTF-8; bail
+ return ret;
+ }
+ if (0 == (*last_char & 0x40)) {
+ // got another trailing byte
+ continue;
+ }
+ break;
+ }
+
+ if (ret.rend() == last_char) {
+ // We hit the beginning of the string. bail.
+ return ret;
+ }
+
+ int last_codepoint_len = last_char - ret.rbegin() + 1;
+
+ if (((0xC0 == (*last_char & 0xE0)) && (2 == last_codepoint_len)) ||
+ ((0xE0 == (*last_char & 0xF0)) && (3 == last_codepoint_len)) ||
+ ((0xF0 == (*last_char & 0xF8)) && (4 == last_codepoint_len))) {
+ // Valid utf-8.
+ return ret;
+ }
+
+ // Invalid utf-8. chop off last "codepoint" and return.
+ ret.resize(ret.size() - last_codepoint_len);
+ return ret;
+}
+
+// Convert /s to :s .
+PathString MakePathComponentOSLegal(const PathString& component) {
+ if (PathString::npos == component.find("/"))
+ return PSTR("");
+ PathString new_name;
+ new_name.reserve(component.size());
+ StringReplace(component, "/", ":", true, &new_name);
+ return new_name;
+}
diff --git a/chrome/browser/sync/util/path_helpers.cc b/chrome/browser/sync/util/path_helpers.cc
new file mode 100644
index 0000000..1cf8d4e
--- /dev/null
+++ b/chrome/browser/sync/util/path_helpers.cc
@@ -0,0 +1,153 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/path_helpers.h"
+
+#include <Shlwapi.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+#ifndef OS_WINDOWS
+#error Compile this file on Windows only.
+#endif
+
+using std::string;
+
+#if OS_WINDOWS
+const char PATH_SEPARATOR = '\\';
+#else
+const char PATH_SEPARATOR = '/';
+#endif // OS_WINDOWS
+
+
+static PathString RemoveTrailingSlashes16(PathString str) {
+ while ((str.length() > 0) && (str.at(str.length() - 1) == kPathSeparator[0]))
+ str.resize(str.length() - 1);
+ return str;
+}
+
+static string RemoveTrailingSlashes(string str) {
+ while ((str.length() > 0) && (str.at(str.length() - 1) == PATH_SEPARATOR))
+ str.resize(str.length() - 1);
+ return str;
+}
+
+PathString LastPathSegment(const PathString& path) {
+ return RemoveTrailingSlashes16(PathFindFileNameW(path.c_str()));
+}
+
+string LastPathSegment(const string& path) {
+ return RemoveTrailingSlashes(PathFindFileNameA(path.c_str()));
+}
+
+PathString GetFullPath(const PathString& path) {
+ PathChar buffer[MAX_PATH];
+ PathChar* file_part;
+ DWORD const size = GetFullPathName(path.c_str(), ARRAYSIZE(buffer), buffer,
+ &file_part);
+ return PathString(buffer, size);
+}
+
+PathString AppendSlash(const PathString& path) {
+ PathString result(path);
+ if (!result.empty()) {
+ if (*result.rbegin() == '/')
+ *result.rbegin() = '\\';
+ else if (*result.rbegin() != '\\')
+ result.push_back('\\');
+ }
+ return result;
+}
+
+PathString ExpandTilde(const PathString& path) {
+ // Do nothing on windows.
+ return path;
+}
+
+// Returns a string with length or fewer elements, careful to
+// not truncate a string mid-surrogate pair.
+PathString TruncatePathString(const PathString& original, int length) {
+ if (original.size() <= length)
+ return original;
+ if (length <= 0)
+ return original;
+ PathString ret(original.begin(), original.begin() + length);
+ COMPILE_ASSERT(sizeof(PathChar) == sizeof(uint16), PathStringNotUTF16);
+ PathChar last_char = *ret.rbegin();
+
+ // Values taken from
+ // http://en.wikipedia.org/w/index.php?title=UTF-16/UCS-2&oldid=248072840
+ if (last_char >= 0xD800 && last_char <= 0xDBFF)
+ ret.resize(ret.size() - 1);
+ return ret;
+}
+
+namespace {
+const PathString kWindowsIllegalBaseFilenames[] = {
+ L"CON", L"PRN", L"AUX", L"NUL", L"COM1", L"COM2",
+ L"COM3", L"COM4", L"COM5", L"COM6", L"COM7",
+ L"COM8", L"COM9", L"LPT1", L"LPT2", L"LPT3",
+ L"LPT4", L"LPT5", L"LPT6", L"LPT7", L"LPT8",
+ L"LPT9" };
+}
+
+// See: http://msdn.microsoft.com/library/default.asp?url=/library/
+// en-us/fileio/fs/naming_a_file.asp
+// note that * and ? are not listed on the page as illegal characters,
+// but they are.
+PathString MakePathComponentOSLegal(const PathString& component) {
+ CHECK(!component.empty());
+ PathString mutable_component = component;
+
+ // Remove illegal characters.
+ for (PathString::iterator i = mutable_component.begin();
+ i != mutable_component.end();) {
+ if ((PathString::npos != PathString(L"<>:\"/\\|*?").find(*i)) ||
+ ((static_cast<unsigned short>(*i) >= 0) &&
+ (static_cast<unsigned short>(*i) <= 31))) {
+ mutable_component.erase(i);
+ } else {
+ ++i;
+ }
+ }
+
+ // Remove trailing spaces or periods.
+ while (mutable_component.size() &&
+ ((mutable_component.at(mutable_component.size() - 1) == L' ') ||
+ (mutable_component.at(mutable_component.size() - 1) == L'.')))
+ mutable_component.resize(mutable_component.size() - 1, L' ');
+
+ // Remove a bunch of forbidden names. windows only seems to mind if
+ // a forbidden name matches our name exactly (e.g. "prn") or if the
+ // name is the forbidden name, followed by a dot, followed by anything
+ // (e.g., "prn.anything.foo.bar")
+
+ // From this point out, we break mutable_component into two strings, and
+ // use them this way: we save anything after and including the first dot
+ // (usually the extension) and only mess with stuff before the first dot.
+ PathString::size_type first_dot = mutable_component.find_first_of(L'.');
+ if (PathString::npos == first_dot)
+ first_dot = mutable_component.size();
+ PathString sub = mutable_component.substr(0, first_dot);
+ PathString postsub = mutable_component.substr(first_dot);
+ CHECK(sub + postsub == mutable_component);
+ for (int i = 0; i < ARRAYSIZE(kWindowsIllegalBaseFilenames); i++) {
+ // ComparePathNames(a, b) == 0 -> same
+ if (syncable::ComparePathNames(kWindowsIllegalBaseFilenames[i], sub) == 0) {
+ sub.append(L"~1");
+ break;
+ }
+ }
+ if ((L"" == sub) && (L"" == postsub)) {
+ sub = L"~1";
+ }
+
+ // Return the new name, only if it differs from the original.
+ if ((sub + postsub) == component)
+ return L"";
+ return (sub + postsub);
+}
diff --git a/chrome/browser/sync/util/path_helpers.h b/chrome/browser/sync/util/path_helpers.h
new file mode 100644
index 0000000..d8b4663
--- /dev/null
+++ b/chrome/browser/sync/util/path_helpers.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_PATH_HELPERS_H_
+#define CHROME_BROWSER_SYNC_UTIL_PATH_HELPERS_H_
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+template <typename StringType>
+class PathSegmentIterator : public std::iterator<std::forward_iterator_tag,
+ StringType> {
+ public:
+ explicit PathSegmentIterator(const StringType& path) :
+ path_(path), segment_begin_(0), segment_end_(0) {
+ ++(*this);
+ }
+
+ PathSegmentIterator() : segment_begin_(0), segment_end_(0) { }
+
+ // Default copy constructors, constructors, etc. will all do the right thing.
+ PathSegmentIterator& operator ++() {
+ segment_begin_ =
+ std::min(path_.size(),
+ path_.find_first_not_of(kPathSeparator, segment_end_));
+ segment_end_ =
+ std::min(path_.size(),
+ path_.find_first_of(kPathSeparator, segment_begin_));
+ value_.assign(path_, segment_begin_, segment_end_ - segment_begin_);
+ return *this;
+ }
+
+ PathSegmentIterator operator ++(int) {
+ PathSegmentIterator i(*this);
+ return ++i;
+ }
+
+ const StringType& operator * () const {
+ return value_;
+ }
+ const StringType* operator -> () const {
+ return &value_;
+ }
+
+ // If the current value and remaining path are equal, then we
+ // call the iterators equal.
+ bool operator == (const PathSegmentIterator& i) const {
+ return 0 == path_.compare(segment_begin_,
+ path_.size() - segment_begin_,
+ i.path_, i.segment_begin_, i.path_.size() - i.segment_begin_);
+ }
+
+ bool operator != (const PathSegmentIterator& i) const {
+ return !(*this == i);
+ }
+
+ protected:
+ StringType path_;
+ typename StringType::size_type segment_begin_;
+ typename StringType::size_type segment_end_;
+ StringType value_;
+};
+
+// NOTE: The functions (Strip)LastPathSegment always return values without a
+// trailing slash.
+PathString LastPathSegment(const PathString& path);
+std::string LastPathSegment(const std::string& path);
+PathString AppendSlash(const PathString& path);
+PathString GetFullPath(const PathString& path);
+PathString LowercasePath(const PathString& path);
+PathString ExpandTilde(const PathString& path);
+
+inline bool HasSuffixPathString(const PathString& str,
+ const PathString& suffix) {
+ return str.find(suffix, str.size() - suffix.size()) != PathString::npos;
+}
+
+inline PathString StripSuffixPathString(const PathString& str,
+ const PathString& suffix) {
+ PathString ret(str);
+ if (HasSuffixPathString(str, suffix)) {
+ ret.resize(str.size() - suffix.size());
+ }
+ return ret;
+}
+
+// Returns a string with length or fewer elements, careful to
+// not truncate a string mid-surrogate pair.
+PathString TruncatePathString(const PathString& original, int length);
+
+// Makes a path component legal for your OS, but doesn't handle collisions
+// with other files in the same directory. it can do this by removing
+// illegal characters and adding ~1 before the first '.' in the filename.
+// returns PSTR("") if the name is fine as-is
+// on mac/linux we let names stay unicode normalization form C in the system
+// and convert to another normal form in fuse handlers. but, if a '/' is in
+// a filename, we handle it here.
+PathString MakePathComponentOSLegal(const PathString& component);
+
+#endif // CHROME_BROWSER_SYNC_UTIL_PATH_HELPERS_H_
diff --git a/chrome/browser/sync/util/path_helpers_unittest.cc b/chrome/browser/sync/util/path_helpers_unittest.cc
new file mode 100644
index 0000000..75a81a2
--- /dev/null
+++ b/chrome/browser/sync/util/path_helpers_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/syncable/path_name_cmp.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncable {
+
+class PathHelpersTest : public testing::Test {
+};
+
+TEST(PathHelpersTest, TruncatePathStringTest) {
+ // Simple case.
+ PathString str = PSTR("12345");
+ EXPECT_EQ(PSTR("123"), TruncatePathString(str, 3));
+ EXPECT_EQ(str, TruncatePathString(str, str.size()));
+
+ // abcg is "abc" + musical g clef U+1D11E
+#if PATHSTRING_IS_STD_STRING
+ // UTF-8
+ PathChar abcg[] = {'a', 'b', 'c', 0xF0, 0x9D, 0x84, 0x9E, '\0'};
+#else // PATHSTRING_IS_STD_STRING
+ // UTF-16
+ PathChar abcg[] = {'a', 'b', 'c', 0xD834, 0xDD1E, '\0'};
+#endif // PATHSTRING_IS_STD_STRING
+
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abcg, 4));
+
+ // Further utf-8 tests.
+#if PATHSTRING_IS_STD_STRING
+ // UTF-8
+
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abcg, 4));
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abcg, 5));
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abcg, 6));
+ EXPECT_EQ(PathString(abcg), TruncatePathString(abcg, 7));
+
+ PathChar abc2[] = {'a', 'b', 'c', 0xC3, 0xB1, '\0'}; // abc(n w/ tilde)
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abc2, 3));
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abc2, 4));
+ EXPECT_EQ(PathString(abc2), TruncatePathString(abc2, 5));
+
+ PathChar abc3[] = {'a', 'b', 'c', 0xE2, 0x82, 0xAC, '\0'}; // abc(euro)
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abc3, 3));
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abc3, 4));
+ EXPECT_EQ(PSTR("abc"), TruncatePathString(abc3, 5));
+ EXPECT_EQ(PathString(abc3), TruncatePathString(abc3, 6));
+#endif
+}
+
+TEST(PathHelpersTest, PathStrutil) {
+ PathString big = PSTR("abcdef");
+ PathString suffix = PSTR("def");
+ PathString other = PSTR("x");
+ EXPECT_TRUE(HasSuffixPathString(big, suffix));
+ EXPECT_FALSE(HasSuffixPathString(suffix, big));
+ EXPECT_FALSE(HasSuffixPathString(big, other));
+ EXPECT_EQ(PSTR("abc"), StripSuffixPathString(big, suffix));
+}
+
+TEST(PathHelpersTest, SanitizePathComponent) {
+#ifdef OS_WINDOWS
+ EXPECT_EQ(MakePathComponentOSLegal(L"bar"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"bar <"), L"bar");
+ EXPECT_EQ(MakePathComponentOSLegal(L"bar.<"), L"bar");
+ EXPECT_EQ(MakePathComponentOSLegal(L"prn"), L"prn~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"pr>n"), L"prn~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"ab:c"), L"abc");
+ EXPECT_EQ(MakePathComponentOSLegal(L"a|bc"), L"abc");
+ EXPECT_EQ(MakePathComponentOSLegal(L"baz~9"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"\007"), L"~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"com1.txt.bat"), L"com1~1.txt.bat");
+ EXPECT_EQ(MakePathComponentOSLegal(L"foo.com1.bat"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"\010gg"), L"gg");
+ EXPECT_EQ(MakePathComponentOSLegal(L"<"), L"~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"col:on"), L"colon");
+ EXPECT_EQ(MakePathComponentOSLegal(L"q\""), L"q");
+ EXPECT_EQ(MakePathComponentOSLegal(L"back\\slAsh"), L"backslAsh");
+ EXPECT_EQ(MakePathComponentOSLegal(L"sla/sh "), L"slash");
+ EXPECT_EQ(MakePathComponentOSLegal(L"s|laSh"), L"slaSh");
+ EXPECT_EQ(MakePathComponentOSLegal(L"CON"), L"CON~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"PRN"), L"PRN~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"AUX"), L"AUX~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"NUL"), L"NUL~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM1"), L"COM1~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM2"), L"COM2~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM3"), L"COM3~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM4"), L"COM4~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM5"), L"COM5~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM6"), L"COM6~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM7"), L"COM7~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM8"), L"COM8~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"COM9"), L"COM9~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT1"), L"LPT1~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT2"), L"LPT2~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT3"), L"LPT3~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT4"), L"LPT4~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT5"), L"LPT5~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT6"), L"LPT6~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT7"), L"LPT7~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT8"), L"LPT8~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"LPT9"), L"LPT9~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"bar~bar"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"adlr~-3"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"tilde~"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"mytext.txt"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"mytext|.txt"), L"mytext.txt");
+ EXPECT_EQ(MakePathComponentOSLegal(L"okay.com1.txt"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"software-3.tar.gz"), L"");
+ EXPECT_EQ(MakePathComponentOSLegal(L"<"), L"~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"<.<"), L"~1");
+ EXPECT_EQ(MakePathComponentOSLegal(L"<.<txt"), L".txt");
+ EXPECT_EQ(MakePathComponentOSLegal(L"txt<.<"), L"txt");
+#else // OS_WINDOWS
+
+ EXPECT_EQ(MakePathComponentOSLegal("bar"), "");
+ EXPECT_EQ(MakePathComponentOSLegal("b"), "");
+ EXPECT_EQ(MakePathComponentOSLegal("A"), "");
+ EXPECT_EQ(MakePathComponentOSLegal("<'|"), "");
+ EXPECT_EQ(MakePathComponentOSLegal("/"), ":");
+ EXPECT_EQ(MakePathComponentOSLegal(":"), "");
+
+#endif // OS_WINDOWS
+}
+
+} // namespace syncable
diff --git a/chrome/browser/sync/util/pthread_helpers.cc b/chrome/browser/sync/util/pthread_helpers.cc
new file mode 100644
index 0000000..4dadc55
--- /dev/null
+++ b/chrome/browser/sync/util/pthread_helpers.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+#if (defined(OS_LINUX) || defined(OS_MACOSX))
+#include <sys/time.h>
+#endif // (defined(OS_LINUX) || defined(OS_MACOSX))
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/port.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+
+#ifdef OS_WINDOWS
+
+namespace {
+
+// Ensure that we don't bug the user more than once about the process being
+// terminated.
+base::subtle::AtomicWord g_process_terminating = 0;
+
+struct ThreadStartParams {
+ void *(*start) (void* payload);
+ void* param;
+};
+
+void* ThreadMainProc(void* parameter) {
+ ThreadStartParams* tsp = reinterpret_cast<ThreadStartParams*>(parameter);
+ void *(*start) (void *) = tsp->start;
+ void* param = tsp->param;
+
+ delete tsp;
+
+ void* result = NULL;
+ __try {
+ result = start(param);
+ } __except(EXCEPTION_CONTINUE_SEARCH) {
+ // Make sure only one thread complains and exits the process. Other
+ // faulting threads simply return.
+ if (0 == base::subtle::NoBarrier_CompareAndSwap(
+ &g_process_terminating, 0, 1)) {
+ // Service notification means we don't have a recursive event loop inside
+ // this call, and so won't suffer recursive exceptions.
+ ::MessageBox(NULL,
+ PRODUCT_NAME_STRING
+ L" has suffered a non-recoverable\n"
+ L"exception, and must exit immediately",
+ L"Nonrecoverable Exception",
+ MB_OK | MB_APPLMODAL | MB_SERVICE_NOTIFICATION);
+
+ ::ExitProcess(GetExceptionCode());
+ }
+ }
+
+ return result;
+}
+
+} // namespace
+
+#endif
+
+thread_handle CreatePThread(void *(*start) (void *), void* parameter) {
+#ifdef OS_WINDOWS
+ scoped_ptr<ThreadStartParams> param(new ThreadStartParams);
+ if (NULL == param.get())
+ return NULL;
+
+ param->start = start;
+ param->param = parameter;
+
+ pthread_t pthread;
+ if (0 != pthread_create(&pthread, NULL, ThreadMainProc, param.get()))
+ return NULL;
+
+ // ownership has passed to the new thread
+ param.release();
+
+ const HANDLE thread_handle = pthread_getw32threadhandle_np(pthread);
+ HANDLE thread_copy;
+ // Have to duplicate the thread handle, because when we call
+ // pthread_detach(), the handle will get closed as soon as the thread exits.
+ // We want to keep the handle indefinitely.
+ CHECK(DuplicateHandle(GetCurrentProcess(), thread_handle,
+ GetCurrentProcess(), &thread_copy, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) <<
+ "DuplicateHandle() failed with error " << GetLastError();
+ pthread_detach(pthread);
+ return thread_copy;
+#else
+ pthread_t handle;
+
+ int result = pthread_create(&handle, NULL, start, parameter);
+ if (result == 0) {
+ return handle;
+ } else {
+ return 0;
+ }
+#endif
+}
+
+struct timespec GetPThreadAbsoluteTime(uint32 ms) {
+#ifdef OS_WINDOWS
+ FILETIME filenow;
+ GetSystemTimeAsFileTime(&filenow);
+ ULARGE_INTEGER n;
+ n.LowPart = filenow.dwLowDateTime;
+ n.HighPart = filenow.dwHighDateTime;
+ // Filetime unit is 100-nanosecond intervals
+ const int64 ms_ftime = 10000;
+ n.QuadPart += ms_ftime * ms;
+
+ // The number of 100 nanosecond intervals from Jan 1, 1601 'til Jan 1, 1970.
+ const int64 kOffset = GG_LONGLONG(116444736000000000);
+ timespec result;
+ result.tv_sec = (n.QuadPart - kOffset) / 10000000;
+ result.tv_nsec = (n.QuadPart - kOffset -
+ (result.tv_sec * GG_LONGLONG(10000000))) * 100;
+ return result;
+#else
+ struct timeval now;
+ struct timezone zone;
+ gettimeofday(&now, &zone);
+ struct timespec deadline = { now.tv_sec };
+ // microseconds to nanoseconds.
+ // and add the ms delay.
+ ms += now.tv_usec / 1000;
+ deadline.tv_sec += ms / 1000;
+ deadline.tv_nsec = (ms % 1000) * 1000000;
+ return deadline;
+#endif
+}
+
+void NameCurrentThreadForDebugging(char* name) {
+#if defined(OS_WINDOWS)
+ // This implementation is taken from Chromium's platform_thread framework.
+ // The information on how to set the thread name comes from a MSDN article:
+ // http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
+ const DWORD kVCThreadNameException = 0x406D1388;
+ typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // Must be 0x1000.
+ LPCSTR szName; // Pointer to name (in user addr space).
+ DWORD dwThreadID; // Thread ID (-1=caller thread).
+ DWORD dwFlags; // Reserved for future use, must be zero.
+ } THREADNAME_INFO;
+
+ // The debugger needs to be around to catch the name in the exception. If
+ // there isn't a debugger, we are just needlessly throwing an exception.
+ if (!::IsDebuggerPresent())
+ return;
+
+ THREADNAME_INFO info = { 0x1000, name, GetCurrentThreadId(), 0 };
+
+ __try {
+ RaiseException(kVCThreadNameException, 0, sizeof(info)/sizeof(DWORD),
+ reinterpret_cast<DWORD_PTR*>(&info));
+ } __except(EXCEPTION_CONTINUE_EXECUTION) {
+ }
+#endif // defined(OS_WINDOWS)
+}
diff --git a/chrome/browser/sync/util/pthread_helpers.h b/chrome/browser/sync/util/pthread_helpers.h
new file mode 100644
index 0000000..26defe0
--- /dev/null
+++ b/chrome/browser/sync/util/pthread_helpers.h
@@ -0,0 +1,259 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_H_
+#define CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_H_
+
+#include <pthread.h>
+#include "base/logging.h"
+
+#ifdef OS_WINDOWS
+typedef void* thread_handle;
+#else
+typedef pthread_t thread_handle;
+#endif
+
+// Creates a pthread, detaches from it, and returns a Win32 HANDLE for it that
+// the caller must CloseHandle().
+thread_handle CreatePThread(void* (*start)(void* payload), void* parameter);
+
+class PThreadRWLock {
+ public:
+ inline PThreadRWLock() {
+ int result = pthread_rwlock_init(&rwlock_, 0);
+ DCHECK_EQ(0, result);
+ }
+ ~PThreadRWLock() {
+ int result = pthread_rwlock_destroy(&rwlock_);
+ DCHECK_EQ(0, result);
+ }
+ pthread_rwlock_t rwlock_;
+
+ DISALLOW_COPY_AND_ASSIGN(PThreadRWLock);
+};
+
+// ScopedReadLock attempts to acquire a write lock in its constructor and then
+// releases it in its destructor.
+class ScopedWriteLock {
+ public:
+ explicit ScopedWriteLock(pthread_rwlock_t* rwlock) : rwlock_(rwlock) {
+ int result = pthread_rwlock_wrlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+
+ explicit ScopedWriteLock(PThreadRWLock* rwlock) : rwlock_(&rwlock->rwlock_) {
+ int result = pthread_rwlock_wrlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+
+ ~ScopedWriteLock() {
+ int result = pthread_rwlock_unlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+
+ protected:
+ pthread_rwlock_t* rwlock_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedWriteLock);
+};
+
+// ScopedReadLock attempts to acquire a read lock in its constructor and then
+// releases it in its destructor.
+class ScopedReadLock {
+ public:
+ explicit ScopedReadLock(pthread_rwlock_t* rwlock) : rwlock_(rwlock) {
+ int result = pthread_rwlock_rdlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+
+ explicit ScopedReadLock(PThreadRWLock* rwlock) : rwlock_(&rwlock->rwlock_) {
+ int result = pthread_rwlock_rdlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+
+ ~ScopedReadLock() {
+ int result = pthread_rwlock_unlock(rwlock_);
+ DCHECK_EQ(0, result);
+ }
+ protected:
+ pthread_rwlock_t* rwlock_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedReadLock);
+};
+
+template <typename LockType>
+class PThreadScopedLock {
+ public:
+ explicit inline PThreadScopedLock(LockType* lock) : lock_(lock) {
+ if (lock_)
+ lock->Lock();
+ }
+ inline ~PThreadScopedLock() {
+ Unlock();
+ }
+ inline void Unlock() {
+ if (lock_) {
+ lock_->Unlock();
+ lock_ = NULL;
+ }
+ }
+ LockType* lock_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PThreadScopedLock);
+};
+
+class PThreadNoLock {
+ public:
+ inline void Lock() { }
+ inline void Unlock() { }
+};
+
+// On win32, the pthread mutex implementation is about as efficient a critical
+// section. It uses atomic operations and only needs kernel calls on
+// contention.
+class PThreadMutex {
+ public:
+ inline PThreadMutex() {
+ pthread_mutexattr_t* attributes = NULL;
+#ifndef NDEBUG
+ private_attributes_in_use_ = true;
+ pthread_mutexattr_init(&mutex_attributes_);
+ pthread_mutexattr_settype(&mutex_attributes_, PTHREAD_MUTEX_ERRORCHECK);
+ attributes = &mutex_attributes_;
+#endif
+ int result = pthread_mutex_init(&mutex_, attributes);
+ DCHECK_EQ(0, result);
+ }
+ inline explicit PThreadMutex(const pthread_mutexattr_t* attr) {
+#ifndef NDEBUG
+ private_attributes_in_use_ = false;
+#endif
+ int result = pthread_mutex_init(&mutex_, attr);
+ DCHECK_EQ(0, result);
+ }
+ inline ~PThreadMutex() {
+ int result = pthread_mutex_destroy(&mutex_);
+ DCHECK_EQ(0, result);
+#ifndef NDEBUG
+ if (private_attributes_in_use_) {
+ pthread_mutexattr_destroy(&mutex_attributes_);
+ }
+#endif
+ }
+ inline void Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ DCHECK_EQ(0, result);
+ }
+ inline void Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ DCHECK_EQ(0, result);
+ }
+ pthread_mutex_t mutex_;
+
+#ifndef NDEBUG
+ pthread_mutexattr_t mutex_attributes_;
+ bool private_attributes_in_use_;
+#endif
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PThreadMutex);
+};
+
+class PThreadMutexAttr {
+ public:
+ pthread_mutexattr_t attr;
+ inline PThreadMutexAttr(int type) {
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, type);
+ }
+ inline ~PThreadMutexAttr() {
+ pthread_mutexattr_destroy(&attr);
+ }
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PThreadMutexAttr);
+};
+
+class PThreadRecursiveMutex : public PThreadMutex {
+ public:
+ inline PThreadRecursiveMutex() : PThreadMutex(recursive_attr()) {}
+ private:
+ static inline pthread_mutexattr_t* recursive_attr() {
+ static PThreadMutexAttr recursive(PTHREAD_MUTEX_RECURSIVE);
+ return &recursive.attr;
+ }
+ DISALLOW_COPY_AND_ASSIGN(PThreadRecursiveMutex);
+};
+
+
+class PThreadScopedDisabledCancellation {
+ public:
+ inline PThreadScopedDisabledCancellation() {
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancel_state_);
+ }
+ inline ~PThreadScopedDisabledCancellation() {
+ pthread_setcancelstate(old_cancel_state_, NULL);
+ }
+ private:
+ int old_cancel_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(PThreadScopedDisabledCancellation);
+};
+
+class PThreadCondVar {
+ public:
+ inline PThreadCondVar() {
+ int result = pthread_cond_init(&condvar_, 0);
+ DCHECK_EQ(0, result);
+ }
+ ~PThreadCondVar() {
+ int result = pthread_cond_destroy(&condvar_);
+ DCHECK_EQ(0, result);
+ }
+ inline void Signal() {
+ int result = pthread_cond_signal(&condvar_);
+ DCHECK_EQ(0, result);
+ }
+ inline void Broadcast() {
+ int result = pthread_cond_broadcast(&condvar_);
+ DCHECK_EQ(0, result);
+ }
+ pthread_cond_t condvar_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PThreadCondVar);
+};
+
+template <typename ValueType>
+class PThreadThreadVar {
+ public:
+ PThreadThreadVar(void (*destructor)(void* payload)) {
+ int result = pthread_key_create(&thread_key_, destructor);
+ DCHECK_EQ(0, result);
+ }
+ ~PThreadThreadVar() {
+ int result = pthread_key_delete(thread_key_);
+ DCHECK_EQ(0, result);
+ }
+ void SetValue(ValueType value) {
+ int result = pthread_setspecific(thread_key_, static_cast<void*>(value));
+ DCHECK_EQ(0, result);
+ }
+ ValueType GetValue() {
+ return static_cast<ValueType>(pthread_getspecific(thread_key_));
+ }
+ private:
+ pthread_key_t thread_key_;
+ DISALLOW_COPY_AND_ASSIGN(PThreadThreadVar);
+};
+
+// Returns the absolute time ms milliseconds from now. Useful for passing
+// result to pthread_cond_timedwait().
+struct timespec GetPThreadAbsoluteTime(uint32 ms_from_now);
+
+// Assign a descriptive label to the current thread. This is useful to see
+// in a GUI debugger, but may not be implementable on all platforms.
+void NameCurrentThreadForDebugging(char* name);
+
+#endif // CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_H_
diff --git a/chrome/browser/sync/util/pthread_helpers_fwd.h b/chrome/browser/sync/util/pthread_helpers_fwd.h
new file mode 100644
index 0000000..2756fceb
--- /dev/null
+++ b/chrome/browser/sync/util/pthread_helpers_fwd.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_FWD_H_
+#define CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_FWD_H_
+
+template <typename LockType>
+class PThreadScopedLock;
+class PThreadNoLock;
+class PThreadMutex;
+
+#endif // CHROME_BROWSER_SYNC_UTIL_PTHREAD_HELPERS_FWD_H_
diff --git a/chrome/browser/sync/util/query_helpers.cc b/chrome/browser/sync/util/query_helpers.cc
new file mode 100644
index 0000000..e640a6c
--- /dev/null
+++ b/chrome/browser/sync/util/query_helpers.cc
@@ -0,0 +1,282 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/query_helpers.h"
+
+#if defined(OS_WINDOWS)
+#include <windows.h>
+#endif
+
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::numeric_limits;
+using std::string;
+using std::vector;
+
+sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query) {
+ sqlite3_stmt* statement = NULL;
+ const char* query_tail;
+ if (SQLITE_OK != sqlite3_prepare(dbhandle, query,
+ CountBytes(query), &statement,
+ &query_tail)) {
+ LOG(ERROR) << query << "\n" << sqlite3_errmsg(dbhandle);
+ }
+ return statement;
+}
+
+void ExecOrDie(sqlite3* dbhandle, const char* query) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query));
+}
+
+// Finalizes (deletes) the query before returning.
+void ExecOrDie(sqlite3* dbhandle, const char* query, sqlite3_stmt* statement) {
+ int result = Exec(dbhandle, query, statement);
+ if (SQLITE_DONE != result) {
+ LOG(FATAL) << query << "\n" << sqlite3_errmsg(dbhandle);
+ }
+}
+
+int Exec(sqlite3* dbhandle, const char* query) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query));
+}
+
+// Finalizes (deletes) the query before returning.
+int Exec(sqlite3* dbhandle, const char* query, sqlite3_stmt* statement) {
+ int result;
+ do {
+ result = sqlite3_step(statement);
+ } while (SQLITE_ROW == result);
+ int finalize_result = sqlite3_finalize(statement);
+ return SQLITE_OK == finalize_result ? result : finalize_result;
+}
+
+int SqliteOpen(PathString filename, sqlite3** db) {
+ int result =
+#if PATHSTRING_IS_STD_STRING
+ sqlite3_open
+#else
+ sqlite3_open16
+#endif
+ (filename.c_str(), db);
+ LOG_IF(ERROR, SQLITE_OK != result) << "Error opening " << filename << ": "
+ << result;
+#ifdef OS_WINDOWS
+ if (SQLITE_OK == result) {
+ // Make sure we mark the db file as not indexed so since if any other app
+ // opens it, it can break our db locking.
+ DWORD attrs = GetFileAttributes(filename.c_str());
+ if (FILE_ATTRIBUTE_NORMAL == attrs)
+ attrs = FILE_ATTRIBUTE_NOT_CONTENT_INDEXED;
+ else
+ attrs = attrs | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED;
+ SetFileAttributes(filename.c_str(), attrs);
+ }
+#endif
+ // Be patient as we set pragmas.
+ sqlite3_busy_timeout(*db, numeric_limits<int>::max());
+#ifndef DISABLE_SQLITE_FULL_FSYNC
+ ExecOrDie(*db, "PRAGMA fullfsync = 1");
+#endif // DISABLE_SQLITE_FULL_FSYNC
+ ExecOrDie(*db, "PRAGMA synchronous = 2");
+ sqlite3_busy_timeout(*db, 0);
+ return SQLITE_OK;
+}
+
+#if !PATHSTRING_IS_STD_STRING
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const PathString& s, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_text16(statement, index, s.data(),
+ CountBytes(s), SQLITE_TRANSIENT));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const PathChar* s, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_text16(statement,
+ index,
+ s,
+ -1, // -1 means s is zero-terminated
+ SQLITE_TRANSIENT));
+ return statement;
+}
+#endif
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const string& s, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_text(statement,
+ index,
+ s.data(),
+ CountBytes(s),
+ SQLITE_TRANSIENT));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const char* s, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_text(statement,
+ index,
+ s,
+ -1, // -1 means s is zero-terminated
+ SQLITE_TRANSIENT));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, int32 n, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_int(statement, index, n));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, int64 n, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_int64(statement, index, n));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, double n, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_double(statement, index, n));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, bool b, int index) {
+ if (NULL == statement)
+ return statement;
+ int32 n = b ? 1 : 0;
+ CHECK(SQLITE_OK == sqlite3_bind_int(statement, index, n));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const vector<uint8>& v,
+ int index) {
+ if (NULL == statement)
+ return statement;
+ uint8* blob = v.empty() ? NULL : const_cast<uint8*>(&v[0]);
+ CHECK(SQLITE_OK == sqlite3_bind_blob(statement,
+ index,
+ blob,
+ v.size(),
+ SQLITE_TRANSIENT));
+ return statement;
+}
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, SqliteNullType, int index) {
+ if (NULL == statement)
+ return statement;
+ CHECK(SQLITE_OK == sqlite3_bind_null(statement, index));
+ return statement;
+}
+
+
+#if !PATHSTRING_IS_STD_STRING
+void GetColumn(sqlite3_stmt* statement, int index, PathString* value) {
+ if (sqlite3_column_type(statement, index) == SQLITE_NULL) {
+ value->clear();
+ } else {
+ value->assign(
+ static_cast<const PathChar*>(sqlite3_column_text16(statement, index)),
+ sqlite3_column_bytes16(statement, index) / sizeof(PathChar));
+ }
+}
+#endif
+
+void GetColumn(sqlite3_stmt* statement, int index, string* value) {
+ if (sqlite3_column_type(statement, index) == SQLITE_NULL) {
+ value->clear();
+ } else {
+ value->assign(
+ reinterpret_cast<const char*>(sqlite3_column_text(statement, index)),
+ sqlite3_column_bytes(statement, index));
+ }
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, int32* value) {
+ *value = sqlite3_column_int(statement, index);
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, int64* value) {
+ *value = sqlite3_column_int64(statement, index);
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, double* value) {
+ *value = sqlite3_column_double(statement, index);
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, bool* value) {
+ *value = (0 != sqlite3_column_int(statement, index));
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, std::vector<uint8>* value) {
+ if (sqlite3_column_type(statement, index) == SQLITE_NULL) {
+ value->clear();
+ } else {
+ const uint8* blob =
+ reinterpret_cast<const uint8*>(sqlite3_column_blob(statement, index));
+ for (int i = 0; i < sqlite3_column_bytes(statement, index); i++)
+ value->push_back(blob[i]);
+ }
+}
+
+bool DoesTableExist(sqlite3 *dbhandle, const string &table_name) {
+ ScopedStatement count_query
+ (PrepareQuery(dbhandle,
+ "SELECT count(*) from sqlite_master where name = ?",
+ table_name));
+
+ int query_result = sqlite3_step(count_query.get());
+ CHECK(SQLITE_ROW == query_result);
+ int count = sqlite3_column_int(count_query.get(), 0);
+
+ return 1 == count;
+}
+
+void ScopedStatement::reset(sqlite3_stmt* statement) {
+ if (NULL != statement_)
+ sqlite3_finalize(statement_);
+ statement_ = statement;
+}
+
+ScopedStatement::~ScopedStatement() {
+ reset(NULL);
+}
+
+ScopedStatementResetter::~ScopedStatementResetter() {
+ sqlite3_reset(statement_);
+}
+
+// Useful for encoding any sequence of bytes into a string that can be used in
+// a table name. Kind of like hex encoding, except that A is zero and P is 15.
+string APEncode(const string& in) {
+ string result;
+ result.reserve(in.size() * 2);
+ for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
+ unsigned int c = static_cast<unsigned char>(*i);
+ result.push_back((c & 0x0F) + 'A');
+ result.push_back(((c >> 4) & 0x0F) + 'A');
+ }
+ return result;
+}
+
+string APDecode(const string& in) {
+ string result;
+ result.reserve(in.size() / 2);
+ for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
+ unsigned int c = *i - 'A';
+ if (++i != in.end())
+ c = c | (static_cast<unsigned char>(*i - 'A') << 4);
+ result.push_back(c);
+ }
+ return result;
+}
diff --git a/chrome/browser/sync/util/query_helpers.h b/chrome/browser/sync/util/query_helpers.h
new file mode 100644
index 0000000..73aa422
--- /dev/null
+++ b/chrome/browser/sync/util/query_helpers.h
@@ -0,0 +1,698 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Typesafe composition of SQL query strings.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_QUERY_HELPERS_H_
+#define CHROME_BROWSER_SYNC_UTIL_QUERY_HELPERS_H_
+
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+// Sometimes threads contend on the DB lock itself, especially when one thread
+// is calling SaveChanges. In the worst case scenario, the user can put his
+// laptop to sleep during db contention, and wake up the laptop days later, so
+// infinity seems like the best choice here.
+const int kDirectoryBackingStoreBusyTimeoutMs = std::numeric_limits<int>::max();
+
+enum SqliteNullType {
+ SQLITE_NULL_VALUE
+};
+
+int SqliteOpen(PathString filename, sqlite3** ppDb);
+
+sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query);
+#if !PATHSTRING_IS_STD_STRING
+sqlite3_stmt* BindArg(sqlite3_stmt*, const PathString&, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, const PathChar*, int index);
+#endif
+sqlite3_stmt* BindArg(sqlite3_stmt*, const std::string&, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, const char*, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, int32, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, int64, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, double, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, bool, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, const std::vector<uint8>&, int index);
+sqlite3_stmt* BindArg(sqlite3_stmt*, SqliteNullType, int index);
+
+#if !PATHSTRING_IS_STD_STRING
+void GetColumn(sqlite3_stmt*, int index, PathString* value);
+#endif
+void GetColumn(sqlite3_stmt*, int index, std::string* value);
+void GetColumn(sqlite3_stmt*, int index, int32* value);
+void GetColumn(sqlite3_stmt*, int index, int64* value);
+void GetColumn(sqlite3_stmt*, int index, double* value);
+void GetColumn(sqlite3_stmt*, int index, bool* value);
+void GetColumn(sqlite3_stmt*, int index, std::vector<uint8>* value);
+
+bool DoesTableExist(sqlite3* dbhandle, const std::string& tablename);
+
+// Prepares a query with a WHERE clause that filters the values by the items
+// passed inside of the Vector.
+// Example:
+//
+// vector<PathString> v;
+// v.push_back("abc");
+// v.push_back("123");
+// PrepareQuery(dbhandle, "SELECT * FROM table", "column_name", v.begin(),
+// v.end(), "ORDER BY id");
+//
+// will produce the following query.
+//
+// SELECT * FROM table WHERE column_name = 'abc' OR column_name = '123' ORDER BY
+// id.
+//
+template<typename ItemIterator>
+sqlite3_stmt* PrepareQueryWhereColumnIn(sqlite3* dbhandle,
+ const std::string& query_head,
+ const std::string& filtername,
+ ItemIterator begin, ItemIterator end,
+ const std::string& query_options) {
+ std::string query;
+ query.reserve(512);
+ query += query_head;
+ const char* joiner = " WHERE ";
+ for (ItemIterator it = begin; it != end; ++it) {
+ query += joiner;
+ query += filtername;
+ query += " = ?";
+ joiner = " OR ";
+ }
+ query += " ";
+ query += query_options;
+ sqlite3_stmt* statement = NULL;
+ const char* query_tail;
+ if (SQLITE_OK != sqlite3_prepare(dbhandle, query.data(),
+ CountBytes(query), &statement,
+ &query_tail)) {
+ LOG(ERROR) << query << "\n" << sqlite3_errmsg(dbhandle);
+ }
+ int index = 1;
+ for (ItemIterator it = begin; it != end; ++it) {
+ BindArg(statement, *it, index);
+ ++index;
+ }
+ return statement;
+}
+
+template <typename Type1>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1) {
+ return BindArg(PrepareQuery(dbhandle, query), arg1, 1);
+}
+
+template <typename Type1, typename Type2>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1), arg2, 2);
+}
+
+template <typename Type1, typename Type2, typename Type3>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2), arg3, 3);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3), arg4, 4);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4),
+ arg5, 5);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5),
+ arg6, 6);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6),
+ arg7, 7);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7),
+ arg8, 8);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8),
+ arg9, 9);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9),
+ arg10, 10);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10),
+ arg11, 11);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11),
+ arg12, 12);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12),
+ arg13, 13);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12,
+ arg13),
+ arg14, 14);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12,
+ arg13, arg14),
+ arg15, 15);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15, typename Type16>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15, const Type16& arg16) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12,
+ arg13, arg14, arg15),
+ arg16, 16);
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15, typename Type16,
+ typename Type17>
+inline sqlite3_stmt* PrepareQuery(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15, const Type16& arg16,
+ const Type17& arg17) {
+ return BindArg(PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12,
+ arg13, arg14, arg15, arg16),
+ arg17, 17);
+}
+
+void ExecOrDie(sqlite3* dbhandle, const char* query);
+
+// Finalizes (deletes) the query before returning.
+void ExecOrDie(sqlite3* dbhandle, const char* query, sqlite3_stmt* statement);
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10) {
+ return ExecOrDie(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9) {
+ return ExecOrDie(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8) {
+ return ExecOrDie(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7) {
+ return ExecOrDie(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4, arg5, arg6));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4, arg5));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4));
+}
+
+template <typename Type1, typename Type2, typename Type3>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3));
+}
+
+template <typename Type1, typename Type2>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2));
+}
+
+template <typename Type1>
+inline void ExecOrDie(sqlite3* dbhandle, const char* query,
+ const Type1& arg1) {
+ return ExecOrDie(dbhandle, query, PrepareQuery(dbhandle, query, arg1));
+}
+
+
+int Exec(sqlite3* dbhandle, const char* query);
+// Finalizes (deletes) the query before returning.
+int Exec(sqlite3* dbhandle, const char* query, sqlite3_stmt* statement);
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15, typename Type16,
+ typename Type17>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15, const Type16& arg16,
+ const Type17& arg17) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13,
+ arg14, arg15, arg16, arg17));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15, typename Type16>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15, const Type16& arg16) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13,
+ arg14, arg15, arg16));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14, typename Type15>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14,
+ const Type15& arg15) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13,
+ arg14, arg15));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13, typename Type14>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13, const Type14& arg14) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13,
+ arg14));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12,
+ typename Type13>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12,
+ const Type13& arg13) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11, typename Type12>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11, const Type12& arg12) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10, typename Type11>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10,
+ const Type11& arg11) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10, arg11));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9, typename Type10>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9, const Type10& arg10) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, arg10));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8,
+ typename Type9>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8,
+ const Type9& arg9) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7, typename Type8>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7, const Type8& arg8) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6, typename Type7>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6,
+ const Type7& arg7) {
+ return Exec(dbhandle, query,
+ PrepareQuery(dbhandle, query, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5, typename Type6>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5, const Type6& arg6) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4, arg5, arg6));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4,
+ typename Type5>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4,
+ const Type5& arg5) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4, arg5));
+}
+
+template <typename Type1, typename Type2, typename Type3, typename Type4>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3, const Type4& arg4) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3, arg4));
+}
+
+template <typename Type1, typename Type2, typename Type3>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2,
+ const Type3& arg3) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2,
+ arg3));
+}
+
+template <typename Type1, typename Type2>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1, const Type2& arg2) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1, arg2));
+}
+
+template <typename Type1>
+inline int Exec(sqlite3* dbhandle, const char* query,
+ const Type1& arg1) {
+ return Exec(dbhandle, query, PrepareQuery(dbhandle, query, arg1));
+}
+
+
+// Holds an sqlite3_stmt* and automatically finalizes when passes out of scope.
+class ScopedStatement {
+ public:
+ explicit ScopedStatement(sqlite3_stmt* statement = 0)
+ : statement_(statement) { }
+ ~ScopedStatement();
+
+ sqlite3_stmt* get() const { return statement_; }
+
+ // Finalizes currently held statement and sets to new one.
+ void reset(sqlite3_stmt* statement);
+ protected:
+ sqlite3_stmt* statement_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedStatement);
+};
+
+
+// Holds an sqlite3_stmt* and automatically resets when passes out of scope.
+class ScopedStatementResetter {
+ public:
+ explicit ScopedStatementResetter(sqlite3_stmt* statement)
+ : statement_(statement) { }
+ ~ScopedStatementResetter();
+
+ protected:
+ sqlite3_stmt* const statement_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedStatementResetter);
+};
+
+// Useful for encoding any sequence of bytes into a string that can be used in
+// a table name. Kind of like hex encoding, except that A is zero and P is 15.
+std::string APEncode(const std::string& in);
+std::string APDecode(const std::string& in);
+
+#endif // CHROME_BROWSER_SYNC_UTIL_QUERY_HELPERS_H_
diff --git a/chrome/browser/sync/util/query_helpers_unittest.cc b/chrome/browser/sync/util/query_helpers_unittest.cc
new file mode 100644
index 0000000..8be295d
--- /dev/null
+++ b/chrome/browser/sync/util/query_helpers_unittest.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/util/query_helpers.h"
+
+#include <limits>
+#include <string>
+
+#include "chrome/browser/sync/util/compat-file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::numeric_limits;
+using std::string;
+
+TEST(QueryHelpers, APEncode) {
+ string test;
+ char i;
+ for (i = numeric_limits<char>::min(); i < numeric_limits<char>::max(); ++i)
+ test.push_back(i);
+ test.push_back(i);
+ const string encoded = APEncode(test);
+ const string decoded = APDecode(encoded);
+ ASSERT_EQ(test, decoded);
+}
+
+TEST(QueryHelpers, TestExecFailure) {
+ sqlite3* database;
+ const PathString test_database(PSTR("queryhelper_test.sqlite3"));
+ PathRemove(test_database);
+ ASSERT_EQ(SQLITE_OK, SqliteOpen(test_database, &database));
+ EXPECT_EQ(SQLITE_DONE, Exec(database, "CREATE TABLE test_table (idx int)"));
+ EXPECT_NE(SQLITE_DONE, Exec(database, "ALTER TABLE test_table ADD COLUMN "
+ "broken int32 default ?", -1));
+ PathRemove(test_database);
+}
diff --git a/chrome/browser/sync/util/row_iterator.h b/chrome/browser/sync/util/row_iterator.h
new file mode 100644
index 0000000..73748ee
--- /dev/null
+++ b/chrome/browser/sync/util/row_iterator.h
@@ -0,0 +1,122 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A handy type for iterating through query results.
+//
+// Just define your Traits type with
+//
+// RowType
+// Extract(statement*, RowType);
+//
+// and then pass an sqlite3_stmt into the constructor for begin,
+// and use the no-arg constructor for an end iterator. Ex:
+//
+// for (RowIterator<SomeTraits> i(statement), end; i != end; ++i)
+// ...
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_ROW_ITERATOR_H_
+#define CHROME_BROWSER_SYNC_UTIL_ROW_ITERATOR_H_
+
+#include "base/logging.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+template <typename ColumnType, int index = 0>
+struct SingleColumnTraits {
+ typedef ColumnType RowType;
+ inline void Extract(sqlite3_stmt* statement, ColumnType* x) const {
+ GetColumn(statement, index, x);
+ }
+};
+
+template <typename RowTraits>
+class RowIterator : public std::iterator<std::input_iterator_tag,
+ const typename RowTraits::RowType> {
+ public:
+ typedef typename RowTraits::RowType RowType;
+ // Statement must have been prepared, but not yet stepped:
+ RowIterator(sqlite3_stmt* statement, RowTraits traits = RowTraits()) {
+ kernel_ = new Kernel;
+ kernel_->done = false;
+ kernel_->refcount = 1;
+ kernel_->statement = statement;
+ kernel_->row_traits = traits;
+ ++(*this);
+ }
+ RowIterator() : kernel_(NULL) { } // creates end iterator
+
+ RowIterator(const RowIterator& i)
+ : kernel_(NULL) {
+ *this = i;
+ }
+
+ ~RowIterator() {
+ if (kernel_ && 0 == --(kernel_->refcount)) {
+ sqlite3_finalize(kernel_->statement);
+ delete kernel_;
+ }
+ }
+
+ RowIterator& operator = (const RowIterator& i) {
+ if (kernel_ && (0 == --(kernel_->refcount))) {
+ sqlite3_finalize(kernel_->statement);
+ delete kernel_;
+ }
+ kernel_ = i.kernel_;
+ if (kernel_)
+ kernel_->refcount += 1;
+ return *this;
+ }
+
+ RowIterator operator ++(int) {
+ RowIterator i(*this);
+ return ++i;
+ }
+
+ RowIterator& operator ++() {
+ DCHECK(NULL != kernel_);
+ if (SQLITE_ROW == sqlite3_step(kernel_->statement)) {
+ kernel_->row_traits.Extract(kernel_->statement, &kernel_->row);
+ } else {
+ kernel_->done = true;
+ }
+ return *this;
+ }
+
+ const RowType& operator *() const {
+ return *(operator -> ());
+ }
+
+ const RowType* operator ->() const {
+ DCHECK(NULL != kernel_);
+ DCHECK(!kernel_->done);
+ return &(kernel_->row);
+ }
+
+ bool operator == (const RowIterator& i) const {
+ if (kernel_ == i.kernel_)
+ return true;
+ if (NULL == kernel_ && i.kernel_->done)
+ return true;
+ if (NULL == i.kernel_ && kernel_->done)
+ return true;
+ return false;
+ }
+
+ bool operator != (const RowIterator& i) const {
+ return !(*this == i);
+ }
+
+ protected:
+ struct Kernel {
+ int refcount;
+ bool done;
+ RowType row;
+ sqlite3_stmt* statement;
+ RowTraits row_traits;
+ };
+
+ Kernel* kernel_;
+};
+
+#endif // CHROME_BROWSER_SYNC_UTIL_ROW_ITERATOR_H_
diff --git a/chrome/browser/sync/util/signin.h b/chrome/browser/sync/util/signin.h
new file mode 100644
index 0000000..0664d38
--- /dev/null
+++ b/chrome/browser/sync/util/signin.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_SIGNIN_H_
+#define CHROME_BROWSER_SYNC_UTIL_SIGNIN_H_
+
+// This enumeration is here since we used to support hosted and non-hosted
+// accounts, but now only the latter is supported.
+enum SignIn {
+ // The account foo@domain is authenticated as a consumer account.
+ GMAIL_SIGNIN
+};
+
+#endif // CHROME_BROWSER_SYNC_UTIL_SIGNIN_H_
diff --git a/chrome/browser/sync/util/sync_types.h b/chrome/browser/sync/util/sync_types.h
new file mode 100644
index 0000000..7a08575
--- /dev/null
+++ b/chrome/browser/sync/util/sync_types.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_SYNC_TYPES_H_
+#define CHROME_BROWSER_SYNC_UTIL_SYNC_TYPES_H_
+
+#include <iosfwd>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/string_util.h"
+
+// TODO(timsteele): Use base/file_path.h instead of PathString.
+#ifdef OS_WINDOWS
+#define PATHSTRING_IS_STD_STRING 0
+typedef std::wstring PathString;
+
+// This ugly double define hack is needed to allow the following pattern on
+// Windows:
+//
+// #define FOO "Foo"
+// #define FOO_PATH_STRING PSTR("Foo")
+//
+// TODO(sync): find out if we can avoid this.
+#define PSTR_UGLY_DOUBLE_DEFINE_HACK(s) L##s
+#define PSTR(s) PSTR_UGLY_DOUBLE_DEFINE_HACK(s)
+#define PSTR_CHAR wchar_t
+
+inline size_t PathLen(const wchar_t* s) {
+ return wcslen(s);
+}
+
+#else // Mac and Linux
+#define PATHSTRING_IS_STD_STRING 1
+#define PSTR_CHAR char
+typedef string PathString;
+#define PSTR(s) s
+inline size_t PathLen(const char* s) {
+ return strlen(s);
+}
+// Mac OS X typedef's BOOL to signed char, so we do that on Linux too.
+typedef signed char BOOL;
+typedef int32 LONG;
+typedef uint32 DWORD;
+typedef int64 LONGLONG;
+typedef uint64 ULONGLONG;
+
+#define MAX_PATH PATH_MAX
+#if !defined(TRUE)
+const BOOL TRUE = 1;
+#endif
+#if !defined(FALSE)
+const BOOL FALSE = 0;
+#endif
+#endif
+
+typedef PathString::value_type PathChar;
+
+inline size_t CountBytes(const std::wstring& s) {
+ return s.size() * sizeof(std::wstring::value_type);
+}
+
+inline size_t CountBytes(const std::string &s) {
+ return s.size() * sizeof(std::string::value_type);
+}
+
+inline PathString IntToPathString(int digit) {
+ std::string tmp = StringPrintf("%d", digit);
+ return PathString(tmp.begin(), tmp.end());
+}
+
+const int kSyncProtocolMaxNameLengthBytes = 255;
+
+#endif // CHROME_BROWSER_SYNC_UTIL_SYNC_TYPES_H_
diff --git a/chrome/browser/sync/util/user_settings-posix.cc b/chrome/browser/sync/util/user_settings-posix.cc
new file mode 100644
index 0000000..091e7e3
--- /dev/null
+++ b/chrome/browser/sync/util/user_settings-posix.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+// Implement the storage of service tokens in memory
+
+#include "chrome/browser/sync/util/user_settings.h"
+
+namespace browser_sync {
+
+void UserSettings::ClearAllServiceTokens() {
+ service_tokens_.clear();
+}
+
+void UserSettings::SetAuthTokenForService(const string& email,
+ const string& service_name, const string& long_lived_service_token) {
+ service_tokens_[service_name] = long_lived_service_token;
+}
+
+bool UserSettings::GetLastUserAndServiceToken(const string& service_name,
+ string* username,
+ string* service_token) {
+ ServiceTokenMap::const_iterator iter = service_tokens_.find(service_name);
+
+ if (iter != service_tokens_.end()) {
+ *username = email_;
+ *service_token = iter->second;
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/util/user_settings-win32.cc b/chrome/browser/sync/util/user_settings-win32.cc
new file mode 100644
index 0000000..dac7f21
--- /dev/null
+++ b/chrome/browser/sync/util/user_settings-win32.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#include "chrome/browser/sync/util/user_settings.h"
+
+#include <string>
+
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/data_encryption.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+
+using std::string;
+
+namespace browser_sync {
+
+bool UserSettings::GetLastUser(string* username) {
+ ScopedDBHandle dbhandle(this);
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT email FROM cookies"));
+ if (SQLITE_ROW == sqlite3_step(query.get())) {
+ GetColumn(query.get(), 0, username);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void UserSettings::ClearAllServiceTokens() {
+ ScopedDBHandle dbhandle(this);
+ ExecOrDie(dbhandle.get(), "DELETE FROM cookies");
+}
+
+void UserSettings::SetAuthTokenForService(const string& email,
+ const string& service_name, const string& long_lived_service_token) {
+ ScopedDBHandle dbhandle(this);
+ ExecOrDie(dbhandle.get(), "INSERT INTO cookies "
+ "(email, service_name, service_token) "
+ "values (?, ?, ?)", email, service_name,
+ EncryptData(long_lived_service_token));
+}
+
+// Returns the username whose credentials have been persisted as well as
+// a service token for the named service.
+bool UserSettings::GetLastUserAndServiceToken(const string& service_name,
+ string* username,
+ string* service_token) {
+ ScopedDBHandle dbhandle(this);
+ ScopedStatement query(PrepareQuery(
+ dbhandle.get(),
+ "SELECT email, service_token FROM cookies WHERE service_name = ?",
+ service_name));
+
+ if (SQLITE_ROW == sqlite3_step(query.get())) {
+ GetColumn(query.get(), 0, username);
+
+ std::vector<uint8> encrypted_service_token;
+ GetColumn(query.get(), 1, &encrypted_service_token);
+ DecryptData(encrypted_service_token, service_token);
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace browser_sync
+
diff --git a/chrome/browser/sync/util/user_settings.cc b/chrome/browser/sync/util/user_settings.cc
new file mode 100644
index 0000000..573365a
--- /dev/null
+++ b/chrome/browser/sync/util/user_settings.cc
@@ -0,0 +1,350 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+// This class isn't pretty. It's just a step better than globals, which is what
+// these were previously.
+
+#include "chrome/browser/sync/util/user_settings.h"
+
+#if defined(OS_WINDOWS)
+#include <windows.h>
+#endif
+
+#include <string>
+#include <limits>
+#include <vector>
+
+#include "base/file_util.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/syncable/directory_manager.h" // For migration.
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/data_encryption.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+
+using std::numeric_limits;
+using std::string;
+using std::vector;
+
+using syncable::DirectoryManager;
+
+namespace browser_sync {
+
+static const char PASSWORD_HASH[] = "password_hash2";
+static const char SALT[] = "salt2";
+
+static const int kSaltSize = 20;
+static const int kCurrentDBVersion = 11;
+
+UserSettings::ScopedDBHandle::ScopedDBHandle(UserSettings* settings) :
+ mutex_lock_(&settings->dbhandle_mutex_), handle_(&settings->dbhandle_) {
+}
+
+UserSettings::UserSettings() :
+ dbhandle_(NULL) {
+}
+
+string UserSettings::email() const {
+ ScopedLock lock(&mutex_);
+ return email_;
+}
+
+static void MakeSigninsTable(sqlite3* const dbhandle) {
+ // Multiple email addresses can map to the same Google Account.
+ // This table keeps a map of sign-in email addresses to primary
+ // Google Account email addresses.
+ ExecOrDie(dbhandle, "CREATE TABLE signins"
+ " (signin, primary_email, "
+ " PRIMARY KEY(signin, primary_email) ON CONFLICT REPLACE)");
+}
+
+void UserSettings::MigrateOldVersionsAsNeeded(sqlite3* const handle,
+ int current_version) {
+ switch (current_version) {
+ // Versions 1-9 are unhandled. Version numbers greater than
+ // kCurrentDBVersion should have already been weeded out by the caller.
+ default:
+ // When the version is too old, we just try to continue anyway. There
+ // should not be a released product that makes a database too old for us
+ // to handle.
+ LOG(WARNING) << "UserSettings database version " << current_version <<
+ " is too old to handle.";
+ return;
+ case 10:
+ {
+ // Scrape the 'shares' table to find the syncable DB. 'shares'
+ // had a pair of string columns that mapped the username to the filename
+ // of the sync data sqlite3 file. Version 11 switched to a constant
+ // filename, so here we read the string, copy the file to the new name,
+ // delete the old one, and then drop the unused shares table.
+ ScopedStatement share_query(PrepareQuery(handle,
+ "SELECT share_name, file_name FROM shares"));
+ int query_result = sqlite3_step(share_query.get());
+ CHECK(SQLITE_ROW == query_result);
+ PathString share_name, file_name;
+ GetColumn(share_query.get(), 0, &share_name);
+ GetColumn(share_query.get(), 1, &file_name);
+
+ if (!file_util::Move(file_name,
+ DirectoryManager::GetSyncDataDatabaseFilename())) {
+ LOG(WARNING) << "Unable to upgrade UserSettings from v10";
+ return;
+ }
+ }
+ ExecOrDie(handle, "DROP TABLE shares");
+ ExecOrDie(handle, "UPDATE db_version SET version = 11");
+ // FALL THROUGH
+ case kCurrentDBVersion:
+ // Nothing to migrate.
+ return;
+ }
+}
+
+static void MakeCookiesTable(sqlite3* const dbhandle) {
+ // This table keeps a list of auth tokens for each signed in account. There
+ // will be as many rows as there are auth tokens per sign in.
+ // The service_token column will store encrypted values.
+ ExecOrDie(dbhandle, "CREATE TABLE cookies"
+ " (email, service_name, service_token, "
+ " PRIMARY KEY(email, service_name) ON CONFLICT REPLACE)");
+}
+
+static void MakeSigninTypesTable(sqlite3* const dbhandle) {
+ // With every successful gaia authentication, remember if it was
+ // a hosted domain or not.
+ ExecOrDie(dbhandle, "CREATE TABLE signin_types"
+ " (signin, signin_type, "
+ " PRIMARY KEY(signin, signin_type) ON CONFLICT REPLACE)");
+}
+
+static void MakeClientIDTable(sqlite3* const dbhandle) {
+ // Stores a single client ID value that can be used as the client id,
+ // if there's not another such ID provided on the install.
+ ExecOrDie(dbhandle, "CREATE TABLE client_id (id) ");
+ ExecOrDie(dbhandle, "INSERT INTO client_id values ( ? )",
+ Generate128BitRandomHexString());
+}
+
+bool UserSettings::Init(const PathString& settings_path) {
+ { // Scope the handle
+ ScopedDBHandle dbhandle(this);
+ if (dbhandle_)
+ sqlite3_close(dbhandle_);
+ CHECK(SQLITE_OK == SqliteOpen(settings_path.c_str(), &dbhandle_));
+ // In the worst case scenario, the user may hibernate his computer during
+ // one of our transactions.
+ sqlite3_busy_timeout(dbhandle_, numeric_limits<int>::max());
+
+ int sqlite_result = Exec(dbhandle.get(), "BEGIN EXCLUSIVE TRANSACTION");
+ CHECK(SQLITE_DONE == sqlite_result);
+ ScopedStatement table_query(PrepareQuery(dbhandle.get(),
+ "select count(*) from sqlite_master where type = 'table'"
+ " and name = 'db_version'"));
+ int query_result = sqlite3_step(table_query.get());
+ CHECK(SQLITE_ROW == query_result);
+ int table_count = 0;
+ GetColumn(table_query.get(), 0, &table_count);
+ table_query.reset(NULL);
+ if (table_count > 0) {
+ ScopedStatement version_query(PrepareQuery(dbhandle.get(),
+ "SELECT version FROM db_version"));
+ query_result = sqlite3_step(version_query.get());
+ CHECK(SQLITE_ROW == query_result);
+ const int version = sqlite3_column_int(version_query.get(), 0);
+ version_query.reset(NULL);
+ if (version > kCurrentDBVersion) {
+ LOG(WARNING) << "UserSettings database is too new.";
+ return false;
+ }
+
+ MigrateOldVersionsAsNeeded(dbhandle.get(), version);
+ } else {
+ // Create settings table.
+ ExecOrDie(dbhandle.get(), "CREATE TABLE settings"
+ " (email, key, value, "
+ " PRIMARY KEY(email, key) ON CONFLICT REPLACE)");
+
+ // Create and populate version table.
+ ExecOrDie(dbhandle.get(), "CREATE TABLE db_version ( version )");
+ ExecOrDie(dbhandle.get(), "INSERT INTO db_version values ( ? )",
+ kCurrentDBVersion);
+
+ MakeSigninsTable(dbhandle.get());
+ MakeCookiesTable(dbhandle.get());
+ MakeSigninTypesTable(dbhandle.get());
+ MakeClientIDTable(dbhandle.get());
+ }
+ ExecOrDie(dbhandle.get(), "COMMIT TRANSACTION");
+ }
+#ifdef OS_WINDOWS
+ // Do not index this file. Scanning can occur every time we close the file,
+ // which causes long delays in SQLite's file locking.
+ const DWORD attrs = GetFileAttributes(settings_path.c_str());
+ const BOOL attrs_set =
+ SetFileAttributes(settings_path.c_str(),
+ attrs | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED);
+#endif
+ return true;
+}
+
+
+UserSettings::~UserSettings() {
+ if (dbhandle_)
+ sqlite3_close(dbhandle_);
+}
+
+const int32 kInvalidHash = 0xFFFFFFFF;
+
+// We use 10 bits of data from the MD5 digest as the hash.
+const int32 kHashMask = 0x3FF;
+
+int32 GetHashFromDigest(const vector<uint8>& digest) {
+ int32 hash = 0;
+ int32 mask = kHashMask;
+ for (vector<uint8>::const_iterator i = digest.begin(); i != digest.end();
+ ++i) {
+ hash = hash << 8;
+ hash = hash | (*i & kHashMask);
+ mask = mask >> 8;
+ if (0 == mask)
+ break;
+ }
+ return hash;
+}
+
+void UserSettings::StoreEmailForSignin(const string& signin,
+ const string& primary_email) {
+ ScopedDBHandle dbhandle(this);
+ ExecOrDie(dbhandle.get(), "BEGIN TRANSACTION");
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT COUNT(*) FROM signins"
+ " WHERE signin = ? AND primary_email = ?",
+ signin, primary_email));
+ int query_result = sqlite3_step(query.get());
+ CHECK(SQLITE_ROW == query_result);
+ int32 count = 0;
+ GetColumn(query.get(), 0, &count);
+ query.reset(NULL);
+ if (0 == count) {
+ // Migrate any settings the user might have from earlier versions.
+ ExecOrDie(dbhandle.get(), "UPDATE settings SET email = ? WHERE email = ?",
+ primary_email, signin);
+ // Store this signin:email mapping.
+ ExecOrDie(dbhandle.get(), "INSERT INTO signins(signin, primary_email)"
+ " values ( ?, ? )", signin, primary_email);
+ }
+ ExecOrDie(dbhandle.get(), "COMMIT TRANSACTION");
+}
+
+bool UserSettings::GetEmailForSignin(/*in, out*/string* signin) {
+ ScopedDBHandle dbhandle(this);
+ string result;
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT primary_email FROM signins"
+ " WHERE signin = ?", *signin));
+ int query_result = sqlite3_step(query.get());
+ if (SQLITE_ROW == query_result) {
+ GetColumn(query.get(), 0, &result);
+ if (!result.empty()) {
+ swap(result, *signin);
+ return true;
+ }
+ }
+ return false;
+}
+
+void UserSettings::StoreHashedPassword(const string& email,
+ const string& password) {
+ // Save one-way hashed password:
+ char binary_salt[kSaltSize];
+ {
+ ScopedLock lock(&mutex_);
+ GetRandomBytes(binary_salt, sizeof(binary_salt));
+ }
+ const string salt = APEncode(string(binary_salt, sizeof(binary_salt)));
+ MD5Calculator md5;
+ md5.AddData(salt.data(), salt.size());
+ md5.AddData(password.data(), password.size());
+ ScopedDBHandle dbhandle(this);
+ ExecOrDie(dbhandle.get(), "BEGIN TRANSACTION");
+ ExecOrDie(dbhandle.get(), "INSERT INTO settings(email, key, value )"
+ " values ( ?, ?, ? )", email, PASSWORD_HASH,
+ GetHashFromDigest(md5.GetDigest()));
+ ExecOrDie(dbhandle.get(), "INSERT INTO settings(email, key, value )"
+ " values ( ?, ?, ? )", email, SALT, salt);
+ ExecOrDie(dbhandle.get(), "COMMIT TRANSACTION");
+}
+
+bool UserSettings::VerifyAgainstStoredHash(const string& email,
+ const string& password) {
+ ScopedDBHandle dbhandle(this);
+ string salt_and_digest;
+
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT key, value FROM settings"
+ " WHERE email = ? AND"
+ " (key = ? OR key = ?)",
+ email, PASSWORD_HASH, SALT));
+ int query_result = sqlite3_step(query.get());
+ string salt;
+ int32 hash = kInvalidHash;
+ while (SQLITE_ROW == query_result) {
+ string key;
+ GetColumn(query.get(), 0, &key);
+ if (key == SALT)
+ GetColumn(query.get(), 1, &salt);
+ else
+ GetColumn(query.get(), 1, &hash);
+ query_result = sqlite3_step(query.get());
+ }
+ CHECK(SQLITE_DONE == query_result);
+ if (salt.empty() || hash == kInvalidHash)
+ return false;
+ MD5Calculator md5;
+ md5.AddData(salt.data(), salt.size());
+ md5.AddData(password.data(), password.size());
+ return hash == GetHashFromDigest(md5.GetDigest());
+}
+
+void UserSettings::SwitchUser(const string& username) {
+ {
+ ScopedLock lock(&mutex_);
+ email_ = username;
+ }
+}
+
+void UserSettings::RememberSigninType(const string& signin, SignIn signin_type)
+{
+ ScopedDBHandle dbhandle(this);
+ ExecOrDie(dbhandle.get(), "INSERT INTO signin_types(signin, signin_type)"
+ " values ( ?, ? )", signin, static_cast<int>(signin_type));
+}
+
+SignIn UserSettings::RecallSigninType(const string& signin, SignIn default_type)
+{
+ ScopedDBHandle dbhandle(this);
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT signin_type from signin_types"
+ " WHERE signin = ?", signin));
+ int query_result = sqlite3_step(query.get());
+ if (SQLITE_ROW == query_result) {
+ int signin_type;
+ GetColumn(query.get(), 0, &signin_type);
+ return static_cast<SignIn>(signin_type);
+ }
+ return default_type;
+}
+
+string UserSettings::GetClientId() {
+ ScopedDBHandle dbhandle(this);
+ ScopedStatement query(PrepareQuery(dbhandle.get(),
+ "SELECT id FROM client_id"));
+ int query_result = sqlite3_step(query.get());
+ string client_id;
+ if (query_result == SQLITE_ROW)
+ GetColumn(query.get(), 0, &client_id);
+ return client_id;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/util/user_settings.h b/chrome/browser/sync/util/user_settings.h
new file mode 100644
index 0000000..45116a5
--- /dev/null
+++ b/chrome/browser/sync/util/user_settings.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_UTIL_USER_SETTINGS_H_
+#define CHROME_BROWSER_SYNC_UTIL_USER_SETTINGS_H_
+
+#include <map>
+#include <set>
+#include <string>
+
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/signin.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+extern "C" struct sqlite3;
+
+namespace browser_sync {
+
+class URLFactory;
+
+class UserSettings {
+ public:
+ // db_path is used for the main user settings.
+ // passwords_file contains hashes of passwords.
+ UserSettings();
+ ~UserSettings();
+ // Returns false (failure) if the db is a newer version.
+ bool Init(const PathString& settings_path);
+ void StoreHashedPassword(const std::string& email,
+ const std::string& password);
+ bool VerifyAgainstStoredHash(const std::string& email,
+ const std::string& password);
+
+ // Set the username.
+ void SwitchUser(const std::string& email);
+
+ // Saves the email address and the named service token for the given user.
+ // Call this multiple times with the same email parameter to save
+ // multiple service tokens.
+ void SetAuthTokenForService(const std::string& email,
+ const std::string& service_name,
+ const std::string& long_lived_service_token);
+ // Erases all saved service tokens.
+ void ClearAllServiceTokens();
+
+ // Returns the user name whose credentials have been persisted.
+ bool GetLastUser(std::string* username);
+
+ // Returns the user name whose credentials have been persisted as well as
+ // a service token for the named service
+ bool GetLastUserAndServiceToken(const std::string& service_name,
+ std::string* username,
+ std::string* service_token);
+
+ void RememberSigninType(const std::string& signin, SignIn signin_type);
+ SignIn RecallSigninType(const std::string& signin, SignIn default_type);
+
+ void RemoveAllGuestSettings();
+
+ void RemoveShare(const PathString& share_path);
+
+ void StoreEmailForSignin(const std::string& signin,
+ const std::string& primary_email);
+
+ // Multiple email addresses can map to the same Google Account. This method
+ // returns the primary Google Account email associated with |signin|, which
+ // is used as both input and output.
+ bool GetEmailForSignin(std::string* signin);
+
+
+ std::string email() const;
+
+ // Get a unique ID suitable for use as the client ID. This ID
+ // has the lifetime of the user settings database. You may use this ID if
+ // your operating environment does not provide its own unique client ID.
+ std::string GetClientId();
+
+ protected:
+ struct ScopedDBHandle {
+ ScopedDBHandle(UserSettings* settings);
+ inline sqlite3* get() const { return *handle_; }
+ PThreadScopedLock<PThreadMutex> mutex_lock_;
+ sqlite3** const handle_;
+ };
+
+ friend struct ScopedDBHandle;
+ friend class URLFactory;
+
+ void MigrateOldVersionsAsNeeded(sqlite3* const handle, int current_version);
+
+ private:
+ std::string email_;
+ mutable PThreadMutex mutex_; // protects email_
+ typedef PThreadScopedLock<PThreadMutex> ScopedLock;
+
+ // We keep a single dbhandle.
+ sqlite3* dbhandle_;
+ PThreadMutex dbhandle_mutex_;
+
+ // TODO(sync): Use in-memory cache for service auth tokens on posix.
+ // Have someone competent in Windows switch it over to not use Sqlite in the
+ // future.
+#ifndef OS_WINDOWS
+ typedef std::map<std::string, std::string> ServiceTokenMap;
+ ServiceTokenMap service_tokens_;
+#endif // OS_WINDOWS
+
+ DISALLOW_COPY_AND_ASSIGN(UserSettings);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_UTIL_USER_SETTINGS_H_
diff --git a/chrome/browser/sync/util/user_settings_unittest.cc b/chrome/browser/sync/util/user_settings_unittest.cc
new file mode 100644
index 0000000..56c761d
--- /dev/null
+++ b/chrome/browser/sync/util/user_settings_unittest.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE entry.
+
+#include "base/file_util.h"
+#include "base/test_file_util.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/user_settings.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using browser_sync::UserSettings;
+
+static const wchar_t* kV10UserSettingsDB = L"Version10Settings.sqlite3";
+static const wchar_t* kOldStyleSyncDataDB = L"OldStyleSyncData.sqlite3";
+
+class UserSettingsTest : public testing::Test {
+ public:
+ UserSettingsTest() : sync_data_("Some sync data") { }
+ void SetUpVersion10Databases() {
+ CleanUpVersion10Databases();
+ sqlite3* primer_handle = NULL;
+ ASSERT_EQ(SQLITE_OK, SqliteOpen(kV10UserSettingsDB,
+ &primer_handle));
+ FilePath old_sync_data(kOldStyleSyncDataDB);
+
+ ASSERT_EQ(sync_data_.length(), file_util::WriteFile(
+ old_sync_data, sync_data_.data(), sync_data_.length()));
+
+ // Create settings table.
+ ExecOrDie(primer_handle, "CREATE TABLE settings"
+ " (email, key, value, "
+ " PRIMARY KEY(email, key) ON CONFLICT REPLACE)");
+
+ // Create and populate version table.
+ ExecOrDie(primer_handle, "CREATE TABLE db_version ( version )");
+ ExecOrDie(primer_handle, "INSERT INTO db_version values ( ? )", 10);
+ // Create shares table.
+ ExecOrDie(primer_handle, "CREATE TABLE shares"
+ " (email, share_name, file_name,"
+ " PRIMARY KEY(email, share_name) ON CONFLICT REPLACE)");
+ // Populate a share.
+ ExecOrDie(primer_handle, "INSERT INTO shares values ( ?, ?, ?)",
+ "foo@foo.com", "foo@foo.com", WideToUTF8(kOldStyleSyncDataDB));
+ sqlite3_close(primer_handle);
+ }
+
+ void CleanUpVersion10Databases() {
+ ASSERT_TRUE(file_util::DieFileDie(FilePath(kV10UserSettingsDB), false));
+ ASSERT_TRUE(file_util::DieFileDie(FilePath(kOldStyleSyncDataDB), false));
+ ASSERT_TRUE(file_util::DieFileDie(FilePath(L"SyncData.sqlite3"), false));
+ }
+
+ const std::string& sync_data() const { return sync_data_; }
+
+ private:
+ std::string sync_data_;
+};
+
+TEST_F(UserSettingsTest, MigrateFromV10ToV11) {
+ SetUpVersion10Databases();
+ {
+ // Create a UserSettings, which should trigger migration code.
+ // We do this inside a scoped block so it closes itself and we can poke
+ // around to see what happened later.
+ UserSettings settings;
+ settings.Init(kV10UserSettingsDB);
+ }
+
+ // Now poke around using sqlite to see if UserSettings migrated properly.
+ sqlite3* handle = NULL;
+ ASSERT_EQ(SQLITE_OK, SqliteOpen(kV10UserSettingsDB, &handle));
+ ScopedStatement version_query(PrepareQuery(handle,
+ "SELECT version FROM db_version"));
+ ASSERT_EQ(SQLITE_ROW, sqlite3_step(version_query.get()));
+
+ const int version = sqlite3_column_int(version_query.get(), 0);
+ EXPECT_EQ(11, version);
+ EXPECT_FALSE(file_util::PathExists(kOldStyleSyncDataDB));
+
+ std::wstring path(syncable::DirectoryManager::GetSyncDataDatabaseFilename());
+
+ std::string contents;
+ ASSERT_TRUE(file_util::ReadFileToString(path, &contents));
+ EXPECT_EQ(sync_data(), contents);
+}