summaryrefslogtreecommitdiffstats
path: root/chrome/browser/sync/engine
diff options
context:
space:
mode:
authornick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-10 06:05:27 +0000
committernick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-10 06:05:27 +0000
commit5852edc1b6eab234b9e048c41dd0d664ae7fc747 (patch)
tree9e5d8eb4833b76cdb11e66fc3607689e0f5e0122 /chrome/browser/sync/engine
parentf6059e37f8b8ac335ce18a189a13e702974a1c7e (diff)
downloadchromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.zip
chromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.tar.gz
chromium_src-5852edc1b6eab234b9e048c41dd0d664ae7fc747.tar.bz2
Initial commit of sync engine code to browser/sync.
The code is not built on any platform yet. That will arrive as a subsequent checkin. This is an implementation of the interface exposed earlier through syncapi.h. It is the client side of a sync protocol that lets users sync their browser data (currently, just bookmarks) with their Google Account. Table of contents: browser/sync/ protocol - The protocol definition, and other definitions necessary to connect to the service. syncable/ - defines a data model for syncable objects, and provides a sqlite-based backing store for this model. engine/ - includes the core sync logic, including committing changes to the server, downloading changes from the server, resolving conflicts, other parts of the sync algorithm. engine/net - parts of the sync engine focused on the business of talking to the server. Some of this binds a generic "server connection" interface to a concrete implementation provided by Chromium. notifier - the part of the syncer focused on the business of sending and receiving xmpp notifications. Notifications are used instead of polling to achieve very low latency change propagation. util - not necessarily sync specific utility code. Much of this is scaffolding which should either be replaced by, or merged with, the utility code in base/. BUG=none TEST=this code includes its own suite of unit tests. Review URL: http://codereview.chromium.org/194065 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@25850 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser/sync/engine')
-rw-r--r--chrome/browser/sync/engine/all_status.cc335
-rw-r--r--chrome/browser/sync/engine/all_status.h210
-rw-r--r--chrome/browser/sync/engine/all_status_unittest.cc24
-rw-r--r--chrome/browser/sync/engine/apply_updates_command.cc34
-rw-r--r--chrome/browser/sync/engine/apply_updates_command.h33
-rw-r--r--chrome/browser/sync/engine/apply_updates_command_unittest.cc166
-rw-r--r--chrome/browser/sync/engine/auth_watcher.cc419
-rw-r--r--chrome/browser/sync/engine/auth_watcher.h204
-rw-r--r--chrome/browser/sync/engine/authenticator.cc106
-rw-r--r--chrome/browser/sync/engine/authenticator.h106
-rw-r--r--chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc439
-rw-r--r--chrome/browser/sync/engine/build_and_process_conflict_sets_command.h64
-rw-r--r--chrome/browser/sync/engine/build_commit_command.cc143
-rw-r--r--chrome/browser/sync/engine/build_commit_command.h27
-rw-r--r--chrome/browser/sync/engine/change_reorder_buffer.cc199
-rw-r--r--chrome/browser/sync/engine/change_reorder_buffer.h100
-rw-r--r--chrome/browser/sync/engine/client_command_channel.h31
-rw-r--r--chrome/browser/sync/engine/conflict_resolution_view.cc167
-rw-r--r--chrome/browser/sync/engine/conflict_resolution_view.h123
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.cc758
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.h129
-rw-r--r--chrome/browser/sync/engine/download_updates_command.cc64
-rw-r--r--chrome/browser/sync/engine/download_updates_command.h27
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.cc242
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.h202
-rw-r--r--chrome/browser/sync/engine/model_changing_syncer_command.cc19
-rw-r--r--chrome/browser/sync/engine/model_changing_syncer_command.h50
-rw-r--r--chrome/browser/sync/engine/model_safe_worker.h45
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator.cc483
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator.h304
-rw-r--r--chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc42
-rw-r--r--chrome/browser/sync/engine/net/http_return.h16
-rw-r--r--chrome/browser/sync/engine/net/openssl_init.cc129
-rw-r--r--chrome/browser/sync/engine/net/openssl_init.h20
-rw-r--r--chrome/browser/sync/engine/net/server_connection_manager.cc375
-rw-r--r--chrome/browser/sync/engine/net/server_connection_manager.h345
-rw-r--r--chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc77
-rw-r--r--chrome/browser/sync/engine/net/syncapi_server_connection_manager.h75
-rw-r--r--chrome/browser/sync/engine/net/url_translator.cc50
-rw-r--r--chrome/browser/sync/engine/net/url_translator.h27
-rw-r--r--chrome/browser/sync/engine/post_commit_message_command.cc50
-rw-r--r--chrome/browser/sync/engine/post_commit_message_command.h27
-rw-r--r--chrome/browser/sync/engine/process_commit_response_command.cc374
-rw-r--r--chrome/browser/sync/engine/process_commit_response_command.h54
-rw-r--r--chrome/browser/sync/engine/process_updates_command.cc167
-rw-r--r--chrome/browser/sync/engine/process_updates_command.h45
-rw-r--r--chrome/browser/sync/engine/resolve_conflicts_command.cc28
-rw-r--r--chrome/browser/sync/engine/resolve_conflicts_command.h34
-rw-r--r--chrome/browser/sync/engine/sync_cycle_state.h253
-rw-r--r--chrome/browser/sync/engine/sync_process_state.cc325
-rw-r--r--chrome/browser/sync/engine/sync_process_state.h384
-rw-r--r--chrome/browser/sync/engine/syncapi.cc1565
-rw-r--r--chrome/browser/sync/engine/syncer.cc338
-rw-r--r--chrome/browser/sync/engine/syncer.h234
-rw-r--r--chrome/browser/sync/engine/syncer_command.cc54
-rw-r--r--chrome/browser/sync/engine/syncer_command.h44
-rw-r--r--chrome/browser/sync/engine/syncer_end_command.cc44
-rw-r--r--chrome/browser/sync/engine/syncer_end_command.h32
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util.cc276
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util.h73
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util_unittest.cc119
-rw-r--r--chrome/browser/sync/engine/syncer_session.h364
-rw-r--r--chrome/browser/sync/engine/syncer_status.cc15
-rw-r--r--chrome/browser/sync/engine/syncer_status.h255
-rw-r--r--chrome/browser/sync/engine/syncer_thread.cc558
-rw-r--r--chrome/browser/sync/engine/syncer_thread.h235
-rw-r--r--chrome/browser/sync/engine/syncer_thread_unittest.cc299
-rw-r--r--chrome/browser/sync/engine/syncer_types.h151
-rw-r--r--chrome/browser/sync/engine/syncer_unittest.cc4588
-rw-r--r--chrome/browser/sync/engine/syncer_util.cc845
-rw-r--r--chrome/browser/sync/engine/syncer_util.h206
-rw-r--r--chrome/browser/sync/engine/syncproto.h72
-rw-r--r--chrome/browser/sync/engine/syncproto_unittest.cc18
-rw-r--r--chrome/browser/sync/engine/update_applicator.cc98
-rw-r--r--chrome/browser/sync/engine/update_applicator.h61
-rw-r--r--chrome/browser/sync/engine/verify_updates_command.cc102
-rw-r--r--chrome/browser/sync/engine/verify_updates_command.h36
77 files changed, 18832 insertions, 0 deletions
diff --git a/chrome/browser/sync/engine/all_status.cc b/chrome/browser/sync/engine/all_status.cc
new file mode 100644
index 0000000..e1bc5c7
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status.cc
@@ -0,0 +1,335 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/all_status.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "base/rand_util.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+static const time_t kMinSyncObserveInterval = 10; // seconds
+
+// Backoff interval randomization factor.
+static const int kBackoffRandomizationFactor = 2;
+
+const char* AllStatus::GetSyncStatusString(SyncStatus icon) {
+ const char* strings[] = {"OFFLINE", "OFFLINE_UNSYNCED", "SYNCING", "READY",
+ "CONFLICT", "OFFLINE_UNUSABLE"};
+ COMPILE_ASSERT(ARRAYSIZE(strings) == ICON_STATUS_COUNT, enum_indexed_array);
+ if (icon < 0 || icon >= ARRAYSIZE(strings))
+ LOG(FATAL) << "Illegal Icon State:" << icon;
+ return strings[icon];
+}
+
+static const AllStatus::Status init_status =
+ { AllStatus::OFFLINE };
+
+static const AllStatusEvent shutdown_event =
+ { AllStatusEvent::SHUTDOWN, init_status };
+
+AllStatus::AllStatus() : channel_(new Channel(shutdown_event)),
+ status_(init_status) {
+ status_.initial_sync_ended = true;
+ status_.notifications_enabled = false;
+}
+
+AllStatus::~AllStatus() {
+ delete channel_;
+}
+
+void AllStatus::WatchConnectionManager(ServerConnectionManager* conn_mgr) {
+ conn_mgr_hookup_.reset(NewEventListenerHookup(conn_mgr->channel(), this,
+ &AllStatus::HandleServerConnectionEvent));
+}
+
+void AllStatus::WatchAuthenticator(GaiaAuthenticator* gaia) {
+ gaia_hookup_.reset(NewEventListenerHookup(gaia->channel(), this,
+ &AllStatus::HandleGaiaAuthEvent));
+}
+
+void AllStatus::WatchAuthWatcher(AuthWatcher* auth_watcher) {
+ authwatcher_hookup_.reset(
+ NewEventListenerHookup(auth_watcher->channel(), this,
+ &AllStatus::HandleAuthWatcherEvent));
+}
+
+void AllStatus::WatchSyncerThread(SyncerThread* syncer_thread) {
+ syncer_thread_hookup_.reset(
+ NewEventListenerHookup(syncer_thread->channel(), this,
+ &AllStatus::HandleSyncerEvent));
+}
+
+AllStatus::Status AllStatus::CreateBlankStatus() const {
+ Status status = status_;
+ status.syncing = true;
+ status.unsynced_count = 0;
+ status.conflicting_count = 0;
+ status.initial_sync_ended = false;
+ status.syncer_stuck = false;
+ status.max_consecutive_errors = 0;
+ status.server_broken = false;
+ status.updates_available = 0;
+ status.updates_received = 0;
+ return status;
+}
+
+AllStatus::Status AllStatus::CalcSyncing(const SyncerEvent &event) const {
+ Status status = CreateBlankStatus();
+ SyncerStatus syncerStatus(event.last_session);
+ status.unsynced_count += syncerStatus.unsynced_count();
+ status.conflicting_count += syncerStatus.conflicting_commits();
+ if (syncerStatus.current_sync_timestamp() ==
+ syncerStatus.servers_latest_timestamp()) {
+ status.conflicting_count += syncerStatus.conflicting_updates();
+ }
+ status.syncing |= syncerStatus.syncing();
+ // Show a syncer as syncing if it's got stalled updates.
+ status.syncing = event.last_session->ShouldSyncAgain();
+ status.initial_sync_ended |= syncerStatus.IsShareUsable();
+ status.syncer_stuck |= syncerStatus.syncer_stuck();
+ if (syncerStatus.consecutive_errors() > status.max_consecutive_errors)
+ status.max_consecutive_errors = syncerStatus.consecutive_errors();
+
+ // 100 is an arbitrary limit.
+ if (syncerStatus.consecutive_transient_error_commits() > 100)
+ status.server_broken = true;
+
+ status.updates_available += syncerStatus.servers_latest_timestamp();
+ status.updates_received += syncerStatus.current_sync_timestamp();
+ return status;
+}
+
+AllStatus::Status AllStatus::CalcSyncing() const {
+ return CreateBlankStatus();
+}
+
+int AllStatus::CalcStatusChanges(Status* old_status) {
+ int what_changed = 0;
+
+ // Calculate what changed and what the new icon should be.
+ if (status_.syncing != old_status->syncing)
+ what_changed |= AllStatusEvent::SYNCING;
+ if (status_.unsynced_count != old_status->unsynced_count)
+ what_changed |= AllStatusEvent::UNSYNCED_COUNT;
+ if (status_.server_up != old_status->server_up)
+ what_changed |= AllStatusEvent::SERVER_UP;
+ if (status_.server_reachable != old_status->server_reachable)
+ what_changed |= AllStatusEvent::SERVER_REACHABLE;
+ if (status_.notifications_enabled != old_status->notifications_enabled)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_ENABLED;
+ if (status_.notifications_received != old_status->notifications_received)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_RECEIVED;
+ if (status_.notifications_sent != old_status->notifications_sent)
+ what_changed |= AllStatusEvent::NOTIFICATIONS_SENT;
+ if (status_.initial_sync_ended != old_status->initial_sync_ended)
+ what_changed |= AllStatusEvent::INITIAL_SYNC_ENDED;
+ if (status_.authenticated != old_status->authenticated)
+ what_changed |= AllStatusEvent::AUTHENTICATED;
+
+ const bool unsynced_changes = status_.unsynced_count > 0;
+ const bool online = status_.authenticated &&
+ status_.server_reachable && status_.server_up && !status_.server_broken;
+ if (online) {
+ if (status_.syncer_stuck)
+ status_.icon = CONFLICT;
+ else if (unsynced_changes || status_.syncing)
+ status_.icon = SYNCING;
+ else
+ status_.icon = READY;
+ } else if (!status_.initial_sync_ended) {
+ status_.icon = OFFLINE_UNUSABLE;
+ } else if (unsynced_changes) {
+ status_.icon = OFFLINE_UNSYNCED;
+ } else {
+ status_.icon = OFFLINE;
+ }
+
+ if (status_.icon != old_status->icon)
+ what_changed |= AllStatusEvent::ICON;
+
+ if (0 == what_changed)
+ return 0;
+ *old_status = status_;
+ return what_changed;
+}
+
+void AllStatus::HandleGaiaAuthEvent(const GaiaAuthEvent& gaia_event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (gaia_event.what_happened) {
+ case GaiaAuthEvent::GAIA_AUTH_FAILED:
+ status_.authenticated = false;
+ break;
+ case GaiaAuthEvent::GAIA_AUTH_SUCCEEDED:
+ status_.authenticated = true;
+ break;
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleAuthWatcherEvent(const AuthWatcherEvent& auth_event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (auth_event.what_happened) {
+ case AuthWatcherEvent::GAIA_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_AUTH_FAILED:
+ case AuthWatcherEvent::SERVICE_CONNECTION_FAILED:
+ case AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START:
+ status_.authenticated = false;
+ break;
+ case AuthWatcherEvent::AUTH_SUCCEEDED:
+ // If we've already calculated that the server is reachable, since we've
+ // successfully authenticated, we can be confident that the server is up.
+ if (status_.server_reachable)
+ status_.server_up = true;
+
+ if (!status_.authenticated) {
+ status_.authenticated = true;
+ status_ = CalcSyncing();
+ } else {
+ lock.set_notify_plan(DONT_NOTIFY);
+ }
+ break;
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleSyncerEvent(const SyncerEvent& event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (event.what_happened) {
+ case SyncerEvent::SYNC_CYCLE_ENDED:
+ case SyncerEvent::COMMITS_SUCCEEDED:
+ break;
+ case SyncerEvent::STATUS_CHANGED:
+ status_ = CalcSyncing(event);
+ break;
+ case SyncerEvent::SHUTDOWN_USE_WITH_CARE:
+ // We're safe to use this value here because we don't call into the syncer
+ // or block on any processes.
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ case SyncerEvent::OVER_QUOTA:
+ LOG(WARNING) << "User has gone over quota.";
+ lock.NotifyOverQuota();
+ break;
+ case SyncerEvent::REQUEST_SYNC_NUDGE:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ default:
+ LOG(ERROR) << "Unrecognized Syncer Event: " << event.what_happened;
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+void AllStatus::HandleServerConnectionEvent(
+ const ServerConnectionEvent& event) {
+ if (ServerConnectionEvent::STATUS_CHANGED == event.what_happened) {
+ ScopedStatusLockWithNotify lock(this);
+ status_.server_up = IsGoodReplyFromServer(event.connection_code);
+ status_.server_reachable = event.server_reachable;
+ }
+}
+
+void AllStatus::WatchTalkMediator(const TalkMediator* mediator) {
+ status_.notifications_enabled = false;
+ talk_mediator_hookup_.reset(
+ NewEventListenerHookup(mediator->channel(), this,
+ &AllStatus::HandleTalkMediatorEvent));
+}
+
+void AllStatus::HandleTalkMediatorEvent(
+ const TalkMediatorEvent& event) {
+ ScopedStatusLockWithNotify lock(this);
+ switch (event.what_happened) {
+ case TalkMediatorEvent::SUBSCRIPTIONS_ON:
+ status_.notifications_enabled = true;
+ break;
+ case TalkMediatorEvent::LOGOUT_SUCCEEDED:
+ case TalkMediatorEvent::SUBSCRIPTIONS_OFF:
+ case TalkMediatorEvent::TALKMEDIATOR_DESTROYED:
+ status_.notifications_enabled = false;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_RECEIVED:
+ status_.notifications_received++;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_SENT:
+ status_.notifications_sent++;
+ break;
+ case TalkMediatorEvent::LOGIN_SUCCEEDED:
+ default:
+ lock.set_notify_plan(DONT_NOTIFY);
+ break;
+ }
+}
+
+AllStatus::Status AllStatus::status() const {
+ MutexLock lock(&mutex_);
+ return status_;
+}
+
+int AllStatus::GetRecommendedDelaySeconds(int base_delay_seconds) {
+ if (base_delay_seconds >= kMaxBackoffSeconds)
+ return kMaxBackoffSeconds;
+
+ // This calculates approx. base_delay_seconds * 2 +/- base_delay_seconds / 2
+ int backoff_s = (0 == base_delay_seconds) ? 1 :
+ base_delay_seconds * kBackoffRandomizationFactor;
+
+ // Flip a coin to randomize backoff interval by +/- 50%.
+ int rand_sign = base::RandInt(0, 1) * 2 - 1;
+
+ // Truncation is adequate for rounding here.
+ backoff_s = backoff_s +
+ (rand_sign * (base_delay_seconds / kBackoffRandomizationFactor));
+
+ // Cap the backoff interval.
+ backoff_s = std::min(backoff_s, kMaxBackoffSeconds);
+
+ return backoff_s;
+}
+
+int AllStatus::GetRecommendedDelay(int base_delay_ms) const {
+ return GetRecommendedDelaySeconds(base_delay_ms / 1000) * 1000;
+}
+
+ScopedStatusLockWithNotify::ScopedStatusLockWithNotify(AllStatus* allstatus)
+ : allstatus_(allstatus), plan_(NOTIFY_IF_STATUS_CHANGED) {
+ event_.what_changed = 0;
+ allstatus->mutex_.Lock();
+ event_.status = allstatus->status_;
+}
+
+ScopedStatusLockWithNotify::~ScopedStatusLockWithNotify() {
+ if (DONT_NOTIFY == plan_) {
+ allstatus_->mutex_.Unlock();
+ return;
+ }
+ event_.what_changed |= allstatus_->CalcStatusChanges(&event_.status);
+ allstatus_->mutex_.Unlock();
+ if (event_.what_changed)
+ allstatus_->channel()->NotifyListeners(event_);
+}
+
+void ScopedStatusLockWithNotify::NotifyOverQuota() {
+ event_.what_changed |= AllStatusEvent::OVER_QUOTA;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/all_status.h b/chrome/browser/sync/engine/all_status.h
new file mode 100644
index 0000000..e7fb0ba
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The all status object watches various sync engine components and aggregates
+// the status of all of them into one place.
+//
+#ifndef CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
+#define CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
+
+#include <map>
+
+#include "base/atomicops.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+namespace browser_sync {
+class AuthWatcher;
+class GaiaAuthenticator;
+class ScopedStatusLockWithNotify;
+class ServerConnectionManager;
+class Syncer;
+class SyncerThread;
+class TalkMediator;
+struct AllStatusEvent;
+struct AuthWatcherEvent;
+struct GaiaAuthEvent;
+struct ServerConnectionEvent;
+struct SyncerEvent;
+struct TalkMediatorEvent;
+
+class AllStatus {
+ friend class ScopedStatusLockWithNotify;
+ public:
+ typedef EventChannel<AllStatusEvent, PThreadMutex> Channel;
+
+ // Status of the entire sync process distilled into a single enum.
+ enum SyncStatus {
+ // Can't connect to server, but there are no pending changes in
+ // our local database.
+ OFFLINE,
+ // Can't connect to server, and there are pending changes in our
+ // local cache.
+ OFFLINE_UNSYNCED,
+ // Connected and syncing.
+ SYNCING,
+ // Connected, no pending changes.
+ READY,
+ // Internal sync error.
+ CONFLICT,
+ // Can't connect to server, and we haven't completed the initial
+ // sync yet. So there's nothing we can do but wait for the server.
+ OFFLINE_UNUSABLE,
+ // For array sizing, etc.
+ ICON_STATUS_COUNT
+ };
+
+ struct Status {
+ SyncStatus icon;
+ int unsynced_count;
+ int conflicting_count;
+ bool syncing;
+ bool authenticated; // Successfully authenticated via gaia
+ // True if we have received at least one good reply from the server.
+ bool server_up;
+ bool server_reachable;
+ // True after a client has done a first sync.
+ bool initial_sync_ended;
+ // True if any syncer is stuck.
+ bool syncer_stuck;
+ // True if any syncer is stopped because of server issues.
+ bool server_broken;
+ // True only if the notification listener has subscribed.
+ bool notifications_enabled;
+ // Notification counters updated by the actions in syncapi.
+ int notifications_received;
+ int notifications_sent;
+ // The max number of consecutive errors from any component.
+ int max_consecutive_errors;
+ bool disk_full;
+
+ // Contains current transfer item meta handle
+ int64 current_item_meta_handle;
+ // The next two values will be equal if all updates have been received.
+ // total updates available.
+ int64 updates_available;
+ // total updates received.
+ int64 updates_received;
+ };
+
+ // Maximum interval for exponential backoff.
+ static const int kMaxBackoffSeconds = 60 * 60 * 4; // 4 hours.
+
+ AllStatus();
+ ~AllStatus();
+
+ void WatchConnectionManager(ServerConnectionManager* conn_mgr);
+ void HandleServerConnectionEvent(const ServerConnectionEvent& event);
+
+ // Both WatchAuthenticator/HandleGaiaAuthEvent and WatchAuthWatcher/
+ // HandleAuthWatcherEvent achieve the same goal; use only one of the
+ // following two. (The AuthWatcher is watched under Windows; the
+ // GaiaAuthenticator is watched under Mac/Linux.)
+ void WatchAuthenticator(GaiaAuthenticator* gaia);
+ void HandleGaiaAuthEvent(const GaiaAuthEvent& event);
+
+ void WatchAuthWatcher(AuthWatcher* auth_watcher);
+ void HandleAuthWatcherEvent(const AuthWatcherEvent& event);
+
+ void WatchSyncerThread(SyncerThread* syncer_thread);
+ void HandleSyncerEvent(const SyncerEvent& event);
+
+ void WatchTalkMediator(
+ const browser_sync::TalkMediator* talk_mediator);
+ void HandleTalkMediatorEvent(
+ const browser_sync::TalkMediatorEvent& event);
+
+ // Returns a string description of the SyncStatus (currently just the ascii
+ // version of the enum). Will LOG(FATAL) if the status is out of range.
+ static const char* GetSyncStatusString(SyncStatus status);
+
+ Channel* channel() const { return channel_; }
+
+ Status status() const;
+
+ // DDoS avoidance function. The argument and return value are in seconds.
+ static int GetRecommendedDelaySeconds(int base_delay_seconds);
+
+ // This uses AllStatus' max_consecutive_errors as the error count
+ int GetRecommendedDelay(int base_delay) const;
+
+ protected:
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+ typedef std::map<Syncer*, EventListenerHookup*> Syncers;
+
+ // Examines syncer to calculate syncing and the unsynced count,
+ // and returns a Status with new values.
+ Status CalcSyncing() const;
+ Status CalcSyncing(const SyncerEvent& event) const;
+ Status CreateBlankStatus() const;
+
+ // Examines status to see what has changed, updates old_status in place.
+ int CalcStatusChanges(Status* old_status);
+
+ Status status_;
+ Channel* const channel_;
+ scoped_ptr<EventListenerHookup> conn_mgr_hookup_;
+ scoped_ptr<EventListenerHookup> gaia_hookup_;
+ scoped_ptr<EventListenerHookup> authwatcher_hookup_;
+ scoped_ptr<EventListenerHookup> syncer_thread_hookup_;
+ scoped_ptr<EventListenerHookup> diskfull_hookup_;
+ scoped_ptr<EventListenerHookup> talk_mediator_hookup_;
+
+ mutable PThreadMutex mutex_; // Protects all data members.
+};
+
+struct AllStatusEvent {
+ enum { // A bit mask of which members have changed.
+ SHUTDOWN = 0x0000,
+ ICON = 0x0001,
+ UNSYNCED_COUNT = 0x0002,
+ AUTHENTICATED = 0x0004,
+ SYNCING = 0x0008,
+ SERVER_UP = 0x0010,
+ NOTIFICATIONS_ENABLED = 0x0020,
+ INITIAL_SYNC_ENDED = 0x0080,
+ SERVER_REACHABLE = 0x0100,
+ DISK_FULL = 0x0200,
+ OVER_QUOTA = 0x0400,
+ NOTIFICATIONS_RECEIVED = 0x0800,
+ NOTIFICATIONS_SENT = 0x1000,
+ TRASH_WARNING = 0x40000,
+ };
+ int what_changed;
+ AllStatus::Status status;
+
+ typedef AllStatusEvent EventType;
+ static inline bool IsChannelShutdownEvent(const AllStatusEvent& e) {
+ return SHUTDOWN == e.what_changed;
+ }
+};
+
+enum StatusNotifyPlan {
+ NOTIFY_IF_STATUS_CHANGED,
+ // A small optimization, don't do the big compare when we know
+ // nothing has changed.
+ DONT_NOTIFY,
+};
+
+class ScopedStatusLockWithNotify {
+ public:
+ explicit ScopedStatusLockWithNotify(AllStatus* allstatus);
+ ~ScopedStatusLockWithNotify();
+ // Defaults to NOTIFY_IF_STATUS_CHANGED, but can be explicitly set to
+ // DONT_NOTIFY so we skip the big compare in the destructor. Small optimization.
+
+ inline void set_notify_plan(StatusNotifyPlan plan) { plan_ = plan; }
+ void NotifyOverQuota();
+ protected:
+ AllStatusEvent event_;
+ AllStatus* const allstatus_;
+ StatusNotifyPlan plan_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
diff --git a/chrome/browser/sync/engine/all_status_unittest.cc b/chrome/browser/sync/engine/all_status_unittest.cc
new file mode 100644
index 0000000..86829cd
--- /dev/null
+++ b/chrome/browser/sync/engine/all_status_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/all_status.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+TEST(AllStatus, GetRecommendedDelay) {
+ EXPECT_LE(0, AllStatus::GetRecommendedDelaySeconds(0));
+ EXPECT_LE(1, AllStatus::GetRecommendedDelaySeconds(1));
+ EXPECT_LE(50, AllStatus::GetRecommendedDelaySeconds(50));
+ EXPECT_LE(10, AllStatus::GetRecommendedDelaySeconds(10));
+ EXPECT_EQ(AllStatus::kMaxBackoffSeconds,
+ AllStatus::GetRecommendedDelaySeconds(
+ AllStatus::kMaxBackoffSeconds));
+ EXPECT_EQ(AllStatus::kMaxBackoffSeconds,
+ AllStatus::GetRecommendedDelaySeconds(
+ AllStatus::kMaxBackoffSeconds+1));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/apply_updates_command.cc b/chrome/browser/sync/engine/apply_updates_command.cc
new file mode 100644
index 0000000..2d9f3e5
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/update_applicator.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+ApplyUpdatesCommand::ApplyUpdatesCommand() {}
+ApplyUpdatesCommand::~ApplyUpdatesCommand() {}
+
+void ApplyUpdatesCommand::ModelChangingExecuteImpl(SyncerSession *session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ syncable::Directory::UnappliedUpdateMetaHandles handles;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &handles);
+
+ UpdateApplicator applicator(session, handles.begin(), handles.end());
+ while (applicator.AttemptOneApplication(&trans)) {
+ }
+ applicator.SaveProgressIntoSessionState();
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/apply_updates_command.h b/chrome/browser/sync/engine/apply_updates_command.h
new file mode 100644
index 0000000..320e42c
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+class MutableEntry;
+class Id;
+}
+
+namespace browser_sync {
+
+class ApplyUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+ ApplyUpdatesCommand();
+ virtual ~ApplyUpdatesCommand();
+
+ virtual void ModelChangingExecuteImpl(SyncerSession *session);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_APPLY_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
new file mode 100644
index 0000000..ea4e253
--- /dev/null
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/test/sync/engine/test_directory_setter_upper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+using syncable::ReadTransaction;
+using syncable::MutableEntry;
+using syncable::Entry;
+using syncable::Id;
+using syncable::UNITTEST;
+
+namespace browser_sync {
+
// A test fixture for tests exercising ApplyUpdatesCommand.
class ApplyUpdatesCommandTest : public testing::Test {
 protected:
  ApplyUpdatesCommandTest() : next_revision_(1) {}
  virtual ~ApplyUpdatesCommandTest() {}
  virtual void SetUp() {
    syncdb_.SetUp();
  }
  virtual void TearDown() {
    syncdb_.TearDown();
  }

 protected:
  // Create a new unapplied update: a server-side directory named |item_id|
  // with server id |item_id| under server parent |parent_id|.  Only server
  // fields are populated, and IS_UNAPPLIED_UPDATE is set so that
  // ApplyUpdatesCommand will pick the entry up.
  void CreateUnappliedNewItemWithParent(const string& item_id,
                                        const string& parent_id) {
    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
    ASSERT_TRUE(dir.good());
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
                       Id::CreateFromServerId(item_id));
    ASSERT_TRUE(entry.good());
    PathString name;
    AppendUTF8ToPathString(item_id, &name);
    // Each created update gets a strictly increasing server version.
    entry.Put(syncable::SERVER_VERSION, next_revision_++);
    entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
    entry.Put(syncable::SERVER_NAME, name);
    entry.Put(syncable::SERVER_NON_UNIQUE_NAME, name);
    entry.Put(syncable::SERVER_PARENT_ID, Id::CreateFromServerId(parent_id));
    entry.Put(syncable::SERVER_IS_DIR, true);
  }

  TestDirectorySetterUpper syncdb_;
  ApplyUpdatesCommand apply_updates_command_;

 private:
  int64 next_revision_;  // Server version assigned to the next update.
  DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommandTest);
};
+
// Two well-formed updates (a parent and its child, received in dependency
// order) should both apply cleanly with no conflicts.
TEST_F(ApplyUpdatesCommandTest, Simple) {
  string root_server_id = syncable::kNullId.GetServerId();
  CreateUnappliedNewItemWithParent("parent", root_server_id);
  CreateUnappliedNewItemWithParent("child", "parent");

  SyncCycleState cycle_state;
  SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
                                 NULL, NULL, NULL, NULL);
  SyncerSession session(&cycle_state, &process_state);

  apply_updates_command_.ModelChangingExecuteImpl(&session);

  EXPECT_EQ(2, cycle_state.AppliedUpdatesSize())
      << "All updates should have been attempted";
  EXPECT_EQ(0, process_state.ConflictingItemsSize())
      << "Simple update shouldn't result in conflicts";
  EXPECT_EQ(0, process_state.BlockedItemsSize())
      << "Blocked items shouldn't be possible under any circumstances";
  EXPECT_EQ(2, cycle_state.SuccessfullyAppliedUpdateCount())
      << "All items should have been successfully applied";
}
+
// Updates received out of dependency order (children before their parent)
// should still all apply; the applicator is expected to retry until the
// parents land.
TEST_F(ApplyUpdatesCommandTest, UpdateWithChildrenBeforeParents) {
  // Set a bunch of updates which are difficult to apply in the order
  // they're received due to dependencies on other unseen items.
  string root_server_id = syncable::kNullId.GetServerId();
  CreateUnappliedNewItemWithParent("a_child_created_first", "parent");
  CreateUnappliedNewItemWithParent("x_child_created_first", "parent");
  CreateUnappliedNewItemWithParent("parent", root_server_id);
  CreateUnappliedNewItemWithParent("a_child_created_second", "parent");
  CreateUnappliedNewItemWithParent("x_child_created_second", "parent");

  SyncCycleState cycle_state;
  SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
                                 NULL, NULL, NULL, NULL);
  SyncerSession session(&cycle_state, &process_state);

  apply_updates_command_.ModelChangingExecuteImpl(&session);

  EXPECT_EQ(5, cycle_state.AppliedUpdatesSize())
      << "All updates should have been attempted";
  EXPECT_EQ(0, process_state.ConflictingItemsSize())
      << "Simple update shouldn't result in conflicts, even if out-of-order";
  EXPECT_EQ(0, process_state.BlockedItemsSize())
      << "Blocked items shouldn't be possible under any circumstances";
  EXPECT_EQ(5, cycle_state.SuccessfullyAppliedUpdateCount())
      << "All updates should have been successfully applied";
}
+
// Updates whose ancestor chain is rooted at an unknown server id can never
// be applied; both should end the cycle in the conflicting set.
TEST_F(ApplyUpdatesCommandTest, NestedItemsWithUnknownParent) {
  // We shouldn't be able to do anything with either of these items.
  CreateUnappliedNewItemWithParent("some_item", "unknown_parent");
  CreateUnappliedNewItemWithParent("some_other_item", "some_item");

  SyncCycleState cycle_state;
  SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
                                 NULL, NULL, NULL, NULL);
  SyncerSession session(&cycle_state, &process_state);

  apply_updates_command_.ModelChangingExecuteImpl(&session);

  EXPECT_EQ(2, cycle_state.AppliedUpdatesSize())
      << "All updates should have been attempted";
  EXPECT_EQ(2, process_state.ConflictingItemsSize())
      << "All updates with an unknown ancestors should be in conflict";
  EXPECT_EQ(0, process_state.BlockedItemsSize())
      << "Blocked items shouldn't be possible under any circumstances";
  EXPECT_EQ(0, cycle_state.SuccessfullyAppliedUpdateCount())
      << "No item with an unknown ancestor should be applied";
}
+
// A mixture of applicable updates and updates with unknown ancestors: the
// four with resolvable parents apply, the two unknown-rooted ones conflict.
TEST_F(ApplyUpdatesCommandTest, ItemsBothKnownAndUnknown) {
  // See what happens when there's a mixture of good and bad updates.
  string root_server_id = syncable::kNullId.GetServerId();
  CreateUnappliedNewItemWithParent("first_unknown_item", "unknown_parent");
  CreateUnappliedNewItemWithParent("first_known_item", root_server_id);
  CreateUnappliedNewItemWithParent("second_unknown_item", "unknown_parent");
  CreateUnappliedNewItemWithParent("second_known_item", "first_known_item");
  CreateUnappliedNewItemWithParent("third_known_item", "fourth_known_item");
  CreateUnappliedNewItemWithParent("fourth_known_item", root_server_id);

  SyncCycleState cycle_state;
  SyncProcessState process_state(syncdb_.manager(), syncdb_.name(),
                                 NULL, NULL, NULL, NULL);
  SyncerSession session(&cycle_state, &process_state);

  apply_updates_command_.ModelChangingExecuteImpl(&session);

  EXPECT_EQ(6, cycle_state.AppliedUpdatesSize())
      << "All updates should have been attempted";
  EXPECT_EQ(2, process_state.ConflictingItemsSize())
      << "The updates with unknown ancestors should be in conflict";
  EXPECT_EQ(0, process_state.BlockedItemsSize())
      << "Blocked items shouldn't be possible under any circumstances";
  EXPECT_EQ(4, cycle_state.SuccessfullyAppliedUpdateCount())
      << "The updates with known ancestors should be successfully applied";
}
+
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/auth_watcher.cc b/chrome/browser/sync/engine/auth_watcher.cc
new file mode 100644
index 0000000..0c999dd
--- /dev/null
+++ b/chrome/browser/sync/engine/auth_watcher.cc
@@ -0,0 +1,419 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/auth_watcher.h"
+
+#include "base/file_util.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/authenticator.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/user_settings.h"
+
+// How authentication happens:
+//
+// Kick Off:
+// The sync API looks to see if the user's name and
+// password are stored. If so, it calls authwatcher.Authenticate() with
+// them. Otherwise it fires an error event.
+//
+// On failed Gaia Auth:
+// The AuthWatcher attempts to use saved hashes to authenticate
+// locally, and on success opens the share.
+// On failure, fires an error event.
+//
+// On successful Gaia Auth:
+// AuthWatcher launches a thread to open the share and to get the
+// authentication token from the sync server.
+
+using std::pair;
+using std::string;
+using std::vector;
+
+namespace browser_sync {
+
+AuthWatcher::AuthWatcher(DirectoryManager* dirman,
+ ServerConnectionManager* scm,
+ AllStatus* allstatus,
+ const string& user_agent,
+ const string& service_id,
+ const string& gaia_url,
+ UserSettings* user_settings,
+ GaiaAuthenticator* gaia_auth,
+ TalkMediator* talk_mediator)
+ : dirman_(dirman),
+ scm_(scm),
+ allstatus_(allstatus),
+ status_(NOT_AUTHENTICATED),
+ thread_handle_valid_(false),
+ authenticating_now_(false),
+ current_attempt_trigger_(AuthWatcherEvent::USER_INITIATED),
+ user_settings_(user_settings),
+ gaia_(gaia_auth),
+ talk_mediator_(talk_mediator) {
+ connmgr_hookup_.reset(
+ NewEventListenerHookup(scm->channel(), this,
+ &AuthWatcher::HandleServerConnectionEvent));
+ AuthWatcherEvent done = { AuthWatcherEvent::AUTHWATCHER_DESTROYED };
+ channel_.reset(new Channel(done));
+}
+
// Trampoline passed to pthread_create: unwraps the ThreadParams and invokes
// the member-function thread body on the owning AuthWatcher.
void* AuthWatcher::AuthenticationThreadStartRoutine(void* arg) {
  ThreadParams* args = reinterpret_cast<ThreadParams*>(arg);
  return args->self->AuthenticationThreadMain(args);
}
+
// Called after Gaia authentication succeeds.  Persists the freshly minted
// credentials, then authenticates against the sync service with the new
// token.  Per the header, returns false iff we had problems and should try
// GAIA_AUTH again.
bool AuthWatcher::ProcessGaiaAuthSuccess() {
  GaiaAuthenticator::AuthResults results = gaia_->results();

  // We just successfully signed in again, let's clear out any residual cached
  // login data from earlier sessions.
  ClearAuthenticationData();

  // Remember both the address the user typed and the canonical primary
  // address, then continue with the canonical one.
  user_settings_->StoreEmailForSignin(results.email, results.primary_email);
  user_settings_->RememberSigninType(results.email, results.signin);
  user_settings_->RememberSigninType(results.primary_email, results.signin);
  results.email = results.primary_email;
  gaia_->SetUsernamePassword(results.primary_email, results.password);
  // Refresh the stored password hash used for offline (local) authentication.
  if (!user_settings_->VerifyAgainstStoredHash(results.email, results.password))
    user_settings_->StoreHashedPassword(results.email, results.password);

  // Only persist the long-lived service token if the user asked to be
  // remembered across sessions.
  if (PERSIST_TO_DISK == results.credentials_saved) {
    user_settings_->SetAuthTokenForService(results.email,
                                           SYNC_SERVICE_NAME,
                                           gaia_->auth_token());
  }

  return AuthenticateWithToken(results.email, gaia_->auth_token());
}
+
// Returns a long-lived token for |service_name| obtained at login time,
// preferring the in-memory copy for the sync service itself.  Fails if the
// persisted token belongs to a different user than the current one.
bool AuthWatcher::GetAuthTokenForService(const string& service_name,
                                         string* service_token) {
  string user_name;

  // We special case this one by trying to return it from memory first. We
  // do this because the user may not have checked "Remember me" and so we
  // may not have persisted the sync service token beyond the initial
  // login.
  if (SYNC_SERVICE_NAME == service_name && !sync_service_token_.empty()) {
    *service_token = sync_service_token_;
    return true;
  }

  if (user_settings_->GetLastUserAndServiceToken(service_name, &user_name,
                                                 service_token)) {
    // Compare case-insensitively: the casing gets preserved in some places
    // and not in others it seems, at least I have observed different casings
    // persisted to different DB tables.
    if (!base::strcasecmp(user_name.c_str(),
                          user_settings_->email().c_str())) {
      return true;
    } else {
      LOG(ERROR) << "ERROR: We seem to have saved credentials for someone "
                 << " other than the current user.";
      return false;
    }
  }

  return false;
}
+
// Identifier string for this component.  NOTE(review): appears unreferenced
// within this file -- confirm whether external code uses it before removing.
const char kAuthWatcher[] = "AuthWatcher";
+
// Authenticates against the sync service with a Gaia auth token.  Always
// returns true; the outcome is reported to observers via channel_ events.
// On failure, falls back to local authentication so the user can keep
// working offline.
bool AuthWatcher::AuthenticateWithToken(const string& gaia_email,
                                        const string& auth_token) {
  // Store a copy of the sync service token in memory.
  sync_service_token_ = auth_token;
  scm_->set_auth_token(sync_service_token_);

  Authenticator auth(scm_, user_settings_);
  Authenticator::AuthenticationResult result =
      auth.AuthenticateToken(auth_token);
  string email = gaia_email;
  // Prefer the display email the server returned, when present.
  if (auth.display_email() && *auth.display_email()) {
    email = auth.display_email();
    LOG(INFO) << "Auth returned email " << email << " for gaia email " <<
        gaia_email;
  }
  // ILLEGAL_VALUE doubles as a "not yet assigned" sentinel; the CHECK near
  // the bottom verifies every failure path overwrote it.
  AuthWatcherEvent event = {AuthWatcherEvent::ILLEGAL_VALUE , 0};
  gaia_->SetUsername(email);
  gaia_->SetAuthToken(auth_token, SAVE_IN_MEMORY_ONLY);
  const bool was_authenticated = NOT_AUTHENTICATED != status_;
  switch (result) {
    case Authenticator::SUCCESS:
      {
        status_ = GAIA_AUTHENTICATED;
        PathString share_name;
        CHECK(AppendUTF8ToPathString(email.data(), email.size(), &share_name));
        user_settings_->SwitchUser(email);

        // Set the authentication token for notifications
        talk_mediator_->SetAuthToken(email, auth_token);

        // Only (re)open the share if we weren't authenticated before.
        if (!was_authenticated)
          LoadDirectoryListAndOpen(share_name);
        NotifyAuthSucceeded(email);
        return true;
      }
    case Authenticator::BAD_AUTH_TOKEN:
      event.what_happened = AuthWatcherEvent::SERVICE_AUTH_FAILED;
      break;
    case Authenticator::CORRUPT_SERVER_RESPONSE:
    case Authenticator::SERVICE_DOWN:
      event.what_happened = AuthWatcherEvent::SERVICE_CONNECTION_FAILED;
      break;
    case Authenticator::USER_NOT_ACTIVATED:
      event.what_happened = AuthWatcherEvent::SERVICE_USER_NOT_SIGNED_UP;
      break;
    default:
      LOG(FATAL) << "Illegal return from AuthenticateToken";
      return true;  // keep the compiler happy
  }
  // Always fall back to local authentication.  Connection failures are not
  // reported when local auth (or a previous authentication) succeeded.
  if (was_authenticated || AuthenticateLocally(email)) {
    if (AuthWatcherEvent::SERVICE_CONNECTION_FAILED == event.what_happened)
      return true;
  }
  CHECK(event.what_happened != AuthWatcherEvent::ILLEGAL_VALUE);
  NotifyListeners(&event);
  return true;
}
+
+bool AuthWatcher::AuthenticateLocally(string email) {
+ user_settings_->GetEmailForSignin(&email);
+ if (file_util::PathExists(dirman_->GetSyncDataDatabasePath())) {
+ gaia_->SetUsername(email);
+ status_ = LOCALLY_AUTHENTICATED;
+ user_settings_->SwitchUser(email);
+ PathString share_name;
+ CHECK(AppendUTF8ToPathString(email.data(), email.size(), &share_name));
+ LoadDirectoryListAndOpen(share_name);
+ NotifyAuthSucceeded(email);
+ return true;
+ } else {
+ return false;
+ }
+}
+
// Local authentication that additionally verifies |password| against the
// stored password hash before opening the share.
bool AuthWatcher::AuthenticateLocally(string email, const string& password) {
  user_settings_->GetEmailForSignin(&email);
  return user_settings_->VerifyAgainstStoredHash(email, password)
      && AuthenticateLocally(email);
}
+
// Called after a Gaia authentication attempt fails.  Falls back to local
// (offline) authentication using the stored password hash, and notifies
// listeners of the failure unless the error looks like a connectivity
// problem that local auth papered over.
void AuthWatcher::ProcessGaiaAuthFailure() {
  GaiaAuthenticator::AuthResults results = gaia_->results();
  if (LOCALLY_AUTHENTICATED == status_) {
    return;  // Nothing to do; we're already authenticated locally.
  } else if (AuthenticateLocally(results.email, results.password)) {
    // We save the "Remember me" checkbox by putting a non-null auth
    // token into the last_user table. So if we're offline and the
    // user checks the box, insert a bogus auth token.
    if (PERSIST_TO_DISK == results.credentials_saved) {
      const string auth_token("bogus");
      user_settings_->SetAuthTokenForService(results.email,
                                             SYNC_SERVICE_NAME,
                                             auth_token);
    }
    // Suppress the failure event for connectivity-style errors when local
    // auth succeeded; the user can keep working offline.
    const bool unavailable = ConnectionUnavailable == results.auth_error ||
                             Unknown == results.auth_error ||
                             ServiceUnavailable == results.auth_error;
    if (unavailable)
      return;
  }
  AuthWatcherEvent myevent = { AuthWatcherEvent::GAIA_AUTH_FAILED, &results };
  NotifyListeners(&myevent);
}
+
// Thread body for an authentication attempt.  |args| was allocated by
// StartNewAuthAttempt and is deleted here.  Tries Gaia password auth first
// (with up to 3 retries when ProcessGaiaAuthSuccess asks for another Gaia
// attempt), then a raw auth token; reports failures through the event
// channel.  Always returns 0.
void* AuthWatcher::AuthenticationThreadMain(ThreadParams* args) {
  NameCurrentThreadForDebugging("SyncEngine_AuthWatcherThread");
  {
    // This short lock ensures our launching function (StartNewAuthAttempt) is
    // done.
    MutexLock lock(&mutex_);
    current_attempt_trigger_ = args->trigger;
  }
  SaveCredentials save = args->persist_creds_to_disk ?
      PERSIST_TO_DISK : SAVE_IN_MEMORY_ONLY;
  int attempt = 0;
  SignIn const signin = user_settings_->
      RecallSigninType(args->email, GMAIL_SIGNIN);

  // NOTE: the while (true) below is a retry loop exited via break; only the
  // "Gaia succeeded but token auth should be retried" path loops.
  if (!args->password.empty()) while (true) {
    bool authenticated;
    if (!args->captcha_token.empty() && !args->captcha_value.empty())
      authenticated = gaia_->Authenticate(args->email, args->password,
                                          save, true, args->captcha_token,
                                          args->captcha_value, signin);
    else
      authenticated = gaia_->Authenticate(args->email, args->password,
                                          save, true, signin);
    if (authenticated) {
      if (!ProcessGaiaAuthSuccess()) {
        // Retry the whole Gaia handshake, giving up after 3 attempts.
        if (3 != ++attempt)
          continue;
        AuthWatcherEvent event =
            { AuthWatcherEvent::SERVICE_CONNECTION_FAILED, 0 };
        NotifyListeners(&event);
      }
    } else {
      ProcessGaiaAuthFailure();
    }
    break;
  } else if (!args->auth_token.empty()) {
    AuthenticateWithToken(args->email, args->auth_token);
  } else {
    LOG(ERROR) << "Attempt to authenticate with no credentials.";
  }
  {
    MutexLock lock(&mutex_);
    authenticating_now_ = false;
  }
  delete args;
  return 0;
}
+
// Drops back to the unauthenticated state; does not touch stored tokens.
void AuthWatcher::Reset() {
  status_ = NOT_AUTHENTICATED;
}
+
// Broadcasts an AUTH_SUCCEEDED event carrying the authenticated email.
void AuthWatcher::NotifyAuthSucceeded(const string& email) {
  LOG(INFO) << "NotifyAuthSucceeded";
  AuthWatcherEvent event = { AuthWatcherEvent::AUTH_SUCCEEDED };
  event.user_email = email;

  NotifyListeners(&event);
}
+
+bool AuthWatcher::StartNewAuthAttempt(const string& email,
+ const string& password, const string& auth_token,
+ const string& captcha_token, const string& captcha_value,
+ bool persist_creds_to_disk,
+ AuthWatcherEvent::AuthenticationTrigger trigger) {
+ AuthWatcherEvent event = { AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START };
+ NotifyListeners(&event);
+ MutexLock lock(&mutex_);
+ if (authenticating_now_)
+ return false;
+ if (thread_handle_valid_) {
+ int join_return = pthread_join(thread_, 0);
+ if (0 != join_return)
+ LOG(ERROR) << "pthread_join failed returning " << join_return;
+ }
+ string mail = email;
+ if (email.find('@') == string::npos) {
+ mail.push_back('@');
+ // TODO(chron): Should this be done only at the UI level?
+ mail.append(DEFAULT_SIGNIN_DOMAIN);
+ }
+ ThreadParams* args = new ThreadParams;
+ args->self = this;
+ args->email = mail;
+ args->password = password;
+ args->auth_token = auth_token;
+ args->captcha_token = captcha_token;
+ args->captcha_value = captcha_value;
+ args->persist_creds_to_disk = persist_creds_to_disk;
+ args->trigger = trigger;
+ if (0 != pthread_create(&thread_, NULL, AuthenticationThreadStartRoutine,
+ args)) {
+ LOG(ERROR) << "Failed to create auth thread.";
+ return false;
+ }
+ authenticating_now_ = true;
+ thread_handle_valid_ = true;
+ return true;
+}
+
// Blocks until the background auth thread (if any) has exited.
// NOTE(review): |thread_| is read outside the mutex after the validity
// check, and |thread_handle_valid_| is never cleared after a join, so a
// second join of an already-joined thread appears possible -- confirm.
void AuthWatcher::WaitForAuthThreadFinish() {
  {
    MutexLock lock(&mutex_);
    if (!thread_handle_valid_)
      return;
  }
  pthread_join(thread_, 0);
}
+
// Listens to server connection events and re-authenticates with saved
// credentials when connectivity returns or the server says our token is bad.
// NOTE(review): |authenticating_now_| and |status_| are read here without
// holding |mutex_| -- confirm this race is benign.
void AuthWatcher::HandleServerConnectionEvent(
    const ServerConnectionEvent& event) {
  if (event.server_reachable &&
      !authenticating_now_ &&
      (event.connection_code == HttpResponse::SYNC_AUTH_ERROR ||
       status_ == LOCALLY_AUTHENTICATED)) {
    // We're either online or just got reconnected and want to try to
    // authenticate. If we've got a saved token this should just work. If not
    // the auth failure should trigger UI indications that we're not logged in.

    // METRIC: If we get a SYNC_AUTH_ERROR, our token expired.
    GaiaAuthenticator::AuthResults authresults = gaia_->results();
    if (!StartNewAuthAttempt(authresults.email, authresults.password,
                             authresults.auth_token, "", "",
                             PERSIST_TO_DISK == authresults.credentials_saved,
                             AuthWatcherEvent::EXPIRED_CREDENTIALS))
      LOG(INFO) << "Couldn't start a new auth attempt.";
  }
}
+
// Opens the syncable directory for |login|.  Returns true iff the opened
// share has already completed its initial sync with the server.
bool AuthWatcher::LoadDirectoryListAndOpen(const PathString& login) {
  LOG(INFO) << "LoadDirectoryListAndOpen(" << login << ")";
  bool initial_sync_ended = false;

  dirman_->Open(login);
  syncable::ScopedDirLookup dir(dirman_, login);
  if (dir.good() && dir->initial_sync_ended())
    initial_sync_ended = true;

  LOG(INFO) << "LoadDirectoryListAndOpen returning " << initial_sync_ended;
  return initial_sync_ended;
}
+
// Joins any outstanding auth thread before destruction so the thread can't
// touch a destroyed AuthWatcher.
AuthWatcher::~AuthWatcher() {
  WaitForAuthThreadFinish();
}
+
// Public entry point for a user-initiated authentication attempt.  Joins any
// previously finished auth thread, then spawns a fresh one.
void AuthWatcher::Authenticate(const string& email, const string& password,
    const string& captcha_token, const string& captcha_value,
    bool persist_creds_to_disk) {
  LOG(INFO) << "AuthWatcher::Authenticate called";
  WaitForAuthThreadFinish();

  // We CHECK here because WaitForAuthThreadFinish should ensure there's no
  // ongoing auth attempt.
  string empty;
  CHECK(StartNewAuthAttempt(email, password, empty, captcha_token,
                            captcha_value, persist_creds_to_disk,
                            AuthWatcherEvent::USER_INITIATED));
}
+
// Logs out: resets server-side auth status, drops our status to
// NOT_AUTHENTICATED, waits for any in-flight auth thread, then clears all
// stored tokens.
void AuthWatcher::Logout() {
  scm_->ResetAuthStatus();
  Reset();
  WaitForAuthThreadFinish();
  ClearAuthenticationData();
}
+
// Forgets the in-memory sync token (also clearing it on the connection
// manager) and all persisted service tokens.
void AuthWatcher::ClearAuthenticationData() {
  sync_service_token_.clear();
  scm_->set_auth_token(sync_service_token());
  user_settings_->ClearAllServiceTokens();
}
+
// Returns the email address currently known to the Gaia authenticator.
string AuthWatcher::email() const {
  return gaia_->email();
}
+
// Stamps |event| with the trigger of the current attempt and broadcasts it
// to everyone subscribed to channel().
void AuthWatcher::NotifyListeners(AuthWatcherEvent* event) {
  event->trigger = current_attempt_trigger_;
  channel_->NotifyListeners(*event);
}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/auth_watcher.h b/chrome/browser/sync/engine/auth_watcher.h
new file mode 100644
index 0000000..f1bd424d
--- /dev/null
+++ b/chrome/browser/sync/engine/auth_watcher.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AuthWatcher watches authentication events and user open and close
+// events and accordingly opens and closes shares.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
+
+#include <map>
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+struct DirectoryManagerEvent;
+class DirectoryManager;
+}
+
+namespace browser_sync {
+class AllStatus;
+class AuthWatcher;
+class ServerConnectionManager;
+class TalkMediator;
+class URLFactory;
+class UserSettings;
+struct ServerConnectionEvent;
+
// Event payload broadcast on AuthWatcher::channel() describing the progress
// and outcome of authentication attempts.
struct AuthWatcherEvent {
  enum WhatHappened {
    AUTHENTICATION_ATTEMPT_START,
    AUTHWATCHER_DESTROYED,
    AUTH_SUCCEEDED,
    GAIA_AUTH_FAILED,
    SERVICE_USER_NOT_SIGNED_UP,
    SERVICE_AUTH_FAILED,
    SERVICE_CONNECTION_FAILED,
    // Used in a safety check in AuthWatcher::AuthenticateWithToken()
    ILLEGAL_VALUE,
  };
  WhatHappened what_happened;
  // Set for GAIA_AUTH_FAILED events (see ProcessGaiaAuthFailure); other
  // emitters in auth_watcher.cc initialize it to 0.
  const GaiaAuthenticator::AuthResults* auth_results;
  // use AuthWatcherEvent as its own traits type in hookups.
  typedef AuthWatcherEvent EventType;
  static inline bool IsChannelShutdownEvent(const AuthWatcherEvent& event) {
    return event.what_happened == AUTHWATCHER_DESTROYED;
  }

  // Used for AUTH_SUCCEEDED notification
  std::string user_email;

  // How was this auth attempt initiated?
  enum AuthenticationTrigger {
    USER_INITIATED = 0,  // default value.
    EXPIRED_CREDENTIALS,
  };

  AuthenticationTrigger trigger;
};
+
// Watches authentication events (Gaia logins, token authentication, local
// fallback auth) and opens/closes syncable shares accordingly.  Outcomes are
// broadcast to observers as AuthWatcherEvents on channel().  Authentication
// attempts run on a dedicated pthread (see AuthenticationThreadMain).
class AuthWatcher {
 public:
  // Normal progression is local -> gaia -> token
  enum Status { LOCALLY_AUTHENTICATED, GAIA_AUTHENTICATED, NOT_AUTHENTICATED };
  typedef syncable::DirectoryManagerEvent DirectoryManagerEvent;
  typedef syncable::DirectoryManager DirectoryManager;
  typedef TalkMediator TalkMediator;

  // NOTE(review): |user_agent|, |service_id| and |gaia_url| are accepted but
  // unused by the constructor implementation in auth_watcher.cc -- confirm
  // whether callers rely on them before removing.
  AuthWatcher(DirectoryManager* dirman,
              ServerConnectionManager* scm,
              AllStatus* allstatus,
              const std::string& user_agent,
              const std::string& service_id,
              const std::string& gaia_url,
              UserSettings* user_settings,
              GaiaAuthenticator* gaia_auth,
              TalkMediator* talk_mediator);
  // Joins any outstanding authentication thread.
  ~AuthWatcher();

  // Returns true if the open share has gotten zero
  // updates from the sync server (initial sync complete.)
  bool LoadDirectoryListAndOpen(const PathString& login);

  typedef EventChannel<AuthWatcherEvent, PThreadMutex> Channel;

  inline Channel* channel() const {
    return channel_.get();
  }

  // Kicks off an asynchronous authentication attempt on a new thread.
  void Authenticate(const std::string& email, const std::string& password,
      const std::string& captcha_token, const std::string& captcha_value,
      bool persist_creds_to_disk);

  // Convenience overload for attempts without captcha data.
  void Authenticate(const std::string& email, const std::string& password,
      bool persist_creds_to_disk) {
    Authenticate(email, password, "", "", persist_creds_to_disk);
  }

  // Retrieves an auth token for a named service for which a long-lived token
  // was obtained at login time. Returns true if a long-lived token can be
  // found, false otherwise.
  bool GetAuthTokenForService(const std::string& service_name,
                              std::string* service_token);

  std::string email() const;
  syncable::DirectoryManager* dirman() const { return dirman_; }
  ServerConnectionManager* scm() const { return scm_; }
  AllStatus* allstatus() const { return allstatus_; }
  UserSettings* settings() const { return user_settings_; }
  // NOTE(review): C-style cast from the volatile atomic word; read without
  // holding mutex_ -- confirm this is safe on all callers' threads.
  Status status() const { return (Status)status_; }

  void Logout();

  // For synchronizing other destructors.
  void WaitForAuthThreadFinish();

 protected:
  void Reset();
  void ClearAuthenticationData();

  void NotifyAuthSucceeded(const std::string& email);
  bool StartNewAuthAttempt(const std::string& email,
      const std::string& password,
      const std::string& auth_token, const std::string& captcha_token,
      const std::string& captcha_value, bool persist_creds_to_disk,
      AuthWatcherEvent::AuthenticationTrigger trigger);
  void HandleServerConnectionEvent(const ServerConnectionEvent& event);

  void SaveUserSettings(const std::string& username,
                        const std::string& auth_token,
                        const bool save_credentials);

  // These two helpers should only be called from the auth function.
  // returns false iff we had problems and should try GAIA_AUTH again.
  bool ProcessGaiaAuthSuccess();
  void ProcessGaiaAuthFailure();

  // Just checks that the user has at least one local share cache.
  bool AuthenticateLocally(std::string email);
  // Also checks the user's password against stored password hash.
  bool AuthenticateLocally(std::string email, const std::string& password);

  // Sets the trigger member of the event and sends the event on channel_.
  void NotifyListeners(AuthWatcherEvent* event);

  const std::string& sync_service_token() const { return sync_service_token_; }

 public:
  bool AuthenticateWithToken(const std::string& email,
                             const std::string& auth_token);

 protected:
  typedef PThreadScopedLock<PThreadMutex> MutexLock;

  // Passed to newly created threads.
  struct ThreadParams {
    AuthWatcher* self;
    std::string email;
    std::string password;
    std::string auth_token;
    std::string captcha_token;
    std::string captcha_value;
    bool persist_creds_to_disk;
    AuthWatcherEvent::AuthenticationTrigger trigger;
  };

  // Initial function passed to pthread_create.
  static void* AuthenticationThreadStartRoutine(void* arg);
  // Member function called by AuthenticationThreadStartRoutine.
  void* AuthenticationThreadMain(struct ThreadParams* arg);

  scoped_ptr<GaiaAuthenticator> const gaia_;
  syncable::DirectoryManager* const dirman_;
  ServerConnectionManager* const scm_;
  scoped_ptr<EventListenerHookup> connmgr_hookup_;
  AllStatus* const allstatus_;
  // TODO(chron): It is incorrect to make assignments to AtomicWord.
  volatile base::subtle::AtomicWord status_;
  UserSettings* user_settings_;
  TalkMediator* talk_mediator_;  // Interface to the notifications engine.
  scoped_ptr<Channel> channel_;

  // We store our service token in memory as a workaround to the fact that we
  // don't persist it when the user unchecks "remember me".
  // We also include it on outgoing requests.
  std::string sync_service_token_;

  PThreadMutex mutex_;
  // All members below are protected by the above mutex
  pthread_t thread_;
  bool thread_handle_valid_;
  bool authenticating_now_;
  AuthWatcherEvent::AuthenticationTrigger current_attempt_trigger_;
};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_AUTH_WATCHER_H_
diff --git a/chrome/browser/sync/engine/authenticator.cc b/chrome/browser/sync/engine/authenticator.cc
new file mode 100644
index 0000000..cd168d2
--- /dev/null
+++ b/chrome/browser/sync/engine/authenticator.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/authenticator.h"
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/user_settings.h"
+
+namespace browser_sync {
+
+using std::string;
+
// Constructs an authenticator that can consult |settings| for the stored
// signin type when doing username/password authentication.
Authenticator::Authenticator(ServerConnectionManager *manager,
                             UserSettings* settings)
    : server_connection_manager_(manager), settings_(settings) {
}
+
// Settings-free constructor (settings_ is NULL); paths that dereference
// settings_, such as the username/password Authenticate overload, must not
// be used with an instance built this way.
Authenticator::Authenticator(ServerConnectionManager *manager)
    : server_connection_manager_(manager), settings_(NULL) {
}
+
// Authenticate using previously saved credentials -- not implemented yet;
// always reports NO_SAVED_CREDENTIALS.
Authenticator::AuthenticationResult Authenticator::Authenticate() {
  // TODO(sync): Pull and work with saved credentials.
  return NO_SAVED_CREDENTIALS;
}
+
// Performs a fresh Gaia login with |username|/|password| and then
// authenticates the resulting token against the sync server.
// NOTE(review): |save_credentials| is accepted but never used; the Gaia call
// always passes SAVE_IN_MEMORY_ONLY -- confirm this is intended.
// NOTE(review): dereferences settings_, which is NULL when the
// single-argument constructor was used -- per the TODO below this routine
// may be test-only; confirm.
Authenticator::AuthenticationResult Authenticator::Authenticate(
    string username, string password, bool save_credentials) {
  // TODO(scrub): need to figure out if this routine is used anywhere other than
  // the test code.
  GaiaAuthenticator auth_service("ChromiumBrowser", "chromiumsync",
      "https://www.google.com:443/accounts/ClientLogin");
  const SignIn signin_type =
      settings_->RecallSigninType(username, GMAIL_SIGNIN);
  if (!auth_service.Authenticate(username, password, SAVE_IN_MEMORY_ONLY,
                                 true, signin_type)) {
    return UNSPECIFIC_ERROR_RETURN;
  }
  CHECK(!auth_service.auth_token().empty());
  return AuthenticateToken(auth_service.auth_token());
}
+
// Guard against silently mishandling newly added server error codes: if the
// proto's error enum grows, this assert fires and the switch in
// AuthenticateToken below must be revisited.
COMPILE_ASSERT(sync_pb::ClientToServerResponse::ERROR_TYPE_MAX == 6,
               client_to_server_response_errors_changed);
+
+Authenticator::AuthenticationResult Authenticator::HandleSuccessfulTokenRequest(
+ const sync_pb::UserIdentification* user) {
+ display_email_ = user->has_email() ? user->email() : "";
+ display_name_ = user->has_display_name() ? user->display_name() : "";
+ obfuscated_id_ = user->has_obfuscated_id() ? user->obfuscated_id() : "";
+ return SUCCESS;
+}
+
// Authenticates a Gaia auth token against the sync server by posting an
// AUTHENTICATE ClientToServerMessage, then maps the server's error codes
// onto AuthenticationResult values.
Authenticator::AuthenticationResult Authenticator::AuthenticateToken(
    string auth_token) {
  ClientToServerMessage client_to_server_message;
  // Used to be required for all requests.
  client_to_server_message.set_share("");
  client_to_server_message.set_message_contents(
      ClientToServerMessage::AUTHENTICATE);

  string tx, rx;
  client_to_server_message.SerializeToString(&tx);
  HttpResponse http_response;

  ServerConnectionManager::PostBufferParams params =
      { tx, &rx, &http_response };
  if (!server_connection_manager_->PostBufferWithAuth(&params, auth_token)) {
    LOG(WARNING) << "Error posting from authenticator:" << http_response;
    return SERVICE_DOWN;
  }
  sync_pb::ClientToServerResponse response;
  if (!response.ParseFromString(rx))
    return CORRUPT_SERVER_RESPONSE;

  switch (response.error_code()) {
    case sync_pb::ClientToServerResponse::SUCCESS:
      // Capture user identification when the server supplies it.
      if (response.has_authenticate() && response.authenticate().has_user())
        return HandleSuccessfulTokenRequest(&response.authenticate().user());
      // TODO:(sync) make this CORRUPT_SERVER_RESPONSE when all servers are
      // returning user identification at login time.
      return SUCCESS;
    case sync_pb::ClientToServerResponse::USER_NOT_ACTIVATED:
      return USER_NOT_ACTIVATED;
    case sync_pb::ClientToServerResponse::AUTH_INVALID:
    case sync_pb::ClientToServerResponse::AUTH_EXPIRED:
      return BAD_AUTH_TOKEN;
    // should never happen (no birthday in this request).
    case sync_pb::ClientToServerResponse::NOT_MY_BIRTHDAY:
    // should never happen (auth isn't throttled).
    case sync_pb::ClientToServerResponse::THROTTLED:
    // should never happen (only for stores).
    case sync_pb::ClientToServerResponse::ACCESS_DENIED:
    default:
      LOG(ERROR) << "Corrupt Server packet received by auth, error code " <<
          response.error_code();
      return CORRUPT_SERVER_RESPONSE;
  }
}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/authenticator.h b/chrome/browser/sync/engine/authenticator.h
new file mode 100644
index 0000000..6c5005b
--- /dev/null
+++ b/chrome/browser/sync/engine/authenticator.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The authenticator is a cross-platform class that handles authentication for
+// the sync client.
+//
+// Current State:
+// The authenticator is currently only used to authenticate tokens using the
+// newer protocol buffer request.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace sync_pb {
+class UserIdentification;
+}
+
+namespace browser_sync {
+
+class ServerConnectionManager;
+class UserSettings;
+
+// Authenticates a sync connection against the server, caching the user
+// identification (email, display name, obfuscated id) returned on success.
+class Authenticator {
+ public:
+  // The single result enum returned by every Authenticate* call.
+  enum AuthenticationResult {
+    SUCCESS = 0,
+    // We couldn't log on because we don't have saved credentials.
+    NO_SAVED_CREDENTIALS,
+    // We can't reach auth server (i.e. we're offline or server's down).
+    NOT_CONNECTED,
+    // Server's up, but we're down.
+    SERVICE_DOWN,
+    // We contacted the server, but the response didn't make sense.
+    CORRUPT_SERVER_RESPONSE,
+    // Bad username/password.
+    BAD_CREDENTIALS,
+    // Credentials are fine, but the user hasn't signed up.
+    USER_NOT_ACTIVATED,
+
+    // Return values for internal use.
+
+    // We will never return this to the user unless they call AuthenticateToken
+    // directly. Other auth functions retry and then return
+    // CORRUPT_SERVER_RESPONSE.
+    // TODO(sync): Implement retries.
+    BAD_AUTH_TOKEN,
+    // We should never return this, it's a placeholder during development.
+    // TODO(sync): Remove this.
+    UNSPECIFIC_ERROR_RETURN,
+  };
+
+  // Constructor. This class will keep the connection authenticated.
+  // TODO(sync): Make it work as described.
+  // TODO(sync): Require a UI callback mechanism.
+  Authenticator(ServerConnectionManager* manager, UserSettings* settings);
+
+  // Constructor for a simple authenticator used for programmatic login from
+  // test programs.  Note: |settings_| is left unset in this mode --
+  // TODO confirm callers never hit paths that dereference it.
+  explicit Authenticator(ServerConnectionManager* manager);
+
+  // This version of Authenticate tries to use saved credentials, if we have
+  // any.
+  AuthenticationResult Authenticate();
+
+  // If save_credentials is set we save the long-lived auth token to local disk.
+  // In all cases we save the username and password in memory (if given) so we
+  // can refresh the long-lived auth token if it expires.
+  // Also we save a 10-bit hash of the password to allow offline login.
+  // TODO(sync): Make it work as described.
+  // TODO(sync): Arguments for desired domain.
+  AuthenticationResult Authenticate(std::string username, std::string password,
+                                    bool save_credentials);
+  // Authenticates using a long-lived auth token directly. It uses the new
+  // proto buffer based call instead of the HTTP GET based one we currently
+  // use.
+  // Can return one of SUCCESS, SERVICE_DOWN, CORRUPT_SERVER_RESPONSE,
+  // USER_NOT_ACTIVATED or BAD_AUTH_TOKEN. See above for the meaning of these
+  // values.
+  // TODO(sync): Make this function private when we're done.
+  AuthenticationResult AuthenticateToken(std::string auth_token);
+
+  // Accessors for the identification cached by the last successful auth.
+  const char * display_email() const { return display_email_.c_str(); }
+  const char * display_name() const { return display_name_.c_str(); }
+ private:
+  // Stores the information in the UserIdentification returned from the server
+  // into the members below.  Returns SUCCESS or the value that should be
+  // returned to the user.
+  AuthenticationResult HandleSuccessfulTokenRequest(
+      const sync_pb::UserIdentification* user);
+  // The server connection manager that we're looking after.
+  ServerConnectionManager* server_connection_manager_;
+  // User identification as reported by the server on the last success.
+  std::string display_email_;
+  std::string display_name_;
+  std::string obfuscated_id_;
+  UserSettings* const settings_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_AUTHENTICATOR_H_
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
new file mode 100644
index 0000000..0eb279a
--- /dev/null
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
@@ -0,0 +1,439 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
+
+#include <string>
+#include <sstream>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/format_macros.h"
+#include "base/rand_util.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/update_applicator.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+namespace browser_sync {
+
+using std::set;
+using std::string;
+using std::vector;
+
+BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSetsCommand() {}
+BuildAndProcessConflictSetsCommand::~BuildAndProcessConflictSetsCommand() {}
+
+// Pipeline entry point: builds conflict sets for the session and records
+// whether any single-direction set was applied transactionally.
+void BuildAndProcessConflictSetsCommand::ModelChangingExecuteImpl(
+    SyncerSession *session) {
+  session->set_conflict_sets_built(BuildAndProcessConflictSets(session));
+}
+
+// Opens a write transaction, rebuilds the conflict sets, and attempts to
+// transactionally apply any sets consisting purely of server updates.
+// Returns true if some updates were applied (another sync pass is useful),
+// false otherwise or if the directory lookup fails.
+bool BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSets(
+    SyncerSession *session) {
+  syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+  if (!dir.good())
+    return false;
+  bool had_single_direction_sets = false;
+  { // Scope for the write transaction.
+    syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+    ConflictResolutionView conflict_view(session);
+    BuildConflictSets(&trans, &conflict_view);
+    had_single_direction_sets =
+        ProcessSingleDirectionConflictSets(&trans, session);
+    // We applied some updates transactionally; let's try syncing again.
+    if (had_single_direction_sets)
+      return true;
+  }
+  return false;
+}
+
+// Scans every conflict set.  A set consisting entirely of unapplied server
+// updates (no local unsynced changes) is applied transactionally via
+// ApplyUpdatesTransactionally; a set consisting entirely of local unsynced
+// changes is skipped (transactional commit is not attempted).
+// Returns true if at least one set was successfully applied.
+bool BuildAndProcessConflictSetsCommand::ProcessSingleDirectionConflictSets(
+    syncable::WriteTransaction* trans, SyncerSession* const session) {
+  bool rv = false;
+  ConflictResolutionView conflict_view(session);
+  set<ConflictSet*>::const_iterator all_sets_iterator;
+  for (all_sets_iterator = conflict_view.ConflictSetsBegin();
+       all_sets_iterator != conflict_view.ConflictSetsEnd(); ) {
+    const ConflictSet* conflict_set = *all_sets_iterator;
+    CHECK(conflict_set->size() >= 2);
+    // We scan the set to see if it consists of changes of only one type.
+    // size_t (not int) so the comparisons against size() below are not
+    // signed/unsigned mismatches.
+    ConflictSet::const_iterator i;
+    size_t unsynced_count = 0, unapplied_count = 0;
+    for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
+      syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
+      CHECK(entry.good());
+      if (entry.Get(syncable::IS_UNSYNCED))
+        unsynced_count++;
+      if (entry.Get(syncable::IS_UNAPPLIED_UPDATE))
+        unapplied_count++;
+    }
+    if (conflict_set->size() == unsynced_count && 0 == unapplied_count) {
+      // Entirely local changes: leave for commit-side conflict handling.
+      LOG(INFO) << "Skipped transactional commit attempt.";
+    } else if (conflict_set->size() == unapplied_count &&
+               0 == unsynced_count &&
+               ApplyUpdatesTransactionally(trans, conflict_set, session)) {
+      rv = true;
+    }
+    ++all_sets_iterator;
+  }
+  return rv;
+}
+
+namespace {
+
+// Snapshots |entry|'s kernel into |backup| so the entry can be restored if
+// a transactional apply fails.  Valid only for entries that are unapplied
+// updates with no local unsynced changes (CHECKed below).
+void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
+                                     syncable::EntryKernel* backup) {
+  CHECK(!entry->Get(syncable::IS_UNSYNCED)) << " Storing Rollback data for "
+      "entry that's unsynced." << *entry ;
+  CHECK(entry->Get(syncable::IS_UNAPPLIED_UPDATE)) << " Storing Rollback data "
+      "for entry that's not an unapplied update." << *entry ;
+  *backup = entry->GetKernelCopy();
+}
+
+// Generates names unique within a directory so that conflicting entries can
+// be moved aside without name collisions during a transactional apply.
+class UniqueNameGenerator {
+ public:
+  void Initialize() {
+    // To avoid name collisions we prefix the names with hex data derived from
+    // 64 bits of randomness.
+    // The previous format string ("%0" PRId64 "x.") printed the value in
+    // decimal followed by a literal 'x', contradicting the comment above;
+    // "%016" PRIx64 produces the intended zero-padded hex.
+    uint64 name_prefix = base::RandUint64();
+    name_stem_ = StringPrintf("%016" PRIx64 ".", name_prefix);
+  }
+  // Returns the random stem followed by |entry|'s id; unique as long as the
+  // stem is unique and ids are unique.
+  string StringNameForEntry(const syncable::Entry& entry) {
+    CHECK(!name_stem_.empty());
+    std::stringstream rv;
+    rv << name_stem_ << entry.Get(syncable::ID);
+    return rv.str();
+  }
+  // Same as StringNameForEntry, widened to a PathString.
+  PathString PathStringNameForEntry(const syncable::Entry& entry) {
+    string name = StringNameForEntry(entry);
+    return PathString(name.begin(), name.end());
+  }
+
+ private:
+  string name_stem_;  // Random hex prefix plus '.'; set by Initialize().
+};
+
+// Restores one entry to the state captured in |backup|.  Returns false if
+// any structural put (deletion flag, parent/name, predecessor) fails;
+// callers CHECK the result since rollback must always succeed.
+// (The unused local |was_del| from the original was removed.)
+bool RollbackEntry(syncable::WriteTransaction* trans,
+                   syncable::EntryKernel* backup) {
+  syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE,
+                               backup->ref(syncable::META_HANDLE));
+  CHECK(entry.good());
+
+  if (!entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL)))
+    return false;
+  syncable::Name name = syncable::Name::FromEntryKernel(backup);
+  if (!entry.PutParentIdAndName(backup->ref(syncable::PARENT_ID), name))
+    return false;
+
+  if (!backup->ref(syncable::IS_DEL)) {
+    // Only live entries carry a meaningful sibling position.
+    if (!entry.PutPredecessor(backup->ref(syncable::PREV_ID)))
+      return false;
+  }
+
+  // Verify the predecessor took effect (and, for deleted entries, that it
+  // already matched the backup).
+  if (backup->ref(syncable::PREV_ID) != entry.Get(syncable::PREV_ID))
+    return false;
+
+  entry.Put(syncable::CTIME, backup->ref(syncable::CTIME));
+  entry.Put(syncable::MTIME, backup->ref(syncable::MTIME));
+  entry.Put(syncable::BASE_VERSION, backup->ref(syncable::BASE_VERSION));
+  entry.Put(syncable::IS_DIR, backup->ref(syncable::IS_DIR));
+  entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL));
+  entry.Put(syncable::ID, backup->ref(syncable::ID));
+  entry.Put(syncable::IS_UNAPPLIED_UPDATE,
+            backup->ref(syncable::IS_UNAPPLIED_UPDATE));
+  return true;
+}
+
+// Moves a set of entries under the root with freshly generated unique
+// names, producing a collision-free starting state from which updates (or a
+// rollback) can be applied.
+class TransactionalUpdateEntryPreparer {
+ public:
+  TransactionalUpdateEntryPreparer() {
+    namegen_.Initialize();
+  }
+
+  // Reparents every entry in |ids| to the root under a unique random name.
+  // CHECKs that each move succeeds.
+  void PrepareEntries(syncable::WriteTransaction* trans,
+                      const vector<syncable::Id>* ids) {
+    vector<syncable::Id>::const_iterator it;
+    for (it = ids->begin(); it != ids->end(); ++it) {
+      syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
+      syncable::Name random_name(namegen_.PathStringNameForEntry(entry));
+      CHECK(entry.PutParentIdAndName(trans->root_id(), random_name));
+    }
+  }
+
+ private:
+  UniqueNameGenerator namegen_;
+  DISALLOW_COPY_AND_ASSIGN(TransactionalUpdateEntryPreparer);
+};
+
+} // namespace
+
+// Applies all updates in |update_set| as one all-or-nothing operation.
+// Backs up local state, moves everything to a neutral position, applies the
+// updates, and rolls back on any failure.  Returns true iff all updates in
+// the set were applied and progress was saved.
+bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
+    syncable::WriteTransaction* trans,
+    const vector<syncable::Id>* const update_set,
+    SyncerSession* const session) {
+  vector<int64> handles;  // The handles in the |update_set| order.
+  vector<syncable::Id> rollback_ids;  // Holds the same Ids as update_set, but
+                                      // sorted so that runs of adjacent nodes
+                                      // appear in order.
+  rollback_ids.reserve(update_set->size());
+  syncable::MetahandleSet rollback_ids_inserted_items;  // Tracks what's added
+                                                        // to |rollback_ids|.
+
+  vector<syncable::Id>::const_iterator it;
+  // 1. Build |rollback_ids| in the order required for successful rollback.
+  //    Specifically, for positions to come out right, restoring an item
+  //    requires that its predecessor in the sibling order is properly
+  //    restored first.
+  // 2. Build |handles|, the list of handles for ApplyUpdates.
+  for (it = update_set->begin(); it != update_set->end(); ++it) {
+    syncable::Entry entry(trans, syncable::GET_BY_ID, *it);
+    SyncerUtil::AddPredecessorsThenItem(trans, &entry,
+        syncable::IS_UNAPPLIED_UPDATE, &rollback_ids_inserted_items,
+        &rollback_ids);
+    handles.push_back(entry.Get(syncable::META_HANDLE));
+  }
+  DCHECK_EQ(rollback_ids.size(), update_set->size());
+  DCHECK_EQ(rollback_ids_inserted_items.size(), update_set->size());
+
+  // 3. Store the information needed to rollback if the transaction fails.
+  // Do this before modifying anything to keep the next/prev values intact.
+  vector<syncable::EntryKernel> rollback_data(rollback_ids.size());
+  for (size_t i = 0; i < rollback_ids.size(); ++i) {
+    syncable::Entry entry(trans, syncable::GET_BY_ID, rollback_ids[i]);
+    StoreLocalDataForUpdateRollback(&entry, &rollback_data[i]);
+  }
+
+  // 4. Use the preparer to move things to an initial starting state where no
+  // names collide, and nothing in the set is a child of anything else. If
+  // we've correctly calculated the set, the server tree is valid and no
+  // changes have occurred locally we should be able to apply updates from this
+  // state.
+  TransactionalUpdateEntryPreparer preparer;
+  preparer.PrepareEntries(trans, update_set);
+
+  // 5. Use the usual apply updates from the special start state we've just
+  // prepared.
+  UpdateApplicator applicator(session, handles.begin(), handles.end());
+  while (applicator.AttemptOneApplication(trans)) {
+    // Keep going till all updates are applied.
+  }
+  if (!applicator.AllUpdatesApplied()) {
+    LOG(ERROR) << "Transactional Apply Failed, Rolling back.";
+    // We have to move entries into the temp dir again. e.g. if a swap was in a
+    // set with other failing updates, the swap may have gone through, meaning
+    // the roll back needs to be transactional. But as we're going to a known
+    // good state we should always succeed.
+    preparer.PrepareEntries(trans, update_set);
+
+    // Rollback all entries.
+    for (size_t i = 0; i < rollback_data.size(); ++i) {
+      CHECK(RollbackEntry(trans, &rollback_data[i]));
+    }
+    return false;  // Don't save progress -- we just undid it.
+  }
+  applicator.SaveProgressIntoSessionState();
+  return true;
+}
+
+// Rebuilds the conflict sets from the current list of simple commit
+// conflicts: drops entries that are no longer conflicting, then merges sets
+// for name clashes, introduced loops, and non-empty deleted directories.
+void BuildAndProcessConflictSetsCommand::BuildConflictSets(
+    syncable::BaseTransaction* trans,
+    ConflictResolutionView* view) {
+  view->CleanupSets();
+  set<syncable::Id>::iterator i = view->CommitConflictsBegin();
+  while (i != view->CommitConflictsEnd()) {
+    syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
+    CHECK(entry.good());
+    if (!entry.Get(syncable::IS_UNSYNCED) &&
+        !entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
+      // This can happen very rarely. It means we had a simple conflicting
+      // item whose commit happened to go through. We drop the entry as it's
+      // no longer conflicting.
+      view->EraseCommitConflict(i++);
+      continue;
+    }
+    if (entry.ExistsOnClientBecauseDatabaseNameIsNonEmpty() &&
+       (entry.Get(syncable::IS_DEL) || entry.Get(syncable::SERVER_IS_DEL))) {
+       // If we're deleted on client or server we can't be in a complex set.
+      ++i;
+      continue;
+    }
+    bool new_parent =
+        entry.Get(syncable::PARENT_ID) != entry.Get(syncable::SERVER_PARENT_ID);
+    bool new_name = 0 != syncable::ComparePathNames(entry.GetSyncNameValue(),
+                                         entry.Get(syncable::SERVER_NAME));
+    if (new_parent || new_name)
+      MergeSetsForNameClash(trans, &entry, view);
+    if (new_parent)
+      MergeSetsForIntroducedLoops(trans, &entry, view);
+    MergeSetsForNonEmptyDirectories(trans, &entry, view);
+    ++i;
+  }
+}
+
+// If the server-side (parent, name) of |entry| collides with another item,
+// merges the two items' conflict sets.
+void BuildAndProcessConflictSetsCommand::MergeSetsForNameClash(
+    syncable::BaseTransaction* trans, syncable::Entry* entry,
+    ConflictResolutionView* view) {
+  PathString server_name = entry->Get(syncable::SERVER_NAME);
+  // Uncommitted entries have no server name. We trap this because the root
+  // item has a null name and 0 parentid.
+  if (server_name.empty())
+    return;
+  syncable::Id conflicting_id =
+      SyncerUtil::GetNameConflictingItemId(
+          trans, entry->Get(syncable::SERVER_PARENT_ID), server_name);
+  if (syncable::kNullId != conflicting_id)
+    view->MergeSets(entry->Get(syncable::ID), conflicting_id);
+}
+
+void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
+    syncable::BaseTransaction* trans, syncable::Entry* entry,
+    ConflictResolutionView* view) {
+  // This code crawls up from the item in question until it gets to the root
+  // or itself. If it gets to the root it does nothing. If it finds a loop all
+  // moved unsynced entries in the list of crawled entries have their sets
+  // merged with the entry.
+  // TODO(sync): Build test cases to cover this function when the argument
+  // list has settled.
+  syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
+  syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+  if (!parent.good()) {
+    return;
+  }
+  // Don't check for loop if the server parent is deleted.
+  if (parent.Get(syncable::IS_DEL))
+    return;
+  vector<syncable::Id> conflicting_entries;
+  while (!parent_id.IsRoot()) {
+    // NOTE(review): this |parent| shadows the one above -- presumably
+    // intentional, but worth confirming.
+    syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+    if (!parent.good()) {
+      LOG(INFO) << "Bad parent in loop check, skipping. Bad parent id: "
+        << parent_id << " entry: " << *entry;
+      return;
+    }
+    // NOTE(review): the second half of this condition reads |entry|, not
+    // |parent|, so it is invariant across loop iterations -- confirm whether
+    // parent.Get(PARENT_ID) != parent.Get(SERVER_PARENT_ID) was intended.
+    if (parent.Get(syncable::IS_UNSYNCED) &&
+        entry->Get(syncable::PARENT_ID) !=
+            entry->Get(syncable::SERVER_PARENT_ID))
+      conflicting_entries.push_back(parent_id);
+    parent_id = parent.Get(syncable::PARENT_ID);
+    if (parent_id == entry->Get(syncable::ID))
+      break;
+  }
+  // Reached the root without looping back to |entry|: no loop introduced.
+  if (parent_id.IsRoot())
+    return;
+  for (size_t i = 0; i < conflicting_entries.size(); i++) {
+    view->MergeSets(entry->Get(syncable::ID), conflicting_entries[i]);
+  }
+}
+
+namespace {
+
+// Policy object for CrawlDeletedTreeMergingSets: walks ancestors of a
+// locally-live entry looking for parents the *server* wants to delete.
+// Invariant violations are fatal (CHECK) since the local tree must be
+// consistent.
+class ServerDeletedPathChecker {
+ public:
+  // Returns true if |e| is a server-side delete that conflicts with the
+  // locally-live |log_entry| beneath it.
+  bool CausingConflict(const syncable::Entry& e,
+                       const syncable::Entry& log_entry) {
+    CHECK(e.good()) << "Missing parent in path of: " << log_entry;
+    if (e.Get(syncable::IS_UNAPPLIED_UPDATE) &&
+        e.Get(syncable::SERVER_IS_DEL)) {
+      CHECK(!e.Get(syncable::IS_DEL)) << " Inconsistency in local tree. "
+          "syncable::Entry: " << e << " Leaf: " << log_entry;
+      return true;
+    } else {
+      CHECK(!e.Get(syncable::IS_DEL)) << " Deleted entry has children. "
+          "syncable::Entry: " << e << " Leaf: " << log_entry;
+      return false;
+    }
+  }
+  // Returns 0 if we should stop investigating the path.
+  syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
+                                   syncable::Id id,
+                                   syncable::Id check_id,
+                                   const syncable::Entry& log_entry) {
+    syncable::Entry parent(trans, syncable::GET_BY_ID, id);
+    // Fixed log-message typo ("inconsitency") and missing space before |id|.
+    CHECK(parent.good()) << "Tree inconsistency, missing id " << id << " "
+        << log_entry;
+    syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
+    CHECK(parent_id != check_id) << "Loop in dir tree! "
+        << log_entry << " " << parent;
+    return parent_id;
+  }
+};
+
+// Policy object for CrawlDeletedTreeMergingSets: walks ancestors of a
+// server-side-live entry looking for parents deleted (but unsynced)
+// locally.  Unlike ServerDeletedPathChecker, unexpected states just stop
+// the walk instead of CHECKing.
+class LocallyDeletedPathChecker {
+ public:
+  // Returns true if |e| is a local unsynced delete conflicting with the
+  // server-side-live |log_entry| beneath it.
+  bool CausingConflict(const syncable::Entry& e,
+                       const syncable::Entry& log_entry) {
+    return e.good() && e.Get(syncable::IS_DEL) && e.Get(syncable::IS_UNSYNCED);
+  }
+  // Returns 0 (kNullId) if we should stop investigating the path.
+  syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
+                                   syncable::Id id,
+                                   syncable::Id check_id,
+                                   const syncable::Entry& log_entry) {
+    syncable::Entry parent(trans, syncable::GET_BY_ID, id);
+    if (!parent.good())
+      return syncable::kNullId;
+    syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
+    if (parent_id == check_id)
+      return syncable::kNullId;
+    return parent_id;
+  }
+};
+
+// Walks up from |entry|, merging |entry|'s conflict set with every ancestor
+// that |checker| reports as a conflicting delete, stopping at the first
+// non-conflicting ancestor or the root.
+template <typename Checker>
+void CrawlDeletedTreeMergingSets(syncable::BaseTransaction* trans,
+                                 const syncable::Entry& entry,
+                                 ConflictResolutionView* view,
+                                 Checker checker) {
+  syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
+  syncable::Id double_step_parent_id = parent_id;
+  // This block builds sets where we've got an entry in a directory the
+  // server wants to delete.
+  //
+  // Here we're walking up the tree to find all entries that the pass checks
+  // deleted. We can be extremely strict here as anything unexpected means
+  // invariants in the local hierarchy have been broken.
+  while (!parent_id.IsRoot()) {
+    if (!double_step_parent_id.IsRoot()) {
+      // Checks to ensure we don't loop: advance a second cursor two steps
+      // per iteration; GetAndExamineParent flags/aborts if it catches up.
+      double_step_parent_id = checker.GetAndExamineParent(
+          trans, double_step_parent_id, parent_id, entry);
+      double_step_parent_id = checker.GetAndExamineParent(
+          trans, double_step_parent_id, parent_id, entry);
+    }
+    syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
+    if (checker.CausingConflict(parent, entry))
+      view->MergeSets(entry.Get(syncable::ID), parent.Get(syncable::ID));
+    else
+      break;
+    parent_id = parent.Get(syncable::PARENT_ID);
+  }
+}
+
+} // namespace
+
+// Handles the "deleted directory still has children" cases in both
+// directions: a locally-live entry under a server-deleted path, and a
+// server-live update under a locally-deleted path.
+void BuildAndProcessConflictSetsCommand::MergeSetsForNonEmptyDirectories(
+    syncable::BaseTransaction* trans, syncable::Entry* entry,
+    ConflictResolutionView* view) {
+  if (entry->Get(syncable::IS_UNSYNCED) && !entry->Get(syncable::IS_DEL)) {
+    ServerDeletedPathChecker checker;
+    CrawlDeletedTreeMergingSets(trans, *entry, view, checker);
+  }
+  if (entry->Get(syncable::IS_UNAPPLIED_UPDATE) &&
+      !entry->Get(syncable::SERVER_IS_DEL)) {
+    syncable::Entry parent(trans, syncable::GET_BY_ID,
+                           entry->Get(syncable::SERVER_PARENT_ID));
+    syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
+    if (!parent.good())
+      return;
+    LocallyDeletedPathChecker checker;
+    if (!checker.CausingConflict(parent, *entry))
+      return;
+    // Merge with the server parent, then continue crawling from it.
+    view->MergeSets(entry->Get(syncable::ID), parent.Get(syncable::ID));
+    CrawlDeletedTreeMergingSets(trans, parent, view, checker);
+  }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
new file mode 100644
index 0000000..79559ba
--- /dev/null
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+
+namespace syncable {
+class BaseTransaction;
+class Entry;
+class Id;
+class MutableEntry;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class ConflictResolutionView;
+class SyncerSession;
+
+// Syncer pipeline command that groups conflicting entries into sets and
+// transactionally applies sets consisting purely of server-side updates.
+class BuildAndProcessConflictSetsCommand : public ModelChangingSyncerCommand {
+ public:
+  BuildAndProcessConflictSetsCommand();
+  virtual ~BuildAndProcessConflictSetsCommand();
+
+  // Builds and processes the conflict sets, recording on the session
+  // whether any single-direction set was applied.
+  virtual void ModelChangingExecuteImpl(SyncerSession *session);
+
+ private:
+  bool BuildAndProcessConflictSets(SyncerSession *session);
+
+  bool ProcessSingleDirectionConflictSets(
+      syncable::WriteTransaction* trans, SyncerSession* const session);
+  bool ApplyUpdatesTransactionally(
+      syncable::WriteTransaction* trans,
+      const std::vector<syncable::Id>* const update_set,
+      SyncerSession* const session);
+  // Fixed: the declaration carried an extra
+  // "BuildAndProcessConflictSetsCommand::" qualification, which is
+  // ill-formed on a member declaration inside the class body.
+  void BuildConflictSets(syncable::BaseTransaction* trans,
+                         ConflictResolutionView* view);
+
+  void MergeSetsForNameClash(syncable::BaseTransaction* trans,
+                             syncable::Entry* entry,
+                             ConflictResolutionView* view);
+  void MergeSetsForIntroducedLoops(syncable::BaseTransaction* trans,
+                                   syncable::Entry* entry,
+                                   ConflictResolutionView* view);
+  void MergeSetsForNonEmptyDirectories(syncable::BaseTransaction* trans,
+                                       syncable::Entry* entry,
+                                       ConflictResolutionView* view);
+  // NOTE(review): declared but no definition appears in the .cc -- confirm
+  // whether this is dead or implemented elsewhere.
+  void MergeSetsForPositionUpdate(syncable::BaseTransaction* trans,
+                                  syncable::Entry* entry,
+                                  ConflictResolutionView* view);
+
+  DISALLOW_COPY_AND_ASSIGN(BuildAndProcessConflictSetsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_BUILD_AND_PROCESS_CONFLICT_SETS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/build_commit_command.cc b/chrome/browser/sync/engine/build_commit_command.cc
new file mode 100644
index 0000000..f819d6c
--- /dev/null
+++ b/chrome/browser/sync/engine/build_commit_command.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/build_commit_command.h"
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::set;
+using std::string;
+using std::vector;
+using syncable::ExtendedAttribute;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::Name;
+
+namespace browser_sync {
+
+BuildCommitCommand::BuildCommitCommand() {}
+BuildCommitCommand::~BuildCommitCommand() {}
+
+// Builds the COMMIT ClientToServerMessage for the session's commit ids and
+// stores it on the session via set_commit_message().  The only entry
+// mutation made here is setting SYNCING on each committed entry.
+void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
+  ClientToServerMessage message;
+  message.set_share(ToUTF8(session->account_name()).get_string());
+  message.set_message_contents(ClientToServerMessage::COMMIT);
+
+  CommitMessage* commit_message = message.mutable_commit();
+  commit_message->set_cache_guid(
+      session->write_transaction()->directory()->cache_guid());
+
+  const vector<Id>& commit_ids = session->commit_ids();
+  for (size_t i = 0; i < commit_ids.size(); i++) {
+    Id id = commit_ids[i];
+    SyncEntity* sync_entry =
+        static_cast<SyncEntity*>(commit_message->add_entries());
+    sync_entry->set_id(id);
+    MutableEntry meta_entry(session->write_transaction(),
+                            syncable::GET_BY_ID,
+                            id);
+    CHECK(meta_entry.good());
+    // This is the only change we make to the entry in this function.
+    meta_entry.Put(syncable::SYNCING, true);
+
+    Name name = meta_entry.GetName();
+    CHECK(!name.value().empty()); // Make sure this isn't an update.
+    sync_entry->set_name(ToUTF8(name.value()).get_string());
+    // Set the non_unique_name if we have one. If we do, the server ignores
+    // the |name| value (using |non_unique_name| instead), and will return
+    // in the CommitResponse a unique name if one is generated. Even though
+    // we could get away with only sending |name|, we send both because it
+    // may aid in logging.
+    if (name.value() != name.non_unique_value()) {
+      sync_entry->set_non_unique_name(
+          ToUTF8(name.non_unique_value()).get_string());
+    }
+    // Deleted items with negative (client-only) parent ids can be a problem,
+    // so we reparent them to the root. TODO(sync): still true in protocol?
+    Id new_parent_id;
+    if (meta_entry.Get(syncable::IS_DEL) &&
+        !meta_entry.Get(syncable::PARENT_ID).ServerKnows()) {
+      new_parent_id = session->write_transaction()->root_id();
+    } else {
+      new_parent_id = meta_entry.Get(syncable::PARENT_ID);
+    }
+    sync_entry->set_parent_id(new_parent_id);
+    // TODO(sync): Investigate all places that think transactional commits
+    // actually exist.
+    //
+    // This is the only logic we'll need when transactional commits are
+    // moved to the server.
+    // If our parent has changes, send up the old one so the server can
+    // correctly deal with multiple parents.
+    if (new_parent_id != meta_entry.Get(syncable::SERVER_PARENT_ID) &&
+        0 != meta_entry.Get(syncable::BASE_VERSION) &&
+        syncable::CHANGES_VERSION != meta_entry.Get(syncable::BASE_VERSION)) {
+      sync_entry->set_old_parent_id(meta_entry.Get(syncable::SERVER_PARENT_ID));
+    }
+
+    // New items (never committed) go up with version 0; known items carry
+    // their base version.
+    int64 version = meta_entry.Get(syncable::BASE_VERSION);
+    if (syncable::CHANGES_VERSION == version || 0 == version) {
+      // If this CHECK triggers during unit testing, check that we haven't
+      // altered an item that's an unapplied update.
+      CHECK(!id.ServerKnows()) << meta_entry;
+      sync_entry->set_version(0);
+    } else {
+      CHECK(id.ServerKnows()) << meta_entry;
+      sync_entry->set_version(meta_entry.Get(syncable::BASE_VERSION));
+    }
+    sync_entry->set_ctime(ClientTimeToServerTime(
+        meta_entry.Get(syncable::CTIME)));
+    sync_entry->set_mtime(ClientTimeToServerTime(
+        meta_entry.Get(syncable::MTIME)));
+
+    // Copy all extended attributes into the protocol message.
+    set<ExtendedAttribute> extended_attributes;
+    meta_entry.GetAllExtendedAttributes(
+        session->write_transaction(), &extended_attributes);
+    set<ExtendedAttribute>::iterator iter;
+    sync_pb::ExtendedAttributes* mutable_extended_attributes =
+        sync_entry->mutable_extended_attributes();
+    for (iter = extended_attributes.begin(); iter != extended_attributes.end();
+         ++iter) {
+      sync_pb::ExtendedAttributes_ExtendedAttribute *extended_attribute =
+          mutable_extended_attributes->add_extendedattribute();
+      extended_attribute->set_key(ToUTF8(iter->key()).get_string());
+      SyncerProtoUtil::CopyBlobIntoProtoBytes(iter->value(),
+          extended_attribute->mutable_value());
+    }
+
+    // Deletion is final on the server, let's move things and then delete them.
+    if (meta_entry.Get(syncable::IS_DEL)) {
+      sync_entry->set_deleted(true);
+    } else if (meta_entry.Get(syncable::IS_BOOKMARK_OBJECT)) {
+      sync_pb::SyncEntity_BookmarkData* bookmark =
+          sync_entry->mutable_bookmarkdata();
+      bookmark->set_bookmark_folder(meta_entry.Get(syncable::IS_DIR));
+      const Id& prev_id = meta_entry.Get(syncable::PREV_ID);
+      string prev_string = prev_id.IsRoot() ? string() : prev_id.GetServerId();
+      sync_entry->set_insert_after_item_id(prev_string);
+
+      if (!meta_entry.Get(syncable::IS_DIR)) {
+        string bookmark_url = ToUTF8(meta_entry.Get(syncable::BOOKMARK_URL));
+        bookmark->set_bookmark_url(bookmark_url);
+        SyncerProtoUtil::CopyBlobIntoProtoBytes(
+            meta_entry.Get(syncable::BOOKMARK_FAVICON),
+            bookmark->mutable_bookmark_favicon());
+      }
+    }
+  }
+  session->set_commit_message(message);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/build_commit_command.h b/chrome/browser/sync/engine/build_commit_command.h
new file mode 100644
index 0000000..445024f
--- /dev/null
+++ b/chrome/browser/sync/engine/build_commit_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+namespace browser_sync {
+
+// Syncer pipeline command that assembles a COMMIT ClientToServerMessage
+// from the session's commit ids and stores it on the session.
+class BuildCommitCommand : public SyncerCommand {
+ public:
+  BuildCommitCommand();
+  virtual ~BuildCommitCommand();
+
+  // Builds and records the commit message for |session|.
+  virtual void ExecuteImpl(SyncerSession *session);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_BUILD_COMMIT_COMMAND_H_
diff --git a/chrome/browser/sync/engine/change_reorder_buffer.cc b/chrome/browser/sync/engine/change_reorder_buffer.cc
new file mode 100644
index 0000000..a74c62e0
--- /dev/null
+++ b/chrome/browser/sync/engine/change_reorder_buffer.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/change_reorder_buffer.h"
+
+#include <limits>
+#include <queue>
+#include <set>
+#include <utility> // for pair<>
+#include <vector>
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+using std::numeric_limits;
+using std::pair;
+using std::queue;
+using std::set;
+using std::vector;
+
+namespace sync_api {
+
+// Traversal provides a way to collect a set of nodes from the syncable
+// directory structure and then traverse them, along with any intermediate
+// nodes, in a top-down fashion, starting from a single common ancestor. A
+// Traversal starts out empty and is grown by means of the ExpandToInclude
+// method. Once constructed, the top(), begin_children(), and end_children()
+// methods can be used to explore the nodes in root-to-leaf order.
+class ChangeReorderBuffer::Traversal {
+ public:
+  // A (parent metahandle, child metahandle) edge in the traversal tree.
+  typedef pair<int64, int64> ParentChildLink;
+  typedef set<ParentChildLink> LinkSet;
+
+  // Creates an empty traversal.
+  Traversal() : top_(kInvalidId) { }
+
+  // Expand the traversal so that it includes the node indicated by
+  // |child_handle|.
+  void ExpandToInclude(syncable::BaseTransaction* trans,
+                       int64 child_handle) {
+    // If |top_| is invalid, this is the first insertion -- easy.
+    if (top_ == kInvalidId) {
+      top_ = child_handle;
+      return;
+    }
+
+    // Walk upward from |child_handle| toward the root, adding one
+    // parent-child link per step, until we reconnect with the existing
+    // traversal (or restart from |top_| after reaching the root).
+    int64 node_to_include = child_handle;
+    while (node_to_include != kInvalidId && node_to_include != top_) {
+      int64 node_parent = 0;
+
+      syncable::Entry node(trans, syncable::GET_BY_HANDLE, node_to_include);
+      CHECK(node.good());
+      if (node.Get(syncable::ID).IsRoot()) {
+        // If we've hit the root, and the root isn't already in the tree
+        // (it would have to be |top_| if it were), start a new expansion
+        // upwards from |top_| to unite the original traversal with the
+        // path we just added that goes from |child_handle| to the root.
+        node_to_include = top_;
+        top_ = node.Get(syncable::META_HANDLE);
+      } else {
+        // Otherwise, get the parent ID so that we can add a ParentChildLink.
+        syncable::Entry parent(trans, syncable::GET_BY_ID,
+                               node.Get(syncable::PARENT_ID));
+        CHECK(parent.good());
+        node_parent = parent.Get(syncable::META_HANDLE);
+
+        ParentChildLink link(node_parent, node_to_include);
+
+        // If the link exists in the LinkSet |links_|, we don't need to search
+        // any higher; we are done.
+        if (links_.find(link) != links_.end())
+          return;
+
+        // Otherwise, extend |links_|, and repeat on the parent.
+        links_.insert(link);
+        node_to_include = node_parent;
+      }
+    }
+  }
+
+  // Return the top node of the traversal. Use this as a starting point
+  // for walking the tree.
+  int64 top() const { return top_; }
+
+  // Return an iterator corresponding to the first child (in the traversal)
+  // of the node specified by |parent_id|. Iterate this return value until
+  // it is equal to the value returned by end_children(parent_id). The
+  // enumeration thus provided is unordered.
+  LinkSet::const_iterator begin_children(int64 parent_id) const {
+    // upper_bound on (parent_id, INT64_MIN) lands on the first stored link
+    // whose parent is |parent_id| -- or on the first link of a later
+    // parent, which is exactly end_children(parent_id).
+    return links_.upper_bound(
+        ParentChildLink(parent_id, numeric_limits<int64>::min()));
+  }
+
+  // Return an iterator corresponding to one past the last child in the
+  // traversal of the node specified by |parent_id| (an "end" iterator).
+  LinkSet::const_iterator end_children(int64 parent_id) const {
+    return begin_children(parent_id + 1);
+  }
+
+ private:
+  // The topmost point in the directory hierarchy that is in the traversal,
+  // and thus the first node to be traversed. If the traversal is empty,
+  // this is kInvalidId. If the traversal contains exactly one member, |top_|
+  // will be the solitary member, and |links_| will be empty.
+  int64 top_;
+  // A set of single-level links that compose the traversal below |top_|. The
+  // (parent, child) ordering of values enables efficient lookup of children
+  // given the parent handle, which is used for top-down traversal. |links_|
+  // is expected to be connected -- every node that appears as a parent in a
+  // link must either appear as a child of another link, or else be the
+  // topmost node, |top_|.
+  LinkSet links_;
+
+  DISALLOW_COPY_AND_ASSIGN(Traversal);
+};
+
+// Emit the buffered changes into |changelist| in the order the
+// SyncObserver contract requires: deletions first, then adds/updates with
+// every parent appearing before its children (see the header comment).
+void ChangeReorderBuffer::GetAllChangesInTreeOrder(
+    const BaseTransaction* sync_trans,
+    vector<ChangeRecord>* changelist) {
+  syncable::BaseTransaction* trans = sync_trans->GetWrappedTrans();
+
+  // Step 1: Iterate through the operations, doing three things:
+  // (a) Push deleted items straight into the |changelist|.
+  // (b) Construct a traversal spanning all non-deleted items.
+  // (c) Construct a set of all parent nodes of any position changes.
+  set<int64> parents_of_position_changes;
+  Traversal traversal;
+
+  OperationMap::const_iterator i;
+  for (i = operations_.begin(); i != operations_.end(); ++i) {
+    if (i->second == OP_DELETE) {
+      ChangeRecord record;
+      record.id = i->first;
+      record.action = ChangeRecord::ACTION_DELETE;
+      changelist->push_back(record);
+    } else {
+      traversal.ExpandToInclude(trans, i->first);
+      if (i->second == OP_ADD ||
+          i->second == OP_UPDATE_POSITION_AND_PROPERTIES) {
+        ReadNode node(sync_trans);
+        CHECK(node.InitByIdLookup(i->first));
+        parents_of_position_changes.insert(node.GetParentId());
+      }
+    }
+  }
+
+  // Step 2: Breadth-first expansion of the traversal, enumerating children in
+  // the syncable sibling order if there were any position updates.
+  // NOTE(review): if only deletions were pushed, traversal.top() is
+  // kInvalidId and the loop visits that sentinel once; presumably harmless
+  // since no operation or link will match it -- confirm.
+  queue<int64> to_visit;
+  to_visit.push(traversal.top());
+  while (!to_visit.empty()) {
+    int64 next = to_visit.front();
+    to_visit.pop();
+
+    // If the node has an associated action, output a change record.
+    i = operations_.find(next);
+    if (i != operations_.end()) {
+      ChangeRecord record;
+      record.id = next;
+      if (i->second == OP_ADD)
+        record.action = ChangeRecord::ACTION_ADD;
+      else
+        record.action = ChangeRecord::ACTION_UPDATE;
+      changelist->push_back(record);
+    }
+
+    // Now add the children of |next| to |to_visit|.
+    if (parents_of_position_changes.find(next) ==
+        parents_of_position_changes.end()) {
+      // No order changes on this parent -- traverse only the nodes listed
+      // in the traversal (and not in sibling order).
+      Traversal::LinkSet::const_iterator j = traversal.begin_children(next);
+      Traversal::LinkSet::const_iterator end = traversal.end_children(next);
+      for (; j != end; ++j) {
+        CHECK(j->first == next);
+        to_visit.push(j->second);
+      }
+    } else {
+      // There were ordering changes on the children of this parent, so
+      // enumerate all the children in the sibling order.
+      syncable::Entry parent(trans, syncable::GET_BY_HANDLE, next);
+      syncable::Id id = trans->directory()->
+          GetFirstChildId(trans, parent.Get(syncable::ID));
+      while (!id.IsRoot()) {
+        syncable::Entry child(trans, syncable::GET_BY_ID, id);
+        CHECK(child.good());
+        int64 handle = child.Get(syncable::META_HANDLE);
+        to_visit.push(handle);
+        // If there is no operation on this child node, record it as an
+        // update, so that the listener gets notified of all nodes in the new
+        // ordering.
+        if (operations_.find(handle) == operations_.end())
+          operations_[handle] = OP_UPDATE_POSITION_AND_PROPERTIES;
+        id = child.Get(syncable::NEXT_ID);
+      }
+    }
+  }
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/change_reorder_buffer.h b/chrome/browser/sync/engine/change_reorder_buffer.h
new file mode 100644
index 0000000..ddea1b6
--- /dev/null
+++ b/chrome/browser/sync/engine/change_reorder_buffer.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Defines ChangeReorderBuffer, which can be used to sort a list of item
+// actions to achieve the ordering constraint required by the SyncObserver
+// interface of the SyncAPI.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
+
+#include <map>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncapi.h"
+
+namespace sync_api {
+
+// ChangeReorderBuffer is a utility type which accepts an unordered set
+// of changes (via its Push methods), and yields a vector of ChangeRecords
+// (via the GetAllChangesInTreeOrder method) that are in the order that
+// the SyncObserver expects them to be. A buffer is initially empty.
+//
+// The ordering produced by ChangeReorderBuffer is as follows:
+// (a) All Deleted items appear first.
+// (b) For Updated and/or Added items, parents appear before their children.
+// (c) When there are changes to the sibling order (this means Added items,
+// or Updated items with the |position_changed| parameter set to true),
+// all siblings under a parent will appear in the output, even if they
+// are not explicitly pushed. The sibling order will be preserved in
+// the output list -- items will appear before their sibling-order
+// successors.
+// (d) When there are no changes to the sibling order under a parent node,
+// the sibling order is not necessarily preserved in the output for
+// its children.
+class ChangeReorderBuffer {
+ public:
+  typedef SyncManager::ChangeRecord ChangeRecord;
+  // Creates an empty buffer.
+  ChangeReorderBuffer() { }
+
+  // Insert an item, identified by the metahandle |id|, into the reorder
+  // buffer. This item will appear in the output list as an ACTION_ADD
+  // ChangeRecord.
+  void PushAddedItem(int64 id) {
+    operations_[id] = OP_ADD;
+  }
+
+  // Insert an item, identified by the metahandle |id|, into the reorder
+  // buffer. This item will appear in the output list as an ACTION_DELETE
+  // ChangeRecord.
+  void PushDeletedItem(int64 id) {
+    operations_[id] = OP_DELETE;
+  }
+
+  // Insert an item, identified by the metahandle |id|, into the reorder
+  // buffer. This item will appear in the output list as an ACTION_UPDATE
+  // ChangeRecord. Also, if |position_changed| is true, all siblings of this
+  // item will appear in the output list as well; if it wasn't explicitly
+  // pushed, the siblings will have an ACTION_UPDATE ChangeRecord.
+  void PushUpdatedItem(int64 id, bool position_changed) {
+    operations_[id] = position_changed ? OP_UPDATE_POSITION_AND_PROPERTIES :
+                                         OP_UPDATE_PROPERTIES_ONLY;
+  }
+
+  // Reset the buffer, forgetting any pushed items, so that it can be used
+  // again to reorder a new set of changes.
+  void Clear() {
+    operations_.clear();
+  }
+
+  bool IsEmpty() const {
+    return operations_.empty();
+  }
+
+  // Output a reordered list of changes to |changelist| using the items
+  // that were pushed into the reorder buffer. |sync_trans| is used
+  // to determine the ordering.
+  // NOTE: the traversal may add implicit position-only updates to the
+  // buffer as it runs (see the .cc); call Clear() before reusing it.
+  void GetAllChangesInTreeOrder(const BaseTransaction* sync_trans,
+                                std::vector<ChangeRecord>* changelist);
+
+ private:
+  class Traversal;  // Helper defined in change_reorder_buffer.cc.
+  enum Operation {
+    OP_ADD,                             // AddedItem.
+    OP_DELETE,                          // DeletedItem.
+    OP_UPDATE_PROPERTIES_ONLY,          // UpdatedItem with position_changed=0.
+    OP_UPDATE_POSITION_AND_PROPERTIES,  // UpdatedItem with position_changed=1.
+  };
+  typedef std::map<int64, Operation> OperationMap;
+
+  // Stores the items that have been pushed into the buffer, and the
+  // type of operation that was associated with them.
+  OperationMap operations_;
+
+  DISALLOW_COPY_AND_ASSIGN(ChangeReorderBuffer);
+};
+
+} // namespace sync_api
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CHANGE_REORDER_BUFFER_H_
diff --git a/chrome/browser/sync/engine/client_command_channel.h b/chrome/browser/sync/engine/client_command_channel.h
new file mode 100644
index 0000000..2f91a9b
--- /dev/null
+++ b/chrome/browser/sync/engine/client_command_channel.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
+
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/util/event_sys.h"
+
+namespace browser_sync {
+
+// Commands for the client come back in sync responses, which is kind
+// of inconvenient because some services (like the bandwidth throttler)
+// want to know about them. So to avoid explicit dependencies on this
+// protocol behavior, the syncer dumps all client commands onto a shared
+// client command channel.
+
+struct ClientCommandChannelTraits {
+  // An event is a pointer to the command protobuf received from the server.
+  typedef const sync_pb::ClientCommand* EventType;
+  // A NULL command pointer is the channel-shutdown sentinel.
+  static inline bool IsChannelShutdownEvent(const EventType &event) {
+    return 0 == event;
+  }
+};
+
+// The shared channel on which client commands are published (see comment
+// above); PThreadMutex is the lock type used by the EventChannel.
+typedef EventChannel<ClientCommandChannelTraits, PThreadMutex>
+    ClientCommandChannel;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CLIENT_COMMAND_CHANNEL_H_
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.cc b/chrome/browser/sync/engine/conflict_resolution_view.cc
new file mode 100644
index 0000000..aca5d89
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolution_view.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+
+#include <map>
+#include <set>
+
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+using std::map;
+using std::set;
+
+namespace browser_sync {
+
+// ConflictResolutionView holds no state of its own: every method below
+// forwards to the SyncProcessState borrowed from the session.
+ConflictResolutionView::ConflictResolutionView(SyncerSession* session)
+    : process_state_(session->sync_process_state_) {}
+
+int ConflictResolutionView::conflicting_updates() const {
+  return process_state_->conflicting_updates();
+}
+
+int ConflictResolutionView::conflicting_commits() const {
+  return process_state_->conflicting_commits();
+}
+
+void ConflictResolutionView::set_conflicting_commits(const int val) {
+  process_state_->set_conflicting_commits(val);
+}
+
+// Sync-cycle counter accessors; these touch the state's public member
+// directly rather than going through accessors.
+int ConflictResolutionView::num_sync_cycles() const {
+  return process_state_->num_sync_cycles_;
+}
+
+void ConflictResolutionView::increment_num_sync_cycles() {
+  ++(process_state_->num_sync_cycles_);
+}
+
+void ConflictResolutionView::zero_num_sync_cycles() {
+  process_state_->num_sync_cycles_ = 0;
+}
+
+int64 ConflictResolutionView::current_sync_timestamp() const {
+  return process_state_->current_sync_timestamp();
+}
+
+int64 ConflictResolutionView::servers_latest_timestamp() const {
+  return process_state_->servers_latest_timestamp();
+}
+
+// True iff we're stuck. User should contact support.
+bool ConflictResolutionView::syncer_stuck() const {
+  return process_state_->syncer_stuck();
+}
+
+void ConflictResolutionView::set_syncer_stuck(const bool val) {
+  process_state_->set_syncer_stuck(val);
+}
+}
+
+// Conflict-set accessors: thin pass-throughs to SyncProcessState.
+IdToConflictSetMap::const_iterator ConflictResolutionView::IdToConflictSetFind(
+    const syncable::Id& the_id) const {
+  return process_state_->IdToConflictSetFind(the_id);
+}
+
+IdToConflictSetMap::const_iterator
+    ConflictResolutionView::IdToConflictSetBegin() const {
+  return process_state_->IdToConflictSetBegin();
+}
+
+IdToConflictSetMap::const_iterator
+    ConflictResolutionView::IdToConflictSetEnd() const {
+  return process_state_->IdToConflictSetEnd();
+}
+
+IdToConflictSetMap::size_type
+    ConflictResolutionView::IdToConflictSetSize() const {
+  return process_state_->IdToConflictSetSize();
+}
+
+const ConflictSet*
+    ConflictResolutionView::IdToConflictSetGet(const syncable::Id& the_id) {
+  return process_state_->IdToConflictSetGet(the_id);
+}
+
+set<ConflictSet*>::const_iterator
+    ConflictResolutionView::ConflictSetsBegin() const {
+  return process_state_->ConflictSetsBegin();
+}
+
+set<ConflictSet*>::const_iterator
+    ConflictResolutionView::ConflictSetsEnd() const {
+  return process_state_->ConflictSetsEnd();
+}
+
+set<ConflictSet*>::size_type
+    ConflictResolutionView::ConflictSetsSize() const {
+  return process_state_->ConflictSetsSize();
+}
+
+void ConflictResolutionView::MergeSets(const syncable::Id& set1,
+                                       const syncable::Id& set2) {
+  process_state_->MergeSets(set1, set2);
+}
+
+void ConflictResolutionView::CleanupSets() {
+  process_state_->CleanupSets();
+}
+
+// Conflicting/blocked item bookkeeping, also delegated to the process
+// state.  Note the naming translation: "CommitConflict" here maps to
+// "ConflictingItem" on SyncProcessState.
+bool ConflictResolutionView::HasCommitConflicts() const {
+  return process_state_->HasConflictingItems();
+}
+
+bool ConflictResolutionView::HasBlockedItems() const {
+  return process_state_->HasBlockedItems();
+}
+
+int ConflictResolutionView::CommitConflictsSize() const {
+  return process_state_->ConflictingItemsSize();
+}
+
+int ConflictResolutionView::BlockedItemsSize() const {
+  return process_state_->BlockedItemsSize();
+}
+
+void ConflictResolutionView::AddCommitConflict(const syncable::Id& the_id) {
+  process_state_->AddConflictingItem(the_id);
+}
+
+void ConflictResolutionView::AddBlockedItem(const syncable::Id& the_id) {
+  process_state_->AddBlockedItem(the_id);
+}
+
+void ConflictResolutionView::EraseCommitConflict(
+    set<syncable::Id>::iterator it) {
+  process_state_->EraseConflictingItem(it);
+}
+
+void ConflictResolutionView::EraseBlockedItem(
+    set<syncable::Id>::iterator it) {
+  process_state_->EraseBlockedItem(it);
+}
+
+set<syncable::Id>::iterator
+ConflictResolutionView::CommitConflictsBegin() const {
+  return process_state_->ConflictingItemsBegin();
+}
+
+set<syncable::Id>::iterator
+ConflictResolutionView::BlockedItemsBegin() const {
+  return process_state_->BlockedItemsBegin();
+}
+
+set<syncable::Id>::iterator
+    ConflictResolutionView::CommitConflictsEnd() const {
+  return process_state_->ConflictingItemsEnd();
+}
+
+set<syncable::Id>::iterator
+    ConflictResolutionView::BlockedItemsEnd() const {
+  return process_state_->BlockedItemsEnd();
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.h b/chrome/browser/sync/engine/conflict_resolution_view.h
new file mode 100644
index 0000000..a60af65
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolution_view.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Conflict resolution view is intended to provide a restricted
+// view of the sync cycle state for the conflict resolver. Since the
+// resolver doesn't get to see all of the SyncProcess, we can allow
+// it to operate on a subsection of the data.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+
+namespace syncable {
+class Id;
+}
+
+namespace browser_sync {
+
+class SyncCycleState;
+class SyncProcessState;
+class SyncerSession;
+
+class ConflictResolutionView {
+  // THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+ public:
+
+  explicit ConflictResolutionView(SyncProcessState* state)
+      : process_state_(state) {
+  }
+
+  // Convenience constructor that borrows the state from |session|;
+  // defined in the .cc to avoid including syncer_session.h here.
+  explicit ConflictResolutionView(SyncerSession* session);
+
+  ~ConflictResolutionView() {}
+
+  int conflicting_updates() const;
+
+  // TODO(sync) can successful commit go in session?
+  int successful_commits() const;
+
+  void increment_successful_commits();
+
+  void zero_successful_commits();
+
+  int conflicting_commits() const;
+
+  void set_conflicting_commits(const int val);
+
+  int num_sync_cycles() const;
+
+  void increment_num_sync_cycles();
+
+  void zero_num_sync_cycles();
+
+  // True iff we're stuck. Something has gone wrong with the syncer.
+  bool syncer_stuck() const;
+
+  void set_syncer_stuck(const bool val);
+
+  int64 current_sync_timestamp() const;
+
+  int64 servers_latest_timestamp() const;
+
+  // Conflict-set accessors, forwarded to the underlying process state.
+  IdToConflictSetMap::const_iterator IdToConflictSetFind(
+      const syncable::Id& the_id) const;
+
+  IdToConflictSetMap::const_iterator IdToConflictSetBegin() const;
+
+  IdToConflictSetMap::const_iterator IdToConflictSetEnd() const;
+
+  IdToConflictSetMap::size_type IdToConflictSetSize() const;
+
+  const ConflictSet* IdToConflictSetGet(const syncable::Id& the_id);
+
+  std::set<ConflictSet*>::const_iterator ConflictSetsBegin() const;
+
+  std::set<ConflictSet*>::const_iterator ConflictSetsEnd() const;
+
+  std::set<ConflictSet*>::size_type ConflictSetsSize() const;
+
+  void MergeSets(const syncable::Id& set1, const syncable::Id& set2);
+
+  void CleanupSets();
+
+  // Commit-conflict and blocked-item bookkeeping.
+  bool HasCommitConflicts() const;
+
+  bool HasBlockedItems() const;
+
+  int CommitConflictsSize() const;
+
+  int BlockedItemsSize() const;
+
+  void AddCommitConflict(const syncable::Id& the_id);
+
+  void AddBlockedItem(const syncable::Id& the_id);
+
+  void EraseCommitConflict(std::set<syncable::Id>::iterator it);
+
+  void EraseBlockedItem(std::set<syncable::Id>::iterator it);
+
+  std::set<syncable::Id>::iterator CommitConflictsBegin() const;
+
+  std::set<syncable::Id>::iterator BlockedItemsBegin() const;
+
+  std::set<syncable::Id>::iterator CommitConflictsEnd() const;
+
+  std::set<syncable::Id>::iterator BlockedItemsEnd() const;
+
+ private:
+  // Non-owning; the state is presumably owned by the session / sync
+  // process and outlives this view -- confirm.
+  SyncProcessState* process_state_;
+
+  DISALLOW_COPY_AND_ASSIGN(ConflictResolutionView);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
new file mode 100644
index 0000000..9bfe419
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -0,0 +1,758 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+
+#include <map>
+#include <set>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+using std::map;
+using std::set;
+using syncable::BaseTransaction;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::Name;
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+// Threshold on the sync-cycle counter; presumably used to decide when the
+// resolver gives up and marks the syncer stuck (usage not visible in this
+// chunk -- confirm).
+const int SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT = 8;
+
+// Nothing to set up or tear down; members are default-initialized.
+ConflictResolver::ConflictResolver() {
+}
+
+ConflictResolver::~ConflictResolver() {
+}
+
+namespace {
+// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
+// This is historical cruft that seems to be actually reached by some users.
+// Build a de-conflicted name of the form "<base> (Edited on YYYY-MM-DD)"
+// using the current local date.
+// NOTE(review): localtime() returns a shared static buffer and is not
+// thread-safe; confirm this path is single-threaded or use localtime_r.
+inline PathString GetConflictPathnameBase(PathString base) {
+  time_t time_since = time(NULL);
+  struct tm* now = localtime(&time_since);
+  // Use a fixed format as the locale's format may include '/' characters or
+  // other illegal characters.
+  PathString date = IntToPathString(now->tm_year + 1900);
+  date.append(PSTR("-"));
+  ++now->tm_mon; // tm_mon is 0-based.
+  if (now->tm_mon < 10)
+    date.append(PSTR("0"));
+  date.append(IntToPathString(now->tm_mon));
+  date.append(PSTR("-"));
+  if (now->tm_mday < 10)
+    date.append(PSTR("0"));
+  date.append(IntToPathString(now->tm_mday));
+  return base + PSTR(" (Edited on ") + date + PSTR(")");
+}
+
+// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
+// Choose a name, derived from |original_name|, that does not collide with
+// any existing child of |parent_id|.  If the original name is free it is
+// returned unchanged; otherwise a date suffix (and, if still colliding, a
+// numeric "(n)" suffix) is appended, preserving a short file extension and
+// staying within kSyncProtocolMaxNameLengthBytes.
+Name FindNewName(BaseTransaction* trans,
+                 Id parent_id,
+                 const SyncName& original_name) {
+  const PathString name = original_name.value();
+  // 255 is defined in our spec.
+  const int allowed_length = kSyncProtocolMaxNameLengthBytes;
+  // TODO(sync): How do we get length on other platforms? The limit is
+  // checked in java on the server, so it's not the number of glyphs it's the
+  // number of 16 bit characters in the UTF-16 representation.
+
+  // 10 characters for 32 bit numbers + 2 characters for brackets means 12
+  // characters should be more than enough for the name. Doubling this ensures
+  // that we will have enough space.
+  COMPILE_ASSERT(kSyncProtocolMaxNameLengthBytes >= 24,
+                 maximum_name_too_short);
+  CHECK(name.length() <= allowed_length);
+
+  // Fast path: no existing sibling already uses this name.
+  if (!Entry(trans,
+             syncable::GET_BY_PARENTID_AND_DBNAME,
+             parent_id,
+             name).good())
+    return Name::FromSyncName(original_name);
+  // Split off a "reasonable" extension (non-leading dot, and shorter than
+  // half the budget) so suffixes are inserted before it.
+  PathString base = name;
+  PathString ext;
+  PathString::size_type ext_index = name.rfind('.');
+  if (PathString::npos != ext_index && 0 != ext_index &&
+      name.length() - ext_index < allowed_length / 2) {
+    base = name.substr(0, ext_index);
+    ext = name.substr(ext_index);
+  }
+
+  PathString name_base = GetConflictPathnameBase(base);
+  if (name_base.length() + ext.length() > allowed_length) {
+    name_base.resize(allowed_length - ext.length());
+    TrimPathStringToValidCharacter(&name_base);
+  }
+  PathString new_name = name_base + ext;
+  // Keep appending "(n)" until the candidate is unique under |parent_id|.
+  int n = 2;
+  while (Entry(trans,
+               syncable::GET_BY_PARENTID_AND_DBNAME,
+               parent_id,
+               new_name).good()) {
+    PathString local_ext = PSTR("(");
+    local_ext.append(IntToPathString(n));
+    local_ext.append(PSTR(")"));
+    local_ext.append(ext);
+    if (name_base.length() + local_ext.length() > allowed_length) {
+      name_base.resize(allowed_length - local_ext.length());
+      TrimPathStringToValidCharacter(&name_base);
+    }
+    new_name = name_base + local_ext;
+    n++;
+  }
+
+  CHECK(new_name.length() <= kSyncProtocolMaxNameLengthBytes);
+  return Name(new_name);
+}
+
+} // namespace
+
+// Resolve a simple conflict in favor of the server: clearing IS_UNSYNCED
+// drops the local edit so the pending server update can be applied.
+void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
+  // An update matches local actions, merge the changes.
+  // This is a little fishy because we don't actually merge them.
+  // In the future we should do a 3-way merge.
+  LOG(INFO) << "Server and local changes match, merging:" << entry;
+  // With IS_UNSYNCED false, changes should be merged.
+  // METRIC simple conflict resolved by merge.
+  entry->Put(syncable::IS_UNSYNCED, false);
+}
+
+// Resolve a simple conflict in favor of the local changes: fast-forward
+// BASE_VERSION to the server's version and drop the unapplied update so
+// the local edits win.  |trans| is unused here; presumably kept for
+// signature symmetry with the other resolution helpers.
+void ConflictResolver::OverwriteServerChanges(WriteTransaction* trans,
+                                              MutableEntry * entry) {
+  // This is similar to an overwrite from the old client.
+  // This is equivalent to a scenario where we got the update before we'd
+  // made our local client changes.
+  // TODO(chron): This is really a general property clobber. We clobber
+  // the server side property. Perhaps we should actually do property merging.
+  entry->Put(syncable::BASE_VERSION, entry->Get(syncable::SERVER_VERSION));
+  entry->Put(syncable::IS_UNAPPLIED_UPDATE, false);
+  // METRIC conflict resolved by overwrite.
+}
+
+// Attempt to resolve a single-item (non-set) conflict on |id|.  Returns
+// SYNC_PROGRESS when the entry was changed in a way that can advance the
+// sync cycle, NO_SYNC_PROGRESS otherwise.
+ConflictResolver::ProcessSimpleConflictResult
+ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
+                                        Id id,
+                                        SyncerSession* session) {
+  MutableEntry entry(trans, syncable::GET_BY_ID, id);
+  // Must be good as the entry won't have been cleaned up.
+  CHECK(entry.good());
+  // If an update fails, locally we have to be in a set or unsynced. We're not
+  // in a set here, so we must be unsynced.
+  if (!entry.Get(syncable::IS_UNSYNCED))
+    return NO_SYNC_PROGRESS;
+  if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
+    if (!entry.Get(syncable::PARENT_ID).ServerKnows()) {
+      LOG(INFO) << "Item conflicting because its parent not yet committed. "
+          "Id: "<< id;
+    } else {
+      LOG(INFO) << "No set for conflicting entry id " << id << ". There should "
+          "be an update/commit that will fix this soon. This message should "
+          "not repeat.";
+    }
+    return NO_SYNC_PROGRESS;
+  }
+  if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) {
+    // we've both deleted it, so lets just drop the need to commit/update this
+    // entry.
+    entry.Put(syncable::IS_UNSYNCED, false);
+    entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+    // we've made changes, but they won't help syncing progress.
+    // METRIC simple conflict resolved by merge.
+    return NO_SYNC_PROGRESS;
+  }
+
+  if (!entry.Get(syncable::SERVER_IS_DEL)) {
+    // TODO(chron): Should we check more fields? Since IS_UNSYNCED is
+    // turned on, this is really probably enough as fields will be overwritten.
+    // Check if there's no changes.
+
+    // Verbose but easier to debug.
+    bool name_matches = entry.SyncNameMatchesServerName();
+    bool parent_matches = entry.Get(syncable::PARENT_ID) ==
+        entry.Get(syncable::SERVER_PARENT_ID);
+    bool entry_deleted = entry.Get(syncable::IS_DEL);
+
+    // If name and parent are unchanged locally, the server update can be
+    // taken as-is; otherwise the local edits win.
+    if (!entry_deleted && name_matches && parent_matches) {
+      LOG(INFO) << "Resolving simple conflict, ignoring local changes for:"
+                << entry;
+      IgnoreLocalChanges(&entry);
+    } else {
+      LOG(INFO) << "Resolving simple conflict, overwriting server"
+          " changes for:" << entry;
+      OverwriteServerChanges(trans, &entry);
+    }
+    return SYNC_PROGRESS;
+  } else { // SERVER_IS_DEL is true
+    // If a server deleted folder has local contents we should be in a set.
+    if (entry.Get(syncable::IS_DIR)) {
+      Directory::ChildHandles children;
+      trans->directory()->GetChildHandles(trans,
+                                          entry.Get(syncable::ID),
+                                          &children);
+      if (0 != children.size()) {
+        LOG(INFO) << "Entry is a server deleted directory with local contents, "
+            "should be in a set. (race condition).";
+        return NO_SYNC_PROGRESS;
+      }
+    }
+    // METRIC conflict resolved by entry split;
+
+    // If the entry's deleted on the server, we can have a directory here.
+    entry.Put(syncable::IS_UNSYNCED, true);
+
+    // Split the entry: the server's delete becomes a separate entry, and
+    // the local version lives on to be committed as new.
+    SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);
+
+    MutableEntry server_update(trans, syncable::GET_BY_ID, id);
+    CHECK(server_update.good());
+    CHECK(server_update.Get(syncable::META_HANDLE) !=
+          entry.Get(syncable::META_HANDLE))
+        << server_update << entry;
+
+    return SYNC_PROGRESS;
+  }
+}
+
+namespace {
+
+// Return true if any entry in |children| has a database name that already
+// exists among the children of |folder_id|.
+bool NamesCollideWithChildrenOfFolder(BaseTransaction* trans,
+                                      const Directory::ChildHandles& children,
+                                      Id folder_id) {
+  Directory::ChildHandles::const_iterator i = children.begin();
+  while (i != children.end()) {
+    Entry child(trans, syncable::GET_BY_HANDLE, *i);
+    CHECK(child.good());
+    if (Entry(trans,
+              syncable::GET_BY_PARENTID_AND_DBNAME,
+              folder_id,
+              child.GetName().db_value()).good())
+      return true;
+    ++i;
+  }
+  return false;
+}
+
+// Rename |entry| to a non-colliding name produced by FindNewName().  The
+// entry is expected to already be marked unsynced (CHECK'd below).
+void GiveEntryNewName(WriteTransaction* trans,
+                      MutableEntry* entry) {
+  using namespace syncable;
+  Name new_name =
+      FindNewName(trans, entry->Get(syncable::PARENT_ID), entry->GetName());
+  LOG(INFO) << "Resolving name clash, renaming " << *entry << " to "
+            << new_name.db_value();
+  entry->PutName(new_name);
+  CHECK(entry->Get(syncable::IS_UNSYNCED));
+}
+
+} // namespace
+
+// Try to resolve a name clash by merging |locally_named| (a new,
+// client-only item) into |server_named| (the server's item with the same
+// name).  On success the local entry adopts the server entry's ID and
+// server fields, the server entry is turned into a quiet tombstone, and
+// true is returned; returns false when the pair is not safe to merge.
+bool ConflictResolver::AttemptItemMerge(WriteTransaction* trans,
+                                        MutableEntry* locally_named,
+                                        MutableEntry* server_named) {
+  // To avoid complications we only merge new entries with server entries.
+  if (locally_named->Get(syncable::IS_DIR) !=
+      server_named->Get(syncable::SERVER_IS_DIR) ||
+      locally_named->Get(syncable::ID).ServerKnows() ||
+      locally_named->Get(syncable::IS_UNAPPLIED_UPDATE) ||
+      server_named->Get(syncable::IS_UNSYNCED))
+    return false;
+  Id local_id = locally_named->Get(syncable::ID);
+  Id desired_id = server_named->Get(syncable::ID);
+  if (locally_named->Get(syncable::IS_DIR)) {
+    // Extra work for directory name clash. We have to make sure we don't have
+    // clashing child items, and update the parent id of the children of the
+    // new entry.
+    Directory::ChildHandles children;
+    trans->directory()->GetChildHandles(trans, local_id, &children);
+    if (NamesCollideWithChildrenOfFolder(trans, children, desired_id))
+      return false;
+
+    LOG(INFO) << "Merging local changes to: " << desired_id << ". "
+              << *locally_named;
+
+    // Move the server entry out of the way (fresh local ID) and re-parent
+    // all the local folder's children onto the server folder's ID.
+    server_named->Put(syncable::ID, trans->directory()->NextId());
+    Directory::ChildHandles::iterator i;
+    for (i = children.begin() ; i != children.end() ; ++i) {
+      MutableEntry child_entry(trans, syncable::GET_BY_HANDLE, *i);
+      CHECK(child_entry.good());
+      CHECK(child_entry.Put(syncable::PARENT_ID, desired_id));
+      CHECK(child_entry.Put(syncable::IS_UNSYNCED, true));
+      Id id = child_entry.Get(syncable::ID);
+      // we only note new entries for quicker merging next round.
+      if (!id.ServerKnows())
+        children_of_merged_dirs_.insert(id);
+    }
+  } else {
+    // For non-directories, only merge onto a server-side delete.
+    if (!server_named->Get(syncable::IS_DEL))
+      return false;
+  }
+
+  LOG(INFO) << "Identical client and server items merging server changes. " <<
+      *locally_named << " server: " << *server_named;
+
+  // Clear server_named's server data and mark it deleted so it goes away
+  // quietly because it's now identical to a deleted local entry.
+  // locally_named takes on the ID of the server entry.
+  server_named->Put(syncable::ID, trans->directory()->NextId());
+  locally_named->Put(syncable::ID, desired_id);
+  locally_named->Put(syncable::IS_UNSYNCED, false);
+  CopyServerFields(server_named, locally_named);
+  ClearServerData(server_named);
+  server_named->Put(syncable::IS_DEL, true);
+  server_named->Put(syncable::BASE_VERSION, 0);
+  CHECK(SUCCESS ==
+        SyncerUtil::AttemptToUpdateEntryWithoutMerge(
+            trans, locally_named, NULL, NULL));
+  return true;
+}
+
// Determines whether |locally_named| and |server_named| actually constitute
// a client/server name clash (both alive, same parent, same name) and, if
// so, resolves it: first by attempting a merge, then by renaming the local
// item out of the way.  Returns NO_CLASH when the pair does not clash, or
// SOLVED once a resolution has been applied.
ConflictResolver::ServerClientNameClashReturn
ConflictResolver::ProcessServerClientNameClash(WriteTransaction* trans,
                                               MutableEntry* locally_named,
                                               MutableEntry* server_named,
                                               SyncerSession* session) {
  if (!locally_named->ExistsOnClientBecauseDatabaseNameIsNonEmpty())
    return NO_CLASH;  // locally_named is a server update.
  if (locally_named->Get(syncable::IS_DEL) ||
      server_named->Get(syncable::SERVER_IS_DEL)) {
    return NO_CLASH;
  }
  if (locally_named->Get(syncable::PARENT_ID) !=
      server_named->Get(syncable::SERVER_PARENT_ID)) {
    return NO_CLASH;  // different parents
  }

  PathString name = locally_named->GetSyncNameValue();
  if (0 != syncable::ComparePathNames(name,
          server_named->Get(syncable::SERVER_NAME))) {
    return NO_CLASH;  // different names.
  }

  // First try to merge.
  if (AttemptItemMerge(trans, locally_named, server_named)) {
    // METRIC conflict resolved by merge
    return SOLVED;
  }
  // We need to rename.  Both flags below should already be set for a true
  // clash; the LOG(ERROR)s flag unexpected states without bailing out.
  if (!locally_named->Get(syncable::IS_UNSYNCED)) {
    LOG(ERROR) << "Locally named part of a name conflict not unsynced?";
    locally_named->Put(syncable::IS_UNSYNCED, true);
  }
  if (!server_named->Get(syncable::IS_UNAPPLIED_UPDATE)) {
    LOG(ERROR) << "Server named part of a name conflict not an update?";
  }
  GiveEntryNewName(trans, locally_named);

  // METRIC conflict resolved by rename
  return SOLVED;
}
+
+ConflictResolver::ServerClientNameClashReturn
+ConflictResolver::ProcessNameClashesInSet(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ SyncerSession* session) {
+ ConflictSet::const_iterator i,j;
+ for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
+ if (!entryi.Get(syncable::IS_UNSYNCED) &&
+ !entryi.Get(syncable::IS_UNAPPLIED_UPDATE))
+ // This set is broken / doesn't make sense, this may be transient.
+ return BOGUS_SET;
+ for (j = conflict_set->begin() ; *i != *j ; ++j) {
+ MutableEntry entryj(trans, syncable::GET_BY_ID, *j);
+ ServerClientNameClashReturn rv =
+ ProcessServerClientNameClash(trans, &entryi, &entryj, session);
+ if (NO_CLASH == rv)
+ rv = ProcessServerClientNameClash(trans, &entryj, &entryi, session);
+ if (NO_CLASH != rv)
+ return rv;
+ }
+ }
+ return NO_CLASH;
+}
+
+ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
+ ConflictSet* set) {
+ // TODO(sync): Come up with a better scheme for set hashing. This scheme
+ // will make debugging easy.
+ // If this call to sort is removed, we need to add one before we use
+ // binary_search in ProcessConflictSet
+ sort(set->begin(), set->end());
+ std::stringstream rv;
+ for(ConflictSet::iterator i = set->begin() ; i != set->end() ; ++i )
+ rv << *i << ".";
+ return rv.str();
+}
+
+namespace {
+
+bool AttemptToFixCircularConflict(WriteTransaction* trans,
+ ConflictSet* conflict_set) {
+ ConflictSet::const_iterator i, j;
+ for(i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
+ if (entryi.Get(syncable::PARENT_ID) ==
+ entryi.Get(syncable::SERVER_PARENT_ID) ||
+ !entryi.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ !entryi.Get(syncable::IS_DIR)) {
+ continue;
+ }
+ Id parentid = entryi.Get(syncable::SERVER_PARENT_ID);
+ // Create the entry here as it's the only place we could ever get a parentid
+ // that doesn't correspond to a real entry.
+ Entry parent(trans, syncable::GET_BY_ID, parentid);
+ if (!parent.good()) // server parent update not received yet
+ continue;
+ // This loop walks upwards from the server parent. If we hit the root (0)
+ // all is well. If we hit the entry we're examining it means applying the
+ // parent id would cause a loop. We don't need more general loop detection
+ // because we know our local tree is valid.
+ while (!parentid.IsRoot()) {
+ Entry parent(trans, syncable::GET_BY_ID, parentid);
+ CHECK(parent.good());
+ if (parentid == *i)
+ break; // it's a loop
+ parentid = parent.Get(syncable::PARENT_ID);
+ }
+ if (parentid.IsRoot())
+ continue;
+ LOG(INFO) << "Overwriting server changes to avoid loop: " << entryi;
+ entryi.Put(syncable::BASE_VERSION, entryi.Get(syncable::SERVER_VERSION));
+ entryi.Put(syncable::IS_UNSYNCED, true);
+ entryi.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+ // METRIC conflict resolved by breaking dir loop.
+ return true;
+ }
+ return false;
+}
+
// Fixes the case where an unsynced (not yet committed) entry lives inside a
// directory tree the server has deleted.  If the chain of server-deleted
// ancestors within the conflict set is intact, each ancestor gets a fresh
// client-only ID and is re-marked for commit, effectively recreating the
// tree so the local entry can be committed.  Returns true if the repair was
// applied.  NOTE: relies on |conflict_set| being sorted (GetSetKey sorts it)
// for the binary_search calls.
bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
                                                  ConflictSet* conflict_set,
                                                  const Entry& entry) {
  if (!entry.Get(syncable::IS_UNSYNCED) || entry.Get(syncable::IS_DEL))
    return false;
  Id parentid = entry.Get(syncable::PARENT_ID);
  MutableEntry parent(trans, syncable::GET_BY_ID, parentid);
  if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
      !parent.Get(syncable::SERVER_IS_DEL) ||
      !binary_search(conflict_set->begin(), conflict_set->end(), parentid))
    return false;
  // We're trying to commit into a directory tree that's been deleted.
  // To solve this we recreate the directory tree.
  //
  // We do this in two parts, first we ensure the tree is unaltered since the
  // conflict was detected.
  Id id = parentid;
  while (!id.IsRoot()) {
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
      break;
    Entry parent(trans, syncable::GET_BY_ID, id);
    if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
        !parent.Get(syncable::SERVER_IS_DEL))
      return false;
    id = parent.Get(syncable::PARENT_ID);
  }
  // Now we fix up the entries.
  id = parentid;
  while (!id.IsRoot()) {
    MutableEntry parent(trans, syncable::GET_BY_ID, id);
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
      break;
    LOG(INFO) << "Giving directory a new id so we can undelete it "
        << parent;
    // Dropping the server data and assigning a fresh, not-server-known ID
    // turns the ancestor back into a brand-new local create from the
    // server's point of view.
    ClearServerData(&parent);
    SyncerUtil::ChangeEntryIDAndUpdateChildren(trans, &parent,
        trans->directory()->NextId());
    parent.Put(syncable::BASE_VERSION, 0);
    parent.Put(syncable::IS_UNSYNCED, true);
    id = parent.Get(syncable::PARENT_ID);
    // METRIC conflict resolved by recreating dir tree.
  }
  return true;
}
+
+bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
+ ConflictSet* conflict_set,
+ const Entry& entry) {
+ if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) ||
+ entry.Get(syncable::SERVER_IS_DEL))
+ return false;
+ Id parent_id = entry.Get(syncable::SERVER_PARENT_ID);
+ MutableEntry parent(trans, syncable::GET_BY_ID, parent_id);
+ if (!parent.good() || !parent.Get(syncable::IS_DEL) ||
+ !binary_search(conflict_set->begin(), conflict_set->end(), parent_id)) {
+ return false;
+ }
+ // We've deleted a directory tree that's got contents on the server.
+ // We recreate the directory to solve the problem.
+ //
+ // We do this in two parts, first we ensure the tree is unaltered since
+ // the conflict was detected.
+ Id id = parent_id;
+ // As we will be crawling the path of deleted entries there's a chance
+ // we'll end up having to reparent an item as there will be an invalid
+ // parent.
+ Id reroot_id = syncable::kNullId;
+ // similarly crawling deleted items means we risk loops.
+ int loop_detection = conflict_set->size();
+ while (!id.IsRoot() && --loop_detection >= 0) {
+ Entry parent(trans, syncable::GET_BY_ID, id);
+ // If we get a bad parent, or a parent that's deleted on client and
+ // server we recreate the hierarchy in the root.
+ if (!parent.good()) {
+ reroot_id = id;
+ break;
+ }
+ CHECK(parent.Get(syncable::IS_DIR));
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
+ // We've got to an entry that's not in the set. If it has been
+ // deleted between set building and this point in time we
+ // return false. If it had been deleted earlier it would have been
+ // in the set.
+ // TODO(sync): Revisit syncer code organization to see if
+ // conflict resolution can be done in the same transaction as set
+ // building.
+ if (parent.Get(syncable::IS_DEL))
+ return false;
+ break;
+ }
+ if (!parent.Get(syncable::IS_DEL) ||
+ parent.Get(syncable::SERVER_IS_DEL) ||
+ !parent.Get(syncable::IS_UNSYNCED)) {
+ return false;
+ }
+ id = parent.Get(syncable::PARENT_ID);
+ }
+ // If we find we've been looping we re-root the hierarchy.
+ if (loop_detection < 0)
+ if (id == entry.Get(syncable::ID))
+ reroot_id = entry.Get(syncable::PARENT_ID);
+ else
+ reroot_id = id;
+ // Now we fix things up by undeleting all the folders in the item's
+ // path.
+ id = parent_id;
+ while (!id.IsRoot() && id != reroot_id) {
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
+ break;
+ MutableEntry entry(trans, syncable::GET_BY_ID, id);
+ Id parent_id = entry.Get(syncable::PARENT_ID);
+ if (parent_id == reroot_id)
+ parent_id = trans->root_id();
+ Name old_name = entry.GetName();
+ Name new_name = FindNewName(trans, parent_id, old_name);
+ LOG(INFO) << "Undoing our deletion of " << entry <<
+ ", will have name " << new_name.db_value();
+ if (new_name != old_name || parent_id != entry.Get(syncable::PARENT_ID))
+ CHECK(entry.PutParentIdAndName(parent_id, new_name));
+ entry.Put(syncable::IS_DEL, false);
+ id = entry.Get(syncable::PARENT_ID);
+ // METRIC conflict resolved by recreating dir tree.
+ }
+ return true;
+}
+
+bool AttemptToFixRemovedDirectoriesWithContent(WriteTransaction* trans,
+ ConflictSet* conflict_set) {
+ ConflictSet::const_iterator i,j;
+ for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
+ Entry entry(trans, syncable::GET_BY_ID, *i);
+ if (AttemptToFixUnsyncedEntryInDeletedServerTree(trans,
+ conflict_set, entry)) {
+ return true;
+ }
+ if (AttemptToFixUpdateEntryInDeletedLocalTree(trans, conflict_set, entry))
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
// Attempts to resolve one conflict set.  Sets smaller than two items, or
// seen too few times (|conflict_count| < 3, i.e. possibly transient), are
// skipped.  Otherwise tries, in order: name-clash resolution, breaking
// directory loops, and repairing deleted-directory trees.  Returns true if
// any progress was made on the set.
bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
                                          ConflictSet* conflict_set,
                                          int conflict_count,
                                          SyncerSession* session) {
  int set_size = conflict_set->size();
  if (set_size < 2) {
    LOG(WARNING) << "Skipping conflict set because it has size " << set_size;
    // We can end up with sets of size one if we have a new item in a set that
    // we tried to commit transactionally. This should not be a persistent
    // situation.
    return false;
  }
  if (conflict_count < 3) {
    // Avoid resolving sets that could be the result of transient conflicts.
    // Transient conflicts can occur because the client or server can be
    // slightly out of date.
    return false;
  }

  LOG(INFO) << "Fixing a set containing " << set_size << " items";

  // Name clashes first; any non-NO_CLASH result other than SOLVED (e.g.
  // BOGUS_SET) means we should not attempt further fixes this round.
  ServerClientNameClashReturn rv = ProcessNameClashesInSet(trans, conflict_set,
                                                           session);
  if (SOLVED == rv)
    return true;
  if (NO_CLASH != rv)
    return false;

  // Fix circular conflicts.
  if (AttemptToFixCircularConflict(trans, conflict_set))
    return true;
  // Check for problems involving contents of removed folders.
  if (AttemptToFixRemovedDirectoriesWithContent(trans, conflict_set))
    return true;
  return false;
}
+
+
// Returns true (and flags |view| as stuck) if the conflict described by the
// ids in [begin, end) has persisted for SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT
// attempts while we are fully caught up with the server.
// NOTE(review): |end - begin| assumes InputIt is random-access; current
// callers pass pointers or ConflictSet iterators, which qualify -- confirm
// before reusing with weaker iterator categories.
template <typename InputIt>
bool ConflictResolver::LogAndSignalIfConflictStuck(
    BaseTransaction* trans,
    int attempt_count,
    InputIt begin,
    InputIt end,
    ConflictResolutionView* view) {
  if (attempt_count < SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT)
    return false;

  // Don't signal stuck if we're not up to date.
  if (view->servers_latest_timestamp() != view->current_sync_timestamp())
    return false;

  LOG(ERROR) << "[BUG] Conflict set cannot be resolved, has "
      << end - begin << " items:";
  for (InputIt i = begin ; i != end ; ++i) {
    Entry e(trans, syncable::GET_BY_ID, *i);
    if (e.good())
      LOG(ERROR) << " " << e;
    else
      LOG(ERROR) << " Bad ID:" << *i;
  }

  view->set_syncer_stuck(true);

  return true;
  // TODO(sync): If we're stuck for a while we need to alert the user,
  // clear cache or reset syncing. At the very least we should stop trying
  // something that's obviously not working.
}
+
+bool ConflictResolver::ResolveSimpleConflicts(const ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession *session) {
+ WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ bool forward_progress = false;
+ // First iterate over simple conflict items (those that belong to no set).
+ set<Id>::const_iterator conflicting_item_it;
+ for (conflicting_item_it = view->CommitConflictsBegin();
+ conflicting_item_it != view->CommitConflictsEnd() ;
+ ++conflicting_item_it) {
+ Id id = *conflicting_item_it;
+ map<Id, ConflictSet*>::const_iterator item_set_it =
+ view->IdToConflictSetFind(id);
+ if (item_set_it == view->IdToConflictSetEnd() ||
+ 0 == item_set_it->second) {
+ // We have a simple conflict.
+ switch(ProcessSimpleConflict(&trans, id, session)) {
+ case NO_SYNC_PROGRESS:
+ {
+ int conflict_count = (simple_conflict_count_map_[id] += 2);
+ bool stuck = LogAndSignalIfConflictStuck(&trans, conflict_count,
+ &id, &id + 1, view);
+ break;
+ }
+ case SYNC_PROGRESS:
+ forward_progress = true;
+ break;
+ }
+ }
+ }
+ // Reduce the simple_conflict_count for each item currently tracked.
+ SimpleConflictCountMap::iterator i = simple_conflict_count_map_.begin();
+ while (i != simple_conflict_count_map_.end()) {
+ if (0 == --(i->second))
+ simple_conflict_count_map_.erase(i++);
+ else
+ ++i;
+ }
+ return forward_progress;
+}
+
+bool ConflictResolver::ResolveConflicts(const ScopedDirLookup& dir,
+ ConflictResolutionView* view,
+ SyncerSession *session) {
+ if (view->HasBlockedItems()) {
+ LOG(INFO) << "Delaying conflict resolution, have " <<
+ view->BlockedItemsSize() << " blocked items.";
+ return false;
+ }
+ bool rv = false;
+ if (ResolveSimpleConflicts(dir, view, session))
+ rv = true;
+ WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ set<Id> children_of_dirs_merged_last_round;
+ std::swap(children_of_merged_dirs_, children_of_dirs_merged_last_round);
+ set<ConflictSet*>::const_iterator set_it;
+ for (set_it = view->ConflictSetsBegin();
+ set_it != view->ConflictSetsEnd();
+ set_it++) {
+ ConflictSet* conflict_set = *set_it;
+ ConflictSetCountMapKey key = GetSetKey(conflict_set);
+ conflict_set_count_map_[key] += 2;
+ int conflict_count = conflict_set_count_map_[key];
+ // Keep a metric for new sets.
+ if (2 == conflict_count) {
+ // METRIC conflict sets seen ++
+ }
+ // See if this set contains entries whose parents were merged last round.
+ if (SortedCollectionsIntersect(children_of_dirs_merged_last_round.begin(),
+ children_of_dirs_merged_last_round.end(),
+ conflict_set->begin(),
+ conflict_set->end())) {
+ LOG(INFO) << "Accelerating resolution for hierarchical merge.";
+ conflict_count += 2;
+ }
+ // See if we should process this set.
+ if (ProcessConflictSet(&trans, conflict_set, conflict_count, session)) {
+ rv = true;
+ }
+ SyncerStatus status(session);
+ bool stuck = LogAndSignalIfConflictStuck(&trans, conflict_count,
+ conflict_set->begin(),
+ conflict_set->end(), view);
+ }
+ if (rv) {
+ // This code means we don't signal that syncing is stuck when any conflict
+ // resolution has occured.
+ // TODO(sync): As this will also reduce our sensitivity to problem
+ // conditions and increase the time for cascading resolutions we may have to
+ // revisit this code later, doing something more intelligent.
+ conflict_set_count_map_.clear();
+ simple_conflict_count_map_.clear();
+ }
+ ConflictSetCountMap::iterator i = conflict_set_count_map_.begin();
+ while (i != conflict_set_count_map_.end()) {
+ if (0 == --i->second) {
+ conflict_set_count_map_.erase(i++);
+ // METRIC self resolved conflict sets ++.
+ } else {
+ ++i;
+ }
+ }
+ return rv;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
new file mode 100644
index 0000000..7959106
--- /dev/null
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class that watches the syncer and attempts to resolve any conflicts that
+// occur.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace syncable {
+class BaseTransaction;
+class Id;
+class MutableEntry;
+class ScopedDirLookup;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
// Watches the syncer and attempts to resolve conflicts between local state
// and server updates: name clashes, directory loops, and deleted-tree
// repairs.
class ConflictResolver {
  friend class SyncerTest;
  FRIEND_TEST(SyncerTest, ConflictResolverMergeOverwritesLocalEntry);
 public:
  ConflictResolver();
  ~ConflictResolver();
  // Called by the syncer at the end of a update/commit cycle.
  // Returns true if the syncer should try to apply its updates again.
  bool ResolveConflicts(const syncable::ScopedDirLookup& dir,
                        ConflictResolutionView* view,
                        SyncerSession *session);

  // Called by ProcessServerClientNameClash. Returns true if it's merged the
  // items, false otherwise. Does not re-check preconditions covered in
  // ProcessServerClientNameClash (i.e. it assumes a name clash).
  bool AttemptItemMerge(syncable::WriteTransaction* trans,
                        syncable::MutableEntry* local_entry,
                        syncable::MutableEntry* server_entry);

 private:
  // We keep a map to record how often we've seen each conflict set. We use
  // this to screen out false positives caused by transient server or client
  // states, and to allow us to try to make smaller changes to fix situations
  // before moving onto more drastic solutions.
  typedef std::string ConflictSetCountMapKey;
  typedef std::map<ConflictSetCountMapKey, int> ConflictSetCountMap;
  typedef std::map<syncable::Id, int> SimpleConflictCountMap;

  enum ProcessSimpleConflictResult {
    NO_SYNC_PROGRESS,  // No changes to advance syncing made.
    SYNC_PROGRESS,     // Progress made.
  };

  // Outcome of a pairwise client/server name-clash check.
  enum ServerClientNameClashReturn {
    NO_CLASH,
    SOLUTION_DEFERRED,
    SOLVED,
    BOGUS_SET,
  };

  // Get a key for the given set. NB: May reorder set contents.
  // The key is currently not very efficient, but will ease debugging.
  ConflictSetCountMapKey GetSetKey(ConflictSet* conflict_set);

  void IgnoreLocalChanges(syncable::MutableEntry * entry);
  void OverwriteServerChanges(syncable::WriteTransaction* trans,
                              syncable::MutableEntry* entry);

  // Resolves a conflict on a single item that belongs to no conflict set.
  ProcessSimpleConflictResult ProcessSimpleConflict(
      syncable::WriteTransaction* trans,
      syncable::Id id,
      SyncerSession* session);

  bool ResolveSimpleConflicts(const syncable::ScopedDirLookup& dir,
                              ConflictResolutionView* view,
                              SyncerSession* session);

  // Attempts resolution of a whole conflict set; |conflict_count| throttles
  // how aggressively we treat sets that might be transient.
  bool ProcessConflictSet(syncable::WriteTransaction* trans,
                          ConflictSet* conflict_set,
                          int conflict_count,
                          SyncerSession* session);

  // Gives any unsynced entries in the given set new names if possible.
  bool RenameUnsyncedEntries(syncable::WriteTransaction* trans,
                             ConflictSet* conflict_set);

  ServerClientNameClashReturn ProcessServerClientNameClash(
      syncable::WriteTransaction* trans,
      syncable::MutableEntry* locally_named,
      syncable::MutableEntry* server_named,
      SyncerSession* session);
  ServerClientNameClashReturn ProcessNameClashesInSet(
      syncable::WriteTransaction* trans,
      ConflictSet* conflict_set,
      SyncerSession* session);

  // Returns true if we're stuck
  template <typename InputIt>
  bool LogAndSignalIfConflictStuck(syncable::BaseTransaction* trans,
                                   int attempt_count,
                                   InputIt start, InputIt end,
                                   ConflictResolutionView* view);

  ConflictSetCountMap conflict_set_count_map_;
  SimpleConflictCountMap simple_conflict_count_map_;

  // Contains the ids of uncommitted items that are children of entries merged
  // in the previous cycle. This is used to speed up the merge resolution of
  // deep trees. Used to happen in store refresh.
  // TODO(chron): Can we get rid of this optimization?
  std::set<syncable::Id> children_of_merged_dirs_;

  DISALLOW_COPY_AND_ASSIGN(ConflictResolver);
};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLVER_H_
diff --git a/chrome/browser/sync/engine/download_updates_command.cc b/chrome/browser/sync/engine/download_updates_command.cc
new file mode 100644
index 0000000..0d84275
--- /dev/null
+++ b/chrome/browser/sync/engine/download_updates_command.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/download_updates_command.h"
+
+#include <string>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using syncable::ScopedDirLookup;
+
+namespace browser_sync {
+
+using std::string;
+
// The command holds no state of its own; nothing to set up or tear down.
DownloadUpdatesCommand::DownloadUpdatesCommand() {}
DownloadUpdatesCommand::~DownloadUpdatesCommand() {}
+
+void DownloadUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+ ClientToServerMessage client_to_server_message;
+ ClientToServerResponse update_response;
+
+ client_to_server_message.set_share(
+ static_cast<const string&>(ToUTF8(session->account_name())));
+ client_to_server_message.set_message_contents(
+ ClientToServerMessage::GET_UPDATES);
+ GetUpdatesMessage* get_updates =
+ client_to_server_message.mutable_get_updates();
+
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ LOG(INFO) << "Getting updates from ts " << dir->last_sync_timestamp();
+ get_updates->set_from_timestamp(dir->last_sync_timestamp());
+
+ // Set GetUpdatesMessage.GetUpdatesCallerInfo information.
+ get_updates->mutable_caller_info()->set_source(session->TestAndSetSource());
+ get_updates->mutable_caller_info()->set_notifications_enabled(
+ session->notifications_enabled());
+
+ bool ok = SyncerProtoUtil::PostClientToServerMessage(
+ &client_to_server_message,
+ &update_response,
+ session);
+
+ if (!ok) {
+ SyncerStatus status(session);
+ status.increment_consecutive_problem_get_updates();
+ status.increment_consecutive_errors();
+ LOG(ERROR) << "PostClientToServerMessage() failed";
+ return;
+ }
+ session->set_update_response(update_response);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/download_updates_command.h b/chrome/browser/sync/engine/download_updates_command.h
new file mode 100644
index 0000000..2f48cb8
--- /dev/null
+++ b/chrome/browser/sync/engine/download_updates_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+
+namespace browser_sync {
+
// Downloads updates from the server and places them in the SyncerSession.
class DownloadUpdatesCommand : public SyncerCommand {
 public:
  DownloadUpdatesCommand();
  virtual ~DownloadUpdatesCommand();
  // Posts a GET_UPDATES request for the session's account and stores the
  // server response on |session|; a no-op on lookup or network failure.
  virtual void ExecuteImpl(SyncerSession *session);

 private:
  DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesCommand);
};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_DOWNLOAD_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.cc b/chrome/browser/sync/engine/get_commit_ids_command.cc
new file mode 100644
index 0000000..612b40c
--- /dev/null
+++ b/chrome/browser/sync/engine/get_commit_ids_command.cc
@@ -0,0 +1,242 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+
// |commit_batch_size| caps how many items one commit batch may contain.
GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
    : requested_commit_batch_size_(commit_batch_size) {}

GetCommitIdsCommand::~GetCommitIdsCommand() {}
+
+void GetCommitIdsCommand::ExecuteImpl(SyncerSession *session) {
+ // Gather the full set of unsynced items and store it in the session.
+ // They are not in the correct order for commit.
+ syncable::Directory::UnsyncedMetaHandles all_unsynced_handles;
+ SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
+ &all_unsynced_handles);
+ session->set_unsynced_handles(all_unsynced_handles);
+
+ BuildCommitIds(session);
+
+ const vector<syncable::Id>& verified_commit_ids =
+ ordered_commit_set_.GetCommitIds();
+
+ for (size_t i = 0; i < verified_commit_ids.size(); i++)
+ LOG(INFO) << "Debug commit batch result:" << verified_commit_ids[i];
+
+ session->set_commit_ids(verified_commit_ids);
+}
+
// Walks upward from |parent_id|, collecting every ancestor the server does
// not yet know about (plus each ancestor's unsynced predecessors), then
// appends them to the commit set in root-to-leaf order so parent creates
// always precede their children in the batch.
void GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Id parent_id) {
  using namespace syncable;
  OrderedCommitSet item_dependencies;

  // Climb the tree adding entries leaf -> root.
  while (!parent_id.ServerKnows()) {
    Entry parent(trans, GET_BY_ID, parent_id);
    CHECK(parent.good()) << "Bad user-only parent in item path.";
    int64 handle = parent.Get(META_HANDLE);
    if (ordered_commit_set_.HaveCommitItem(handle) ||
        item_dependencies.HaveCommitItem(handle)) {
      break;
    }
    if (!AddItemThenPredecessors(trans, &parent, IS_UNSYNCED,
                                 &item_dependencies)) {
      break;  // Parent was already present in the set.
    }
    parent_id = parent.Get(PARENT_ID);
  }

  // Reverse what we added to get the correct order.
  ordered_commit_set_.AppendReverse(item_dependencies);
}
+
+bool GetCommitIdsCommand::AddItem(syncable::Entry* item,
+ OrderedCommitSet* result) {
+ int64 item_handle = item->Get(syncable::META_HANDLE);
+ if (result->HaveCommitItem(item_handle) ||
+ ordered_commit_set_.HaveCommitItem(item_handle)) {
+ return false;
+ }
+ result->AddCommitItem(item_handle, item->Get(syncable::ID));
+ return true;
+}
+
// Adds |item| to |result|, then walks its chain of predecessors (via
// PREV_ID) adding each one that matches |inclusion_filter|, stopping at the
// first non-matching or already-added entry.  Returns false if |item|
// itself was already present.  Deleted items contribute no predecessors.
bool GetCommitIdsCommand::AddItemThenPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    OrderedCommitSet* result) {
  if (!AddItem(item, result))
    return false;
  if (item->Get(syncable::IS_DEL))
    return true;  // Deleted items have no predecessors.

  syncable::Id prev_id = item->Get(syncable::PREV_ID);
  while (!prev_id.IsRoot()) {
    syncable::Entry prev(trans, syncable::GET_BY_ID, prev_id);
    CHECK(prev.good()) << "Bad id when walking predecessors.";
    if (!prev.Get(inclusion_filter))
      break;
    if (!AddItem(&prev, result))
      break;
    prev_id = prev.Get(syncable::PREV_ID);
  }
  return true;
}
+
+void GetCommitIdsCommand::AddPredecessorsThenItem(
+ syncable::BaseTransaction* trans,
+ syncable::Entry* item,
+ syncable::IndexedBitField inclusion_filter) {
+ OrderedCommitSet item_dependencies;
+ AddItemThenPredecessors(trans, item, inclusion_filter, &item_dependencies);
+
+ // Reverse what we added to get the correct order.
+ ordered_commit_set_.AppendReverse(item_dependencies);
+}
+
+bool GetCommitIdsCommand::IsCommitBatchFull() {
+ return ordered_commit_set_.Size() >= requested_commit_batch_size_;
+}
+
// Fills the commit set with unsynced creates and moves, prepending each
// item's uncommitted parents so parents always commit before children.
// Stops pulling new items once the batch is full, then truncates any
// overshoot caused by dependency expansion.
void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession *session) {
  // Add moves and creates, and prepend their uncommitted parents.
  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();

    syncable::Entry entry(session->write_transaction(),
                          syncable::GET_BY_HANDLE,
                          metahandle);
    if (!entry.Get(syncable::IS_DEL)) {
      AddUncommittedParentsAndTheirPredecessors(
          session->write_transaction(), entry.Get(syncable::PARENT_ID));
      AddPredecessorsThenItem(session->write_transaction(), &entry,
                              syncable::IS_UNSYNCED);
    }
  }

  // It's possible that we overcommitted while trying to expand dependent
  // items. If so, truncate the set down to the allowed size.
  ordered_commit_set_.Truncate(requested_commit_batch_size_);
}
+
// Appends deleted items to the commit set.  Two passes: the first records
// which parents can legally have deleted children committed (skipping
// children of locally deleted, unsynced parents -- the server unrolls those
// deletes itself -- but manually adding moved-then-deleted entries the
// unroll would miss); the second adds the deletions whose parents
// qualified.
void GetCommitIdsCommand::AddDeletes(SyncerSession *session) {
  set<syncable::Id> legal_delete_parents;

  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();

    syncable::Entry entry(session->write_transaction(),
                          syncable::GET_BY_HANDLE,
                          metahandle);

    if (entry.Get(syncable::IS_DEL)) {
      syncable::Entry parent(session->write_transaction(),
                             syncable::GET_BY_ID,
                             entry.Get(syncable::PARENT_ID));
      // If the parent is deleted and unsynced, then any children of that
      // parent don't need to be added to the delete queue.
      //
      // Note: the parent could be synced if there was an update deleting a
      // folder when we had a deleted all items in it.
      // We may get more updates, or we may want to delete the entry.
      if (parent.good() &&
          parent.Get(syncable::IS_DEL) &&
          parent.Get(syncable::IS_UNSYNCED)) {
        // However, if an entry is moved, these rules can apply differently.
        //
        // If the entry was moved, then the destination parent was deleted,
        // then we'll miss it in the roll up. We have to add it in manually.
        // TODO(chron): Unit test for move / delete cases:
        // Case 1: Locally moved, then parent deleted
        // Case 2: Server moved, then locally issue recursive delete.
        if (entry.Get(syncable::ID).ServerKnows() &&
            entry.Get(syncable::PARENT_ID) !=
                entry.Get(syncable::SERVER_PARENT_ID)) {
          LOG(INFO) << "Inserting moved and deleted entry, will be missed by"
              " delete roll." << entry.Get(syncable::ID);

          ordered_commit_set_.AddCommitItem(metahandle,
              entry.Get(syncable::ID));
        }

        // Skip this entry since it's a child of a parent that will be
        // deleted. The server will unroll the delete and delete the
        // child as well.
        continue;
      }

      legal_delete_parents.insert(entry.Get(syncable::PARENT_ID));
    }
  }

  // We could store all the potential entries with a particular parent during
  // the above scan, but instead we rescan here. This is less efficient, but
  // we're dropping memory alloc/dealloc in favor of linear scans of recently
  // examined entries.
  //
  // Scan through the UnsyncedMetaHandles again. If we have a deleted
  // entry, then check if the parent is in legal_delete_parents.
  //
  // Parent being in legal_delete_parents means for the child:
  //   a recursive delete is not currently happening (no recent deletes in
  //   same folder)
  //   parent did expect at least one old deleted child
  //   parent was not deleted
  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();
    syncable::MutableEntry entry(session->write_transaction(),
                                 syncable::GET_BY_HANDLE,
                                 metahandle);
    if (entry.Get(syncable::IS_DEL)) {
      syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
      if (legal_delete_parents.count(parent_id)) {
        ordered_commit_set_.AddCommitItem(metahandle, entry.Get(syncable::ID));
      }
    }
  }
}
+
// Populates ordered_commit_set_ from the session's unsynced items.
// Commits follow these rules:
// 1. Moves or creates are preceded by needed folder creates, from
//    root to leaf. For folders whose contents are ordered, moves
//    and creates appear in order.
// 2. Moves/Creates before deletes.
// 3. Deletes, collapsed.
// We commit deleted moves under deleted items as moves when collapsing
// delete trees.
void GetCommitIdsCommand::BuildCommitIds(SyncerSession *session) {
  // Add moves and creates, and prepend their uncommitted parents.
  AddCreatesAndMoves(session);

  // Add all deletes.
  AddDeletes(session);
}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.h b/chrome/browser/sync/engine/get_commit_ids_command.h
new file mode 100644
index 0000000..2d80a04
--- /dev/null
+++ b/chrome/browser/sync/engine/get_commit_ids_command.h
@@ -0,0 +1,202 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
+
+#include <vector>
+#include <utility>
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::pair;
+using std::vector;
+
+namespace browser_sync {
+
+// Computes the set of unsynced entries to include in the next commit,
+// ordered so that parents precede children and creates/moves precede
+// deletes (see BuildCommitIds in the .cc file).
+class GetCommitIdsCommand : public SyncerCommand {
+  friend class SyncerTest;
+
+ public:
+  explicit GetCommitIdsCommand(int commit_batch_size);
+  virtual ~GetCommitIdsCommand();
+
+  virtual void ExecuteImpl(SyncerSession *session);
+
+  // Returns a vector of IDs that should be committed.
+  void BuildCommitIds(SyncerSession *session);
+
+  // These classes are public for testing.
+  // TODO(ncarter): This code is more generic than just Commit and can
+  // be reused elsewhere (e.g. PositionalRunBuilder, ChangeReorderBuffer
+  // do similar things). Merge all these implementations.
+  //
+  // An insertion-ordered, duplicate-free collection of
+  // (metahandle, commit Id) items.
+  class OrderedCommitSet {
+   public:
+    // TODO(chron): Reserve space according to batch size?
+    OrderedCommitSet() {}
+    ~OrderedCommitSet() {}
+
+    // True if |metahandle| has already been added.
+    bool HaveCommitItem(const int64 metahandle) const {
+      return inserted_metahandles_.count(metahandle) > 0;
+    }
+
+    // Appends the item; duplicates are ignored (first insertion wins).
+    void AddCommitItem(const int64 metahandle, const syncable::Id& commit_id) {
+      if (!HaveCommitItem(metahandle)) {
+        inserted_metahandles_.insert(metahandle);
+        metahandle_order_.push_back(metahandle);
+        commit_ids_.push_back(commit_id);
+      }
+    }
+
+    const vector<syncable::Id>& GetCommitIds() const {
+      return commit_ids_;
+    }
+
+    // Returns the (metahandle, Id) pair at |position| in insertion order.
+    pair<int64, syncable::Id> GetCommitItemAt(const int position) const {
+      DCHECK(position < Size());
+      return pair<int64, syncable::Id> (
+          metahandle_order_[position], commit_ids_[position]);
+    }
+
+    int Size() const {
+      return commit_ids_.size();
+    }
+
+    // Adds |other|'s items in reverse order; duplicates are dropped by
+    // AddCommitItem.
+    void AppendReverse(const OrderedCommitSet& other) {
+      for (int i = other.Size() - 1; i >= 0; i--) {
+        pair<int64, syncable::Id> item = other.GetCommitItemAt(i);
+        AddCommitItem(item.first, item.second);
+      }
+    }
+
+    // Drops every item past the first |max_size|, keeping all three views
+    // consistent.
+    void Truncate(size_t max_size) {
+      if (max_size < metahandle_order_.size()) {
+        for (size_t i = max_size; i < metahandle_order_.size(); ++i) {
+          inserted_metahandles_.erase(metahandle_order_[i]);
+        }
+        commit_ids_.resize(max_size);
+        metahandle_order_.resize(max_size);
+      }
+    }
+
+   private:
+    // These three lists are different views of the same data; e.g they are
+    // isomorphic.
+    syncable::MetahandleSet inserted_metahandles_;
+    vector<syncable::Id> commit_ids_;
+    vector<int64> metahandle_order_;
+
+    DISALLOW_COPY_AND_ASSIGN(OrderedCommitSet);
+  };
+
+
+  // TODO(chron): Remove writes from this iterator. As a warning, this
+  // iterator causes writes to entries and so isn't a pure iterator.
+  // It will do Put(IS_UNSYNCED) as well as add things to the blocked
+  // session list. Refactor this out later.
+  //
+  // Iterates over the session's unsynced handles, skipping items that are
+  // already in |commit_set| or that fail commit validation.
+  class CommitMetahandleIterator {
+   public:
+    // TODO(chron): Cache ValidateCommitEntry responses across iterators to save
+    // UTF8 conversion and filename checking
+    CommitMetahandleIterator(SyncerSession* session,
+                             OrderedCommitSet* commit_set)
+        : session_(session),
+          commit_set_(commit_set) {
+      handle_iterator_ = session->unsynced_handles().begin();
+
+      // TODO(chron): Remove writes from this iterator.
+      DCHECK(session->has_open_write_transaction());
+
+      // Advance past a leading handle that fails validation so Current()
+      // is immediately usable when Valid().
+      if (Valid() && !ValidateMetahandleForCommit(*handle_iterator_))
+        Increment();
+    }
+    ~CommitMetahandleIterator() {}
+
+    int64 Current() const {
+      DCHECK(Valid());
+      return *handle_iterator_;
+    }
+
+    // Advances to the next commit-worthy handle; returns false when the
+    // unsynced-handle list is exhausted.
+    bool Increment() {
+      if (!Valid())
+        return false;
+
+      for (++handle_iterator_;
+           handle_iterator_ != session_->unsynced_handles().end();
+           ++handle_iterator_) {
+        if (ValidateMetahandleForCommit(*handle_iterator_))
+          return true;
+      }
+
+      return false;
+    }
+
+    bool Valid() const {
+      return !(handle_iterator_ == session_->unsynced_handles().end());
+    }
+
+   private:
+    // Returns true when |metahandle| should be offered for commit: not
+    // already in the commit set and VERIFY_OK per ValidateCommitEntry.
+    bool ValidateMetahandleForCommit(int64 metahandle) {
+      if (commit_set_->HaveCommitItem(metahandle))
+        return false;
+
+      // We should really not WRITE in this iterator, but we can fix that
+      // later. ValidateCommitEntry writes to the DB, and we add the
+      // blocked items. We should move that somewhere else later.
+      syncable::MutableEntry entry(session_->write_transaction(),
+                                   syncable::GET_BY_HANDLE, metahandle);
+      VerifyCommitResult verify_result =
+          SyncerUtil::ValidateCommitEntry(&entry);
+      if (verify_result == VERIFY_BLOCKED) {
+        session_->AddBlockedItem(entry.Get(syncable::ID));  // TODO(chron): Ew.
+      } else if (verify_result == VERIFY_UNSYNCABLE) {
+        // Drop unsyncable entries.
+        entry.Put(syncable::IS_UNSYNCED, false);
+      }
+      return verify_result == VERIFY_OK;
+    }
+
+    SyncerSession* session_;
+    vector<int64>::const_iterator handle_iterator_;
+    OrderedCommitSet* commit_set_;
+
+    DISALLOW_COPY_AND_ASSIGN(CommitMetahandleIterator);
+  };
+
+ private:
+  void AddUncommittedParentsAndTheirPredecessors(
+      syncable::BaseTransaction* trans,
+      syncable::Id parent_id);
+
+  // OrderedCommitSet helpers for adding predecessors in order.
+  // TODO(ncarter): Refactor these so that the |result| parameter goes
+  // away, and AddItem doesn't need to consider two OrderedCommitSets.
+  bool AddItem(syncable::Entry* item, OrderedCommitSet* result);
+  bool AddItemThenPredecessors(syncable::BaseTransaction* trans,
+                               syncable::Entry* item,
+                               syncable::IndexedBitField inclusion_filter,
+                               OrderedCommitSet* result);
+  void AddPredecessorsThenItem(syncable::BaseTransaction* trans,
+                               syncable::Entry* item,
+                               syncable::IndexedBitField inclusion_filter);
+
+  // Presumably true once ordered_commit_set_ reaches
+  // requested_commit_batch_size_ -- confirm in the .cc implementation.
+  bool IsCommitBatchFull();
+
+  void AddCreatesAndMoves(SyncerSession *session);
+
+  void AddDeletes(SyncerSession *session);
+
+  OrderedCommitSet ordered_commit_set_;
+
+  int requested_commit_batch_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/model_changing_syncer_command.cc b/chrome/browser/sync/engine/model_changing_syncer_command.cc
new file mode 100644
index 0000000..09b0782
--- /dev/null
+++ b/chrome/browser/sync/engine/model_changing_syncer_command.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/closure.h"
+
+namespace browser_sync {
+
+// Stashes |session| and dispatches StartChangingModel to the model-safe
+// worker, which runs it and waits for completion (per DoWorkAndWaitUntilDone).
+void ModelChangingSyncerCommand::ExecuteImpl(SyncerSession *session) {
+  work_session_ = session;
+  // NOTE(review): assumes the worker (or callback machinery) owns and frees
+  // the callback created here -- confirm NewCallback's ownership contract.
+  session->model_safe_worker()->DoWorkAndWaitUntilDone(
+      NewCallback(this, &ModelChangingSyncerCommand::StartChangingModel));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/model_changing_syncer_command.h b/chrome/browser/sync/engine/model_changing_syncer_command.h
new file mode 100644
index 0000000..32361090
--- /dev/null
+++ b/chrome/browser/sync/engine/model_changing_syncer_command.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+// An abstract SyncerCommand which dispatches its Execute step to the
+// model-safe worker thread. Classes derived from ModelChangingSyncerCommand
+// instead of SyncerCommand must implement ModelChangingExecuteImpl instead of
+// ExecuteImpl, but otherwise, the contract is the same.
+//
+// A command should derive from ModelChangingSyncerCommand instead of
+// SyncerCommand whenever the operation might change any client-visible
+// fields on any syncable::Entry. If the operation involves creating a
+// WriteTransaction, this is a sign that ModelChangingSyncerCommand is likely
+// necessary.
+class ModelChangingSyncerCommand : public SyncerCommand {
+ public:
+  ModelChangingSyncerCommand() : work_session_(NULL) { }
+  virtual ~ModelChangingSyncerCommand() { }
+
+  // SyncerCommand implementation. Sets work_session to session, then
+  // dispatches StartChangingModel to the model-safe worker.
+  virtual void ExecuteImpl(SyncerSession* session);
+
+  // Wrapper so implementations don't worry about storing work_session.
+  // Runs on the model-safe thread via ExecuteImpl's dispatch.
+  void StartChangingModel() {
+    ModelChangingExecuteImpl(work_session_);
+  }
+
+  // Abstract method to be implemented by subclasses.
+  virtual void ModelChangingExecuteImpl(SyncerSession* session) = 0;
+
+ private:
+  // ExecuteImpl is expected to be run by SyncerCommand to set work_session.
+  // StartChangingModel is called to start this command running.
+  // Implementations will implement ModelChangingExecuteImpl and not
+  // worry about storing the session or setting it. They are given work_session.
+  SyncerSession* work_session_;
+
+  DISALLOW_COPY_AND_ASSIGN(ModelChangingSyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_MODEL_CHANGING_SYNCER_COMMAND_H_
diff --git a/chrome/browser/sync/engine/model_safe_worker.h b/chrome/browser/sync/engine/model_safe_worker.h
new file mode 100644
index 0000000..ff470ac
--- /dev/null
+++ b/chrome/browser/sync/engine/model_safe_worker.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
+
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+// The Syncer uses a ModelSafeWorker for all tasks that could potentially
+// modify syncable entries (e.g under a WriteTransaction). The ModelSafeWorker
+// only knows how to do one thing, and that is take some work (in a fully
+// pre-bound callback) and have it performed (as in Run()) from a thread which
+// is guaranteed to be "model-safe", where "safe" refers to not allowing us to
+// cause an embedding application model to fall out of sync with the
+// syncable::Directory due to a race.
+class ModelSafeWorker {
+ public:
+  ModelSafeWorker() { }
+  virtual ~ModelSafeWorker() { }
+
+  // Any time the Syncer performs model modifications (e.g employing a
+  // WriteTransaction), it should be done by this method to ensure it is done
+  // from a model-safe thread.
+  //
+  // TODO(timsteele): For now this is non-reentrant, meaning the work being
+  // done should be at a high enough level in the stack that
+  // DoWorkAndWaitUntilDone won't be called again by invoking Run() on |work|.
+  // This is not strictly necessary; it may be best to call
+  // DoWorkAndWaitUntilDone at lower levels, such as within ApplyUpdates, but
+  // this is sufficient to simplify and test out our dispatching approach.
+  //
+  // Subclasses presumably override this to marshal |work| onto a dedicated
+  // thread or loop; the base implementation just runs it inline.
+  virtual void DoWorkAndWaitUntilDone(Closure* work) {
+    work->Run();  // By default, do the work on the current thread.
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ModelSafeWorker);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_MODEL_SAFE_WORKER_H_
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator.cc b/chrome/browser/sync/engine/net/gaia_authenticator.cc
new file mode 100644
index 0000000..7276cec
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator.cc
@@ -0,0 +1,483 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/net/url_translator.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "googleurl/src/gurl.h"
+
+using std::pair;
+using std::string;
+using std::vector;
+
+// TODO(timsteele): Integrate the following two functions to string_util.h or
+// somewhere that makes them unit-testable.
+// Splits |line| at the first run of |key_value_delimiter| characters into
+// |key| and a single-element |values| vector. Returns false (with outputs
+// cleared/partial) when either part is missing.
+bool SplitStringIntoKeyValues(const string& line,
+                              char key_value_delimiter,
+                              string* key, vector<string>* values) {
+  key->clear();
+  values->clear();
+
+  // Find the key string. Use string::size_type rather than int: find
+  // returns string::npos (a size_t value) on failure, and storing it in an
+  // int only compared equal to npos by accident of -1 conversion.
+  const string::size_type end_key_pos =
+      line.find_first_of(key_value_delimiter);
+  if (end_key_pos == string::npos) {
+    DLOG(INFO) << "cannot parse key from line: " << line;
+    return false;  // no key
+  }
+  key->assign(line, 0, end_key_pos);
+
+  // Find the values string, skipping any repeated delimiters.
+  const string remains(line, end_key_pos, line.size() - end_key_pos);
+  const string::size_type begin_values_pos =
+      remains.find_first_not_of(key_value_delimiter);
+  if (begin_values_pos == string::npos) {
+    DLOG(INFO) << "cannot parse value from line: " << line;
+    return false;  // no value
+  }
+  string values_string(remains, begin_values_pos,
+                       remains.size() - begin_values_pos);
+
+  // Construct the values vector (currently always a single element).
+  values->push_back(values_string);
+  return true;
+}
+
+// Splits |line| on |key_value_pair_delimiter| into chunks, then splits each
+// chunk on |key_value_delimiter| into a (key, value) pair. Keys without a
+// value are still recorded (with an empty value), but the return becomes
+// false to signal the partial parse.
+bool SplitStringIntoKeyValuePairs(const string& line,
+                                  char key_value_delimiter,
+                                  char key_value_pair_delimiter,
+                                  vector<pair<string, string> >* kv_pairs) {
+  kv_pairs->clear();
+
+  vector<string> chunks;
+  SplitString(line, key_value_pair_delimiter, &chunks);
+
+  bool success = true;
+  for (size_t index = 0; index < chunks.size(); ++index) {
+    string key;
+    vector<string> value;
+    if (!SplitStringIntoKeyValues(chunks[index], key_value_delimiter,
+                                  &key, &value)) {
+      // Don't return here, to allow for keys without associated
+      // values; just record that our split failed.
+      success = false;
+    }
+    DCHECK_LE(value.size(), 1);
+    kv_pairs->push_back(make_pair(key, value.empty() ? "" : value[0]));
+  }
+  return success;
+}
+
+namespace browser_sync {
+
+// Gaia V1 URL paths used by IssueAuthToken and LookupEmail below.
+static const char kGaiaV1IssueAuthTokenPath[] = "/accounts/IssueAuthToken";
+
+static const char kGetUserInfoPath[] = "/accounts/GetUserInfo";
+
+// Sole constructor with initializers for all fields.
+GaiaAuthenticator::GaiaAuthenticator(const string& user_agent,
+                                     const string& service_id,
+                                     const string& gaia_url)
+    : user_agent_(user_agent),
+      service_id_(service_id),
+      gaia_url_(gaia_url),
+      request_count_(0),
+      early_auth_attempt_count_(0),
+      delay_(0),
+      next_allowed_auth_attempt_time_(0) {
+  // The channel's terminal event announces our destruction to listeners.
+  GaiaAuthEvent done = { GaiaAuthEvent::GAIA_AUTHENTICATOR_DESTROYED, None,
+                         this };
+  channel_ = new Channel(done);  // Owned; deleted in the destructor.
+}
+
+GaiaAuthenticator::~GaiaAuthenticator() {
+  // channel_ is allocated in the constructor and owned by this object.
+  delete channel_;
+}
+
+// Runs authentication either synchronously on this thread or on a freshly
+// spawned background thread. In the asynchronous case the return value only
+// reflects whether the thread was created; the result is delivered via the
+// event channel.
+bool GaiaAuthenticator::LaunchAuthenticate(const AuthParams& params,
+                                           bool synchronous) {
+  if (synchronous)
+    return AuthenticateImpl(params);
+  // Heap-allocate a copy for the worker thread; ThreadMain takes ownership
+  // and deletes it.
+  AuthParams* copy = new AuthParams;
+  *copy = params;
+  pthread_t thread_id;
+  int result = pthread_create(&thread_id, 0, &GaiaAuthenticator::ThreadMain,
+                              copy);
+  if (result) {
+    // Thread creation failed, so ThreadMain will never run; reclaim the
+    // copy here (the previous code leaked it on this path).
+    delete copy;
+    return false;
+  }
+  return true;
+}
+
+
+// Entry point for the background authentication thread. |arg| is a
+// heap-allocated AuthParams created by LaunchAuthenticate; ownership
+// transfers here and it is deleted after use.
+void* GaiaAuthenticator::ThreadMain(void* arg) {
+  NameCurrentThreadForDebugging("SyncEngine_GaiaAuthenticatorThread");
+  AuthParams* const params = reinterpret_cast<AuthParams*>(arg);
+  params->authenticator->AuthenticateImpl(*params);
+  delete params;
+  return 0;
+}
+
+// Snapshots the arguments into an AuthParams struct, stamping it with a
+// fresh request id (increments request_count_, hence the locking rule;
+// the id is later compared against request_count_ to detect stale results).
+// mutex_ must be entered before calling this function.
+GaiaAuthenticator::AuthParams GaiaAuthenticator::MakeParams(
+    const string& user_name,
+    const string& password,
+    SaveCredentials should_save_credentials,
+    const string& captcha_token,
+    const string& captcha_value,
+    SignIn try_first) {
+  AuthParams params;
+  params.request_id = ++request_count_;
+  params.email = user_name;
+  params.password = password;
+  params.should_save_credentials = should_save_credentials;
+  params.captcha_token = captcha_token;
+  params.captcha_value = captcha_value;
+  params.authenticator = this;
+  params.try_first = try_first;
+  return params;
+}
+
+// Full-argument authentication entry point (with captcha data). Snapshots
+// the parameters under the lock, then launches synchronously or on a
+// background thread per |synchronous|.
+bool GaiaAuthenticator::Authenticate(const string& user_name,
+                                     const string& password,
+                                     SaveCredentials should_save_credentials,
+                                     bool synchronous,
+                                     const string& captcha_token,
+                                     const string& captcha_value,
+                                     SignIn try_first) {
+  AuthParams params;
+  {
+    // Hold mutex_ while calling MakeParams (it bumps request_count_). Use
+    // the RAII scoped lock for exception safety, consistent with the other
+    // methods in this file, instead of bare Lock()/Unlock() calls.
+    PThreadScopedLock<PThreadMutex> enter(&mutex_);
+    params = MakeParams(user_name, password, should_save_credentials,
+                        captcha_token, captcha_value, try_first);
+  }
+  return LaunchAuthenticate(params, synchronous);
+}
+
+// Runs the authentication, and -- if this request is still the most recent
+// one -- commits the results and notifies channel listeners.
+bool GaiaAuthenticator::AuthenticateImpl(const AuthParams& params) {
+  AuthResults results;
+  const bool succeeded = AuthenticateImpl(params, &results);
+  mutex_.Lock();
+  if (params.request_id == request_count_) {
+    // Still the latest request: publish the results.
+    auth_results_ = results;
+    GaiaAuthEvent event = { succeeded ? GaiaAuthEvent::GAIA_AUTH_SUCCEEDED
+                                      : GaiaAuthEvent::GAIA_AUTH_FAILED,
+                            results.auth_error, this };
+    // Unlock *before* notifying -- presumably so listeners may re-enter
+    // this object without deadlocking; confirm before restructuring.
+    mutex_.Unlock();
+    channel_->NotifyListeners(event);
+  } else {
+    // A newer request superseded this one; drop the stale result silently.
+    mutex_.Unlock();
+  }
+  return succeeded;
+}
+
+// This method makes an HTTP request to the Gaia server, and calls other
+// methods to help parse the response. If authentication succeeded, then
+// Gaia-issued cookies are available in the respective variables; if
+// authentication failed, then the exact error is available as an enum. If the
+// client wishes to save the credentials, the last parameter must be true.
+// If a subsequent request is made with fresh credentials, the saved credentials
+// are wiped out; any subsequent request to the zero-parameter overload of this
+// method preserves the saved credentials.
+bool GaiaAuthenticator::AuthenticateImpl(const AuthParams& params,
+                                         AuthResults* results) {
+  results->credentials_saved = params.should_save_credentials;
+  results->auth_error = ConnectionUnavailable;
+  // Save credentials if so requested.
+  if (params.should_save_credentials != DONT_SAVE_CREDENTIALS) {
+    results->email = params.email.data();
+    results->password = params.password;
+  } else {  // Explicitly clear previously-saved credentials.
+    results->email = "";
+    results->password = "";
+  }
+
+  // The aim of this code is to start failing requests if due to a logic error
+  // in the program we're hammering GAIA.
+  time_t now = time(0);
+  if (now > next_allowed_auth_attempt_time_) {
+    // If we're more than 2 minutes past the allowed time we reset the early
+    // attempt count. This must compare against the allowed time *before*
+    // updating it: the previous code assigned next_allowed_auth_attempt_time_
+    // first, making the difference always -1 so the reset never fired.
+    if (now - next_allowed_auth_attempt_time_ > 2 * 60) {
+      delay_ = 1;
+      early_auth_attempt_count_ = 0;
+    }
+    next_allowed_auth_attempt_time_ = now + 1;
+  } else {
+    ++early_auth_attempt_count_;
+    // Allow 3 attempts, but then limit.
+    if (early_auth_attempt_count_ > 3) {
+      delay_ = AllStatus::GetRecommendedDelaySeconds(delay_);
+      next_allowed_auth_attempt_time_ = now + delay_;
+      return false;
+    }
+  }
+
+  return PerformGaiaRequest(params, results);
+}
+
+// Issues the ClientLogin POST and dispatches on the HTTP response code:
+// 403 -> parse error fields into |results|; 200 -> parse tokens, possibly
+// exchange the LSID for a long-lived token, then look up account info.
+bool GaiaAuthenticator::PerformGaiaRequest(const AuthParams& params,
+                                           AuthResults* results) {
+  GURL gaia_auth_url(gaia_url_);
+
+  string post_body;
+  post_body += "Email=" + CgiEscapeString(params.email);
+  post_body += "&Passwd=" + CgiEscapeString(params.password);
+  post_body += "&source=" + CgiEscapeString(user_agent_);
+  post_body += "&service=" + service_id_;
+  if (!params.captcha_token.empty() && !params.captcha_value.empty()) {
+    post_body += "&logintoken=" + CgiEscapeString(params.captcha_token);
+    post_body += "&logincaptcha=" + CgiEscapeString(params.captcha_value);
+  }
+  post_body += "&PersistentCookie=true";
+  // We set it to GOOGLE (and not HOSTED or HOSTED_OR_GOOGLE) because we only
+  // allow consumer logins.
+  post_body += "&accountType=GOOGLE";
+
+  string message_text;
+  unsigned long server_response_code;
+  if (!Post(gaia_auth_url, post_body, &server_response_code,
+            &message_text)) {
+    results->auth_error = ConnectionUnavailable;
+    return false;
+  }
+
+  // Parse reply in two different ways, depending on if request failed or
+  // succeeded.
+  if (RC_FORBIDDEN == server_response_code) {
+    ExtractAuthErrorFrom(message_text, results);
+    return false;
+  } else if (RC_REQUEST_OK == server_response_code) {
+    ExtractTokensFrom(message_text, results);
+    // Old-style Gaia replies carry an LSID but no Auth token; in that case
+    // (or when the caller wants a persisted, long-lived token) exchange the
+    // LSID via IssueAuthToken.
+    const bool old_gaia =
+        results->auth_token.empty() && !results->lsid.empty();
+    const bool long_lived_token =
+        params.should_save_credentials == PERSIST_TO_DISK;
+    if ((old_gaia || long_lived_token) &&
+        !IssueAuthToken(results, service_id_, long_lived_token))
+      return false;
+
+    return LookupEmail(results);
+  } else {
+    results->auth_error = Unknown;
+    return false;
+  }
+}
+
+// Queries Gaia's GetUserInfo endpoint with the LSID and records the account
+// type and primary email address in |results|.
+bool GaiaAuthenticator::LookupEmail(AuthResults* results) {
+  // Use the provided Gaia server, but change the path to what V1 expects.
+  GURL url(gaia_url_);  // Gaia server
+  GURL::Replacements repl;
+  // Needs to stay in scope till GURL is out of scope.
+  string path(kGetUserInfoPath);
+  repl.SetPathStr(path);
+  url = url.ReplaceComponents(repl);
+
+  string post_body;
+  post_body += "LSID=";
+  post_body += CgiEscapeString(results->lsid);
+
+  unsigned long server_response_code;
+  string message_text;
+  if (!Post(url, post_body, &server_response_code, &message_text)) {
+    return false;
+  }
+
+  // Check if we received a valid AuthToken; if not, ignore it.
+  if (RC_FORBIDDEN == server_response_code) {
+    // Server says we're not authenticated.
+    ExtractAuthErrorFrom(message_text, results);
+    return false;
+  } else if (RC_REQUEST_OK == server_response_code) {
+    // Reply is "key=value" lines; pick out accountType and email.
+    typedef vector<pair<string, string> > Tokens;
+    Tokens tokens;
+    SplitStringIntoKeyValuePairs(message_text, '=', '\n', &tokens);
+    for (Tokens::iterator i = tokens.begin(); i != tokens.end(); ++i) {
+      if ("accountType" == i->first) {
+        // We never authenticate an email as a hosted account.
+        DCHECK_EQ("GOOGLE", i->second);
+        results->signin = GMAIL_SIGNIN;
+      } else if ("email" == i->first) {
+        results->primary_email = i->second;
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
+// We need to call this explicitly when we need to obtain a long-lived session
+// token.
+// Exchanges the LSID in |results| for a service auth token via the Gaia V1
+// IssueAuthToken endpoint; |long_lived| requests a session token.
+bool GaiaAuthenticator::IssueAuthToken(AuthResults* results,
+                                       const string& service_id,
+                                       bool long_lived) {
+  // Use the provided Gaia server, but change the path to what V1 expects.
+  GURL url(gaia_url_);  // Gaia server
+  GURL::Replacements repl;
+  // Needs to stay in scope till GURL is out of scope.
+  string path(kGaiaV1IssueAuthTokenPath);
+  repl.SetPathStr(path);
+  url = url.ReplaceComponents(repl);
+
+  string post_body;
+  post_body += "LSID=";
+  post_body += CgiEscapeString(results->lsid);
+  post_body += "&service=" + service_id;
+  if (long_lived) {
+    post_body += "&Session=true";
+  }
+
+  unsigned long server_response_code;
+  string message_text;
+  if (!Post(url, post_body,
+            &server_response_code, &message_text)) {
+    return false;
+  }
+
+  // Check if we received a valid AuthToken; if not, ignore it.
+  if (RC_FORBIDDEN == server_response_code) {
+    // Server says we're not authenticated.
+    ExtractAuthErrorFrom(message_text, results);
+    return false;
+  } else if (RC_REQUEST_OK == server_response_code) {
+    // Note that the format of message_text is different from what is returned
+    // in the first request, or to the sole request that is made to Gaia V2.
+    // Specifically, the entire string is the AuthToken, and looks like:
+    // "<token>" rather than "AuthToken=<token>". Thus, we need not use
+    // ExtractTokensFrom(...), but simply assign the token.
+    //
+    // Strip one trailing newline, guarding against an empty body: the old
+    // "length() - 1" indexing was out of bounds when message_text was empty.
+    if (!message_text.empty() &&
+        '\n' == message_text[message_text.size() - 1]) {
+      message_text.erase(message_text.size() - 1);
+    }
+    results->auth_token = message_text;
+    return true;
+  }
+  return false;
+}
+
+// Issues a long-lived auth token for |service_id| using an existing
+// sid/lsid pair, returning it through |other_service_cookie|.
+//
+// TODO(sync): This passing around of AuthResults makes it really unclear who
+// actually owns the authentication state and when it is valid, but this is
+// endemic to this implementation. We should fix this.
+bool GaiaAuthenticator::AuthenticateService(const string& service_id,
+                                            const string& sid,
+                                            const string& lsid,
+                                            string* other_service_cookie) {
+  // Copy the AuthResults structure and overload the auth_token field
+  // in the copy, local_results, to mean the auth_token for service_id.
+  AuthResults local_results;
+  local_results.sid = sid;
+  local_results.lsid = lsid;
+
+  if (!IssueAuthToken(&local_results, service_id, true)) {
+    LOG(ERROR) << "[AUTH] Failed to obtain cookie for " << service_id;
+    return false;
+  }
+
+  // Hand the issued token to the caller without an extra string copy.
+  swap(*other_service_cookie, local_results.auth_token);
+  return true;
+}
+
+// Parses a successful ClientLogin reply ("key=value" per line) and stores
+// the SID, LSID, and Auth tokens into the matching |results| fields.
+void GaiaAuthenticator::ExtractTokensFrom(const string& response,
+                                          AuthResults* results) {
+  typedef vector<pair<string, string> > TokenList;
+  TokenList parsed;
+  SplitStringIntoKeyValuePairs(response, '=', '\n', &parsed);
+  for (size_t index = 0; index < parsed.size(); ++index) {
+    const string& name = parsed[index].first;
+    const string& token = parsed[index].second;
+    if ("SID" == name) {
+      results->sid = token;
+    } else if ("LSID" == name) {
+      results->lsid = token;
+    } else if ("Auth" == name) {
+      results->auth_token = token;
+    }
+  }
+}
+
+// Helper method that extracts tokens from a failure response, and saves them
+// in the right fields.
+void GaiaAuthenticator::ExtractAuthErrorFrom(const string& response,
+                                             AuthResults* results) {
+  vector<pair<string, string> > tokens;
+  SplitStringIntoKeyValuePairs(response, '=', '\n', &tokens);
+  for (vector<pair<string, string> >::iterator i = tokens.begin();
+       i != tokens.end(); ++i) {
+    if (i->first == "Error") {
+      results->error_msg = i->second;
+    } else if (i->first == "Url") {
+      results->auth_error_url = i->second;
+    } else if (i->first == "CaptchaToken") {
+      results->captcha_token = i->second;
+    } else if (i->first == "CaptchaUrl") {
+      results->captcha_url = i->second;
+    }
+  }
+
+  // Convert string error messages to enum values. Each case has two different
+  // strings; the first one is the most current and the second one is
+  // deprecated, but available.
+  const string& error_msg = results->error_msg;
+  if (error_msg == "BadAuthentication" || error_msg == "badauth") {
+    results->auth_error = BadAuthentication;
+  } else if (error_msg == "NotVerified" || error_msg == "nv") {
+    results->auth_error = NotVerified;
+  } else if (error_msg == "TermsNotAgreed" || error_msg == "tna") {
+    results->auth_error = TermsNotAgreed;
+  } else if (error_msg == "Unknown" || error_msg == "unknown") {
+    results->auth_error = Unknown;
+  } else if (error_msg == "AccountDeleted" || error_msg == "adel") {
+    results->auth_error = AccountDeleted;
+  } else if (error_msg == "AccountDisabled" || error_msg == "adis") {
+    results->auth_error = AccountDisabled;
+  } else if (error_msg == "CaptchaRequired" || error_msg == "cr") {
+    results->auth_error = CaptchaRequired;
+  } else if (error_msg == "ServiceUnavailable" || error_msg == "ire") {
+    results->auth_error = ServiceUnavailable;
+  }
+  // An unrecognized (or absent) error string leaves results->auth_error
+  // unchanged.
+}
+
+// Reset all stored credentials, perhaps in preparation for letting a different
+// user sign in.
+void GaiaAuthenticator::ResetCredentials() {
+  PThreadScopedLock<PThreadMutex> enter(&mutex_);
+  AuthResults blank;  // Default-constructed AuthResults == empty credentials.
+  auth_results_ = blank;
+}
+
+// Stores a username/password pair for later authentication attempts
+// (held in memory, in plain text -- see the header's security note).
+void GaiaAuthenticator::SetUsernamePassword(const string& username,
+                                            const string& password) {
+  PThreadScopedLock<PThreadMutex> enter(&mutex_);
+  auth_results_.password = password;
+  auth_results_.email = username;
+}
+
+// Overwrites only the stored email address, leaving other state intact.
+void GaiaAuthenticator::SetUsername(const string& username) {
+  PThreadScopedLock<PThreadMutex> enter(&mutex_);
+  auth_results_.email = username;
+}
+
+// Installs an externally-obtained auth token and records how the caller
+// wants the credentials persisted.
+void GaiaAuthenticator::SetAuthToken(const string& auth_token,
+                                     SaveCredentials save) {
+  PThreadScopedLock<PThreadMutex> enter(&mutex_);
+  auth_results_.auth_token = auth_token;
+  auth_results_.credentials_saved = save;
+}
+
+// Convenience overload: authenticate without captcha data.
+bool GaiaAuthenticator::Authenticate(const string& user_name,
+                                     const string& password,
+                                     SaveCredentials should_save_credentials,
+                                     bool synchronous, SignIn try_first) {
+  const string empty;
+  return Authenticate(user_name, password, should_save_credentials, synchronous,
+                      empty, empty, try_first);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator.h b/chrome/browser/sync/engine/net/gaia_authenticator.h
new file mode 100644
index 0000000..e18984c
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator.h
@@ -0,0 +1,304 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Use this class to authenticate users with Gaia and access cookies sent
+// by the Gaia servers.
+//
+// Sample usage:
+// GaiaAuthenticator gaia_auth("User-Agent", SYNC_SERVICE_NAME,
+// browser_sync::kExternalGaiaUrl);
+// if (gaia_auth.Authenticate("email", "passwd", SAVE_IN_MEMORY_ONLY,
+// true)) { // Synchronous
+// // Do something with: gaia_auth.auth_token(), or gaia_auth.sid(),
+// // or gaia_auth.lsid()
+// }
+//
+// Sample asynchronous usage:
+// GaiaAuthenticator gaia_auth("User-Agent", SYNC_SERVICE_NAME,
+// browser_sync::kExternalGaiaUrl);
+// EventListenerHookup* hookup = NewListenerHookup(gaia_auth.channel(),
+// this, &OnAuthenticate);
+// gaia_auth.Authenticate("email", "passwd", true, false);
+// // OnAuthenticate() will get called with result;
+//
+// Credentials can also be preserved for subsequent requests, though these are
+// saved in plain-text in memory, and not very secure on client systems. The
+// email address associated with the Gaia account can be read; the password is
+// write-only.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/signin.h"
+#include "googleurl/src/gurl.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+static const char kGaiaUrl[] =
+ "https://www.google.com:443/accounts/ClientLogin";
+
+// Use of the following enum is odd. GaiaAuthenticator only looks at
+// DONT_SAVE_CREDENTIALS and SAVE_IN_MEMORY_ONLY (PERSIST_TO_DISK is == to
+// SAVE_IN_MEMORY_ONLY for GaiaAuthenticator). The sync engine never uses
+// DONT_SAVE_CREDENTIALS. AuthWatcher does look in GaiaAuthenticator's results
+// object to decide if it should save credentials to disk. This currently
+// works so I'm leaving the odd dance alone.
+
+enum SaveCredentials {
+ DONT_SAVE_CREDENTIALS,
+ SAVE_IN_MEMORY_ONLY,
+ PERSIST_TO_DISK // Saved in both memory and disk
+};
+
+// Error codes from Gaia. These will be set correctly for both Gaia V1
+// (/ClientAuth) and V2 (/ClientLogin)
+enum AuthenticationError {
+ None = 0,
+ BadAuthentication = 1,
+ NotVerified = 2,
+ TermsNotAgreed = 3,
+ Unknown = 4,
+ AccountDeleted = 5,
+ AccountDisabled = 6,
+ CaptchaRequired = 7,
+ ServiceUnavailable = 8,
+ // Errors generated by this class not Gaia.
+ CredentialsNotSet = 9,
+ ConnectionUnavailable = 10
+};
+
+class GaiaAuthenticator;
+
+struct GaiaAuthEvent {
+ enum {
+ GAIA_AUTH_FAILED,
+ GAIA_AUTH_SUCCEEDED,
+ GAIA_AUTHENTICATOR_DESTROYED
+ }
+ what_happened;
+ AuthenticationError error;
+ const GaiaAuthenticator* authenticator;
+
+ // Lets us use GaiaAuthEvent as its own traits type in hookups.
+ typedef GaiaAuthEvent EventType;
+ static inline bool IsChannelShutdownEvent(const GaiaAuthEvent& event) {
+ return event.what_happened == GAIA_AUTHENTICATOR_DESTROYED;
+ }
+};
+
+// GaiaAuthenticator can be used to pass user credentials to Gaia and obtain
+// cookies set by the Gaia servers.
+class GaiaAuthenticator {
+ FRIEND_TEST(GaiaAuthenticatorTest, TestNewlineAtEndOfAuthTokenRemoved);
+ public:
+
+ // Since GaiaAuthenticator can be used for any service, or by any client, you
+ // must include a user-agent and a service-id when creating one. The
+ // user_agent is a short string used for simple log analysis. gaia_url is used
+ // to choose the server to authenticate with (e.g.
+ // http://www.google.com/accounts/ClientLogin).
+ GaiaAuthenticator(const std::string& user_agent,
+ const std::string& service_id,
+ const std::string& gaia_url);
+
+ virtual ~GaiaAuthenticator();
+
+ // Pass credentials to authenticate with, or use saved credentials via an
+ // overload. If authentication succeeds, you can retrieve the authentication
+ // token via the respective accessors. Returns a boolean indicating whether
+ // authentication succeeded or not.
+ bool Authenticate(const std::string& user_name, const std::string& password,
+ SaveCredentials should_save_credentials, bool synchronous,
+ const std::string& captcha_token,
+ const std::string& captcha_value,
+ SignIn try_first);
+
+ bool Authenticate(const std::string& user_name, const std::string& password,
+ SaveCredentials should_save_credentials, bool synchronous,
+ SignIn try_first);
+
+ bool AuthenticateService(const std::string& service_id,
+ const std::string& sid,
+ const std::string& lsid,
+ std::string* other_service_cookie);
+
+  // Resets all stored credentials (including cookies) to their default values.
+ void ResetCredentials();
+
+ void SetUsernamePassword(const std::string& username,
+ const std::string& password);
+
+ void SetUsername(const std::string& username);
+
+ void SetAuthToken(const std::string& auth_token, SaveCredentials);
+
+ struct AuthResults {
+ SaveCredentials credentials_saved;
+ std::string email;
+ std::string password;
+
+ // Fields that store various cookies.
+ std::string sid;
+ std::string lsid;
+ std::string auth_token;
+
+ std::string primary_email;
+
+ // Fields for items returned when authentication fails.
+ std::string error_msg;
+ enum AuthenticationError auth_error;
+ std::string auth_error_url;
+ std::string captcha_token;
+ std::string captcha_url;
+ SignIn signin;
+
+ AuthResults () : credentials_saved(DONT_SAVE_CREDENTIALS),
+ auth_error(None) { }
+ };
+
+ protected:
+
+ struct AuthParams {
+ GaiaAuthenticator* authenticator;
+ uint32 request_id;
+ SaveCredentials should_save_credentials;
+ std::string email;
+ std::string password;
+ std::string captcha_token;
+ std::string captcha_value;
+ SignIn try_first;
+ };
+
+ // mutex_ must be entered before calling this function.
+ AuthParams MakeParams(const std::string& user_name,
+ const std::string& password,
+ SaveCredentials should_save_credentials,
+ const std::string& captcha_token,
+ const std::string& captcha_value,
+ SignIn try_first);
+
+ // The real Authenticate implementations.
+ bool AuthenticateImpl(const AuthParams& params);
+ bool AuthenticateImpl(const AuthParams& params, AuthResults* results);
+ bool PerformGaiaRequest(const AuthParams& params, AuthResults* results);
+ bool LaunchAuthenticate(const AuthParams& params, bool synchronous);
+ static void *ThreadMain(void *arg);
+
+ // virtual for testing purposes
+ virtual bool Post(const GURL& url, const std::string& post_body,
+ unsigned long* response_code, std::string* response_body) {
+ return false;
+ }
+
+  // Caller should fill in results->lsid before calling. Result in
+ // results->primary_email.
+ bool LookupEmail(AuthResults* results);
+
+ public:
+ // Retrieve email
+ inline std::string email() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.email;
+ }
+
+ // Retrieve password
+ inline std::string password() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.password;
+ }
+
+ // Retrieve AuthToken, if previously authenticated; otherwise returns "".
+ inline std::string auth_token() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_token;
+ }
+
+ // Retrieve SID cookie. For details, see the Google Accounts documentation.
+ inline std::string sid() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.sid;
+ }
+
+ // Retrieve LSID cookie. For details, see the Google Accounts documentation.
+ inline std::string lsid() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.lsid;
+ }
+
+ // Get last authentication error.
+ inline enum AuthenticationError auth_error() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_error;
+ }
+
+ inline std::string auth_error_url() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.auth_error_url;
+ }
+
+ inline std::string captcha_token() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.captcha_token;
+ }
+
+ inline std::string captcha_url() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_.captcha_url;
+ }
+
+ inline AuthResults results() const {
+ PThreadScopedLock<PThreadMutex> enter(&mutex_);
+ return auth_results_;
+ }
+
+ typedef EventChannel<GaiaAuthEvent, PThreadMutex> Channel;
+
+ inline Channel* channel() const {
+ return channel_;
+ }
+
+ private:
+ bool IssueAuthToken(AuthResults* results, const std::string& service_id,
+ bool long_lived_token);
+
+ // Helper method to parse response when authentication succeeds.
+ void ExtractTokensFrom(const std::string& response, AuthResults* results);
+ // Helper method to parse response when authentication fails.
+ void ExtractAuthErrorFrom(const std::string& response, AuthResults* results);
+
+ // Fields for the obvious data items.
+ const std::string user_agent_;
+ const std::string service_id_;
+ const std::string gaia_url_;
+
+ AuthResults auth_results_;
+
+ // When multiple async requests are running, only the one that started most
+ // recently updates the values.
+ //
+ // Note that even though this code was written to handle multiple requests
+ // simultaneously, the sync code issues auth requests one at a time.
+ uint32 request_count_;
+
+ Channel* channel_;
+
+ // Used to compute backoff time for next allowed authentication.
+ int delay_; // In seconds.
+ time_t next_allowed_auth_attempt_time_;
+ int early_auth_attempt_count_;
+
+ // Protects auth_results_, and request_count_.
+ mutable PThreadMutex mutex_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_GAIA_AUTHENTICATOR_H_
diff --git a/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc b/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc
new file mode 100644
index 0000000..c7c6eb8
--- /dev/null
+++ b/chrome/browser/sync/engine/net/gaia_authenticator_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+
+#include <string>
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "googleurl/src/gurl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace browser_sync {
+
+class GaiaAuthenticatorTest : public testing::Test { };
+
+class GaiaAuthMock : public GaiaAuthenticator {
+ public:
+ GaiaAuthMock() : GaiaAuthenticator("useragent",
+ "serviceid",
+ "http://gaia_url") {}
+ ~GaiaAuthMock() {}
+ protected:
+ bool Post(const GURL& url, const string& post_body,
+ unsigned long* response_code, string* response_body) {
+ *response_code = RC_REQUEST_OK;
+ response_body->assign("body\n");
+ return true;
+ }
+};
+
+TEST(GaiaAuthenticatorTest, TestNewlineAtEndOfAuthTokenRemoved) {
+ GaiaAuthMock mock_auth;
+ GaiaAuthenticator::AuthResults results;
+ EXPECT_TRUE(mock_auth.IssueAuthToken(&results, "sid", true));
+ EXPECT_EQ(0, results.auth_token.compare("body"));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/http_return.h b/chrome/browser/sync/engine/net/http_return.h
new file mode 100644
index 0000000..fd5167b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/http_return.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
+
+namespace browser_sync {
+enum HTTPReturnCode {
+ RC_REQUEST_OK = 200,
+ RC_UNAUTHORIZED = 401,
+ RC_FORBIDDEN = 403,
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_HTTP_RETURN_H_
diff --git a/chrome/browser/sync/engine/net/openssl_init.cc b/chrome/browser/sync/engine/net/openssl_init.cc
new file mode 100644
index 0000000..afaf006
--- /dev/null
+++ b/chrome/browser/sync/engine/net/openssl_init.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// OpenSSL multi-threading initialization
+
+#include "chrome/browser/sync/engine/net/openssl_init.h"
+
+#include <openssl/crypto.h>
+
+#include "base/logging.h"
+#include "chrome/browser/sync/util/compat-pthread.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+// OpenSSL requires multithreading callbacks to be initialized prior to using
+// the library so that it can manage thread locking as necessary.
+
+// Dynamic lock type
+//
+// This needs to be a struct and in global scope because OpenSSL relies on some
+// macro magic.
+struct CRYPTO_dynlock_value {
+ PThreadMutex mutex;
+ void Lock() {
+ mutex.Lock();
+ }
+ void Unlock() {
+ mutex.Unlock();
+ }
+};
+
+namespace {
+
+// This array stores all of the mutexes available to OpenSSL
+PThreadMutex* mutex_buf = NULL;
+
+// OpenSSL mutex handling callback functions
+
+// OpenSSL Callback - Locks/unlocks the specified mutex held by OpenSSL.
+void OpenSslMutexLockControl(int mode, int n, const char* file, int line) {
+ if (mode & CRYPTO_LOCK) {
+ mutex_buf[n].Lock();
+ } else {
+ mutex_buf[n].Unlock();
+ }
+}
+
+// OpenSSL Callback - Returns the thread ID
+unsigned long OpenSslGetThreadID(void) {
+ return GetCurrentThreadId();
+}
+
+// Dynamic locking functions
+
+// Allocate a new lock
+struct CRYPTO_dynlock_value* dyn_create_function(const char* file, int line) {
+ return new CRYPTO_dynlock_value;
+}
+
+void dyn_lock_function(int mode, struct CRYPTO_dynlock_value* lock,
+ const char* file, int line) {
+ if (mode & CRYPTO_LOCK) {
+ lock->Lock();
+ } else {
+ lock->Unlock();
+ }
+}
+
+void dyn_destroy_function(struct CRYPTO_dynlock_value* lock,
+ const char* file, int line) {
+ delete lock;
+}
+
+} // namespace
+
+// We want to log the version of the OpenSSL library being used, in particular
+// for the case where it's dynamically linked. We want the version from the
+// library, not from the header files. It seems the OpenSSL folks haven't
+// bothered with an accessor for this, so we just pluck it out.
+#ifdef OS_WINDOWS
+// TODO(sync): Figure out how to get the SSL version string on Windows.
+const char* SSL_version_str = "UNKNOWN";
+#else
+extern const char* SSL_version_str;
+#endif
+
+namespace browser_sync {
+
+// Initializes the OpenSSL multithreading callbacks. This isn't thread-safe,
+// but it is called early enough that it doesn't matter.
+void InitOpenSslMultithreading() {
+ LOG(INFO) << "Using OpenSSL headers version " << OPENSSL_VERSION_TEXT
+ << ", lib version " << SSL_version_str;
+
+ if (mutex_buf)
+ return;
+
+ mutex_buf = new PThreadMutex[CRYPTO_num_locks()];
+ CHECK(NULL != mutex_buf);
+
+ // OpenSSL has only one single global set of callbacks, so this
+ // initialization must be done only once, even though the OpenSSL lib may be
+ // used by multiple modules (jingle jabber connections and P2P tunnels).
+ CRYPTO_set_id_callback(OpenSslGetThreadID);
+ CRYPTO_set_locking_callback(OpenSslMutexLockControl);
+
+ CRYPTO_set_dynlock_create_callback(dyn_create_function);
+ CRYPTO_set_dynlock_lock_callback(dyn_lock_function);
+ CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function);
+}
+
+// Cleans up the OpenSSL multithreading callbacks.
+void CleanupOpenSslMultithreading() {
+ if (!mutex_buf) {
+ return;
+ }
+
+ CRYPTO_set_dynlock_create_callback(NULL);
+ CRYPTO_set_dynlock_lock_callback(NULL);
+ CRYPTO_set_dynlock_destroy_callback(NULL);
+
+ CRYPTO_set_id_callback(NULL);
+ CRYPTO_set_locking_callback(NULL);
+
+ delete [] mutex_buf;
+ mutex_buf = NULL;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/openssl_init.h b/chrome/browser/sync/engine/net/openssl_init.h
new file mode 100644
index 0000000..8cd4558
--- /dev/null
+++ b/chrome/browser/sync/engine/net/openssl_init.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// OpenSSL multi-threading initialization
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
+
+namespace browser_sync {
+
+// Initializes the OpenSSL multithreading callbacks.
+void InitOpenSslMultithreading();
+
+// Cleans up the OpenSSL multithreading callbacks.
+void CleanupOpenSslMultithreading();
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_OPENSSL_INIT_H_
diff --git a/chrome/browser/sync/engine/net/server_connection_manager.cc b/chrome/browser/sync/engine/net/server_connection_manager.cc
new file mode 100644
index 0000000..42b380b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/server_connection_manager.cc
@@ -0,0 +1,375 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+
+#include <errno.h>
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/net/url_translator.h"
+#include "chrome/browser/sync/engine/syncapi.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+using std::ostream;
+using std::string;
+using std::vector;
+
+static const char kSyncServerSyncPath[] = "/command/";
+
+// At the /time/ path of the sync server, we expect to find a very simple
+// time of day service that we can use to synchronize the local clock with
+// server time.
+static const char kSyncServerGetTimePath[] = "/time";
+
+static const ServerConnectionEvent shutdown_event =
+ { ServerConnectionEvent::SHUTDOWN, HttpResponse::CONNECTION_UNAVAILABLE,
+ false };
+
+typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+struct ServerConnectionManager::PlatformMembers {
+ explicit PlatformMembers(const string& user_agent) { }
+ void Kill() { }
+ void Reset() { }
+ void Reset(MutexLock*) { }
+};
+
+bool ServerConnectionManager::Post::ReadBufferResponse(
+ string* buffer_out, HttpResponse* response, bool require_response) {
+ if (RC_REQUEST_OK != response->response_code) {
+ response->server_status = HttpResponse::SYNC_SERVER_ERROR;
+ return false;
+ }
+
+ if (require_response && (1 > response->content_length))
+ return false;
+
+ const int64 bytes_read = ReadResponse(buffer_out, response->content_length);
+ if (bytes_read != response->content_length) {
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+bool ServerConnectionManager::Post::ReadDownloadResponse(
+ HttpResponse* response, string* buffer_out) {
+ const int64 bytes_read = ReadResponse(buffer_out, response->content_length);
+
+ if (bytes_read != response->content_length) {
+ LOG(ERROR) << "Mismatched content lengths, server claimed " <<
+ response->content_length << ", but sent " << bytes_read;
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+ return true;
+}
+
+namespace {
+ string StripTrailingSlash(const string& s) {
+ int stripped_end_pos = s.size();
+ if (s.at(stripped_end_pos - 1) == '/') {
+ stripped_end_pos = stripped_end_pos - 1;
+ }
+
+ return s.substr(0, stripped_end_pos);
+ }
+} // namespace
+
+// TODO(chron): Use a GURL instead of string concatenation.
+ string ServerConnectionManager::Post::MakeConnectionURL(
+ const string& sync_server, const string& path,
+ bool use_ssl) const {
+ string connection_url = (use_ssl ? "https://" : "http://");
+ connection_url += sync_server;
+ connection_url = StripTrailingSlash(connection_url);
+ connection_url += path;
+
+ return connection_url;
+}
+
+int ServerConnectionManager::Post::ReadResponse(string* out_buffer,
+ int length) {
+ int bytes_read = buffer_.length();
+ CHECK(length <= bytes_read);
+ out_buffer->assign(buffer_);
+ return bytes_read;
+}
+
+// A helper class that automatically notifies when the status changes:
+struct WatchServerStatus {
+ WatchServerStatus(ServerConnectionManager* conn_mgr, HttpResponse* response)
+ : conn_mgr_(conn_mgr), response_(response),
+ reset_count_(conn_mgr->reset_count_),
+ server_reachable_(conn_mgr->server_reachable_) {
+ response->server_status = conn_mgr->server_status_;
+ }
+ ~WatchServerStatus() {
+ // Don't update the status of the connection if it has been reset.
+ // TODO(timsteele): Do we need this? Is this used by multiple threads?
+ if (reset_count_ != conn_mgr_->reset_count_)
+ return;
+ if (conn_mgr_->server_status_ != response_->server_status) {
+ conn_mgr_->server_status_ = response_->server_status;
+ conn_mgr_->NotifyStatusChanged();
+ return;
+ }
+ // Notify if we've gone on or offline.
+ if (server_reachable_ != conn_mgr_->server_reachable_)
+ conn_mgr_->NotifyStatusChanged();
+ }
+ ServerConnectionManager* const conn_mgr_;
+ HttpResponse* const response_;
+ // TODO(timsteele): Should this be Barrier:AtomicIncrement?
+ base::subtle::AtomicWord reset_count_;
+ bool server_reachable_;
+};
+
+ServerConnectionManager::ServerConnectionManager(
+ const string& server, int port, bool use_ssl, const string& user_agent,
+ const string& client_id)
+ : sync_server_(server), sync_server_port_(port),
+ channel_(new Channel(shutdown_event)),
+ server_status_(HttpResponse::NONE), server_reachable_(false),
+ client_id_(client_id), use_ssl_(use_ssl),
+ user_agent_(user_agent),
+ platform_(new PlatformMembers(user_agent)),
+ reset_count_(0), error_count_(0),
+ terminate_all_io_(false),
+ proto_sync_path_(kSyncServerSyncPath),
+ get_time_path_(kSyncServerGetTimePath) {
+}
+
+ServerConnectionManager::~ServerConnectionManager() {
+ delete channel_;
+ delete platform_;
+ shutdown_event_mutex_.Lock();
+ int result = pthread_cond_broadcast(&shutdown_event_condition_.condvar_);
+ shutdown_event_mutex_.Unlock();
+ if (result) {
+ LOG(ERROR) << "Error signaling shutdown_event_condition_ last error = "
+ << result;
+ }
+}
+
+void ServerConnectionManager::NotifyStatusChanged() {
+ ServerConnectionEvent event = { ServerConnectionEvent::STATUS_CHANGED,
+ server_status_,
+ server_reachable_ };
+ channel_->NotifyListeners(event);
+}
+
+// Uses currently set auth token. Set by AuthWatcher.
+bool ServerConnectionManager::PostBufferWithCachedAuth(
+ const PostBufferParams* params) {
+ string path =
+ MakeSyncServerPath(proto_sync_path(), MakeSyncQueryString(client_id_));
+ return PostBufferToPath(params, path, auth_token_);
+}
+
+bool ServerConnectionManager::PostBufferWithAuth(const PostBufferParams* params,
+ const string& auth_token) {
+ string path = MakeSyncServerPath(proto_sync_path(),
+ MakeSyncQueryString(client_id_));
+
+ return PostBufferToPath(params, path, auth_token);
+}
+
+bool ServerConnectionManager::PostBufferToPath(const PostBufferParams* params,
+ const string& path,
+ const string& auth_token) {
+ WatchServerStatus watcher(this, params->response);
+ scoped_ptr<Post> post(MakePost());
+ post->set_timing_info(params->timing_info);
+ bool ok = post->Init(path.c_str(), auth_token, params->buffer_in,
+ params->response);
+
+ if (!ok || RC_REQUEST_OK != params->response->response_code) {
+ IncrementErrorCount();
+ return false;
+ }
+
+ if (post->ReadBufferResponse(params->buffer_out, params->response, true)) {
+ params->response->server_status = HttpResponse::SERVER_CONNECTION_OK;
+ server_reachable_ = true;
+ return true;
+ }
+ return false;
+}
+
+bool ServerConnectionManager::CheckTime(int32* out_time) {
+ // Verify that the server really is reachable by checking the time. We need
+ // to do this because of wifi interstitials that intercept messages from the
+ // client and return HTTP OK instead of a redirect.
+ HttpResponse response;
+ WatchServerStatus watcher(this, &response);
+ string post_body = "command=get_time";
+
+ // We only retry the CheckTime call if we were reset during the CheckTime
+ // attempt. We only try 3 times in case we're in a reset loop elsewhere.
+ base::subtle::AtomicWord start_reset_count = reset_count_ - 1;
+ for (int i = 0 ; i < 3 && start_reset_count != reset_count_ ; i++) {
+ start_reset_count = reset_count_;
+ scoped_ptr<Post> post(MakePost());
+
+ // Note that the server's get_time path doesn't require authentication.
+ string get_time_path =
+ MakeSyncServerPath(kSyncServerGetTimePath, post_body);
+ LOG(INFO) << "Requesting get_time from:" << get_time_path;
+
+ string blank_post_body;
+ bool ok = post->Init(get_time_path.c_str(), blank_post_body,
+ blank_post_body, &response);
+ if (!ok) {
+ LOG(INFO) << "Unable to check the time";
+ continue;
+ }
+ string time_response;
+ time_response.resize(response.content_length);
+ ok = post->ReadDownloadResponse(&response, &time_response);
+ if (!ok || string::npos !=
+ time_response.find_first_not_of("0123456789")) {
+ LOG(ERROR) << "unable to read a non-numeric response from get_time:"
+ << time_response;
+ continue;
+ }
+ *out_time = atoi(time_response.c_str());
+ LOG(INFO) << "Server was reachable.";
+ return true;
+ }
+ IncrementErrorCount();
+ return false;
+}
+
+bool ServerConnectionManager::IsServerReachable() {
+ int32 time;
+ return CheckTime(&time);
+}
+
+bool ServerConnectionManager::IsUserAuthenticated() {
+ return IsGoodReplyFromServer(server_status_);
+}
+
+bool ServerConnectionManager::CheckServerReachable() {
+ const bool server_is_reachable = IsServerReachable();
+ if (server_reachable_ != server_is_reachable) {
+ server_reachable_ = server_is_reachable;
+ NotifyStatusChanged();
+ }
+ return server_is_reachable;
+}
+
+void ServerConnectionManager::kill() {
+ {
+ MutexLock lock(&terminate_all_io_mutex_);
+ terminate_all_io_ = true;
+ }
+ platform_->Kill();
+ shutdown_event_mutex_.Lock();
+ int result = pthread_cond_broadcast(&shutdown_event_condition_.condvar_);
+ shutdown_event_mutex_.Unlock();
+ if (result) {
+ LOG(ERROR) << "Error signaling shutdown_event_condition_ last error = "
+ << result;
+ }
+}
+
+void ServerConnectionManager::ResetAuthStatus() {
+ ResetConnection();
+ server_status_ = HttpResponse::NONE;
+ NotifyStatusChanged();
+}
+
+void ServerConnectionManager::ResetConnection() {
+ base::subtle::NoBarrier_AtomicIncrement(&reset_count_, 1);
+ platform_->Reset();
+}
+
+bool ServerConnectionManager::IncrementErrorCount() {
+#ifdef OS_WINDOWS
+ error_count_mutex_.Lock();
+ error_count_++;
+
+ if (error_count_ > kMaxConnectionErrorsBeforeReset) {
+ error_count_ = 0;
+
+ // Be careful with this mutex because calling out to other methods can
+ // result in being called back. Unlock it here to prevent any potential
+ // double-acquisitions.
+ error_count_mutex_.Unlock();
+
+ if (!IsServerReachable()) {
+ LOG(WARNING) << "Too many connection failures, server is not reachable. "
+ << "Resetting connections.";
+ ResetConnection();
+ } else {
+ LOG(WARNING) << "Multiple connection failures while server is reachable.";
+ }
+ return false;
+ }
+
+ error_count_mutex_.Unlock();
+ return true;
+#endif
+ return true;
+}
+
+void ServerConnectionManager::SetServerParameters(const string& server_url,
+ int port, bool use_ssl) {
+ {
+ ParametersLock lock(&server_parameters_mutex_);
+ sync_server_ = server_url;
+ sync_server_port_ = port;
+ use_ssl_ = use_ssl;
+ }
+ platform_->Reset();
+}
+
+// Returns the current server parameters in server_url, port and use_ssl.
+void ServerConnectionManager::GetServerParameters(string* server_url,
+ int* port, bool* use_ssl) {
+ ParametersLock lock(&server_parameters_mutex_);
+ if (server_url != NULL)
+ *server_url = sync_server_;
+ if (port != NULL)
+ *port = sync_server_port_;
+ if (use_ssl != NULL)
+ *use_ssl = use_ssl_;
+}
+
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::DirectoryManager* manager,
+ const PathString &share) {
+ syncable::ScopedDirLookup dir(manager, share);
+ if (!dir.good()) {
+ LOG(INFO) << "Dir lookup failed";
+ return false;
+ }
+ string birthday = dir->store_birthday();
+ if (!birthday.empty())
+ csm->set_store_birthday(birthday);
+ csm->set_share(ToUTF8(share).get_string());
+ return true;
+}
+
+} // namespace browser_sync
+
+std::ostream& operator << (std::ostream& s,
+ const struct browser_sync::HttpResponse& hr) {
+ s << " Response Code (bogus on error): " << hr.response_code;
+ s << " Content-Length (bogus on error): " << hr.content_length;
+ s << " Server Status: " << hr.server_status;
+ return s;
+}
diff --git a/chrome/browser/sync/engine/net/server_connection_manager.h b/chrome/browser/sync/engine/net/server_connection_manager.h
new file mode 100644
index 0000000..8093d45
--- /dev/null
+++ b/chrome/browser/sync/engine/net/server_connection_manager.h
@@ -0,0 +1,345 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/signin.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+class DirectoryManager;
+}
+
+namespace sync_pb {
+class ClientToServerMessage;
+};
+
+struct RequestTimingInfo;
+
+namespace browser_sync {
+
+class ClientToServerMessage;
+
// How many connection errors are accepted before network handles are closed
// and reopened.
// NOTE(review): 'static' at namespace scope in a header gives each
// translation unit its own copy; harmless for a small integer constant,
// but plain 'const int32' (internal linkage by default) would suffice.
static const int32 kMaxConnectionErrorsBeforeReset = 10;
+
+// HttpResponse gathers the relevant output properties of an HTTP request.
+// Depending on the value of the server_status code, response_code, and
+// content_length may not be valid.
struct HttpResponse {
  // NOTE: IsGoodReplyFromServer() depends on the relative order of these
  // enumerators -- everything from SERVER_CONNECTION_OK onward means "the
  // server replied".  Keep new error codes before SERVER_CONNECTION_OK.
  enum ServerConnectionCode {
    // For uninitialized state.
    NONE,

    // CONNECTION_UNAVAILABLE is returned when InternetConnect() fails.
    CONNECTION_UNAVAILABLE,

    // IO_ERROR is returned when reading/writing to a buffer has failed.
    IO_ERROR,

    // SYNC_SERVER_ERROR is returned when the HTTP status code indicates that
    // a non-auth error has occured.
    SYNC_SERVER_ERROR,

    // SYNC_AUTH_ERROR is returned when the HTTP status code indicates that an
    // auth error has occured (i.e. a 401)
    SYNC_AUTH_ERROR,

    // All the following connection codes are valid responses from the server.
    // Means the server is up. If you update this list, be sure to also update
    // IsGoodReplyFromServer().

    // SERVER_CONNECTION_OK is returned when request was handled correctly.
    SERVER_CONNECTION_OK,

    // RETRY is returned when a Commit request fails with a RETRY response from
    // the server.
    //
    // TODO(idana): the server no longer returns RETRY so we should remove this
    // value.
    RETRY,
  };

  // The HTTP Status code.
  int64 response_code;

  // The value of the Content-length header.
  int64 content_length;

  // The size of a download request's payload.
  // NOTE(review): the syncapi bridge currently fills this with the same
  // value as content_length -- TODO confirm the intended distinction.
  int64 payload_length;

  // Identifies the type of failure, if any.
  ServerConnectionCode server_status;
};
+
// True for any code that represents an actual reply from the sync server
// (currently SERVER_CONNECTION_OK or RETRY).  Relies on the enum ordering
// documented in HttpResponse::ServerConnectionCode.
inline bool IsGoodReplyFromServer(HttpResponse::ServerConnectionCode code) {
  return code >= HttpResponse::SERVER_CONNECTION_OK;
}
+
// Event payload broadcast on the ServerConnectionManager's channel whenever
// connection status changes or the manager shuts down.
struct ServerConnectionEvent {
  enum { SHUTDOWN, STATUS_CHANGED } what_happened;
  // Status of the most recent request; only meaningful for STATUS_CHANGED.
  HttpResponse::ServerConnectionCode connection_code;
  bool server_reachable;

  // Traits.
  typedef ServerConnectionEvent EventType;
  static inline bool IsChannelShutdownEvent(const EventType& event) {
    return SHUTDOWN == event.what_happened;
  }
};
+
+struct WatchServerStatus;
+
+// Use this class to interact with the sync server.
+// The ServerConnectionManager currently supports POSTing protocol buffers.
+//
+// *** This class is thread safe. In fact, you should consider creating only
+// one instance for every server that you need to talk to.
class ServerConnectionManager {
  // Post reads the server parameters via GetServerParams(); WatchServerStatus
  // (defined elsewhere) is also granted access to internals.
  friend class Post;
  friend struct WatchServerStatus;
 public:
  typedef EventChannel<ServerConnectionEvent, PThreadMutex> Channel;

  // The lifetime of the GaiaAuthenticator must be longer than the instance
  // of the ServerConnectionManager that you're creating.
  ServerConnectionManager(const std::string& server, int port, bool use_ssl,
                          const std::string& user_agent,
                          const std::string& client_id);

  virtual ~ServerConnectionManager();

  // buffer_in - will be POSTed
  // buffer_out - string will be overwritten with response
  struct PostBufferParams {
    const std::string& buffer_in;
    std::string* buffer_out;
    HttpResponse* response;
    RequestTimingInfo* timing_info;
  };

  // Abstract class providing network-layer functionality to the
  // ServerConnectionManager. Subclasses implement this using an HTTP stack of
  // their choice.
  class Post {
   public:
    explicit Post(ServerConnectionManager* scm) : scm_(scm), timing_info_(0) {
    }
    virtual ~Post() { }

    // Called to initialize and perform an HTTP POST.
    virtual bool Init(const char* path, const std::string& auth_token,
                      const std::string& payload,
                      HttpResponse* response) = 0;

    bool ReadBufferResponse(std::string* buffer_out, HttpResponse* response,
                            bool require_response);
    bool ReadDownloadResponse(HttpResponse* response, std::string* buffer_out);

    void set_timing_info(RequestTimingInfo* timing_info) {
      timing_info_ = timing_info;
    }
    RequestTimingInfo* timing_info() { return timing_info_; }

   protected:
    std::string MakeConnectionURL(const std::string& sync_server,
                                  const std::string& path, bool use_ssl) const;

    // Snapshots the owning manager's server parameters under its lock, so a
    // concurrent SetServerParameters() can't be observed half-applied.
    void GetServerParams(std::string* server, int* server_port,
                         bool* use_ssl) {
      ServerConnectionManager::ParametersLock lock(
          &scm_->server_parameters_mutex_);
      server->assign(scm_->sync_server_);
      *server_port = scm_->sync_server_port_;
      *use_ssl = scm_->use_ssl_;
    }

    std::string buffer_;
    ServerConnectionManager* scm_;

   private:
    int ReadResponse(void* buffer, int length);
    int ReadResponse(std::string* buffer, int length);
    RequestTimingInfo* timing_info_;
  };

  // POSTS buffer_in and reads a response into buffer_out. Uses our currently
  // set auth token in our headers.
  //
  // Returns true if executed successfully.
  virtual bool PostBufferWithCachedAuth(const PostBufferParams* params);

  // POSTS buffer_in and reads a response into buffer_out. Add a specific auth
  // token to http headers.
  //
  // Returns true if executed successfully.
  virtual bool PostBufferWithAuth(const PostBufferParams* params,
                                  const std::string& auth_token);

  // Checks the time on the server. Returns false if the request failed. |time|
  // is an out parameter that stores the value returned from the server.
  virtual bool CheckTime(int32* out_time);

  // Returns true if sync_server_ is reachable. This method verifies that the
  // server is pingable and that traffic can be sent to and from it.
  virtual bool IsServerReachable();

  // Returns true if user has been successfully authenticated.
  virtual bool IsUserAuthenticated();

  // Updates status and broadcasts events on change.
  bool CheckServerReachable();

  // Signal the shutdown event to notify listeners.
  virtual void kill();

  inline Channel* channel() const { return channel_; }

  inline std::string user_agent() const { return user_agent_; }

  inline HttpResponse::ServerConnectionCode server_status() const {
    return server_status_;
  }

  inline bool server_reachable() const { return server_reachable_; }

  void ResetAuthStatus();

  void ResetConnection();

  void NotifyStatusChanged();

  const std::string client_id() const { return client_id_; }

  void SetDomainFromSignIn(SignIn signin_type, const std::string& signin);

  // This changes the server info used by the connection manager. This allows
  // a single client instance to talk to different backing servers. This is
  // typically called during / after authentication so that the server url
  // can be a function of the user's login id. A side effect of this call is
  // that ResetConnection is called.
  void SetServerParameters(const std::string& server_url, int port,
                           bool use_ssl);

  // Returns the current server parameters in server_url, port and use_ssl.
  void GetServerParameters(std::string* server_url, int* port, bool* use_ssl);

  bool terminate_all_io() const {
    PThreadScopedLock<PThreadMutex> lock(&terminate_all_io_mutex_);
    return terminate_all_io_;
  }

  // Factory method to create a Post object we can use for communication with
  // the server.
  virtual Post* MakePost() {
    return NULL;  // For testing.
  };
  // NOTE(review): stray ';' after the function body above.

  void set_auth_token(const std::string& auth_token) {
    auth_token_.assign(auth_token);
  }

 protected:

  PThreadMutex shutdown_event_mutex_;
  PThreadCondVar shutdown_event_condition_;

  // Protects access to sync_server_, sync_server_port_ and use_ssl_:
  mutable PThreadMutex server_parameters_mutex_;
  typedef PThreadScopedLock<PThreadMutex> ParametersLock;

  // The sync_server_ is the server that requests will be made to.
  std::string sync_server_;

  // The sync_server_port_ is the port that HTTP requests will be made on.
  int sync_server_port_;

  // The unique id of the user's client.
  const std::string client_id_;

  // The user-agent string for HTTP.
  std::string user_agent_;

  // Indicates whether or not requests should be made using HTTPS.
  bool use_ssl_;

  // The paths we post to.
  mutable PThreadMutex path_mutex_;
  typedef PThreadScopedLock<PThreadMutex> ScopedPathLock;

  std::string proto_sync_path_;
  std::string get_time_path_;

  // The auth token to use in authenticated requests. Set by the AuthWatcher.
  std::string auth_token_;

  inline std::string proto_sync_path() const {
    ScopedPathLock lock(&path_mutex_);
    return proto_sync_path_;
  }
  std::string get_time_path() const {
    ScopedPathLock lock(&path_mutex_);
    return get_time_path_;
  }

  // Called wherever a failure should be taken as an indication that we may
  // be experiencing connection difficulties.
  virtual bool IncrementErrorCount();
  mutable PThreadMutex error_count_mutex_;  // Protects error_count_
  int error_count_;  // Tracks the number of connection errors.

  // NOTE(review): redundant access specifier -- this section is already
  // protected.
 protected:
  Channel* const channel_;
  // Volatile so various threads can call server_status() without
  // synchronization.
  volatile HttpResponse::ServerConnectionCode server_status_;
  bool server_reachable_;

  struct PlatformMembers;  // Contains platform specific member vars.
  PlatformMembers* const platform_;

  // A counter that is incremented everytime ResetAuthStatus() is called.
  volatile base::subtle::AtomicWord reset_count_;

  // NOTE: Tests rely on this protected function being virtual.
  //
  // Internal PostBuffer base function.
  virtual bool PostBufferToPath(const PostBufferParams*,
                                const std::string& path,
                                const std::string& auth_token);

 private:
  mutable PThreadMutex terminate_all_io_mutex_;
  bool terminate_all_io_;  // when set to true, terminate all connections asap
  DISALLOW_COPY_AND_ASSIGN(ServerConnectionManager);
};
+
+// Fills a ClientToServerMessage with the appropriate share and birthday
+// settings.
+bool FillMessageWithShareDetails(sync_pb::ClientToServerMessage* csm,
+ syncable::DirectoryManager* manager,
+ const PathString &share);
+
+} // namespace browser_sync
+
+std::ostream& operator<<(std::ostream& s,
+ const struct browser_sync::HttpResponse& hr);
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_SERVER_CONNECTION_MANAGER_H_
diff --git a/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc
new file mode 100644
index 0000000..19981de
--- /dev/null
+++ b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/net/syncapi_server_connection_manager.h"
+
+#include "chrome/browser/sync/engine/net/http_return.h"
+#include "chrome/browser/sync/engine/syncapi.h"
+
+using browser_sync::HttpResponse;
+using std::string;
+
+namespace sync_api {
+
+bool SyncAPIBridgedPost::Init(const char* path, const string& auth_token,
+ const string& payload, HttpResponse* response) {
+ string sync_server;
+ int sync_server_port = 0;
+ bool use_ssl = false;
+ GetServerParams(&sync_server, &sync_server_port, &use_ssl);
+ std::string connection_url = MakeConnectionURL(sync_server, path, use_ssl);
+
+ HttpPostProviderInterface* http = factory_->Create();
+ http->SetUserAgent(scm_->user_agent().c_str());
+ http->SetURL(connection_url.c_str(), sync_server_port);
+
+ if (!auth_token.empty()) {
+ string headers = "Authorization: GoogleLogin auth=" + auth_token;
+ http->SetExtraRequestHeaders(headers.c_str());
+ }
+
+ // Must be octet-stream, or the payload may be parsed for a cookie.
+ http->SetPostPayload("application/octet-stream", payload.length(),
+ payload.data());
+
+ // Issue the POST, blocking until it finishes.
+ int os_error_code = 0;
+ int response_code = 0;
+ if (!http->MakeSynchronousPost(&os_error_code, &response_code)) {
+ LOG(INFO) << "Http POST failed, error returns: " << os_error_code;
+ response->server_status = HttpResponse::IO_ERROR;
+ return false;
+ }
+
+ // We got a server response, copy over response codes and content.
+ response->response_code = response_code;
+ response->content_length =
+ static_cast<int64>(http->GetResponseContentLength());
+ response->payload_length =
+ static_cast<int64>(http->GetResponseContentLength());
+ if (response->response_code < 400)
+ response->server_status = HttpResponse::SERVER_CONNECTION_OK;
+ else if (response->response_code == browser_sync::RC_UNAUTHORIZED)
+ response->server_status = HttpResponse::SYNC_AUTH_ERROR;
+ else
+ response->server_status = HttpResponse::SYNC_SERVER_ERROR;
+
+ // Write the content into our buffer.
+ buffer_.assign(http->GetResponseContent(), http->GetResponseContentLength());
+
+ // We're done with the HttpPostProvider.
+ factory_->Destroy(http);
+ return true;
+}
+
SyncAPIServerConnectionManager::~SyncAPIServerConnectionManager() {
  // We own the factory set via SetHttpPostProviderFactory() (may be NULL).
  delete post_provider_factory_;
}
+
+void SyncAPIServerConnectionManager::SetHttpPostProviderFactory(
+ HttpPostProviderFactory* factory) {
+ if (post_provider_factory_)
+ delete post_provider_factory_;
+ post_provider_factory_ = factory;
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h
new file mode 100644
index 0000000..84a355e
--- /dev/null
+++ b/chrome/browser/sync/engine/net/syncapi_server_connection_manager.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
+
+#include <string>
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+
+namespace sync_api {
+
+class HttpPostProviderFactory;
+
+// This provides HTTP Post functionality through the interface provided
+// to the sync API by the application hosting the syncer backend.
class SyncAPIBridgedPost :
    public browser_sync::ServerConnectionManager::Post {
 public:
  // |scm| and |factory| must outlive this Post; neither is owned here.
  SyncAPIBridgedPost(browser_sync::ServerConnectionManager* scm,
                     HttpPostProviderFactory* factory)
      : Post(scm), factory_(factory) {
  }

  virtual ~SyncAPIBridgedPost() { }

  // Performs the POST through an HttpPostProviderInterface created (and
  // destroyed) per call via |factory_|.
  virtual bool Init(const char* path,
                    const std::string& auth_token,
                    const std::string& payload,
                    browser_sync::HttpResponse* response);

 private:
  // Pointer to the factory we use for creating HttpPostProviders. We do not
  // own |factory_|.
  HttpPostProviderFactory* factory_;

  DISALLOW_COPY_AND_ASSIGN(SyncAPIBridgedPost);
};
+
+// A ServerConnectionManager subclass used by the syncapi layer. We use a
+// subclass so that we can override MakePost() to generate a POST object using
+// an instance of the HttpPostProviderFactory class.
class SyncAPIServerConnectionManager :
    public browser_sync::ServerConnectionManager {
 public:
  // No factory is installed at construction; SetHttpPostProviderFactory()
  // must be called before MakePost() is exercised.
  SyncAPIServerConnectionManager(const std::string& server,
                                 int port,
                                 bool use_ssl,
                                 const std::string& client_version,
                                 const std::string& client_id)
      : ServerConnectionManager(server, port, use_ssl, client_version,
                                client_id),
        post_provider_factory_(NULL) {
  }

  virtual ~SyncAPIServerConnectionManager();

  // This method gives ownership of |factory| to |this|.
  void SetHttpPostProviderFactory(HttpPostProviderFactory* factory);
 protected:
  virtual Post* MakePost() {
    return new SyncAPIBridgedPost(this, post_provider_factory_);
  }
 private:
  // A factory creating concrete HttpPostProviders for use whenever we need to
  // issue a POST to sync servers.
  HttpPostProviderFactory* post_provider_factory_;

  DISALLOW_COPY_AND_ASSIGN(SyncAPIServerConnectionManager);
};
+
+} // namespace sync_api
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_SYNCAPI_SERVER_CONNECTION_MANAGER_H_
diff --git a/chrome/browser/sync/engine/net/url_translator.cc b/chrome/browser/sync/engine/net/url_translator.cc
new file mode 100644
index 0000000..0931c36
--- /dev/null
+++ b/chrome/browser/sync/engine/net/url_translator.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the definition of a few helper functions used for generating sync
+// URLs.
+
+#include "chrome/browser/sync/engine/net/url_translator.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "net/base/escape.h"
+
+using std::string;
+
+namespace browser_sync {
+
namespace {
// Parameters that the server understands. (here, a-Z)
// NOTE(review): kParameterAuthToken is not referenced in the visible part
// of this file -- presumably used by callers building authenticated URLs.
const char kParameterAuthToken[] = "auth";
const char kParameterClientID[] = "client_id";
}
+
+// Convenience wrappers around CgiEscapePath().
+string CgiEscapeString(const char* src) {
+ return CgiEscapeString(string(src));
+}
+
// Thin wrapper over net's EscapePath() so callers (e.g. gaia_auth) need not
// include net/base/escape.h directly.
string CgiEscapeString(const string& src) {
  return EscapePath(src);
}
+
// Joins |path| and |query_string| with the '?' separator, producing the
// full request path for the sync server.
string MakeSyncServerPath(const string& path, const string& query_string) {
  return path + "?" + query_string;
}
+
+string MakeSyncQueryString(const string& client_id) {
+ string query;
+ query += kParameterClientID;
+ query += "=" + CgiEscapeString(client_id);
+ return query;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/net/url_translator.h b/chrome/browser/sync/engine/net/url_translator.h
new file mode 100644
index 0000000..717e15b
--- /dev/null
+++ b/chrome/browser/sync/engine/net/url_translator.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Contains the declaration of a few helper functions used for generating sync
+// URLs.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
+
+#include <string>
+
+namespace browser_sync {
+
+// Convenience wrappers around CgiEscapePath(), used by gaia_auth.
+std::string CgiEscapeString(const char* src);
+std::string CgiEscapeString(const std::string& src);
+
+// This method appends the query string to the sync server path.
+std::string MakeSyncServerPath(const std::string& path,
+ const std::string& query_string);
+
+std::string MakeSyncQueryString(const std::string& client_id);
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_NET_URL_TRANSLATOR_H_
diff --git a/chrome/browser/sync/engine/post_commit_message_command.cc b/chrome/browser/sync/engine/post_commit_message_command.cc
new file mode 100644
index 0000000..3807607
--- /dev/null
+++ b/chrome/browser/sync/engine/post_commit_message_command.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/post_commit_message_command.h"
+
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using std::vector;
+
+namespace browser_sync {
+
// This command carries no state of its own; construction/destruction are
// trivial.
PostCommitMessageCommand::PostCommitMessageCommand() {}
PostCommitMessageCommand::~PostCommitMessageCommand() {}
+
+void PostCommitMessageCommand::ExecuteImpl(SyncerSession *session) {
+ if (session->commit_ids_empty())
+ return; // nothing to commit
+ ClientToServerResponse response;
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good())
+ return;
+ if (!SyncerProtoUtil::PostClientToServerMessage(session->commit_message(),
+ &response, session)) {
+ // None of our changes got through, let's clear sync flags and wait for
+ // another list update.
+ SyncerStatus status(session);
+ status.increment_consecutive_problem_commits();
+ status.increment_consecutive_errors();
+ syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
+ // TODO(sync): why set this flag, it seems like a bad side-effect?
+ const vector<syncable::Id>& commit_ids = session->commit_ids();
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
+ entry.Put(syncable::SYNCING, false);
+ }
+ return;
+ } else {
+ session->set_item_committed();
+ }
+ session->set_commit_response(response);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/post_commit_message_command.h b/chrome/browser/sync/engine/post_commit_message_command.h
new file mode 100644
index 0000000..87aa4d7
--- /dev/null
+++ b/chrome/browser/sync/engine/post_commit_message_command.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
// Syncer command that POSTs the session's pending commit message to the
// server and records the outcome on the session.
class PostCommitMessageCommand : public SyncerCommand {
 public:
  PostCommitMessageCommand();
  virtual ~PostCommitMessageCommand();

  // No-op when the session has nothing to commit.
  virtual void ExecuteImpl(SyncerSession *session);

 private:
  DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
diff --git a/chrome/browser/sync/engine/process_commit_response_command.cc b/chrome/browser/sync/engine/process_commit_response_command.cc
new file mode 100644
index 0000000..6a2a177
--- /dev/null
+++ b/chrome/browser/sync/engine/process_commit_response_command.cc
@@ -0,0 +1,374 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/process_commit_response_command.h"
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+using syncable::MutableEntry;
+using syncable::Entry;
+using syncable::Name;
+using syncable::SyncName;
+using syncable::DBName;
+
+using std::set;
+using std::vector;
+
+using syncable::BASE_VERSION;
+using syncable::GET_BY_ID;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::PARENT_ID;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SYNCER;
+using syncable::SYNCING;
+
+namespace browser_sync {
+
// Bumps the session's consecutive-problem counters after a failed commit
// round.  NOTE(review): SyncerStatus is passed by value -- presumably it is
// a lightweight handle onto shared session state, so the copy still updates
// the session; confirm against syncer_status.h.
void IncrementErrorCounters(SyncerStatus status) {
  status.increment_consecutive_problem_commits();
  status.increment_consecutive_errors();
}
// Clears the same counters once some commits succeed.
void ResetErrorCounters(SyncerStatus status) {
  status.zero_consecutive_problem_commits();
  status.zero_consecutive_errors();
}
+
// Stateless command; nothing to set up or tear down.
ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
+
+void ProcessCommitResponseCommand::ModelChangingExecuteImpl(
+ SyncerSession *session) {
+ // TODO(sync): This function returns if it sees problems. We probably want
+ // to flag the need for an update or similar.
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ const ClientToServerResponse& response = session->commit_response();
+ const vector<syncable::Id>& commit_ids = session->commit_ids();
+
+ // TODO(sync): move counters out of here.
+ SyncerStatus status(session);
+
+ if (!response.has_commit()) {
+ // TODO(sync): What if we didn't try to commit anything?
+ LOG(WARNING) << "Commit response has no commit body!";
+ IncrementErrorCounters(status);
+ return;
+ }
+
+ const CommitResponse& cr = response.commit();
+ int commit_count = commit_ids.size();
+ if (cr.entryresponse_size() != commit_count) {
+ LOG(ERROR) << "Commit response has wrong number of entries! Expected:" <<
+ commit_count << " Got:" << cr.entryresponse_size();
+ for (int i = 0 ; i < cr.entryresponse_size() ; i++) {
+ LOG(ERROR) << "Response #" << i << " Value: " <<
+ cr.entryresponse(i).response_type();
+ if (cr.entryresponse(i).has_error_message())
+ LOG(ERROR) << " " << cr.entryresponse(i).error_message();
+ }
+ IncrementErrorCounters(status);
+ return;
+ }
+
+ // If we try to commit a parent and child together and the parent conflicts
+ // the child will have a bad parent causing an error. As this is not a
+ // critical error, we trap it and don't LOG(ERROR). To enable this we keep
+ // a map of conflicting new folders.
+ int transient_error_commits = 0;
+ int conflicting_commits = 0;
+ int error_commits = 0;
+ int successes = 0;
+ bool over_quota = false;
+ set<syncable::Id> conflicting_new_folder_ids;
+ set<syncable::Id> deleted_folders;
+ bool truncated_commit_logged = false;
+ { // Scope for WriteTransaction
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ for (int i = 0; i < cr.entryresponse_size(); i++) {
+ CommitResponse::RESPONSE_TYPE response_type =
+ ProcessSingleCommitResponse(&trans, cr.entryresponse(i),
+ commit_ids[i],
+ &conflicting_new_folder_ids,
+ &deleted_folders, session);
+ switch (response_type) {
+ case CommitResponse::INVALID_MESSAGE:
+ ++error_commits;
+ break;
+ case CommitResponse::CONFLICT:
+ ++conflicting_commits;
+ session->AddCommitConflict(commit_ids[i]);
+ break;
+ case CommitResponse::SUCCESS:
+ // TODO(sync): worry about sync_rate_ rate calc?
+ ++successes;
+ status.increment_successful_commits();
+ break;
+ case CommitResponse::OVER_QUOTA:
+ over_quota = true;
+ // We handle over quota like a retry.
+ case CommitResponse::RETRY:
+ session->AddBlockedItem(commit_ids[i]);
+ break;
+ case CommitResponse::TRANSIENT_ERROR:
+ ++transient_error_commits;
+ break;
+ default:
+ LOG(FATAL) << "Bad return from ProcessSingleCommitResponse";
+ }
+ }
+ }
+
+ // TODO(sync): move status reporting elsewhere.
+ status.set_conflicting_commits(conflicting_commits);
+ status.set_error_commits(error_commits);
+ if (0 == successes) {
+ status.increment_consecutive_transient_error_commits_by(
+ transient_error_commits);
+ status.increment_consecutive_errors_by(transient_error_commits);
+ } else {
+ status.zero_consecutive_transient_error_commits();
+ status.zero_consecutive_errors();
+ }
+ // If all commits are errors count it as an error.
+ if (commit_count == error_commits) {
+ // A larger error step than normal because a POST just succeeded.
+ status.TallyBigNewError();
+ }
+ if (commit_count != (conflicting_commits + error_commits +
+ transient_error_commits)) {
+ ResetErrorCounters(status);
+ }
+ SyncerUtil::MarkDeletedChildrenSynced(dir, &deleted_folders);
+ session->set_over_quota(over_quota);
+
+ return;
+}
+
// Logs the server-supplied detail for a failed commit entry, or a
// placeholder line when the server sent no message.
void LogServerError(const CommitResponse_EntryResponse& res) {
  if (res.has_error_message())
    LOG(ERROR) << "  " << res.error_message();
  else
    LOG(ERROR) << "  No detailed error message returned from server";
}
+
// Applies one entry of the server's commit response to the local entry it
// was generated for (|pre_commit_id|).  Always clears the entry's SYNCING
// bit, triages the response type (errors/conflicts/retries return early),
// validates the returned id/version, and on SUCCESS delegates to
// ProcessSuccessfulCommitResponse.  Returns the (validated) response type.
CommitResponse::RESPONSE_TYPE
ProcessCommitResponseCommand::ProcessSingleCommitResponse(
    syncable::WriteTransaction* trans,
    const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
    const syncable::Id& pre_commit_id,
    std::set<syncable::Id>* conflicting_new_folder_ids,
    set<syncable::Id>* deleted_folders,
    SyncerSession* const session) {

  // Downcast to the engine's wrapper type around the protobuf message.
  const CommitResponse_EntryResponse& server_entry =
      *static_cast<const CommitResponse_EntryResponse*>(&pb_server_entry);
  MutableEntry local_entry(trans, GET_BY_ID, pre_commit_id);
  CHECK(local_entry.good());
  // Remember whether the entry was mid-commit before clearing the flag;
  // the success path uses this to decide on a name-aside.
  bool syncing_was_set = local_entry.Get(SYNCING);
  local_entry.Put(SYNCING, false);

  CommitResponse::RESPONSE_TYPE response = (CommitResponse::RESPONSE_TYPE)
      server_entry.response_type();
  if (!CommitResponse::RESPONSE_TYPE_IsValid(response)) {
    LOG(ERROR) << "Commit response has unknown response type! Possibly out "
               "of date client?";
    return CommitResponse::INVALID_MESSAGE;
  }
  if (CommitResponse::TRANSIENT_ERROR == response) {
    LOG(INFO) << "Transient Error Committing: " << local_entry;
    LogServerError(server_entry);
    return CommitResponse::TRANSIENT_ERROR;
  }
  if (CommitResponse::INVALID_MESSAGE == response) {
    LOG(ERROR) << "Error Commiting: " << local_entry;
    LogServerError(server_entry);
    return response;
  }
  if (CommitResponse::CONFLICT == response) {
    LOG(INFO) << "Conflict Committing: " << local_entry;
    // Track conflicting brand-new folders so children committed alongside
    // them aren't flagged as hard errors (see caller's comment).
    if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
      conflicting_new_folder_ids->insert(pre_commit_id);
    }
    return response;
  }
  if (CommitResponse::RETRY == response) {
    LOG(INFO) << "Retry Committing: " << local_entry;
    return response;
  }
  if (CommitResponse::OVER_QUOTA == response) {
    LOG(INFO) << "Hit Quota Committing: " << local_entry;
    return response;
  }
  if (!server_entry.has_id_string()) {
    LOG(ERROR) << "Commit response has no id";
    return CommitResponse::INVALID_MESSAGE;
  }

  // implied by the IsValid call above, but here for clarity.
  DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
  // Check to see if we've been given the ID of an existing entry. If so treat
  // it as an error response and retry later.
  if (pre_commit_id != server_entry.id()) {
    Entry e(trans, GET_BY_ID, server_entry.id());
    if (e.good()) {
      LOG(ERROR) << "Got duplicate id when commiting id: " << pre_commit_id <<
          ". Treating as an error return";
      return CommitResponse::INVALID_MESSAGE;
    }
  }

  if (server_entry.version() == 0) {
    LOG(WARNING) << "Server returned a zero version on a commit response.";
  }

  ProcessSuccessfulCommitResponse(trans, server_entry, pre_commit_id,
                                  &local_entry, syncing_was_set,
                                  deleted_folders, session);
  return response;
}
+
+// Applies a single successful commit response to |local_entry|: adopts the
+// server-assigned version (and possibly a new permanent ID), performs a
+// commit-time name aside if the server renamed the entry, applies server
+// positioning, clears sync flags, and records deleted folders for the caller.
+void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+    syncable::WriteTransaction* trans,
+    const CommitResponse_EntryResponse& server_entry,
+    const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+    bool syncing_was_set, set<syncable::Id>* deleted_folders,
+    SyncerSession* const session) {
+  int64 old_version = local_entry->Get(BASE_VERSION);
+  int64 new_version = server_entry.version();
+  bool bad_commit_version = false;
+  // TODO(sync): The !server_entry.has_id_string() clauses below were
+  // introduced when working with the new protocol.
+  // A newly committed entry must not come back with version 0; an existing
+  // entry's version must never move backwards.
+  if (!pre_commit_id.ServerKnows())
+    bad_commit_version = 0 == new_version;
+  else
+    bad_commit_version = old_version > new_version;
+  if (bad_commit_version) {
+    LOG(ERROR) << "Bad version in commit return for " << *local_entry <<
+        " new_id:" << server_entry.id() << " new_version:" <<
+        server_entry.version();
+    return;
+  }
+  // The server may assign a permanent ID to an entry that was committed
+  // with a client-generated (negative) ID.
+  if (server_entry.id() != pre_commit_id) {
+    if (pre_commit_id.ServerKnows()) {
+      // TODO(sync): In future it's possible that we'll want the opportunity
+      // to do a server triggered move aside here.
+      LOG(ERROR) << " ID change but not committing a new entry. " <<
+          pre_commit_id << " became " << server_entry.id() << ".";
+      return;
+    }
+    if (!server_entry.id().ServerKnows()) {
+      LOG(ERROR) << " New entries id < 0." << pre_commit_id << " became " <<
+          server_entry.id() << ".";
+      return;
+    }
+    MutableEntry same_id(trans, GET_BY_ID, server_entry.id());
+    // We should trap this before this function.
+    CHECK(!same_id.good()) << "ID clash with id " << server_entry.id() <<
+        " during commit " << same_id;
+    SyncerUtil::ChangeEntryIDAndUpdateChildren(
+        trans, local_entry, server_entry.id());
+    LOG(INFO) << "Changing ID to " << server_entry.id();
+  }
+
+  local_entry->Put(BASE_VERSION, new_version);
+  LOG(INFO) << "Commit is changing base version of " <<
+      local_entry->Get(ID) << " to: " << new_version;
+
+  if (local_entry->Get(IS_UNAPPLIED_UPDATE)) {
+    // This is possible, but very unlikely.
+    local_entry->Put(IS_UNAPPLIED_UPDATE, false);
+  }
+
+  if (server_entry.has_name()) {
+    if (syncing_was_set) {
+      PerformCommitTimeNameAside(trans, server_entry, local_entry);
+    } else {
+      // IS_UNSYNCED will ensure that this entry gets committed again,
+      // even if we skip this name aside. IS_UNSYNCED was probably previously
+      // set, but let's just set it anyway.
+      local_entry->Put(IS_UNSYNCED, true);
+      LOG(INFO) << "Skipping commit time name aside because" <<
+          " entry was changed during commit.";
+    }
+  }
+
+  if (syncing_was_set && server_entry.has_position_in_parent()) {
+    // The server has the final say on positioning, so apply the absolute
+    // position that it returns.
+    local_entry->Put(SERVER_POSITION_IN_PARENT,
+                     server_entry.position_in_parent());
+
+    // We just committed successfully, so we assume that the position
+    // value we got applies to the PARENT_ID we submitted.
+    syncable::Id new_prev = SyncerUtil::ComputePrevIdFromServerPosition(
+        trans, local_entry, local_entry->Get(PARENT_ID));
+    if (!local_entry->PutPredecessor(new_prev)) {
+      LOG(WARNING) << "PutPredecessor failed after successful commit";
+    }
+  }
+
+  if (syncing_was_set) {
+    local_entry->Put(IS_UNSYNCED, false);
+  }
+  // Report deleted folders so the caller can clean up their children.
+  if (local_entry->Get(IS_DIR) && local_entry->Get(IS_DEL)) {
+    deleted_folders->insert(local_entry->Get(ID));
+  }
+}
+
+// If the server committed this entry under a different name than the local
+// one, rename the local entry to the server's (OS-legal, noncolliding) name.
+void ProcessCommitResponseCommand::PerformCommitTimeNameAside(
+    syncable::WriteTransaction* trans,
+    const CommitResponse_EntryResponse& server_entry,
+    syncable::MutableEntry* local_entry) {
+  Name local_name(local_entry->GetName());
+  SyncName committed_name =
+      SyncerProtoUtil::NameFromCommitEntryResponse(server_entry);
+  LOG(INFO) << "Server provided committed name:" << committed_name.value();
+
+  // Nothing to do when the server sent no name, or when it matches ours.
+  const bool name_differs =
+      static_cast<SyncName&>(local_name) != committed_name;
+  if (committed_name.value().empty() || !name_differs)
+    return;
+
+  LOG(INFO) << "Server name differs from local name, attempting"
+            << " commit time name aside.";
+
+  DBName legal_name(committed_name.value());
+  legal_name.MakeOSLegal();
+
+  // This is going to produce ~1 names instead of (Edited) names.
+  // Since this should be EXTREMELY rare, we do this for now.
+  legal_name.MakeNoncollidingForEntry(
+      trans, local_entry->Get(SERVER_PARENT_ID), local_entry);
+
+  CHECK(!legal_name.empty());
+
+  LOG(INFO) << "Server commit moved aside entry: " << local_name.db_value()
+            << " to new name " << legal_name;
+
+  // Should be safe since we're in a "commit lock."
+  local_entry->PutName(Name::FromDBNameAndSyncName(legal_name,
+                                                   committed_name));
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/process_commit_response_command.h b/chrome/browser/sync/engine/process_commit_response_command.h
new file mode 100644
index 0000000..a025428
--- /dev/null
+++ b/chrome/browser/sync/engine/process_commit_response_command.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
+
+#include <set>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+
+namespace syncable {
+class Id;
+class WriteTransaction;
+class MutableEntry;
+}
+
+namespace browser_sync {
+
+// A syncer command that interprets the server's response to a commit
+// message, updating local entries with server-assigned versions and IDs.
+class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
+ public:
+  ProcessCommitResponseCommand();
+  virtual ~ProcessCommitResponseCommand();
+
+  // ModelChangingSyncerCommand implementation.
+  virtual void ModelChangingExecuteImpl(SyncerSession* session);
+
+ private:
+  // Handles the response for one committed item; on success delegates to
+  // ProcessSuccessfulCommitResponse().
+  CommitResponse::RESPONSE_TYPE ProcessSingleCommitResponse(
+      syncable::WriteTransaction* trans,
+      const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
+      const syncable::Id& pre_commit_id,
+      std::set<syncable::Id>* conflicting_new_directory_ids,
+      std::set<syncable::Id>* deleted_folders,
+      SyncerSession* const session);
+
+  // Applies a successful commit: version, possible ID change, name aside,
+  // server positioning, and sync-flag cleanup.
+  void ProcessSuccessfulCommitResponse(syncable::WriteTransaction* trans,
+      const CommitResponse_EntryResponse& server_entry,
+      const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+      bool syncing_was_set, std::set<syncable::Id>* deleted_folders,
+      SyncerSession* const session);
+
+  // Renames the local entry when the server committed it under a
+  // different name.
+  void PerformCommitTimeNameAside(
+      syncable::WriteTransaction* trans,
+      const CommitResponse_EntryResponse& server_entry,
+      syncable::MutableEntry* local_entry);
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
new file mode 100644
index 0000000..6d5973c
--- /dev/null
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/process_updates_command.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+// The command carries no state of its own; construction/destruction are
+// trivial.
+ProcessUpdatesCommand::ProcessUpdatesCommand() {}
+ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
+
+// Copies every verified update into the server fields of the corresponding
+// local entry and advances the directory's last-sync timestamp as far as
+// the processed (or all-skipped) updates allow.
+void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
+  syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+  if (!dir.good()) {
+    LOG(ERROR) << "Scoped dir lookup failed!";
+    return;
+  }
+  SyncerStatus status(session);
+
+  // Bind by const reference: copying the GetUpdatesResponse proto here
+  // would duplicate every downloaded entity.
+  const GetUpdatesResponse& updates =
+      session->update_response().get_updates();
+  const int update_count = updates.entries_size();
+
+  LOG(INFO) << "Get updates from ts " << dir->last_sync_timestamp() <<
+      " returned " << update_count << " updates.";
+
+  if (updates.has_newest_timestamp()) {
+    int64 newest_timestamp = updates.newest_timestamp();
+    LOG(INFO) << "Newest Timestamp:" << newest_timestamp;
+    status.set_servers_latest_timestamp(newest_timestamp);
+  }
+
+  int64 new_timestamp = 0;
+  if (updates.has_new_timestamp()) {
+    new_timestamp = updates.new_timestamp();
+    LOG(INFO) << "Get Updates got new timestamp: " << new_timestamp;
+    // No entries in the batch: adopt the server timestamp and finish early.
+    if (0 == update_count) {
+      if (new_timestamp > dir->last_sync_timestamp()) {
+        dir->set_last_sync_timestamp(new_timestamp);
+        session->set_timestamp_dirty();
+      }
+      return;
+    }
+  }
+
+  if (0 == status.servers_latest_timestamp()) {
+    // Hack since new server never gives us the server's latest
+    // timestamp. But if a getupdates returns zero, then we know we
+    // are up to date.
+    status.set_servers_latest_timestamp(status.current_sync_timestamp());
+  }
+  // If we have updates that are ALL supposed to be skipped, we don't want
+  // to get them again. In fact, if the account's final updates are all
+  // supposed to be skipped and we never step past them, we would sync
+  // forever.
+  int64 latest_skip_timestamp = 0;
+  bool any_non_skip_results = false;
+  vector<VerifiedUpdate>::iterator it;
+  for (it = session->VerifiedUpdatesBegin();
+       it != session->VerifiedUpdatesEnd();
+       ++it) {
+    // Reference, not copy: SyncEntity protos can be large.
+    const sync_pb::SyncEntity& update = it->second;
+
+    // NOTE(review): this flag reflects only the CURRENT item, so a skipped
+    // item following a non-skip restarts latest_skip_timestamp tracking;
+    // the comment above suggests "any so far" may have been intended --
+    // confirm before changing.
+    any_non_skip_results = (it->first != VERIFY_SKIP);
+    if (!any_non_skip_results) {
+      // ALL updates were to be skipped, including this one
+      if (update.sync_timestamp() > latest_skip_timestamp) {
+        latest_skip_timestamp = update.sync_timestamp();
+      }
+    } else {
+      latest_skip_timestamp = 0;
+    }
+
+    if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
+      continue;
+    switch (ProcessUpdate(dir, update)) {
+      case SUCCESS_PROCESSED:
+      case SUCCESS_STORED:
+        // We can update the timestamp because we store the update
+        // even if we can't apply it now.
+        if (update.sync_timestamp() > new_timestamp)
+          new_timestamp = update.sync_timestamp();
+        break;
+    }
+  }
+
+  if (latest_skip_timestamp > new_timestamp)
+    new_timestamp = latest_skip_timestamp;
+
+  if (new_timestamp > dir->last_sync_timestamp()) {
+    dir->set_last_sync_timestamp(new_timestamp);
+    session->set_timestamp_dirty();
+  }
+
+  status.zero_consecutive_problem_get_updates();
+  status.zero_consecutive_errors();
+  status.set_current_sync_timestamp(dir->last_sync_timestamp());
+  status.set_syncing(true);
+  return;
+}
+
+namespace {
+// Returns true if |entry| still passes update-consistency verification
+// against the local entry |same_id| and may therefore be processed.
+bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
+                   syncable::MutableEntry* same_id) {
+  const bool entry_deleted = entry.has_deleted() && entry.deleted();
+  const bool entry_is_directory = entry.IsFolder();
+  const bool entry_is_bookmark = entry.has_bookmarkdata();
+  return SyncerUtil::VerifyUpdateConsistency(trans, entry, same_id,
+                                             entry_deleted,
+                                             entry_is_directory,
+                                             entry_is_bookmark) ==
+         VERIFY_SUCCESS;
+}
+}  // anonymous namespace
+
+// TODO(sync): Refactor this code.
+// Processes a single update without touching global state: verifies the
+// entry is still relevant, then stores its data in the server fields of
+// the corresponding local entry (a later step moves it to local fields).
+ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+    const syncable::ScopedDirLookup& dir, const sync_pb::SyncEntity& pb_entry) {
+  using namespace syncable;
+  // SyncEntity is the browser_sync wrapper over the protobuf type, so a
+  // reference downcast is safe and clearer than the pointer round-trip.
+  const SyncEntity& entry = static_cast<const SyncEntity&>(pb_entry);
+  syncable::Id id = entry.id();
+  SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
+
+  WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+
+  // Ensure a local entry exists for this id.
+  SyncerUtil::CreateNewEntry(&trans, id);
+
+  // We take a two step approach. First we store the entries data in the
+  // server fields of a local entry and then move the data to the local fields
+  MutableEntry update_entry(&trans, GET_BY_ID, id);
+  // TODO(sync): do we need to run ALL these checks, or is a mere version
+  // check good enough?
+  if (!ReverifyEntry(&trans, entry, &update_entry)) {
+    return SUCCESS_PROCESSED;  // the entry has become irrelevant
+  }
+
+  SyncerUtil::UpdateServerFieldsFromUpdate(&update_entry, entry, name);
+
+  // Sanity check: when versions agree and nothing is unsynced, the server
+  // and local views of the entry must match.
+  if (update_entry.Get(SERVER_VERSION) == update_entry.Get(BASE_VERSION) &&
+      !update_entry.Get(IS_UNSYNCED)) {
+    CHECK(SyncerUtil::ServerAndLocalEntriesMatch(
+        &update_entry)) << update_entry;
+  }
+  return SUCCESS_PROCESSED;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/process_updates_command.h b/chrome/browser/sync/engine/process_updates_command.h
new file mode 100644
index 0000000..a6cee34
--- /dev/null
+++ b/chrome/browser/sync/engine/process_updates_command.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+
+namespace syncable {
+class ScopedDirLookup;
+}
+
+namespace sync_pb {
+class SyncEntity;
+}
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// A syncer command for processing updates.
+//
+// Preconditions - updates in the SyncerSession have been downloaded
+// and verified.
+//
+// Postconditions - All of the verified SyncEntity data will be copied to
+// the server fields of the corresponding syncable entries.
+class ProcessUpdatesCommand : public ModelChangingSyncerCommand {
+ public:
+  ProcessUpdatesCommand();
+  virtual ~ProcessUpdatesCommand();
+
+  // ModelChangingSyncerCommand implementation: processes all verified
+  // updates in the session.
+  virtual void ModelChangingExecuteImpl(SyncerSession* session);
+  // Processes one update, copying its data into the server fields of the
+  // corresponding local entry.
+  ServerUpdateProcessingResult ProcessUpdate(
+      const syncable::ScopedDirLookup& dir,
+      const sync_pb::SyncEntity& pb_entry);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_PROCESS_UPDATES_COMMAND_H_
diff --git a/chrome/browser/sync/engine/resolve_conflicts_command.cc b/chrome/browser/sync/engine/resolve_conflicts_command.cc
new file mode 100644
index 0000000..6caf9b4
--- /dev/null
+++ b/chrome/browser/sync/engine/resolve_conflicts_command.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/resolve_conflicts_command.h"
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+namespace browser_sync {
+
+// The command carries no state of its own; construction/destruction are
+// trivial.
+ResolveConflictsCommand::ResolveConflictsCommand() {}
+ResolveConflictsCommand::~ResolveConflictsCommand() {}
+
+// Hands the session's conflicts to the resolver and records on the
+// session whether any were resolved.
+void ResolveConflictsCommand::ModelChangingExecuteImpl(
+    SyncerSession* session) {
+  ConflictResolver* resolver = session->resolver();
+  if (!resolver)
+    return;
+  syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+  if (!dir.good())
+    return;
+  ConflictResolutionView conflict_view(session);
+  const bool resolved =
+      resolver->ResolveConflicts(dir, &conflict_view, session);
+  session->set_conflicts_resolved(resolved);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/resolve_conflicts_command.h b/chrome/browser/sync/engine/resolve_conflicts_command.h
new file mode 100644
index 0000000..a75c631
--- /dev/null
+++ b/chrome/browser/sync/engine/resolve_conflicts_command.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
+
+
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
+#include "base/basictypes.h"
+
+namespace syncable {
+class WriteTransaction;
+class MutableEntry;
+class Id;
+}
+namespace browser_sync {
+
+class SyncerSession;
+
+// A syncer command that hands the session's conflicts to the
+// ConflictResolver and records whether they were resolved.
+class ResolveConflictsCommand : public ModelChangingSyncerCommand {
+ public:
+  ResolveConflictsCommand();
+  virtual ~ResolveConflictsCommand();
+
+  // ModelChangingSyncerCommand implementation.
+  virtual void ModelChangingExecuteImpl(SyncerSession* session);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ResolveConflictsCommand);
+};
+} // namespace browser_sync
+
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/sync_cycle_state.h b/chrome/browser/sync/engine/sync_cycle_state.h
new file mode 100644
index 0000000..7d38670c
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_cycle_state.h
@@ -0,0 +1,253 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The sync process consists of a sequence of sync cycles, each of which
+// (hopefully) moves the client into closer synchronization with the server.
+// This class holds state that is pertinent to a single sync cycle.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
+
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+
+namespace syncable {
+class WriteTransaction;
+class Id;
+} // namespace syncable
+
+namespace browser_sync {
+
+typedef std::pair<VerifyResult, sync_pb::SyncEntity> VerifiedUpdate;
+typedef std::pair<UpdateAttemptResponse, syncable::Id> AppliedUpdate;
+
+// This is the type declaration for the eventsys channel that the syncer
+// uses to send events to other system components.
+struct SyncerEvent;
+
+// SyncCycleState holds the entire state of a single sync cycle;
+// GetUpdates, Commit, and Conflict Resolution. After said cycle, the
+// State may contain items that were unable to be processed because of
+// errors.
+class SyncCycleState {
+ public:
+  // Initializer order matches the member declaration order below
+  // (avoids -Wreorder).
+  SyncCycleState()
+      : write_transaction_(NULL),
+        conflict_sets_built_(false),
+        conflicts_resolved_(false),
+        items_committed_(false),
+        over_quota_(false),
+        timestamp_dirty_(false),
+        dirty_(true) {}
+
+  void set_update_response(const ClientToServerResponse& update_response) {
+    update_response_.CopyFrom(update_response);
+  }
+
+  const ClientToServerResponse& update_response() const {
+    return update_response_;
+  }
+
+  void set_commit_response(const ClientToServerResponse& commit_response) {
+    commit_response_.CopyFrom(commit_response);
+  }
+
+  const ClientToServerResponse& commit_response() const {
+    return commit_response_;
+  }
+
+  // Records the verification outcome for a downloaded update.
+  void AddVerifyResult(const VerifyResult& verify_result,
+                       const sync_pb::SyncEntity& entity) {
+    verified_updates_.push_back(std::make_pair(verify_result, entity));
+  }
+
+  bool HasVerifiedUpdates() const {
+    return !verified_updates_.empty();
+  }
+
+  // Log a successful or failing update attempt.
+  void AddAppliedUpdate(const UpdateAttemptResponse& response,
+                        const syncable::Id& id) {
+    applied_updates_.push_back(std::make_pair(response, id));
+  }
+
+  bool HasAppliedUpdates() const {
+    return !applied_updates_.empty();
+  }
+
+  std::vector<AppliedUpdate>::iterator AppliedUpdatesBegin() {
+    return applied_updates_.begin();
+  }
+
+  std::vector<VerifiedUpdate>::iterator VerifiedUpdatesBegin() {
+    return verified_updates_.begin();
+  }
+
+  std::vector<AppliedUpdate>::iterator AppliedUpdatesEnd() {
+    return applied_updates_.end();
+  }
+
+  std::vector<VerifiedUpdate>::iterator VerifiedUpdatesEnd() {
+    return verified_updates_.end();
+  }
+
+  // Returns the number of update application attempts. This includes
+  // both failures and successes.
+  int AppliedUpdatesSize() const {
+    return static_cast<int>(applied_updates_.size());
+  }
+
+  // Count the number of successful update applications that have happened
+  // this cycle. Note that if an item is successfully applied twice,
+  // it will be double counted here.
+  int SuccessfullyAppliedUpdateCount() const {
+    int count = 0;
+    for (std::vector<AppliedUpdate>::const_iterator it =
+             applied_updates_.begin();
+         it != applied_updates_.end();
+         ++it) {
+      if (it->first == SUCCESS)
+        count++;
+    }
+    return count;
+  }
+
+  int VerifiedUpdatesSize() const {
+    return static_cast<int>(verified_updates_.size());
+  }
+
+  const std::vector<int64>& unsynced_handles() const {
+    return unsynced_handles_;
+  }
+
+  void set_unsynced_handles(const std::vector<int64>& unsynced_handles) {
+    UpdateDirty(unsynced_handles != unsynced_handles_);
+    unsynced_handles_ = unsynced_handles;
+  }
+
+  int64 unsynced_count() const {
+    return static_cast<int64>(unsynced_handles_.size());
+  }
+
+  const std::vector<syncable::Id>& commit_ids() const { return commit_ids_; }
+
+  void set_commit_ids(const std::vector<syncable::Id>& commit_ids) {
+    commit_ids_ = commit_ids;
+  }
+
+  bool commit_ids_empty() const { return commit_ids_.empty(); }
+
+  // The write transaction must be deleted by the caller of this function.
+  void set_write_transaction(syncable::WriteTransaction* write_transaction) {
+    DCHECK(!write_transaction_) << "Forgot to clear the write transaction.";
+    write_transaction_ = write_transaction;
+  }
+
+  syncable::WriteTransaction* write_transaction() const {
+    return write_transaction_;
+  }
+
+  bool has_open_write_transaction() { return write_transaction_ != NULL; }
+
+  // sets the write transaction to null, but doesn't free the memory.
+  void ClearWriteTransaction() { write_transaction_ = NULL; }
+
+  ClientToServerMessage* commit_message() { return &commit_message_; }
+
+  // Takes the message by const reference to avoid copying the protobuf;
+  // call-compatible with the previous by-value signature.
+  void set_commit_message(const ClientToServerMessage& message) {
+    commit_message_.CopyFrom(message);
+  }
+
+  void set_conflict_sets_built(bool b) {
+    conflict_sets_built_ = b;
+  }
+
+  bool conflict_sets_built() const {
+    return conflict_sets_built_;
+  }
+
+  void set_conflicts_resolved(bool b) {
+    conflicts_resolved_ = b;
+  }
+
+  bool conflicts_resolved() const {
+    return conflicts_resolved_;
+  }
+
+  void set_over_quota(bool b) {
+    UpdateDirty(b != over_quota_);
+    over_quota_ = b;
+  }
+
+  bool over_quota() const {
+    return over_quota_;
+  }
+
+  void set_items_committed(bool b) { items_committed_ = b; }
+
+  void set_item_committed() { items_committed_ = true; }
+
+  bool items_committed() const { return items_committed_; }
+
+  // Returns true if this object has been modified since last SetClean() call
+  bool IsDirty() const { return dirty_; }
+
+  // Call to tell this status object that its new state has been seen
+  void SetClean() { dirty_ = false; }
+
+  // Indicate that we've made a change to directory timestamp.
+  void set_timestamp_dirty() {
+    timestamp_dirty_ = true;
+  }
+
+  bool is_timestamp_dirty() const {
+    return timestamp_dirty_;
+  }
+
+ private:
+  void UpdateDirty(bool new_info) { dirty_ |= new_info; }
+
+  // Download-updates and commit fill in these protos:
+  ClientToServerResponse update_response_;
+  ClientToServerResponse commit_response_;
+  ClientToServerMessage commit_message_;
+
+  syncable::WriteTransaction* write_transaction_;
+  std::vector<int64> unsynced_handles_;
+  std::vector<syncable::Id> commit_ids_;
+
+  // At a certain point during the sync process we'll want to build the
+  // conflict sets. This variable tracks whether or not that has happened.
+  bool conflict_sets_built_;
+  bool conflicts_resolved_;
+  bool items_committed_;
+  bool over_quota_;
+
+  // If we've set the timestamp to a new value during this cycle.
+  bool timestamp_dirty_;
+
+  bool dirty_;
+
+  // The verification result for each downloaded update, including updates
+  // that failed verification.
+  std::vector<VerifiedUpdate> verified_updates_;
+
+  // Stores the result of the various ApplyUpdate attempts we've made.
+  // May contain duplicate entries.
+  std::vector<AppliedUpdate> applied_updates_;
+
+  DISALLOW_COPY_AND_ASSIGN(SyncCycleState);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNC_CYCLE_STATE_H_
diff --git a/chrome/browser/sync/engine/sync_process_state.cc b/chrome/browser/sync/engine/sync_process_state.cc
new file mode 100644
index 0000000..6f76eee
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_process_state.cc
@@ -0,0 +1,325 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#include "chrome/browser/sync/engine/sync_process_state.h"
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+using std::map;
+using std::set;
+using std::vector;
+
+namespace browser_sync {
+
+// Copy constructor: the pointer/name members are copied in the initializer
+// list, then operator= copies the counters and deep-copies the conflict
+// sets.
+SyncProcessState::SyncProcessState(const SyncProcessState& counts)
+    : account_name_(counts.account_name_),
+      dirman_(counts.dirman_),
+      syncer_event_channel_(counts.syncer_event_channel_),
+      connection_manager_(counts.connection_manager_),
+      resolver_(counts.resolver_),
+      model_safe_worker_(counts.model_safe_worker_) {
+  *this = counts;
+}
+
+// Main constructor. Derives initial syncing/invalid-store state from the
+// directory's recorded sync progress.
+SyncProcessState::SyncProcessState(syncable::DirectoryManager* dirman,
+    PathString account_name,
+    ServerConnectionManager* connection_manager,
+    ConflictResolver* const resolver,
+    SyncerEventChannel* syncer_event_channel,
+    ModelSafeWorker* model_safe_worker)
+    : account_name_(account_name),
+      dirman_(dirman),
+      syncer_event_channel_(syncer_event_channel),
+      connection_manager_(connection_manager),
+      model_safe_worker_(model_safe_worker),
+      resolver_(resolver),
+      syncer_stuck_(false),
+      num_sync_cycles_(0),
+      silenced_until_(0),
+      error_rate_(0),
+      current_sync_timestamp_(0),
+      servers_latest_timestamp_(0),
+      error_commits_(0),
+      stalled_commits_(0),
+      conflicting_commits_(0),
+      consecutive_problem_get_updates_(0),
+      consecutive_problem_commits_(0),
+      consecutive_transient_error_commits_(0),
+      consecutive_errors_(0),
+      successful_commits_(0),
+      dirty_(false),
+      auth_dirty_(false),
+      auth_failed_(false),
+      invalid_store_(false) {
+  syncable::ScopedDirLookup dir(dirman_, account_name_);
+
+  // The directory must be good here.
+  // NOTE(review): a failed lookup is only logged; the dereferences below
+  // would still run -- presumably callers guarantee a good directory, but
+  // confirm.
+  LOG_IF(ERROR, !dir.good());
+  syncing_ = !dir->initial_sync_ended();
+
+  // If we have never synced then we are invalid until made otherwise.
+  set_invalid_store((dir->last_sync_timestamp() <= 0));
+}
+
+// Assignment: copies all counters and id sets, and makes a deep copy of
+// the conflict sets (each ConflictSet is heap-allocated, so the existing
+// ones are freed first via CleanupSets()).
+SyncProcessState& SyncProcessState::operator=(const SyncProcessState& counts) {
+  if (this == &counts) {
+    return *this;
+  }
+  CleanupSets();
+  num_sync_cycles_ = counts.num_sync_cycles_;
+  silenced_until_ = counts.silenced_until_;
+  error_rate_ = counts.error_rate_;
+  current_sync_timestamp_ = counts.current_sync_timestamp_;
+  servers_latest_timestamp_ = counts.servers_latest_timestamp_;
+  error_commits_ = counts.error_commits_;
+  stalled_commits_ = counts.stalled_commits_;
+  conflicting_commits_ = counts.conflicting_commits_;
+  consecutive_problem_get_updates_ =
+      counts.consecutive_problem_get_updates_;
+  consecutive_problem_commits_ =
+      counts.consecutive_problem_commits_;
+  consecutive_transient_error_commits_ =
+      counts.consecutive_transient_error_commits_;
+  consecutive_errors_ = counts.consecutive_errors_;
+  conflicting_item_ids_ = counts.conflicting_item_ids_;
+  blocked_item_ids_ = counts.blocked_item_ids_;
+  successful_commits_ = counts.successful_commits_;
+  syncer_stuck_ = counts.syncer_stuck_;
+
+  // TODO(chron): Is it safe to set these?
+  //
+  // Pointers:
+  //
+  // connection_manager_
+  // account_name_
+  // dirman_
+  // model_safe_worker_
+  // syncer_event_channel_
+  //
+  // Status members:
+  // syncing_
+  // invalid_store_
+  // syncer_stuck_
+  // got_zero_updates_
+  // dirty_
+  // auth_dirty_
+  // auth_failed_
+
+  // Deep-copy each conflict set and rebuild the id -> set index so the
+  // copies point at the new allocations, not the source's.
+  for (set<ConflictSet*>::const_iterator it =
+           counts.ConflictSetsBegin();
+       counts.ConflictSetsEnd() != it; ++it) {
+    const ConflictSet* old_set = *it;
+    ConflictSet* const new_set = new ConflictSet(*old_set);
+    conflict_sets_.insert(new_set);
+
+    for (ConflictSet::const_iterator setit = new_set->begin();
+         new_set->end() != setit; ++setit) {
+      id_to_conflict_set_[*setit] = new_set;
+    }
+  }
+  return *this;
+}
+
+// status maintenance functions
+// Each setter raises the dirty flag only when the value actually changes.
+void SyncProcessState::set_invalid_store(const bool val) {
+  UpdateDirty(val != invalid_store_);
+  invalid_store_ = val;
+}
+
+void SyncProcessState::set_syncer_stuck(const bool val) {
+  UpdateDirty(val != syncer_stuck_);
+  syncer_stuck_ = val;
+}
+
+void SyncProcessState::set_syncing(const bool val) {
+  UpdateDirty(val != syncing_);
+  syncing_ = val;
+}
+
+// Returns true if got zero updates has been set on the directory.
+bool SyncProcessState::IsShareUsable() const {
+  syncable::ScopedDirLookup dir(dirman(), account_name());
+  if (!dir.good()) {
+    LOG(ERROR) << "Scoped dir lookup failed!";
+    return false;
+  }
+  return dir->initial_sync_ended();
+}
+
+// Timestamp and commit-count setters; dirty only on change.
+void SyncProcessState::set_current_sync_timestamp(const int64 val) {
+  UpdateDirty(val != current_sync_timestamp_);
+  current_sync_timestamp_ = val;
+}
+
+void SyncProcessState::set_servers_latest_timestamp(const int64 val) {
+  UpdateDirty(val != servers_latest_timestamp_);
+  servers_latest_timestamp_ = val;
+}
+
+void SyncProcessState::set_error_commits(const int val) {
+  UpdateDirty(val != error_commits_);
+  error_commits_ = val;
+}
+
+// BUGFIX: these two setters previously wrote each other's backing member
+// (set_stalled_commits stored into conflicting_commits_ and vice versa);
+// each now updates its own field.
+void SyncProcessState::set_stalled_commits(const int val) {
+  UpdateDirty(val != stalled_commits_);
+  stalled_commits_ = val;
+}
+
+void SyncProcessState::set_conflicting_commits(const int val) {
+  UpdateDirty(val != conflicting_commits_);
+  conflicting_commits_ = val;
+}
+
+// WEIRD COUNTER functions
+// "Consecutive" counters grow on each failure and are reset by the zero_*
+// functions; every mutation feeds the dirty flag.
+void SyncProcessState::increment_consecutive_problem_get_updates() {
+  UpdateDirty(true);
+  ++consecutive_problem_get_updates_;
+}
+
+void SyncProcessState::zero_consecutive_problem_get_updates() {
+  // Dirty only if the counter was actually non-zero.
+  UpdateDirty(0 != consecutive_problem_get_updates_);
+  consecutive_problem_get_updates_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_problem_commits() {
+  UpdateDirty(true);
+  ++consecutive_problem_commits_;
+}
+
+void SyncProcessState::zero_consecutive_problem_commits() {
+  UpdateDirty(0 != consecutive_problem_commits_);
+  consecutive_problem_commits_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_transient_error_commits_by(
+    int value) {
+  UpdateDirty(0 != value);
+  consecutive_transient_error_commits_ += value;
+}
+
+void SyncProcessState::zero_consecutive_transient_error_commits() {
+  UpdateDirty(0 != consecutive_transient_error_commits_);
+  consecutive_transient_error_commits_ = 0;
+}
+
+void SyncProcessState::increment_consecutive_errors_by(int value) {
+  UpdateDirty(0 != value);
+  consecutive_errors_ += value;
+}
+
+void SyncProcessState::zero_consecutive_errors() {
+  UpdateDirty(0 != consecutive_errors_);
+  consecutive_errors_ = 0;
+}
+
+void SyncProcessState::increment_successful_commits() {
+  UpdateDirty(true);
+  ++successful_commits_;
+}
+
+void SyncProcessState::zero_successful_commits() {
+  UpdateDirty(0 != successful_commits_);
+  successful_commits_ = 0;
+}
+
+// Methods for managing error rate tracking
+// error_rate_ is an exponentially-weighted average scaled to 65536: each
+// new error moves it a quarter of the way toward the maximum.
+void SyncProcessState::TallyNewError() {
+  UpdateDirty(true);
+  error_rate_ += (65536 - error_rate_) >> 2;
+}
+
+// NOTE(review): identical to TallyNewError(); presumably a "big" error was
+// meant to apply a larger step -- confirm intent.
+void SyncProcessState::TallyBigNewError() {
+  UpdateDirty(true);
+  error_rate_ += (65536 - error_rate_) >> 2;
+}
+
+// Decays the error rate by a quarter; does not touch the dirty flag.
+void SyncProcessState::ForgetOldError() {
+  error_rate_ -= error_rate_ >> 2;
+}
+
+// NOTE(review): despite the name this only feeds the threshold comparison
+// into UpdateDirty(); it reports nothing and resets nothing -- confirm.
+void SyncProcessState::CheckErrorRateTooHigh() {
+  UpdateDirty(error_rate_ > ERROR_THRESHOLD);
+}
+
+
+// Merges the conflict sets containing |id1| and |id2| into one, creating,
+// extending, or unioning heap-allocated sets as needed, and keeps the
+// id -> set index consistent.
+void SyncProcessState::MergeSets(const syncable::Id& id1,
+                                 const syncable::Id& id2) {
+  // There are no single item sets, we just leave those entries == 0
+  // (map operator[] default-inserts a NULL pointer for unseen ids).
+  vector<syncable::Id>* set1 = id_to_conflict_set_[id1];
+  vector<syncable::Id>* set2 = id_to_conflict_set_[id2];
+  vector<syncable::Id>* rv = 0;
+  if (0 == set1 && 0 == set2) {
+    // neither item currently has a set so we build one.
+    rv = new vector<syncable::Id>();
+    rv->push_back(id1);
+    if (id1 != id2) {
+      rv->push_back(id2);
+    } else {
+      LOG(WARNING) << "[BUG] Attempting to merge two identical conflict ids.";
+    }
+    conflict_sets_.insert(rv);
+  } else if (0 == set1) {
+    // add the item to the existing set.
+    rv = set2;
+    rv->push_back(id1);
+  } else if (0 == set2) {
+    // add the item to the existing set.
+    rv = set1;
+    rv->push_back(id2);
+  } else if (set1 == set2) {
+    // It's the same set already
+    return;
+  } else {
+    // merge the two sets.
+    rv = set1;
+    // point all the second sets id's back to the first.
+    vector<syncable::Id>::iterator i;
+    for (i = set2->begin() ; i != set2->end() ; ++i) {
+      id_to_conflict_set_[*i] = rv;
+    }
+    // copy the second set to the first.
+    rv->insert(rv->end(), set2->begin(), set2->end());
+    conflict_sets_.erase(set2);
+    delete set2;
+  }
+  id_to_conflict_set_[id1] = id_to_conflict_set_[id2] = rv;
+}
+
+// Frees every heap-allocated ConflictSet and empties both the set
+// container and the id -> set index.
+void SyncProcessState::CleanupSets() {
+  for (set<ConflictSet*>::iterator it = conflict_sets_.begin();
+       it != conflict_sets_.end(); ++it) {
+    delete *it;
+  }
+  conflict_sets_.clear();
+  id_to_conflict_set_.clear();
+}
+
+SyncProcessState::~SyncProcessState() {
+  CleanupSets();
+}
+
+// Records an auth failure; the auth-dirty flag is raised only on a
+// transition from the non-failed state.
+void SyncProcessState::AuthFailed() {
+  UpdateAuthDirty(!auth_failed_);
+  auth_failed_ = true;
+}
+
+// Records an auth success; the auth-dirty flag is raised only on a
+// transition from the failed state.
+void SyncProcessState::AuthSucceeded() {
+  UpdateAuthDirty(auth_failed_);
+  auth_failed_ = false;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/sync_process_state.h b/chrome/browser/sync/engine/sync_process_state.h
new file mode 100644
index 0000000..32c6808
--- /dev/null
+++ b/chrome/browser/sync/engine/sync_process_state.h
@@ -0,0 +1,384 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//
+// The sync process consists of a sequence of sync cycles, each of which
+// (hopefully) moves the client into closer synchronization with the server.
+// While SyncCycleState holds state that is pertinent to a single sync cycle,
+// this data structure holds state that must be passed from cycle to cycle.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
+
+#include <map>
+#include <set>
+#include <utility> // for pair<>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/port.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+class ConflictResolver;
+class ModelSafeWorker;
+
+class SyncProcessState {
+  FRIEND_TEST(SyncerSyncProcessState, MergeSetsTest);
+  FRIEND_TEST(SyncerTest, CopySyncProcessState);
+ public:
+  ~SyncProcessState();
+  SyncProcessState(
+      syncable::DirectoryManager* dirman,
+      PathString account_name,
+      ServerConnectionManager* connection_manager,
+      ConflictResolver* const resolver,
+      SyncerEventChannel* syncer_event_channel,
+      ModelSafeWorker* model_safe_worker);
+
+  // intentionally not 'explicit' b/c it's a copy ctor:
+  SyncProcessState(const SyncProcessState& counts);
+  SyncProcessState& operator=(const SyncProcessState& that);
+
+  PathString account_name() const { return account_name_; }
+
+  syncable::DirectoryManager* dirman() const { return dirman_; }
+
+  ServerConnectionManager* connection_manager()
+      const {
+    return connection_manager_;
+  }
+
+  ConflictResolver* resolver() const { return resolver_; }
+
+  ModelSafeWorker* model_safe_worker() { return model_safe_worker_; }
+
+  SyncerEventChannel* syncer_event_channel() const {
+    return syncer_event_channel_;
+  }
+
+  // Functions that deal with conflict set stuff
+  IdToConflictSetMap::const_iterator IdToConflictSetFind(
+      const syncable::Id& the_id) const {
+    return id_to_conflict_set_.find(the_id);
+  }
+
+  IdToConflictSetMap::const_iterator IdToConflictSetBegin() const {
+    return id_to_conflict_set_.begin();
+  }
+
+  IdToConflictSetMap::const_iterator IdToConflictSetEnd() const {
+    return id_to_conflict_set_.end();
+  }
+
+  IdToConflictSetMap::size_type IdToConflictSetSize() const {
+    return id_to_conflict_set_.size();
+  }
+
+  // NOTE: map::operator[] default-inserts a NULL entry when |the_id| is
+  // absent, which is why this accessor is non-const.
+  const ConflictSet* IdToConflictSetGet(const syncable::Id& the_id) {
+    return id_to_conflict_set_[the_id];
+  }
+
+  std::set<ConflictSet*>::const_iterator ConflictSetsBegin() const {
+    return conflict_sets_.begin();
+  }
+
+  std::set<ConflictSet*>::const_iterator ConflictSetsEnd() const {
+    return conflict_sets_.end();
+  }
+
+  std::set<ConflictSet*>::size_type ConflictSetsSize() const {
+    return conflict_sets_.size();
+  }
+
+  // Merges the conflict sets containing |set1| and |set2| into one set,
+  // creating or combining sets as needed.
+  void MergeSets(const syncable::Id& set1, const syncable::Id& set2);
+
+  // Deletes all heap-allocated conflict sets and clears the maps.
+  void CleanupSets();
+  // END conflict set functions
+
+  // item id set manipulation functions
+  bool HasConflictingItems() const {
+    return !conflicting_item_ids_.empty();
+  }
+
+  bool HasBlockedItems() const {
+    return !blocked_item_ids_.empty();
+  }
+
+  int ConflictingItemsSize() const {
+    return conflicting_item_ids_.size();
+  }
+
+  int BlockedItemsSize() const {
+    return blocked_item_ids_.size();
+  }
+
+  // Dirties the state only when the id wasn't already present.
+  void AddConflictingItem(const syncable::Id& the_id) {
+    std::pair<std::set<syncable::Id>::iterator, bool> ret =
+        conflicting_item_ids_.insert(the_id);
+    UpdateDirty(ret.second);
+  }
+
+  // Dirties the state only when the id wasn't already present.
+  void AddBlockedItem(const syncable::Id& the_id) {
+    std::pair<std::set<syncable::Id>::iterator, bool> ret =
+        blocked_item_ids_.insert(the_id);
+    UpdateDirty(ret.second);
+  }
+
+  // Iterator form: the element is known to exist, so always dirties.
+  void EraseConflictingItem(std::set<syncable::Id>::iterator it) {
+    UpdateDirty(true);
+    conflicting_item_ids_.erase(it);
+  }
+
+  // Iterator form: the element is known to exist, so always dirties.
+  void EraseBlockedItem(std::set<syncable::Id>::iterator it) {
+    UpdateDirty(true);
+    blocked_item_ids_.erase(it);
+  }
+
+  // Key form: dirties only when something was actually erased.
+  void EraseConflictingItem(const syncable::Id& the_id) {
+    int items_erased = conflicting_item_ids_.erase(the_id);
+    UpdateDirty(0 != items_erased);
+  }
+
+  // Key form: dirties only when something was actually erased.
+  void EraseBlockedItem(const syncable::Id& the_id) {
+    int items_erased = blocked_item_ids_.erase(the_id);
+    UpdateDirty(0 != items_erased);
+  }
+
+  std::set<syncable::Id>::iterator ConflictingItemsBegin() {
+    return conflicting_item_ids_.begin();
+  }
+
+  std::set<syncable::Id>::iterator BlockedItemsBegin() {
+    return blocked_item_ids_.begin();
+  }
+
+  std::set<syncable::Id>::iterator ConflictingItemsEnd() {
+    return conflicting_item_ids_.end();
+  }
+
+  std::set<syncable::Id>::iterator BlockedItemsEnd() {
+    return blocked_item_ids_.end();
+  }
+
+  void SetConflictingItems(const std::set<syncable::Id>& s) {
+    UpdateDirty(true);
+    conflicting_item_ids_ = s;
+  }
+
+  void SetBlockedItems(const std::set<syncable::Id>& s) {
+    UpdateDirty(true);
+    blocked_item_ids_ = s;
+  }
+  // END item id set manipulation functions
+
+  // Assorted other state info
+  int conflicting_updates() const { return conflicting_item_ids_.size(); }
+
+  // Running count of sync cycles.  NOTE: public data member, mutated
+  // directly by callers rather than through an accessor.
+  int num_sync_cycles_;
+
+  // When we're over bandwidth quota, we don't update until past this time.
+  time_t silenced_until_;
+
+  // Info that is tracked purely for status reporting
+
+  // During initial sync these two members can be used to measure sync
+  // progress.
+  int64 current_sync_timestamp() const { return current_sync_timestamp_; }
+
+  int64 servers_latest_timestamp() const { return servers_latest_timestamp_; }
+
+  void set_current_sync_timestamp(const int64 val);
+
+  void set_servers_latest_timestamp(const int64 val);
+
+  bool invalid_store() const { return invalid_store_; }
+
+  void set_invalid_store(const bool val);
+
+  bool syncer_stuck() const { return syncer_stuck_; }
+
+  void set_syncer_stuck(const bool val);
+
+  bool syncing() const { return syncing_; }
+
+  void set_syncing(const bool val);
+
+  bool IsShareUsable() const;
+
+  int error_commits() const { return error_commits_; }
+
+  void set_error_commits(const int val);
+
+  int conflicting_commits() const { return conflicting_commits_; }
+
+  void set_conflicting_commits(const int val);
+
+  int stalled_commits() const { return stalled_commits_; }
+
+  void set_stalled_commits(const int val);
+
+  // WEIRD COUNTER manipulation functions
+  int consecutive_problem_get_updates() const {
+    return consecutive_problem_get_updates_;
+  }
+
+  void increment_consecutive_problem_get_updates();
+
+  void zero_consecutive_problem_get_updates();
+
+  int consecutive_problem_commits() const {
+    return consecutive_problem_commits_;
+  }
+
+  void increment_consecutive_problem_commits();
+
+  void zero_consecutive_problem_commits();
+
+  int consecutive_transient_error_commits() const {
+    return consecutive_transient_error_commits_;
+  }
+
+  void increment_consecutive_transient_error_commits_by(int value);
+
+  void zero_consecutive_transient_error_commits();
+
+  int consecutive_errors() const { return consecutive_errors_; }
+
+  void increment_consecutive_errors_by(int value);
+
+  void zero_consecutive_errors();
+
+  int successful_commits() const { return successful_commits_; }
+
+  void increment_successful_commits();
+
+  void zero_successful_commits();
+  // end WEIRD COUNTER manipulation functions
+
+  // Methods for managing error rate tracking
+  void TallyNewError();
+
+  void TallyBigNewError();
+
+  void ForgetOldError();
+
+  void CheckErrorRateTooHigh();
+
+  // Methods for tracking authentication state
+  void AuthFailed();
+  void AuthSucceeded();
+
+  // Returns true if this object has been modified since last SetClean() call
+  bool IsDirty() const { return dirty_; }
+
+  // Call to tell this status object that its new state has been seen
+  void SetClean() { dirty_ = false; }
+
+  // Returns true if auth status has been modified since last SetClean() call
+  bool IsAuthDirty() const { return auth_dirty_; }
+
+  // Call to tell this status object that its auth state has been seen
+  void SetAuthClean() { auth_dirty_ = false; }
+
+ private:
+  // for testing
+  SyncProcessState()
+      : account_name_(PSTR("")),
+        dirman_(NULL),
+        syncer_event_channel_(NULL),
+        connection_manager_(NULL),
+        model_safe_worker_(NULL),
+        resolver_(NULL),
+        syncer_stuck_(false),
+        num_sync_cycles_(0),
+        silenced_until_(0),
+        error_rate_(0),
+        current_sync_timestamp_(0),
+        servers_latest_timestamp_(0),
+        error_commits_(0),
+        stalled_commits_(0),
+        conflicting_commits_(0),
+        consecutive_problem_get_updates_(0),
+        consecutive_problem_commits_(0),
+        consecutive_transient_error_commits_(0),
+        consecutive_errors_(0),
+        successful_commits_(0),
+        dirty_(false),
+        auth_dirty_(false),
+        auth_failed_(false),
+        syncing_(false),
+        invalid_store_(false) {}
+
+  // None of these pointers are owned by this object.
+  ServerConnectionManager *connection_manager_;
+  const PathString account_name_;
+  syncable::DirectoryManager* const dirman_;
+  ConflictResolver* const resolver_;
+  ModelSafeWorker* const model_safe_worker_;
+
+  // For sending notifications from sync commands out to observers of the
+  // Syncer.
+  SyncerEventChannel* syncer_event_channel_;
+
+  // TODO(sync): move away from sets if it makes more sense.
+  std::set<syncable::Id> conflicting_item_ids_;
+  std::set<syncable::Id> blocked_item_ids_;
+  // The ConflictSet pointers in these two containers are owned by this
+  // object and freed by CleanupSets().
+  std::map<syncable::Id, ConflictSet*> id_to_conflict_set_;
+  std::set<ConflictSet*> conflict_sets_;
+
+  // Status information, as opposed to state info that may also be exposed for
+  // status reporting purposes.
+  static const int ERROR_THRESHOLD = 500;
+  int error_rate_;  // An EMA in the range [0,65536)
+  int64 current_sync_timestamp_;  // During initial sync these two members
+  int64 servers_latest_timestamp_;  // can be used to measure sync progress.
+
+  // There remains sync state updating in:
+  // CommitUnsyncedEntries
+  bool syncing_;
+
+  // True when we get an INVALID_STORE error from the server.
+  bool invalid_store_;
+  // True iff we're stuck. User should contact support.
+  bool syncer_stuck_;
+  // counts of various commit return values.
+  int error_commits_;
+  int conflicting_commits_;
+  int stalled_commits_;
+
+  // WEIRD COUNTERS
+  // Two variables that track the # of consecutive problem requests.
+  // consecutive_problem_get_updates_ resets when we get any updates (not on
+  // pings) and increments whenever the request fails.
+  int consecutive_problem_get_updates_;
+  // consecutive_problem_commits_ resets whenever we commit any number of
+  // items and increments whenever all commits fail for any reason.
+  int consecutive_problem_commits_;
+  // number of commits hitting transient errors since the last successful
+  // commit.
+  int consecutive_transient_error_commits_;
+  // Incremented when get_updates fails, commit fails, and when
+  // hitting transient errors. When any of these succeed, this counter
+  // is reset.
+  // TODO(chron): Reduce number of weird counters we use.
+  int consecutive_errors_;
+  int successful_commits_;
+
+  bool dirty_;
+  bool auth_dirty_;
+  bool auth_failed_;
+
+  // Sticky-OR helpers: once dirty, stays dirty until SetClean()/SetAuthClean().
+  void UpdateDirty(bool new_info) { dirty_ |= new_info; }
+
+  void UpdateAuthDirty(bool new_info) { auth_dirty_ |= new_info; }
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNC_PROCESS_STATE_H_
diff --git a/chrome/browser/sync/engine/syncapi.cc b/chrome/browser/sync/engine/syncapi.cc
new file mode 100644
index 0000000..3f6eb06
--- /dev/null
+++ b/chrome/browser/sync/engine/syncapi.cc
@@ -0,0 +1,1565 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncapi.h"
+
+#if defined(OS_WINDOWS)
+#include <windows.h>
+#include <iphlpapi.h>
+#endif
+
+#include <iomanip>
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/change_reorder_buffer.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/gaia_authenticator.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/net/syncapi_server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/user_settings.h"
+#include "googleurl/src/gurl.h"
+
+using browser_sync::AllStatus;
+using browser_sync::AllStatusEvent;
+using browser_sync::AuthWatcher;
+using browser_sync::AuthWatcherEvent;
+using browser_sync::ClientCommandChannel;
+using browser_sync::Syncer;
+using browser_sync::SyncerEvent;
+using browser_sync::SyncerStatus;
+using browser_sync::SyncerThread;
+using browser_sync::UserSettings;
+using browser_sync::TalkMediator;
+using browser_sync::TalkMediatorImpl;
+using std::list;
+using std::hex;
+using std::string;
+using std::vector;
+using syncable::Directory;
+using syncable::DirectoryManager;
+
+static const int kServerReachablePollingIntervalMsec = 60000 * 60;
+static const int kThreadExitTimeoutMsec = 60000;
+static const int kSSLPort = 443;
+
+// We shouldn't call InitLogFiles more than once since that will cause a crash.
+// So we use a global state variable to avoid that. This doesn't work in case
+// of multiple threads, and if some other part also tries to call InitLogFiles
+// apart from this file. But this is okay for now since this is the only
+// place we call InitLogFiles.
+namespace {
+// Guards against calling InitLogFiles() a second time; see the comment above.
+static bool g_log_files_initialized = false;
+static base::AtExitManager g_at_exit_manager;  // Necessary for NewCallback
+} // empty namespace
+
+// Arguments handed to the address watch thread (see AddressWatchThread).
+struct ThreadParams {
+  browser_sync::ServerConnectionManager* conn_mgr;  // Not owned.
+#if defined(OS_WINDOWS)
+  HANDLE exit_flag;  // Signaled to ask the watch thread to exit.
+#endif
+};
+
+// This thread calls CheckServerReachable() whenever a change occurs
+// in the table that maps IP addresses to interfaces, for example when
+// the user unplugs his network cable.
+void* AddressWatchThread(void* arg) {
+  NameCurrentThreadForDebugging("SyncEngine_AddressWatcher");
+  LOG(INFO) << "starting the address watch thread";
+  const ThreadParams* const params = reinterpret_cast<const ThreadParams*>(arg);
+#if defined(OS_WINDOWS)
+  OVERLAPPED overlapped = {0};
+  // Auto-reset event, created signaled so the first pass through the loop
+  // issues the initial NotifyAddrChange() call.
+  overlapped.hEvent = CreateEvent(NULL, FALSE, TRUE, NULL);
+  HANDLE file;
+  DWORD rc = WAIT_OBJECT_0;
+  while (true) {
+    // Only call NotifyAddrChange() after the IP address has changed or if this
+    // is the first time through the loop.
+    if (WAIT_OBJECT_0 == rc) {
+      ResetEvent(overlapped.hEvent);
+      DWORD notify_result = NotifyAddrChange(&file, &overlapped);
+      if (ERROR_IO_PENDING != notify_result) {
+        LOG(ERROR) << "NotifyAddrChange() returned unexpected result "
+                   << hex << notify_result;
+        break;
+      }
+    }
+    // Wake on an address change, the exit flag, or the periodic poll timeout.
+    HANDLE events[] = { overlapped.hEvent, params->exit_flag };
+    rc = WaitForMultipleObjects(ARRAYSIZE(events), events, FALSE,
+                                kServerReachablePollingIntervalMsec);
+
+    // If the exit flag was signaled, the thread will exit.
+    if (WAIT_OBJECT_0 + 1 == rc)
+      break;
+
+    // Either the address table changed or the wait timed out; in both cases
+    // re-test whether the server is reachable.
+    params->conn_mgr->CheckServerReachable();
+  }
+  CloseHandle(overlapped.hEvent);
+#else
+  // TODO(zork): Add this functionality to Linux.
+#endif
+  LOG(INFO) << "The address watch thread has stopped";
+  return 0;
+}
+
+namespace sync_api {
+class ModelSafeWorkerBridge;
+
+// Filename of the per-user settings database.
+static const PSTR_CHAR kBookmarkSyncUserSettingsDatabase[] =
+    PSTR("BookmarkSyncSettings.sqlite3");
+// Placeholder name for freshly created nodes; callers are expected to set a
+// real title afterwards.  A single space keeps the name server-legal.
+static const PSTR_CHAR kDefaultNameForNewNodes[] = PSTR(" ");
+
+// The list of names which are reserved for use by the server.
+static const char16* kForbiddenServerNames[] =
+    { STRING16(""), STRING16("."), STRING16("..") };
+
+//////////////////////////////////////////////////////////////////////////
+// Static helper functions.
+
+// Helper function to look up the int64 metahandle of an object given the ID
+// string.
+// Translates a syncable::Id into the int64 metahandle exposed through the
+// sync API; kInvalidId when no such entry exists.
+static int64 IdToMetahandle(syncable::BaseTransaction* trans,
+                            const syncable::Id& id) {
+  syncable::Entry entry(trans, syncable::GET_BY_ID, id);
+  return entry.good() ? entry.Get(syncable::META_HANDLE) : kInvalidId;
+}
+
+// Checks whether |name| is a server-illegal name followed by zero or more space
+// characters. The three server-illegal names are the empty string, dot, and
+// dot-dot. Very long names (>255 bytes in UTF-8 Normalization Form C) are
+// also illegal, but are not considered here.
+static bool IsNameServerIllegalAfterTrimming(const string16& name) {
+  // find_last_not_of returns npos for an all-space (or empty) name;
+  // npos + 1 == 0, so such a name compares equal to the empty forbidden name.
+  size_t untrimmed_count = name.find_last_not_of(' ') + 1;
+  // size_t index matches arraysize()'s unsigned type (avoids a
+  // signed/unsigned comparison warning).
+  for (size_t i = 0; i < arraysize(kForbiddenServerNames); ++i) {
+    if (name.compare(0, untrimmed_count, kForbiddenServerNames[i]) == 0)
+      return true;
+  }
+  return false;
+}
+
+// True when the final character of a non-empty string is a space.
+static bool EndsWithSpace(const string16& string) {
+  if (string.empty())
+    return false;
+  return *string.rbegin() == ' ';
+}
+
+// Converts a UTF-16 syncapi string into the platform PathString type:
+// UTF-16 on Windows, UTF-8 elsewhere.
+static inline void String16ToPathString(const sync_char16 *in,
+                                        PathString *out) {
+  string16 in_str(in);
+#if defined(OS_WINDOWS)
+  out->assign(in_str);
+#else
+  UTF16ToUTF8(in_str.c_str(), in_str.length(), out);
+#endif
+}
+
+// Inverse of String16ToPathString: converts a platform PathString
+// (UTF-16 on Windows, UTF-8 elsewhere) to a UTF-16 string16.
+static inline void PathStringToString16(const PathString& in, string16* out) {
+#if defined(OS_WINDOWS)
+  out->assign(in);
+#else
+  UTF8ToUTF16(in.c_str(), in.length(), out);
+#endif
+}
+
+// When taking a name from the syncapi, append a space if it matches the
+// pattern of a server-illegal name followed by zero or more spaces.
+static void SyncAPINameToServerName(const sync_char16 *sync_api_name,
+                                    PathString* out) {
+  // Convert first, then escape: a trailing space protects names the server
+  // reserves ("", ".", "..", possibly already space-padded).
+  String16ToPathString(sync_api_name, out);
+  if (IsNameServerIllegalAfterTrimming(string16(sync_api_name)))
+    out->append(PSTR(" "));
+}
+
+// In the reverse direction, if a server name matches the pattern of a
+// server-illegal name followed by one or more spaces, remove the trailing
+// space.
+static void ServerNameToSyncAPIName(const PathString& server_name,
+                                    string16* out) {
+  // Undo the trailing-space escaping applied by SyncAPINameToServerName.
+  string16 as_string16;
+  PathStringToString16(server_name, &as_string16);
+  const bool drop_trailing_space =
+      IsNameServerIllegalAfterTrimming(as_string16) &&
+      EndsWithSpace(as_string16);
+  if (drop_trailing_space)
+    out->assign(as_string16, 0, as_string16.size() - 1);
+  else
+    out->assign(as_string16);
+}
+
+// A UserShare encapsulates the syncable pieces that represent an authenticated
+// user and their data (share).
+// This encompasses all pieces required to build transaction objects on the
+// syncable share.
+struct UserShare {
+  // The DirectoryManager itself, which is the parent of Transactions and can
+  // be shared across multiple threads (unlike Directory).  Owned by this
+  // struct (scoped_ptr).
+  scoped_ptr<DirectoryManager> dir_manager;
+
+  // The username of the sync user. This is empty until we have performed at
+  // least one successful GAIA authentication with this username, which means
+  // on first-run it is empty until an AUTH_SUCCEEDED event and on future runs
+  // it is set as soon as the client instructs us to authenticate for the last
+  // known valid user (AuthenticateForLastKnownUser()).
+  // Stored as a PathString to avoid string conversions each time a transaction
+  // is created.
+  PathString authenticated_name;
+};
+
+////////////////////////////////////
+// BaseNode member definitions.
+
+// BaseNode::BaseNodeInternal provides storage for member Get() functions that
+// need to return pointers (e.g. strings).
+struct BaseNode::BaseNodeInternal {
+  string16 url;                           // Backing store for GetURL().
+  string16 title;                         // Backing store for GetTitle().
+  Directory::ChildHandles child_handles;  // Backing store for GetChildIds().
+  syncable::Blob favicon;                 // Backing store for GetFaviconBytes().
+};
+
+BaseNode::BaseNode() : data_(new BaseNode::BaseNodeInternal) {}
+
+BaseNode::~BaseNode() {
+  // data_ is owned exclusively by this node.
+  delete data_;
+}
+
+int64 BaseNode::GetParentId() const {
+  // Map the parent's syncable::Id to the metahandle used as the public id.
+  return IdToMetahandle(GetTransaction()->GetWrappedTrans(),
+                        GetEntry()->Get(syncable::PARENT_ID));
+}
+
+int64 BaseNode::GetId() const {
+  // The id exposed through the sync API is the syncable metahandle.
+  return GetEntry()->Get(syncable::META_HANDLE);
+}
+
+bool BaseNode::GetIsFolder() const {
+  return GetEntry()->Get(syncable::IS_DIR);
+}
+
+const sync_char16* BaseNode::GetTitle() const {
+  // Undo the server-illegal-name escaping applied when the title was set.
+  // Store the string in data_ so that the returned pointer is valid.
+  ServerNameToSyncAPIName(GetEntry()->GetName().non_unique_value(),
+                          &data_->title);
+  return data_->title.c_str();
+}
+
+const sync_char16* BaseNode::GetURL() const {
+  // Store the string in data_ so that the returned pointer is valid.
+  PathStringToString16(GetEntry()->Get(syncable::BOOKMARK_URL), &data_->url);
+  return data_->url.c_str();
+}
+
+// Fills data_->child_handles with the metahandles of this node's children and
+// returns a pointer into that storage (NULL when there are no children).
+const int64* BaseNode::GetChildIds(size_t* child_count) const {
+  DCHECK(child_count);
+  Directory* dir = GetTransaction()->GetLookup();
+  dir->GetChildHandles(GetTransaction()->GetWrappedTrans(),
+                       GetEntry()->Get(syncable::ID), &data_->child_handles);
+
+  *child_count = data_->child_handles.size();
+  if (data_->child_handles.empty())
+    return NULL;
+  return &data_->child_handles[0];
+}
+
+// Metahandle of the previous sibling, or kInvalidId when this node is first.
+int64 BaseNode::GetPredecessorId() const {
+  syncable::Id prev_id = GetEntry()->Get(syncable::PREV_ID);
+  return prev_id.IsRoot() ? kInvalidId :
+      IdToMetahandle(GetTransaction()->GetWrappedTrans(), prev_id);
+}
+
+// Metahandle of the next sibling, or kInvalidId when this node is last.
+int64 BaseNode::GetSuccessorId() const {
+  syncable::Id next_id = GetEntry()->Get(syncable::NEXT_ID);
+  return next_id.IsRoot() ? kInvalidId :
+      IdToMetahandle(GetTransaction()->GetWrappedTrans(), next_id);
+}
+
+// Metahandle of this node's first child, or kInvalidId when it has none.
+int64 BaseNode::GetFirstChildId() const {
+  syncable::Directory* dir = GetTransaction()->GetLookup();
+  syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
+  syncable::Id child_id =
+      dir->GetFirstChildId(trans, GetEntry()->Get(syncable::ID));
+  if (child_id.IsRoot())
+    return kInvalidId;
+  return IdToMetahandle(trans, child_id);
+}
+
+// Copies the favicon blob into data_ and returns a pointer into that storage
+// (NULL when the favicon is empty).
+const unsigned char* BaseNode::GetFaviconBytes(size_t* size_in_bytes) {
+  data_->favicon = GetEntry()->Get(syncable::BOOKMARK_FAVICON);
+  *size_in_bytes = data_->favicon.size();
+  return (*size_in_bytes == 0) ? NULL : &(data_->favicon[0]);
+}
+
+int64 BaseNode::GetExternalId() const {
+  // LOCAL_EXTERNAL_ID is a client-local handle (per the LOCAL_ prefix) --
+  // presumably the native model's id for this node.
+  return GetEntry()->Get(syncable::LOCAL_EXTERNAL_ID);
+}
+
+////////////////////////////////////
+// WriteNode member definitions
+void WriteNode::SetIsFolder(bool folder) {
+  // Only write (and wake the syncer) when the value actually changes.
+  if (entry_->Get(syncable::IS_DIR) != folder) {
+    entry_->Put(syncable::IS_DIR, folder);
+    MarkForSyncing();
+  }
+}
+
+// Sets the node's title, first mapping it to a server-legal, OS-legal,
+// sibling-unique name.
+void WriteNode::SetTitle(const sync_char16* title) {
+  PathString server_legal_name;
+  SyncAPINameToServerName(title, &server_legal_name);
+  syncable::SyncName sync_name(server_legal_name);
+  syncable::DBName db_name(sync_name.value());
+  // The database name must additionally be OS-legal and unique among
+  // siblings under the current parent.
+  db_name.MakeOSLegal();
+  db_name.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(),
+                                   entry_->Get(syncable::PARENT_ID), entry_);
+
+  syncable::Name new_name = syncable::Name::FromDBNameAndSyncName(db_name,
+                                                                  sync_name);
+  if (new_name == entry_->GetName())
+    return;  // Skip redundant changes.
+
+  entry_->PutName(new_name);
+  MarkForSyncing();
+}
+
+void WriteNode::SetURL(const sync_char16* url) {
+  PathString new_url;
+  String16ToPathString(url, &new_url);
+  // Only write (and wake the syncer) when the value actually changes.
+  if (new_url == entry_->Get(syncable::BOOKMARK_URL))
+    return;
+
+  entry_->Put(syncable::BOOKMARK_URL, new_url);
+  MarkForSyncing();
+}
+
+void WriteNode::SetExternalId(int64 id) {
+  // Note: deliberately no MarkForSyncing() here -- the external id looks to
+  // be client-local state (LOCAL_ prefix) that should not wake the syncer.
+  if (GetExternalId() != id)
+    entry_->Put(syncable::LOCAL_EXTERNAL_ID, id);
+}
+
+WriteNode::WriteNode(WriteTransaction* transaction)
+    : entry_(NULL), transaction_(transaction) {
+  // The node is bound to an entry later via one of the Init* methods.
+  DCHECK(transaction);
+}
+
+WriteNode::~WriteNode() {
+  // entry_ may still be NULL if no Init* method was called; deleting NULL
+  // is safe.
+  delete entry_;
+}
+
+// Find an existing node matching the ID |id|, and bind this WriteNode
+// to it. Return true on success.
+bool WriteNode::InitByIdLookup(int64 id) {
+  DCHECK(!entry_) << "Init called twice";
+  DCHECK_NE(id, kInvalidId);
+  entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+                                      syncable::GET_BY_HANDLE, id);
+  // Succeed only for an existing, non-deleted entry.
+  if (!entry_->good())
+    return false;
+  return !entry_->Get(syncable::IS_DEL);
+}
+
+// Create a new node with default properties, and bind this WriteNode to it.
+// Return true on success.
+bool WriteNode::InitByCreation(const BaseNode& parent,
+                               const BaseNode* predecessor) {
+  DCHECK(!entry_) << "Init called twice";
+  // |predecessor| must be a child of |parent| or NULL.
+  if (predecessor && predecessor->GetParentId() != parent.GetId()) {
+    DCHECK(false);
+    // Release builds fall through and report failure to the caller.
+    return false;
+  }
+
+  syncable::Id parent_id = parent.GetEntry()->Get(syncable::ID);
+
+  // Start out with a dummy name, but make it unique. We expect
+  // the caller to set a meaningful name after creation.
+  syncable::DBName dummy(kDefaultNameForNewNodes);
+  dummy.MakeOSLegal();
+  dummy.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(), parent_id,
+                                 NULL);
+
+  entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+                                      syncable::CREATE, parent_id, dummy);
+
+  if (!entry_->good())
+    return false;
+
+  // Entries are untitled folders by default.
+  entry_->Put(syncable::IS_DIR, true);
+  // TODO(ncarter): Naming this bit IS_BOOKMARK_OBJECT is a bit unfortunate,
+  // since the rest of SyncAPI is essentially bookmark-agnostic.
+  entry_->Put(syncable::IS_BOOKMARK_OBJECT, true);
+
+  // Now set the predecessor, which sets IS_UNSYNCED as necessary.
+  PutPredecessor(predecessor);
+
+  return true;
+}
+
+// Reposition this node under |new_parent|, immediately after |predecessor|
+// (or in first position when |predecessor| is NULL).  Returns false if the
+// move would introduce a cycle in the hierarchy.
+bool WriteNode::SetPosition(const BaseNode& new_parent,
+                            const BaseNode* predecessor) {
+  // |predecessor| must be a child of |new_parent| or NULL.
+  if (predecessor && predecessor->GetParentId() != new_parent.GetId()) {
+    DCHECK(false);
+    return false;
+  }
+
+  syncable::Id new_parent_id = new_parent.GetEntry()->Get(syncable::ID);
+
+  // Filter out redundant changes if both the parent and the predecessor match.
+  if (new_parent_id == entry_->Get(syncable::PARENT_ID)) {
+    const syncable::Id& old = entry_->Get(syncable::PREV_ID);
+    if ((!predecessor && old.IsRoot()) ||
+        (predecessor && (old == predecessor->GetEntry()->Get(syncable::ID)))) {
+      return true;
+    }
+  }
+
+  // Discard the old database name, derive a new database name from the sync
+  // name, and make it legal and unique.
+  syncable::Name name = syncable::Name::FromSyncName(GetEntry()->GetName());
+  name.db_value().MakeOSLegal();
+  name.db_value().MakeNoncollidingForEntry(GetTransaction()->GetWrappedTrans(),
+                                           new_parent_id, entry_);
+
+  // Atomically change the parent and name. This will fail if it would
+  // introduce a cycle in the hierarchy.
+  if (!entry_->PutParentIdAndName(new_parent_id, name))
+    return false;
+
+  // Now set the predecessor, which sets IS_UNSYNCED as necessary.
+  PutPredecessor(predecessor);
+
+  return true;
+}
+
+// BaseNode interface: expose the bound entry and owning transaction.
+const syncable::Entry* WriteNode::GetEntry() const {
+  return entry_;
+}
+
+const BaseTransaction* WriteNode::GetTransaction() const {
+  return transaction_;
+}
+
+void WriteNode::Remove() {
+  // Deletion is just another syncable change: set IS_DEL and let the syncer
+  // propagate it.
+  entry_->Put(syncable::IS_DEL, true);
+  MarkForSyncing();
+}
+
+void WriteNode::PutPredecessor(const BaseNode* predecessor) {
+  // A default-constructed Id denotes "no predecessor" (first position).
+  syncable::Id predecessor_id;
+  if (predecessor)
+    predecessor_id = predecessor->GetEntry()->Get(syncable::ID);
+  entry_->PutPredecessor(predecessor_id);
+  // Mark this entry as unsynced, to wake up the syncer.
+  MarkForSyncing();
+}
+
+void WriteNode::SetFaviconBytes(const unsigned char* bytes,
+                                size_t size_in_bytes) {
+  syncable::Blob new_favicon(bytes, bytes + size_in_bytes);
+  // Only write (and wake the syncer) when the blob actually changes.
+  if (new_favicon != entry_->Get(syncable::BOOKMARK_FAVICON)) {
+    entry_->Put(syncable::BOOKMARK_FAVICON, new_favicon);
+    MarkForSyncing();
+  }
+}
+
+void WriteNode::MarkForSyncing() {
+  // Delegates to syncable::MarkForSyncing, which marks the entry unsynced so
+  // the syncer will pick it up.
+  syncable::MarkForSyncing(entry_);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// ReadNode member definitions
+ReadNode::ReadNode(const BaseTransaction* transaction)
+    : entry_(NULL), transaction_(transaction) {
+  // The node is bound to an entry later via one of the Init* methods.
+  DCHECK(transaction);
+}
+
+ReadNode::~ReadNode() {
+  // entry_ may still be NULL if no Init* method was called; deleting NULL
+  // is safe.
+  delete entry_;
+}
+
+// Binds this node to the root of the share.  The root always exists, so this
+// cannot fail; a failed lookup is a programming error.
+void ReadNode::InitByRootLookup() {
+  DCHECK(!entry_) << "Init called twice";
+  syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+  entry_ = new syncable::Entry(trans, syncable::GET_BY_ID, trans->root_id());
+  // Idiomatic form of `if (!good) DCHECK(false)`: assert directly on the
+  // condition so the failure message carries context.
+  DCHECK(entry_->good()) << "Could not lookup root node for reading.";
+}
+
+bool ReadNode::InitByIdLookup(int64 id) {
+  DCHECK(!entry_) << "Init called twice";
+  DCHECK_NE(id, kInvalidId);
+  syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+  entry_ = new syncable::Entry(trans, syncable::GET_BY_HANDLE, id);
+  // Succeed only for an existing, non-deleted entry.
+  if (!entry_->good() || entry_->Get(syncable::IS_DEL))
+    return false;
+  LOG_IF(WARNING, !entry_->Get(syncable::IS_BOOKMARK_OBJECT))
+      << "SyncAPI InitByIdLookup referencing non-bookmark object.";
+  return true;
+}
+
+// BaseNode interface: expose the bound entry and owning transaction.
+const syncable::Entry* ReadNode::GetEntry() const {
+  return entry_;
+}
+
+const BaseTransaction* ReadNode::GetTransaction() const {
+  return transaction_;
+}
+
+bool ReadNode::InitByTagLookup(const sync_char16* tag) {
+  DCHECK(!entry_) << "Init called twice";
+  PathString tag_string;
+  String16ToPathString(tag, &tag_string);
+  // An empty tag can never match anything.
+  if (tag_string.empty())
+    return false;
+  syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
+  entry_ = new syncable::Entry(trans, syncable::GET_BY_TAG, tag_string);
+  // Succeed only for an existing, non-deleted entry.
+  if (!entry_->good() || entry_->Get(syncable::IS_DEL))
+    return false;
+  LOG_IF(WARNING, !entry_->Get(syncable::IS_BOOKMARK_OBJECT))
+      << "SyncAPI InitByTagLookup referencing non-bookmark object.";
+  return true;
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// ReadTransaction member definitions
+ReadTransaction::ReadTransaction(UserShare* share)
+    : BaseTransaction(share),
+      transaction_(NULL) {
+  // Open a syncable read transaction on the share's directory; it is
+  // released in the destructor.
+  transaction_ = new syncable::ReadTransaction(GetLookup(), __FILE__, __LINE__);
+}
+
+ReadTransaction::~ReadTransaction() {
+  delete transaction_;
+}
+
+syncable::BaseTransaction* ReadTransaction::GetWrappedTrans() const {
+  return transaction_;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// WriteTransaction member definitions
+WriteTransaction::WriteTransaction(UserShare* share)
+    : BaseTransaction(share),
+      transaction_(NULL) {
+  // Open a syncable write transaction, attributing changes to the SYNCAPI
+  // writer; it is released in the destructor.
+  transaction_ = new syncable::WriteTransaction(GetLookup(), syncable::SYNCAPI,
+                                                __FILE__, __LINE__);
+}
+
+WriteTransaction::~WriteTransaction() {
+  delete transaction_;
+}
+
+syncable::BaseTransaction* WriteTransaction::GetWrappedTrans() const {
+  return transaction_;
+}
+
+// An implementation of Visitor that we use to "visit" the
+// ModelSafeWorkerInterface provided by a client of this API. The object we
+// visit is responsible for calling DoWork, which will invoke Run() on it's
+// cached work closure.
+class ModelSafeWorkerVisitor : public ModelSafeWorkerInterface::Visitor {
+ public:
+  // |work| is not owned; it is expected to clean itself up when Run.
+  explicit ModelSafeWorkerVisitor(Closure* work) : work_(work) { }
+  virtual ~ModelSafeWorkerVisitor() { }
+
+  // ModelSafeWorkerInterface::Visitor implementation.
+  // Invoked by the client's worker from its model safe thread.
+  virtual void DoWork() {
+    work_->Run();
+  }
+
+ private:
+  // The work to be done. We run this on DoWork and it cleans itself up
+  // after it is run.
+  Closure* work_;
+
+  DISALLOW_COPY_AND_ASSIGN(ModelSafeWorkerVisitor);
+};
+
+// This class is declared in the cc file to allow inheritance from sync types.
+// The ModelSafeWorkerBridge is a liason between a syncapi-client defined
+// ModelSafeWorkerInterface and the actual ModelSafeWorker used by the Syncer
+// for the current SyncManager.
+class ModelSafeWorkerBridge : public browser_sync::ModelSafeWorker {
+ public:
+ // Takes ownership of |worker|.
+ explicit ModelSafeWorkerBridge(ModelSafeWorkerInterface* worker)
+ : worker_(worker) {
+ }
+ virtual ~ModelSafeWorkerBridge() { }
+
+ // Overriding ModelSafeWorker.
+ virtual void DoWorkAndWaitUntilDone(Closure* work) {
+ // When the syncer has work to be done, we forward it to our worker who
+ // will invoke DoWork on |visitor| when appropriate (from model safe
+ // thread).
+ ModelSafeWorkerVisitor visitor(work);
+ worker_->CallDoWorkFromModelSafeThreadAndWait(&visitor);
+ }
+
+ private:
+ // The worker that we can forward work requests to, to ensure the work
+ // is performed on an appropriate model safe thread.
+ scoped_ptr<ModelSafeWorkerInterface> worker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ModelSafeWorkerBridge);
+};
+
+// A GaiaAuthenticator that uses HttpPostProviders instead of CURL.
+class BridgedGaiaAuthenticator : public browser_sync::GaiaAuthenticator {
+ public:
+ BridgedGaiaAuthenticator(const string& user_agent, const string& service_id,
+ const string& gaia_url,
+ HttpPostProviderFactory* factory)
+ : GaiaAuthenticator(user_agent, service_id, gaia_url),
+ gaia_source_(user_agent), post_factory_(factory) {
+ }
+
+ virtual ~BridgedGaiaAuthenticator() {
+ }
+
+ virtual bool Post(const GURL& url, const string& post_body,
+ unsigned long* response_code, string* response_body) {
+ string connection_url = "https://";
+ connection_url += url.host();
+ connection_url += url.path();
+ HttpPostProviderInterface* http = post_factory_->Create();
+ http->SetUserAgent(gaia_source_.c_str());
+ // SSL is on 443 for Gaia Posts always.
+ http->SetURL(connection_url.c_str(), kSSLPort);
+ http->SetPostPayload("application/x-www-form-urlencoded",
+ post_body.length(), post_body.c_str());
+
+ int os_error_code = 0;
+ int int_response_code = 0;
+ if (!http->MakeSynchronousPost(&os_error_code, &int_response_code)) {
+ LOG(INFO) << "Http POST failed, error returns: " << os_error_code;
+ return false;
+ }
+ *response_code = static_cast<int>(int_response_code);
+ response_body->assign(http->GetResponseContent(),
+ http->GetResponseContentLength());
+ post_factory_->Destroy(http);
+ return true;
+ }
+ private:
+ const std::string gaia_source_;
+ scoped_ptr<HttpPostProviderFactory> post_factory_;
+ DISALLOW_COPY_AND_ASSIGN(BridgedGaiaAuthenticator);
+};
+
//////////////////////////////////////////////////////////////////////////
// SyncManager's implementation: SyncManager::SyncInternal
class SyncManager::SyncInternal {
 public:
  // Scoped pthread-based lock; used to guard initialized_.
  typedef PThreadScopedLock<PThreadMutex> MutexLock;
  explicit SyncInternal(SyncManager* sync_manager)
      : observer_(NULL),
        command_channel_(0),
        auth_problem_(AUTH_PROBLEM_NONE),
        sync_manager_(sync_manager),
        notification_pending_(false),
        initialized_(false) {
  }

  ~SyncInternal() { }

  bool Init(const PathString& database_location,
            const std::string& sync_server_and_path,
            int port,
            const char* gaia_service_id,
            const char* gaia_source,
            bool use_ssl,
            HttpPostProviderFactory* post_factory,
            HttpPostProviderFactory* auth_post_factory,
            ModelSafeWorkerInterface* model_safe_worker,
            bool attempt_last_user_authentication,
            const char* user_agent);

  // Tell sync engine to submit credentials to GAIA for verification and start
  // the syncing process on success. Successful GAIA authentication will kick
  // off the following chain of events:
  // 1. Cause sync engine to open the syncer database.
  // 2. Trigger the AuthWatcher to create a Syncer for the directory and call
  //    SyncerThread::SyncDirectory; the SyncerThread will block until (4).
  // 3. Tell the ServerConnectionManager to pass the newly received GAIA auth
  //    token to a sync server to obtain a sync token.
  // 4. On receipt of this token, the ServerConnectionManager broadcasts
  //    a server-reachable event, which will unblock the SyncerThread,
  //    and the rest is the future.
  //
  // If authentication fails, an event will be broadcast all the way up to
  // the SyncManager::Observer. It may, in turn, decide to try again with new
  // credentials. Calling this method again is the appropriate course of action
  // to "retry".
  void Authenticate(const std::string& username, const std::string& password);

  // Call periodically from a database-safe thread to persist recent changes
  // to the syncapi model.
  void SaveChanges();

  // This listener is called upon completion of a syncable transaction, and
  // builds the list of sync-engine initiated changes that will be forwarded to
  // the SyncManager's Observers.
  void HandleChangeEvent(const syncable::DirectoryChangeEvent& event);
  void HandleTransactionCompleteChangeEvent(
      const syncable::DirectoryChangeEvent& event);
  void HandleCalculateChangesChangeEventFromSyncApi(
      const syncable::DirectoryChangeEvent& event);
  void HandleCalculateChangesChangeEventFromSyncer(
      const syncable::DirectoryChangeEvent& event);

  // This listener is called by the syncer channel for all syncer events.
  void HandleSyncerEvent(const SyncerEvent& event);

  // We have a direct hookup to the authwatcher to be notified for auth failures
  // on startup, to serve our UI needs.
  void HandleAuthWatcherEvent(const AuthWatcherEvent& event);

  // Accessors for the private members.
  DirectoryManager* dir_manager() { return share_.dir_manager.get(); }
  SyncAPIServerConnectionManager* connection_manager() {
    return connection_manager_.get();
  }
  SyncerThread* syncer_thread() { return syncer_thread_.get(); }
  TalkMediator* talk_mediator() { return talk_mediator_.get(); }
  AuthWatcher* auth_watcher() { return auth_watcher_.get(); }
  AllStatus* allstatus() { return &allstatus_; }
  void set_observer(Observer* observer) { observer_ = observer; }
  UserShare* GetUserShare() { return &share_; }

  // Return the currently active (validated) username as a PathString for
  // use with syncable types.
  const PathString& username_for_share() const {
    return share_.authenticated_name;
  }

  // Returns the authenticated username from our AuthWatcher in UTF8.
  // See SyncManager::GetAuthenticatedUsername for details.
  const char* GetAuthenticatedUsername();

  // Note about SyncManager::Status implementation: Status is a trimmed
  // down AllStatus::Status, augmented with authentication failure information
  // gathered from the internal AuthWatcher. The sync UI itself hooks up to
  // various sources like the AuthWatcher individually, but with syncapi we try
  // to keep everything status-related in one place. This means we have to
  // privately manage state about authentication failures, and whenever the
  // status or status summary is requested we aggregate this state with
  // AllStatus::Status information.
  Status ComputeAggregatedStatus();
  Status::Summary ComputeAggregatedStatusSummary();

  // See SyncManager::SetupForTestMode for information.
  void SetupForTestMode(const sync_char16* test_username);

  // See SyncManager::Shutdown for information.
  void Shutdown();

  // Whether we're initialized to the point of being able to accept changes
  // (and hence allow transaction creation). See initialized_ for details.
  bool initialized() const {
    MutexLock lock(&initialized_mutex_);
    return initialized_;
  }
 private:
  // Try to authenticate using persisted credentials from a previous successful
  // authentication. If no such credentials exist, calls OnAuthError on
  // the client to collect credentials. Otherwise, there exist local
  // credentials that were once used for a successful auth, so we'll try to
  // re-use these.
  // Failure of that attempt will be communicated as normal using
  // OnAuthError. Since this entry point will bypass normal GAIA
  // authentication and try to authenticate directly with the sync service
  // using a cached token, authentication failure will generally occur due to
  // expired credentials, or possibly because of a password change.
  void AuthenticateForLastKnownUser();

  // Helper to call OnAuthError when no authentication credentials
  // are available.
  void RaiseAuthNeededEvent();

  // Helper to set initialized_ to true and raise an event to clients to
  // notify that initialization is complete and it is safe to send us changes.
  // If already initialized, this is a no-op.
  void MarkAndNotifyInitializationComplete();

  // Determine if the parents or predecessors differ between the old and new
  // versions of an entry stored in |a| and |b|. Note that a node's index
  // may change without its NEXT_ID changing if the node at NEXT_ID also
  // moved (but the relative order is unchanged). To handle such cases,
  // we rely on the caller to treat a position update on any sibling as
  // updating the positions of all siblings.
  static bool BookmarkPositionsDiffer(const syncable::EntryKernel& a,
                                      const syncable::Entry& b) {
    if (a.ref(syncable::NEXT_ID) != b.Get(syncable::NEXT_ID))
      return true;
    if (a.ref(syncable::PARENT_ID) != b.Get(syncable::PARENT_ID))
      return true;
    return false;
  }

  // Determine if any of the fields made visible to clients of the Sync API
  // differ between the versions of an entry stored in |a| and |b|.
  // A return value of false means that it should be OK to ignore this change.
  static bool BookmarkPropertiesDiffer(const syncable::EntryKernel& a,
                                       const syncable::Entry& b) {
    if (a.ref(syncable::NAME) != b.Get(syncable::NAME))
      return true;
    if (a.ref(syncable::UNSANITIZED_NAME) != b.Get(syncable::UNSANITIZED_NAME))
      return true;
    if (a.ref(syncable::IS_DIR) != b.Get(syncable::IS_DIR))
      return true;
    if (a.ref(syncable::BOOKMARK_URL) != b.Get(syncable::BOOKMARK_URL))
      return true;
    if (a.ref(syncable::BOOKMARK_FAVICON) != b.Get(syncable::BOOKMARK_FAVICON))
      return true;
    if (BookmarkPositionsDiffer(a, b))
      return true;
    return false;
  }

  // We couple the DirectoryManager and username together in a UserShare member
  // so we can return a handle to share_ to clients of the API for use when
  // constructing any transaction type.
  UserShare share_;

  // A cached string for callers of GetAuthenticatedUsername. We just store the
  // last result of auth_watcher_->email() here and change it on future calls,
  // because callers of GetAuthenticatedUsername are supposed to copy the value
  // if they need it for longer than the scope of the call.
  std::string cached_auth_watcher_email_;

  // A wrapper around a sqlite store used for caching authentication data,
  // last user information, current sync-related URLs, and more.
  scoped_ptr<UserSettings> user_settings_;

  // Observer registered via SetObserver/RemoveObserver.
  // WARNING: This can be NULL!
  Observer* observer_;

  // A sink for client commands from the syncer needed to create a SyncerThread.
  ClientCommandChannel command_channel_;

  // The ServerConnectionManager used to abstract communication between
  // the client (the Syncer) and the sync server.
  scoped_ptr<SyncAPIServerConnectionManager> connection_manager_;

  // The thread that runs the Syncer. Needs to be explicitly Start()ed.
  scoped_ptr<SyncerThread> syncer_thread_;

  // Notification (xmpp) handler.
  scoped_ptr<TalkMediator> talk_mediator_;

  // A multi-purpose status watch object that aggregates stats from various
  // sync components.
  AllStatus allstatus_;

  // AuthWatcher kicks off the authentication process and follows it through
  // phase 1 (GAIA) to phase 2 (sync engine). As part of this work it determines
  // the initial connectivity and causes the server connection event to be
  // broadcast, which signals the syncer thread to start syncing.
  // It has a heavy duty constructor requiring boilerplate so we heap allocate.
  scoped_ptr<AuthWatcher> auth_watcher_;

  // A store of change records produced by HandleChangeEvent during the
  // CALCULATE_CHANGES step, and to be processed, and forwarded to the
  // observer, by HandleChangeEvent during the TRANSACTION_COMPLETE step.
  ChangeReorderBuffer change_buffer_;

  // The event listener hookup that is registered for HandleChangeEvent.
  scoped_ptr<EventListenerHookup> dir_change_hookup_;

  // The event listener hookup registered for HandleSyncerEvent.
  scoped_ptr<EventListenerHookup> syncer_event_;

  // The event listener hookup registered for HandleAuthWatcherEvent.
  scoped_ptr<EventListenerHookup> authwatcher_hookup_;

  // Our cache of a recent authentication problem. If no authentication problem
  // occurred, or if the last problem encountered has been cleared (by a
  // subsequent AuthWatcherEvent), this is set to AUTH_PROBLEM_NONE.
  AuthProblem auth_problem_;

  // The sync dir_manager to which we belong.
  SyncManager* const sync_manager_;

  // Parameters for our thread listening to network status changes.
  ThreadParams address_watch_params_;
  thread_handle address_watch_thread_;

  // True if the next SyncCycle should notify peers of an update.
  bool notification_pending_;

  // Set to true once Init has been called, and we know of an authenticated
  // (valid) username either from a fresh authentication attempt (as in
  // first-use case) or from a previous attempt stored in our UserSettings
  // (as in the steady-state), and the syncable::Directory has been opened,
  // meaning we are ready to accept changes. Protected by initialized_mutex_
  // as it can get read/set by both the SyncerThread and the AuthWatcherThread.
  bool initialized_;
  mutable PThreadMutex initialized_mutex_;
};
+
+SyncManager::SyncManager() {
+ data_ = new SyncInternal(this);
+}
+
+bool SyncManager::Init(const sync_char16* database_location,
+ const char* sync_server_and_path,
+ int sync_server_port,
+ const char* gaia_service_id,
+ const char* gaia_source,
+ bool use_ssl,
+ HttpPostProviderFactory* post_factory,
+ HttpPostProviderFactory* auth_post_factory,
+ ModelSafeWorkerInterface* model_safe_worker,
+ bool attempt_last_user_authentication,
+ const char* user_agent) {
+ DCHECK(database_location);
+ DCHECK(post_factory);
+
+ PathString db_path;
+ String16ToPathString(database_location, &db_path);
+ string server_string(sync_server_and_path);
+ return data_->Init(db_path,
+ server_string,
+ sync_server_port,
+ gaia_service_id,
+ gaia_source,
+ use_ssl,
+ post_factory,
+ auth_post_factory,
+ model_safe_worker,
+ attempt_last_user_authentication,
+ user_agent);
+}
+
// Forwards the raw credential strings to the internal implementation, which
// owns the actual GAIA authentication flow.
void SyncManager::Authenticate(const char* username, const char* password) {
  data_->Authenticate(std::string(username), std::string(password));
}
+
+const char* SyncManager::GetAuthenticatedUsername() {
+ if (!data_)
+ return NULL;
+ return data_->GetAuthenticatedUsername();
+}
+
// Converts the share's authenticated name to UTF8 and caches it in a member
// so the returned c_str() remains valid until the next call (callers are
// expected to copy the value if they need it longer than that).
const char* SyncManager::SyncInternal::GetAuthenticatedUsername() {
  cached_auth_watcher_email_ = browser_sync::ToUTF8(
      username_for_share()).get_string();
  return cached_auth_watcher_email_.c_str();
}
+
// Brings up the whole sync stack in dependency order: user settings db,
// directory manager, server connection manager, network-status watcher,
// talk (xmpp) mediator, auth watcher, and finally the syncer thread.
// Returns false only if the UserSettings database cannot be initialized.
bool SyncManager::SyncInternal::Init(
    const PathString& database_location,
    const std::string& sync_server_and_path,
    int port,
    const char* gaia_service_id,
    const char* gaia_source,
    bool use_ssl, HttpPostProviderFactory* post_factory,
    HttpPostProviderFactory* auth_post_factory,
    ModelSafeWorkerInterface* model_safe_worker,
    bool attempt_last_user_authentication,
    const char* user_agent) {

  if (!g_log_files_initialized) {
    // TODO(timsteele): Call InitLogFiles() or equivalent.
    g_log_files_initialized = true;
  }

  // Set up UserSettings, creating the db if necessary. We need this to
  // instantiate a URLFactory to give to the Syncer.
  PathString settings_db_file = AppendSlash(database_location) +
                                kBookmarkSyncUserSettingsDatabase;
  user_settings_.reset(new UserSettings());
  if (!user_settings_->Init(settings_db_file))
    return false;

  share_.dir_manager.reset(new DirectoryManager(database_location));

  string client_id = user_settings_->GetClientId();
  connection_manager_.reset(new SyncAPIServerConnectionManager(
      sync_server_and_path, port, use_ssl, user_agent, client_id));

  // TODO(timsteele): This is temporary windows crap needed to listen for
  // network status changes. We should either pump this up to the embedder to
  // do (and call us in CheckServerReachable, for ex), or at least make this
  // platform independent in here.
  // TODO(ncarter): When this gets cleaned up, the implementation of
  // CreatePThread can also be removed.
  // NOTE(review): Chromium's build usually defines OS_WIN, not OS_WINDOWS --
  // confirm this macro is actually set by the build that compiles this file.
#if defined(OS_WINDOWS)
  HANDLE exit_flag = CreateEvent(NULL, TRUE /*manual reset*/, FALSE, NULL);
  address_watch_params_.exit_flag = exit_flag;
#endif
  address_watch_params_.conn_mgr = connection_manager();
  address_watch_thread_ = CreatePThread(AddressWatchThread,
                                        &address_watch_params_);
  DCHECK(NULL != address_watch_thread_);

  // Hand over the bridged POST factory to be owned by the connection
  // dir_manager.
  connection_manager()->SetHttpPostProviderFactory(post_factory);

  // Watch various objects for aggregated status.
  allstatus()->WatchConnectionManager(connection_manager());

  std::string gaia_url = browser_sync::kGaiaUrl;
  const char* service_id = gaia_service_id ?
                           gaia_service_id : SYNC_SERVICE_NAME;

  talk_mediator_.reset(new TalkMediatorImpl());
  allstatus()->WatchTalkMediator(talk_mediator());

  // Ownership of |gaia_auth| (and |auth_post_factory| inside it) is handed
  // to the AuthWatcher below.
  BridgedGaiaAuthenticator* gaia_auth = new BridgedGaiaAuthenticator(
      gaia_source, service_id, gaia_url, auth_post_factory);

  auth_watcher_.reset(new AuthWatcher(dir_manager(),
                                      connection_manager(),
                                      &allstatus_,
                                      gaia_source,
                                      service_id,
                                      gaia_url,
                                      user_settings_.get(),
                                      gaia_auth,
                                      talk_mediator()));

  talk_mediator()->WatchAuthWatcher(auth_watcher());
  allstatus()->WatchAuthWatcher(auth_watcher());
  authwatcher_hookup_.reset(NewEventListenerHookup(auth_watcher_->channel(),
      this, &SyncInternal::HandleAuthWatcherEvent));

  // Tell the SyncerThread to use the ModelSafeWorker for bookmark model work.
  // We set up both sides of the "bridge" here, with the ModelSafeWorkerBridge
  // on the Syncer side, and |model_safe_worker| on the API client side.
  ModelSafeWorkerBridge* worker = new ModelSafeWorkerBridge(model_safe_worker);

  syncer_thread_.reset(new SyncerThread(&command_channel_,
                                        dir_manager(),
                                        connection_manager(),
                                        &allstatus_,
                                        worker));
  syncer_thread()->WatchTalkMediator(talk_mediator());
  allstatus()->WatchSyncerThread(syncer_thread());

  syncer_thread()->Start();  // Start the syncer thread. This won't actually
                             // result in any syncing until at least the
                             // DirectoryManager broadcasts the OPENED event,
                             // and a valid server connection is detected.

  if (attempt_last_user_authentication)
    AuthenticateForLastKnownUser();
  return true;
}
+
// Flips initialized_ to true exactly once and notifies the observer.
void SyncManager::SyncInternal::MarkAndNotifyInitializationComplete() {
  // There is only one real time we need this mutex. If we get an auth
  // success, and before the initial sync ends we get an auth failure. In this
  // case we'll be listening to both the AuthWatcher and Syncer, and it's a race
  // between their respective threads to call MarkAndNotify. We need to make
  // sure the observer is notified once and only once.
  {
    MutexLock lock(&initialized_mutex_);
    if (initialized_)
      return;
    initialized_ = true;
  }

  // Notify (outside the lock) that initialization is complete.
  if (observer_)
    observer_->OnInitializationComplete();
}
+
// Kicks off GAIA authentication with the supplied credentials.  No-op if we
// are already authenticated.  An empty password raises an auth-needed event
// first, but the attempt is still forwarded to the AuthWatcher (see TODO).
void SyncManager::SyncInternal::Authenticate(const std::string& username,
                                             const std::string& password) {
  DCHECK(username_for_share().empty() ||
         (username == browser_sync::ToUTF8(username_for_share()).get_string()))
        << "Username change from valid username detected";
  if (allstatus()->status().authenticated)
    return;
  if (password.empty()) {
    // TODO(timsteele): Seems like this shouldn't be needed, but auth_watcher
    // currently drops blank password attempts on the floor and doesn't update
    // state; it only LOGs an error in this case. We want to make sure we set
    // our AuthProblem state to denote an error.
    RaiseAuthNeededEvent();
  }
  auth_watcher()->Authenticate(username, password, true);
}
+
// Attempts authentication with the user and auth token persisted from the
// last successful auth; raises an auth-needed event if none is available.
// Opens the directory eagerly so change processing can begin before the
// token-based authentication completes.
void SyncManager::SyncInternal::AuthenticateForLastKnownUser() {
  std::string username;
  std::string auth_token;
  if (!(auth_watcher()->settings()->GetLastUserAndServiceToken(
        SYNC_SERVICE_NAME, &username, &auth_token))) {
    RaiseAuthNeededEvent();
    return;
  }

  // Convert the stored UTF8 username to the PathString form used by
  // syncable; bail out (asking for credentials) if conversion fails.
  browser_sync::ToPathString s(username);
  if (s.good()) {
    share_.authenticated_name = s.get_string16();
  } else {
    RaiseAuthNeededEvent();
    return;
  }

  // We optimize by opening the directory before the "fresh" authentication
  // attempt completes so that we can immediately begin processing changes.
  if (!dir_manager()->Open(username_for_share())) {
    DCHECK(false) << "Had last known user but could not open directory";
    return;
  }

  // Set the sync data type so that the server only sends us bookmarks
  // changes.
  {
    syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
    if (!lookup.good()) {
      DCHECK(false) << "ScopedDirLookup failed on successfully opened dir";
      return;
    }
    if (lookup->initial_sync_ended())
      MarkAndNotifyInitializationComplete();
  }

  auth_watcher()->AuthenticateWithToken(username, auth_token);
}
+
// Records an invalid-credentials auth problem and notifies the observer (if
// any) so the UI can prompt for credentials.
void SyncManager::SyncInternal::RaiseAuthNeededEvent() {
  auth_problem_ = AUTH_PROBLEM_INVALID_GAIA_CREDENTIALS;
  if (observer_)
    observer_->OnAuthProblem(auth_problem_);
}
+
// Destroys the internal implementation allocated in the constructor.
// NOTE(review): callers appear responsible for invoking Shutdown() first --
// the destructor does not do so itself.
SyncManager::~SyncManager() {
  delete data_;
}
+
// Registers the (single) observer that receives sync events.
void SyncManager::SetObserver(Observer* observer) {
  data_->set_observer(observer);
}
+
// Clears the observer; subsequent events are silently dropped.
void SyncManager::RemoveObserver() {
  data_->set_observer(NULL);
}
+
// Delegates teardown to the internal implementation.
void SyncManager::Shutdown() {
  data_->Shutdown();
}
+
// Tears the sync stack down in the reverse of the order Init built it up.
// The ordering of these steps is deliberate -- see the inline comments.
void SyncManager::SyncInternal::Shutdown() {
  // First reset the AuthWatcher in case an auth attempt is in progress so that
  // it terminates gracefully before we shutdown and close other components.
  // Otherwise the attempt can complete after we've closed the directory, for
  // example, and cause initialization to continue, which is bad.
  auth_watcher_.reset();

  if (syncer_thread()) {
    if (!syncer_thread()->Stop(kThreadExitTimeoutMsec))
      DCHECK(false) << "Unable to stop the syncer, it won't be happy...";
  }

  // Shutdown the xmpp buzz connection.
  LOG(INFO) << "P2P: Mediator logout started.";
  if (talk_mediator()) {
    talk_mediator()->Logout();
  }
  LOG(INFO) << "P2P: Mediator logout completed.";

  // Flush any pending changes to disk before closing the directory.
  if (dir_manager()) {
    dir_manager()->FinalSaveChangesForAll();
    dir_manager()->Close(username_for_share());
  }

  // Reset the DirectoryManager and UserSettings so they relinquish sqlite
  // handles to backing files.
  share_.dir_manager.reset();
  user_settings_.reset();

  // We don't want to process any more events.
  dir_change_hookup_.reset();
  syncer_event_.reset();
  authwatcher_hookup_.reset();

#if defined(OS_WINDOWS)
  // Stop the address watch thread by signaling the exit flag.
  // TODO(timsteele): Same as todo in Init().
  SetEvent(address_watch_params_.exit_flag);
  const DWORD wait_result = WaitForSingleObject(address_watch_thread_,
                                                kThreadExitTimeoutMsec);
  LOG_IF(ERROR, WAIT_FAILED == wait_result) << "Waiting for addr change thread "
      "to exit failed. GetLastError(): " << hex << GetLastError();
  LOG_IF(ERROR, WAIT_TIMEOUT == wait_result) << "Thread exit timeout expired";
  CloseHandle(address_watch_params_.exit_flag);
#endif
}
+
+// Listen to model changes, filter out ones initiated by the sync API, and
+// saves the rest (hopefully just backend Syncer changes resulting from
+// ApplyUpdates) to data_->changelist.
+void SyncManager::SyncInternal::HandleChangeEvent(
+ const syncable::DirectoryChangeEvent& event) {
+ if (event.todo == syncable::DirectoryChangeEvent::TRANSACTION_COMPLETE) {
+ HandleTransactionCompleteChangeEvent(event);
+ return;
+ } else if (event.todo == syncable::DirectoryChangeEvent::CALCULATE_CHANGES) {
+ if (event.writer == syncable::SYNCAPI) {
+ HandleCalculateChangesChangeEventFromSyncApi(event);
+ return;
+ }
+ HandleCalculateChangesChangeEventFromSyncer(event);
+ return;
+ } else if (event.todo == syncable::DirectoryChangeEvent::SHUTDOWN) {
+ dir_change_hookup_.reset();
+ }
+}
+
// Forwards the changes buffered during CALCULATE_CHANGES to the observer,
// in tree order, inside a fresh ReadTransaction; then clears the buffer.
void SyncManager::SyncInternal::HandleTransactionCompleteChangeEvent(
    const syncable::DirectoryChangeEvent& event) {
  DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::TRANSACTION_COMPLETE);
  // This notification happens immediately after a syncable WriteTransaction
  // falls out of scope.
  if (change_buffer_.IsEmpty() || !observer_)
    return;

  ReadTransaction trans(GetUserShare());
  vector<ChangeRecord> ordered_changes;
  change_buffer_.GetAllChangesInTreeOrder(&trans, &ordered_changes);
  if (!ordered_changes.empty()) {
    observer_->OnChangesApplied(&trans, &ordered_changes[0],
                                ordered_changes.size());
  }
  change_buffer_.Clear();
}
+
// Scans the entries touched by a sync-API-initiated write; if any bookmark
// entry became unsynced, nudges the syncer to commit soon.  These changes
// are NOT buffered for the observer, since the client made them itself.
void SyncManager::SyncInternal::HandleCalculateChangesChangeEventFromSyncApi(
    const syncable::DirectoryChangeEvent& event) {
  // We have been notified about a user action changing the bookmark model.
  DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::CALCULATE_CHANGES);
  DCHECK_EQ(event.writer, syncable::SYNCAPI);
  LOG_IF(WARNING, !change_buffer_.IsEmpty()) <<
      "CALCULATE_CHANGES called with unapplied old changes.";

  bool exists_unsynced_items = false;
  for (syncable::OriginalEntries::const_iterator i = event.originals->begin();
       i != event.originals->end() && !exists_unsynced_items;
       ++i) {
    int64 id = i->ref(syncable::META_HANDLE);
    syncable::Entry e(event.trans, syncable::GET_BY_HANDLE, id);
    DCHECK(e.good());

    if (e.IsRoot()) {
      // Ignore root object, should it ever change.
      continue;
    } else if (!e.Get(syncable::IS_BOOKMARK_OBJECT)) {
      // Ignore non-bookmark objects.
      continue;
    } else if (e.Get(syncable::IS_UNSYNCED)) {
      // Unsynced items will cause us to nudge the syncer.
      exists_unsynced_items = true;
    }
  }
  if (exists_unsynced_items && syncer_thread()) {
    syncer_thread()->NudgeSyncer(200, SyncerThread::kLocal);  // 1/5 of a second.
  }
}
+
// Classifies each bookmark entry touched by a Syncer-initiated write as an
// add, delete, or update, and records it in change_buffer_ for delivery to
// the observer when the transaction completes.
void SyncManager::SyncInternal::HandleCalculateChangesChangeEventFromSyncer(
    const syncable::DirectoryChangeEvent& event) {
  // We only expect one notification per sync step, so change_buffer_ should
  // contain no pending entries.
  DCHECK_EQ(event.todo, syncable::DirectoryChangeEvent::CALCULATE_CHANGES);
  DCHECK_EQ(event.writer, syncable::SYNCER);
  LOG_IF(WARNING, !change_buffer_.IsEmpty()) <<
      "CALCULATE_CHANGES called with unapplied old changes.";

  for (syncable::OriginalEntries::const_iterator i = event.originals->begin();
       i != event.originals->end(); ++i) {
    int64 id = i->ref(syncable::META_HANDLE);
    syncable::Entry e(event.trans, syncable::GET_BY_HANDLE, id);
    // |i| holds the pre-transaction state; |e| is the current state.
    bool existed_before = !i->ref(syncable::IS_DEL);
    bool exists_now = e.good() && !e.Get(syncable::IS_DEL);
    DCHECK(e.good());

    // Ignore root object, should it ever change.
    if (e.IsRoot())
      continue;
    // Ignore non-bookmark objects.
    if (!e.Get(syncable::IS_BOOKMARK_OBJECT))
      continue;

    if (exists_now && !existed_before)
      change_buffer_.PushAddedItem(id);
    else if (!exists_now && existed_before)
      change_buffer_.PushDeletedItem(id);
    else if (exists_now && existed_before && BookmarkPropertiesDiffer(*i, e))
      change_buffer_.PushUpdatedItem(id, BookmarkPositionsDiffer(*i, e));
  }
}
+
+SyncManager::Status::Summary
+SyncManager::SyncInternal::ComputeAggregatedStatusSummary() {
+ switch (allstatus()->status().icon) {
+ case AllStatus::OFFLINE:
+ return Status::OFFLINE;
+ case AllStatus::OFFLINE_UNSYNCED:
+ return Status::OFFLINE_UNSYNCED;
+ case AllStatus::SYNCING:
+ return Status::SYNCING;
+ case AllStatus::READY:
+ return Status::READY;
+ case AllStatus::CONFLICT:
+ return Status::CONFLICT;
+ case AllStatus::OFFLINE_UNUSABLE:
+ return Status::OFFLINE_UNUSABLE;
+ default:
+ return Status::INVALID;
+ }
+}
+
// Builds a Status snapshot from the current AllStatus state.
// NOTE: this is aggregate initialization -- the field order below must
// match the declaration order of SyncManager::Status exactly.
SyncManager::Status SyncManager::SyncInternal::ComputeAggregatedStatus() {
  Status return_status =
      { ComputeAggregatedStatusSummary(),
        allstatus()->status().authenticated,
        allstatus()->status().server_up,
        allstatus()->status().server_reachable,
        allstatus()->status().server_broken,
        allstatus()->status().notifications_enabled,
        allstatus()->status().notifications_received,
        allstatus()->status().notifications_sent,
        allstatus()->status().unsynced_count,
        allstatus()->status().conflicting_count,
        allstatus()->status().syncing,
        allstatus()->status().initial_sync_ended,
        allstatus()->status().syncer_stuck,
        allstatus()->status().updates_available,
        allstatus()->status().updates_received,
        allstatus()->status().disk_full,
        allstatus()->status().max_consecutive_errors};
  return return_status;
}
+
+void SyncManager::SyncInternal::HandleSyncerEvent(const SyncerEvent& event) {
+ if (!initialized()) {
+ // We get here if A) We have successfully authenticated at least once (
+ // because we attach HandleSyncerEvent only once we receive notification of
+ // successful authentication [locally or otherwise]), but B) the initial
+ // sync had not completed at that time.
+ if (SyncerStatus(event.last_session).IsShareUsable())
+ MarkAndNotifyInitializationComplete();
+ return;
+ }
+
+ if (!observer_)
+ return;
+
+ // Only send an event if this is due to a cycle ending and this cycle
+ // concludes a canonical "sync" process; that is, based on what is known
+ // locally we are "all happy" and up-to-date. There may be new changes on
+ // the server, but we'll get them on a subsequent sync.
+ //
+ // Notifications are sent at the end of every sync cycle, regardless of
+ // whether we should sync again.
+ if (event.what_happened == SyncerEvent::SYNC_CYCLE_ENDED) {
+ if (!event.last_session->ShouldSyncAgain()) {
+ observer_->OnSyncCycleCompleted();
+ }
+
+ // TODO(chron): Consider changing this back to track ShouldSyncAgain
+ // Only notify peers if a commit has occurred and change the bookmark model.
+ if (event.last_session && event.last_session->items_committed()) {
+ notification_pending_ = true;
+ }
+
+ // SyncCycles are started by the following events: creation of the syncer,
+ // (re)connection to buzz, local changes, peer notifications of updates.
+ // Peers will be notified of changes made while there is no buzz connection
+ // immediately after a connection has been re-established.
+ // the next sync cycle.
+ // TODO(brg): Move this to TalkMediatorImpl as a SyncerThread event hook.
+ if (notification_pending_ && talk_mediator()) {
+ LOG(INFO) << "Sending XMPP notification...";
+ bool success = talk_mediator()->SendNotification();
+ if (success) {
+ notification_pending_ = false;
+ }
+ } else {
+ LOG(INFO) << "Didn't send XMPP notification!"
+ << " event.last_session: " << event.last_session
+ << " event.last_session->items_committed(): "
+ << event.last_session->items_committed()
+ << " talk_mediator(): " << talk_mediator();
+ }
+ }
+}
+
// Translates AuthWatcher events into auth_problem_ state and observer
// notifications.  On AUTH_SUCCEEDED it also hooks up the directory-change
// and syncer-event listeners.  Failure cases fall through the switch to the
// shared OnAuthProblem notification at the bottom.
void SyncManager::SyncInternal::HandleAuthWatcherEvent(
    const AuthWatcherEvent& event) {
  // We don't care about an authentication attempt starting event, and we
  // don't want to reset our state to AUTH_PROBLEM_NONE because the fact that
  // an _attempt_ is starting doesn't change the fact that we have an auth
  // problem.
  if (event.what_happened == AuthWatcherEvent::AUTHENTICATION_ATTEMPT_START)
    return;
  // We clear our last auth problem cache on new auth watcher events, and only
  // set it to indicate a problem state for certain AuthWatcherEvent types.
  auth_problem_ = AUTH_PROBLEM_NONE;
  switch (event.what_happened) {
    case AuthWatcherEvent::AUTH_SUCCEEDED:
      // We now know the supplied username and password were valid. If this
      // wasn't the first sync, authenticated_name should already be assigned.
      if (username_for_share().empty()) {
        browser_sync::ToPathString s(event.user_email);
        if (s.good())
          share_.authenticated_name = s.get_string16();
      }

      // Sanity-check: the share's username must match the email that just
      // authenticated (case-insensitively).
      DCHECK(LowerCaseEqualsASCII(browser_sync::ToUTF8(
          username_for_share()).get_string(),
          StringToLowerASCII(event.user_email).c_str()))
          << "username_for_share= "
          << browser_sync::ToUTF8(username_for_share())
          << ", event.user_email= " << event.user_email;

      if (observer_)
        observer_->OnAuthProblem(AUTH_PROBLEM_NONE);

      // Hook up the DirectoryChangeEvent listener, HandleChangeEvent.
      {
        syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
        if (!lookup.good()) {
          DCHECK(false) << "ScopedDirLookup creation failed; unable to hook "
                        << "up directory change event listener!";
          return;
        }
        dir_change_hookup_.reset(NewEventListenerHookup(
            lookup->changes_channel(), this,
            &SyncInternal::HandleChangeEvent));

        if (lookup->initial_sync_ended())
          MarkAndNotifyInitializationComplete();
      }
      {
        // Start watching the syncer channel directly here.
        DCHECK(syncer_thread() != NULL);
        syncer_event_.reset(NewEventListenerHookup(syncer_thread()->channel(),
            this, &SyncInternal::HandleSyncerEvent));
      }
      return;
    // Authentication failures translate to Status::AuthProblem events.
    case AuthWatcherEvent::GAIA_AUTH_FAILED:     // Invalid GAIA credentials.
    case AuthWatcherEvent::SERVICE_AUTH_FAILED:  // Expired GAIA credentials.
      auth_problem_ = AUTH_PROBLEM_INVALID_GAIA_CREDENTIALS;
      break;
    case AuthWatcherEvent::SERVICE_USER_NOT_SIGNED_UP:
      auth_problem_ = AUTH_PROBLEM_USER_NOT_SIGNED_UP;
      break;
    case AuthWatcherEvent::SERVICE_CONNECTION_FAILED:
      auth_problem_ = AUTH_PROBLEM_CONNECTION_FAILED;
      break;
    default:  // We don't care about the many other AuthWatcherEvent types.
      return;
  }

  // Fire notification that the status changed due to an authentication error.
  if (observer_)
    observer_->OnAuthProblem(auth_problem_);
}
+
+SyncManager::Status::Summary SyncManager::GetStatusSummary() const {
+ return data_->ComputeAggregatedStatusSummary();
+}
+
+SyncManager::Status SyncManager::GetDetailedStatus() const {
+ return data_->ComputeAggregatedStatus();
+}
+
+SyncManager::SyncInternal* SyncManager::GetImpl() const { return data_; }
+
+void SyncManager::SaveChanges() {
+ data_->SaveChanges();
+}
+
+void SyncManager::SyncInternal::SaveChanges() {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup creation failed; Unable to SaveChanges";
+ return;
+ }
+ lookup->SaveChanges();
+}
+
+void SyncManager::SetupForTestMode(const sync_char16* test_username) {
+ DCHECK(data_) << "SetupForTestMode requires initialization";
+ data_->SetupForTestMode(test_username);
+}
+
+void SyncManager::SyncInternal::SetupForTestMode(
+ const sync_char16* test_username) {
+ String16ToPathString(test_username, &share_.authenticated_name);
+
+ if (!dir_manager()->Open(username_for_share()))
+ DCHECK(false) << "Could not open directory when running in test mode";
+
+ // Hook up the DirectoryChangeEvent listener, HandleChangeEvent.
+ {
+ syncable::ScopedDirLookup lookup(dir_manager(), username_for_share());
+ if (!lookup.good()) {
+ DCHECK(false) << "ScopedDirLookup creation failed; unable to hook "
+ << "up directory change event listener!";
+ return;
+ }
+ dir_change_hookup_.reset(NewEventListenerHookup(
+ lookup->changes_channel(), this,
+ &SyncInternal::HandleChangeEvent));
+ }
+ MarkAndNotifyInitializationComplete();
+}
+
+//////////////////////////////////////////////////////////////////////////
+// BaseTransaction member definitions
+BaseTransaction::BaseTransaction(UserShare* share)
+ : lookup_(NULL) {
+ DCHECK(share && share->dir_manager.get());
+ lookup_ = new syncable::ScopedDirLookup(share->dir_manager.get(),
+ share->authenticated_name);
+ if (!(lookup_->good()))
+ DCHECK(false) << "ScopedDirLookup failed on valid DirManager.";
+}
+BaseTransaction::~BaseTransaction() {
+ delete lookup_;
+}
+
+UserShare* SyncManager::GetUserShare() const {
+ DCHECK(data_->initialized()) << "GetUserShare requires initialization!";
+ return data_->GetUserShare();
+}
+
+} // namespace sync_api
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
new file mode 100644
index 0000000..0b02e2e
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -0,0 +1,338 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer.h"
+
+#include "base/format_macros.h"
+#include "chrome/browser/sync/engine/apply_updates_command.h"
+#include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
+#include "chrome/browser/sync/engine/build_commit_command.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/download_updates_command.h"
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/post_commit_message_command.h"
+#include "chrome/browser/sync/engine/process_commit_response_command.h"
+#include "chrome/browser/sync/engine/process_updates_command.h"
+#include "chrome/browser/sync/engine/resolve_conflicts_command.h"
+#include "chrome/browser/sync/engine/syncer_end_command.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/engine/verify_updates_command.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using sync_pb::ClientCommand;
+using syncable::Blob;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::SERVER_BOOKMARK_FAVICON;
+using syncable::SERVER_BOOKMARK_URL;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_BOOKMARK_OBJECT;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NAME;
+using syncable::SERVER_NON_UNIQUE_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SYNCER;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+Syncer::Syncer(
+ syncable::DirectoryManager* dirman,
+ const PathString &account_name,
+ ServerConnectionManager* connection_manager,
+ ModelSafeWorker* model_safe_worker)
+ : account_name_(account_name),
+ early_exit_requested_(false),
+ max_commit_batch_size_(kDefaultMaxCommitBatchSize),
+ connection_manager_(connection_manager),
+ dirman_(dirman),
+ silenced_until_(0),
+ command_channel_(NULL),
+ model_safe_worker_(model_safe_worker),
+ updates_source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
+ notifications_enabled_(false),
+ pre_conflict_resolution_function_(NULL) {
+ SyncerEvent shutdown = { SyncerEvent::SHUTDOWN_USE_WITH_CARE };
+ syncer_event_channel_.reset(new SyncerEventChannel(shutdown));
+ shutdown_channel_.reset(new ShutdownChannel(this));
+
+ ScopedDirLookup dir(dirman_, account_name_);
+ // The directory must be good here.
+ CHECK(dir.good());
+}
+
+Syncer::~Syncer() {}
+
+void Syncer::RequestNudge(int milliseconds) {
+ SyncerEvent event;
+ event.what_happened = SyncerEvent::REQUEST_SYNC_NUDGE;
+ event.nudge_delay_milliseconds = milliseconds;
+ channel()->NotifyListeners(event);
+}
+
+bool Syncer::SyncShare() {
+ SyncProcessState state(dirman_, account_name_, connection_manager_,
+ &resolver_, syncer_event_channel_.get(),
+ model_safe_worker());
+ return SyncShare(&state);
+}
+
+bool Syncer::SyncShare(SyncProcessState *process_state) {
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, process_state);
+ session.set_source(TestAndSetUpdatesSource());
+ session.set_notifications_enabled(notifications_enabled());
+ SyncShare(&session, SYNCER_BEGIN, SYNCER_END);
+ return session.ShouldSyncAgain();
+}
+
+bool Syncer::SyncShare(SyncerStep first_step, SyncerStep last_step) {
+ SyncCycleState cycle_state;
+ SyncProcessState state(dirman_, account_name_, connection_manager_,
+ &resolver_, syncer_event_channel_.get(),
+ model_safe_worker());
+ SyncerSession session(&cycle_state, &state);
+ SyncShare(&session, first_step, last_step);
+ return session.ShouldSyncAgain();
+}
+
+void Syncer::SyncShare(SyncerSession *session) {
+ SyncShare(session, SYNCER_BEGIN, SYNCER_END);
+}
+
+void Syncer::SyncShare(SyncerSession *session,
+ const SyncerStep first_step,
+ const SyncerStep last_step) {
+ SyncerStep current_step = first_step;
+
+ SyncerStep next_step;
+ while (!ExitRequested()) {
+ switch (current_step) {
+ case SYNCER_BEGIN:
+ LOG(INFO) << "Syncer Begin";
+ next_step = DOWNLOAD_UPDATES;
+ break;
+ case DOWNLOAD_UPDATES: {
+ LOG(INFO) << "Downloading Updates";
+ DownloadUpdatesCommand download_updates;
+ download_updates.Execute(session);
+ next_step = PROCESS_CLIENT_COMMAND;
+ break;
+ }
+ case PROCESS_CLIENT_COMMAND: {
+ LOG(INFO) << "Processing Client Command";
+ ProcessClientCommand(session);
+ next_step = VERIFY_UPDATES;
+ break;
+ }
+ case VERIFY_UPDATES: {
+ LOG(INFO) << "Verifying Updates";
+ VerifyUpdatesCommand verify_updates;
+ verify_updates.Execute(session);
+ next_step = PROCESS_UPDATES;
+ break;
+ }
+ case PROCESS_UPDATES: {
+ LOG(INFO) << "Processing Updates";
+ ProcessUpdatesCommand process_updates;
+ process_updates.Execute(session);
+ // We should download all of the updates before attempting to process
+ // them.
+ if (session->CountUpdates() == 0) {
+ next_step = APPLY_UPDATES;
+ } else {
+ next_step = DOWNLOAD_UPDATES;
+ }
+ break;
+ }
+ case APPLY_UPDATES: {
+ LOG(INFO) << "Applying Updates";
+ ApplyUpdatesCommand apply_updates;
+ apply_updates.Execute(session);
+ next_step = BUILD_COMMIT_REQUEST;
+ break;
+ }
+ // These two steps are combined since they are executed within the same
+ // write transaction.
+ case BUILD_COMMIT_REQUEST: {
+ SyncerStatus status(session);
+ status.set_syncing(true);
+
+ LOG(INFO) << "Processing Commit Request";
+ ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+ WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+ SyncerSession::ScopedSetWriteTransaction set_trans(session, &trans);
+
+ LOG(INFO) << "Getting the Commit IDs";
+ GetCommitIdsCommand get_commit_ids_command(max_commit_batch_size_);
+ get_commit_ids_command.Execute(session);
+
+ if (!session->commit_ids().empty()) {
+ LOG(INFO) << "Building a commit message";
+ BuildCommitCommand build_commit_command;
+ build_commit_command.Execute(session);
+
+ next_step = POST_COMMIT_MESSAGE;
+ } else {
+ next_step = BUILD_AND_PROCESS_CONFLICT_SETS;
+ }
+
+ break;
+ }
+ case POST_COMMIT_MESSAGE: {
+ LOG(INFO) << "Posting a commit request";
+ PostCommitMessageCommand post_commit_command;
+ post_commit_command.Execute(session);
+ next_step = PROCESS_COMMIT_RESPONSE;
+ break;
+ }
+ case PROCESS_COMMIT_RESPONSE: {
+ LOG(INFO) << "Processing the commit response";
+ ProcessCommitResponseCommand process_response_command;
+ process_response_command.Execute(session);
+ next_step = BUILD_AND_PROCESS_CONFLICT_SETS;
+ break;
+ }
+ case BUILD_AND_PROCESS_CONFLICT_SETS: {
+ LOG(INFO) << "Building and Processing Conflict Sets";
+ BuildAndProcessConflictSetsCommand build_process_conflict_sets;
+ build_process_conflict_sets.Execute(session);
+ if (session->conflict_sets_built())
+ next_step = SYNCER_END;
+ else
+ next_step = RESOLVE_CONFLICTS;
+ break;
+ }
+ case RESOLVE_CONFLICTS: {
+ LOG(INFO) << "Resolving Conflicts";
+
+ // Trigger the pre_conflict_resolution_function_, which is a testing
+ // hook for the unit tests, if it is non-NULL.
+ if (pre_conflict_resolution_function_) {
+ ScopedDirLookup dir(dirman_, account_name_);
+ if (!dir.good()) {
+ LOG(ERROR) << "Bad dir lookup in syncer loop";
+ return;
+ }
+ pre_conflict_resolution_function_(dir);
+ }
+
+ ResolveConflictsCommand resolve_conflicts_command;
+ resolve_conflicts_command.Execute(session);
+ if (session->HasConflictingUpdates())
+ next_step = APPLY_UPDATES_TO_RESOLVE_CONFLICTS;
+ else
+ next_step = SYNCER_END;
+ break;
+ }
+ case APPLY_UPDATES_TO_RESOLVE_CONFLICTS: {
+ LOG(INFO) << "Applying updates to resolve conflicts";
+ ApplyUpdatesCommand apply_updates;
+ int num_conflicting_updates = session->conflicting_update_count();
+ apply_updates.Execute(session);
+ int post_facto_conflicting_updates =
+ session->conflicting_update_count();
+ session->set_conflicts_resolved(session->conflicts_resolved() ||
+ num_conflicting_updates > post_facto_conflicting_updates);
+ if (session->conflicts_resolved())
+ next_step = RESOLVE_CONFLICTS;
+ else
+ next_step = SYNCER_END;
+ break;
+ }
+ case SYNCER_END: {
+ LOG(INFO) << "Syncer End";
+ SyncerEndCommand syncer_end_command;
+ // This will set "syncing" to false, and send out a notification.
+ syncer_end_command.Execute(session);
+ goto post_while;
+ }
+ default:
+ LOG(ERROR) << "Unknown command: " << current_step;
+ }
+ if (last_step == current_step)
+ break;
+ current_step = next_step;
+ }
+ post_while:
+ // Copy any lingering useful state out of the session.
+ silenced_until_ = session->silenced_until();
+ return;
+}
+
+void Syncer::ProcessClientCommand(SyncerSession *session) {
+ if (!session->update_response().has_client_command())
+ return;
+ const ClientCommand command = session->update_response().client_command();
+ if (command_channel_)
+ command_channel_->NotifyListeners(&command);
+
+ // The server limits the number of items a client can commit in one batch.
+ if (command.has_max_commit_batch_size())
+ max_commit_batch_size_ = command.max_commit_batch_size();
+}
+
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
+ dest->Put(SERVER_NAME, src->Get(SERVER_NAME));
+ dest->Put(SERVER_NON_UNIQUE_NAME, src->Get(SERVER_NON_UNIQUE_NAME));
+ dest->Put(SERVER_PARENT_ID, src->Get(SERVER_PARENT_ID));
+ dest->Put(SERVER_MTIME, src->Get(SERVER_MTIME));
+ dest->Put(SERVER_CTIME, src->Get(SERVER_CTIME));
+ dest->Put(SERVER_VERSION, src->Get(SERVER_VERSION));
+ dest->Put(SERVER_IS_DIR, src->Get(SERVER_IS_DIR));
+ dest->Put(SERVER_IS_DEL, src->Get(SERVER_IS_DEL));
+ dest->Put(SERVER_IS_BOOKMARK_OBJECT, src->Get(SERVER_IS_BOOKMARK_OBJECT));
+ dest->Put(IS_UNAPPLIED_UPDATE, src->Get(IS_UNAPPLIED_UPDATE));
+ dest->Put(SERVER_BOOKMARK_URL, src->Get(SERVER_BOOKMARK_URL));
+ dest->Put(SERVER_BOOKMARK_FAVICON, src->Get(SERVER_BOOKMARK_FAVICON));
+ dest->Put(SERVER_POSITION_IN_PARENT, src->Get(SERVER_POSITION_IN_PARENT));
+}
+
+void ClearServerData(syncable::MutableEntry* entry) {
+ entry->Put(SERVER_NAME, PSTR(""));
+ entry->Put(SERVER_NON_UNIQUE_NAME, PSTR(""));
+ entry->Put(SERVER_PARENT_ID, syncable::kNullId);
+ entry->Put(SERVER_MTIME, 0);
+ entry->Put(SERVER_CTIME, 0);
+ entry->Put(SERVER_VERSION, 0);
+ entry->Put(SERVER_IS_DIR, false);
+ entry->Put(SERVER_IS_DEL, false);
+ entry->Put(SERVER_IS_BOOKMARK_OBJECT, false);
+ entry->Put(IS_UNAPPLIED_UPDATE, false);
+ entry->Put(SERVER_BOOKMARK_URL, PSTR(""));
+ entry->Put(SERVER_BOOKMARK_FAVICON, Blob());
+ entry->Put(SERVER_POSITION_IN_PARENT, 0);
+}
+
+std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry) {
+  return StringPrintf("id: %s, parent_id: %s, "
+      "version: %" PRId64 ", "
+      "mtime: %" PRId64 " (client: %" PRId64 "), "
+      "ctime: %" PRId64 " (client: %" PRId64 "), "
+      "name: %s, sync_timestamp: %" PRId64 ", "
+      "%s ",
+      entry.id_string().c_str(),
+      entry.parent_id_string().c_str(),
+      entry.version(),
+      entry.mtime(), ServerTimeToClientTime(entry.mtime()),
+      entry.ctime(), ServerTimeToClientTime(entry.ctime()),
+      entry.name().c_str(), entry.sync_timestamp(),
+      entry.deleted() ? "deleted, ":"");
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
new file mode 100644
index 0000000..f546f20
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer.h
@@ -0,0 +1,234 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_event.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace syncable {
+class Directory;
+class DirectoryManager;
+class Entry;
+class Id;
+class MutableEntry;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class ModelSafeWorker;
+class ServerConnectionManager;
+class SyncProcessState;
+class SyncerSession;
+class URLFactory;
+struct HttpResponse;
+
+static const int kDefaultMaxCommitBatchSize = 25;
+
+enum SyncerStep {
+ SYNCER_BEGIN,
+ DOWNLOAD_UPDATES,
+ PROCESS_CLIENT_COMMAND,
+ VERIFY_UPDATES,
+ PROCESS_UPDATES,
+ APPLY_UPDATES,
+ BUILD_COMMIT_REQUEST,
+ POST_COMMIT_MESSAGE,
+ PROCESS_COMMIT_RESPONSE,
+ BUILD_AND_PROCESS_CONFLICT_SETS,
+ RESOLVE_CONFLICTS,
+ APPLY_UPDATES_TO_RESOLVE_CONFLICTS,
+ SYNCER_END
+};
+
+// A Syncer provides a control interface for driving the individual steps
+// of the sync cycle. Each cycle (hopefully) moves the client into closer
+// synchronization with the server. The individual steps are modeled
+// as SyncerCommands, and the ordering of the steps is expressed using
+// the SyncerStep enum.
+//
+// A Syncer instance expects to run on a dedicated thread. Calls
+// to SyncShare() may take an unbounded amount of time, as SyncerCommands
+// may block on network i/o, on lock contention, or on tasks posted to
+// other threads.
+class Syncer {
+ public:
+ typedef std::vector<int64> UnsyncedMetaHandles;
+ typedef void (*TestCallbackFunction)(syncable::Directory* dir);
+
+ // The constructor may be called from a thread that is not the Syncer's
+ // dedicated thread, to allow some flexibility in the setup.
+ Syncer(
+ syncable::DirectoryManager* dirman,
+ const PathString &account_name,
+ ServerConnectionManager* connection_manager,
+ ModelSafeWorker* model_safe_worker);
+
+ ~Syncer();
+
+ // Called by other threads to tell the syncer to stop what it's doing
+ // and return early from SyncShare, if possible.
+ bool ExitRequested() { return early_exit_requested_; }
+ void RequestEarlyExit() { early_exit_requested_ = true; }
+
+ // SyncShare(...) variants cause one sync cycle to occur. The return value
+ // indicates whether we should sync again.
+ // The zero-argument version of SyncShare is provided for unit tests.
+ // When |sync_process_state| is provided, it is used as the syncer state
+ // for the sync cycle. It is treated as an input/output parameter.
+ // When |first_step| and |last_step| are provided, this means to perform
+ // a partial sync cycle, stopping after |last_step| is performed.
+ bool SyncShare();
+ bool SyncShare(SyncProcessState *sync_process_state);
+ bool SyncShare(SyncerStep first_step, SyncerStep last_step);
+
+ // Limit the batch size of commit operations to a specified number of items.
+ void set_max_commit_batch_size(int x) { max_commit_batch_size_ = x; }
+
+ ConflictResolver* conflict_resolver() { return &resolver_; }
+
+ PathString account_name() { return account_name_; }
+
+ SyncerEventChannel* channel() const { return syncer_event_channel_.get(); }
+
+ ShutdownChannel* shutdown_channel() const { return shutdown_channel_.get(); }
+
+ ModelSafeWorker* model_safe_worker() { return model_safe_worker_; }
+
+ // Syncer will take ownership of this channel and it will be destroyed along
+ // with the Syncer instance.
+ void set_shutdown_channel(ShutdownChannel* channel) {
+ shutdown_channel_.reset(channel);
+ }
+
+ void set_command_channel(ClientCommandChannel* channel) {
+ command_channel_ = channel;
+ }
+
+ // Volatile reader for the source member of the syncer session object. The
+ // value is set to the SYNC_CYCLE_CONTINUATION value to signal that it has
+ // been read.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE TestAndSetUpdatesSource() {
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE old_source =
+ updates_source_;
+ set_updates_source(sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION);
+ return old_source;
+ }
+
+ void set_updates_source(
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source) {
+ updates_source_ = source;
+ }
+
+ bool notifications_enabled() const {
+ return notifications_enabled_;
+ }
+
+ void set_notifications_enabled(bool state) {
+ notifications_enabled_ = state;
+ }
+
+ private:
+ void RequestNudge(int milliseconds);
+
+ // Implements the PROCESS_CLIENT_COMMAND syncer step.
+ void ProcessClientCommand(SyncerSession *session);
+
+ void SyncShare(SyncerSession* session);
+ void SyncShare(SyncerSession* session,
+ SyncerStep first_step,
+ SyncerStep last_step);
+
+ PathString account_name_;
+ bool early_exit_requested_;
+
+ int32 max_commit_batch_size_;
+
+ ServerConnectionManager* connection_manager_;
+
+ ConflictResolver resolver_;
+ syncable::DirectoryManager* const dirman_;
+
+ // When we're over bandwidth quota, we don't update until past this time.
+ time_t silenced_until_;
+
+ scoped_ptr<SyncerEventChannel> syncer_event_channel_;
+ scoped_ptr<ShutdownChannel> shutdown_channel_;
+ ClientCommandChannel* command_channel_;
+
+ // A worker capable of processing work closures on a thread that is
+ // guaranteed to be safe for model modifications. This is created and owned
+ // by the SyncerThread that created us.
+ ModelSafeWorker* model_safe_worker_;
+
+ // The source of the last nudge.
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE updates_source_;
+
+ // True only if the notification channel is authorized and open.
+ bool notifications_enabled_;
+
+ // A callback hook used in unittests to simulate changes between conflict set
+ // building and conflict resolution.
+ TestCallbackFunction pre_conflict_resolution_function_;
+
+ FRIEND_TEST(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParent);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParentAndChild);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingCounterexample);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingWithNesting);
+ FRIEND_TEST(SyncerTest, TestCommitListOrderingWithNewItems);
+ FRIEND_TEST(SyncerTest, TestGetUnsyncedAndSimpleCommit);
+
+ DISALLOW_COPY_AND_ASSIGN(Syncer);
+};
+
+// Inline utility functions.
+
+// Given iterator ranges from two collections sorted according to a
+// common strict weak ordering, return true if the two ranges contain
+// any common items, and false if they do not.
+// This function is in this header so that it can be tested.
+template <class Iterator1, class Iterator2>
+bool SortedCollectionsIntersect(Iterator1 begin1, Iterator1 end1,
+ Iterator2 begin2, Iterator2 end2) {
+ Iterator1 i1 = begin1;
+ Iterator2 i2 = begin2;
+ while (i1 != end1 && i2 != end2) {
+ if (*i1 == *i2)
+ return true;
+ if (*i1 > *i2)
+ ++i2;
+ else
+ ++i1;
+ }
+ return false;
+}
+// Utility function declarations.
+void SplitServerInformationIntoNewEntry(syncable::WriteTransaction* trans,
+ syncable::MutableEntry* entry);
+void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest);
+void ClearServerData(syncable::MutableEntry* entry);
+
+// Get update contents as a string. Intended for logging, and intended
+// to have a smaller footprint than the protobuf's built-in pretty printer.
+std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry);
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_H_
diff --git a/chrome/browser/sync/engine/syncer_command.cc b/chrome/browser/sync/engine/syncer_command.cc
new file mode 100644
index 0000000..9f05b64
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_command.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+SyncerCommand::SyncerCommand() {}
+SyncerCommand::~SyncerCommand() {}
+
+void SyncerCommand::Execute(SyncerSession *session) {
+ ExecuteImpl(session);
+ SendNotifications(session);
+}
+
+void SyncerCommand::SendNotifications(SyncerSession *session) {
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+
+ SyncerStatus status(session);
+
+ if (status.IsDirty()) {
+ SyncerEvent event = { SyncerEvent::STATUS_CHANGED};
+ event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(event);
+ if (status.over_quota()) {
+ SyncerEvent quota_event = {SyncerEvent::OVER_QUOTA};
+ quota_event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(quota_event);
+ }
+ status.SetClean();
+ }
+ if (status.IsAuthDirty()) {
+ ServerConnectionEvent event;
+ event.what_happened = ServerConnectionEvent::STATUS_CHANGED;
+ event.server_reachable = true;
+ event.connection_code = HttpResponse::SYNC_AUTH_ERROR;
+ session->connection_manager()->channel()->NotifyListeners(event);
+ status.SetAuthClean();
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_command.h b/chrome/browser/sync/engine/syncer_command.h
new file mode 100644
index 0000000..3fcff7d
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_command.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
+
+#include "base/basictypes.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// Implementation of a simple command pattern intended to be driven by the
+// Syncer. SyncerCommand is abstract and all subclasses must
+// implement ExecuteImpl(). This is done so that chunks of syncer operation
+// can be unit tested.
+//
+// Example Usage:
+//
+// SyncerSession session = ...;
+// SyncerCommand *cmd = SomeCommandFactory.createCommand(...);
+// cmd->Execute(session);
+// delete cmd;
+//
+
+class SyncerCommand {
+ public:
+ SyncerCommand();
+ virtual ~SyncerCommand();
+
+ // Execute dispatches to a derived class's ExecuteImpl.
+ void Execute(SyncerSession *session);
+
+ // ExecuteImpl is where derived classes actually do work.
+ virtual void ExecuteImpl(SyncerSession *session) = 0;
+ private:
+ void SendNotifications(SyncerSession *session);
+ DISALLOW_COPY_AND_ASSIGN(SyncerCommand);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_COMMAND_H_
diff --git a/chrome/browser/sync/engine/syncer_end_command.cc b/chrome/browser/sync/engine/syncer_end_command.cc
new file mode 100644
index 0000000..f25cec8
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_end_command.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_end_command.h"
+
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+
+namespace browser_sync {
+
+SyncerEndCommand::SyncerEndCommand() {}
+SyncerEndCommand::~SyncerEndCommand() {}
+
+void SyncerEndCommand::ExecuteImpl(SyncerSession* session) {
+ ConflictResolutionView conflict_view(session);
+ conflict_view.increment_num_sync_cycles();
+ SyncerStatus status(session);
+ status.set_syncing(false);
+
+ if (!session->ShouldSyncAgain()) {
+ // This might be the first time we've fully completed a sync cycle.
+ DCHECK(session->got_zero_updates());
+
+ syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
+ if (!dir.good()) {
+ LOG(ERROR) << "Scoped dir lookup failed!";
+ return;
+ }
+
+ // This gets persisted to the directory's backing store.
+ dir->set_initial_sync_ended(true);
+ }
+
+ SyncerEvent event = { SyncerEvent::SYNC_CYCLE_ENDED };
+ event.last_session = session;
+ session->syncer_event_channel()->NotifyListeners(event);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_end_command.h b/chrome/browser/sync/engine/syncer_end_command.h
new file mode 100644
index 0000000..904bac4
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_end_command.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+
+// A syncer command for wrapping up a sync cycle.
+//
+// Preconditions - syncing is complete
+//
+// Postconditions - The UI has been told that we're done syncing
+
+class SyncerEndCommand : public SyncerCommand {
+ public:
+ SyncerEndCommand();
+ virtual ~SyncerEndCommand();
+
+ virtual void ExecuteImpl(SyncerSession* session);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SyncerEndCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util.cc b/chrome/browser/sync/engine/syncer_proto_util.cc
new file mode 100644
index 0000000..38ee50d
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util.cc
@@ -0,0 +1,276 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+
+using std::string;
+using std::stringstream;
+using syncable::BASE_VERSION;
+using syncable::CTIME;
+using syncable::ID;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNSYNCED;
+using syncable::MTIME;
+using syncable::PARENT_ID;
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+
+namespace browser_sync {
+
+namespace {
+
+// Time to backoff syncing after receiving a throttled response.
+static const int kSyncDelayAfterThrottled = 2 * 60 * 60; // 2 hours
+
+// Verifies the store birthday, alerting/resetting as appropriate if there's a
+// mismatch.
+bool VerifyResponseBirthday(const ScopedDirLookup& dir,
+ const ClientToServerResponse* response) {
+ // Process store birthday.
+ if (!response->has_store_birthday())
+ return true;
+ string birthday = dir->store_birthday();
+ if (response->store_birthday() == birthday)
+ return true;
+ LOG(INFO) << "New store birthday: " << response->store_birthday();
+ if (!birthday.empty()) {
+ LOG(ERROR) << "Birthday changed, showing syncer stuck";
+ return false;
+ }
+ dir->set_store_birthday(response->store_birthday());
+ return true;
+}
+
+void LogResponseProfilingData(const ClientToServerResponse& response) {
+ if (response.has_profiling_data()) {
+ stringstream response_trace;
+ response_trace << "Server response trace:";
+
+ if (response.profiling_data().has_user_lookup_time()) {
+ response_trace << " " << "user lookup: " <<
+ response.profiling_data().user_lookup_time() << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_write_time()) {
+ response_trace << " " << "meta write: " <<
+ response.profiling_data().meta_data_write_time() << "ms";
+ }
+
+ if (response.profiling_data().has_meta_data_read_time()) {
+ response_trace << " " << "meta read: " <<
+ response.profiling_data().meta_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_write_time()) {
+ response_trace << " " << "file write: " <<
+ response.profiling_data().file_data_write_time() << "ms";
+ }
+
+ if (response.profiling_data().has_file_data_read_time()) {
+ response_trace << " " << "file read: " <<
+ response.profiling_data().file_data_read_time() << "ms";
+ }
+
+ if (response.profiling_data().has_total_request_time()) {
+ response_trace << " " << "total time: " <<
+ response.profiling_data().total_request_time() << "ms";
+ }
+ LOG(INFO) << response_trace.str();
+ }
+}
+
+} // namespace
+
// static
// Serializes |msg|, posts it to the server with cached auth, parses the
// reply into |response|, and maps the server error code onto session
// state.  Returns true only on a successful, verified response.
bool SyncerProtoUtil::PostClientToServerMessage(
    ClientToServerMessage* msg,
    ClientToServerResponse* response,
    SyncerSession *session) {

  bool rv = false;
  string tx, rx;
  CHECK(response);

  ScopedDirLookup dir(session->dirman(), session->account_name());
  if (!dir.good())
    return false;
  // Attach our saved store birthday, if any; the server uses it to detect
  // a mismatched account state (see NOT_MY_BIRTHDAY below).
  string birthday = dir->store_birthday();
  if (!birthday.empty()) {
    msg->set_store_birthday(birthday);
  } else {
    LOG(INFO) << "no birthday set";
  }

  msg->SerializeToString(&tx);
  HttpResponse http_response;
  ServerConnectionManager::PostBufferParams params = {
    tx, &rx, &http_response
  };

  // rv stays false on a transport failure or an unparseable reply.
  if (!session->connection_manager()->PostBufferWithCachedAuth(&params)) {
    LOG(WARNING) << "Error posting from syncer:" << http_response;
  } else {
    rv = response->ParseFromString(rx);
  }
  SyncerStatus status(session);
  if (rv) {
    // A birthday mismatch invalidates everything else in the response.
    if (!VerifyResponseBirthday(dir, response)) {
      // TODO(ncarter): Add a unit test for the case where the syncer
      // becomes stuck due to a bad birthday.
      status.set_syncer_stuck(true);
      return false;
    }

    // We use an exponential moving average to determine the rate of errors.
    // It's more reactive to recent situations and uses no extra storage.
    status.ForgetOldError();
    // If we're decaying send out an update.
    status.CheckErrorRateTooHigh();

    // Map the application-level error code; rv is cleared for every
    // non-SUCCESS outcome.
    switch (response->error_code()) {
      case ClientToServerResponse::SUCCESS:
        if (!response->has_store_birthday() && birthday.empty()) {
          LOG(ERROR) <<
              "Server didn't provide birthday in proto buffer response.";
          rv = false;
        }
        LogResponseProfilingData(*response);
        break;
      case ClientToServerResponse::USER_NOT_ACTIVATED:
      case ClientToServerResponse::AUTH_INVALID:
      case ClientToServerResponse::ACCESS_DENIED:
        LOG(INFO) << "Authentication expired, re-requesting";
        LOG(INFO) << "Not implemented in syncer yet!!!";
        status.AuthFailed();
        rv = false;
        break;
      case ClientToServerResponse::NOT_MY_BIRTHDAY:
        LOG(WARNING) << "Not my birthday return.";
        rv = false;
        break;
      case ClientToServerResponse::THROTTLED:
        // Back off for kSyncDelayAfterThrottled seconds.
        LOG(WARNING) << "Client silenced by server.";
        session->set_silenced_until(time(0) + kSyncDelayAfterThrottled);
        rv = false;
        break;
    }

  } else if (session->connection_manager()->IsServerReachable()) {
    // The server was reachable but the exchange failed: count it as an
    // error toward the error-rate tracker.
    status.TallyNewError();
  }
  return rv;
}
+
+// static
+bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
+ const SyncEntity& server_entry) {
+ SyncName name = NameFromSyncEntity(server_entry);
+
+ CHECK(local_entry.Get(ID) == server_entry.id()) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+ CHECK(server_entry.version() == local_entry.Get(BASE_VERSION)) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+ CHECK(!local_entry.Get(IS_UNSYNCED)) <<
+ " SyncerProtoUtil::Compare precondition not met.";
+
+ if (local_entry.Get(IS_DEL) && server_entry.deleted())
+ return true;
+ if (!ClientAndServerTimeMatch(local_entry.Get(CTIME), server_entry.ctime())) {
+ LOG(WARNING) << "ctime mismatch";
+ return false;
+ }
+
+ // These checks are somewhat prolix, but they're easier to debug than
+ // a big boolean statement.
+ SyncName client_name = local_entry.GetName();
+ if (client_name != name) {
+ LOG(WARNING) << "Client name mismatch";
+ return false;
+ }
+ if (local_entry.Get(PARENT_ID) != server_entry.parent_id()) {
+ LOG(WARNING) << "Parent ID mismatch";
+ return false;
+ }
+ if (local_entry.Get(IS_DIR) != server_entry.IsFolder()) {
+ LOG(WARNING) << "Dir field mismatch";
+ return false;
+ }
+ if (local_entry.Get(IS_DEL) != server_entry.deleted()) {
+ LOG(WARNING) << "Deletion mismatch";
+ return false;
+ }
+ if (!local_entry.Get(IS_DIR) &&
+ !ClientAndServerTimeMatch(local_entry.Get(MTIME),
+ server_entry.mtime())) {
+ LOG(WARNING) << "mtime mismatch";
+ return false;
+ }
+
+ return true;
+}
+
+// static
+void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
+ syncable::Blob* blob) {
+ syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
+ blob->swap(proto_blob);
+}
+
+// static
+bool SyncerProtoUtil::ProtoBytesEqualsBlob(const std::string& proto_bytes,
+ const syncable::Blob& blob) {
+ if (proto_bytes.size() != blob.size())
+ return false;
+ return std::equal(proto_bytes.begin(), proto_bytes.end(), blob.begin());
+}
+
+// static
+void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
+ std::string* proto_bytes) {
+ std::string blob_string(blob.begin(), blob.end());
+ proto_bytes->swap(blob_string);
+}
+
+// static
+syncable::SyncName SyncerProtoUtil::NameFromSyncEntity(
+ const SyncEntity &entry) {
+ SyncName result(PSTR(""));
+
+ AppendUTF8ToPathString(entry.name(), &result.value());
+ if (entry.has_non_unique_name()) {
+ AppendUTF8ToPathString(entry.non_unique_name(),
+ &result.non_unique_value());
+ } else {
+ result.non_unique_value() = result.value();
+ }
+ return result;
+}
+
+// static
+syncable::SyncName SyncerProtoUtil::NameFromCommitEntryResponse(
+ const CommitResponse_EntryResponse& entry) {
+ SyncName result(PSTR(""));
+
+ AppendUTF8ToPathString(entry.name(), &result.value());
+ if (entry.has_non_unique_name()) {
+ AppendUTF8ToPathString(entry.non_unique_name(),
+ &result.non_unique_value());
+ } else {
+ result.non_unique_value() = result.value();
+ }
+ return result;
+}
+
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_proto_util.h b/chrome/browser/sync/engine/syncer_proto_util.h
new file mode 100644
index 0000000..ecee903
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
+
+#include <string>
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "chrome/browser/sync/syncable/blob.h"
+
+namespace syncable {
+class Entry;
+class ScopedDirLookup;
+class SyncName;
+} // namespace syncable
+
+namespace sync_pb {
+class ClientToServerResponse;
+} // namespace sync_pb
+
+namespace browser_sync {
+
+class ClientToServerMessage;
+class SyncerSession;
+class SyncEntity;
+class CommitResponse_EntryResponse;
+
// Stateless helpers for talking to the sync server: posting messages,
// comparing local entries against server entities, and converting
// between protobuf byte fields and syncable types.
class SyncerProtoUtil {
 public:
  // Posts the given message and fills the buffer with the returned value.
  // Returns true on success.  Also handles store birthday verification:
  // the session's syncer_stuck status is set true if the birthday is
  // incorrect.  A false value will always be returned if birthday is bad.
  static bool PostClientToServerMessage(ClientToServerMessage* msg,
      sync_pb::ClientToServerResponse* response, SyncerSession *session);

  // Compares a syncable Entry to SyncEntity, returns true iff
  // the data is identical.
  //
  // TODO(sync): The places where this function is used are arguably big
  // causes of the fragility, because there's a tendency to freak out
  // the moment the local and server values diverge. However, this almost
  // always indicates a sync bug somewhere earlier in the sync cycle.
  static bool Compare(const syncable::Entry& local_entry,
                      const SyncEntity& server_entry);

  // Utility methods for converting between syncable::Blobs and protobuf
  // byte fields.
  static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
                                     syncable::Blob* blob);
  static bool ProtoBytesEqualsBlob(const std::string& proto_bytes,
                                   const syncable::Blob& blob);
  static void CopyBlobIntoProtoBytes(const syncable::Blob& blob,
                                     std::string* proto_bytes);

  // Extract the name fields from a sync entity.
  static syncable::SyncName NameFromSyncEntity(
      const SyncEntity& entry);

  // Extract the name fields from a commit entry response.
  static syncable::SyncName NameFromCommitEntryResponse(
      const CommitResponse_EntryResponse& entry);

 private:
  // This class is a namespace for statics; it is never instantiated.
  SyncerProtoUtil() {}
  DISALLOW_COPY_AND_ASSIGN(SyncerProtoUtil);
};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
new file mode 100644
index 0000000..c11a4ca
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/blob.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using syncable::Blob;
+using syncable::SyncName;
+
+namespace browser_sync {
+
+TEST(SyncerProtoUtil, TestBlobToProtocolBufferBytesUtilityFunctions) {
+ unsigned char test_data1[] = {1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data2[] = {1, 99, 3, 4, 5, 6, 7, 8, 0, 1, 4, 2, 9};
+ unsigned char test_data3[] = {99, 2, 3, 4, 5, 6, 7, 8};
+
+ syncable::Blob test_blob1, test_blob2, test_blob3;
+ for (int i = 0; i < arraysize(test_data1); ++i)
+ test_blob1.push_back(test_data1[i]);
+ for (int i = 0; i < arraysize(test_data2); ++i)
+ test_blob2.push_back(test_data2[i]);
+ for (int i = 0; i < arraysize(test_data3); ++i)
+ test_blob3.push_back(test_data3[i]);
+
+ string test_message1(reinterpret_cast<char*>(test_data1),
+ arraysize(test_data1));
+ string test_message2(reinterpret_cast<char*>(test_data2),
+ arraysize(test_data2));
+ string test_message3(reinterpret_cast<char*>(test_data3),
+ arraysize(test_data3));
+
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob1));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob2));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message2,
+ test_blob3));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob1));
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob2));
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message3,
+ test_blob3));
+
+ Blob blob1_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+ SyncerProtoUtil::CopyProtoBytesIntoBlob(test_message1, &blob1_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(test_message1,
+ blob1_copy));
+
+ std::string message2_copy;
+ EXPECT_FALSE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+ SyncerProtoUtil::CopyBlobIntoProtoBytes(test_blob2, &message2_copy);
+ EXPECT_TRUE(SyncerProtoUtil::ProtoBytesEqualsBlob(message2_copy,
+ test_blob2));
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the
+// name field is provided.
+TEST(SyncerProtoUtil, NameExtractionOneName) {
+ SyncEntity one_name_entity;
+ CommitResponse_EntryResponse one_name_response;
+
+ PathString one_name_string(PSTR("Eggheadednesses"));
+ one_name_entity.set_name("Eggheadednesses");
+ one_name_response.set_name("Eggheadednesses");
+
+ SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a.value());
+ EXPECT_EQ(one_name_string, name_a.non_unique_value());
+
+ SyncName name_b =
+ SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
+ EXPECT_EQ(one_name_string, name_b.value());
+ EXPECT_EQ(one_name_string, name_b.non_unique_value());
+
+ EXPECT_TRUE(name_a == name_b);
+}
+
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the
+// name field and the non_unique_name fields are provided.
+TEST(SyncerProtoUtil, NameExtractionTwoNames) {
+ SyncEntity two_name_entity;
+ CommitResponse_EntryResponse two_name_response;
+
+ PathString two_name_string_unique(PSTR("Oxyphenbutazone"));
+ two_name_entity.set_name("Oxyphenbutazone");
+ two_name_response.set_name("Oxyphenbutazone");
+ PathString two_name_string(PSTR("Neuroanatomists"));
+ two_name_entity.set_non_unique_name("Neuroanatomists");
+ two_name_response.set_non_unique_name("Neuroanatomists");
+
+ SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
+ EXPECT_EQ(two_name_string_unique, name_a.value());
+ EXPECT_EQ(two_name_string, name_a.non_unique_value());
+
+ SyncName name_b =
+ SyncerProtoUtil::NameFromCommitEntryResponse(two_name_response);
+ EXPECT_EQ(two_name_string_unique, name_b.value());
+ EXPECT_EQ(two_name_string, name_b.non_unique_value());
+
+ EXPECT_TRUE(name_a == name_b);
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_session.h b/chrome/browser/sync/engine/syncer_session.h
new file mode 100644
index 0000000..e90930f
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_session.h
@@ -0,0 +1,364 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// SyncerSession holds the entire state of a single sync cycle;
+// GetUpdates, Commit, and Conflict Resolution. After said cycle, the
+// Session may contain items that were unable to be processed because of
+// errors.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
+
+#include <utility>
+#include <vector>
+
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+namespace browser_sync {
+
+class ConflictResolver;
+class ModelSafeWorker;
+class ServerConnectionManager;
+class SyncerStatus;
+struct SyncerEvent;
+
class SyncerSession {
  // Granted access to the raw state objects below.
  friend class ConflictResolutionView;
  friend class SyncerStatus;
 public:
  // A utility to set the session's write transaction member,
  // and later clear it when the utility falls out of scope.
  class ScopedSetWriteTransaction {
   public:
    // Installs |trans| as the session's current write transaction.
    ScopedSetWriteTransaction(SyncerSession* session,
                              syncable::WriteTransaction* trans)
        : session_(session) {
      session_->set_write_transaction(trans);
    }
    // Clears the session's reference; does not delete the transaction.
    ~ScopedSetWriteTransaction() {
      session_->ClearWriteTransaction();
    }
   private:
    SyncerSession* session_;
    DISALLOW_COPY_AND_ASSIGN(ScopedSetWriteTransaction);
  };

  // Neither state pointer is owned; both must outlive this session.
  SyncerSession(SyncCycleState* cycle_state, SyncProcessState* process_state)
      : sync_process_state_(process_state),
        sync_cycle_state_(cycle_state),
        source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
        notifications_enabled_(false) {
    DCHECK(NULL != process_state);
    DCHECK(NULL != cycle_state);
  }

  // TODO(sync): Perhaps this should dictate the next step (i.e., don't do
  // apply if you didn't get any from download), or put it in the while loop.
  void set_update_response(const ClientToServerResponse& update_response) {
    sync_cycle_state_->set_update_response(update_response);
  }

  const ClientToServerResponse& update_response() const {
    return sync_cycle_state_->update_response();
  }

  void set_commit_response(const ClientToServerResponse& commit_response) {
    sync_cycle_state_->set_commit_response(commit_response);
  }

  const ClientToServerResponse& commit_response() const {
    return sync_cycle_state_->commit_response();
  }

  void AddVerifyResult(const VerifyResult& verify_result,
                       const sync_pb::SyncEntity& entity) {
    sync_cycle_state_->AddVerifyResult(verify_result, entity);
  }

  bool HasVerifiedUpdates() const {
    return sync_cycle_state_->HasVerifiedUpdates();
  }

  void AddAppliedUpdate(const UpdateAttemptResponse& response,
                        const syncable::Id& id) {
    sync_cycle_state_->AddAppliedUpdate(response, id);
  }

  bool HasAppliedUpdates() const {
    return sync_cycle_state_->HasAppliedUpdates();
  }

  PathString account_name() const {
    return sync_process_state_->account_name();
  }

  syncable::DirectoryManager* dirman() const {
    return sync_process_state_->dirman();
  }

  ServerConnectionManager* connection_manager() const {
    return sync_process_state_->connection_manager();
  }

  ConflictResolver* resolver() const {
    return sync_process_state_->resolver();
  }

  SyncerEventChannel* syncer_event_channel() const {
    return sync_process_state_->syncer_event_channel();
  }

  int conflicting_update_count() const {
    return sync_process_state_->conflicting_updates();
  }

  // Time before which the syncer should not contact the server again
  // (set after a THROTTLED server response).
  time_t silenced_until() const {
    return sync_process_state_->silenced_until_;
  }

  // NOTE(review): declared const yet mutates shared process state through
  // the pointer member; consider dropping the const qualifier.
  void set_silenced_until(time_t silenced_until) const {
    sync_process_state_->silenced_until_ = silenced_until;
  }

  const std::vector<int64>& unsynced_handles() const {
    return sync_cycle_state_->unsynced_handles();
  }

  void set_unsynced_handles(const std::vector<int64>& unsynced_handles) {
    sync_cycle_state_->set_unsynced_handles(unsynced_handles);
  }

  int64 unsynced_count() const { return sync_cycle_state_->unsynced_count(); }

  const std::vector<syncable::Id>& commit_ids() const {
    return sync_cycle_state_->commit_ids();
  }

  void set_commit_ids(const std::vector<syncable::Id>& commit_ids) {
    sync_cycle_state_->set_commit_ids(commit_ids);
  }

  bool commit_ids_empty() const {
    return sync_cycle_state_->commit_ids_empty();
  }

  syncable::WriteTransaction* write_transaction() const {
    return sync_cycle_state_->write_transaction();
  }

  bool has_open_write_transaction() const {
    return sync_cycle_state_->has_open_write_transaction();
  }

  ClientToServerMessage* commit_message() const {
    return sync_cycle_state_->commit_message();
  }

  void set_commit_message(const ClientToServerMessage& message) {
    sync_cycle_state_->set_commit_message(message);
  }

  // True while fewer items have been scheduled for commit than are known
  // to be unsynced.
  bool HasRemainingItemsToCommit() const {
    return commit_ids().size() < unsynced_handles().size();
  }

  void AddCommitConflict(const syncable::Id& the_id) {
    sync_process_state_->AddConflictingItem(the_id);
  }

  void AddBlockedItem(const syncable::Id& the_id) {
    sync_process_state_->AddBlockedItem(the_id);
  }

  void EraseCommitConflict(const syncable::Id& the_id) {
    sync_process_state_->EraseConflictingItem(the_id);
  }

  void EraseBlockedItem(const syncable::Id& the_id) {
    sync_process_state_->EraseBlockedItem(the_id);
  }

  // Returns true if at least one update application failed due to
  // a conflict during this sync cycle.
  bool HasConflictingUpdates() const {
    std::vector<AppliedUpdate>::const_iterator it;
    for (it = sync_cycle_state_->AppliedUpdatesBegin();
         it < sync_cycle_state_->AppliedUpdatesEnd();
         ++it) {
      if (it->first == CONFLICT) {
        return true;
      }
    }
    return false;
  }

  std::vector<VerifiedUpdate>::iterator VerifiedUpdatesBegin() const {
    return sync_cycle_state_->VerifiedUpdatesBegin();
  }

  std::vector<VerifiedUpdate>::iterator VerifiedUpdatesEnd() const {
    return sync_cycle_state_->VerifiedUpdatesEnd();
  }

  // Returns the number of updates received from the sync server.
  int64 CountUpdates() const {
    if (update_response().has_get_updates()) {
      return update_response().get_updates().entries().size();
    } else {
      return 0;
    }
  }

  bool got_zero_updates() const {
    return CountUpdates() == 0;
  }

  // Logs a summary of this cycle's activity, for debugging.
  void DumpSessionInfo() const {
    LOG(INFO) << "Dumping session info";
    if (update_response().has_get_updates()) {
      LOG(INFO) << update_response().get_updates().entries().size()
                << " updates downloaded by last get_updates";
    } else {
      LOG(INFO) << "No update response found";
    }
    LOG(INFO) << sync_cycle_state_->VerifiedUpdatesSize()
              << " updates verified";
    LOG(INFO) << sync_cycle_state_->AppliedUpdatesSize() << " updates applied";
    LOG(INFO) << count_blocked_updates() << " updates blocked by open entry";
    LOG(INFO) << commit_ids().size() << " items to commit";
    LOG(INFO) << unsynced_count() << " unsynced items";
  }

  // Counts the applied updates whose result was BLOCKED this cycle.
  int64 count_blocked_updates() const {
    std::vector<AppliedUpdate>::const_iterator it;
    int64 count = 0;
    for (it = sync_cycle_state_->AppliedUpdatesBegin();
         it < sync_cycle_state_->AppliedUpdatesEnd();
         ++it) {
      if (it->first == BLOCKED) {
        ++count;
      }
    }
    return count;
  }

  void set_conflict_sets_built(const bool b) {
    sync_cycle_state_->set_conflict_sets_built(b);
  }

  bool conflict_sets_built() const {
    return sync_cycle_state_->conflict_sets_built();
  }

  void set_conflicts_resolved(const bool b) {
    sync_cycle_state_->set_conflicts_resolved(b);
  }

  bool conflicts_resolved() const {
    return sync_cycle_state_->conflicts_resolved();
  }

  ModelSafeWorker* model_safe_worker() const {
    return sync_process_state_->model_safe_worker();
  }

  void set_items_committed(const bool b) {
    sync_cycle_state_->set_items_committed(b);
  }

  void set_item_committed() {
    sync_cycle_state_->set_item_committed();
  }

  bool items_committed() const {
    return sync_cycle_state_->items_committed();
  }

  void set_over_quota(const bool b) {
    sync_cycle_state_->set_over_quota(b);
  }

  // Volatile reader for the source member of the syncer session object. The
  // value is reset to the SYNC_CYCLE_CONTINUATION value to signal that it
  // has been read.
  sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE TestAndSetSource() {
    sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE old_source =
        source_;
    set_source(sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION);
    return old_source;
  }

  void set_source(sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source) {
    source_ = source;
  }

  bool notifications_enabled() const {
    return notifications_enabled_;
  }

  void set_notifications_enabled(const bool state) {
    notifications_enabled_ = state;
  }

  void set_timestamp_dirty() {
    sync_cycle_state_->set_timestamp_dirty();
  }

  bool timestamp_dirty() const {
    return sync_cycle_state_->is_timestamp_dirty();
  }

  // TODO(chron): Unit test for this method.
  // Returns true iff this session contains data that should go through
  // the sync engine again.
  bool ShouldSyncAgain() const {
    return (HasRemainingItemsToCommit() &&
            sync_process_state_->successful_commits() > 0) ||
        conflict_sets_built() ||
        conflicts_resolved() ||
        // Or, we have conflicting updates, but we're making progress on
        // resolving them...
        !got_zero_updates() ||
        timestamp_dirty();
  }

 private:
  // The write transaction must be destructed by the caller of this function.
  // Here, we just store the reference.
  void set_write_transaction(syncable::WriteTransaction* write_transaction) {
    sync_cycle_state_->set_write_transaction(write_transaction);
  }

  // Sets the write transaction to null, but doesn't free the memory.
  void ClearWriteTransaction() {
    sync_cycle_state_->ClearWriteTransaction();
  }

  // Neither pointer is owned by the session (see constructor).
  SyncProcessState* sync_process_state_;
  SyncCycleState* sync_cycle_state_;

  // The source for initiating this syncer session.
  sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE source_;

  // True if notifications were enabled when this session was created.
  bool notifications_enabled_;

  FRIEND_TEST(SyncerTest, TestCommitListOrderingCounterexample);
  DISALLOW_COPY_AND_ASSIGN(SyncerSession);
};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_SESSION_H_
diff --git a/chrome/browser/sync/engine/syncer_status.cc b/chrome/browser/sync/engine/syncer_status.cc
new file mode 100644
index 0000000..f356bcd
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_status.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_status.h"
+
+namespace browser_sync {
+SyncerStatus::SyncerStatus(SyncerSession* s) {
+ sync_process_state_ = s->sync_process_state_;
+ sync_cycle_state_ = s->sync_cycle_state_;
+}
+SyncerStatus::~SyncerStatus() {}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_status.h b/chrome/browser/sync/engine/syncer_status.h
new file mode 100644
index 0000000..4f70ecd
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_status.h
@@ -0,0 +1,255 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO(sync): We eventually want to fundamentally change how we represent
+// status and inform the UI about the ways in which our status has changed.
+// Right now, we're just trying to keep the various command classes from
+// having to worry about this class.
+//
+// The UI will request that we fill this struct so it can show the current
+// sync state.
+//
+// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
+
+#include "base/atomicops.h"
+#include "base/port.h"
+#include "chrome/browser/sync/engine/sync_cycle_state.h"
+#include "chrome/browser/sync/engine/sync_process_state.h"
+
+namespace browser_sync {
+class SyncerSession;
+
+class SyncerStatus {
+ public:
+ explicit SyncerStatus(SyncCycleState* cycle_state, SyncProcessState* state)
+ : sync_process_state_(state),
+ sync_cycle_state_(cycle_state){}
+ explicit SyncerStatus(SyncerSession* s);
+ ~SyncerStatus();
+
+ bool invalid_store() const {
+ return sync_process_state_->invalid_store();
+ }
+
+ void set_invalid_store(const bool val) {
+ sync_process_state_->set_invalid_store(val);
+ }
+
+ bool syncer_stuck() const {
+ return sync_process_state_->syncer_stuck();
+ }
+
+ void set_syncer_stuck(const bool val) {
+ sync_process_state_->set_syncer_stuck(val);
+ }
+
+ bool syncing() const {
+ return sync_process_state_->syncing();
+ }
+
+ void set_syncing(const bool val) {
+ sync_process_state_->set_syncing(val);
+ }
+
+ bool IsShareUsable() const {
+ return sync_process_state_->IsShareUsable();
+ }
+
+ // During initial sync these two members can be used to
+ // measure sync progress.
+ int64 current_sync_timestamp() const {
+ return sync_process_state_->current_sync_timestamp();
+ }
+
+ void set_current_sync_timestamp(const int64 val) {
+ sync_process_state_->set_current_sync_timestamp(val);
+ }
+
+ int64 servers_latest_timestamp() const {
+ return sync_process_state_->servers_latest_timestamp();
+ }
+
+ void set_servers_latest_timestamp(const int64 val) {
+ sync_process_state_->set_servers_latest_timestamp(val);
+ }
+
+ int64 unsynced_count() const {
+ return sync_cycle_state_->unsynced_count();
+ }
+
+ int conflicting_updates() const {
+ return sync_process_state_->conflicting_updates();
+ }
+
+ int conflicting_commits() const {
+ return sync_process_state_->conflicting_commits();
+ }
+
+ void set_conflicting_commits(const int val) {
+ sync_process_state_->set_conflicting_commits(val);
+ }
+
+ int BlockedItemsSize() const {
+ return sync_process_state_->BlockedItemsSize();
+ }
+
+ // derive from sync_process_state blocked_item_ids_
+ int stalled_updates() const {
+ return sync_process_state_->BlockedItemsSize();
+ }
+
+ // in sync_process_state
+ int error_commits() const {
+ return sync_process_state_->error_commits();
+ }
+
+ void set_error_commits(const int val) {
+ sync_process_state_->set_error_commits(val);
+ }
+
+ // WEIRD COUNTER manipulation functions
+ int consecutive_problem_get_updates() const {
+ return sync_process_state_->consecutive_problem_get_updates();
+ }
+
+ void increment_consecutive_problem_get_updates() {
+ sync_process_state_->increment_consecutive_problem_get_updates();
+ }
+
+ void zero_consecutive_problem_get_updates() {
+ sync_process_state_->zero_consecutive_problem_get_updates();
+ }
+
+ int consecutive_problem_commits() const {
+ return sync_process_state_->consecutive_problem_commits();
+ }
+
+ void increment_consecutive_problem_commits() {
+ sync_process_state_->increment_consecutive_problem_commits();
+ }
+
+ void zero_consecutive_problem_commits() {
+ sync_process_state_->zero_consecutive_problem_commits();
+ }
+
+ int consecutive_transient_error_commits() const {
+ return sync_process_state_->consecutive_transient_error_commits();
+ }
+
+ void increment_consecutive_transient_error_commits_by(int value) {
+ sync_process_state_->increment_consecutive_transient_error_commits_by(
+ value);
+ }
+
+ void zero_consecutive_transient_error_commits() {
+ sync_process_state_->zero_consecutive_transient_error_commits();
+ }
+
+ int consecutive_errors() const {
+ return sync_process_state_->consecutive_errors();
+ }
+
+ void increment_consecutive_errors() {
+ increment_consecutive_errors_by(1);
+ }
+
+ void increment_consecutive_errors_by(int value) {
+ sync_process_state_->increment_consecutive_errors_by(value);
+ }
+
+ void zero_consecutive_errors() {
+ sync_process_state_->zero_consecutive_errors();
+ }
+
+ int successful_commits() const {
+ return sync_process_state_->successful_commits();
+ }
+
+ void increment_successful_commits() {
+ sync_process_state_->increment_successful_commits();
+ }
+
+ void zero_successful_commits() {
+ sync_process_state_->zero_successful_commits();
+ }
+ // end WEIRD COUNTER manipulation functions
+
+ bool over_quota() const { return sync_cycle_state_->over_quota(); }
+
+ // Methods for managing error rate tracking in sync_process_state
+ void TallyNewError() {
+ sync_process_state_->TallyNewError();
+ }
+
+ void TallyBigNewError() {
+ sync_process_state_->TallyBigNewError();
+ }
+
+ void ForgetOldError() {
+ sync_process_state_->ForgetOldError();
+ }
+
+ void CheckErrorRateTooHigh() {
+ sync_process_state_->CheckErrorRateTooHigh();
+ }
+
+ void AuthFailed() { sync_process_state_->AuthFailed(); }
+
+ void AuthSucceeded() { sync_process_state_->AuthSucceeded(); }
+
+ // Returns true if this object has been modified since last SetClean() call
+ bool IsDirty() const {
+ return sync_cycle_state_->IsDirty() || sync_process_state_->IsDirty();
+ }
+
+ // Returns true if auth status has been modified since last SetClean() call
+ bool IsAuthDirty() const { return sync_process_state_->IsAuthDirty(); }
+
+ // Call to tell this status object that its new state has been seen
+ void SetClean() {
+ sync_process_state_->SetClean();
+ sync_cycle_state_->SetClean();
+ }
+
+ // Call to tell this status object that its auth state has been seen
+ void SetAuthClean() { sync_process_state_->SetAuthClean(); }
+
+ void DumpStatusInfo() const {
+ LOG(INFO) << "Dumping status info: " << (IsDirty() ? "DIRTY" : "CLEAN");
+
+ LOG(INFO) << "invalid store = " << invalid_store();
+ LOG(INFO) << "syncer_stuck = " << syncer_stuck();
+ LOG(INFO) << "syncing = " << syncing();
+ LOG(INFO) << "over_quota = " << over_quota();
+
+ LOG(INFO) << "current_sync_timestamp = " << current_sync_timestamp();
+ LOG(INFO) << "servers_latest_timestamp = " << servers_latest_timestamp();
+ LOG(INFO) << "unsynced_count = " << unsynced_count();
+ LOG(INFO) << "conflicting_updates = " << conflicting_updates();
+ LOG(INFO) << "conflicting_commits = " << conflicting_commits();
+ LOG(INFO) << "BlockedItemsSize = " << BlockedItemsSize();
+ LOG(INFO) << "stalled_updates = " << stalled_updates();
+ LOG(INFO) << "error_commits = " << error_commits();
+
+ LOG(INFO) << "consecutive_problem_get_updates = "
+ << consecutive_problem_get_updates();
+ LOG(INFO) << "consecutive_problem_commits = "
+ << consecutive_problem_commits();
+ LOG(INFO) << "consecutive_transient_error_commits = "
+ << consecutive_transient_error_commits();
+ LOG(INFO) << "consecutive_errors = " << consecutive_errors();
+ LOG(INFO) << "successful_commits = " << successful_commits();
+ }
+
+ private:
+
+ SyncCycleState *sync_cycle_state_;
+ SyncProcessState *sync_process_state_;
+
+};
+} // namespace browser_sync
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
diff --git a/chrome/browser/sync/engine/syncer_thread.cc b/chrome/browser/sync/engine/syncer_thread.cc
new file mode 100644
index 0000000..e0832a7
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread.cc
@@ -0,0 +1,558 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_thread.h"
+
+#ifdef OS_MACOSX
+#include <CoreFoundation/CFNumber.h>
+#include <IOKit/IOTypes.h>
+#include <IOKit/IOKitLib.h>
+#endif
+
+#include <algorithm>
+#include <map>
+#include <queue>
+
+#include "chrome/browser/sync/engine/auth_watcher.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator.h"
+#include "chrome/browser/sync/notifier/listener/talk_mediator_impl.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+using std::priority_queue;
+using std::min;
+
// Strict weak ordering for timespec values: compare the seconds fields first,
// and fall back to nanoseconds only to break ties.
static inline bool operator < (const timespec& a, const timespec& b) {
  if (a.tv_sec != b.tv_sec)
    return a.tv_sec < b.tv_sec;
  return a.tv_nsec < b.tv_nsec;
}
+
+namespace {
+
+// returns the amount of time since the user last interacted with
+// the computer, in milliseconds
+int UserIdleTime() {
+#ifdef OS_WINDOWS
+ LASTINPUTINFO last_input_info;
+ last_input_info.cbSize = sizeof(LASTINPUTINFO);
+
+ // get time in windows ticks since system start of last activity
+ BOOL b = ::GetLastInputInfo(&last_input_info);
+ if (b == TRUE)
+ return ::GetTickCount() - last_input_info.dwTime;
+#elif defined(OS_MACOSX)
+ // It would be great to do something like:
+ //
+ // return 1000 *
+ // CGEventSourceSecondsSinceLastEventType(
+ // kCGEventSourceStateCombinedSessionState,
+ // kCGAnyInputEventType);
+ //
+ // Unfortunately, CGEvent* lives in ApplicationServices, and we're a daemon
+ // and can't link that high up the food chain. Thus this mucking in IOKit.
+
+ io_service_t hid_service =
+ IOServiceGetMatchingService(kIOMasterPortDefault,
+ IOServiceMatching("IOHIDSystem"));
+ if (!hid_service) {
+ LOG(WARNING) << "Could not obtain IOHIDSystem";
+ return 0;
+ }
+
+ CFTypeRef object = IORegistryEntryCreateCFProperty(hid_service,
+ CFSTR("HIDIdleTime"),
+ kCFAllocatorDefault,
+ 0);
+ if (!object) {
+ LOG(WARNING) << "Could not get IOHIDSystem's HIDIdleTime property";
+ IOObjectRelease(hid_service);
+ return 0;
+ }
+
+ int64 idle_time; // in nanoseconds
+ Boolean success;
+ if (CFGetTypeID(object) == CFNumberGetTypeID()) {
+ success = CFNumberGetValue((CFNumberRef)object,
+ kCFNumberSInt64Type,
+ &idle_time);
+ } else {
+ LOG(WARNING) << "IOHIDSystem's HIDIdleTime property isn't a number!";
+ }
+
+ CFRelease(object);
+ IOObjectRelease(hid_service);
+
+ if (!success) {
+ LOG(WARNING) << "Could not get IOHIDSystem's HIDIdleTime property's value";
+ return 0;
+ } else {
+ return idle_time / 1000000; // nano to milli
+ }
+#else
+ static bool was_logged = false;
+ if (!was_logged) {
+ was_logged = true;
+ LOG(INFO) << "UserIdleTime unimplemented on this platform, "
+ "synchronization will not throttle when user idle";
+ }
+#endif
+
+ return 0;
+}
+
+} // namespace
+
+namespace browser_sync {
+
+bool SyncerThread::NudgeSyncer(int milliseconds_from_now, NudgeSource source) {
+ MutexLock lock(&mutex_);
+ if (syncer_ == NULL) {
+ return false;
+ }
+ NudgeSyncImpl(milliseconds_from_now, source);
+ return true;
+}
+
+void* RunSyncerThread(void* syncer_thread) {
+ return (reinterpret_cast<SyncerThread*>(syncer_thread))->ThreadMain();
+}
+
// Constructs the thread object without starting the thread (see Start()).
// Any dependency may be NULL (unit tests construct with all NULLs), in which
// case the corresponding event subscription is simply skipped.
SyncerThread::SyncerThread(
    ClientCommandChannel* command_channel,
    syncable::DirectoryManager* mgr,
    ServerConnectionManager* connection_manager,
    AllStatus* all_status,
    ModelSafeWorker* model_safe_worker)
    : dirman_(mgr), scm_(connection_manager),
      syncer_(NULL), syncer_events_(NULL), thread_running_(false),
      syncer_short_poll_interval_seconds_(kDefaultShortPollIntervalSeconds),
      syncer_long_poll_interval_seconds_(kDefaultLongPollIntervalSeconds),
      syncer_polling_interval_(kDefaultShortPollIntervalSeconds),
      syncer_max_interval_(kDefaultMaxPollIntervalMs),
      stop_syncer_thread_(false), connected_(false), conn_mgr_hookup_(NULL),
      p2p_authenticated_(false), p2p_subscribed_(false),
      allstatus_(all_status), talk_mediator_hookup_(NULL),
      command_channel_(command_channel), directory_manager_hookup_(NULL),
      model_safe_worker_(model_safe_worker),
      client_command_hookup_(NULL), disable_idle_detection_(false) {

  // The shutdown event is broadcast to channel listeners when the channel
  // itself is destroyed.
  SyncerEvent shutdown = { SyncerEvent::SHUTDOWN_USE_WITH_CARE };
  syncer_event_channel_.reset(new SyncerEventChannel(shutdown));

  // Wait for the directory to be opened before creating the Syncer.
  if (dirman_) {
    directory_manager_hookup_.reset(NewEventListenerHookup(
        dirman_->channel(), this, &SyncerThread::HandleDirectoryManagerEvent));
  }

  // Track server reachability so the main loop can pause while offline.
  if (scm_) {
    WatchConnectionManager(scm_);
  }

  // Server-issued commands can adjust our polling intervals at runtime.
  if (command_channel_) {
    WatchClientCommands(command_channel_);
  }
}
+
// Tears down event subscriptions before deleting the syncer so no callbacks
// can arrive mid-destruction.  The caller must have already stopped the
// thread via Stop(); the CHECK enforces that.
SyncerThread::~SyncerThread() {
  client_command_hookup_.reset();
  conn_mgr_hookup_.reset();
  syncer_event_channel_.reset();
  directory_manager_hookup_.reset();
  syncer_events_.reset();
  delete syncer_;
  talk_mediator_hookup_.reset();
  // Stop() must have been called (and succeeded) before destruction.
  CHECK(!thread_running_);
}
+
// Creates and starts a syncer thread.
// Returns true if it creates a thread or if there's currently a thread
// running and false otherwise.  Idempotent: calling while already running
// is a no-op that returns true.
bool SyncerThread::Start() {
  MutexLock lock(&mutex_);
  if (thread_running_) {
    return true;
  }
  thread_running_ =
      (0 == pthread_create(&thread_, NULL, RunSyncerThread, this));
  if (thread_running_) {
    // Detached because Stop() synchronizes via the condition variable
    // (thread_running_) rather than pthread_join.
    pthread_detach(thread_);
  }
  return thread_running_;
}
+
// Stop processing. A max wait of at least 2*server RTT time is recommended.
// returns true if we stopped, false otherwise.  A negative max_wait waits
// forever.  NOTE: max_wait is truncated to whole seconds when building the
// deadline, so sub-second waits may time out immediately.
bool SyncerThread::Stop(int max_wait) {
  MutexLock lock(&mutex_);
  if (!thread_running_)
    return true;
  stop_syncer_thread_ = true;
  if (NULL != syncer_) {
    // try to early exit the syncer
    syncer_->RequestEarlyExit();
  }
  // Wake the main loop so it observes stop_syncer_thread_.
  pthread_cond_broadcast(&changed_.condvar_);
  timespec deadline = { time(NULL) + (max_wait / 1000), 0 };
  do {
    // ThreadMain broadcasts after clearing thread_running_, which ends this
    // wait loop.
    const int wait_result = max_wait < 0 ?
        pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_) :
        pthread_cond_timedwait(&changed_.condvar_, &mutex_.mutex_,
                               &deadline);
    if (ETIMEDOUT == wait_result) {
      LOG(ERROR) << "SyncerThread::Stop timed out. Problems likely.";
      return false;
    }
  } while (thread_running_);
  return true;
}
+
+void SyncerThread::WatchClientCommands(ClientCommandChannel* channel) {
+ PThreadScopedLock<PThreadMutex> lock(&mutex_);
+ client_command_hookup_.reset(NewEventListenerHookup(channel, this,
+ &SyncerThread::HandleClientCommand));
+}
+
+void SyncerThread::HandleClientCommand(ClientCommandChannel::EventType
+ event) {
+ if (!event) {
+ return;
+ }
+
+ // mutex not really necessary for these
+ if (event->has_set_sync_poll_interval()) {
+ syncer_short_poll_interval_seconds_ = event->set_sync_poll_interval();
+ }
+
+ if (event->has_set_sync_long_poll_interval()) {
+ syncer_long_poll_interval_seconds_ = event->set_sync_long_poll_interval();
+ }
+}
+
// Core scheduling loop.  Runs with mutex_ held (locked by ThreadMain); the
// condition-variable waits below release it while sleeping, and SyncMain
// releases it for the duration of a sync.
void SyncerThread::ThreadMainLoop() {
  // Use the short poll value by default.
  int poll_seconds = syncer_short_poll_interval_seconds_;
  int user_idle_milliseconds = 0;
  timespec last_sync_time = { 0 };
  bool initial_sync_for_thread = true;
  bool continue_sync_cycle = false;

  while (!stop_syncer_thread_) {
    // Block until the server connection is available.
    if (!connected_) {
      LOG(INFO) << "Syncer thread waiting for connection.";
      while (!connected_ && !stop_syncer_thread_)
        pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_);
      LOG_IF(INFO, connected_) << "Syncer thread found connection.";
      continue;
    }

    // Block until HandleDirectoryManagerEvent has created the Syncer.
    if (syncer_ == NULL) {
      LOG(INFO) << "Syncer thread waiting for database initialization.";
      while (syncer_ == NULL && !stop_syncer_thread_)
        pthread_cond_wait(&changed_.condvar_, &mutex_.mutex_);
      LOG_IF(INFO, !(syncer_ == NULL)) << "Syncer was found after DB started.";
      continue;
    }

    // Sleep until the earlier of the next scheduled poll or the earliest
    // pending nudge (nudge_queue_ is a min-heap on absolute time).
    timespec const next_poll = { last_sync_time.tv_sec + poll_seconds,
                                 last_sync_time.tv_nsec };
    const timespec wake_time =
        !nudge_queue_.empty() && nudge_queue_.top().first < next_poll ?
        nudge_queue_.top().first : next_poll;
    LOG(INFO) << "wake time is " << wake_time.tv_sec;
    LOG(INFO) << "next poll is " << next_poll.tv_sec;

    const int error = pthread_cond_timedwait(&changed_.condvar_, &mutex_.mutex_,
                                             &wake_time);
    if (ETIMEDOUT != error) {
      continue; // Check all the conditions again.
    }

    const timespec now = GetPThreadAbsoluteTime(0);

    // Handle a nudge, caused by either a notification or a local bookmark
    // event. This will also update the source of the following SyncMain call.
    UpdateNudgeSource(now, &continue_sync_cycle, &initial_sync_for_thread);

    LOG(INFO) << "Calling Sync Main at time " << now.tv_sec;
    SyncMain(syncer_);
    last_sync_time = now;

    LOG(INFO) << "Updating the next polling time after SyncMain";
    poll_seconds = CalculatePollingWaitTime(allstatus_->status(),
                                            poll_seconds,
                                            &user_idle_milliseconds,
                                            &continue_sync_cycle);
  }
}
+
+// We check how long the user's been idle and sync less often if the
+// machine is not in use. The aim is to reduce server load.
+int SyncerThread::CalculatePollingWaitTime(
+ const AllStatus::Status& status,
+ int last_poll_wait, // in s
+ int* user_idle_milliseconds,
+ bool* continue_sync_cycle) {
+ bool is_continuing_sync_cyle = *continue_sync_cycle;
+ *continue_sync_cycle = false;
+
+ // Determine if the syncer has unfinished work to do from allstatus_.
+ const bool syncer_has_work_to_do =
+ status.updates_available > status.updates_received
+ || status.unsynced_count > 0;
+ LOG(INFO) << "syncer_has_work_to_do is " << syncer_has_work_to_do;
+
+ // First calculate the expected wait time, figuring in any backoff because of
+ // user idle time. next_wait is in seconds
+ syncer_polling_interval_ = (!status.notifications_enabled) ?
+ syncer_short_poll_interval_seconds_ :
+ syncer_long_poll_interval_seconds_;
+ int default_next_wait = syncer_polling_interval_;
+ int actual_next_wait = default_next_wait;
+
+ if (syncer_has_work_to_do) {
+ // Provide exponential backoff due to consecutive errors, else attempt to
+ // complete the work as soon as possible.
+ if (!is_continuing_sync_cyle) {
+ actual_next_wait = AllStatus::GetRecommendedDelaySeconds(0);
+ } else {
+ actual_next_wait = AllStatus::GetRecommendedDelaySeconds(last_poll_wait);
+ }
+ *continue_sync_cycle = true;
+ } else if (!status.notifications_enabled) {
+ // Ensure that we start exponential backoff from our base polling
+ // interval when we are not continuing a sync cycle.
+ last_poll_wait = std::max(last_poll_wait, syncer_polling_interval_);
+
+ // Did the user start interacting with the computer again?
+ // If so, revise our idle time (and probably next_sync_time) downwards
+ int new_idle_time = disable_idle_detection_ ? 0 : UserIdleTime();
+ if (new_idle_time < *user_idle_milliseconds) {
+ *user_idle_milliseconds = new_idle_time;
+ }
+ actual_next_wait = CalculateSyncWaitTime(last_poll_wait * 1000,
+ *user_idle_milliseconds) / 1000;
+ DCHECK_GE(actual_next_wait, default_next_wait);
+ }
+
+ LOG(INFO) << "Sync wait: idle " << default_next_wait
+ << " non-idle or backoff " << actual_next_wait << ".";
+
+ return actual_next_wait;
+}
+
// Thread body.  Holds mutex_ for the lifetime of ThreadMainLoop (which
// releases it only while waiting or syncing), then signals Stop() that the
// thread has finished.
void* SyncerThread::ThreadMain() {
  NameCurrentThreadForDebugging("SyncEngine_SyncerThread");
  mutex_.Lock();
  ThreadMainLoop();
  thread_running_ = false;
  // Wake any Stop() caller waiting on thread_running_ to clear.
  pthread_cond_broadcast(&changed_.condvar_);
  mutex_.Unlock();
  LOG(INFO) << "Syncer thread exiting.";
  return 0;
}
+
// Runs SyncShare until the syncer reports no further work.  Caller holds
// mutex_; it is released for the duration of the sync so nudges and state
// changes can be posted concurrently, then re-acquired before returning.
void SyncerThread::SyncMain(Syncer* syncer) {
  CHECK(syncer);
  mutex_.Unlock();
  while (syncer->SyncShare()) {
    LOG(INFO) << "Looping in sync share";
  }
  LOG(INFO) << "Done looping in sync share";

  mutex_.Lock();
}
+
+void SyncerThread::UpdateNudgeSource(const timespec& now,
+ bool* continue_sync_cycle,
+ bool* initial_sync) {
+ bool nudged = false;
+ NudgeSource nudge_source = kUnknown;
+ // Has the previous sync cycle completed?
+ if (continue_sync_cycle) {
+ nudge_source = kContinuation;
+ }
+ // Update the nudge source if a new nudge has come through during the
+ // previous sync cycle.
+ while (!nudge_queue_.empty() && !(now < nudge_queue_.top().first)) {
+ if (!nudged) {
+ nudge_source = nudge_queue_.top().second;
+ *continue_sync_cycle = false; // Reset the continuation token on nudge.
+ nudged = true;
+ }
+ nudge_queue_.pop();
+ }
+ SetUpdatesSource(nudged, nudge_source, initial_sync);
+}
+
+void SyncerThread::SetUpdatesSource(bool nudged, NudgeSource nudge_source,
+ bool* initial_sync) {
+ sync_pb::GetUpdatesCallerInfo::GET_UPDATES_SOURCE updates_source =
+ sync_pb::GetUpdatesCallerInfo::UNKNOWN;
+ if (*initial_sync) {
+ updates_source = sync_pb::GetUpdatesCallerInfo::FIRST_UPDATE;
+ *initial_sync = false;
+ } else if (!nudged) {
+ updates_source = sync_pb::GetUpdatesCallerInfo::PERIODIC;
+ } else {
+ switch (nudge_source) {
+ case kNotification:
+ updates_source = sync_pb::GetUpdatesCallerInfo::NOTIFICATION;
+ break;
+ case kLocal:
+ updates_source = sync_pb::GetUpdatesCallerInfo::LOCAL;
+ break;
+ case kContinuation:
+ updates_source = sync_pb::GetUpdatesCallerInfo::SYNC_CYCLE_CONTINUATION;
+ break;
+ case kUnknown:
+ default:
+ updates_source = sync_pb::GetUpdatesCallerInfo::UNKNOWN;
+ break;
+ }
+ }
+ syncer_->set_updates_source(updates_source);
+}
+
+void SyncerThread::HandleSyncerEvent(const SyncerEvent& event) {
+ MutexLock lock(&mutex_);
+ channel()->NotifyListeners(event);
+ if (SyncerEvent::REQUEST_SYNC_NUDGE != event.what_happened) {
+ return;
+ }
+ NudgeSyncImpl(event.nudge_delay_milliseconds, kUnknown);
+}
+
// Observes directory-manager lifecycle events.  Once the backing database is
// opened we create the Syncer, hook up its event channel, and wake the main
// loop (which may be blocked waiting for syncer_ to become non-NULL).
void SyncerThread::HandleDirectoryManagerEvent(
    const syncable::DirectoryManagerEvent& event) {
  LOG(INFO) << "Handling a directory manager event";
  if (syncable::DirectoryManagerEvent::OPENED == event.what_happened) {
    MutexLock lock(&mutex_);
    LOG(INFO) << "Syncer starting up for: " << event.dirname;
    // The underlying database structure is ready, and we should create
    // the syncer.
    CHECK(syncer_ == NULL);
    syncer_ =
        new Syncer(dirman_, event.dirname, scm_, model_safe_worker_.get());

    syncer_->set_command_channel(command_channel_);
    syncer_events_.reset(NewEventListenerHookup(
        syncer_->channel(), this, &SyncerThread::HandleSyncerEvent));
    // Wake ThreadMainLoop, which may be waiting for syncer_.
    pthread_cond_broadcast(&changed_.condvar_);
  }
}
+
+static inline void CheckConnected(bool* connected,
+ HttpResponse::ServerConnectionCode code,
+ pthread_cond_t* condvar) {
+ if (*connected) {
+ if (HttpResponse::CONNECTION_UNAVAILABLE == code) {
+ *connected = false;
+ pthread_cond_broadcast(condvar);
+ }
+ } else {
+ if (HttpResponse::SERVER_CONNECTION_OK == code) {
+ *connected = true;
+ pthread_cond_broadcast(condvar);
+ }
+ }
+}
+
// Subscribes to connection-status events and seeds connected_ from the
// current server status.
// NOTE(review): unlike WatchClientCommands this does not take mutex_ --
// presumably safe because it is only called from the constructor before the
// thread starts; confirm if other callers appear.
void SyncerThread::WatchConnectionManager(ServerConnectionManager* conn_mgr) {
  conn_mgr_hookup_.reset(NewEventListenerHookup(conn_mgr->channel(), this,
      &SyncerThread::HandleServerConnectionEvent));
  CheckConnected(&connected_, conn_mgr->server_status(),
                 &changed_.condvar_);
}
+
+void SyncerThread::HandleServerConnectionEvent(
+ const ServerConnectionEvent& event) {
+ if (ServerConnectionEvent::STATUS_CHANGED == event.what_happened) {
+ MutexLock lock(&mutex_);
+ CheckConnected(&connected_, event.connection_code,
+ &changed_.condvar_);
+ }
+}
+
// Returns the channel on which syncer events are re-broadcast to listeners.
// The channel is owned by this object and lives until destruction.
SyncerEventChannel* SyncerThread::channel() {
  return syncer_event_channel_.get();
}
+
+// inputs and return value in milliseconds
+int SyncerThread::CalculateSyncWaitTime(int last_interval, int user_idle_ms) {
+ // syncer_polling_interval_ is in seconds
+ int syncer_polling_interval_ms = syncer_polling_interval_ * 1000;
+
+ // This is our default and lower bound.
+ int next_wait = syncer_polling_interval_ms;
+
+ // Get idle time, bounded by max wait.
+ int idle = min(user_idle_ms, syncer_max_interval_);
+
+ // If the user has been idle for a while,
+ // we'll start decreasing the poll rate.
+ if (idle >= kPollBackoffThresholdMultiplier * syncer_polling_interval_ms) {
+ next_wait = std::min(AllStatus::GetRecommendedDelaySeconds(
+ last_interval / 1000), syncer_max_interval_ / 1000) * 1000;
+ }
+
+ return next_wait;
+}
+
// Called with mutex_ already locked.  Schedules a nudge at now +
// milliseconds_from_now and wakes the main loop so it can recompute its
// wake time.
// NOTE(review): the header comment promises that overly frequent requests
// are "silently dropped", but no rate limiting is visible here (the
// kNudgeRateLimit* constants are unused) -- confirm intended behavior.
void SyncerThread::NudgeSyncImpl(int milliseconds_from_now,
                                 NudgeSource source) {
  const timespec nudge_time = GetPThreadAbsoluteTime(milliseconds_from_now);
  NudgeObject nudge_object(nudge_time, source);
  nudge_queue_.push(nudge_object);
  pthread_cond_broadcast(&changed_.condvar_);
}
+
+void SyncerThread::WatchTalkMediator(TalkMediator* mediator) {
+ talk_mediator_hookup_.reset(
+ NewEventListenerHookup(
+ mediator->channel(),
+ this,
+ &SyncerThread::HandleTalkMediatorEvent));
+}
+
+void SyncerThread::HandleTalkMediatorEvent(const TalkMediatorEvent& event) {
+ MutexLock lock(&mutex_);
+ switch (event.what_happened) {
+ case TalkMediatorEvent::LOGIN_SUCCEEDED:
+ LOG(INFO) << "P2P: Login succeeded.";
+ p2p_authenticated_ = true;
+ break;
+ case TalkMediatorEvent::LOGOUT_SUCCEEDED:
+ LOG(INFO) << "P2P: Login succeeded.";
+ p2p_authenticated_ = false;
+ break;
+ case TalkMediatorEvent::SUBSCRIPTIONS_ON:
+ LOG(INFO) << "P2P: Subscriptions successfully enabled.";
+ p2p_subscribed_ = true;
+ if (NULL != syncer_) {
+ LOG(INFO) << "Subscriptions on. Nudging syncer for initial push.";
+ NudgeSyncImpl(0, kLocal);
+ }
+ break;
+ case TalkMediatorEvent::SUBSCRIPTIONS_OFF:
+ LOG(INFO) << "P2P: Subscriptions are not enabled.";
+ p2p_subscribed_ = false;
+ break;
+ case TalkMediatorEvent::NOTIFICATION_RECEIVED:
+ LOG(INFO) << "P2P: Updates on server, pushing syncer";
+ if (NULL != syncer_) {
+ NudgeSyncImpl(0, kNotification);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (NULL != syncer_) {
+ syncer_->set_notifications_enabled(p2p_authenticated_ && p2p_subscribed_);
+ }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_thread.h b/chrome/browser/sync/engine/syncer_thread.h
new file mode 100644
index 0000000..30172d0
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread.h
@@ -0,0 +1,235 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class to run the syncer on a thread.
+//
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
+
+#include <list>
+#include <map>
+#include <queue>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/all_status.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // For FRIEND_TEST
+
+class EventListenerHookup;
+
+namespace syncable {
+class DirectoryManager;
+struct DirectoryManagerEvent;
+}
+
+namespace browser_sync {
+
+class ModelSafeWorker;
+class ServerConnectionManager;
+class Syncer;
+class TalkMediator;
+class URLFactory;
+struct ServerConnectionEvent;
+struct SyncerEvent;
+struct SyncerShutdownEvent;
+struct TalkMediatorEvent;
+
+class SyncerThread {
+ FRIEND_TEST(SyncerThreadTest, CalculateSyncWaitTime);
+ FRIEND_TEST(SyncerThreadTest, CalculatePollingWaitTime);
+
+ public:
+ friend class SyncerThreadTest;
+
+ enum NudgeSource {
+ kUnknown = 0,
+ kNotification,
+ kLocal,
+ kContinuation
+ };
+
+ // Server can overwrite these values via client commands.
+ // Standard short poll. This is used when XMPP is off.
+ static const int kDefaultShortPollIntervalSeconds = 60;
+ // Long poll is used when XMPP is on.
+ static const int kDefaultLongPollIntervalSeconds = 3600;
+ // 30 minutes by default. If exponential backoff kicks in, this is
+ // the longest possible poll interval.
+ static const int kDefaultMaxPollIntervalMs = 30 * 60 * 1000;
+
+ SyncerThread(
+ ClientCommandChannel* command_channel,
+ syncable::DirectoryManager* mgr,
+ ServerConnectionManager* connection_manager,
+ AllStatus* all_status,
+ ModelSafeWorker* model_safe_worker);
+ ~SyncerThread();
+
+ void WatchConnectionManager(ServerConnectionManager* conn_mgr);
+ // Creates and starts a syncer thread.
+ // Returns true if it creates a thread or if there's currently a thread
+ // running and false otherwise.
+ bool Start();
+
+ // Stop processing. A max wait of at least 2*server RTT time is recommended.
+ // returns true if we stopped, false otherwise.
+ bool Stop(int max_wait);
+
+ // Nudges the syncer to sync with a delay specified. This API is for access
+ // from the SyncerThread's controller and will cause a mutex lock.
+ bool NudgeSyncer(int milliseconds_from_now, NudgeSource source);
+
+ // Registers this thread to watch talk mediator events.
+ void WatchTalkMediator(TalkMediator* talk_mediator);
+
+ void WatchClientCommands(ClientCommandChannel* channel);
+
+ SyncerEventChannel* channel();
+
+ private:
+ // A few members to gate the rate at which we nudge the syncer.
+ enum {
+ kNudgeRateLimitCount = 6,
+ kNudgeRateLimitTime = 180,
+ };
+
+ // A queue of all scheduled nudges. One insertion for every call to
+ // NudgeQueue().
+ typedef std::pair<timespec, NudgeSource> NudgeObject;
+
+ struct IsTimeSpecGreater {
+ inline bool operator() (const NudgeObject& lhs, const NudgeObject& rhs) {
+ return lhs.first.tv_sec == rhs.first.tv_sec ?
+ lhs.first.tv_nsec > rhs.first.tv_nsec :
+ lhs.first.tv_sec > rhs.first.tv_sec;
+ }
+ };
+
+ typedef std::priority_queue<NudgeObject,
+ std::vector<NudgeObject>, IsTimeSpecGreater>
+ NudgeQueue;
+
+ // Threshold multipler for how long before user should be considered idle.
+ static const int kPollBackoffThresholdMultiplier = 10;
+
+ friend void* RunSyncerThread(void* syncer_thread);
+ void* Run();
+ void HandleDirectoryManagerEvent(
+ const syncable::DirectoryManagerEvent& event);
+ void HandleSyncerEvent(const SyncerEvent& event);
+ void HandleClientCommand(ClientCommandChannel::EventType event);
+
+ void HandleServerConnectionEvent(const ServerConnectionEvent& event);
+
+ void HandleTalkMediatorEvent(const TalkMediatorEvent& event);
+
+ void* ThreadMain();
+ void ThreadMainLoop();
+
+ void SyncMain(Syncer* syncer);
+
+ // Calculates the next sync wait time in seconds. last_poll_wait is the time
+ // duration of the previous polling timeout which was used.
+ // user_idle_milliseconds is updated by this method, and is a report of the
+ // full amount of time since the last period of activity for the user. The
+ // continue_sync_cycle parameter is used to determine whether or not we are
+ // calculating a polling wait time that is a continuation of an sync cycle
+ // which terminated while the syncer still had work to do.
+ int CalculatePollingWaitTime(
+ const AllStatus::Status& status,
+ int last_poll_wait, // in s
+ int* user_idle_milliseconds,
+ bool* continue_sync_cycle);
+ // Helper to above function, considers effect of user idle time.
+ int CalculateSyncWaitTime(int last_wait, int user_idle_ms);
+
+ // Sets the source value of the controlled syncer's updates_source value.
+ // The initial sync boolean is updated if read as a sentinel. The following
+ // two methods work in concert to achieve this goal.
+ void UpdateNudgeSource(const timespec& now, bool* continue_sync_cycle,
+ bool* initial_sync);
+ void SetUpdatesSource(bool nudged, NudgeSource nudge_source,
+ bool* initial_sync);
+
+ // for unit tests only
+ void DisableIdleDetection() { disable_idle_detection_ = true; }
+
+ // false when we want to stop the thread.
+ bool stop_syncer_thread_;
+
+ // we use one mutex for all members except the channel.
+ PThreadMutex mutex_;
+ typedef PThreadScopedLock<PThreadMutex> MutexLock;
+
+ // Handle of the running thread.
+ pthread_t thread_;
+ bool thread_running_;
+
+ // Gets signaled whenever a thread outside of the syncer thread
+ // changes a member variable.
+ PThreadCondVar changed_;
+
+ // State of the server connection
+ bool connected_;
+
+ // State of the notification framework is tracked by these values.
+ bool p2p_authenticated_;
+ bool p2p_subscribed_;
+
+ scoped_ptr<EventListenerHookup> client_command_hookup_;
+ scoped_ptr<EventListenerHookup> conn_mgr_hookup_;
+ const AllStatus* allstatus_;
+
+ Syncer* syncer_;
+
+ syncable::DirectoryManager* dirman_;
+ ServerConnectionManager* scm_;
+
+ // Modifiable versions of kDefaultLongPollIntervalSeconds which can be
+ // updated by the server.
+ int syncer_short_poll_interval_seconds_;
+ int syncer_long_poll_interval_seconds_;
+
+ // The time we wait between polls in seconds. This is used as lower bound on
+ // our wait time. Updated once per loop from the command line flag.
+ int syncer_polling_interval_;
+
+ // The upper bound on the nominal wait between polls in seconds. Note that
+ // this bounds the "nominal" poll interval, while the the actual interval
+ // also takes previous failures into account.
+ int syncer_max_interval_;
+
+ scoped_ptr<SyncerEventChannel> syncer_event_channel_;
+
+ // This causes syncer to start syncing ASAP. If the rate of requests is
+ // too high the request will be silently dropped. mutex_ should be held when
+ // this is called.
+ void NudgeSyncImpl(int milliseconds_from_now, NudgeSource source);
+
+ NudgeQueue nudge_queue_;
+
+ scoped_ptr<EventListenerHookup> talk_mediator_hookup_;
+ ClientCommandChannel* const command_channel_;
+ scoped_ptr<EventListenerHookup> directory_manager_hookup_;
+ scoped_ptr<EventListenerHookup> syncer_events_;
+
+ // Handles any tasks that will result in model changes (modifications of
+ // syncable::Entries). Pass this to the syncer created and managed by |this|.
+ // Only non-null in syncapi case.
+ scoped_ptr<ModelSafeWorker> model_safe_worker_;
+
+ // Useful for unit tests
+ bool disable_idle_detection_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncerThread);
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
diff --git a/chrome/browser/sync/engine/syncer_thread_unittest.cc b/chrome/browser/sync/engine/syncer_thread_unittest.cc
new file mode 100644
index 0000000..dd81176
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_thread_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+#include <map>
+#include <set>
+#include <strstream>
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/syncer_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
// Minimal fixture: individual tests construct SyncerThread instances with all
// dependencies NULL, so no setup/teardown is needed here.
class SyncerThreadTest : public testing::Test {
 protected:
  SyncerThreadTest() {}
  virtual ~SyncerThreadTest() {}
  virtual void SetUp() {}
  virtual void TearDown() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(SyncerThreadTest);
};
+
// Smoke test: SyncerThread can be constructed and destroyed with all
// dependencies NULL, without ever starting the thread.
TEST_F(SyncerThreadTest, Construction) {
  SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
}
+
// Exercises the idle-backoff helper: below the idle threshold the base
// polling interval is returned; above it, waits back off exponentially but
// are capped at kDefaultMaxPollIntervalMs.
TEST_F(SyncerThreadTest, CalculateSyncWaitTime) {
  SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
  syncer_thread.DisableIdleDetection();

  // Syncer_polling_interval_ is less than max poll interval
  int syncer_polling_interval = 1; // Needed since AssertionResult is not a
                                   // friend of SyncerThread
  syncer_thread.syncer_polling_interval_ = syncer_polling_interval;

  // user_idle_ms is less than 10 * (syncer_polling_interval*1000).
  ASSERT_EQ(syncer_polling_interval * 1000,
            syncer_thread.CalculateSyncWaitTime(1000, 0));
  ASSERT_EQ(syncer_polling_interval * 1000,
            syncer_thread.CalculateSyncWaitTime(1000, 1));

  // user_idle_ms is ge than 10 * (syncer_polling_interval*1000).
  int last_poll_time = 2000;
  ASSERT_LE(last_poll_time,
            syncer_thread.CalculateSyncWaitTime(last_poll_time, 10000));
  ASSERT_GE(last_poll_time*3,
            syncer_thread.CalculateSyncWaitTime(last_poll_time, 10000));
  ASSERT_LE(last_poll_time,
            syncer_thread.CalculateSyncWaitTime(last_poll_time, 100000));
  ASSERT_GE(last_poll_time*3,
            syncer_thread.CalculateSyncWaitTime(last_poll_time, 100000));

  // Maximum backoff time should be syncer_max_interval.
  int near_threshold = SyncerThread::kDefaultMaxPollIntervalMs / 2 - 1;
  int threshold = SyncerThread::kDefaultMaxPollIntervalMs;
  int over_threshold = SyncerThread::kDefaultMaxPollIntervalMs + 1;
  ASSERT_LE(near_threshold,
            syncer_thread.CalculateSyncWaitTime(near_threshold, 10000));
  ASSERT_GE(SyncerThread::kDefaultMaxPollIntervalMs,
            syncer_thread.CalculateSyncWaitTime(near_threshold, 10000));
  ASSERT_EQ(SyncerThread::kDefaultMaxPollIntervalMs,
            syncer_thread.CalculateSyncWaitTime(threshold, 10000));
  ASSERT_EQ(SyncerThread::kDefaultMaxPollIntervalMs,
            syncer_thread.CalculateSyncWaitTime(over_threshold, 10000));

  // Possible idle time must be capped by syncer_max_interval.
  int over_sync_max_interval =
      SyncerThread::kDefaultMaxPollIntervalMs + 1;
  syncer_polling_interval = over_sync_max_interval / 100; // so 1000* is right
  syncer_thread.syncer_polling_interval_ = syncer_polling_interval;
  ASSERT_EQ(syncer_polling_interval * 1000,
            syncer_thread.CalculateSyncWaitTime(1000, over_sync_max_interval));
  syncer_polling_interval = 1;
  syncer_thread.syncer_polling_interval_ = syncer_polling_interval;
  ASSERT_LE(last_poll_time,
            syncer_thread.CalculateSyncWaitTime(last_poll_time,
                                                over_sync_max_interval));
  ASSERT_GE(last_poll_time * 3,
            syncer_thread.CalculateSyncWaitTime(last_poll_time,
                                                over_sync_max_interval));
}
+
// Exercises CalculatePollingWaitTime across: notifications off (short poll),
// notifications on (long poll), pending-work continuation/backoff, and the
// nudge-driven reset of the continuation flag.
TEST_F(SyncerThreadTest, CalculatePollingWaitTime) {
  // Set up the environment
  int user_idle_milliseconds_param = 0;

  SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
  syncer_thread.DisableIdleDetection();

  // Notifications disabled should result in a polling interval of
  // kDefaultShortPollInterval
  {
    AllStatus::Status status = {};
    status.notifications_enabled = 0;
    bool continue_sync_cycle_param = false;

    // No work and no backoff.
    ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  0,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);

    // In this case the continue_sync_cycle is turned off.
    continue_sync_cycle_param = true;
    ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  0,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);

    // TODO(brg) : Find a way to test exponential backoff is inoperable.
    // Exponential backoff should be turned on when notifications are disabled
    // but this can not be tested since we can not set the last input info.
  }

  // Notifications enabled should result in a polling interval of
  // SyncerThread::kDefaultLongPollIntervalSeconds
  {
    AllStatus::Status status = {};
    status.notifications_enabled = 1;
    bool continue_sync_cycle_param = false;

    // No work and no backoff.
    ASSERT_EQ(SyncerThread::kDefaultLongPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  0,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);

    // In this case the continue_sync_cycle is turned off.
    continue_sync_cycle_param = true;
    ASSERT_EQ(SyncerThread::kDefaultLongPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  0,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);

    // TODO(brg) : Find a way to test exponential backoff.
    // Exponential backoff should be turned off when notifications are enabled,
    // but this can not be tested since we can not set the last input info.
  }

  // There are two states which can cause a continuation, either the updates
  // available do not match the updates received, or the unsynced count is
  // non-zero.
  {
    AllStatus::Status status = {};
    status.updates_available = 1;
    status.updates_received = 0;
    bool continue_sync_cycle_param = false;

    ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    continue_sync_cycle_param = false;
    ASSERT_GE(3, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    status.updates_received = 1;
    ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  10,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);
  }

  {
    AllStatus::Status status = {};
    status.unsynced_count = 1;
    bool continue_sync_cycle_param = false;

    ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    continue_sync_cycle_param = false;
    ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
                     status,
                     0,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    status.unsynced_count = 0;
    ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  4,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);
  }

  // Regression for exponential backoff reset when the
  // syncer is nudged.
  {
    AllStatus::Status status = {};
    status.unsynced_count = 1;
    bool continue_sync_cycle_param = false;

    // Expect move from default polling interval to exponential backoff due to
    // unsynced_count != 0.
    ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
                     status,
                     3600,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    continue_sync_cycle_param = false;
    ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
                     status,
                     3600,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    // Expect exponential backoff.
    ASSERT_LE(2, syncer_thread.CalculatePollingWaitTime(
                     status,
                     2,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_GE(6, syncer_thread.CalculatePollingWaitTime(
                     status,
                     2,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    // A nudge resets the continue_sync_cycle_param value, so our backoff
    // should return to the minimum.
    continue_sync_cycle_param = false;
    ASSERT_LE(0, syncer_thread.CalculatePollingWaitTime(
                     status,
                     3600,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    continue_sync_cycle_param = false;
    ASSERT_GE(2, syncer_thread.CalculatePollingWaitTime(
                     status,
                     3600,
                     &user_idle_milliseconds_param,
                     &continue_sync_cycle_param));
    ASSERT_TRUE(continue_sync_cycle_param);

    // Setting unsynced_count = 0 returns us to the default polling interval.
    status.unsynced_count = 0;
    ASSERT_EQ(SyncerThread::kDefaultShortPollIntervalSeconds,
              syncer_thread.CalculatePollingWaitTime(
                  status,
                  4,
                  &user_idle_milliseconds_param,
                  &continue_sync_cycle_param));
    ASSERT_FALSE(continue_sync_cycle_param);
  }
}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_types.h b/chrome/browser/sync/engine/syncer_types.h
new file mode 100644
index 0000000..0d61984
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_types.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
+
+#include <map>
+#include <vector>
+
+#include "chrome/browser/sync/util/event_sys.h"
+
+namespace syncable {
+class BaseTransaction;
+class Id;
+}
+
+// The intent of this is to keep all shared data types and enums
+// for the syncer in a single place without having dependencies between
+// other files.
+namespace browser_sync {
+
+class SyncProcessState;
+class SyncCycleState;
+class SyncerSession;
+class Syncer;
+
// Result of attempting to apply a single downloaded update to the local
// data model.
enum UpdateAttemptResponse {
  // Update was applied or safely ignored
  SUCCESS,

  // This state is deprecated.
  // TODO(sync): Remove this state.
  BLOCKED,

  // Conflicts with the local data representation.
  // This can also mean that the entry doesn't currently make sense
  // if we applied it.
  CONFLICT,

  // This return value is only returned by AttemptToUpdateEntryWithoutMerge
  // if we have a name conflict. Users of AttemptToUpdateEntry should never
  // see this return value, we'll return CONFLICT.
  NAME_CONFLICT,
};
+
// Outcome of processing (verifying and storing) one update received from
// the server.
enum ServerUpdateProcessingResult {
  // Success. Update applied and stored in SERVER_* fields or dropped if
  // irrelevant.
  SUCCESS_PROCESSED,

  // Success. Update details stored in SERVER_* fields, but wasn't applied.
  SUCCESS_STORED,

  // Update is illegally inconsistent with earlier updates. e.g. A bookmark
  // becoming a folder.
  FAILED_INCONSISTENT,

  // Update is illegal when considered alone. e.g. broken UTF-8 in the name.
  FAILED_CORRUPT,

  // Only used by VerifyUpdate. Indicates that an update is valid. As
  // VerifyUpdate cannot return SUCCESS_STORED, we reuse the value.
  SUCCESS_VALID = SUCCESS_STORED
};
+
// Different results from the verify phase will yield different
// methods of processing in the ProcessUpdates phase. The SKIP
// result means the entry doesn't go to the ProcessUpdates phase.
// NOTE(review): the precise meaning of each value is defined by the verify
// code, which is outside this header; inline notes below are from the names
// only — confirm against VerifyUpdate.
enum VerifyResult {
  VERIFY_FAIL,       // Update failed verification.
  VERIFY_SUCCESS,    // Update passed verification.
  VERIFY_UNDELETE,   // Presumably: update undeletes an entry.
  VERIFY_SKIP,       // Entry does not proceed to ProcessUpdates (see above).
  VERIFY_UNDECIDED   // No decision was reached.
};
+
// Outcome of verifying whether an entry may be included in a commit.
// Distinct from VerifyResult, which applies to downloaded updates.
// NOTE(review): detailed semantics live in the commit verification code,
// outside this header.
enum VerifyCommitResult {
  VERIFY_BLOCKED,
  VERIFY_UNSYNCABLE,
  VERIFY_OK,
};
+
+struct SyncerEvent {
+ typedef SyncerEvent EventType;
+
+ enum EventCause {
+ COMMITS_SUCCEEDED, // Count is stored in successful_commit_count.
+
+ STATUS_CHANGED,
+
+ // Take care not to wait in shutdown handlers for the syncer to stop as it
+ // causes a race in the event system. Use SyncerShutdownEvent instead.
+ SHUTDOWN_USE_WITH_CARE,
+
+ // We're over our quota.
+ OVER_QUOTA,
+
+ // This event is how the syncer requests that it be synced.
+ REQUEST_SYNC_NUDGE,
+
+ // We have reached the SYNCER_END state in the main sync loop.
+ // Check the SyncerSession for information like whether we need to continue
+ // syncing (SyncerSession::ShouldSyncAgain).
+ SYNC_CYCLE_ENDED,
+ };
+
+ static bool IsChannelShutdownEvent(const SyncerEvent& e) {
+ return SHUTDOWN_USE_WITH_CARE == e.what_happened;
+ }
+
+ // This is used to put SyncerEvents into sorted STL structures.
+ bool operator < (const SyncerEvent& r) const {
+ return this->what_happened < r.what_happened;
+ }
+
+ EventCause what_happened;
+
+ // The last session used for syncing.
+ SyncerSession* last_session;
+
+ int successful_commit_count;
+
+ // How many milliseconds later should the syncer kick in?
+ // for REQUEST_SYNC_NUDGE only.
+ int nudge_delay_milliseconds;
+};
+
+struct SyncerShutdownEvent {
+ typedef Syncer* EventType;
+ static bool IsChannelShutdownEvent(Syncer* syncer) {
+ return true;
+ }
+};
+
// Channel over which SyncerEvents are broadcast to listeners.
typedef EventChannel<SyncerEvent, PThreadMutex> SyncerEventChannel;

// Channel used to announce syncer shutdown (see SyncerShutdownEvent).
typedef EventChannel<SyncerShutdownEvent, PThreadMutex> ShutdownChannel;

// A set of mutually conflicting entries, identified by Id.  Passed between
// parts of the syncer during the processing of one sync loop; it lives on
// the stack.  We don't expose the number of conflicts during SyncShare as
// the conflicts may be solved automatically by the conflict resolver.
typedef std::vector<syncable::Id> ConflictSet;

// Maps a conflicting Id to the ConflictSet containing it.  Sets are stored
// by pointer — presumably shared between several map entries; confirm at
// the call sites that populate this map.
typedef std::map<syncable::Id, ConflictSet*> IdToConflictSetMap;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
new file mode 100644
index 0000000..27bdb9b
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -0,0 +1,4588 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Syncer unit tests. Unfortunately a lot of these tests
+// are outdated and need to be reworked and updated.
+
+#include <list>
+#include <map>
+#include <set>
+#include <strstream>
+
+#include "base/at_exit.h"
+
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/engine/client_command_channel.h"
+#include "chrome/browser/sync/engine/conflict_resolution_view.h"
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/get_commit_ids_command.h"
+#include "chrome/browser/sync/engine/model_safe_worker.h"
+#include "chrome/browser/sync/engine/net/server_connection_manager.h"
+#include "chrome/browser/sync/engine/process_updates_command.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/test/sync/engine/mock_server_connection.h"
+#include "chrome/test/sync/engine/test_directory_setter_upper.h"
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::map;
+using std::multimap;
+using std::set;
+using std::string;
+
+namespace browser_sync {
+
+using syncable::BaseTransaction;
+using syncable::Blob;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::ExtendedAttribute;
+using syncable::ExtendedAttributeKey;
+using syncable::Id;
+using syncable::MutableEntry;
+using syncable::MutableExtendedAttribute;
+using syncable::ReadTransaction;
+using syncable::ScopedDirLookup;
+using syncable::WriteTransaction;
+
+using syncable::BASE_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::GET_BY_PARENTID_AND_NAME;
+using syncable::GET_BY_PATH;
+using syncable::GET_BY_TAG;
+using syncable::ID;
+using syncable::IS_BOOKMARK_OBJECT;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::NAME;
+using syncable::NEXT_ID;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SINGLETON_TAG;
+using syncable::UNITTEST;
+using syncable::UNSANITIZED_NAME;
+
namespace {
// Payload written into entries' extended attributes by the tests below.
const char* kTestData = "Hello World!";
// Length of kTestData, excluding the NUL — keep in sync with the literal.
const int kTestDataLen = 12;
// Fixed timestamp for log-request tests.  NOTE(review): not referenced in
// the visible portion of this file — confirm usage elsewhere.
const int64 kTestLogRequestTimestamp = 123456;
}  // namespace
+
+
// Fixture for Syncer unit tests.  Builds a test syncable directory, a mock
// server connection, and a Syncer wired to its event and command channels,
// so tests can drive sync cycles and inspect the resulting events and the
// order of ids committed to the mock server.
class SyncerTest : public testing::Test {
 protected:
  SyncerTest() : client_command_channel_(0) {
  }

  // Records the most recent ClientCommand seen on the command channel into
  // last_client_command_.
  void HandleClientCommand(const sync_pb::ClientCommand* event) {
    last_client_command_ = *event;
  }

  // Collects "interesting" syncer events into syncer_events_.  Status-style
  // events are ignored; an unknown cause aborts the test via CHECK.
  void HandleSyncerEvent(SyncerEvent event) {
    LOG(INFO) << "HandleSyncerEvent in unittest " << event.what_happened;
    // we only test for entry-specific events, not status changed ones.
    switch (event.what_happened) {
      case SyncerEvent::STATUS_CHANGED:
        // fall through
      case SyncerEvent::SYNC_CYCLE_ENDED:
        // fall through
      case SyncerEvent::COMMITS_SUCCEEDED:
        return;
      case SyncerEvent::SHUTDOWN_USE_WITH_CARE:
      case SyncerEvent::OVER_QUOTA:
      case SyncerEvent::REQUEST_SYNC_NUDGE:
        LOG(INFO) << "Handling event type " << event.what_happened;
        break;
      default:
        CHECK(false) << "Handling unknown error type in unit tests!!";
    }
    syncer_events_.insert(event);
  }

  // Repeatedly calls SyncShare on a fresh SyncProcessState until it reports
  // there is no more work, failing the test if it never terminates.
  void LoopSyncShare(Syncer* syncer) {
    SyncProcessState state(syncdb_.manager(), syncdb_.name(),
                           mock_server_.get(),
                           syncer->conflict_resolver(),
                           syncer->channel(),
                           syncer->model_safe_worker());
    bool should_loop = false;
    int loop_iterations = 0;
    do {
      ASSERT_LT(++loop_iterations, 100) << "infinite loop detected. please fix";
      should_loop = syncer->SyncShare(&state);
    } while (should_loop);
  }

  virtual void SetUp() {
    syncdb_.SetUp();

    mock_server_.reset(
        new MockConnectionManager(syncdb_.manager(), syncdb_.name()));
    model_safe_worker_.reset(new ModelSafeWorker());
    // Safe to pass NULL as Authwatcher for now since the code path that
    // uses it is not unittested yet.
    // NOTE(review): no NULL argument is actually passed below — confirm
    // this comment still matches the Syncer constructor signature.
    syncer_ = new Syncer(syncdb_.manager(), syncdb_.name(),
                         mock_server_.get(),
                         model_safe_worker_.get());
    CHECK(syncer_->channel());

    // Subscribe this fixture to syncer events and client commands.
    hookup_.reset(NewEventListenerHookup(syncer_->channel(), this,
                                         &SyncerTest::HandleSyncerEvent));

    command_channel_hookup_.reset(NewEventListenerHookup(
        &client_command_channel_, this, &SyncerTest::HandleClientCommand));
    syncer_->set_command_channel(&client_command_channel_);

    state_.reset(new SyncProcessState(syncdb_.manager(), syncdb_.name(),
                                      mock_server_.get(),
                                      syncer_->conflict_resolver(),
                                      syncer_->channel(),
                                      syncer_->model_safe_worker()));

    // Sanity check: a freshly set up directory has an empty root.
    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
    CHECK(dir.good());
    ReadTransaction trans(dir, __FILE__, __LINE__);
    syncable::Directory::ChildHandles children;
    dir->GetChildHandles(&trans, trans.root_id(), &children);
    ASSERT_EQ(0, children.size());
    syncer_events_.clear();
    root_id_ = ids_.root();
    parent_id_ = ids_.MakeServer("parent id");
    child_id_ = ids_.MakeServer("child id");
  }

  virtual void TearDown() {
    // Tear down in reverse dependency order; syncer_ is raw-owned.
    mock_server_.reset();
    hookup_.reset();
    command_channel_hookup_.reset();
    delete syncer_;
    syncdb_.TearDown();
  }

  // Stores kTestData in an extended attribute of |entry| and marks the
  // entry unsynced so it will be committed.
  void WriteTestDataToEntry(WriteTransaction* trans, MutableEntry* entry) {
    EXPECT_FALSE(entry->Get(IS_DIR));
    EXPECT_FALSE(entry->Get(IS_DEL));
    Blob test_value(kTestData, kTestData + kTestDataLen);
    ExtendedAttributeKey key(entry->Get(META_HANDLE), PSTR("DATA"));
    MutableExtendedAttribute attr(trans, CREATE, key);
    attr.mutable_value()->swap(test_value);
    entry->Put(syncable::IS_UNSYNCED, true);
  }

  // Verifies that |entry| still carries the extended attribute written by
  // WriteTestDataToEntry.
  void VerifyTestDataInEntry(BaseTransaction* trans, Entry* entry) {
    EXPECT_FALSE(entry->Get(IS_DIR));
    EXPECT_FALSE(entry->Get(IS_DEL));
    Blob test_value(kTestData, kTestData + kTestDataLen);
    ExtendedAttributeKey key(entry->Get(META_HANDLE), PSTR("DATA"));
    ExtendedAttribute attr(trans, GET_BY_HANDLE, key);
    EXPECT_FALSE(attr.is_deleted());
    EXPECT_EQ(test_value, attr.value());
  }

  // Returns the syncer_stuck flag of |state| via a SyncerStatus view.
  bool SyncerStuck(SyncProcessState *state) {
    SyncerStatus status(NULL, state);
    return status.syncer_stuck();
  }

  void SyncRepeatedlyToTriggerConflictResolution(SyncProcessState *state) {
    // We should trigger after less than 6 syncs, but we want to avoid brittle
    // tests.
    for (int i = 0 ; i < 6 ; ++i)
      syncer_->SyncShare(state);
  }

  void SyncRepeatedlyToTriggerStuckSignal(SyncProcessState *state) {
    // We should trigger after less than 10 syncs, but we want to avoid brittle
    // tests.
    for (int i = 0 ; i < 12 ; ++i)
      syncer_->SyncShare(state);
  }

  // Enumeration of alterations to entries for commit ordering tests.
  enum EntryFeature {
    LIST_END = 0,  // Denotes the end of the list of features from below.
    SYNCED,        // Items are unsynced by default; SYNCED clears the flag.
    DELETED,
    OLD_MTIME,
    MOVED_FROM_ROOT,
  };

  // One row of a commit-ordering test table; see RunCommitOrderingTest.
  struct CommitOrderingTest {
    // Expected commit index; a negative value means the item is expected
    // not to be committed at all (see RunCommitOrderingTest).
    int commit_index;
    // Details about the item.
    syncable::Id id;
    syncable::Id parent_id;
    EntryFeature features[10];

    // Sentinel terminating a test table.  Defined elsewhere; presumably
    // carries the root id, which ends the RunCommitOrderingTest loop.
    static const CommitOrderingTest LAST_COMMIT_ITEM;
  };

  // Creates one entry per row of |test| (until the root-id sentinel),
  // applies each row's features, runs the syncer, and checks that the mock
  // server saw the ids committed in the expected order.
  void RunCommitOrderingTest(CommitOrderingTest* test) {
    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
    ASSERT_TRUE(dir.good());
    map<int, syncable::Id> expected_positions;
    {  // Transaction scope.
      WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
      while (!test->id.IsRoot()) {
        if (test->commit_index >= 0) {
          map<int, syncable::Id>::value_type entry(test->commit_index,
                                                   test->id);
          bool double_position = !expected_positions.insert(entry).second;
          ASSERT_FALSE(double_position) << "Two id's expected at one position";
        }
        string utf8_name = test->id.GetServerId();
        PathString name(utf8_name.begin(), utf8_name.end());
        MutableEntry entry(&trans, CREATE, test->parent_id, name);
        entry.Put(syncable::ID, test->id);
        if (test->id.ServerKnows()) {
          entry.Put(BASE_VERSION, 5);
          entry.Put(SERVER_VERSION, 5);
          entry.Put(SERVER_PARENT_ID, test->parent_id);
        }
        entry.Put(syncable::IS_DIR, true);
        entry.Put(syncable::IS_UNSYNCED, true);
        // Set the time to 30 seconds in the future to reduce the chance of
        // flaky tests.
        int64 now_server_time = ClientTimeToServerTime(syncable::Now());
        int64 now_plus_30s = ServerTimeToClientTime(now_server_time + 30000);
        int64 now_minus_2h = ServerTimeToClientTime(now_server_time - 7200000);
        entry.Put(syncable::MTIME, now_plus_30s);
        for (int i = 0 ; i < ARRAYSIZE(test->features) ; ++i) {
          switch (test->features[i]) {
            case LIST_END:
              break;
            case SYNCED:
              entry.Put(syncable::IS_UNSYNCED, false);
              break;
            case DELETED:
              entry.Put(syncable::IS_DEL, true);
              break;
            case OLD_MTIME:
              entry.Put(MTIME, now_minus_2h);
              break;
            case MOVED_FROM_ROOT:
              entry.Put(SERVER_PARENT_ID, trans.root_id());
              break;
            default:
              FAIL() << "Bad value in CommitOrderingTest list";
          }
        }
        test++;
      }
    }
    LoopSyncShare(syncer_);
    ASSERT_EQ(expected_positions.size(), mock_server_->committed_ids().size());
    // If this test starts failing, be aware other sort orders could be valid.
    for (size_t i = 0; i < expected_positions.size(); ++i) {
      EXPECT_EQ(1, expected_positions.count(i));
      EXPECT_EQ(expected_positions[i], mock_server_->committed_ids()[i]);
    }
  }

  // Runs GetCommitIdsCommand with every batch-size limit from
  // expected_id_order.size() + 2 down to 1, verifying the output is always
  // a prefix of |expected_id_order| truncated to the limit.
  void DoTruncationTest(const ScopedDirLookup& dir,
                        const vector<int64>& unsynced_handle_view,
                        const vector<syncable::Id>& expected_id_order) {
    // The expected order is "x", "b", "c", "e", truncated appropriately.
    for (size_t limit = expected_id_order.size() + 2; limit > 0; --limit) {
      SyncCycleState cycle_state;
      SyncerSession session(&cycle_state, state_.get());
      WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
      SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);
      session.set_unsynced_handles(unsynced_handle_view);

      GetCommitIdsCommand command(limit);
      command.BuildCommitIds(&session);
      vector<syncable::Id> output = command.ordered_commit_set_.GetCommitIds();
      int truncated_size = std::min(limit, expected_id_order.size());
      ASSERT_EQ(truncated_size, output.size());
      for (int i = 0; i < truncated_size; ++i) {
        ASSERT_EQ(expected_id_order[i], output[i])
            << "At index " << i << " with batch size limited to " << limit;
      }
    }
  }

  // Convenience overload: creates an unsynced directory named |entry_name|
  // with a server-style id built from |idstring|.
  int64 CreateUnsyncedDirectory(const PathString& entry_name,
                                const string& idstring) {
    return CreateUnsyncedDirectory(entry_name,
                                   syncable::Id::CreateFromServerId(idstring));
  }

  // Creates an unsynced directory entry under the root with the given id;
  // returns its metahandle.  BASE_VERSION is 1 for server-known ids, 0 for
  // local-only ids.
  int64 CreateUnsyncedDirectory(const PathString& entry_name,
                                const syncable::Id& id) {
    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
    EXPECT_TRUE(dir.good());
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(),
                       entry_name);
    EXPECT_TRUE(entry.good());
    entry.Put(syncable::IS_UNSYNCED, true);
    entry.Put(syncable::IS_DIR, true);
    entry.Put(syncable::BASE_VERSION, id.ServerKnows() ? 1 : 0);
    entry.Put(syncable::ID, id);
    return entry.Get(META_HANDLE);
  }

  // Some ids to aid tests. Only the root one's value is specific. The rest
  // are named for test clarity.
  syncable::Id root_id_;
  syncable::Id parent_id_;
  syncable::Id child_id_;

  TestIdFactory ids_;

  TestDirectorySetterUpper syncdb_;
  scoped_ptr<MockConnectionManager> mock_server_;
  scoped_ptr<EventListenerHookup> hookup_;
  scoped_ptr<EventListenerHookup> command_channel_hookup_;
  ClientCommandChannel client_command_channel_;

  // Owned raw pointer; deleted in TearDown.
  Syncer* syncer_;
  scoped_ptr<SyncProcessState> state_;
  scoped_ptr<ModelSafeWorker> model_safe_worker_;
  // Events captured by HandleSyncerEvent during a test.
  std::set<SyncerEvent> syncer_events_;
  sync_pb::ClientCommand last_client_command_;

  DISALLOW_COPY_AND_ASSIGN(SyncerTest);
};
+
+TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ {
+ Syncer::UnsyncedMetaHandles handles;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ SyncerUtil::GetUnsyncedEntries(&trans, &handles);
+ }
+ ASSERT_EQ(0, handles.size());
+ }
+ // TODO(sync): When we can dynamically connect and disconnect the mock
+ // ServerConnectionManager test disconnected GetUnsyncedEntries here. It's a
+ // regression for a very old bug.
+}
+
// Builds the tree x (b (d) c (e)), marks only "e" as unsynced-in-view, and
// verifies GetCommitIdsCommand emits the ancestor chain x, b, c, e in order,
// truncated to each batch-size limit (via DoTruncationTest).
TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());
  int64 handle_c = CreateUnsyncedDirectory(PSTR("C"), ids_.MakeLocal("c"));
  int64 handle_x = CreateUnsyncedDirectory(PSTR("X"), ids_.MakeLocal("x"));
  int64 handle_b = CreateUnsyncedDirectory(PSTR("B"), ids_.MakeLocal("b"));
  int64 handle_d = CreateUnsyncedDirectory(PSTR("D"), ids_.MakeLocal("d"));
  int64 handle_e = CreateUnsyncedDirectory(PSTR("E"), ids_.MakeLocal("e"));
  {
    // Reparent the flat entries into the nested arrangement and flag them
    // as bookmark objects so they are eligible for commit.
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry_x(&wtrans, GET_BY_HANDLE, handle_x);
    MutableEntry entry_b(&wtrans, GET_BY_HANDLE, handle_b);
    MutableEntry entry_c(&wtrans, GET_BY_HANDLE, handle_c);
    MutableEntry entry_d(&wtrans, GET_BY_HANDLE, handle_d);
    MutableEntry entry_e(&wtrans, GET_BY_HANDLE, handle_e);
    entry_x.Put(IS_BOOKMARK_OBJECT, true);
    entry_b.Put(IS_BOOKMARK_OBJECT, true);
    entry_c.Put(IS_BOOKMARK_OBJECT, true);
    entry_d.Put(IS_BOOKMARK_OBJECT, true);
    entry_e.Put(IS_BOOKMARK_OBJECT, true);
    entry_b.Put(PARENT_ID, entry_x.Get(ID));
    entry_c.Put(PARENT_ID, entry_x.Get(ID));
    entry_c.PutPredecessor(entry_b.Get(ID));  // c ordered after b under x.
    entry_d.Put(PARENT_ID, entry_b.Get(ID));
    entry_e.Put(PARENT_ID, entry_c.Get(ID));
  }

  // The arrangement is now: x (b (d) c (e)).
  vector<int64> unsynced_handle_view;
  vector<syncable::Id> expected_order;
  // The expected order is "x", "b", "c", "e", truncated appropriately.
  unsynced_handle_view.push_back(handle_e);
  expected_order.push_back(ids_.MakeLocal("x"));
  expected_order.push_back(ids_.MakeLocal("b"));
  expected_order.push_back(ids_.MakeLocal("c"));
  expected_order.push_back(ids_.MakeLocal("e"));
  DoTruncationTest(dir, unsynced_handle_view, expected_order);
}
+
+// TODO(chron): More corner case unit tests around validation
// Exercises CommitMetahandleIterator: invalid on an empty session, and
// walks exactly the session's unsynced handles, in order, when populated.
TEST_F(SyncerTest, TestCommitMetahandleIterator) {
  SyncCycleState cycle_state;
  SyncerSession session(&cycle_state, state_.get());
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());

  {
    // No unsynced handles: the iterator starts invalid and cannot advance.
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);

    GetCommitIdsCommand::OrderedCommitSet commit_set;
    GetCommitIdsCommand::CommitMetahandleIterator iterator(&session,
                                                           &commit_set);
    EXPECT_FALSE(iterator.Valid());
    EXPECT_FALSE(iterator.Increment());
  }

  {
    // Three unsynced directories: the iterator visits each metahandle in
    // insertion order, then becomes invalid.
    vector<int64> session_metahandles;
    session_metahandles.push_back(
        CreateUnsyncedDirectory(PSTR("test1"), "testid1"));
    session_metahandles.push_back(
        CreateUnsyncedDirectory(PSTR("test2"), "testid2"));
    session_metahandles.push_back(
        CreateUnsyncedDirectory(PSTR("test3"), "testid3"));
    session.set_unsynced_handles(session_metahandles);

    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    SyncerSession::ScopedSetWriteTransaction set_trans(&session, &wtrans);
    GetCommitIdsCommand::OrderedCommitSet commit_set;
    GetCommitIdsCommand::CommitMetahandleIterator iterator(&session,
                                                           &commit_set);

    EXPECT_TRUE(iterator.Valid());
    EXPECT_EQ(iterator.Current(), session_metahandles[0]);
    EXPECT_TRUE(iterator.Increment());

    EXPECT_TRUE(iterator.Valid());
    EXPECT_EQ(iterator.Current(), session_metahandles[1]);
    EXPECT_TRUE(iterator.Increment());

    EXPECT_TRUE(iterator.Valid());
    EXPECT_EQ(iterator.Current(), session_metahandles[2]);
    EXPECT_FALSE(iterator.Increment());

    EXPECT_FALSE(iterator.Valid());
  }
}
+
+TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ PathString xattr_key = PSTR("key");
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
+ PSTR("Pete"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::BASE_VERSION, 1);
+ parent.Put(syncable::ID, parent_id_);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("Pete"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 1);
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+
+ SyncCycleState cycle_state;
+ SyncerSession session(&cycle_state, state_.get());
+
+ syncer_->SyncShare(&session);
+ EXPECT_EQ(2, session.unsynced_count());
+ ASSERT_EQ(2, mock_server_->committed_ids().size());
+ // If this test starts failing, be aware other sort orders could be valid.
+ EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+ EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
+ {
+ ReadTransaction rt(dir, __FILE__, __LINE__);
+ Entry entry(&rt, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(entry.good());
+ VerifyTestDataInEntry(&rt, &entry);
+ }
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-1001), ids_.FromNumber(-1000)},
+ {0, ids_.FromNumber(-1000), ids_.FromNumber(0)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTall) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTallLimitedSize) {
+ syncer_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
+ {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
+ {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItem) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleUncommittedDeletedItem) {
+ CommitOrderingTest items[] = {
+ {-1, ids_.FromNumber(-1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest,
+ TestCommitListOrderingSingleLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoLongDeletedItemWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrdering3LongDeletedItemsWithSizeLimit) {
+ syncer_->set_max_commit_batch_size(2);
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {1, ids_.FromNumber(1001), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {2, ids_.FromNumber(1002), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingTwoDeletedItemsWithUnroll) {
+ CommitOrderingTest items[] = {
+ {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingComplexDeletionScenario) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest,
+ TestCommitListOrderingComplexDeletionScenarioWith2RecentDeletes) {
+ CommitOrderingTest items[] = {
+ { 0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
+ {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
+ {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
+ {2, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
+ {3, ids_.FromNumber(1005), ids_.FromNumber(1003), {DELETED}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
+TEST_F(SyncerTest, TestCommitListOrderingDeleteMovedItems) {
+ CommitOrderingTest items[] = {
+ {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
+ {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME,
+ MOVED_FROM_ROOT}},
+ CommitOrderingTest::LAST_COMMIT_ITEM,
+ };
+ RunCommitOrderingTest(items);
+}
+
// Builds two three-deep chains — one live (100/101/102) and one deleted
// with old mtimes (103/104/105) — and verifies all six are committed, with
// the live chain in parent-first order and the deletions grouped at the end.
TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());
  int64 now_server_time = ClientTimeToServerTime(syncable::Now());
  int64 now_minus_2h = ServerTimeToClientTime(now_server_time - 7200000);

  {
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    {
      // Live chain: parent 100 -> child 101 -> grandchild 102.
      MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
                          PSTR("Bob"));
      ASSERT_TRUE(parent.good());
      parent.Put(syncable::IS_UNSYNCED, true);
      parent.Put(syncable::IS_DIR, true);
      parent.Put(syncable::ID, ids_.FromNumber(100));
      parent.Put(syncable::BASE_VERSION, 1);
      MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(100),
                         PSTR("Bob"));
      ASSERT_TRUE(child.good());
      child.Put(syncable::IS_UNSYNCED, true);
      child.Put(syncable::IS_DIR, true);
      child.Put(syncable::ID, ids_.FromNumber(101));
      child.Put(syncable::BASE_VERSION, 1);
      MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(101),
                              PSTR("Bob"));
      ASSERT_TRUE(grandchild.good());
      grandchild.Put(syncable::ID, ids_.FromNumber(102));
      grandchild.Put(syncable::IS_UNSYNCED, true);
      grandchild.Put(syncable::BASE_VERSION, 1);
    }
    {
      // Create three deleted items which deletions we expect to
      // be sent to the server.
      MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
                          PSTR("Pete"));
      ASSERT_TRUE(parent.good());
      parent.Put(syncable::IS_UNSYNCED, true);
      parent.Put(syncable::IS_DIR, true);
      parent.Put(syncable::IS_DEL, true);
      parent.Put(syncable::ID, ids_.FromNumber(103));
      parent.Put(syncable::BASE_VERSION, 1);
      parent.Put(syncable::MTIME, now_minus_2h);  // Long-deleted.
      MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(103),
                         PSTR("Pete"));
      ASSERT_TRUE(child.good());
      child.Put(syncable::IS_UNSYNCED, true);
      child.Put(syncable::IS_DIR, true);
      child.Put(syncable::IS_DEL, true);
      child.Put(syncable::ID, ids_.FromNumber(104));
      child.Put(syncable::BASE_VERSION, 1);
      child.Put(syncable::MTIME, now_minus_2h);
      MutableEntry grandchild(&wtrans, syncable::CREATE, ids_.FromNumber(104),
                              PSTR("Pete"));
      ASSERT_TRUE(grandchild.good());
      grandchild.Put(syncable::IS_UNSYNCED, true);
      grandchild.Put(syncable::ID, ids_.FromNumber(105));
      grandchild.Put(syncable::IS_DEL, true);
      grandchild.Put(syncable::IS_DIR, false);
      grandchild.Put(syncable::BASE_VERSION, 1);
      grandchild.Put(syncable::MTIME, now_minus_2h);
    }
  }

  SyncCycleState cycle_state;
  SyncerSession session(&cycle_state, state_.get());
  syncer_->SyncShare(&session);
  EXPECT_EQ(6, session.unsynced_count());
  ASSERT_EQ(6, mock_server_->committed_ids().size());
  // This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
  // It will treat these like moves.
  vector<syncable::Id> commit_ids(mock_server_->committed_ids());
  EXPECT_EQ(ids_.FromNumber(100), commit_ids[0]);
  EXPECT_EQ(ids_.FromNumber(101), commit_ids[1]);
  EXPECT_EQ(ids_.FromNumber(102), commit_ids[2]);
  // We don't guarantee the delete orders in this test, only that they occur
  // at the end.
  std::sort(commit_ids.begin() + 3, commit_ids.end());
  EXPECT_EQ(ids_.FromNumber(103), commit_ids[3]);
  EXPECT_EQ(ids_.FromNumber(104), commit_ids[4]);
  EXPECT_EQ(ids_.FromNumber(105), commit_ids[5]);
}
+
// Mixes server-known items (positive ids, BASE_VERSION set) with brand-new
// local items (negative ids, no BASE_VERSION) across several transactions
// and verifies the full six-item commit order.
TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());
  {
    // Two server-known directories under the root.
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
    ASSERT_TRUE(parent.good());
    parent.Put(syncable::IS_UNSYNCED, true);
    parent.Put(syncable::IS_DIR, true);
    parent.Put(syncable::ID, parent_id_);
    MutableEntry child(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("2"));
    ASSERT_TRUE(child.good());
    child.Put(syncable::IS_UNSYNCED, true);
    child.Put(syncable::IS_DIR, true);
    child.Put(syncable::ID, child_id_);
    parent.Put(syncable::BASE_VERSION, 1);
    child.Put(syncable::BASE_VERSION, 1);
  }
  {
    // Under parent_id_: server-known 102 and brand-new -103 (no
    // BASE_VERSION, as the server has never seen it).
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
    ASSERT_TRUE(parent.good());
    parent.Put(syncable::IS_UNSYNCED, true);
    parent.Put(syncable::IS_DIR, true);
    parent.Put(syncable::ID, ids_.FromNumber(102));
    MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("B"));
    ASSERT_TRUE(child.good());
    child.Put(syncable::IS_UNSYNCED, true);
    child.Put(syncable::IS_DIR, true);
    child.Put(syncable::ID, ids_.FromNumber(-103));
    parent.Put(syncable::BASE_VERSION, 1);
  }
  {
    // Under child_id_: brand-new -104 and server-known 105.
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry parent(&wtrans, syncable::CREATE, child_id_, PSTR("A"));
    ASSERT_TRUE(parent.good());
    parent.Put(syncable::IS_UNSYNCED, true);
    parent.Put(syncable::IS_DIR, true);
    parent.Put(syncable::ID, ids_.FromNumber(-104));
    MutableEntry child(&wtrans, syncable::CREATE, child_id_, PSTR("B"));
    ASSERT_TRUE(child.good());
    child.Put(syncable::IS_UNSYNCED, true);
    child.Put(syncable::IS_DIR, true);
    child.Put(syncable::ID, ids_.FromNumber(105));
    child.Put(syncable::BASE_VERSION, 1);
  }

  SyncCycleState cycle_state;
  SyncerSession session(&cycle_state, state_.get());
  syncer_->SyncShare(&session);
  EXPECT_EQ(6, session.unsynced_count());
  ASSERT_EQ(6, mock_server_->committed_ids().size());
  // If this test starts failing, be aware other sort orders could be valid.
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
  EXPECT_EQ(ids_.FromNumber(102), mock_server_->committed_ids()[2]);
  EXPECT_EQ(ids_.FromNumber(-103), mock_server_->committed_ids()[3]);
  EXPECT_EQ(ids_.FromNumber(-104), mock_server_->committed_ids()[4]);
  EXPECT_EQ(ids_.FromNumber(105), mock_server_->committed_ids()[5]);
}
+
+// Commit ordering for two siblings under one unsynced parent: the parent is
+// committed first, then the children (sibling order itself is unspecified;
+// see the comment below).
+TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+
+  syncable::Id child2_id = ids_.NewServerId();
+
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("P"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, parent_id_);
+    MutableEntry child1(&wtrans, syncable::CREATE, parent_id_, PSTR("1"));
+    ASSERT_TRUE(child1.good());
+    child1.Put(syncable::IS_UNSYNCED, true);
+    child1.Put(syncable::ID, child_id_);
+    MutableEntry child2(&wtrans, syncable::CREATE, parent_id_, PSTR("2"));
+    ASSERT_TRUE(child2.good());
+    child2.Put(syncable::IS_UNSYNCED, true);
+    child2.Put(syncable::ID, child2_id);
+    parent.Put(syncable::BASE_VERSION, 1);
+    child1.Put(syncable::BASE_VERSION, 1);
+    child2.Put(syncable::BASE_VERSION, 1);
+  }
+
+  SyncCycleState cycle_state;
+  SyncerSession session(&cycle_state, state_.get());
+  syncer_->SyncShare(&session);
+  EXPECT_EQ(3, session.unsynced_count());
+  ASSERT_EQ(3, mock_server_->committed_ids().size());
+  // If this test starts failing, be aware other sort orders could be valid.
+  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+  EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
+  EXPECT_EQ(child2_id, mock_server_->committed_ids()[2]);
+}
+
+// Commit ordering when a new local folder (parent2) is created under a
+// server-known folder, with a server-known child inside it.  After commit,
+// the local ID of parent2 should be replaced with a server-assigned ID and
+// the child re-parented to it.
+// Fix: item_child2 was dereferenced with Get() without first asserting
+// good(); a failed lookup would have crashed the test rather than failing it.
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, parent_id_);
+    parent.Put(syncable::BASE_VERSION, 1);
+  }
+
+  syncable::Id parent2_id = ids_.NewLocalId();
+  syncable::Id child2_id = ids_.NewServerId();
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, parent2_id);
+    MutableEntry child(&wtrans, syncable::CREATE, parent2_id, PSTR("B"));
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::IS_UNSYNCED, true);
+    child.Put(syncable::IS_DIR, true);
+    child.Put(syncable::ID, child2_id);
+    child.Put(syncable::BASE_VERSION, 1);
+  }
+
+  SyncCycleState cycle_state;
+  SyncerSession session(&cycle_state, state_.get());
+
+  syncer_->SyncShare(&session);
+  EXPECT_EQ(3, session.unsynced_count());
+  ASSERT_EQ(3, mock_server_->committed_ids().size());
+  // If this test starts failing, be aware other sort orders could be valid.
+  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+  EXPECT_EQ(parent2_id, mock_server_->committed_ids()[1]);
+  EXPECT_EQ(child2_id, mock_server_->committed_ids()[2]);
+  {
+    ReadTransaction rtrans(dir, __FILE__, __LINE__);
+    PathChar path[] = { '1', *kPathSeparator, 'A', 0};
+    Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
+    ASSERT_TRUE(entry_1A.good());
+    // The pre-commit local ID should no longer resolve...
+    Entry item_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
+    ASSERT_FALSE(item_parent2.good());
+    // ...while the server-known child should now hang off the new folder ID.
+    Entry item_child2(&rtrans, syncable::GET_BY_ID, child2_id);
+    ASSERT_TRUE(item_child2.good());
+    EXPECT_EQ(entry_1A.Get(syncable::ID), item_child2.Get(syncable::PARENT_ID));
+    EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+  }
+}
+
+// Like TestCommitListOrderingAndNewParent, but both the new folder and its
+// child are local-only (negative IDs); the child is re-found after commit via
+// its metahandle, since its ID changes.
+// Fix: entry_b was dereferenced with Get() without first asserting good();
+// a failed lookup would have crashed the test rather than failing it.
+TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, parent_id_);
+    parent.Put(syncable::BASE_VERSION, 1);
+  }
+  int64 meta_handle_a, meta_handle_b;
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(syncable::IS_UNSYNCED, true);
+    parent.Put(syncable::IS_DIR, true);
+    parent.Put(syncable::ID, ids_.FromNumber(-101));
+    meta_handle_a = parent.Get(syncable::META_HANDLE);
+    MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(-101),
+                       PSTR("B"));
+    ASSERT_TRUE(child.good());
+    child.Put(syncable::IS_UNSYNCED, true);
+    child.Put(syncable::IS_DIR, true);
+    child.Put(syncable::ID, ids_.FromNumber(-102));
+    meta_handle_b = child.Get(syncable::META_HANDLE);
+  }
+
+  SyncCycleState cycle_state;
+  SyncerSession session(&cycle_state, state_.get());
+
+  syncer_->SyncShare(&session);
+  EXPECT_EQ(3, session.unsynced_count());
+  ASSERT_EQ(3, mock_server_->committed_ids().size());
+  // If this test starts failing, be aware other sort orders could be valid.
+  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
+  EXPECT_EQ(ids_.FromNumber(-101), mock_server_->committed_ids()[1]);
+  EXPECT_EQ(ids_.FromNumber(-102), mock_server_->committed_ids()[2]);
+  {
+    ReadTransaction rtrans(dir, __FILE__, __LINE__);
+    PathChar path[] = { '1', *kPathSeparator, 'A', 0};
+    Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
+    ASSERT_TRUE(entry_1A.good());
+    // The pre-commit local ID of the folder should no longer resolve.
+    Entry entry_id_minus_101(&rtrans, syncable::GET_BY_ID,
+                             ids_.FromNumber(-101));
+    ASSERT_FALSE(entry_id_minus_101.good());
+    // The child's ID also changed, so look it up by metahandle instead.
+    Entry entry_b(&rtrans, syncable::GET_BY_HANDLE, meta_handle_b);
+    ASSERT_TRUE(entry_b.good());
+    EXPECT_EQ(entry_1A.Get(syncable::ID), entry_b.Get(syncable::PARENT_ID));
+    EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+  }
+}
+
+// Server updates with empty names are illegal; the syncer should survive
+// receiving one both for a fresh item and for a deletion of a known item.
+// There are no state assertions: completing SyncShare without crashing is
+// the success criterion.
+TEST_F(SyncerTest, UpdateWithZeroLengthName) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // One illegal update
+  mock_server_->AddUpdateDirectory(1, 0, "", 1, 10);
+  // And one legal one that we're going to delete.
+  mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10);
+  syncer_->SyncShare();
+  // Delete the legal one. The new update has a null name.
+  mock_server_->AddUpdateDirectory(2, 0, "", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  syncer_->SyncShare();
+}
+
+#ifdef OS_WINDOWS
+// Windows: "prn" is a reserved device name, so the client stores it
+// sanitized as "prn~1".  When the user renames the sanitized entry, the
+// commit must carry the new real name, not the sanitized form.
+TEST_F(SyncerTest, NameSanitizationWithClientRename) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
+  syncer_->SyncShare();
+  {
+    ReadTransaction tr(dir, __FILE__, __LINE__);
+    Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+            PSTR("okay"));
+    ASSERT_TRUE(e.good());
+  }
+  // An illegal name arrives from the server and is sanitized locally.
+  mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
+  syncer_->SyncShare();
+  {
+    WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+                   PSTR("prn~1"));
+    ASSERT_TRUE(e.good());
+    // Simulate a user rename to a legal name.
+    e.PutName(syncable::Name(PSTR("printer")));
+    e.Put(syncable::IS_UNSYNCED, true);
+  }
+  syncer_->SyncShare();
+  {
+    // Inspect the most recent commit message sent to the mock server.
+    vector<CommitMessage*>::const_reverse_iterator it =
+        mock_server_->commit_messages().rbegin();
+    ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
+    const sync_pb::SyncEntity *const *s = (*it)->entries().data();
+    int s_len = (*it)->entries_size();
+    ASSERT_EQ(1, s_len);
+    ASSERT_EQ("printer", (*s)[0].name());
+  }
+}
+
+// Windows: when sanitizing "prn" would collide with an existing "prn~1",
+// the sanitizer must cascade to "~2", "~3", etc.
+TEST_F(SyncerTest, NameSanitizationWithCascade) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "prn~1", 1, 10);
+  syncer_->SyncShare();
+  {
+    ReadTransaction tr(dir, __FILE__, __LINE__);
+    Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+            PSTR("prn~1"));
+    ASSERT_TRUE(e.good());
+  }
+  // "prn" sanitizes to "prn~1", which is taken, so it becomes "prn~2".
+  mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
+  syncer_->SyncShare();
+  {
+    ReadTransaction tr(dir, __FILE__, __LINE__);
+    Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+            PSTR("prn~2"));
+    ASSERT_TRUE(e.good());
+  }
+  // "prn~2" is now also taken, so this one cascades to "prn~3".
+  mock_server_->AddUpdateDirectory(3, 0, "prn~2", 1, 30);
+  syncer_->SyncShare();
+  {
+    ReadTransaction tr(dir, __FILE__, __LINE__);
+    Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+            PSTR("prn~3"));
+    ASSERT_TRUE(e.good());
+  }
+}
+
+TEST_F(SyncerTest, GetStuckWithConflictingSanitizedNames) {
+  // We should get stuck here because we get two server updates with exactly the
+  // same name.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "foo:", 1, 10);
+  syncer_->SyncShare();
+  mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
+  // Sync until the engine reports itself stuck, then verify it did.
+  SyncRepeatedlyToTriggerStuckSignal(state_.get());
+  EXPECT_TRUE(SyncerStuck(state_.get()));
+  syncer_events_.clear();
+}
+
+// A local unsynced folder whose UNSANITIZED_NAME matches an incoming server
+// folder's name should be merged with it rather than duplicated: afterwards
+// the root has one child and nothing unapplied or unsynced.
+// Fix: use ASSERT_TRUE(dir.good()) like every other test in this file;
+// CHECK would abort the whole test binary instead of failing this test.
+TEST_F(SyncerTest, MergeFolderWithSanitizedNameMatches) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("Folder"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    // Pretend this was sanitized from the illegal name "Folder:".
+    parent.Put(UNSANITIZED_NAME, PSTR("Folder:"));
+  }
+  mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Directory::ChildHandles children;
+    dir->GetChildHandles(&trans, trans.root_id(), &children);
+    EXPECT_EQ(1, children.size());
+    Directory::UnappliedUpdateMetaHandles unapplied;
+    dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+    EXPECT_EQ(0, unapplied.size());
+    syncable::Directory::UnsyncedMetaHandles unsynced;
+    dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+    EXPECT_EQ(0, unsynced.size());
+    syncer_events_.clear();
+  }
+}
+
+// These two tests are the same as the two above, but they introduce case
+// changes.
+// Case-insensitive variant of GetStuckWithConflictingSanitizedNames:
+// "FOO:" and "foo:" sanitize to names that clash only by case.
+TEST_F(SyncerTest, GetStuckWithSanitizedNamesThatDifferOnlyByCase) {
+  // We should get stuck here because we get two server updates with exactly the
+  // same name.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "FOO:", 1, 10);
+  syncer_->SyncShare();
+  mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
+  SyncRepeatedlyToTriggerStuckSignal(state_.get());
+  EXPECT_TRUE(SyncerStuck(state_.get()));
+  syncer_events_.clear();
+}
+
+// Case-insensitive variant of MergeFolderWithSanitizedNameMatches: the local
+// UNSANITIZED_NAME "FOLDER:" should merge with the server's "Folder:".
+// Fix: use ASSERT_TRUE(dir.good()) like every other test in this file;
+// CHECK would abort the whole test binary instead of failing this test.
+TEST_F(SyncerTest, MergeFolderWithSanitizedNameThatDiffersOnlyByCase) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("FOLDER"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    parent.Put(UNSANITIZED_NAME, PSTR("FOLDER:"));
+  }
+  mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  syncer_->SyncShare();  // Good gracious, these tests are not so good.
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Directory::ChildHandles children;
+    dir->GetChildHandles(&trans, trans.root_id(), &children);
+    EXPECT_EQ(1, children.size());
+    Directory::UnappliedUpdateMetaHandles unapplied;
+    dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+    EXPECT_EQ(0, unapplied.size());
+    syncable::Directory::UnsyncedMetaHandles unsynced;
+    dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+    EXPECT_EQ(0, unsynced.size());
+    syncer_events_.clear();
+  }
+}
+#else // Mac / Linux ...
+
+// Mac/Linux: '/' is the illegal character, sanitized to ':' locally.  As in
+// the Windows variant, a client rename of the sanitized entry must commit
+// the new real name.
+TEST_F(SyncerTest, NameSanitizationWithClientRename) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
+  syncer_->SyncShare();
+  {
+    ReadTransaction tr(dir, __FILE__, __LINE__);
+    Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+            PSTR("okay"));
+    ASSERT_TRUE(e.good());
+  }
+  // "a/b" contains a path separator and is stored sanitized as "a:b".
+  mock_server_->AddUpdateDirectory(2, 0, "a/b", 1, 20);
+  syncer_->SyncShare();
+  {
+    WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+                   PSTR("a:b"));
+    ASSERT_TRUE(e.good());
+    // Simulate a user rename to a legal name.
+    e.PutName(syncable::Name(PSTR("ab")));
+    e.Put(syncable::IS_UNSYNCED, true);
+  }
+  syncer_->SyncShare();
+  {
+    // The most recent commit should carry the renamed (real) name.
+    vector<CommitMessage*>::const_reverse_iterator it =
+        mock_server_->commit_messages().rbegin();
+    ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
+    const sync_pb::SyncEntity *const *s = (*it)->entries().data();
+    int s_len = (*it)->entries_size();
+    ASSERT_EQ(1, s_len);
+    ASSERT_EQ("ab", (*s)[0].name());
+  }
+}
+#endif
+
+namespace {
+// Non-fatally checks that an entry named |name| exists in the root folder of
+// |dir|.  |entry| and |line| describe the call site (a caller-supplied label
+// and __LINE__) so a failure message identifies where the check came from.
+// NOTE(review): the parameter name |entry| is misleading -- it is a caller
+// description, not a syncable entry.
+void VerifyExistsWithNameInRoot(syncable::Directory *dir,
+                                const PathString &name,
+                                const string &entry,
+                                int line) {
+  ReadTransaction tr(dir, __FILE__, __LINE__);
+  Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
+          name);
+  EXPECT_TRUE(e.good()) << "failed on call from " << entry << ":" << line;
+}
+} // namespace
+
+// Extended-attribute values are binary blobs and may contain embedded NUL
+// bytes; verify a value with a '\0' round-trips through an update intact.
+// Fix: string literals must not bind to non-const char* -- that conversion
+// is deprecated/ill-formed in standard C++; use const char*.
+TEST_F(SyncerTest, ExtendedAttributeWithNullCharacter) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  int xattr_count = 2;
+  PathString xattr_keys[] = { PSTR("key"), PSTR("key2") };
+  syncable::Blob xattr_values[2];
+  const char* value[] = { "value", "val\0ue" };
+  int value_length[] = { 5, 6 };
+  // Build the blobs byte-by-byte so the embedded NUL survives.
+  for (int i = 0; i < xattr_count; i++) {
+    for (int j = 0; j < value_length[i]; j++)
+      xattr_values[i].push_back(value[i][j]);
+  }
+  sync_pb::SyncEntity* ent =
+      mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateExtendedAttributes(
+      ent, xattr_keys, xattr_values, xattr_count);
+
+  // Add some other items.
+  mock_server_->AddUpdateBookmark(2, 0, "fred", 2, 10);
+  mock_server_->AddUpdateBookmark(3, 0, "sue", 15, 10);
+
+  syncer_->SyncShare();
+  ReadTransaction trans(dir, __FILE__, __LINE__);
+  Entry entry1(&trans, syncable::GET_BY_ID, ids_.FromNumber(1));
+  ASSERT_TRUE(entry1.good());
+  EXPECT_EQ(1, entry1.Get(syncable::BASE_VERSION));
+  EXPECT_EQ(1, entry1.Get(syncable::SERVER_VERSION));
+  set<ExtendedAttribute> client_extended_attributes;
+  entry1.GetAllExtendedAttributes(&trans, &client_extended_attributes);
+  EXPECT_EQ(xattr_count, client_extended_attributes.size());
+  // Verify each stored attribute byte-for-byte, including the NUL.
+  for (int i = 0; i < xattr_count; i++) {
+    ExtendedAttributeKey key(entry1.Get(syncable::META_HANDLE), xattr_keys[i]);
+    ExtendedAttribute expected_xattr(&trans, syncable::GET_BY_HANDLE, key);
+    EXPECT_TRUE(expected_xattr.good());
+    for (int j = 0; j < value_length[i]; ++j) {
+      EXPECT_EQ(xattr_values[i][j],
+                static_cast<char>(expected_xattr.value().at(j)));
+    }
+  }
+  Entry entry2(&trans, syncable::GET_BY_ID, ids_.FromNumber(2));
+  ASSERT_TRUE(entry2.good());
+  Entry entry3(&trans, syncable::GET_BY_ID, ids_.FromNumber(3));
+  ASSERT_TRUE(entry3.good());
+}
+
+// The simplest download case: one new directory update from the server is
+// applied cleanly, with version fields set and no conflict/unsynced flags.
+TEST_F(SyncerTest, TestBasicUpdate) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  string id = "some_id";
+  string parent_id = "0";
+  string name = "in_root";
+  int64 version = 10;
+  int64 timestamp = 10;
+  mock_server_->AddUpdateDirectory(id, parent_id, name, version, timestamp);
+
+  syncer_->SyncShare(state_.get());
+  SyncerStatus status(NULL, state_.get());
+  EXPECT_EQ(0, status.stalled_updates());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    Entry entry(&trans, GET_BY_ID,
+               syncable::Id::CreateFromServerId("some_id"));
+    ASSERT_TRUE(entry.good());
+    EXPECT_TRUE(entry.Get(IS_DIR));
+    // Both versions equal the server's; the item is fully applied...
+    EXPECT_EQ(entry.Get(SERVER_VERSION), version);
+    EXPECT_EQ(entry.Get(BASE_VERSION), version);
+    // ...with nothing pending in either direction and not deleted.
+    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
+    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
+    EXPECT_FALSE(entry.Get(IS_DEL));
+  }
+}
+
+// Exercises a grab-bag of update-application outcomes: clean apply, name
+// clash, unknown parent (and descendants thereof), newer version, circular
+// parent move, dir/bookmark type flip, and stale-version drops.
+// Fix: the "child of unknown parent" assertion re-tested |name_clash|
+// (a copy-paste bug); it now tests |bad_parent| as the message describes.
+TEST_F(SyncerTest, IllegalAndLegalUpdates) {
+  Id root = ids_.root();
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Should apply just fine.
+  mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10);
+
+  // Name clash: this is a conflict.
+  mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10);
+
+  // Unknown parent: should never be applied. "-80" is a legal server ID,
+  // because any string sent by the server is a legal server ID in the sync
+  // protocol, but it's not the ID of any item known to the client. This
+  // update should succeed validation, but be stuck in the unapplied state
+  // until an item with the server ID "-80" arrives.
+  mock_server_->AddUpdateDirectory(3, -80, "bad_parent", 10, 10);
+
+  syncer_->SyncShare(state_.get());
+
+  ConflictResolutionView conflict_view(state_.get());
+  SyncerStatus status(NULL, state_.get());
+  // Ids 2 and 3 are expected to be in conflict now.
+  EXPECT_EQ(2, conflict_view.conflicting_updates());
+  EXPECT_EQ(0, status.stalled_updates());
+
+  // These entries will be used in the second set of updates.
+  mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10);
+  mock_server_->AddUpdateDirectory(5, 0, "circular1", 10, 10);
+  mock_server_->AddUpdateDirectory(6, 5, "circular2", 10, 10);
+  mock_server_->AddUpdateDirectory(9, 3, "bad_parent_child", 10, 10);
+  mock_server_->AddUpdateDirectory(100, 9, "bad_parent_child2", 10, 10);
+  mock_server_->AddUpdateDirectory(10, 0, "dir_to_bookmark", 10, 10);
+
+  syncer_->SyncShare(state_.get());
+  // The three items with an unresolved parent should be unapplied (3, 9, 100).
+  // The name clash should also still be in conflict.
+  EXPECT_EQ(4, conflict_view.conflicting_updates());
+  EXPECT_EQ(0, status.stalled_updates());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(name_clash.good());
+    EXPECT_TRUE(name_clash.Get(IS_UNAPPLIED_UPDATE));
+
+    Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
+    ASSERT_TRUE(bad_parent.good());
+    // Was a copy-paste bug: previously asserted on |name_clash| here.
+    EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE))
+        << "child of unknown parent should be in conflict";
+
+    Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
+    ASSERT_TRUE(bad_parent_child.good());
+    EXPECT_TRUE(bad_parent_child.Get(IS_UNAPPLIED_UPDATE))
+        << "grandchild of unknown parent should be in conflict";
+
+    Entry bad_parent_child2(&trans, GET_BY_ID, ids_.FromNumber(100));
+    ASSERT_TRUE(bad_parent_child2.good());
+    EXPECT_TRUE(bad_parent_child2.Get(IS_UNAPPLIED_UPDATE))
+        << "great-grandchild of unknown parent should be in conflict";
+  }
+
+  // Updating 1 should unblock the clashing item 2.
+  mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20);
+
+  // Moving 5 under 6 will create a cycle: a conflict.
+  mock_server_->AddUpdateDirectory(5, 6, "circular3", 20, 20);
+
+  // Flip the is_dir bit: should fail verify & be dropped.
+  mock_server_->AddUpdateBookmark(10, 0, "dir_to_bookmark", 20, 20);
+  syncer_->SyncShare(state_.get());
+
+  // Version number older than last known: should fail verify & be dropped.
+  mock_server_->AddUpdateDirectory(4, 0, "old_version", 10, 10);
+  syncer_->SyncShare(state_.get());
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
+    ASSERT_TRUE(still_a_dir.good());
+    EXPECT_FALSE(still_a_dir.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(10, still_a_dir.Get(BASE_VERSION));
+    EXPECT_EQ(10, still_a_dir.Get(SERVER_VERSION));
+    EXPECT_TRUE(still_a_dir.Get(IS_DIR));
+
+    Entry rename(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("new_name"));
+    ASSERT_TRUE(rename.good());
+    EXPECT_FALSE(rename.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(ids_.FromNumber(1), rename.Get(ID));
+    EXPECT_EQ(20, rename.Get(BASE_VERSION));
+
+    Entry unblocked(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("in_root"));
+    ASSERT_TRUE(unblocked.good());
+    EXPECT_FALSE(unblocked.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(ids_.FromNumber(2), unblocked.Get(ID));
+    EXPECT_EQ(10, unblocked.Get(BASE_VERSION));
+
+    Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
+    ASSERT_TRUE(ignored_old_version.good());
+    EXPECT_EQ(ignored_old_version.Get(NAME), PSTR("newer_version"));
+    EXPECT_FALSE(ignored_old_version.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(20, ignored_old_version.Get(BASE_VERSION));
+
+    Entry circular_parent_issue(&trans, GET_BY_ID, ids_.FromNumber(5));
+    ASSERT_TRUE(circular_parent_issue.good());
+    EXPECT_TRUE(circular_parent_issue.Get(IS_UNAPPLIED_UPDATE))
+        << "circular move should be in conflict";
+    EXPECT_EQ(circular_parent_issue.Get(PARENT_ID), root_id_);
+    EXPECT_EQ(circular_parent_issue.Get(SERVER_PARENT_ID), ids_.FromNumber(6));
+    EXPECT_EQ(10, circular_parent_issue.Get(BASE_VERSION));
+
+    Entry circular_parent_target(&trans, GET_BY_ID, ids_.FromNumber(6));
+    ASSERT_TRUE(circular_parent_target.good());
+    EXPECT_FALSE(circular_parent_target.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(circular_parent_issue.Get(ID),
+        circular_parent_target.Get(PARENT_ID));
+    EXPECT_EQ(10, circular_parent_target.Get(BASE_VERSION));
+  }
+
+  EXPECT_EQ(0, syncer_events_.size());
+  EXPECT_EQ(4, conflict_view.conflicting_updates());
+}
+
+// The server may rename items at commit time; the mock server prefixes
+// committed names with "renamed_".  Both the committed folder and its child
+// must pick up the server-assigned names, while an unrelated downloaded
+// directory keeps its own name.
+TEST_F(SyncerTest, CommitTimeRename) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create a folder and an entry
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Mix in a directory creation too for later
+  mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
+  mock_server_->SetCommitTimeRename("renamed_");
+  syncer_->SyncShare();
+
+  // Verify it was correctly renamed
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry_folder(&trans, GET_BY_PATH, PSTR("renamed_Folder"));
+    ASSERT_TRUE(entry_folder.good());
+
+    Entry entry_new(&trans, GET_BY_PATH,
+        PSTR("renamed_Folder") + PathString(kPathSeparator)
+        + PSTR("renamed_new_entry"));
+    ASSERT_TRUE(entry_new.good());
+
+    // And that the unrelated directory creation worked without a rename
+    Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
+    EXPECT_TRUE(new_dir.good());
+  }
+}
+
+
+// Commit-time rename with a non-ASCII (UTF-8) prefix: verifies the rename
+// survives the UTF-8 <-> PathString conversions intact.
+TEST_F(SyncerTest, CommitTimeRenameI18N) {
+  // This is utf-8 for the diacritized Internationalization
+  const char* i18nString = "\xc3\x8e\xc3\xb1\x74\xc3\xa9\x72\xc3\xb1"
+      "\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1\xc3\xa5\x6c\xc3\xae"
+      "\xc2\x9e\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1";
+
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create a folder and entry
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
+    ASSERT_TRUE(parent.good());
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Mix in a directory creation too for later
+  mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
+  mock_server_->SetCommitTimeRename(i18nString);
+  syncer_->SyncShare();
+
+  // Verify it was correctly renamed
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    // Build the expected renamed paths from the UTF-8 prefix.
+    PathString expectedFolder;
+    AppendUTF8ToPathString(i18nString, &expectedFolder);
+    AppendUTF8ToPathString("Folder", &expectedFolder);
+    Entry entry_folder(&trans, GET_BY_PATH, expectedFolder);
+    ASSERT_TRUE(entry_folder.good());
+    PathString expected = expectedFolder + PathString(kPathSeparator);
+    AppendUTF8ToPathString(i18nString, &expected);
+    AppendUTF8ToPathString("new_entry", &expected);
+
+    Entry entry_new(&trans, GET_BY_PATH, expected);
+    ASSERT_TRUE(entry_new.good());
+
+    // And that the unrelated directory creation worked without a rename
+    Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
+    EXPECT_TRUE(new_dir.good());
+  }
+}
+
+// A commit-time rename that collides with an existing local entry: the
+// renamed item must be sanitized aside ("renamed_Folder~1") rather than
+// clobbering the collider.
+// Fix: ASSERT_TRUE(collider_folder.good()) must run BEFORE reading
+// collider_folder.Get(...); the original order dereferenced a possibly-bad
+// entry first.
+TEST_F(SyncerTest, CommitTimeRenameCollision) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create a folder to collide with
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry collider(&trans, CREATE, root_id_, PSTR("renamed_Folder"));
+    ASSERT_TRUE(collider.good());
+    collider.Put(IS_DIR, true);
+    collider.Put(IS_UNSYNCED, true);
+  }
+  syncer_->SyncShare();  // Now we have a folder.
+
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry folder(&trans, CREATE, root_id_, PSTR("Folder"));
+    ASSERT_TRUE(folder.good());
+    folder.Put(IS_DIR, true);
+    folder.Put(IS_UNSYNCED, true);
+  }
+
+  mock_server_->set_next_new_id(30000);
+  mock_server_->SetCommitTimeRename("renamed_");
+  syncer_->SyncShare();  // Should collide and rename aside.
+  // This case will only occur if we got a commit time rename aside
+  // and the server attempts to rename to an entry that we know about, but it
+  // does not.
+
+  // Verify it was correctly renamed; one of them should have a sanitized name.
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry collider_folder(&trans, GET_BY_PARENTID_AND_NAME, root_id_,
+                          PSTR("renamed_Folder"));
+    ASSERT_TRUE(collider_folder.good());
+    // The collider keeps its name unsanitized.
+    EXPECT_EQ(collider_folder.Get(UNSANITIZED_NAME), PSTR(""));
+
+    // ID is generated by next_new_id_ and server mock prepending of strings.
+    Entry entry_folder(&trans, GET_BY_ID,
+        syncable::Id::CreateFromServerId("mock_server:30000"));
+    ASSERT_TRUE(entry_folder.good());
+    // A little arbitrary but nothing we can do about that.
+    EXPECT_EQ(entry_folder.Get(NAME), PSTR("renamed_Folder~1"));
+    EXPECT_EQ(entry_folder.Get(UNSANITIZED_NAME), PSTR("renamed_Folder"));
+  }
+}
+
+
+// A commit with a lost response produces an update that has to be reunited
+// with its parent.  The folder's local ID must be swapped for the server ID
+// carried in the update (matched via originator cache GUID + client item
+// ID), and the folder's children must be re-parented to the new ID.
+// Fix: removed the unused local |size|.
+TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create a folder in the root.
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
+    ASSERT_TRUE(entry.good());
+    entry.Put(IS_DIR, true);
+    entry.Put(IS_UNSYNCED, true);
+  }
+
+  // Verify it and pull the ID out of the folder
+  syncable::Id folder_id;
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry(&trans, GET_BY_PATH, PSTR("new_folder"));
+    ASSERT_TRUE(entry.good());
+    folder_id = entry.Get(ID);
+    ASSERT_TRUE(!folder_id.ServerKnows());
+  }
+
+  // Create an entry in the newly created folder.
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry entry(&trans, CREATE, folder_id, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    WriteTestDataToEntry(&trans, &entry);
+  }
+
+  // Verify it and pull the ID out of the entry
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
+        PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_folder_id =
+      syncable::Id::CreateFromServerId("folder_server_id");
+
+  // the following update should cause the folder to both apply the update, as
+  // well as reassociate the id
+  mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
+      "new_folder", new_version, timestamp);
+  mock_server_->SetLastUpdateOriginatorFields(
+      dir->cache_guid(), folder_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Alright! Apply that update!
+  syncer_->SyncShare();
+  {
+    // The folder's ID should have been updated.
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry folder(&trans, GET_BY_PATH, PSTR("new_folder"));
+    ASSERT_TRUE(folder.good());
+    EXPECT_EQ(new_version, folder.Get(BASE_VERSION));
+    EXPECT_EQ(new_folder_id, folder.Get(ID));
+    EXPECT_TRUE(folder.Get(ID).ServerKnows());
+
+    // We changed the id of the parent, old lookups should fail.
+    Entry bad_entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
+        PSTR("new_entry"));
+    EXPECT_FALSE(bad_entry.good());
+
+    // The child's parent should have changed as well.
+    Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, new_folder_id,
+        PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    EXPECT_TRUE(!entry.Get(ID).ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+}
+
+// A commit with a lost response produces an update that has to be reunited with
+// its parent.
+// Simpler sibling of CommitReuniteUpdateAdjustsChildren: a single bookmark's
+// local ID is replaced by the server ID from the re-delivered update.
+TEST_F(SyncerTest, CommitReuniteUpdate) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create an entry in the root.
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    WriteTestDataToEntry(&trans, &entry);
+  }
+  // Verify it and pull the ID out
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+  // Generate an update from the server with a relevant ID reassignment.
+  mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+      "new_entry", new_version, timestamp);
+  // Matching originator fields are what tie the update to the local item.
+  mock_server_->SetLastUpdateOriginatorFields(
+      dir->cache_guid(), entry_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Alright! Apply that update!
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    EXPECT_EQ(new_version, entry.Get(BASE_VERSION));
+    EXPECT_EQ(new_entry_id, entry.Get(ID));
+  }
+}
+
+// A commit with a lost response must work even if the local entry
+// was deleted before the update is applied. We should not duplicate the local
+// entry in this case, but just create another one alongside.
+// We may wish to examine this behavior in the future as it can create hanging
+// uploads that never finish, that must be cleaned up on the server side
+// after some time.
+// Fix: removed the unused local |size|.
+TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  ASSERT_TRUE(dir.good());
+  // Create a entry in the root.
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    WriteTestDataToEntry(&trans, &entry);
+  }
+  // Verify it and pull the ID out
+  syncable::Id entry_id;
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    entry_id = entry.Get(ID);
+    EXPECT_TRUE(!entry_id.ServerKnows());
+    VerifyTestDataInEntry(&trans, &entry);
+  }
+
+  // Now, to emulate a commit response failure, we just don't commit it.
+  int64 new_version = 150;  // any larger value
+  int64 timestamp = 20;  // arbitrary value.
+  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
+
+  // Generate an update from the server with a relevant ID reassignment.
+  mock_server_->AddUpdateBookmark(new_entry_id, root_id_,
+      "new_entry", new_version, timestamp);
+  mock_server_->SetLastUpdateOriginatorFields(
+      dir->cache_guid(),
+      entry_id.GetServerId());
+
+  // We don't want it accidentally committed, just the update applied.
+  mock_server_->set_conflict_all_commits(true);
+
+  // Purposefully delete the entry now before the update application finishes.
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    entry.Put(syncable::IS_DEL, true);
+  }
+
+  // Just don't CHECK fail in sync, have the update split.
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    // The server's copy arrives as a fresh, live entry...
+    Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+    ASSERT_TRUE(entry.good());
+    EXPECT_FALSE(entry.Get(IS_DEL));
+
+    // ...while the locally deleted original remains deleted alongside it.
+    Entry old_entry(&trans, GET_BY_ID, entry_id);
+    ASSERT_TRUE(old_entry.good());
+    EXPECT_TRUE(old_entry.Get(IS_DEL));
+  }
+}
+
+// TODO(chron): Add more unsanitized name tests
+// Names containing a path separator ("A/A", "B/B") need sanitization; this
+// checks that conflict resolution still matches entries with such names.
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A/A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B/B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+
+ // Fake a conflict on A: both locally edited and carrying a newer pending
+ // server update. B only has a pending update.
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(IS_UNAPPLIED_UPDATE, true);
+ A.Put(SERVER_VERSION, 20);
+
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNAPPLIED_UPDATE, true);
+ B.Put(SERVER_VERSION, 20);
+ }
+ LoopSyncShare(syncer_);
+ syncer_events_.clear();
+ mock_server_->set_conflict_all_commits(false);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ // Both conflicts must resolve cleanly: no unsynced or unapplied state
+ // remains, and the server version was accepted.
+ Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ EXPECT_EQ(A.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(A.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(A.Get(SERVER_VERSION), 20);
+
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_EQ(B.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(B.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(B.Get(SERVER_VERSION), 20);
+ }
+}
+
+// Same conflict scenario as ConflictMatchingEntryHandlesUnsanitizedNames,
+// but with plain names that require no sanitization.
+TEST_F(SyncerTest, ConflictMatchingEntryHandlesNormalNames) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+
+ // Fake a conflict on A (local edit + pending update); B only has a
+ // pending update.
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(IS_UNAPPLIED_UPDATE, true);
+ A.Put(SERVER_VERSION, 20);
+
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNAPPLIED_UPDATE, true);
+ B.Put(SERVER_VERSION, 20);
+ }
+ LoopSyncShare(syncer_);
+ syncer_events_.clear();
+ mock_server_->set_conflict_all_commits(false);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ // Both conflicts must resolve cleanly, accepting the server version.
+ Entry A(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ EXPECT_EQ(A.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(A.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(A.Get(SERVER_VERSION), 20);
+
+ Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ EXPECT_EQ(B.Get(IS_UNSYNCED), false);
+ EXPECT_EQ(B.Get(IS_UNAPPLIED_UPDATE), false);
+ EXPECT_EQ(B.Get(SERVER_VERSION), 20);
+ }
+}
+
+// Updates are delivered with children before their parents exist locally;
+// looping the syncer must eventually apply the whole chain.
+TEST_F(SyncerTest, ReverseFolderOrderingTest) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ mock_server_->AddUpdateDirectory(4, 3, "ggchild", 10, 10);
+ mock_server_->AddUpdateDirectory(3, 2, "gchild", 10, 10);
+ mock_server_->AddUpdateDirectory(5, 4, "gggchild", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 1, "child", 10, 10);
+ mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10);
+ LoopSyncShare(syncer_);
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ // The deepest node must be reachable under its parent once all applied.
+ Entry child(&trans, syncable::GET_BY_PARENTID_AND_NAME, ids_.FromNumber(4),
+ PSTR("gggchild"));
+ ASSERT_TRUE(child.good());
+}
+
+// Mid-commit callback: creates a new unsynced folder (also named "bob")
+// inside the existing root folder "bob", simulating a user creating an
+// entry while a commit is in flight.
+bool CreateFolderInBob(Directory* dir) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("bob"));
+ // Fail fast if the lookup missed, instead of reading the ID of a bad entry.
+ CHECK(bob.good());
+ MutableEntry entry2(&trans, syncable::CREATE, bob.Get(syncable::ID),
+ PSTR("bob"));
+ CHECK(entry2.good());
+ entry2.Put(syncable::IS_DIR, true);
+ entry2.Put(syncable::IS_UNSYNCED, true);
+ return true;
+}
+
+// An entry created inside a just-committed folder, while the commit is still
+// in flight, must end up under that folder on the next pass.
+TEST_F(SyncerTest, EntryCreatedInNewFolderMidSync) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, syncable::CREATE, trans.root_id(), PSTR("bob"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ }
+ // CreateFolderInBob fires in the middle of the commit.
+ mock_server_->SetMidCommitCallbackFunction(CreateFolderInBob);
+ syncer_->SyncShare(BUILD_COMMIT_REQUEST, SYNCER_END);
+ // Only the outer "bob" was part of this commit; the mid-commit child
+ // arrived too late to be included.
+ EXPECT_EQ(1, mock_server_->committed_ids().size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ // Both /bob and /bob/bob must exist locally.
+ PathChar path[] = {*kPathSeparator, 'b', 'o', 'b', 0};
+ Entry entry(&trans, syncable::GET_BY_PATH, path);
+ ASSERT_TRUE(entry.good());
+ PathChar path2[] = {*kPathSeparator, 'b', 'o', 'b',
+ *kPathSeparator, 'b', 'o', 'b', 0};
+ Entry entry2(&trans, syncable::GET_BY_PATH, path2);
+ ASSERT_TRUE(entry2.good());
+ }
+}
+
+// Mid-commit callback: marks the existing root entries "fred" and "ginger"
+// as locally modified (and no longer SYNCING) while a sync is in flight.
+bool TouchFredAndGingerInRoot(Directory* dir) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("fred"));
+ CHECK(fred.good());
+ // Equivalent to touching the entry.
+ fred.Put(syncable::IS_UNSYNCED, true);
+ fred.Put(syncable::SYNCING, false);
+ MutableEntry ginger(&trans, syncable::GET_BY_PARENTID_AND_NAME,
+ trans.root_id(), PSTR("ginger"));
+ CHECK(ginger.good());
+ ginger.Put(syncable::IS_UNSYNCED, true);
+ ginger.Put(syncable::SYNCING, false);
+ return true;
+}
+
+// Regression test: a server update carrying a negative ID must not crash
+// the syncer.
+TEST_F(SyncerTest, NegativeIDInUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(-10, 0, "bad", 40, 40);
+ syncer_->SyncShare();
+ // The negative id would make us CHECK!
+}
+
+// A committed item that then receives an external update, with all further
+// commits conflicting, must not crash no matter how many times we sync.
+TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ // Create an item.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred_match(&trans, CREATE, trans.root_id(),
+ PSTR("fred_match"));
+ ASSERT_TRUE(fred_match.good());
+ WriteTestDataToEntry(&trans, &fred_match);
+ }
+ // Commit it.
+ syncer_->SyncShare();
+ EXPECT_EQ(1, mock_server_->committed_ids().size());
+ mock_server_->set_conflict_all_commits(true);
+ syncable::Id fred_match_id;
+ {
+ // Now receive a change from outside.
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry fred_match(&trans, GET_BY_PATH, PSTR("fred_match"));
+ ASSERT_TRUE(fred_match.good());
+ // The commit above must have given the entry a server ID.
+ EXPECT_TRUE(fred_match.Get(ID).ServerKnows());
+ fred_match_id = fred_match.Get(ID);
+ mock_server_->AddUpdateBookmark(fred_match_id, trans.root_id(),
+ "fred_match", 40, 40);
+ }
+ // Run the syncer repeatedly; no iteration should crash or CHECK-fail.
+ for (int i = 0 ; i < 30 ; ++i) {
+ syncer_->SyncShare();
+ }
+}
+
+TEST_F(SyncerTest, NameClashWithResolverInconsistentUpdates) {
+ // I'm unsure what the client should really do when the scenario in this old
+ // test occurs. The set of updates we've received are not consistent.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ const char* base_name = "name_clash_with_resolver";
+ const char* full_name = "name_clash_with_resolver.htm";
+ PathChar* base_name_p = PSTR("name_clash_with_resolver");
+ mock_server_->AddUpdateBookmark(1, 0, full_name, 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ // A second server entry arrives with the same name as the (now locally
+ // edited) first one, forcing the resolver to break the name clash.
+ mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare();
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare();
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+
+ // The local entry was renamed: same base and ".htm" extension, with
+ // some uniquifying text in between. The new server entry keeps the
+ // original name.
+ EXPECT_EQ(base_name_p, id1name.substr(0, strlen(base_name)));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_LE(id1name.length(), 200ul);
+ EXPECT_EQ(PSTR("name_clash_with_resolver.htm"), id2.Get(NAME));
+ }
+}
+
+// A locally-renamed entry clashes with an incoming server entry of the same
+// name; the resolver must rename the local one, preserving its extension.
+TEST_F(SyncerTest, NameClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ const char* base_name = "name_clash_with_resolver";
+ const char* full_name = "name_clash_with_resolver.htm";
+ PathChar* base_name_p = PSTR("name_clash_with_resolver");
+ PathChar* full_name_p = PSTR("name_clash_with_resolver.htm");
+ mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(NAME, full_name_p);
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
+ // We do NOT use LoopSyncShare here because of the way that
+ // mock_server_->conflict_n_commits works.
+ // It will only conflict the first n commits, so if we let the syncer loop,
+ // the second commit of the update will succeed even though it shouldn't.
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ EXPECT_EQ(0, syncer_events_.size());
+ syncer_events_.clear();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+
+ // Local entry got a uniquified name (same base, same ".htm" suffix);
+ // the server's entry keeps the contested name.
+ EXPECT_EQ(base_name_p, id1name.substr(0, strlen(base_name)));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_LE(id1name.length(), 200ul);
+ EXPECT_EQ(full_name_p, id2.Get(NAME));
+ }
+}
+
+// Same clash scenario as NameClashWithResolver, but with a 254-character
+// name, to exercise the resolver's renaming near the length limit.
+TEST_F(SyncerTest, VeryLongNameClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ string name;
+ PathString name_w;
+ name.resize(250, 'X');
+ name_w.resize(250, 'X');
+ name.append(".htm");
+ name_w.append(PSTR(".htm"));
+ mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(NAME, name_w);
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->AddUpdateBookmark(2, 0, name, 10, 10);
+ mock_server_->set_conflict_n_commits(1);
+ // We do NOT use LoopSyncShare here because of the way that
+ // mock_server_->conflict_n_commits works.
+ // It will only conflict the first n commits, so if we let the syncer loop,
+ // the second commit of the update will succeed even though it shouldn't.
+ syncer_->SyncShare(state_.get());
+ mock_server_->set_conflict_n_commits(1);
+ syncer_->SyncShare(state_.get());
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+ // The renamed local entry must keep its ".htm" suffix; the server's
+ // entry keeps the full original name.
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_EQ(name_w, id2.Get(NAME));
+ }
+}
+
+// Name-clash resolution when the contested name starts with a dot (".htm"):
+// the renamed local entry must still begin with the original name.
+TEST_F(SyncerTest, NameClashWithResolverAndDotStartedName) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateBookmark(1, 0, ".bob.htm", 10, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(NAME, PSTR(".htm"));
+ WriteTestDataToEntry(&trans, &entry);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(2, 0, ".htm", 10, 10);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ PathString id1name = id1.Get(NAME);
+ // Renamed local entry keeps ".htm" as its prefix; the server entry
+ // owns the plain ".htm" name.
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(0, 4));
+ EXPECT_EQ(PSTR(".htm"), id2.Get(NAME));
+ }
+}
+
+// Repeated clashes on the same name: three successive local edits collide
+// with incoming "in_root.htm" entries. The three older entries must each be
+// uniquified; only the newest entry keeps the original name.
+TEST_F(SyncerTest, ThreeNamesClashWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->set_conflict_all_commits(true);
+ mock_server_->AddUpdateBookmark(1, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ // Each round: a new server entry with the clashing name arrives while the
+ // previous one is marked locally edited.
+ mock_server_->AddUpdateBookmark(2, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(3, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entry.good());
+ ASSERT_FALSE(entry.Get(IS_DEL));
+ entry.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateBookmark(4, 0, "in_root.htm", 10, 10);
+ LoopSyncShare(syncer_);
+ LoopSyncShare(syncer_);
+ EXPECT_EQ(0, syncer_events_.size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ ASSERT_TRUE(id4.good());
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+ // Entries 1-3 were uniquified: "in_root" prefix, ".htm" suffix, but
+ // never the exact contested name. Entry 4 keeps "in_root.htm".
+ PathString id1name = id1.Get(NAME);
+ ASSERT_GE(id1name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id1name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id1name.substr(id1name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id1.Get(NAME));
+ PathString id2name = id2.Get(NAME);
+ ASSERT_GE(id2name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id2name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id2name.substr(id2name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id2.Get(NAME));
+ PathString id3name = id3.Get(NAME);
+ ASSERT_GE(id3name.length(), 4ul);
+ EXPECT_EQ(PSTR("in_root"), id3name.substr(0, 7));
+ EXPECT_EQ(PSTR(".htm"), id3name.substr(id3name.length() - 4));
+ EXPECT_NE(PSTR("in_root.htm"), id3.Get(NAME));
+ EXPECT_EQ(PSTR("in_root.htm"), id4.Get(NAME));
+ }
+}
+
+// In the event that we have a doubly changed entry — changed on both the
+// client and the server — the conflict resolver should just drop one of the
+// changes and accept the other, leaving a single entry behind.
+TEST_F(SyncerTest, DoublyChangedWithResolver) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_DIR, true);
+ parent.Put(syncable::ID, parent_id_);
+ parent.Put(syncable::BASE_VERSION, 5);
+ MutableEntry child(&wtrans, syncable::CREATE, parent_id_, PSTR("Pete.htm"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::ID, child_id_);
+ child.Put(syncable::BASE_VERSION, 10);
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+ // Server sends a newer version (11) of the same child we edited locally.
+ mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Pete.htm", 11, 10);
+ mock_server_->set_conflict_all_commits(true);
+ LoopSyncShare(syncer_);
+ syncable::Directory::ChildHandles children;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ dir->GetChildHandles(&trans, parent_id_, &children);
+ // We expect the conflict resolver to just clobber the entry.
+ Entry child(&trans, syncable::GET_BY_ID, child_id_);
+ ASSERT_TRUE(child.good());
+ EXPECT_TRUE(child.Get(syncable::IS_UNSYNCED));
+ EXPECT_FALSE(child.Get(syncable::IS_UNAPPLIED_UPDATE));
+ }
+
+ // Only one entry, since we just overwrite one.
+ EXPECT_EQ(1, children.size());
+ syncer_events_.clear();
+}
+
+// We got this repro case when someone was editing entries
+// while sync was occurring. The entry had changed out underneath
+// the user.
+TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ int64 test_time = 123456;
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, syncable::CREATE, root_id_, PSTR("Pete"));
+ ASSERT_TRUE(entry.good());
+ EXPECT_FALSE(entry.Get(ID).ServerKnows());
+ entry.Put(syncable::IS_DIR, true);
+ entry.Put(syncable::IS_UNSYNCED, true);
+ entry.Put(syncable::MTIME, test_time);
+ }
+ syncer_->SyncShare();
+ syncable::Id id;
+ int64 version;
+ int64 server_position_in_parent;
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("Pete"));
+ ASSERT_TRUE(entry.good());
+ id = entry.Get(ID);
+ EXPECT_TRUE(id.ServerKnows());
+ version = entry.Get(BASE_VERSION);
+ server_position_in_parent = entry.Get(SERVER_POSITION_IN_PARENT);
+ }
+ // Echo the committed state back as a server update, identical except for
+ // what the server knows; applying it must not clobber local fields.
+ mock_server_->AddUpdateDirectory(id, root_id_, "Pete", version, 10);
+ mock_server_->SetLastUpdatePosition(server_position_in_parent);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, syncable::GET_BY_ID, id);
+ ASSERT_TRUE(entry.good());
+ // The locally-set mtime must survive the round trip.
+ EXPECT_EQ(entry.Get(MTIME), test_time);
+ }
+}
+
+// Local folder+child pair matches an identical server folder+child pair;
+// after conflict resolution there must be exactly one of each, with no
+// unapplied updates or unsynced entries left over.
+TEST_F(SyncerTest, ParentAndChildBothMatch) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(IS_UNSYNCED, true);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("test.htm"));
+ ASSERT_TRUE(child.good());
+ WriteTestDataToEntry(&wtrans, &child);
+ }
+ mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 10, 10);
+ mock_server_->AddUpdateBookmark(child_id_, parent_id_, "test.htm", 10, 10);
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, root_id_, &children);
+ // One folder in the root, one child in the folder — no duplicates.
+ EXPECT_EQ(1, children.size());
+ dir->GetChildHandles(&trans, parent_id_, &children);
+ EXPECT_EQ(1, children.size());
+ Directory::UnappliedUpdateMetaHandles unapplied;
+ dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+ EXPECT_EQ(0, unapplied.size());
+ syncable::Directory::UnsyncedMetaHandles unsynced;
+ dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+ EXPECT_EQ(0, unsynced.size());
+ syncer_events_.clear();
+ }
+}
+
+// An entry created and deleted locally before ever being committed should
+// never be sent to the server at all.
+TEST_F(SyncerTest, CommittingNewDeleted) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare();
+ EXPECT_EQ(0, mock_server_->committed_ids().size());
+}
+
+// Original problem synopsis:
+// Check failed: entry->Get(BASE_VERSION) <= entry->Get(SERVER_VERSION)
+// Client creates entry, client finishes committing entry. Between
+// commit and getting update back, we delete the entry.
+// We get the update for the entry, but the local one was modified
+// so we store the entry but don't apply it. IS_UNAPPLIED_UPDATE is set.
+// We commit deletion and get a new version number.
+// We apply unapplied updates again before we get the update about the deletion.
+// This means we have an unapplied update where server_version < base_version.
+TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
+ // This test is a little fake: it hand-constructs the inconsistent state
+ // described above rather than reproducing it through the protocol.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("bob"));
+ entry.Put(ID, ids_.FromNumber(20));
+ entry.Put(BASE_VERSION, 1);
+ entry.Put(SERVER_VERSION, 1);
+ entry.Put(SERVER_PARENT_ID, ids_.FromNumber(9999)); // bad parent
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(IS_UNAPPLIED_UPDATE, true);
+ entry.Put(IS_DEL, false);
+ }
+ syncer_->SyncShare(state_.get());
+ syncer_->SyncShare(state_.get());
+ // Two passes must leave no lingering conflicting updates.
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_EQ(0, status.conflicting_updates());
+ syncer_events_.clear();
+}
+
+// Original problem synopsis:
+// Illegal parent
+// Unexpected error during sync if we:
+// make a new folder bob
+// wait for sync
+// make a new folder fred
+// move bob into fred
+// remove bob
+// remove fred
+// if no syncing occurred midway, bob will have an illegal parent
+TEST_F(SyncerTest, DeletingEntryInFolder) {
+ // This test is a little fake
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("existing"));
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(IS_UNSYNCED, true);
+ }
+ syncer_->SyncShare(state_.get());
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ // Create a never-committed folder, move the committed entry into it,
+ // then delete both before any sync happens in between.
+ MutableEntry newfolder(&trans, CREATE, trans.root_id(), PSTR("new"));
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_DIR, true);
+ newfolder.Put(IS_UNSYNCED, true);
+
+ MutableEntry existing(&trans, GET_BY_PATH, PSTR("existing"));
+ ASSERT_TRUE(existing.good());
+ existing.Put(PARENT_ID, newfolder.Get(ID));
+ existing.Put(IS_UNSYNCED, true);
+ EXPECT_TRUE(existing.Get(ID).ServerKnows());
+
+ newfolder.Put(IS_DEL, true);
+ existing.Put(IS_DEL, true);
+ }
+ syncer_->SyncShare(state_.get());
+ // The deletions must commit without errors, conflicts or blocked items.
+ SyncerStatus status(NULL, state_.get());
+ EXPECT_EQ(0, status.error_commits());
+ EXPECT_EQ(0, status.conflicting_commits());
+ EXPECT_EQ(0, status.BlockedItemsSize());
+}
+
+// TODO(sync): Is this test useful anymore?
+// Server deletes a folder that contains a new, uncommitted local child.
+// Applying updates must not destroy the local child.
+TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry newfolder(&trans, CREATE, ids_.FromNumber(1), PSTR("local"));
+ ASSERT_TRUE(newfolder.good());
+ newfolder.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ // Only apply updates — stop before any commit stage runs.
+ syncer_->SyncShare(SYNCER_BEGIN, APPLY_UPDATES);
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ // The local child must still be reachable at bob/local.
+ Entry entry_by_path(&trans, syncable::GET_BY_PATH,
+ PathString(PSTR("bob")) + kPathSeparator + PSTR("local"));
+ ASSERT_TRUE(entry_by_path.good());
+ }
+}
+
+// The server swaps the names of two sibling folders in one update batch;
+// after syncing, the names must be exchanged locally.
+TEST_F(SyncerTest, FolderSwapUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ }
+ syncer_events_.clear();
+}
+
+// A corrupt swap batch (two entries would end up named "bob") must be
+// rejected wholesale, leaving all three folders exactly as they were.
+TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(4096, 0, "alice", 1, 10);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("bob"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("fred"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("alice"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ }
+ // Bad batch: 1024 and 4096 both claim the name "bob".
+ mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
+ mock_server_->AddUpdateDirectory(7801, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(4096, 0, "bob", 2, 20);
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ // All names are unchanged from before the corrupt batch.
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("bob"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("fred"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("alice"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+// Client-side name swap of two folders must commit cleanly even when every
+// commit attempt is conflicted.
+TEST_F(SyncerTest, DISABLED_FolderSwapCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ // A direct rename to "fred" fails while the name is taken, so the swap
+ // goes through a temporary name.
+ EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ // Two commit attempts are expected; their contents are not inspected here.
+ ASSERT_EQ(2, mock_server_->commit_messages().size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+// Two independent client-side name swaps (bob<->fred, sue<->greg) must both
+// commit cleanly even when every commit attempt is conflicted.
+TEST_F(SyncerTest, DISABLED_DualFolderSwapCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ MutableEntry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ ASSERT_TRUE(id4.good());
+ // Direct renames to taken names fail, so each swap goes through a
+ // temporary name.
+ EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+ EXPECT_FALSE(id3.Put(NAME, PSTR("greg")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id4.Put(NAME, PSTR("sue")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("greg")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ id3.Put(IS_UNSYNCED, true);
+ id4.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ // Four commit attempts are expected; their contents are not inspected.
+ ASSERT_EQ(4, mock_server_->commit_messages().size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("greg"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+ Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(id4.good());
+ EXPECT_EQ(PSTR("sue"), id4.Get(NAME));
+ EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+ EXPECT_FALSE(id4.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+// A three-way name rotation (bob->sue, fred->bob, sue->fred) must commit
+// cleanly even when every commit attempt is conflicted.
+TEST_F(SyncerTest, DISABLED_TripleFolderRotateCommit) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id1.good());
+ ASSERT_TRUE(id2.good());
+ ASSERT_TRUE(id3.good());
+ // A direct rename to a taken name fails; rotate via a temporary name.
+ EXPECT_FALSE(id1.Put(NAME, PSTR("sue")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+ EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+ EXPECT_TRUE(id3.Put(NAME, PSTR("fred")));
+ EXPECT_TRUE(id1.Put(NAME, PSTR("sue")));
+ id1.Put(IS_UNSYNCED, true);
+ id2.Put(IS_UNSYNCED, true);
+ id3.Put(IS_UNSYNCED, true);
+ }
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ // Two commit attempts are expected; their contents are not inspected here.
+ ASSERT_EQ(2, mock_server_->commit_messages().size());
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(id1.good());
+ EXPECT_EQ(PSTR("sue"), id1.Get(NAME));
+ EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+ EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+ Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(id2.good());
+ EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+ EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+ EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+ Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(id3.good());
+ EXPECT_EQ(PSTR("fred"), id3.Get(NAME));
+ EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+ EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+ }
+ syncer_events_.clear();
+}
+
+// TODO(chron): New set of folder swap commit tests that don't rely
+// on transactional commits.
+// The client swaps the names of items 1/2 while the server swaps the names
+// of items 3/4 (with all commits conflicting); verifies both swaps are
+// reflected locally and everything ends clean.
+TEST_F(SyncerTest, DISABLED_ServerAndClientSwap) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
+  mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+    MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(id1.good());
+    ASSERT_TRUE(id2.good());
+    // Swap via a temporary name; a direct rename onto a sibling's name fails.
+    EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
+    EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
+    EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
+    EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
+    id1.Put(IS_UNSYNCED, true);
+    id2.Put(IS_UNSYNCED, true);
+  }
+  mock_server_->set_conflict_all_commits(true);
+  // Server-side swap of items 3 and 4 arrives as ordinary updates.
+  mock_server_->AddUpdateDirectory(3, 0, "greg", 2, 20);
+  mock_server_->AddUpdateDirectory(4, 0, "sue", 2, 20);
+  syncer_->SyncShare();
+  ASSERT_EQ(2, mock_server_->commit_messages().size());
+  // NOTE(review): m0/m1 are captured but never examined; presumably the
+  // (disabled) test was meant to assert on commit contents -- TODO confirm.
+  CommitMessage* m0 = mock_server_->commit_messages()[0];
+  CommitMessage* m1 = mock_server_->commit_messages()[1];
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(id1.good());
+    EXPECT_EQ(PSTR("fred"), id1.Get(NAME));
+    EXPECT_EQ(root_id_, id1.Get(PARENT_ID));
+    EXPECT_FALSE(id1.Get(IS_UNSYNCED));
+    Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(id2.good());
+    EXPECT_EQ(PSTR("bob"), id2.Get(NAME));
+    EXPECT_EQ(root_id_, id2.Get(PARENT_ID));
+    EXPECT_FALSE(id2.Get(IS_UNSYNCED));
+    Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
+    ASSERT_TRUE(id3.good());
+    EXPECT_EQ(PSTR("greg"), id3.Get(NAME));
+    EXPECT_EQ(root_id_, id3.Get(PARENT_ID));
+    EXPECT_FALSE(id3.Get(IS_UNSYNCED));
+    Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
+    ASSERT_TRUE(id4.good());
+    EXPECT_EQ(PSTR("sue"), id4.Get(NAME));
+    EXPECT_EQ(root_id_, id4.Get(PARENT_ID));
+    EXPECT_FALSE(id4.Get(IS_UNSYNCED));
+  }
+  syncer_events_.clear();
+}
+
+// Creates more unsynced items than fit in a single commit batch and checks
+// that the syncer splits them across at least `max_batches` commit messages.
+TEST_F(SyncerTest, CommitManyItemsInOneGo) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  uint32 max_batches = 3;
+  uint32 items_to_commit = kDefaultMaxCommitBatchSize * max_batches;
+  CHECK(dir.good());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    for (uint32 i = 0; i < items_to_commit; i++) {
+      // Each item gets a unique numeric name so none of them clash.
+      string nameutf8 = StringPrintf("%d", i);
+      PathString name(nameutf8.begin(), nameutf8.end());
+      MutableEntry e(&trans, CREATE, trans.root_id(), name);
+      e.Put(IS_UNSYNCED, true);
+      e.Put(IS_DIR, true);
+    }
+  }
+  uint32 num_loops = 0;
+  // Loop until SyncShare reports no more work; the ASSERT_LT guards against
+  // an infinite loop if the syncer never converges.
+  while (syncer_->SyncShare()) {
+    num_loops++;
+    ASSERT_LT(num_loops, max_batches * 2);
+  }
+  EXPECT_GE(mock_server_->commit_messages().size(), max_batches);
+}
+
+// Builds a deep chain of nested folders all named "f", then makes the
+// server delete the root of the chain while part of it is unsynced, and
+// checks the syncer survives repeated SyncShare calls on the huge
+// resulting conflict set.
+TEST_F(SyncerTest, HugeConflict) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  PathString name = PSTR("f");
+  int item_count = 30;  // We should be able to do 300 or 3000 w/o issue.
+  CHECK(dir.good());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    syncable::Id last_id = trans.root_id();
+    // Each new folder is created inside the previous one, forming a chain.
+    for (int i = 0; i < item_count ; i++) {
+      MutableEntry e(&trans, CREATE, last_id, name);
+      e.Put(IS_UNSYNCED, true);
+      e.Put(IS_DIR, true);
+      last_id = e.Get(ID);
+    }
+  }
+  syncer_->SyncShare();
+  CHECK(dir.good());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, root_id_, name);
+    syncable::Id in_root = e.Get(ID);
+    syncable::Id last_id = e.Get(ID);
+    // NOTE(review): the inner MutableEntry `e` below shadows the outer `e`,
+    // and AddUpdateDirectory is always called with `in_root` (the chain's
+    // top) on every iteration -- looks deliberate (repeated deleted updates
+    // for the top folder) but worth confirming.
+    for (int i = 0; i < item_count - 1 ; i++) {
+      MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, last_id, name);
+      ASSERT_TRUE(e.good());
+      mock_server_->AddUpdateDirectory(in_root, root_id_, "BOB", 2, 20);
+      mock_server_->SetLastUpdateDeleted();
+      if (0 == i)
+        e.Put(IS_UNSYNCED, true);
+      last_id = e.Get(ID);
+    }
+  }
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  CHECK(dir.good());
+}
+
+// A server update changes only the case of an item's name ("bob" -> "BOB")
+// while the local copy is unsynced and commits conflict; this combination
+// used to trip an assert in the syncer.
+TEST_F(SyncerTest, CaseChangeNameClashConflict) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry e(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(e.good());
+    e.Put(IS_UNSYNCED, true);
+  }
+  mock_server_->set_conflict_all_commits(true);
+  mock_server_->AddUpdateDirectory(1, 0, "BOB", 2, 20);
+  syncer_->SyncShare();  // USED TO CAUSE AN ASSERT
+  syncer_events_.clear();
+}
+
+// A new server item (id 2) arrives with the same name as an existing local
+// item (id 1) while commits conflict; this used to trip an assert.
+TEST_F(SyncerTest, UnsyncedItemAndUpdate) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  syncer_->SyncShare();
+  mock_server_->set_conflict_all_commits(true);
+  mock_server_->AddUpdateDirectory(2, 0, "bob", 2, 20);
+  syncer_->SyncShare();  // USED TO CAUSE AN ASSERT
+  syncer_events_.clear();
+}
+
+// The server renames its folder ("Folder2" -> "Folder") so it clashes with a
+// locally created folder of the same name; both folders contain a child
+// named "Bookmark".  Verifies both folders survive side by side under the
+// root, updates are fully applied, and the remaining unsynced items commit
+// once conflicts stop.
+TEST_F(SyncerTest, FolderMergeWithChildNameClash) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  // NOTE(review): `root_id` is declared but never used (the fixture member
+  // `root_id_` is used instead); dead local.
+  syncable::Id local_folder_id, root_id;
+  mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder2", 10, 10);
+  mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Bookmark", 10, 10);
+  syncer_->SyncShare();
+  int64 local_folder_handle;
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
+    ASSERT_TRUE(parent.good());
+    local_folder_id = parent.Get(ID);
+    local_folder_handle = parent.Get(META_HANDLE);
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+    MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bookmark"));
+    ASSERT_TRUE(child.good());
+    WriteTestDataToEntry(&wtrans, &child);
+  }
+  // Server folder takes the same name as the local folder.
+  mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 20, 20);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare(syncer_);
+  LoopSyncShare(syncer_);
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    // Both the server folder and the local folder should exist in the root.
+    Directory::ChildHandles children;
+    dir->GetChildHandles(&trans, root_id_, &children);
+    ASSERT_EQ(2, children.size());
+    Entry parent(&trans, GET_BY_ID, parent_id_);
+    ASSERT_TRUE(parent.good());
+    EXPECT_EQ(parent.Get(NAME), PSTR("Folder"));
+    // The two root children are the server folder and the local folder,
+    // in either order.
+    if (local_folder_handle == children[0]) {
+      EXPECT_EQ(children[1], parent.Get(META_HANDLE));
+    } else {
+      EXPECT_EQ(children[0], parent.Get(META_HANDLE));
+      EXPECT_EQ(children[1], local_folder_handle);
+    }
+    // Each folder keeps its own "Bookmark" child.
+    dir->GetChildHandles(&trans, local_folder_id, &children);
+    EXPECT_EQ(1, children.size());
+    dir->GetChildHandles(&trans, parent_id_, &children);
+    EXPECT_EQ(1, children.size());
+    Directory::UnappliedUpdateMetaHandles unapplied;
+    dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
+    EXPECT_EQ(0, unapplied.size());
+    syncable::Directory::UnsyncedMetaHandles unsynced;
+    dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+    EXPECT_EQ(2, unsynced.size());
+  }
+  // Allow commits to succeed; the two unsynced items should now be committed.
+  mock_server_->set_conflict_all_commits(false);
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    syncable::Directory::UnsyncedMetaHandles unsynced;
+    dir->GetUnsyncedMetaHandles(&trans, &unsynced);
+    EXPECT_EQ(0, unsynced.size());
+  }
+  syncer_events_.clear();
+}
+
+// A server update renames an existing bookmark to the same path already
+// taken by a new, unsynced local entry; with commits conflicting, this
+// exercises the name-clash path without asserting on a final state.
+TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateBookmark(1, 0, "Foo.htm", 10, 10);
+  syncer_->SyncShare();
+  int64 local_folder_handle;
+  syncable::Id local_folder_id;
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry new_entry(&wtrans, CREATE, wtrans.root_id(), PSTR("Bar.htm"));
+    ASSERT_TRUE(new_entry.good());
+    local_folder_id = new_entry.Get(ID);
+    local_folder_handle = new_entry.Get(META_HANDLE);
+    new_entry.Put(IS_UNSYNCED, true);
+    MutableEntry old(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(old.good());
+    WriteTestDataToEntry(&wtrans, &old);
+  }
+  // Server renames item 1 to "Bar.htm", clashing with the local new entry.
+  mock_server_->AddUpdateBookmark(1, 0, "Bar.htm", 20, 20);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_events_.clear();
+}
+
+// Circular links should be resolved by the server.
+TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
+  // We don't currently resolve this case locally; this test ensures we don't
+  // crash or corrupt state when the client moves A under B while the server
+  // simultaneously moves B under A.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    A.Put(IS_UNSYNCED, true);
+    ASSERT_TRUE(A.Put(PARENT_ID, ids_.FromNumber(2)));
+    ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+  }
+  // Server makes B a child of A -- the circular link.
+  mock_server_->AddUpdateDirectory(2, 1, "A", 20, 20);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_events_.clear();
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    EXPECT_EQ(A.Get(NAME), PSTR("B"));
+    EXPECT_EQ(B.Get(NAME), PSTR("B"));
+  }
+}
+
+TEST_F(SyncerTest, ConflictSetClassificationError) {
+  // This code used to cause a CHECK failure because we incorrectly thought
+  // a set was only unapplied updates.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  {
+    // Hand-craft a conflict set where A is both unsynced and an unapplied
+    // update, while B is only an unapplied update, with crossed server names.
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    A.Put(IS_UNSYNCED, true);
+    A.Put(IS_UNAPPLIED_UPDATE, true);
+    A.Put(SERVER_NAME, PSTR("B"));
+    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    B.Put(IS_UNAPPLIED_UPDATE, true);
+    B.Put(SERVER_NAME, PSTR("A"));
+  }
+  syncer_->SyncShare();
+  syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, SwapEntryNames) {
+  // Simple transaction test: swap names A<->B via a temporary name "C"
+  // inside a single write transaction, then sync with conflicting commits.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  {
+    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(A.good());
+    A.Put(IS_UNSYNCED, true);
+    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    B.Put(IS_UNSYNCED, true);
+    ASSERT_TRUE(A.Put(NAME, PSTR("C")));
+    ASSERT_TRUE(B.Put(NAME, PSTR("A")));
+    ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+  }
+  syncer_->SyncShare();
+  syncer_events_.clear();
+}
+
+// Both the client and the server delete bookmark B; the server's deletion
+// arrives renamed to clash with existing item "A".  Verifies B settles
+// with neither unsynced nor unapplied-update state.
+TEST_F(SyncerTest, DualDeletionWithNewItemNameClash) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+  mock_server_->AddUpdateBookmark(2, 0, "B", 10, 10);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    WriteTestDataToEntry(&trans, &B);
+    B.Put(IS_DEL, true);
+  }
+  // Server deletes the same item, under the clashing name "A".
+  mock_server_->AddUpdateBookmark(2, 0, "A", 11, 11);
+  mock_server_->SetLastUpdateDeleted();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry B(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(B.good());
+    EXPECT_FALSE(B.Get(IS_UNSYNCED));
+    EXPECT_FALSE(B.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Client moves bob under fred while the server moves fred under bob,
+// creating a directory loop.  Verifies both stay unsynced (conflict
+// pending) with no unapplied updates left behind.
+TEST_F(SyncerTest, FixDirectoryLoopConflict) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+  }
+  // Server moves fred under bob -- the other half of the loop.
+  mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(fred.good());
+    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Client edits a bookmark the server has deleted.  The resolution keeps
+// the local edit: the item is reborn with a fresh client-side ID (the old
+// server ID is gone) and stays unsynced for recommit.
+TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    WriteTestDataToEntry(&trans, &bob);
+  }
+  mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
+    ASSERT_TRUE(bob.good());
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    // The surviving entry has a new local ID the server has never seen.
+    EXPECT_FALSE(bob.Get(ID).ServerKnows());
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_DEL));
+  }
+  syncer_events_.clear();
+}
+
+// This test is disabled because we actually enforce the opposite behavior in:
+// ConflictResolverMergesLocalDeleteAndServerUpdate for bookmarks.
+// (As written it expects the server's write to win over a local delete,
+// resurrecting the item under its original server ID.)
+TEST_F(SyncerTest, DISABLED_ResolveWeDeletedTheyWrote) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+  }
+  mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
+    ASSERT_TRUE(bob.good());
+    EXPECT_EQ(bob.Get(ID), ids_.FromNumber(1));
+    EXPECT_FALSE(bob.Get(IS_UNSYNCED));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_DEL));
+  }
+  syncer_events_.clear();
+}
+
+// Client moves bob into fred; server deletes fred.  Resolution keeps a
+// fred folder (recreated, unsynced) in the root with bob inside it.
+TEST_F(SyncerTest, ServerDeletingFolderWeHaveMovedSomethingInto) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+  }
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+    ASSERT_TRUE(fred.good());
+    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// TODO(ncarter): This test is bogus, but it actually seems to hit an
+// interesting case the 4th time SyncShare is called.
+// NOTE(review): unlike its neighbors, this test threads `state_.get()`
+// through every SyncShare call -- presumably to preserve syncer state
+// across iterations; confirm before re-enabling.
+TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare(state_.get());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    WriteTestDataToEntry(&trans, &bob);
+  }
+  syncer_->SyncShare(state_.get());
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    EXPECT_FALSE(bob.Get(IS_UNSYNCED));
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+  }
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  syncer_events_.clear();
+  // These SyncShares would cause a CHECK because we'd think we were stuck.
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  syncer_->SyncShare(state_.get());
+  EXPECT_EQ(0, syncer_events_.size());
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+    ASSERT_TRUE(fred.good());
+    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Same scenario as ServerDeletingFolderWeHaveMovedSomethingInto, but the
+// final state is checked by path lookup: fred is recreated with a purely
+// local ID, still rooted at the top, with bob inside.
+TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+  }
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+    ASSERT_TRUE(fred.good());
+    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+    // The recreated folder has a fresh local ID unknown to the server.
+    EXPECT_FALSE(fred.Get(ID).ServerKnows());
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+    EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+namespace {
+
+// Countdown controlling when MoveBobIntoID2 fires; set by the test before
+// installing the callback.
+int move_bob_count;
+
+// Mid-commit callback: decrements move_bob_count on each call and, when it
+// reaches exactly zero, moves item 1 ("bob") under item 2 as an unsynced
+// change.  Returns true only on the call that performs the move.
+// NOTE(review): `first_count` and `second_count` are dead locals -- assigned
+// but never read; candidates for removal.
+bool MoveBobIntoID2(Directory* dir) {
+  int first_count = move_bob_count;
+  if (--move_bob_count > 0)
+    return false;
+  int second_count = move_bob_count;
+  if (move_bob_count == 0) {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    Entry alice(&trans, GET_BY_ID, TestIdFactory::FromNumber(2));
+    CHECK(alice.good());
+    CHECK(!alice.Get(IS_DEL));
+    MutableEntry bob(&trans, GET_BY_ID, TestIdFactory::FromNumber(1));
+    CHECK(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, alice.Get(ID));
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+// Client renames fred to "Alice"; server deletes fred; then, via the
+// mid-commit callback, bob is moved into the folder at a point where the
+// syncer believes it has a simple conflict but actually has a conflict set.
+// Alice should survive as a recreated local folder containing bob.
+TEST_F(SyncerTest,
+       WeMovedSomethingIntoAFolderServerHasDeletedAndWeRenamed) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(fred.good());
+    fred.Put(IS_UNSYNCED, true);
+    fred.Put(NAME, PSTR("Alice"));
+  }
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  // This test is a little brittle. We want to move the item into the folder
+  // such that we think we're dealing with a simple conflict, but in reality
+  // it's actually a conflict set.
+  move_bob_count = 2;
+  mock_server_->SetMidCommitCallbackFunction(MoveBobIntoID2);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry alice(&trans, GET_BY_PATH, PSTR("Alice"));
+    ASSERT_TRUE(alice.good());
+    EXPECT_TRUE(alice.Get(IS_UNSYNCED));
+    // Recreated folder carries a fresh local ID.
+    EXPECT_FALSE(alice.Get(ID).ServerKnows());
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
+    EXPECT_EQ(alice.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+
+// Client both moves bob into fred AND creates a brand-new entry inside
+// fred; server deletes fred.  Both children must survive inside the
+// recreated (local-ID) fred folder.
+TEST_F(SyncerTest,
+       WeMovedADirIntoAndCreatedAnEntryInAFolderServerHasDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  syncable::Id new_item_id;
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+    MutableEntry new_item(&trans, CREATE, ids_.FromNumber(2), PSTR("new_item"));
+    WriteTestDataToEntry(&trans, &new_item);
+    new_item_id = new_item.Get(ID);
+  }
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+  mock_server_->SetLastUpdateDeleted();
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+    ASSERT_TRUE(fred.good());
+    // Build the path "fred<sep>new_item" character by character because the
+    // path separator is platform-dependent.
+    PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
+                       'n', 'e', 'w', '_', 'i', 't', 'e', 'm', 0};
+    Entry new_item(&trans, GET_BY_PATH, path);
+    EXPECT_TRUE(new_item.good());
+    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+    EXPECT_FALSE(fred.Get(ID).ServerKnows());
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+    EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Client deletes bob; server moves fred into bob.  Resolution undeletes
+// bob (it stays unsynced) so fred's new parent exists.
+TEST_F(SyncerTest, ServerMovedSomethingIntoAFolderWeHaveDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  LoopSyncShare(syncer_);
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+  }
+  mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare(syncer_);
+  LoopSyncShare(syncer_);
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(fred.good());
+    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(fred.Get(PARENT_ID), bob.Get(ID));
+    EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, ServerMovedAFolderIntoAFolderWeHaveDeletedAndMovedIntoIt) {
+  // This test combines circular folders and deleted parents: the client
+  // deletes bob and re-parents (the deleted) bob under fred, while the
+  // server moves fred under bob.  bob should stay deleted (and unsynced)
+  // under fred, with fred back in the root.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+  syncer_->SyncShare();
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+    bob.Put(PARENT_ID, ids_.FromNumber(2));
+  }
+  mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  syncer_->SyncShare();
+  syncer_->SyncShare();
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(fred.good());
+    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_DEL));
+    EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
+    EXPECT_EQ(bob.Get(PARENT_ID), fred.Get(ID));
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Client deletes bob; server creates a brand-new child (fred) inside bob.
+// Resolution undeletes bob (leaving it unsynced) so fred has a live parent.
+TEST_F(SyncerTest, NewServerItemInAFolderWeHaveDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  LoopSyncShare(syncer_);
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+  }
+  mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare(syncer_);
+  LoopSyncShare(syncer_);
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(fred.good());
+    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_EQ(fred.Get(PARENT_ID), bob.Get(ID));
+    EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+// Like NewServerItemInAFolderWeHaveDeleted, but the deleted parent is a
+// two-level hierarchy (bob/joe); a new server item (fred) lands inside joe,
+// so the whole deleted chain must be undeleted.
+TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+  LoopSyncShare(syncer_);
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+    MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(joe.good());
+    joe.Put(IS_UNSYNCED, true);
+    joe.Put(IS_DEL, true);
+  }
+  mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare(syncer_);
+  LoopSyncShare(syncer_);
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(joe.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+    ASSERT_TRUE(fred.good());
+    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+    EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
+    EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
+    EXPECT_EQ(bob.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted2) {
+  // The difference here is that the hierarchy's not in the root. We have
+  // another entry (susan) that shouldn't be touched by the undelete of the
+  // bob/joe chain nested inside it.
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  CHECK(dir.good());
+  mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
+  mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
+  mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+  LoopSyncShare(syncer_);
+  {
+    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    bob.Put(IS_UNSYNCED, true);
+    bob.Put(IS_DEL, true);
+    MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(joe.good());
+    joe.Put(IS_UNSYNCED, true);
+    joe.Put(IS_DEL, true);
+  }
+  mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+  mock_server_->set_conflict_all_commits(true);
+  LoopSyncShare(syncer_);
+  LoopSyncShare(syncer_);
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+    ASSERT_TRUE(bob.good());
+    Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+    ASSERT_TRUE(joe.good());
+    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+    ASSERT_TRUE(fred.good());
+    Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+    ASSERT_TRUE(susan.good());
+    // susan was never part of the conflict and must remain untouched.
+    EXPECT_FALSE(susan.Get(IS_UNSYNCED));
+    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
+    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+    EXPECT_TRUE(joe.Get(IS_UNSYNCED));
+    EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
+    EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
+    EXPECT_EQ(bob.Get(PARENT_ID), susan.Get(ID));
+    EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
+    EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+    EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
+  }
+  syncer_events_.clear();
+}
+
+namespace {
+
+// Countdown before DeleteSusanInRoot performs the delete; set by the test.
+// NOTE(review): "countown" is a typo for "countdown"; renaming would require
+// touching the test that sets it, so it is only flagged here.
+int countown_till_delete = 0;
+
+// Pre-conflict-resolution hook: on the call where the countdown hits zero,
+// deletes the (empty) "susan" folder and marks it unsynced.
+void DeleteSusanInRoot(Directory* dir) {
+  ASSERT_GT(countown_till_delete, 0);
+  if (0 != --countown_till_delete)
+    return;
+  WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+  MutableEntry susan(&trans, GET_BY_PATH, PSTR("susan"));
+  // susan must be empty before deletion.
+  Directory::ChildHandles children;
+  dir->GetChildHandles(&trans, susan.Get(ID), &children);
+  ASSERT_EQ(0, children.size());
+  susan.Put(IS_DEL, true);
+  susan.Put(IS_UNSYNCED, true);
+}
+
+}  // namespace
+
TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3) {
  // Same as 2, except we deleted the folder the set is in between set building
  // and conflict resolution.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  // Server sends down a susan/bob/joe directory chain.
  mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
  mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
  mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
  LoopSyncShare(syncer_);
  {
    // Locally delete bob and joe.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    bob.Put(IS_UNSYNCED, true);
    bob.Put(IS_DEL, true);
    MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(joe.good());
    joe.Put(IS_UNSYNCED, true);
    joe.Put(IS_DEL, true);
  }
  mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
  mock_server_->set_conflict_all_commits(true);
  // Arrange for DeleteSusanInRoot to fire on its second invocation, i.e.
  // after the conflict set has been built but before it is resolved.
  countown_till_delete = 2;
  syncer_->pre_conflict_resolution_function_ = DeleteSusanInRoot;
  syncer_->SyncShare();
  syncer_->SyncShare();
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(joe.good());
    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(fred.good());
    Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(susan.good());
    // With susan deleted mid-resolution, fred's update cannot be applied yet.
    EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_TRUE(susan.Get(IS_UNSYNCED));
    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
    EXPECT_TRUE(joe.Get(IS_UNSYNCED));
  }
  EXPECT_EQ(0, countown_till_delete);  // The hook must have fired exactly twice.
  syncer_->pre_conflict_resolution_function_ = 0;
  // With the hook removed, further syncing should resolve everything.
  LoopSyncShare(syncer_);
  LoopSyncShare(syncer_);
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(joe.good());
    Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(fred.good());
    Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(susan.good());
    EXPECT_TRUE(susan.Get(IS_UNSYNCED));
    EXPECT_FALSE(fred.Get(IS_UNSYNCED));
    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
    EXPECT_TRUE(joe.Get(IS_UNSYNCED));
    // The full server hierarchy is now applied locally.
    EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
    EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
    EXPECT_EQ(bob.Get(PARENT_ID), susan.Get(ID));
    EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
    EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
  }
  syncer_events_.clear();
}
+
TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted) {
  // We move a local item under fred/alice just as the server deletes that
  // whole hierarchy; conflict resolution should recreate the folders locally
  // with fresh (client) IDs rather than losing the moved item.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
  mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
  mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
  syncer_->SyncShare();
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    bob.Put(IS_UNSYNCED, true);
    bob.Put(PARENT_ID, ids_.FromNumber(3));  // Move into alice.
  }
  // Server deletes fred and alice at a later version.
  mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
  mock_server_->SetLastUpdateDeleted();
  mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
  mock_server_->SetLastUpdateDeleted();
  mock_server_->set_conflict_all_commits(true);
  syncer_->SyncShare();
  syncer_->SyncShare();
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
    ASSERT_TRUE(fred.good());
    // Path "fred/alice" spelled as a PathChar array.
    PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
                       'a', 'l', 'i', 'c', 'e', 0};
    Entry alice(&trans, GET_BY_PATH, path);
    ASSERT_TRUE(alice.good());
    EXPECT_TRUE(fred.Get(IS_UNSYNCED));
    EXPECT_TRUE(alice.Get(IS_UNSYNCED));
    EXPECT_TRUE(bob.Get(IS_UNSYNCED));
    // fred and alice were recreated locally, so they have client-only IDs.
    EXPECT_FALSE(fred.Get(ID).ServerKnows());
    EXPECT_FALSE(alice.Get(ID).ServerKnows());
    EXPECT_EQ(alice.Get(PARENT_ID), fred.Get(ID));
    EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
    EXPECT_EQ(fred.Get(PARENT_ID), root_id_);
    EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
  }
  syncer_events_.clear();
}
+
+TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted2) {
+ // The difference here is that the hierarchy's not in the root. We have
+ // another entry that shouldn't be touched.
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(2, 4, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
+ syncer_->SyncShare();
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, ids_.FromNumber(3)); // Move into alice.
+ }
+ mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
+ mock_server_->SetLastUpdateDeleted();
+ mock_server_->set_conflict_all_commits(true);
+ syncer_->SyncShare();
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(bob.good());
+ PathChar path[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
+ 'f', 'r', 'e', 'd', 0};
+ Entry fred(&trans, GET_BY_PATH, path);
+ ASSERT_TRUE(fred.good());
+ PathChar path2[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
+ 'f', 'r', 'e', 'd', *kPathSeparator,
+ 'a', 'l', 'i', 'c', 'e', 0};
+ Entry alice(&trans, GET_BY_PATH, path2);
+ ASSERT_TRUE(alice.good());
+ Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(susan.good());
+ Entry susan_by_path(&trans, GET_BY_PATH, PSTR("susan"));
+ ASSERT_TRUE(susan.good());
+ EXPECT_FALSE(susan.Get(IS_UNSYNCED));
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_TRUE(alice.Get(IS_UNSYNCED));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_FALSE(alice.Get(ID).ServerKnows());
+ EXPECT_EQ(alice.Get(PARENT_ID), fred.Get(ID));
+ EXPECT_EQ(bob.Get(PARENT_ID), alice.Get(ID));
+ EXPECT_EQ(fred.Get(PARENT_ID), susan.Get(ID));
+ EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
+ EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
+ }
+ syncer_events_.clear();
+}
+
// This test is to reproduce a check failure. Sometimes we would get a
// bad ID back when creating an entry.
TEST_F(SyncerTest, DuplicateIDReturn) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    // One genuinely unsynced folder...
    MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("bob"));
    ASSERT_TRUE(folder.good());
    folder.Put(IS_UNSYNCED, true);
    folder.Put(IS_DIR, true);
    // ...and one already-synced folder squatting on the server ID that the
    // mock server is about to hand out again (see set_next_new_id below).
    MutableEntry folder2(&trans, CREATE, trans.root_id(), PSTR("fred"));
    ASSERT_TRUE(folder2.good());
    folder2.Put(IS_UNSYNCED, false);
    folder2.Put(IS_DIR, true);
    folder2.Put(BASE_VERSION, 3);
    folder2.Put(ID, syncable::Id::CreateFromServerId("mock_server:10000"));
  }
  mock_server_->set_next_new_id(10000);
  EXPECT_EQ(1, dir->unsynced_entity_count());
  syncer_->SyncShare();  // we get back a bad id in here (should never happen).
  EXPECT_EQ(1, dir->unsynced_entity_count());
  syncer_->SyncShare();  // another bad id in here.
  EXPECT_EQ(0, dir->unsynced_entity_count());
  syncer_events_.clear();
}
+
// This test is not very useful anymore. It used to trigger
// a more interesting condition.
TEST_F(SyncerTest, SimpleConflictOnAnEntry) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  {
    // Create and commit a local bookmark "bob".
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry bob(&trans, CREATE, trans.root_id(), PSTR("bob"));
    ASSERT_TRUE(bob.good());
    bob.Put(IS_UNSYNCED, true);
    WriteTestDataToEntry(&trans, &bob);
  }
  syncer_->SyncShare();
  syncable::Id bobid;
  {
    // Dirty it locally again so the next server update conflicts.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry bob(&trans, GET_BY_PATH, PSTR("bob"));
    ASSERT_TRUE(bob.good());
    EXPECT_FALSE(bob.Get(IS_UNSYNCED));
    bob.Put(IS_UNSYNCED, true);
    bobid = bob.Get(ID);
  }
  mock_server_->AddUpdateBookmark(1, 0, "jim", 2, 20);
  mock_server_->set_conflict_all_commits(true);
  // Mainly checks that repeated conflict-resolution cycles don't blow up.
  SyncRepeatedlyToTriggerConflictResolution(state_.get());
  syncer_events_.clear();
}
+
TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
  // A deleted local entry may point at a parent the client has never heard
  // of; syncing must tolerate that (no crash / infinite loop).
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  ASSERT_TRUE(dir.good());
  mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
  syncer_->SyncShare();
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(bob.good());
    // This is valid, because the parent could have gone away a long time ago.
    bob.Put(PARENT_ID, ids_.FromNumber(54));
    bob.Put(IS_DEL, true);
    bob.Put(IS_UNSYNCED, true);
  }
  mock_server_->AddUpdateDirectory(2, 1, "fred", 1, 10);
  syncer_->SyncShare();
  syncer_->SyncShare();
}
+
TEST_F(SyncerTest, ConflictResolverMergeOverwritesLocalEntry) {
  // This test would die because it would rename
  // a entry to a name that was taken in the namespace
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());

  ConflictSet conflict_set;
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);

    // A locally-deleted, unsynced entry named "name".
    MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
    local_deleted.Put(ID, ids_.FromNumber(1));
    local_deleted.Put(BASE_VERSION, 1);
    local_deleted.Put(IS_DEL, true);
    local_deleted.Put(IS_UNSYNCED, true);

    // A second live entry already occupying the name "name".
    MutableEntry in_the_way(&trans, CREATE, trans.root_id(), PSTR("name"));
    in_the_way.Put(ID, ids_.FromNumber(2));
    in_the_way.Put(BASE_VERSION, 1);

    // An unapplied server update whose server name is also "name".
    MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, ids_.FromNumber(3));
    update.Put(BASE_VERSION, 1);
    update.Put(SERVER_NAME, PSTR("name"));
    update.Put(PARENT_ID, ids_.FromNumber(0));
    update.Put(IS_UNAPPLIED_UPDATE, true);

    // Put the deleted entry and the update into one conflict set.
    conflict_set.push_back(ids_.FromNumber(1));
    conflict_set.push_back(ids_.FromNumber(3));
  }
  {
    // Drive conflict resolution directly; must not CHECK-fail on the name
    // collision with "in_the_way".
    SyncCycleState cycle_state;
    SyncerSession session(&cycle_state, state_.get());
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    syncer_->conflict_resolver()->ProcessConflictSet(&trans, &conflict_set, 50,
                                                     &session);
  }
}
+
TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
  // A local delete conflicting with a server content update should keep the
  // delete (still unsynced) while accepting the server's version number.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());

  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);

    MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
    local_deleted.Put(ID, ids_.FromNumber(1));
    local_deleted.Put(BASE_VERSION, 1);
    local_deleted.Put(IS_DEL, true);
    local_deleted.Put(IS_DIR, false);
    local_deleted.Put(IS_UNSYNCED, true);
    local_deleted.Put(IS_BOOKMARK_OBJECT, true);
  }

  mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);

  // We don't care about actually committing, just the resolution
  mock_server_->set_conflict_all_commits(true);
  syncer_->SyncShare();

  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
    // The server version was adopted, but the local delete survives.
    EXPECT_EQ(local_deleted.Get(BASE_VERSION), 10);
    EXPECT_EQ(local_deleted.Get(IS_UNAPPLIED_UPDATE), false);
    EXPECT_EQ(local_deleted.Get(IS_UNSYNCED), true);
    EXPECT_EQ(local_deleted.Get(IS_DEL), true);
    EXPECT_EQ(local_deleted.Get(IS_DIR), false);
  }
}
+
// See what happens if the IS_DIR bit gets flipped. This can cause us
// all kinds of disasters.
TEST_F(SyncerTest, UpdateFlipsTheFolderBit) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());

  // Local object: a deleted directory (container), revision 1, unsynced.
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);

    MutableEntry local_deleted(&trans, CREATE, trans.root_id(), PSTR("name"));
    local_deleted.Put(ID, ids_.FromNumber(1));
    local_deleted.Put(BASE_VERSION, 1);
    local_deleted.Put(IS_DEL, true);
    local_deleted.Put(IS_DIR, true);
    local_deleted.Put(IS_UNSYNCED, true);
  }

  // Server update: entry-type object (not a container), revision 10.
  mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);

  // Don't attempt to commit
  mock_server_->set_conflict_all_commits(true);

  // The syncer should not attempt to apply the invalid update.
  syncer_->SyncShare();

  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry local_deleted(&trans, GET_BY_ID, ids_.FromNumber(1));
    // Everything stays at the local state: the folder-bit-flipping update
    // was rejected, so BASE_VERSION is still 1 and IS_DIR still true.
    EXPECT_EQ(local_deleted.Get(BASE_VERSION), 1);
    EXPECT_EQ(local_deleted.Get(IS_UNAPPLIED_UPDATE), false);
    EXPECT_EQ(local_deleted.Get(IS_UNSYNCED), true);
    EXPECT_EQ(local_deleted.Get(IS_DEL), true);
    EXPECT_EQ(local_deleted.Get(IS_DIR), true);
  }
}
+
TEST(SyncerSyncProcessState, MergeSetsTest) {
  // Exercises the union-find behavior of SyncProcessState::MergeSets.
  TestIdFactory id_factory;
  syncable::Id id[7];  // index 0 unused; ids 1..6 are populated below.
  for (int i = 1; i < 7; i++) {
    id[i] = id_factory.NewServerId();
  }
  SyncProcessState c;
  // Build two disjoint conflict sets: {1,2,3} and {4,5,6}.
  c.MergeSets(id[1], id[2]);
  c.MergeSets(id[2], id[3]);
  c.MergeSets(id[4], id[5]);
  c.MergeSets(id[5], id[6]);
  EXPECT_EQ(6, c.IdToConflictSetSize());
  for (int i = 1; i < 7; i++) {
    EXPECT_TRUE(NULL != c.IdToConflictSetGet(id[i]));
    // (i & ~3) + 1 maps 1..3 -> 1 and 4..6 -> 5, i.e. a fixed member of
    // i's own group, so each id must share a set with its group's member.
    EXPECT_EQ(c.IdToConflictSetGet(id[(i & ~3) + 1]),
              c.IdToConflictSetGet(id[i]));
  }
  // Merging across the two sets must collapse everything into one set.
  c.MergeSets(id[1], id[6]);
  for (int i = 1; i < 7; i++) {
    EXPECT_TRUE(NULL != c.IdToConflictSetGet(id[i]));
    EXPECT_EQ(c.IdToConflictSetGet(id[1]), c.IdToConflictSetGet(id[i]));
  }

  // Check dupes don't cause double sets
  SyncProcessState identical_set;
  identical_set.MergeSets(id[1], id[1]);
  EXPECT_EQ(identical_set.IdToConflictSetSize(), 1);
  EXPECT_EQ(identical_set.IdToConflictSetGet(id[1])->size(), 1);
}
+
// Bug Synopsis:
// Merge conflict resolution will merge a new local entry
// with another entry that needs updates, resulting in CHECK.
TEST_F(SyncerTest, MergingExistingItems) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  mock_server_->set_conflict_all_commits(true);
  mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
  syncer_->SyncShare();
  {
    // Create a brand-new local entry whose name matches the server rename
    // coming next, tempting the resolver to merge them.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("Copy of base"));
    WriteTestDataToEntry(&trans, &entry);
  }
  mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
  // Must not CHECK-fail during repeated conflict resolution.
  SyncRepeatedlyToTriggerConflictResolution(state_.get());
}
+
// In this test a long changelog contains a child at the start of the changelog
// and a parent at the end. While these updates are in progress the client would
// appear stuck.
TEST_F(SyncerTest, LongChangelistCreatesFakeOrphanedEntries) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  const int DEPTH = 400;
  // First we create an item in a folder in the root. However the folder won't
  // come till much later.
  mock_server_->AddUpdateDirectory(99999, 1, "stuck", 1, 1);
  mock_server_->SetNewestTimestamp(DEPTH);
  syncer_->SyncShare(state_.get());

  // Very long changelist. We should never be stuck.
  for (int i = 0; i < DEPTH; i++) {
    mock_server_->SetNewTimestamp(i);
    mock_server_->SetNewestTimestamp(DEPTH);
    syncer_->SyncShare(state_.get());
    EXPECT_FALSE(SyncerStuck(state_.get()));
  }
  // And finally the folder.
  mock_server_->AddUpdateDirectory(1, 0, "folder", 1, 1);
  mock_server_->SetNewestTimestamp(DEPTH);
  LoopSyncShare(syncer_);
  LoopSyncShare(syncer_);
  // Check that everything's as expected after the commit.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_PATH, PSTR("folder"));
    ASSERT_TRUE(entry.good());
    // The formerly orphaned child must now live under the folder.
    Entry child(&trans, GET_BY_PARENTID_AND_NAME, entry.Get(ID), PSTR("stuck"));
    EXPECT_TRUE(child.good());
  }
}
+
TEST_F(SyncerTest, DontMergeTwoExistingItems) {
  // Two distinct server-known items that end up with the same name must NOT
  // be merged into one.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  mock_server_->set_conflict_all_commits(true);
  mock_server_->AddUpdateBookmark(1, 0, "base", 10, 10);
  mock_server_->AddUpdateBookmark(2, 0, "base2", 10, 10);
  syncer_->SyncShare();
  {
    // Locally rename item 2 to the name item 1 is about to take on.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(entry.good());
    EXPECT_TRUE(entry.Put(NAME, PSTR("Copy of base")));
    entry.Put(IS_UNSYNCED, true);
  }
  mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
  SyncRepeatedlyToTriggerConflictResolution(state_.get());
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    // Both items survive as separate entries with distinct names.
    Entry entry1(&trans, GET_BY_ID, ids_.FromNumber(1));
    EXPECT_FALSE(entry1.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry1.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry1.Get(IS_DEL));
    Entry entry2(&trans, GET_BY_ID, ids_.FromNumber(2));
    EXPECT_FALSE(entry2.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_TRUE(entry2.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry2.Get(IS_DEL));
    EXPECT_NE(entry1.Get(NAME), entry2.Get(NAME));
  }
}
+
TEST_F(SyncerTest, TestUndeleteUpdate) {
  // A server undeletion of a child whose parent was also deleted: the
  // undelete update should remain unapplied rather than being rejected.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  mock_server_->set_conflict_all_commits(true);
  mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
  mock_server_->AddUpdateDirectory(2, 1, "bar", 1, 2);
  syncer_->SyncShare();
  // Server deletes the child "bar".
  mock_server_->AddUpdateDirectory(2, 1, "bar", 2, 3);
  mock_server_->SetLastUpdateDeleted();
  syncer_->SyncShare();
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(entry.good());
    EXPECT_TRUE(entry.Get(IS_DEL));
  }
  // Server then deletes the parent "foo" too.
  mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4);
  mock_server_->SetLastUpdateDeleted();
  syncer_->SyncShare();
  // This used to be rejected as it's an undeletion.
  // Now, it results in moving the delete path aside.
  mock_server_->AddUpdateDirectory(2, 1, "bar", 3, 5);
  syncer_->SyncShare();
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(entry.good());
    // Locally still deleted, server-side alive, update pending (parent gone).
    EXPECT_TRUE(entry.Get(IS_DEL));
    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
    EXPECT_TRUE(entry.Get(IS_UNAPPLIED_UPDATE));
  }
}
+
TEST_F(SyncerTest, TestMoveSanitizedNamedFolder) {
  // ":::" needs filename sanitization; moving such a folder and then
  // receiving a matching server update must not explode.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  mock_server_->AddUpdateDirectory(1, 0, "foo", 1, 1);
  mock_server_->AddUpdateDirectory(2, 0, ":::", 1, 2);
  syncer_->SyncShare();
  {
    // Locally move the sanitized-name folder under "foo".
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(entry.good());
    EXPECT_TRUE(entry.Put(PARENT_ID, ids_.FromNumber(1)));
    EXPECT_TRUE(entry.Put(IS_UNSYNCED, true));
  }
  syncer_->SyncShare();
  // We use the same sync ts as before so our times match up.
  mock_server_->AddUpdateDirectory(2, 1, ":::", 2, 2);
  syncer_->SyncShare();
}
+
TEST_F(SyncerTest, QuicklyMergeDualCreatedHierarchy) {
  // The client and the server independently create the same 10-deep chain of
  // "folder" directories; merging should converge without thrashing commits.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  mock_server_->set_conflict_all_commits(true);
  int depth = 10;
  {
    // Local chain: root/folder/folder/.../folder.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    syncable::Id parent = root_id_;
    for (int i = 0 ; i < depth ; ++i) {
      MutableEntry entry(&trans, CREATE, parent, PSTR("folder"));
      entry.Put(IS_DIR, true);
      entry.Put(IS_UNSYNCED, true);
      parent = entry.Get(ID);
    }
  }
  // Identical chain coming from the server.
  for (int i = 0 ; i < depth ; ++i) {
    mock_server_->AddUpdateDirectory(i + 1, i, "folder", 1, 1);
  }
  syncer_->SyncShare(state_.get());
  syncer_->SyncShare(state_.get());
  SyncerStatus status(NULL, state_.get());
  // Convergence should be fast: few problem commits, nothing left unsynced.
  EXPECT_LT(status.consecutive_problem_commits(), 5);
  EXPECT_EQ(0, dir->unsynced_entity_count());
}
+
TEST(SortedCollectionsIntersect, SortedCollectionsIntersectTest) {
  // Exercises SortedCollectionsIntersect over overlapping, disjoint, and
  // empty [begin, end) ranges.
  int negative[] = {-3, -2, -1};
  int straddle[] = {-1, 0, 1};
  int positive[] = {1, 2, 3};
  // Shared element -1.
  EXPECT_TRUE(SortedCollectionsIntersect(negative, negative + 3,
                                         straddle, straddle + 3));
  // Fully disjoint ranges.
  EXPECT_FALSE(SortedCollectionsIntersect(negative, negative + 3,
                                          positive, positive + 3));
  // Shared element 1.
  EXPECT_TRUE(SortedCollectionsIntersect(straddle, straddle + 3,
                                         positive, positive + 3));
  // An empty second range never intersects.
  EXPECT_FALSE(SortedCollectionsIntersect(straddle + 2, straddle + 3,
                                          positive, positive));
  EXPECT_FALSE(SortedCollectionsIntersect(straddle, straddle + 3,
                                          positive + 1, positive + 1));
  EXPECT_TRUE(SortedCollectionsIntersect(straddle, straddle + 3,
                                         positive, positive + 1));
}
+
// Don't crash when this occurs.
TEST_F(SyncerTest, UpdateWhereParentIsNotAFolder) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  // A directory update whose parent (item 1) is a bookmark, not a folder.
  mock_server_->AddUpdateBookmark(1, 0, "B", 10, 10);
  mock_server_->AddUpdateDirectory(2, 1, "BookmarkParent", 10, 10);
  // Used to cause a CHECK
  syncer_->SyncShare();
  {
    ReadTransaction rtrans(dir, __FILE__, __LINE__);
    // The bookmark applies normally...
    Entry good_entry(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(good_entry.good());
    EXPECT_FALSE(good_entry.Get(IS_UNAPPLIED_UPDATE));
    // ...the child with the non-folder parent stays unapplied.
    Entry bad_parent(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(bad_parent.good());
    EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE));
  }
}
+
// Server-side string ID of the root folder, used by tests that address
// parents via string IDs (see DirectoryUpdateTest below).
const char kRootId[] = "0";
+
TEST_F(SyncerTest, DirectoryUpdateTest) {
  // Applies nested directory updates addressed by string IDs and verifies
  // both levels are reachable by path afterwards.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  mock_server_->AddUpdateDirectory("in_root_id", kRootId,
                                   "in_root_name", 2, 2);
  mock_server_->AddUpdateDirectory("in_in_root_id", "in_root_id",
                                   "in_in_root_name", 3, 3);
  syncer_->SyncShare();
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    // Entry will have been dropped.
    Entry by_path(&trans, GET_BY_PATH, PSTR("in_root_name"));
    EXPECT_TRUE(by_path.good());
    Entry by_path2(&trans, GET_BY_PATH, PSTR("in_root_name") +
                                        PathString(kPathSeparator) +
                                        PSTR("in_in_root_name"));
    EXPECT_TRUE(by_path2.good());
  }
}
+
+TEST_F(SyncerTest, DirectoryCommitTest) {
+ syncable::Id in_root, in_dir;
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+ {
+ WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, syncable::CREATE, root_id_, PSTR("foo"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(syncable::IS_UNSYNCED, true);
+ parent.Put(syncable::IS_DIR, true);
+ in_root = parent.Get(syncable::ID);
+ MutableEntry child(&wtrans, syncable::CREATE, parent.Get(ID), PSTR("bar"));
+ ASSERT_TRUE(child.good());
+ child.Put(syncable::IS_UNSYNCED, true);
+ child.Put(syncable::IS_DIR, true);
+ in_dir = parent.Get(syncable::ID);
+ }
+ syncer_->SyncShare();
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry by_path(&trans, GET_BY_PATH, PSTR("foo"));
+ ASSERT_TRUE(by_path.good());
+ EXPECT_NE(by_path.Get(syncable::ID), in_root);
+ Entry by_path2(&trans, GET_BY_PATH, PSTR("foo") +
+ PathString(kPathSeparator) +
+ PSTR("bar"));
+ ASSERT_TRUE(by_path2.good());
+ EXPECT_NE(by_path2.Get(syncable::ID), in_dir);
+ }
+}
+
namespace {

// Asserts that the entry at path "foo" in the directory named |name| still
// has BASE_VERSION 1, i.e. no newer version was ever committed/applied.
void CheckEntryVersion(syncable::DirectoryManager* dirmgr, PathString name) {
  ScopedDirLookup dir(dirmgr, name);
  ASSERT_TRUE(dir.good());
  ReadTransaction trans(dir, __FILE__, __LINE__);
  Entry entry(&trans, GET_BY_PATH, PSTR("foo"));
  ASSERT_TRUE(entry.good());
  EXPECT_EQ(entry.Get(BASE_VERSION), 1);
}

}  // namespace
+
TEST_F(SyncerTest, ConflictSetSizeReducedToOne) {
  // Rename a server-known entry and create a fresh one that takes over its
  // old name; the resulting conflict used to CHECK-fail.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  mock_server_->AddUpdateBookmark(2, 0, "in_root", 1, 1);
  syncer_->SyncShare();
  {
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry oentry(&trans, GET_BY_PATH, PSTR("in_root"));
    ASSERT_TRUE(oentry.good());
    oentry.Put(NAME, PSTR("old_in_root"));
    WriteTestDataToEntry(&trans, &oentry);
    MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("in_root"));
    ASSERT_TRUE(entry.good());
    WriteTestDataToEntry(&trans, &entry);
  }
  mock_server_->set_conflict_all_commits(true);
  // This SyncShare call used to result in a CHECK failure.
  syncer_->SyncShare();
  syncer_events_.clear();
}
+
TEST_F(SyncerTest, TestClientCommand) {
  // Verifies that server-sent ClientCommand poll intervals are surfaced to
  // the client after each sync, including subsequent changes.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  using sync_pb::ClientCommand;

  // First sync: server instructs 8s short / 800s long poll.
  ClientCommand* command = mock_server_->GetNextClientCommand();
  command->set_set_sync_poll_interval(8);
  command->set_set_sync_long_poll_interval(800);
  mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
  syncer_->SyncShare();

  EXPECT_TRUE(last_client_command_.has_set_sync_poll_interval());
  EXPECT_TRUE(last_client_command_.has_set_sync_long_poll_interval());
  EXPECT_EQ(8, last_client_command_.set_sync_poll_interval());
  EXPECT_EQ(800, last_client_command_.set_sync_long_poll_interval());

  // Second sync: new values must replace the previous ones.
  command = mock_server_->GetNextClientCommand();
  command->set_set_sync_poll_interval(180);
  command->set_set_sync_long_poll_interval(190);
  mock_server_->AddUpdateDirectory(1, 0, "in_root", 1, 1);
  syncer_->SyncShare();

  EXPECT_TRUE(last_client_command_.has_set_sync_poll_interval());
  EXPECT_TRUE(last_client_command_.has_set_sync_long_poll_interval());
  EXPECT_EQ(180, last_client_command_.set_sync_poll_interval());
  EXPECT_EQ(190, last_client_command_.set_sync_long_poll_interval());
}
+
TEST_F(SyncerTest, EnsureWeSendUpOldParent) {
  // Moved entries must report their previous parent in the commit message;
  // newly-created entries must not carry an old_parent_id at all.
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  mock_server_->AddUpdateDirectory(1, 0, "folder_one", 1, 1);
  mock_server_->AddUpdateDirectory(2, 0, "folder_two", 1, 1);
  syncer_->SyncShare();
  {
    // A moved entry should send an old parent.
    WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&trans, GET_BY_PATH, PSTR("folder_one"));
    ASSERT_TRUE(entry.good());
    entry.Put(PARENT_ID, ids_.FromNumber(2));
    entry.Put(IS_UNSYNCED, true);
    // A new entry should send no parent.
    MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
    create.Put(IS_UNSYNCED, true);
  }
  syncer_->SyncShare();
  // Inspect the wire-level commit the mock server received.
  const sync_pb::CommitMessage& commit = mock_server_->last_sent_commit();
  ASSERT_EQ(2, commit.entries_size());
  EXPECT_EQ(commit.entries(0).parent_id_string(), "2");
  EXPECT_EQ(commit.entries(0).old_parent_id(), "0");
  EXPECT_FALSE(commit.entries(1).has_old_parent_id());
}
+
TEST_F(SyncerTest, Test64BitVersionSupport) {
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  // A value near (but not exactly) the int64 maximum, to exercise the full
  // 64-bit range of the version columns.
  int64 really_big_int = std::numeric_limits<int64>::max() - 12;
  const PathString name(PSTR("ringo's dang orang ran rings around my o-ring"));

  // Try writing a near-max int64 to the version fields of a meta entry.
  {
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&wtrans, syncable::CREATE, wtrans.root_id(), name);
    ASSERT_TRUE(entry.good());
    entry.Put(syncable::BASE_VERSION, really_big_int);
    entry.Put(syncable::SERVER_VERSION, really_big_int);
    entry.Put(syncable::ID, syncable::Id::CreateFromServerId("ID"));
  }
  // Now read it back out and make sure the value survived the round trip
  // without truncation.
  ReadTransaction rtrans(dir, __FILE__, __LINE__);
  Entry entry(&rtrans, syncable::GET_BY_PATH, name);
  ASSERT_TRUE(entry.good());
  EXPECT_EQ(really_big_int, entry.Get(syncable::BASE_VERSION));
}
+
TEST_F(SyncerTest, TestDSStoreDirectorySyncsNormally) {
  // A folder named ".DS_Store" (the macOS Finder metadata name) must sync
  // like any other directory.
  syncable::Id item_id = parent_id_;
  mock_server_->AddUpdateDirectory(item_id,
                                   root_id_, ".DS_Store", 1, 1);
  syncer_->SyncShare();
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  CHECK(dir.good());
  ReadTransaction trans(dir, __FILE__, __LINE__);
  Entry ds_dir(&trans, GET_BY_PATH, PSTR(".DS_Store"));
  ASSERT_TRUE(ds_dir.good());
}
+
TEST_F(SyncerTest, TestSimpleUndelete) {
  // Full delete/confirm/undelete round trip: tracks IS_DEL and SERVER_IS_DEL
  // through each phase.
  Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  mock_server_->set_conflict_all_commits(true);
  // let there be an entry from the server.
  mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
  syncer_->SyncShare();
  // check it out and delete it
  {
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&wtrans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry.Get(IS_DEL));
    // delete it locally
    entry.Put(IS_DEL, true);
  }
  syncer_->SyncShare();
  // Confirm we see IS_DEL and not SERVER_IS_DEL.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_TRUE(entry.Get(IS_DEL));
    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
  }
  syncer_->SyncShare();
  // Update from server confirming deletion
  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 11);
  mock_server_->SetLastUpdateDeleted();
  syncer_->SyncShare();
  // IS_DEL AND SERVER_IS_DEL now both true.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_TRUE(entry.Get(IS_DEL));
    EXPECT_TRUE(entry.Get(SERVER_IS_DEL));
  }
  // Undelete from server
  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
  syncer_->SyncShare();
  // IS_DEL and SERVER_IS_DEL now both false.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry.Get(IS_DEL));
    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
  }
}
+
TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
  // Like TestSimpleUndelete, but the server's delete-confirmation update is
  // never received before the undelete arrives.
  Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
  EXPECT_TRUE(dir.good());
  // let there be a entry, from the server.
  mock_server_->set_conflict_all_commits(true);
  mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
  syncer_->SyncShare();
  // check it out and delete it
  {
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&wtrans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry.Get(IS_DEL));
    // delete it locally
    entry.Put(IS_DEL, true);
  }
  syncer_->SyncShare();
  // Confirm we see IS_DEL and not SERVER_IS_DEL.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_TRUE(entry.Get(IS_DEL));
    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
  }
  syncer_->SyncShare();
  // Say we do not get an update from server confirming deletion.
  // Undelete from server
  mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
  syncer_->SyncShare();
  // IS_DEL and SERVER_IS_DEL now both false.
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry entry(&trans, GET_BY_ID, id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
    EXPECT_FALSE(entry.Get(IS_UNSYNCED));
    EXPECT_FALSE(entry.Get(IS_DEL));
    EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
  }
}
+
+// Two server items arrive with the same name under the same parent, then one
+// is renamed server-side.  This smoke test only requires that the syncer
+// survives the path clash and the follow-up update without crashing.
+TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
+  Id id1 = ids_.MakeServer("first"), id2 = ids_.MakeServer("second");
+  Id root = ids_.root();
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  EXPECT_TRUE(dir.good());
+  // Duplicate! Expect path clashing!
+  mock_server_->set_conflict_all_commits(true);
+  mock_server_->AddUpdateBookmark(id1, root, "foo", 1, 10);
+  mock_server_->AddUpdateBookmark(id2, root, "foo", 1, 10);
+  syncer_->SyncShare();
+  mock_server_->AddUpdateBookmark(id2, root, "foo2", 1, 10);
+  syncer_->SyncShare();  // Now just don't explode.
+}
+
+// Exercises SyncProcessState's copy constructor, copy assignment, and
+// self-assignment with respect to its conflict-set bookkeeping.
+TEST_F(SyncerTest, CopySyncProcessState) {
+  scoped_ptr<SyncProcessState> b;
+  {
+    SyncProcessState a;
+    // {1,2,3} and {4,5} form two distinct conflict sets.
+    a.MergeSets(ids_.FromNumber(1), ids_.FromNumber(2));
+    a.MergeSets(ids_.FromNumber(2), ids_.FromNumber(3));
+    a.MergeSets(ids_.FromNumber(4), ids_.FromNumber(5));
+    EXPECT_EQ(a.ConflictSetsSize(), 2);
+    {
+      // This inner |b| intentionally shadows the outer scoped_ptr; it tests
+      // the copy constructor and (via "b = b") self-assignment.
+      SyncProcessState b = a;
+      b = b;
+      EXPECT_EQ(b.ConflictSetsSize(), 2);
+    }
+    // Copying must not have disturbed the source state.
+    EXPECT_EQ(a.ConflictSetsSize(), 2);
+    // Merging 3 and 4 joins the two sets into one.
+    a.MergeSets(ids_.FromNumber(3), ids_.FromNumber(4));
+    EXPECT_EQ(a.ConflictSetsSize(), 1);
+    b.reset(new SyncProcessState(a));
+  }
+  // The heap-allocated copy must remain valid after |a| is destroyed.
+  EXPECT_EQ(b->ConflictSetsSize(), 1);
+}
+
+// Verifies that downloaded updates carrying a SINGLETON_TAG become locally
+// retrievable via GET_BY_TAG, and that tag lookup is keyed on the tag value,
+// not on an entry's name (the "bob" hurdle below shares a name with a tag).
+TEST_F(SyncerTest, SingletonTagUpdates) {
+  ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+  EXPECT_TRUE(dir.good());
+  // As a hurdle, introduce an item whose name is the same as the
+  // tag value we'll use later.
+  int64 hurdle_handle = CreateUnsyncedDirectory(PSTR("bob"), "id_bob");
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+    ASSERT_TRUE(hurdle.good());
+    ASSERT_TRUE(!hurdle.Get(IS_DEL));
+    ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+    ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+
+    // Try to lookup by the tagname.  These should fail.
+    Entry tag_alpha(&trans, GET_BY_TAG, PSTR("alpha"));
+    EXPECT_FALSE(tag_alpha.good());
+    Entry tag_bob(&trans, GET_BY_TAG, PSTR("bob"));
+    EXPECT_FALSE(tag_bob.good());
+  }
+
+  // Now download some tagged items as updates.
+  mock_server_->AddUpdateDirectory(1, 0, "update1", 1, 10);
+  mock_server_->SetLastUpdateSingletonTag("alpha");
+  mock_server_->AddUpdateDirectory(2, 0, "update2", 2, 20);
+  mock_server_->SetLastUpdateSingletonTag("bob");
+  syncer_->SyncShare();
+
+  {
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+
+    // The new items should be applied as new entries, and we should be able
+    // to look them up by their tag values.
+    Entry tag_alpha(&trans, GET_BY_TAG, PSTR("alpha"));
+    ASSERT_TRUE(tag_alpha.good());
+    ASSERT_TRUE(!tag_alpha.Get(IS_DEL));
+    ASSERT_TRUE(tag_alpha.Get(SINGLETON_TAG) == PSTR("alpha"));
+    ASSERT_TRUE(tag_alpha.GetName().value() == PSTR("update1"));
+    Entry tag_bob(&trans, GET_BY_TAG, PSTR("bob"));
+    ASSERT_TRUE(tag_bob.good());
+    ASSERT_TRUE(!tag_bob.Get(IS_DEL));
+    ASSERT_TRUE(tag_bob.Get(SINGLETON_TAG) == PSTR("bob"));
+    ASSERT_TRUE(tag_bob.GetName().value() == PSTR("update2"));
+    // The old item should be unchanged.
+    Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
+    ASSERT_TRUE(hurdle.good());
+    ASSERT_TRUE(!hurdle.Get(IS_DEL));
+    ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+    ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+  }
+}
+
+namespace {
+
+// Fixture for tests that feed the syncer updates with explicit server
+// positions (SERVER_POSITION_IN_PARENT) and then verify that the local
+// PREV_ID/NEXT_ID sibling chain matches the server-imposed order.
+class SyncerPositionUpdateTest : public SyncerTest {
+ public:
+  SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
+
+ protected:
+  // Walks |position_map_| (a multimap, so iteration is in ascending position
+  // order) and asserts each entry's PREV_ID, NEXT_ID and server position
+  // agree with that order.
+  void ExpectLocalItemsInServerOrder() {
+    if (position_map_.empty())
+      return;
+
+    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+    EXPECT_TRUE(dir.good());
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+
+    // A default-constructed Id is the root id; the first item's PREV_ID
+    // should be root.
+    Id prev_id;
+    DCHECK(prev_id.IsRoot());
+    // |next| trails one position ahead of |i| so each entry's NEXT_ID can be
+    // checked against its successor in the map.
+    PosMap::iterator next = position_map_.begin();
+    for (PosMap::iterator i = next++; i != position_map_.end(); ++i) {
+      Id id = i->second;
+      Entry entry_with_id(&trans, GET_BY_ID, id);
+      EXPECT_TRUE(entry_with_id.good());
+      EXPECT_EQ(entry_with_id.Get(PREV_ID), prev_id);
+      EXPECT_EQ(entry_with_id.Get(SERVER_POSITION_IN_PARENT), i->first);
+      if (next == position_map_.end()) {
+        EXPECT_TRUE(entry_with_id.Get(NEXT_ID).IsRoot());
+      } else {
+        EXPECT_EQ(entry_with_id.Get(NEXT_ID), next->second);
+        next++;
+      }
+      prev_id = id;
+    }
+  }
+
+  // Queues a directory update under the root with the given server
+  // |position|, using fresh ids and monotonically increasing revisions, and
+  // records the expected (position, id) pair for later verification.
+  void AddRootItemWithPosition(int64 position) {
+    string id = string("ServerId") + Int64ToString(next_update_id_++);
+    string name = "my name is my id -- " + id;
+    int revision = next_revision_++;
+    mock_server_->AddUpdateDirectory(id, kRootId, name, revision, revision);
+    mock_server_->SetLastUpdatePosition(position);
+    position_map_.insert(
+        PosMap::value_type(position, Id::CreateFromServerId(id)));
+  }
+ private:
+  typedef multimap<int64, Id> PosMap;
+  PosMap position_map_;
+  int next_update_id_;
+  int next_revision_;
+  DISALLOW_COPY_AND_ASSIGN(SyncerPositionUpdateTest);
+};
+
+}  // namespace
+
+// Updates delivered already in ascending position order (positive values)
+// should produce a matching local sibling order.
+TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
+  // Add a bunch of items in increasing order, starting with just
+  // positive position values.
+  AddRootItemWithPosition(100);
+  AddRootItemWithPosition(199);
+  AddRootItemWithPosition(200);
+  AddRootItemWithPosition(201);
+  AddRootItemWithPosition(400);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+}
+
+// Same as InOrderPositive, but exercising negative position values.
+TEST_F(SyncerPositionUpdateTest, InOrderNegative) {
+  // Test negative position values, but in increasing order.
+  AddRootItemWithPosition(-400);
+  AddRootItemWithPosition(-201);
+  AddRootItemWithPosition(-200);
+  AddRootItemWithPosition(-150);
+  AddRootItemWithPosition(100);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+}
+
+// Updates delivered in strictly descending position order must still yield
+// the correct ascending local sibling order.
+TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
+  // Test when items are sent in the reverse order.
+  AddRootItemWithPosition(400);
+  AddRootItemWithPosition(201);
+  AddRootItemWithPosition(200);
+  AddRootItemWithPosition(100);
+  AddRootItemWithPosition(-150);
+  AddRootItemWithPosition(-201);
+  AddRootItemWithPosition(-200);
+  AddRootItemWithPosition(-400);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+}
+
+// Interleaved positive/negative positions delivered across three separate
+// sync cycles; ordering must be correct after each cycle, including when a
+// later batch inserts an item between already-synced siblings.
+TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
+  // Mix it all up, interleaving position values,
+  // and try multiple batches of updates.
+  AddRootItemWithPosition(400);
+  AddRootItemWithPosition(201);
+  AddRootItemWithPosition(-400);
+  AddRootItemWithPosition(100);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+
+  AddRootItemWithPosition(-150);
+  AddRootItemWithPosition(-200);
+  AddRootItemWithPosition(200);
+  AddRootItemWithPosition(-201);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+
+  AddRootItemWithPosition(-144);
+
+  syncer_->SyncShare();
+  ExpectLocalItemsInServerOrder();
+}
+
+namespace {
+
+// Fixture for tests where every update carries the SAME server position, so
+// the syncer must break the tie some other way; the expectation (see
+// ExpectLocalOrderIsByServerId) is string comparison of the server ids.
+class SyncerPositionTiebreakingTest : public SyncerTest {
+ public:
+  SyncerPositionTiebreakingTest()
+      : low_id_(Id::CreateFromServerId("A")),
+        mid_id_(Id::CreateFromServerId("M")),
+        high_id_(Id::CreateFromServerId("Z")),
+        next_revision_(1) {
+    DCHECK(low_id_ < mid_id_);
+    DCHECK(mid_id_ < high_id_);
+    DCHECK(low_id_ < high_id_);
+  }
+
+  // Adds the item by its Id, using a constant value for the position
+  // so that the syncer has to resolve the order some other way.
+  void Add(const Id& id) {
+    int revision = next_revision_++;
+    mock_server_->AddUpdateDirectory(id.GetServerId(), kRootId,
+        id.GetServerId(), revision, revision);
+    // The update position doesn't vary.
+    mock_server_->SetLastUpdatePosition(90210);
+  }
+
+  // Asserts the local sibling chain is root <- low <- mid <- high -> root,
+  // i.e. ordered by ascending server id.
+  void ExpectLocalOrderIsByServerId() {
+    ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+    EXPECT_TRUE(dir.good());
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    Id null_id;  // Default-constructed Id is the root id.
+    Entry low(&trans, GET_BY_ID, low_id_);
+    Entry mid(&trans, GET_BY_ID, mid_id_);
+    Entry high(&trans, GET_BY_ID, high_id_);
+    EXPECT_TRUE(low.good());
+    EXPECT_TRUE(mid.good());
+    EXPECT_TRUE(high.good());
+    EXPECT_EQ(low.Get(PREV_ID), null_id);
+    EXPECT_EQ(mid.Get(PREV_ID), low_id_);
+    EXPECT_EQ(high.Get(PREV_ID), mid_id_);
+    EXPECT_EQ(high.Get(NEXT_ID), null_id);
+    EXPECT_EQ(mid.Get(NEXT_ID), high_id_);
+    EXPECT_EQ(low.Get(NEXT_ID), mid_id_);
+  }
+
+ protected:
+  // When there's a tiebreak on the numeric position, it's supposed to be
+  // broken by string comparison of the ids.  These ids are in increasing
+  // order.
+  const Id low_id_;
+  const Id mid_id_;
+  const Id high_id_;
+
+ private:
+  int next_revision_;
+  DISALLOW_COPY_AND_ASSIGN(SyncerPositionTiebreakingTest);
+};
+
+}  // namespace
+
+// Arrival order low, mid, high: already sorted by id.
+TEST_F(SyncerPositionTiebreakingTest, LowMidHigh) {
+  Add(low_id_);
+  Add(mid_id_);
+  Add(high_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Arrival order low, high, mid: the last arrival must be inserted between
+// the first two.
+TEST_F(SyncerPositionTiebreakingTest, LowHighMid) {
+  Add(low_id_);
+  Add(high_id_);
+  Add(mid_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Arrival order high, mid, low: fully reversed relative to id order.
+TEST_F(SyncerPositionTiebreakingTest, HighMidLow) {
+  Add(high_id_);
+  Add(mid_id_);
+  Add(low_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Arrival order high, low, mid.
+TEST_F(SyncerPositionTiebreakingTest, HighLowMid) {
+  Add(high_id_);
+  Add(low_id_);
+  Add(mid_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Arrival order mid, high, low.
+TEST_F(SyncerPositionTiebreakingTest, MidHighLow) {
+  Add(mid_id_);
+  Add(high_id_);
+  Add(low_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Arrival order mid, low, high: completes coverage of all six permutations.
+TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
+  Add(mid_id_);
+  Add(low_id_);
+  Add(high_id_);
+  syncer_->SyncShare();
+  ExpectLocalOrderIsByServerId();
+}
+
+// Out-of-line definition of the fixture's sentinel value; a -1 index paired
+// with the root id marks the end of a commit-ordering expectation list.
+const SyncerTest::CommitOrderingTest
+SyncerTest::CommitOrderingTest::LAST_COMMIT_ITEM = {-1, TestIdFactory::root()};
+}  // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
new file mode 100644
index 0000000..75f7b82
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -0,0 +1,845 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncer_util.h"
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/conflict_resolver.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+using syncable::BASE_VERSION;
+using syncable::BOOKMARK_FAVICON;
+using syncable::BOOKMARK_URL;
+using syncable::Blob;
+using syncable::CHANGES_VERSION;
+using syncable::CREATE;
+using syncable::CREATE_NEW_UPDATE_ITEM;
+using syncable::CTIME;
+using syncable::ComparePathNames;
+using syncable::Directory;
+using syncable::Entry;
+using syncable::ExtendedAttributeKey;
+using syncable::GET_BY_HANDLE;
+using syncable::GET_BY_ID;
+using syncable::GET_BY_PARENTID_AND_DBNAME;
+using syncable::ID;
+using syncable::IS_BOOKMARK_OBJECT;
+using syncable::IS_DEL;
+using syncable::IS_DIR;
+using syncable::IS_UNAPPLIED_UPDATE;
+using syncable::IS_UNSYNCED;
+using syncable::Id;
+using syncable::META_HANDLE;
+using syncable::MTIME;
+using syncable::MutableEntry;
+using syncable::MutableExtendedAttribute;
+using syncable::NEXT_ID;
+using syncable::Name;
+using syncable::PARENT_ID;
+using syncable::PREV_ID;
+using syncable::ReadTransaction;
+using syncable::SERVER_BOOKMARK_FAVICON;
+using syncable::SERVER_BOOKMARK_URL;
+using syncable::SERVER_CTIME;
+using syncable::SERVER_IS_BOOKMARK_OBJECT;
+using syncable::SERVER_IS_DEL;
+using syncable::SERVER_IS_DIR;
+using syncable::SERVER_MTIME;
+using syncable::SERVER_NAME;
+using syncable::SERVER_PARENT_ID;
+using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
+using syncable::SINGLETON_TAG;
+using syncable::SYNCER;
+using syncable::SyncName;
+using syncable::UNSANITIZED_NAME;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using std::string;
+using std::vector;
+
+// TODO(ncarter): Remove unique-in-parent title support and name conflicts.
+//
+// Returns the id of an existing, non-sanitized entry under |parent_id| whose
+// name collides with |server_name|, or syncable::kNullId if none exists.
+// Two cases are checked: an exact database-name match, and -- only when
+// |server_name| would be altered by OS-legality sanitization -- a sibling
+// whose UNSANITIZED_NAME compares equal to |server_name|.
+// static
+syncable::Id SyncerUtil::GetNameConflictingItemId(
+    syncable::BaseTransaction* trans,
+    const syncable::Id& parent_id,
+    const PathString& server_name) {
+
+  Entry same_path(trans, GET_BY_PARENTID_AND_DBNAME, parent_id, server_name);
+  if (same_path.good() && !same_path.GetName().HasBeenSanitized())
+    return same_path.Get(ID);
+  Name doctored_name(server_name);
+  doctored_name.db_value().MakeOSLegal();
+  if (!doctored_name.HasBeenSanitized())
+    return syncable::kNullId;
+  // The name required sanitization, so scan all siblings for one whose
+  // pre-sanitization name matches.
+  Directory::ChildHandles children;
+  trans->directory()->GetChildHandles(trans, parent_id, &children);
+  Directory::ChildHandles::iterator i = children.begin();
+  while (i != children.end()) {
+    Entry child_entry(trans, GET_BY_HANDLE, *i++);
+    CHECK(child_entry.good());
+    if (0 == ComparePathNames(child_entry.Get(UNSANITIZED_NAME), server_name))
+      return child_entry.Get(ID);
+  }
+  return syncable::kNullId;
+}
+
+// Fills |handles| with the metahandles of all unsynced entries in the
+// directory and returns their count.
+// static
+int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
+                                   vector<int64> *handles) {
+  trans->directory()->GetUnsyncedMetaHandles(trans, handles);
+  LOG_IF(INFO, handles->size() > 0)
+      << "Have " << handles->size() << " unsynced items.";
+  return handles->size();
+}
+
+// Rewrites |entry|'s id to |new_id|, reparenting any children onto the new
+// id and repairing the PREV_ID/NEXT_ID sibling links that still reference
+// the old id.  |children| is caller-provided scratch storage.  Fatal if an
+// entry with |new_id| already exists.
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+    syncable::WriteTransaction* trans,
+    syncable::MutableEntry* entry,
+    const syncable::Id& new_id,
+    syncable::Directory::ChildHandles* children) {
+  syncable::Id old_id = entry->Get(ID);
+  if (!entry->Put(ID, new_id)) {
+    Entry old_entry(trans, GET_BY_ID, new_id);
+    CHECK(old_entry.good());
+    LOG(FATAL) << "Attempt to change ID to " << new_id
+               << " conflicts with existing entry.\n\n"
+               << *entry << "\n\n" << old_entry;
+  }
+  if (entry->Get(IS_DIR)) {
+    // Get all child entries of the old id.
+    trans->directory()->GetChildHandles(trans, old_id, children);
+    Directory::ChildHandles::iterator i = children->begin();
+    while (i != children->end()) {
+      MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+      CHECK(child_entry.good());
+      CHECK(child_entry.Put(PARENT_ID, new_id));
+    }
+  }
+  // Update Id references on the previous and next nodes in the sibling
+  // order.  Do this by reinserting into the linked list; the first
+  // step in PutPredecessor is to Unlink from the existing order, which
+  // will overwrite the stale Id value from the adjacent nodes.
+  if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
+      entry->Get(PREV_ID) == old_id) {
+    // We just need a shallow update to |entry|'s fields since it is already
+    // self looped.
+    entry->Put(NEXT_ID, new_id);
+    entry->Put(PREV_ID, new_id);
+  } else {
+    entry->PutPredecessor(entry->Get(PREV_ID));
+  }
+}
+
+// Convenience overload that supplies its own scratch child-handle vector.
+// static
+void SyncerUtil::ChangeEntryIDAndUpdateChildren(
+    syncable::WriteTransaction* trans,
+    syncable::MutableEntry* entry,
+    const syncable::Id& new_id) {
+  syncable::Directory::ChildHandles children;
+  ChangeEntryIDAndUpdateChildren(trans, entry, new_id, &children);
+}
+
+// If |server_entry| is the server's echo of a commit whose response this
+// client never received, rewrites the corresponding local entry's id and
+// version to the server-assigned ones so the item is not committed twice.
+// static
+void SyncerUtil::AttemptReuniteLostCommitResponses(
+    syncable::WriteTransaction* trans,
+    const SyncEntity& server_entry,
+    const string& client_id) {
+  // If a commit succeeds, but the response does not come back fast enough
+  // then the syncer might assume that it was never committed.
+  // The server will track the client that sent up the original commit and
+  // return this in a get updates response.  When this matches a local
+  // uncommitted item, we must mutate our local item and version to pick up
+  // the committed version of the same item whose commit response was lost.
+  // There is however still a race condition if the server has not
+  // completed the commit by the time the syncer tries to get updates
+  // again.  To mitigate this, we need to have the server time out in
+  // a reasonable span, our commit batches have to be small enough
+  // to process within our HTTP response "assumed alive" time.
+
+  // We need to check if we have an entry that didn't get its server
+  // id updated correctly.  The server sends down a client ID
+  // and a local (negative) id.  If we have an entry by that
+  // description, we should update the ID and version to the
+  // server side ones to avoid multiple commits to the same name.
+  if (server_entry.has_originator_cache_guid() &&
+      server_entry.originator_cache_guid() == client_id) {
+    syncable::Id server_id = syncable::Id::CreateFromClientString(
+        server_entry.originator_client_item_id());
+    CHECK(!server_id.ServerKnows());
+    syncable::MutableEntry local_entry(trans, GET_BY_ID, server_id);
+
+    // If it exists, then our local client lost a commit response.
+    if (local_entry.good() && !local_entry.Get(IS_DEL)) {
+      int64 old_version = local_entry.Get(BASE_VERSION);
+      int64 new_version = server_entry.version();
+      CHECK(old_version <= 0);
+      CHECK(new_version > 0);
+      // Otherwise setting the base version could cause a consistency failure.
+      // An entry should never be version 0 and SYNCED.
+      CHECK(local_entry.Get(IS_UNSYNCED));
+
+      // Just a quick sanity check.
+      CHECK(!local_entry.Get(ID).ServerKnows());
+
+      LOG(INFO) << "Reuniting lost commit response IDs" <<
+        " server id: " << server_entry.id() << " local id: " <<
+        local_entry.Get(ID) << " new version: " << new_version;
+
+      local_entry.Put(BASE_VERSION, new_version);
+
+      ChangeEntryIDAndUpdateChildren(trans, &local_entry, server_entry.id());
+
+      // We need to continue normal processing on this update after we
+      // reunited its ID.
+    }
+    // !local_entry.Good() means we don't have a left behind entry for this
+    // ID.  We successfully committed before.  In the future we should get rid
+    // of this system and just have client side generated IDs as a whole.
+  }
+}
+
+// Attempts to apply a pending server update to |entry|.  On a NAME_CONFLICT
+// from the merge-free path below, tries to merge |entry| with the item that
+// owns the conflicting name via the session's ConflictResolver; returns
+// SUCCESS if the merge works and CONFLICT otherwise.
+// static
+UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
+    syncable::WriteTransaction* const trans,
+    syncable::MutableEntry* const entry,
+    SyncerSession* const session) {
+
+  syncable::Id conflicting_id;
+  UpdateAttemptResponse result =
+      AttemptToUpdateEntryWithoutMerge(trans, entry, session,
+                                       &conflicting_id);
+  if (result != NAME_CONFLICT) {
+    return result;
+  }
+  syncable::MutableEntry same_path(trans, syncable::GET_BY_ID, conflicting_id);
+  CHECK(same_path.good());
+
+  ConflictResolver* resolver = session->resolver();
+
+  if (resolver &&
+      resolver->AttemptItemMerge(trans, &same_path, entry)) {
+    return SUCCESS;
+  }
+  LOG(INFO) << "Not updating item, path collision. Update:\n" << *entry
+            << "\nSame Path:\n" << same_path;
+  return CONFLICT;
+}
+
+// Applies a pending server update to |entry| without attempting any merge.
+// Returns SUCCESS when the update was applied (or there was nothing to do),
+// CONFLICT when the entry is unsynced / has a bad parent / is a non-empty
+// directory being deleted, and NAME_CONFLICT (with |*conflicting_id| set,
+// if non-NULL) when another item owns the server-proposed name.
+// static
+UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
+    syncable::WriteTransaction* const trans,
+    syncable::MutableEntry* const entry,
+    SyncerSession* const session, syncable::Id* const conflicting_id) {
+
+  CHECK(entry->good());
+  if (!entry->Get(IS_UNAPPLIED_UPDATE))
+    return SUCCESS;  // No work to do.
+  syncable::Id id = entry->Get(ID);
+
+  if (entry->Get(IS_UNSYNCED)) {
+    // Local changes pending; applying the server copy would clobber them.
+    LOG(INFO) << "Skipping update, returning conflict for: " << id
+              << " ; it's unsynced.";
+    return CONFLICT;
+  }
+  if (!entry->Get(SERVER_IS_DEL)) {
+    syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
+    Entry parent(trans, GET_BY_ID, new_parent);
+    // A note on non-directory parents:
+    // We catch most unfixable tree invariant errors at update receipt time,
+    // however we deal with this case here because we may receive the child
+    // first then the illegal parent.  Instead of dealing with it twice in
+    // different ways we deal with it once here to reduce the amount of code
+    // and potential errors.
+    if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
+      return CONFLICT;
+    }
+    if (entry->Get(PARENT_ID) != new_parent) {
+      if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
+        LOG(INFO) << "Not updating item " << id << ", illegal new parent "
+          "(would cause loop).";
+        return CONFLICT;
+      }
+    }
+    PathString server_name = entry->Get(SERVER_NAME);
+    syncable::Id conflict_id =
+        SyncerUtil::GetNameConflictingItemId(trans,
+                                             entry->Get(SERVER_PARENT_ID),
+                                             server_name);
+    if (conflict_id != syncable::kNullId && conflict_id != id) {
+      if (conflicting_id)
+        *conflicting_id = conflict_id;
+      return NAME_CONFLICT;
+    }
+  } else if (entry->Get(IS_DIR)) {
+    Directory::ChildHandles handles;
+    trans->directory()->GetChildHandles(trans, id, &handles);
+    if (!handles.empty()) {
+      // If we have still-existing children, then we need to deal with
+      // them before we can process this change.
+      LOG(INFO) << "Not deleting directory; it's not empty " << *entry;
+      return CONFLICT;
+    }
+  }
+
+  // NOTE(review): |old_version| is never read after this point -- this local
+  // appears to be dead and is a candidate for removal.
+  int64 old_version = entry->Get(BASE_VERSION);
+  SyncerUtil::UpdateLocalDataFromServerData(trans, entry);
+
+  return SUCCESS;
+}
+
+// Copies the fields of a downloaded |server_entry| into |local_entry|'s
+// SERVER_* columns and marks the entry as an unapplied update when the
+// server version is newer than the local base version.  |name| is passed in
+// (rather than derived here) because of UTF8 conversion.
+// static
+void SyncerUtil::UpdateServerFieldsFromUpdate(
+    MutableEntry* local_entry,
+    const SyncEntity& server_entry,
+    const SyncName& name) {
+  if (server_entry.deleted()) {
+    // The server returns very lightweight replies for deletions, so
+    // we don't clobber a bunch of fields on delete.
+    local_entry->Put(SERVER_IS_DEL, true);
+    // Bump the server version past both known versions so the deletion
+    // always registers as newer.
+    local_entry->Put(SERVER_VERSION,
+        std::max(local_entry->Get(SERVER_VERSION),
+                 local_entry->Get(BASE_VERSION)) + 1L);
+    local_entry->Put(IS_UNAPPLIED_UPDATE, true);
+    return;
+  }
+
+  CHECK(local_entry->Get(ID) == server_entry.id())
+      << "ID Changing not supported here";
+  local_entry->Put(SERVER_PARENT_ID, server_entry.parent_id());
+  local_entry->PutServerName(name);
+  local_entry->Put(SERVER_VERSION, server_entry.version());
+  local_entry->Put(SERVER_CTIME,
+      ServerTimeToClientTime(server_entry.ctime()));
+  local_entry->Put(SERVER_MTIME,
+      ServerTimeToClientTime(server_entry.mtime()));
+  local_entry->Put(SERVER_IS_BOOKMARK_OBJECT, server_entry.has_bookmarkdata());
+  local_entry->Put(SERVER_IS_DIR, server_entry.IsFolder());
+  if (server_entry.has_singleton_tag()) {
+    PathString tag;
+    AppendUTF8ToPathString(server_entry.singleton_tag(), &tag);
+    local_entry->Put(SINGLETON_TAG, tag);
+  }
+  if (server_entry.has_bookmarkdata() && !server_entry.deleted()) {
+    const SyncEntity::BookmarkData& bookmark = server_entry.bookmarkdata();
+    if (bookmark.has_bookmark_url()) {
+      PathString url;
+      AppendUTF8ToPathString(bookmark.bookmark_url(), &url);
+      local_entry->Put(SERVER_BOOKMARK_URL, url);
+    }
+    if (bookmark.has_bookmark_favicon()) {
+      Blob favicon_blob;
+      SyncerProtoUtil::CopyProtoBytesIntoBlob(bookmark.bookmark_favicon(),
+                                              &favicon_blob);
+      local_entry->Put(SERVER_BOOKMARK_FAVICON, favicon_blob);
+    }
+  }
+  if (server_entry.has_position_in_parent()) {
+    local_entry->Put(SERVER_POSITION_IN_PARENT,
+                     server_entry.position_in_parent());
+  }
+
+  local_entry->Put(SERVER_IS_DEL, server_entry.deleted());
+  // We only mark the entry as unapplied if its version is greater than
+  // the local data.  If we're processing the update that corresponds to one
+  // of our commits we don't apply it as time differences may occur.
+  if (server_entry.version() > local_entry->Get(BASE_VERSION)) {
+    local_entry->Put(IS_UNAPPLIED_UPDATE, true);
+  }
+  ApplyExtendedAttributes(local_entry, server_entry);
+}
+
+// Replaces all of |local_entry|'s extended attributes with the set carried
+// by |server_entry| (existing local attributes are deleted first, even when
+// the server sends none).
+// static
+void SyncerUtil::ApplyExtendedAttributes(
+    syncable::MutableEntry* local_entry,
+    const SyncEntity& server_entry) {
+  local_entry->DeleteAllExtendedAttributes(local_entry->trans());
+  if (server_entry.has_extended_attributes()) {
+    const sync_pb::ExtendedAttributes & extended_attributes =
+        server_entry.extended_attributes();
+    for (int i = 0; i < extended_attributes.extendedattribute_size(); i++) {
+      PathString pathstring_key;
+      AppendUTF8ToPathString(
+          extended_attributes.extendedattribute(i).key(), &pathstring_key);
+      ExtendedAttributeKey key(local_entry->Get(META_HANDLE), pathstring_key);
+      MutableExtendedAttribute local_attribute(local_entry->trans(),
+                                               CREATE, key);
+      SyncerProtoUtil::CopyProtoBytesIntoBlob(
+          extended_attributes.extendedattribute(i).value(),
+          local_attribute.mutable_value());
+    }
+  }
+}
+
+// Creates a new update-item Entry iff no Entry already exists with the given
+// id.  (The lookup and the create are two separate MutableEntry
+// constructions within the same write transaction.)
+// static
+void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
+                                const syncable::Id& id) {
+  syncable::MutableEntry entry(trans, syncable::GET_BY_ID, id);
+  if (!entry.good()) {
+    syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
+                                     id);
+  }
+}
+
+// Returns true when |entry|'s nearest up-to-date local predecessor (skipping
+// siblings that are unsynced or unapplied) is the same item the server
+// position data would place before it.
+// static
+bool SyncerUtil::ServerAndLocalOrdersMatch(syncable::Entry* entry) {
+  // Find the closest up-to-date local sibling by walking the linked list.
+  syncable::Id local_up_to_date_predecessor = entry->Get(PREV_ID);
+  while (!local_up_to_date_predecessor.IsRoot()) {
+    Entry local_prev(entry->trans(), GET_BY_ID, local_up_to_date_predecessor);
+    if (!local_prev.good() || local_prev.Get(IS_DEL))
+      return false;
+    if (!local_prev.Get(IS_UNAPPLIED_UPDATE) && !local_prev.Get(IS_UNSYNCED))
+      break;
+    local_up_to_date_predecessor = local_prev.Get(PREV_ID);
+  }
+
+  // Now find the closest up-to-date sibling in the server order.
+  syncable::Id server_up_to_date_predecessor =
+      ComputePrevIdFromServerPosition(entry->trans(), entry,
+                                      entry->Get(SERVER_PARENT_ID));
+  return server_up_to_date_predecessor == local_up_to_date_predecessor;
+}
+
+// Returns true when |entry|'s client-side fields agree with its SERVER_*
+// fields: creation time, name, parent/dir/deleted bits, sibling order,
+// bookmark URL (for non-folder bookmarks) and -- except for directories --
+// modification time.  Both-deleted entries match trivially after the ctime
+// check.
+// static
+bool SyncerUtil::ServerAndLocalEntriesMatch(syncable::Entry* entry) {
+  if (!ClientAndServerTimeMatch(
+        entry->Get(CTIME), ClientTimeToServerTime(entry->Get(SERVER_CTIME)))) {
+    LOG(WARNING) << "Client and server time mismatch";
+    return false;
+  }
+  if (entry->Get(IS_DEL) && entry->Get(SERVER_IS_DEL))
+    return true;
+  // Name should exactly match here.
+  if (!entry->SyncNameMatchesServerName()) {
+    LOG(WARNING) << "Unsanitized name mismatch";
+    return false;
+  }
+
+  if (entry->Get(PARENT_ID) != entry->Get(SERVER_PARENT_ID) ||
+      entry->Get(IS_DIR) != entry->Get(SERVER_IS_DIR) ||
+      entry->Get(IS_DEL) != entry->Get(SERVER_IS_DEL)) {
+    LOG(WARNING) << "Metabit mismatch";
+    return false;
+  }
+
+  if (!ServerAndLocalOrdersMatch(entry)) {
+    LOG(WARNING) << "Server/local ordering mismatch";
+    return false;
+  }
+
+  if (entry->Get(IS_BOOKMARK_OBJECT)) {
+    if (!entry->Get(IS_DIR)) {
+      if (entry->Get(BOOKMARK_URL) != entry->Get(SERVER_BOOKMARK_URL)) {
+        LOG(WARNING) << "Bookmark URL mismatch";
+        return false;
+      }
+    }
+  }
+  if (entry->Get(IS_DIR))
+    return true;
+  // For historical reasons, a folder's MTIME changes when its contents
+  // change.
+  // TODO(ncarter): Remove the special casing of MTIME.
+  bool time_match = ClientAndServerTimeMatch(entry->Get(MTIME),
+      ClientTimeToServerTime(entry->Get(SERVER_MTIME)));
+  if (!time_match) {
+    LOG(WARNING) << "Time mismatch";
+  }
+  return time_match;
+}
+
+// Forks |entry| in two: the existing entry keeps the local data under a
+// freshly minted local id (with BASE_VERSION reset to 0), while a new
+// update-item entry is created under the original id carrying the SERVER_*
+// fields, which are then cleared from the original.
+// static
+void SyncerUtil::SplitServerInformationIntoNewEntry(
+    syncable::WriteTransaction* trans,
+    syncable::MutableEntry* entry) {
+  syncable::Id id = entry->Get(ID);
+  ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
+  entry->Put(BASE_VERSION, 0);
+
+  MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
+  CopyServerFields(entry, &new_entry);
+  ClearServerData(entry);
+
+  LOG(INFO) << "Splitting server information, local entry: " << *entry <<
+    " server entry: " << new_entry;
+}
+
+// This function is called on an entry when we can update the user-facing
+// data from the server data: it copies every SERVER_* field onto the
+// corresponding local field, repositions the entry per the server order,
+// and clears IS_UNAPPLIED_UPDATE.  Precondition: the entry is an unapplied
+// update with no unsynced local changes.
+// static
+void SyncerUtil::UpdateLocalDataFromServerData(
+    syncable::WriteTransaction* trans,
+    syncable::MutableEntry* entry) {
+  CHECK(!entry->Get(IS_UNSYNCED));
+  CHECK(entry->Get(IS_UNAPPLIED_UPDATE));
+  LOG(INFO) << "Updating entry : " << *entry;
+  entry->Put(IS_BOOKMARK_OBJECT, entry->Get(SERVER_IS_BOOKMARK_OBJECT));
+  // This strange dance around the IS_DEL flag
+  // avoids problems when setting the name.
+  if (entry->Get(SERVER_IS_DEL)) {
+    entry->Put(IS_DEL, true);
+  } else {
+    // Sanitize the server name for OS legality, then nudge it until it does
+    // not collide with a sibling.
+    Name name = Name::FromSyncName(entry->GetServerName());
+    name.db_value().MakeOSLegal();
+    bool was_doctored_before_made_noncolliding = name.HasBeenSanitized();
+    name.db_value().MakeNoncollidingForEntry(trans,
+                                             entry->Get(SERVER_PARENT_ID),
+                                             entry);
+    bool was_doctored = name.HasBeenSanitized();
+    if (was_doctored) {
+      // If we're changing the name of entry, either its name
+      // should be illegal, or some other entry should have an unsanitized
+      // name.  There should be a CHECK in every code path.
+      Entry blocking_entry(trans, GET_BY_PARENTID_AND_DBNAME,
+                           entry->Get(SERVER_PARENT_ID),
+                           name.value());
+      if (blocking_entry.good())
+        CHECK(blocking_entry.GetName().HasBeenSanitized());
+      else
+        CHECK(was_doctored_before_made_noncolliding);
+    }
+    CHECK(entry->PutParentIdAndName(entry->Get(SERVER_PARENT_ID), name))
+        << "Name Clash in UpdateLocalDataFromServerData: "
+        << *entry;
+    CHECK(entry->Put(IS_DEL, false));
+    Id new_predecessor = ComputePrevIdFromServerPosition(trans, entry,
+        entry->Get(SERVER_PARENT_ID));
+    CHECK(entry->PutPredecessor(new_predecessor))
+        << " Illegal predecessor after converting from server position.";
+  }
+
+  entry->Put(CTIME, entry->Get(SERVER_CTIME));
+  entry->Put(MTIME, entry->Get(SERVER_MTIME));
+  entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
+  entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
+  entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
+  entry->Put(BOOKMARK_URL, entry->Get(SERVER_BOOKMARK_URL));
+  entry->Put(BOOKMARK_FAVICON, entry->Get(SERVER_BOOKMARK_FAVICON));
+  entry->Put(IS_UNAPPLIED_UPDATE, false);
+}
+
+// Screens an unsynced entry before commit.  Returns VERIFY_UNSYNCABLE for
+// items that must not be sent to the server (the root or other permanent
+// items becoming unsynced, and never-committed deletions); VERIFY_OK
+// otherwise.
+// static
+VerifyCommitResult SyncerUtil::ValidateCommitEntry(
+    syncable::MutableEntry* entry) {
+  syncable::Id id = entry->Get(ID);
+  if (id == entry->Get(PARENT_ID)) {
+    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
+    // If the root becomes unsynced it can cause us problems.
+    LOG(ERROR) << "Root item became unsynced " << *entry;
+    return VERIFY_UNSYNCABLE;
+  }
+  if (entry->IsRoot()) {
+    LOG(ERROR) << "Permanent item became unsynced " << *entry;
+    return VERIFY_UNSYNCABLE;
+  }
+  if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
+    // Drop deleted uncommitted entries: the server never saw them, so there
+    // is nothing to delete remotely.
+    return VERIFY_UNSYNCABLE;
+  }
+  return VERIFY_OK;
+}
+
+// Appends |item|'s id, then the ids of its predecessors (walking PREV_ID
+// toward the front of the sibling list) to |commit_ids|, stopping at the
+// first sibling that fails |inclusion_filter| or is already in
+// |inserted_items|.  Returns false iff |item| itself was already present.
+// Deleted items contribute only themselves.
+// static
+bool SyncerUtil::AddItemThenPredecessors(
+    syncable::BaseTransaction* trans,
+    syncable::Entry* item,
+    syncable::IndexedBitField inclusion_filter,
+    syncable::MetahandleSet* inserted_items,
+    vector<syncable::Id>* commit_ids) {
+
+  if (!inserted_items->insert(item->Get(META_HANDLE)).second)
+    return false;
+  commit_ids->push_back(item->Get(ID));
+  if (item->Get(IS_DEL))
+    return true;  // Deleted items have no predecessors.
+
+  Id prev_id = item->Get(PREV_ID);
+  while (!prev_id.IsRoot()) {
+    Entry prev(trans, GET_BY_ID, prev_id);
+    CHECK(prev.good()) << "Bad id when walking predecessors.";
+    if (!prev.Get(inclusion_filter))
+      break;
+    if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
+      break;
+    commit_ids->push_back(prev_id);
+    prev_id = prev.Get(PREV_ID);
+  }
+  return true;
+}
+
+// Like AddItemThenPredecessors, but leaves the appended ids in commit order
+// (predecessors first, |item| last) by reversing the newly added suffix.
+// static
+void SyncerUtil::AddPredecessorsThenItem(
+    syncable::BaseTransaction* trans,
+    syncable::Entry* item,
+    syncable::IndexedBitField inclusion_filter,
+    syncable::MetahandleSet* inserted_items,
+    vector<syncable::Id>* commit_ids) {
+
+  vector<syncable::Id>::size_type initial_size = commit_ids->size();
+  if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
+                               commit_ids))
+    return;
+  // Reverse what we added to get the correct order.
+  std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
+}
+
+// TODO(ncarter): This is redundant to some code in GetCommitIdsCommand.
+// Unify them.
+//
+// Starting from |parent_id|, climbs the tree through ancestors the server
+// has never seen, appending each uncommitted parent (and its unsynced
+// predecessors) to |commit_ids|, then reverses the added suffix so parents
+// precede children in commit order.
+// NOTE(review): local name "intial_commit_ids_size" is a typo for
+// "initial_commit_ids_size".
+// static
+void SyncerUtil::AddUncommittedParentsAndTheirPredecessors(
+    syncable::BaseTransaction* trans,
+    syncable::MetahandleSet* inserted_items,
+    vector<syncable::Id>* commit_ids,
+    syncable::Id parent_id) {
+  vector<syncable::Id>::size_type intial_commit_ids_size = commit_ids->size();
+  // Climb the tree adding entries leaf -> root.
+  while (!parent_id.ServerKnows()) {
+    Entry parent(trans, GET_BY_ID, parent_id);
+    CHECK(parent.good()) << "Bad user-only parent in item path.";
+    if (!AddItemThenPredecessors(trans, &parent, IS_UNSYNCED, inserted_items,
+                                 commit_ids))
+      break;  // Parent was already present in |inserted_items|.
+    parent_id = parent.Get(PARENT_ID);
+  }
+  // Reverse what we added to get the correct order.
+  std::reverse(commit_ids->begin() + intial_commit_ids_size, commit_ids->end());
+}
+
+// static
+void SyncerUtil::MarkDeletedChildrenSynced(
+    const syncable::ScopedDirLookup &dir,
+    std::set<syncable::Id>* deleted_folders) {
+  // There are two options here.
+  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
+  //    of the deleted folders.
+  // 2. Take each folder and do a tree walk of all entries underneath it.
+  // #2 has a lower big O cost, but writing code to limit the time spent inside
+  // the transaction during each step is simpler with 1. Changing this decision
+  // may be sensible if this code shows up in profiling.
+  if (deleted_folders->empty())
+    return;
+  Directory::UnsyncedMetaHandles handles;
+  {
+    // Snapshot the unsynced handles in a short read transaction; each handle
+    // is then revisited in its own write transaction below.
+    ReadTransaction trans(dir, __FILE__, __LINE__);
+    dir->GetUnsyncedMetaHandles(&trans, &handles);
+  }
+  if (handles.empty())
+    return;
+  Directory::UnsyncedMetaHandles::iterator it;
+  for (it = handles.begin() ; it != handles.end() ; ++it) {
+    // Single transaction / entry we deal with.
+    WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+    MutableEntry entry(&trans, GET_BY_HANDLE, *it);
+    // The entry may have changed since the snapshot; only deleted, unsynced
+    // entries are of interest here.
+    if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
+      continue;
+    // Climb the (pre-delete) ancestor chain; if any ancestor is one of the
+    // folders whose deletion has already been committed, this child's local
+    // delete no longer needs to be synced separately.
+    syncable::Id id = entry.Get(PARENT_ID);
+    while (id != trans.root_id()) {
+      if (deleted_folders->find(id) != deleted_folders->end()) {
+        // We've synced the deletion of this deleted entry's parent.
+        entry.Put(IS_UNSYNCED, false);
+        break;
+      }
+      Entry parent(&trans, GET_BY_ID, id);
+      // Stop once the chain leaves deleted territory (or breaks).
+      if (!parent.good() || !parent.Get(IS_DEL))
+        break;
+      id = parent.Get(PARENT_ID);
+    }
+  }
+}
+
+// Classifies an update for an id we may or may not know about.
+// Returns VERIFY_UNDECIDED when a local entry with the same id already
+// exists (the caller must run the consistency checks), VERIFY_SKIP for a
+// deletion of something never seen, and VERIFY_SUCCESS otherwise.
+// static
+VerifyResult SyncerUtil::VerifyNewEntry(
+    const SyncEntity& entry,
+    syncable::MutableEntry* same_id,
+    const bool deleted) {
+  // An entry that already exists locally is not "new"; defer judgment.
+  if (same_id->good())
+    return VERIFY_UNDECIDED;
+  // Deleting an item we've never seen is a harmless no-op.
+  return deleted ? VERIFY_SKIP : VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; check here for updates that break
+// consistency rules.
+// static
+VerifyResult SyncerUtil::VerifyUpdateConsistency(
+    syncable::WriteTransaction* trans,
+    const SyncEntity& entry,
+    syncable::MutableEntry* same_id,
+    const bool deleted,
+    const bool is_directory,
+    const bool has_bookmark_data) {
+
+  CHECK(same_id->good());
+
+  // If the entry is a delete, we don't really need to worry at this stage.
+  if (deleted)
+    return VERIFY_SUCCESS;
+
+  if (same_id->Get(SERVER_VERSION) > 0) {
+    // Then we've had an update for this entry before.
+    // The server must not change an item's type (folder-ness / bookmark-ness)
+    // across updates.
+    if (is_directory != same_id->Get(SERVER_IS_DIR) ||
+        has_bookmark_data != same_id->Get(SERVER_IS_BOOKMARK_OBJECT)) {
+      if (same_id->Get(IS_DEL)) {  // if we've deleted the item, we don't care.
+        return VERIFY_SKIP;
+      } else {
+        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
+        LOG(ERROR) << " Entry: " << *same_id;
+        LOG(ERROR) << " Update: " << SyncEntityDebugString(entry);
+        return VERIFY_FAIL;
+      }
+    }
+
+    if (!deleted &&
+        (same_id->Get(SERVER_IS_DEL) ||
+         (!same_id->Get(IS_UNSYNCED) && same_id->Get(IS_DEL) &&
+          same_id->Get(BASE_VERSION) > 0))) {
+      // An undelete. The latter case in the above condition is for
+      // when the server does not give us an update following the
+      // commit of a delete, before undeleting. Undeletion is possible
+      // in the server's storage backend, so it's possible on the client,
+      // though not expected to be something that is commonly possible.
+      VerifyResult result =
+          SyncerUtil::VerifyUndelete(trans, entry, same_id);
+      if (VERIFY_UNDECIDED != result)
+        return result;
+    }
+  }
+  if (same_id->Get(BASE_VERSION) > 0) {
+    // We've committed this entry in the past.
+    // Same type-change check as above, but against the locally committed
+    // fields rather than the last-seen server fields.
+    if (is_directory != same_id->Get(IS_DIR) ||
+        has_bookmark_data != same_id->Get(IS_BOOKMARK_OBJECT)) {
+      LOG(ERROR) << "Server update doesn't agree with committed item. ";
+      LOG(ERROR) << " Entry: " << *same_id;
+      LOG(ERROR) << " Update: " << SyncEntityDebugString(entry);
+      return VERIFY_FAIL;
+    }
+    // A same-version update for an unmodified local entry should be
+    // byte-identical to what we have; anything else indicates a bug.
+    if (same_id->Get(BASE_VERSION) == entry.version() &&
+        !same_id->Get(IS_UNSYNCED) &&
+        !SyncerProtoUtil::Compare(*same_id, entry)) {
+      // TODO(sync): This constraint needs to be relaxed. For now it's OK to
+      // fail the verification and deal with it when we ApplyUpdates.
+      LOG(ERROR) << "Server update doesn't match local data with same "
+          "version. A bug should be filed. Entry: " << *same_id <<
+          "Update: " << SyncEntityDebugString(entry);
+      return VERIFY_FAIL;
+    }
+    // Ignore updates that are older than the server state we already track.
+    if (same_id->Get(SERVER_VERSION) > entry.version()) {
+      LOG(WARNING) << "We've already seen a more recent update from the server";
+      LOG(WARNING) << " Entry: " << *same_id;
+      LOG(WARNING) << " Update: " << SyncEntityDebugString(entry);
+      return VERIFY_SKIP;
+    }
+  }
+  return VERIFY_SUCCESS;
+}
+
+// Assumes we have an existing entry; verify an update that seems to be
+// expressing an 'undelete'
+// static
+VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
+                                        const SyncEntity& entry,
+                                        syncable::MutableEntry* same_id) {
+  CHECK(same_id->good());
+  LOG(INFO) << "Server update is attempting undelete. " << *same_id
+            << "Update:" << SyncEntityDebugString(entry);
+  // Move the old one aside and start over. It's too tricky to
+  // get the old one back into a state that would pass
+  // CheckTreeInvariants().
+  if (same_id->Get(IS_DEL)) {
+    // Give the local tombstone a fresh client-only id and reset its version
+    // fields, so the incoming update is treated as creating a new entry.
+    same_id->Put(ID, trans->directory()->NextId());
+    same_id->Put(BASE_VERSION, CHANGES_VERSION);
+    same_id->Put(SERVER_VERSION, 0);
+    return VERIFY_SUCCESS;
+  }
+  if (entry.version() < same_id->Get(SERVER_VERSION)) {
+    LOG(WARNING) << "Update older than current server version for" <<
+        *same_id << "Update:" << SyncEntityDebugString(entry);
+    return VERIFY_SUCCESS;  // Expected in new sync protocol.
+  }
+  return VERIFY_UNDECIDED;
+}
+
+// Computes, by linear scan over the local children of |parent_id|, the id of
+// the item that should locally precede |update_item| according to the
+// server-supplied ordering (SERVER_POSITION_IN_PARENT, with ID as the
+// tiebreaker). Returns the null/root id if |update_item| belongs first.
+// static
+syncable::Id SyncerUtil::ComputePrevIdFromServerPosition(
+    syncable::BaseTransaction* trans,
+    syncable::Entry* update_item,
+    const syncable::Id& parent_id) {
+  const int64 position_in_parent = update_item->Get(SERVER_POSITION_IN_PARENT);
+
+  // TODO(ncarter): This computation is linear in the number of children, but
+  // we could make it logarithmic if we kept an index on server position.
+  syncable::Id closest_sibling;
+  syncable::Id next_id = trans->directory()->GetFirstChildId(trans, parent_id);
+  while (!next_id.IsRoot()) {
+    syncable::Entry candidate(trans, GET_BY_ID, next_id);
+    if (!candidate.good()) {
+      LOG(WARNING) << "Should not happen";
+      return closest_sibling;
+    }
+    next_id = candidate.Get(NEXT_ID);
+
+    // Defensively prevent self-comparison.
+    if (candidate.Get(META_HANDLE) == update_item->Get(META_HANDLE)) {
+      continue;
+    }
+
+    // Ignore unapplied updates -- they might not even be server-siblings.
+    if (candidate.Get(IS_UNAPPLIED_UPDATE)) {
+      continue;
+    }
+
+    // Unsynced items don't have a valid server position.
+    if (!candidate.Get(IS_UNSYNCED)) {
+      // If |candidate| is after |update_item| according to the server
+      // ordering, then we're done. ID is the tiebreaker.
+      // (Parenthesized explicitly: the intent is A || (B && C).)
+      if ((candidate.Get(SERVER_POSITION_IN_PARENT) > position_in_parent) ||
+          ((candidate.Get(SERVER_POSITION_IN_PARENT) == position_in_parent) &&
+           (candidate.Get(ID) > update_item->Get(ID)))) {
+        return closest_sibling;
+      }
+    }
+
+    // We can't trust the SERVER_ fields of unsynced items, but they are
+    // potentially legitimate local predecessors. In the case where
+    // |update_item| and an unsynced item wind up in the same insertion
+    // position, we need to choose how to order them. The following check puts
+    // the unapplied update first; removing it would put the unsynced item(s)
+    // first.
+    if (candidate.Get(IS_UNSYNCED)) {
+      continue;
+    }
+
+    // |update_item| is considered to be somewhere after |candidate|, so
+    // store it as the upper bound.
+    closest_sibling = candidate.Get(ID);
+  }
+
+  return closest_sibling;
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_util.h b/chrome/browser/sync/engine/syncer_util.h
new file mode 100644
index 0000000..91e0c814
--- /dev/null
+++ b/chrome/browser/sync/engine/syncer_util.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utility functions manipulating syncable::Entries, intended for use by
+// the syncer.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+class SyncerSession;
+class SyncEntity;
+
+
+// A grab-bag of static helpers the syncer uses to manipulate
+// syncable::Entries: verifying incoming updates, building commit id lists,
+// and applying server data to local entries. Not instantiable.
+class SyncerUtil {
+ public:
+  // TODO(ncarter): Remove unique-in-parent title support and name conflicts.
+  static syncable::Id GetNameConflictingItemId(
+      syncable::BaseTransaction* trans,
+      const syncable::Id& parent_id,
+      const PathString& server_name);
+
+  static void ChangeEntryIDAndUpdateChildren(
+      syncable::WriteTransaction* trans,
+      syncable::MutableEntry* entry,
+      const syncable::Id& new_id,
+      syncable::Directory::ChildHandles* children);
+
+  // Returns the number of unsynced entries.
+  static int GetUnsyncedEntries(syncable::BaseTransaction* trans,
+                                std::vector<int64> *handles);
+
+  // Overload that looks up the children itself.
+  static void ChangeEntryIDAndUpdateChildren(syncable::WriteTransaction* trans,
+                                             syncable::MutableEntry* entry,
+                                             const syncable::Id& new_id);
+
+  static void AttemptReuniteLostCommitResponses(
+      syncable::WriteTransaction* trans,
+      const SyncEntity& server_entry,
+      const std::string& client_id);
+
+  static UpdateAttemptResponse AttemptToUpdateEntry(
+      syncable::WriteTransaction* const trans,
+      syncable::MutableEntry* const entry,
+      SyncerSession* const session);
+
+  static UpdateAttemptResponse AttemptToUpdateEntryWithoutMerge(
+      syncable::WriteTransaction* const trans,
+      syncable::MutableEntry* const entry,
+      SyncerSession* const session, syncable::Id* const conflicting_id);
+
+  // Pass in name to avoid redundant UTF8 conversion.
+  static void UpdateServerFieldsFromUpdate(
+      syncable::MutableEntry* local_entry,
+      const SyncEntity& server_entry,
+      const syncable::SyncName& name);
+
+  static void ApplyExtendedAttributes(
+      syncable::MutableEntry* local_entry,
+      const SyncEntity& server_entry);
+
+  // Creates a new Entry iff no Entry exists with the given id.
+  static void CreateNewEntry(syncable::WriteTransaction *trans,
+                             const syncable::Id& id);
+
+  static bool ServerAndLocalEntriesMatch(syncable::Entry* entry);
+
+  static void SplitServerInformationIntoNewEntry(
+      syncable::WriteTransaction* trans,
+      syncable::MutableEntry* entry);
+
+  // This function is called on an entry when we can update the user-facing
+  // data from the server data.
+  static void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
+                                            syncable::MutableEntry* entry);
+
+  static VerifyCommitResult ValidateCommitEntry(syncable::MutableEntry* entry);
+
+  // Classifies an incoming update whose id may not exist locally yet:
+  // VERIFY_UNDECIDED if |same_id| already exists, VERIFY_SKIP for deletions
+  // of unknown items, VERIFY_SUCCESS otherwise.
+  static VerifyResult VerifyNewEntry(const SyncEntity& entry,
+                                     syncable::MutableEntry* same_id,
+                                     const bool deleted);
+
+  // Assumes we have an existing entry; check here for updates that break
+  // consistency rules.
+  static VerifyResult VerifyUpdateConsistency(syncable::WriteTransaction* trans,
+                                              const SyncEntity& entry,
+                                              syncable::MutableEntry* same_id,
+                                              const bool deleted,
+                                              const bool is_directory,
+                                              const bool is_bookmark);
+
+  // Assumes we have an existing entry; verify an update that seems to be
+  // expressing an 'undelete'
+  static VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
+                                     const SyncEntity& entry,
+                                     syncable::MutableEntry* same_id);
+
+  // Compute a local predecessor position for |update_item|. The position
+  // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
+  // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
+  // children of |parent_id|.
+  static syncable::Id ComputePrevIdFromServerPosition(
+      syncable::BaseTransaction* trans,
+      syncable::Entry* update_item,
+      const syncable::Id& parent_id);
+
+  // Append |item|, followed by a chain of its predecessors selected by
+  // |inclusion_filter|, to the |commit_ids| vector and tag them as included by
+  // storing in the set |inserted_items|. |inclusion_filter| (typically one of
+  // IS_UNAPPLIED_UPDATE or IS_UNSYNCED) selects which type of predecessors to
+  // include. Returns true if |item| was added, and false if it was already in
+  // the list.
+  //
+  // Use AddPredecessorsThenItem instead of this method if you want the
+  // item to be the last, rather than first, item appended.
+  static bool AddItemThenPredecessors(
+      syncable::BaseTransaction* trans,
+      syncable::Entry* item,
+      syncable::IndexedBitField inclusion_filter,
+      syncable::MetahandleSet* inserted_items,
+      std::vector<syncable::Id>* commit_ids);
+
+  // Exactly like AddItemThenPredecessors, except items are appended in the
+  // reverse (and generally more useful) order: a chain of predecessors from
+  // far to near, and finally the item.
+  static void AddPredecessorsThenItem(
+      syncable::BaseTransaction* trans,
+      syncable::Entry* item,
+      syncable::IndexedBitField inclusion_filter,
+      syncable::MetahandleSet* inserted_items,
+      std::vector<syncable::Id>* commit_ids);
+
+  // Appends every not-yet-server-known ancestor of |parent_id| (and those
+  // ancestors' unsynced predecessors) to |commit_ids|, root-most first.
+  static void AddUncommittedParentsAndTheirPredecessors(
+      syncable::BaseTransaction* trans,
+      syncable::MetahandleSet* inserted_items,
+      std::vector<syncable::Id>* commit_ids,
+      syncable::Id parent_id);
+
+  // Clears IS_UNSYNCED on deleted entries whose deletion is already covered
+  // by a committed deletion of one of the folders in |deleted_folders|.
+  static void MarkDeletedChildrenSynced(
+      const syncable::ScopedDirLookup &dir,
+      std::set<syncable::Id>* deleted_folders);
+
+  // Examine the up-to-date predecessors of this item according to the server
+  // position, and then again according to the local position. Return true
+  // if they match. For an up-to-date item, this should be the case.
+  static bool ServerAndLocalOrdersMatch(syncable::Entry* entry);
+
+ private:
+  // Private ctor/dtor since this class shouldn't be instantiated.
+  SyncerUtil() {}
+  virtual ~SyncerUtil() {}
+  DISALLOW_COPY_AND_ASSIGN(SyncerUtil);
+};
+
+// NOTE(review): macro is spelled OS_WINDOWS here rather than the usual
+// Chromium OS_WIN -- confirm which one this codebase defines.
+#ifndef OS_WINDOWS
+
+// The sync server speaks Java times (ms since the epoch) while time.h on
+// Linux and Mac deals in seconds since the epoch, so conversion between the
+// two timescales is a divide/multiply by 1000.
+inline int64 ServerTimeToClientTime(int64 server_time) {
+  return server_time / GG_LONGLONG(1000);
+}
+
+inline int64 ClientTimeToServerTime(int64 client_time) {
+  return client_time * GG_LONGLONG(1000);
+}
+
+// As we truncate server times on the client for posix and on the server for
+// windows we need two ClientAndServerTimeMatch functions.
+inline bool ClientAndServerTimeMatch(int64 client_time, int64 server_time) {
+  // Compare at the coarser timescale (client)
+  return client_time == ServerTimeToClientTime(server_time);
+}
+#else
+// The sync server uses Java Times (ms since 1970)
+// and the client uses FILETIMEs (ns since 1601) so we need to convert
+// between the timescales.
+inline int64 ServerTimeToClientTime(int64 server_time) {
+  return server_time * GG_LONGLONG(10000) + GG_LONGLONG(116444736000000000);
+}
+
+inline int64 ClientTimeToServerTime(int64 client_time) {
+  return (client_time - GG_LONGLONG(116444736000000000)) / GG_LONGLONG(10000);
+}
+
+inline bool ClientAndServerTimeMatch(int64 client_time, int64 server_time) {
+  // Compare at the coarser timescale (server)
+  return ClientTimeToServerTime(client_time) == server_time;
+}
+#endif
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
diff --git a/chrome/browser/sync/engine/syncproto.h b/chrome/browser/sync/engine/syncproto.h
new file mode 100644
index 0000000..fe05a75
--- /dev/null
+++ b/chrome/browser/sync/engine/syncproto.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Wrappers to help us work with ids and protobuffers.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
+#define CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
+
+#include "chrome/browser/sync/protocol/sync.pb.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+namespace browser_sync {
+
+// Mixin that layers typed syncable::Id accessors over a protobuf message
+// type that stores its id in a string field (id_string).
+template<class Base>
+class IdWrapper : public Base {
+ public:
+  syncable::Id id() const {
+    return syncable::Id::CreateFromServerId(Base::id_string());
+  }
+  void set_id(const syncable::Id& id) {
+    Base::set_id_string(id.GetServerId());
+  }
+};
+
+// These wrapper classes contain no data, so their super
+// classes can be cast to them directly.
+class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
+ public:
+  // Typed accessors over the raw parent_id_string field.
+  void set_parent_id(const syncable::Id& id) {
+    set_parent_id_string(id.GetServerId());
+  }
+  syncable::Id parent_id() const {
+    return syncable::Id::CreateFromServerId(parent_id_string());
+  }
+  // Typed accessors over old_parent_id; qualified to avoid shadowing the
+  // protobuf-generated members of the same name.
+  void set_old_parent_id(const syncable::Id& id) {
+    IdWrapper<sync_pb::SyncEntity>::set_old_parent_id(
+        id.GetServerId());
+  }
+  syncable::Id old_parent_id() const {
+    return syncable::Id::CreateFromServerId(
+        sync_pb::SyncEntity::old_parent_id());
+  }
+  // Binary predicate helper to determine whether an Entity represents a folder
+  // or non-folder object. Use this instead of checking these properties
+  // directly, because the addition of bookmarks to the protobuf schema
+  // makes the check slightly more tricky.
+  bool IsFolder() const {
+    return (!has_bookmarkdata() || bookmarkdata().bookmark_folder());
+  }
+};
+
+// Wrapped only to pick up the typed id()/set_id() accessors from IdWrapper;
+// adds no state of its own.
+class CommitResponse_EntryResponse
+    : public IdWrapper<sync_pb::CommitResponse_EntryResponse> {
+};
+
+class ClientToServerMessage : public sync_pb::ClientToServerMessage {
+ public:
+  ClientToServerMessage() {
+    // Setting the field to its own (default) value marks it explicitly
+    // present, so has_protocol_version() is true and the version is always
+    // serialized on the wire (see syncproto_unittest.cc).
+    set_protocol_version(protocol_version());
+  }
+};
+
+typedef sync_pb::CommitMessage CommitMessage;
+typedef sync_pb::ClientToServerResponse ClientToServerResponse;
+typedef sync_pb::CommitResponse CommitResponse;
+typedef sync_pb::GetUpdatesResponse GetUpdatesResponse;
+typedef sync_pb::GetUpdatesMessage GetUpdatesMessage;
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCPROTO_H_
diff --git a/chrome/browser/sync/engine/syncproto_unittest.cc b/chrome/browser/sync/engine/syncproto_unittest.cc
new file mode 100644
index 0000000..951b852
--- /dev/null
+++ b/chrome/browser/sync/engine/syncproto_unittest.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace browser_sync {
+
+// Fixture for tests of the protobuf wrapper classes in syncproto.h.
+class SyncProtoTest : public testing::Test {
+};
+
+TEST_F(SyncProtoTest, ProtocolVersionPresent) {
+  // The ClientToServerMessage constructor must set protocol_version
+  // explicitly so the field is present on every outgoing message.
+  ClientToServerMessage csm;
+  EXPECT_TRUE(csm.has_protocol_version());
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/update_applicator.cc b/chrome/browser/sync/engine/update_applicator.cc
new file mode 100644
index 0000000..17e6b36
--- /dev/null
+++ b/chrome/browser/sync/engine/update_applicator.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/update_applicator.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+// Constructs an applicator over the handle range [begin, end); |session|
+// supplies the application context and receives the results later via
+// SaveProgressIntoSessionState().
+UpdateApplicator::UpdateApplicator(SyncerSession* session,
+                                   const vi64iter& begin,
+                                   const vi64iter& end)
+    : session_(session), begin_(begin), end_(end), pointer_(begin),
+      progress_(false) {
+  size_t item_count = end - begin;
+  LOG(INFO) << "UpdateApplicator created for " << item_count << " items.";
+  // Every item can succeed at most once, so reserve up front.
+  successful_ids_.reserve(item_count);
+}
+
+// Returns true if there's more to do. Applies one pending update per call;
+// successfully applied handles are removed from the working range by a
+// swap-with-last trick, and the range is re-scanned repeatedly until a full
+// pass makes no progress.
+bool UpdateApplicator::AttemptOneApplication(
+    syncable::WriteTransaction* trans) {
+  // If there are no updates left to consider, we're done.
+  if (end_ == begin_)
+    return false;
+  if (pointer_ == end_) {
+    // Finished a pass. Only rescan if something succeeded last pass --
+    // a success may have unblocked items that failed earlier.
+    if (!progress_)
+      return false;
+
+    LOG(INFO) << "UpdateApplicator doing additional pass.";
+    pointer_ = begin_;
+    progress_ = false;
+
+    // Clear the tracked failures to avoid double-counting.
+    conflicting_ids_.clear();
+    blocked_ids_.clear();
+  }
+  syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE, *pointer_);
+  UpdateAttemptResponse updateResponse =
+      SyncerUtil::AttemptToUpdateEntry(trans, &entry, session_);
+  switch (updateResponse) {
+    case SUCCESS:
+      // Shrink the pending range by moving the last pending handle into the
+      // current slot; |pointer_| is deliberately not advanced so the
+      // swapped-in handle is attempted next.
+      --end_;
+      *pointer_ = *end_;
+      progress_ = true;
+      successful_ids_.push_back(entry.Get(syncable::ID));
+      break;
+    case CONFLICT:
+      pointer_++;
+      conflicting_ids_.push_back(entry.Get(syncable::ID));
+      break;
+    case BLOCKED:
+      pointer_++;
+      blocked_ids_.push_back(entry.Get(syncable::ID));
+      break;
+    // NOTE(review): no default case -- any other UpdateAttemptResponse value
+    // would leave |pointer_| unadvanced; confirm the enum has only these
+    // three values.
+  }
+  LOG(INFO) << "Apply Status for " << entry.Get(syncable::META_HANDLE)
+            << " is " << updateResponse;
+
+  return true;
+}
+
+// True only when every update was applied: the pending range has shrunk to
+// empty and no items remain recorded as conflicting or blocked.
+bool UpdateApplicator::AllUpdatesApplied() const {
+  if (!conflicting_ids_.empty())
+    return false;
+  if (!blocked_ids_.empty())
+    return false;
+  return begin_ == end_;
+}
+
+// Copies the final per-item outcomes into the session. Must only be called
+// once application is finished (AttemptOneApplication has returned false).
+void UpdateApplicator::SaveProgressIntoSessionState() {
+  DCHECK(begin_ == end_ || ((pointer_ == end_) && !progress_))
+      << "SaveProgress called before updates exhausted.";
+
+  // For each outcome, also erase the item from the other session buckets so
+  // an item ends up in exactly one of conflict/blocked/success.
+  vector<syncable::Id>::const_iterator i;
+  for (i = conflicting_ids_.begin(); i != conflicting_ids_.end(); ++i) {
+    session_->EraseBlockedItem(*i);
+    session_->AddCommitConflict(*i);
+    session_->AddAppliedUpdate(CONFLICT, *i);
+  }
+  for (i = blocked_ids_.begin(); i != blocked_ids_.end(); ++i) {
+    session_->AddBlockedItem(*i);
+    session_->EraseCommitConflict(*i);
+    session_->AddAppliedUpdate(BLOCKED, *i);
+  }
+  for (i = successful_ids_.begin(); i != successful_ids_.end(); ++i) {
+    session_->EraseCommitConflict(*i);
+    session_->EraseBlockedItem(*i);
+    session_->AddAppliedUpdate(SUCCESS, *i);
+  }
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/update_applicator.h b/chrome/browser/sync/engine/update_applicator.h
new file mode 100644
index 0000000..3d500171
--- /dev/null
+++ b/chrome/browser/sync/engine/update_applicator.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// An UpdateApplicator is used to iterate over a number of unapplied
+// updates, applying them to the client using the given syncer session.
+//
+// UpdateApplicator might resemble an iterator, but it actually keeps retrying
+// failed updates until no remaining updates can be successfully applied.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
+#define CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
+
+#include <vector>
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace syncable {
+class Id;
+class WriteTransaction;
+} // namespace syncable
+
+namespace browser_sync {
+
+class SyncerSession;
+
+class UpdateApplicator {
+ public:
+  // Iterator over the metahandles of the updates to apply.
+  typedef std::vector<int64>::iterator vi64iter;
+
+  UpdateApplicator(SyncerSession* session, const vi64iter& begin,
+                   const vi64iter& end);
+  // returns true if there's more we can do.
+  bool AttemptOneApplication(syncable::WriteTransaction* trans);
+  // return true if we've applied all updates.
+  bool AllUpdatesApplied() const;
+
+  // This class does not automatically save its progress into the
+  // SyncerSession -- to get that to happen, call this method after
+  // update application is finished (i.e., when AttemptOneApplication
+  // stops returning true).
+  void SaveProgressIntoSessionState();
+
+ private:
+  SyncerSession* const session_;
+  // [begin_, end_) is the range of still-pending handles; successes shrink
+  // end_, while pointer_ marks the next handle to attempt this pass.
+  vi64iter const begin_;
+  vi64iter end_;
+  vi64iter pointer_;
+  // Whether any item succeeded during the current pass.
+  bool progress_;
+
+  // Track the result of the various items.
+  std::vector<syncable::Id> conflicting_ids_;
+  std::vector<syncable::Id> blocked_ids_;
+  std::vector<syncable::Id> successful_ids_;
+};
+
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
new file mode 100644
index 0000000..dee544d
--- /dev/null
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -0,0 +1,102 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "chrome/browser/sync/engine/verify_updates_command.h"
+
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace browser_sync {
+
+using syncable::ScopedDirLookup;
+using syncable::SyncName;
+using syncable::WriteTransaction;
+
+using syncable::GET_BY_ID;
+using syncable::SYNCER;
+
+// No state beyond the SyncerCommand base; nothing to set up or tear down.
+VerifyUpdatesCommand::VerifyUpdatesCommand() {}
+VerifyUpdatesCommand::~VerifyUpdatesCommand() {}
+
+// Verifies every entry in the session's GetUpdates response, recording each
+// verdict back into the session via AddVerifyResult.
+void VerifyUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+  LOG(INFO) << "Beginning Update Verification";
+  ScopedDirLookup dir(session->dirman(), session->account_name());
+  if (!dir.good()) {
+    LOG(ERROR) << "Scoped dir lookup failed!";
+    return;
+  }
+  WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+  GetUpdatesResponse updates = session->update_response().get_updates();
+  int update_count = updates.entries().size();
+
+  LOG(INFO) << update_count << " entries to verify";
+  for (int i = 0; i < update_count; i++) {
+    // SyncEntity adds no data members over sync_pb::SyncEntity (see
+    // syncproto.h), which is what makes this cast workable.
+    const SyncEntity entry =
+        *reinterpret_cast<const SyncEntity *>(&(updates.entries(i)));
+    // Needs to be done separately in order to make sure the update processing
+    // still happens like normal. We should really just use one type of
+    // ID in fact, there isn't actually a need for server_knows and not IDs.
+    SyncerUtil::AttemptReuniteLostCommitResponses(&trans, entry,
+        trans.directory()->cache_guid());
+    VerifyResult result = VerifyUpdate(&trans, entry);
+    session->AddVerifyResult(result, entry);
+  }
+}
+
+// Runs the verification pipeline for one update: structural checks on the
+// ids and name, then new-entry classification, then (for known ids) the
+// consistency checks. Returns the final verdict for the update.
+VerifyResult VerifyUpdatesCommand::VerifyUpdate(
+    syncable::WriteTransaction* trans, const SyncEntity& entry) {
+  syncable::Id id = entry.id();
+
+  const bool deleted = entry.has_deleted() && entry.deleted();
+  const bool is_directory = entry.IsFolder();
+  const bool is_bookmark = entry.has_bookmarkdata();
+
+  // Structural sanity: both the item and its parent must carry server ids.
+  if (!id.ServerKnows()) {
+    LOG(ERROR) << "Illegal negative id in received updates";
+    return VERIFY_FAIL;
+  }
+  if (!entry.parent_id().ServerKnows()) {
+    LOG(ERROR) << "Illegal parent id in received updates";
+    return VERIFY_FAIL;
+  }
+  {
+    // Non-deleted updates must carry a non-empty name.
+    SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
+    if ((name.value().empty() || name.non_unique_value().empty()) &&
+        !deleted) {
+      LOG(ERROR) << "Zero length name in non-deleted update";
+      return VERIFY_FAIL;
+    }
+  }
+
+  syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+  VerifyResult result = SyncerUtil::VerifyNewEntry(entry, &same_id, deleted);
+
+  // A deletion of an item we know about needs no further checks.
+  if (VERIFY_UNDECIDED == result && deleted)
+    result = VERIFY_SUCCESS;
+
+  // If we have an existing entry, we check here for updates that break
+  // consistency rules.
+  if (VERIFY_UNDECIDED == result) {
+    result = SyncerUtil::VerifyUpdateConsistency(trans, entry, &same_id,
+        deleted, is_directory, is_bookmark);
+  }
+
+  if (VERIFY_UNDECIDED == result)
+    return VERIFY_SUCCESS;  // No news is good news.
+  return result;  // This might be VERIFY_SUCCESS as well.
+}
+
+} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/verify_updates_command.h b/chrome/browser/sync/engine/verify_updates_command.h
new file mode 100644
index 0000000..c7970e9
--- /dev/null
+++ b/chrome/browser/sync/engine/verify_updates_command.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
+
+#include "base/basictypes.h"
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+class WriteTransaction;
+}
+
+namespace browser_sync {
+
+// Verifies the response from a GetUpdates request. All invalid updates
+// will be noted in the SyncerSession after this command is executed.
+// Verifies the response from a GetUpdates request. All invalid updates
+// will be noted in the SyncerSession after this command is executed.
+class VerifyUpdatesCommand : public SyncerCommand {
+ public:
+  VerifyUpdatesCommand();
+  virtual ~VerifyUpdatesCommand();
+  // Verifies every entry in the session's update response.
+  virtual void ExecuteImpl(SyncerSession *session);
+
+  // Verdict for a single update; public so the execution loop (and tests)
+  // can call it per entry.
+  VerifyResult VerifyUpdate(syncable::WriteTransaction* trans,
+                            const SyncEntity& entry);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VerifyUpdatesCommand);
+};
+} // namespace browser_sync
+
+#endif // CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_