-rw-r--r--  chrome/browser/sync/engine/all_status.h | 7
-rw-r--r--  chrome/browser/sync/engine/apply_updates_command.h | 2
-rw-r--r--  chrome/browser/sync/engine/apply_updates_command_unittest.cc | 1
-rw-r--r--  chrome/browser/sync/engine/auth_watcher.h | 20
-rw-r--r--  chrome/browser/sync/engine/authenticator.cc | 8
-rw-r--r--  chrome/browser/sync/engine/authenticator.h | 7
-rw-r--r--  chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc | 35
-rw-r--r--  chrome/browser/sync/engine/build_and_process_conflict_sets_command.h | 4
-rw-r--r--  chrome/browser/sync/engine/build_commit_command.cc | 15
-rw-r--r--  chrome/browser/sync/engine/change_reorder_buffer.h | 10
-rw-r--r--  chrome/browser/sync/engine/client_command_channel.h | 10
-rw-r--r--  chrome/browser/sync/engine/conflict_resolution_view.cc | 7
-rw-r--r--  chrome/browser/sync/engine/conflict_resolution_view.h | 8
-rw-r--r--  chrome/browser/sync/engine/conflict_resolver.cc | 64
-rw-r--r--  chrome/browser/sync/engine/conflict_resolver.h | 10
-rw-r--r--  chrome/browser/sync/engine/download_updates_command.cc | 2
-rw-r--r--  chrome/browser/sync/engine/download_updates_command.h | 2
-rw-r--r--  chrome/browser/sync/engine/get_commit_ids_command.cc | 14
-rw-r--r--  chrome/browser/sync/engine/get_commit_ids_command.h | 24
-rw-r--r--  chrome/browser/sync/engine/model_changing_syncer_command.cc | 2
-rw-r--r--  chrome/browser/sync/engine/post_commit_message_command.cc | 4
-rw-r--r--  chrome/browser/sync/engine/post_commit_message_command.h | 2
-rw-r--r--  chrome/browser/sync/engine/process_commit_response_command.cc | 16
-rw-r--r--  chrome/browser/sync/engine/process_commit_response_command.h | 1
-rw-r--r--  chrome/browser/sync/engine/process_updates_command.cc | 40
-rw-r--r--  chrome/browser/sync/engine/resolve_conflicts_command.h | 6
-rw-r--r--  chrome/browser/sync/engine/sync_cycle_state.h | 27
-rw-r--r--  chrome/browser/sync/engine/sync_process_state.cc | 24
-rw-r--r--  chrome/browser/sync/engine/sync_process_state.h | 46
-rw-r--r--  chrome/browser/sync/engine/syncapi.cc | 69
-rw-r--r--  chrome/browser/sync/engine/syncer.cc | 10
-rw-r--r--  chrome/browser/sync/engine/syncer.h | 15
-rw-r--r--  chrome/browser/sync/engine/syncer_command.cc | 4
-rw-r--r--  chrome/browser/sync/engine/syncer_command.h | 21
-rw-r--r--  chrome/browser/sync/engine/syncer_end_command.h | 5
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util.cc | 18
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util.h | 24
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util_unittest.cc | 8
-rw-r--r--  chrome/browser/sync/engine/syncer_session.h | 21
-rw-r--r--  chrome/browser/sync/engine/syncer_status.h | 39
-rw-r--r--  chrome/browser/sync/engine/syncer_thread.cc | 31
-rw-r--r--  chrome/browser/sync/engine/syncer_thread.h | 32
-rw-r--r--  chrome/browser/sync/engine/syncer_thread_unittest.cc | 9
-rw-r--r--  chrome/browser/sync/engine/syncer_types.h | 23
-rw-r--r--  chrome/browser/sync/engine/syncer_unittest.cc | 168
-rw-r--r--  chrome/browser/sync/engine/syncer_util.cc | 52
-rw-r--r--  chrome/browser/sync/engine/syncer_util.h | 9
-rw-r--r--  chrome/browser/sync/engine/syncproto.h | 4
-rw-r--r--  chrome/browser/sync/engine/update_applicator.cc | 2
-rw-r--r--  chrome/browser/sync/engine/update_applicator.h | 12
-rw-r--r--  chrome/browser/sync/engine/verify_updates_command.cc | 4
-rw-r--r--  chrome/browser/sync/engine/verify_updates_command.h | 7
52 files changed, 486 insertions, 519 deletions
diff --git a/chrome/browser/sync/engine/all_status.h b/chrome/browser/sync/engine/all_status.h
index e7fb0ba..bd3e1a9 100644
--- a/chrome/browser/sync/engine/all_status.h
+++ b/chrome/browser/sync/engine/all_status.h
@@ -1,11 +1,10 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
//
-// The all status object watches various sync engine components and aggregates
+// The AllStatus object watches various sync engine components and aggregates
// the status of all of them into one place.
-//
+
#ifndef CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
#define CHROME_BROWSER_SYNC_ENGINE_ALL_STATUS_H_
@@ -18,6 +17,7 @@
#include "chrome/browser/sync/util/pthread_helpers.h"
namespace browser_sync {
+
class AuthWatcher;
class GaiaAuthenticator;
class ScopedStatusLockWithNotify;
@@ -155,6 +155,7 @@ class AllStatus {
scoped_ptr<EventListenerHookup> talk_mediator_hookup_;
mutable PThreadMutex mutex_; // Protects all data members.
+ DISALLOW_COPY_AND_ASSIGN(AllStatus);
};
struct AllStatusEvent {
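Several classes in this change (AllStatus above, plus AuthWatcher and Authenticator below) gain a DISALLOW_COPY_AND_ASSIGN declaration at the end of their private sections. For context, here is a minimal sketch of how that pre-C++11 macro worked in Chromium's base/basictypes.h of this era — it declares, but never defines, the copy constructor and assignment operator, so accidental copies fail to compile from outside the class (or fail to link from inside it). Treat this as an illustrative sketch, not the authoritative macro text:

// Sketch of the macro; the authoritative definition lives in base/basictypes.h.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)

class Example {
 public:
  Example() {}
 private:
  // Declared private and left undefined: `Example b = a;` no longer compiles.
  DISALLOW_COPY_AND_ASSIGN(Example);
};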
diff --git a/chrome/browser/sync/engine/apply_updates_command.h b/chrome/browser/sync/engine/apply_updates_command.h
index 320e42c..7588f4b 100644
--- a/chrome/browser/sync/engine/apply_updates_command.h
+++ b/chrome/browser/sync/engine/apply_updates_command.h
@@ -22,7 +22,7 @@ class ApplyUpdatesCommand : public ModelChangingSyncerCommand {
ApplyUpdatesCommand();
virtual ~ApplyUpdatesCommand();
- virtual void ModelChangingExecuteImpl(SyncerSession *session);
+ virtual void ModelChangingExecuteImpl(SyncerSession* session);
private:
DISALLOW_COPY_AND_ASSIGN(ApplyUpdatesCommand);
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
index ea4e253..afa0969 100644
--- a/chrome/browser/sync/engine/apply_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -162,5 +162,4 @@ TEST_F(ApplyUpdatesCommandTest, ItemsBothKnownAndUnknown) {
<< "The updates with known ancestors should be successfully applied";
}
-
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/auth_watcher.h b/chrome/browser/sync/engine/auth_watcher.h
index f1bd424d..b0dba61 100644
--- a/chrome/browser/sync/engine/auth_watcher.h
+++ b/chrome/browser/sync/engine/auth_watcher.h
@@ -1,7 +1,7 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
+//
// AuthWatcher watches authentication events and user open and close
// events and accordingly opens and closes shares.
@@ -24,6 +24,7 @@ class DirectoryManager;
}
namespace browser_sync {
+
class AllStatus;
class AuthWatcher;
class ServerConnectionManager;
@@ -66,7 +67,7 @@ struct AuthWatcherEvent {
class AuthWatcher {
public:
- // Normal progression is local -> gaia -> token
+ // Normal progression is local -> gaia -> token.
enum Status { LOCALLY_AUTHENTICATED, GAIA_AUTHENTICATED, NOT_AUTHENTICATED };
typedef syncable::DirectoryManagerEvent DirectoryManagerEvent;
typedef syncable::DirectoryManager DirectoryManager;
@@ -83,8 +84,8 @@ class AuthWatcher {
TalkMediator* talk_mediator);
~AuthWatcher();
- // Returns true if the open share has gotten zero
- // updates from the sync server (initial sync complete.)
+ // Returns true if the open share has gotten zero updates from the sync
+ // server (initial sync complete).
bool LoadDirectoryListAndOpen(const PathString& login);
typedef EventChannel<AuthWatcherEvent, PThreadMutex> Channel;
@@ -120,6 +121,9 @@ class AuthWatcher {
// For synchronizing other destructors.
void WaitForAuthThreadFinish();
+ bool AuthenticateWithToken(const std::string& email,
+ const std::string& auth_token);
+
protected:
void Reset();
void ClearAuthenticationData();
@@ -137,7 +141,7 @@ class AuthWatcher {
const bool save_credentials);
// These two helpers should only be called from the auth function.
- // returns false iff we had problems and should try GAIA_AUTH again.
+ // Returns false iff we had problems and should try GAIA_AUTH again.
bool ProcessGaiaAuthSuccess();
void ProcessGaiaAuthFailure();
@@ -151,11 +155,6 @@ class AuthWatcher {
const std::string& sync_service_token() const { return sync_service_token_; }
- public:
- bool AuthenticateWithToken(const std::string& email,
- const std::string& auth_token);
-
- protected:
typedef PThreadScopedLock<PThreadMutex> MutexLock;
// Passed to newly created threads.
@@ -197,6 +196,7 @@ class AuthWatcher {
bool thread_handle_valid_;
bool authenticating_now_;
AuthWatcherEvent::AuthenticationTrigger current_attempt_trigger_;
+ DISALLOW_COPY_AND_ASSIGN(AuthWatcher);
};
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/authenticator.cc b/chrome/browser/sync/engine/authenticator.cc
index cd168d2..8cc3724 100644
--- a/chrome/browser/sync/engine/authenticator.cc
+++ b/chrome/browser/sync/engine/authenticator.cc
@@ -15,12 +15,12 @@ namespace browser_sync {
using std::string;
-Authenticator::Authenticator(ServerConnectionManager *manager,
+Authenticator::Authenticator(ServerConnectionManager* manager,
UserSettings* settings)
: server_connection_manager_(manager), settings_(settings) {
}
-Authenticator::Authenticator(ServerConnectionManager *manager)
+Authenticator::Authenticator(ServerConnectionManager* manager)
: server_connection_manager_(manager), settings_(NULL) {
}
@@ -31,8 +31,8 @@ Authenticator::AuthenticationResult Authenticator::Authenticate() {
Authenticator::AuthenticationResult Authenticator::Authenticate(
string username, string password, bool save_credentials) {
- // TODO(scrub): need to figure out if this routine is used anywhere other than
- // the test code.
+ // TODO(sync): need to figure out if this routine is used anywhere other
+ // than the test code.
GaiaAuthenticator auth_service("ChromiumBrowser", "chromiumsync",
"https://www.google.com:443/accounts/ClientLogin");
const SignIn signin_type =
diff --git a/chrome/browser/sync/engine/authenticator.h b/chrome/browser/sync/engine/authenticator.h
index 6c5005b..95e4698 100644
--- a/chrome/browser/sync/engine/authenticator.h
+++ b/chrome/browser/sync/engine/authenticator.h
@@ -1,7 +1,7 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
+//
// The authenticator is a cross-platform class that handles authentication for
// the sync client.
//
@@ -86,8 +86,8 @@ class Authenticator {
// TODO(sync): Make this function private when we're done.
AuthenticationResult AuthenticateToken(std::string auth_token);
- const char * display_email() const { return display_email_.c_str(); }
- const char * display_name() const { return display_name_.c_str(); }
+ const char* display_email() const { return display_email_.c_str(); }
+ const char* display_name() const { return display_name_.c_str(); }
private:
// Stores the information in the UserIdentification returned from the server.
AuthenticationResult HandleSuccessfulTokenRequest(
@@ -99,6 +99,7 @@ class Authenticator {
std::string display_name_;
std::string obfuscated_id_;
UserSettings* const settings_;
+ DISALLOW_COPY_AND_ASSIGN(Authenticator);
};
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
index 0eb279a..c4f8ce3 100644
--- a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
@@ -26,23 +26,23 @@ BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSetsCommand() {}
BuildAndProcessConflictSetsCommand::~BuildAndProcessConflictSetsCommand() {}
void BuildAndProcessConflictSetsCommand::ModelChangingExecuteImpl(
- SyncerSession *session) {
+ SyncerSession* session) {
session->set_conflict_sets_built(BuildAndProcessConflictSets(session));
}
bool BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSets(
- SyncerSession *session) {
+ SyncerSession* session) {
syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
if (!dir.good())
return false;
bool had_single_direction_sets = false;
- { // scope for transaction
+ { // Scope for transaction.
syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
ConflictResolutionView conflict_view(session);
BuildConflictSets(&trans, &conflict_view);
had_single_direction_sets =
ProcessSingleDirectionConflictSets(&trans, session);
- // we applied some updates transactionally, lets try syncing again.
+ // We applied some updates transactionally, lets try syncing again.
if (had_single_direction_sets)
return true;
}
@@ -86,9 +86,9 @@ namespace {
void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
syncable::EntryKernel* backup) {
CHECK(!entry->Get(syncable::IS_UNSYNCED)) << " Storing Rollback data for "
- "entry that's unsynced." << *entry ;
+ "entry that's unsynced." << *entry ;
CHECK(entry->Get(syncable::IS_UNAPPLIED_UPDATE)) << " Storing Rollback data "
- "for entry that's not an unapplied update." << *entry ;
+ "for entry that's not an unapplied update." << *entry ;
*backup = entry->GetKernelCopy();
}
@@ -174,15 +174,18 @@ bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
syncable::WriteTransaction* trans,
const vector<syncable::Id>* const update_set,
SyncerSession* const session) {
- vector<int64> handles; // The handles in the |update_set| order.
- vector<syncable::Id> rollback_ids; // Holds the same Ids as update_set, but
- // sorted so that runs of adjacent nodes
- // appear in order.
+ // The handles in the |update_set| order.
+ vector<int64> handles;
+
+ // Holds the same Ids as update_set, but sorted so that runs of adjacent
+ // nodes appear in order.
+ vector<syncable::Id> rollback_ids;
rollback_ids.reserve(update_set->size());
- syncable::MetahandleSet rollback_ids_inserted_items; // Tracks what's added
- // to |rollback_ids|.
+ // Tracks what's added to |rollback_ids|.
+ syncable::MetahandleSet rollback_ids_inserted_items;
vector<syncable::Id>::const_iterator it;
+
// 1. Build |rollback_ids| in the order required for successful rollback.
// Specifically, for positions to come out right, restoring an item
// requires that its predecessor in the sibling order is properly
@@ -295,8 +298,8 @@ void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
// or itself. If it gets to the root it does nothing. If it finds a loop all
// moved unsynced entries in the list of crawled entries have their sets
// merged with the entry.
- // TODO(sync): Build test cases to cover this function when the argument
- // list has settled.
+ // TODO(sync): Build test cases to cover this function when the argument list
+ // has settled.
syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
if (!parent.good()) {
@@ -389,8 +392,8 @@ void CrawlDeletedTreeMergingSets(syncable::BaseTransaction* trans,
Checker checker) {
syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
syncable::Id double_step_parent_id = parent_id;
- // This block builds sets where we've got an entry in a directory the
- // server wants to delete.
+ // This block builds sets where we've got an entry in a directory the server
+ // wants to delete.
//
// Here we're walking up the tree to find all entries that the pass checks
// deleted. We can be extremely strict here as anything unexpected means
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
index 79559ba..706748b 100644
--- a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.h
@@ -28,10 +28,10 @@ class BuildAndProcessConflictSetsCommand : public ModelChangingSyncerCommand {
BuildAndProcessConflictSetsCommand();
virtual ~BuildAndProcessConflictSetsCommand();
- virtual void ModelChangingExecuteImpl(SyncerSession *session);
+ virtual void ModelChangingExecuteImpl(SyncerSession* session);
private:
- bool BuildAndProcessConflictSets(SyncerSession *session);
+ bool BuildAndProcessConflictSets(SyncerSession* session);
bool ProcessSingleDirectionConflictSets(
syncable::WriteTransaction* trans, SyncerSession* const session);
diff --git a/chrome/browser/sync/engine/build_commit_command.cc b/chrome/browser/sync/engine/build_commit_command.cc
index f819d6c..46200c6 100644
--- a/chrome/browser/sync/engine/build_commit_command.cc
+++ b/chrome/browser/sync/engine/build_commit_command.cc
@@ -30,7 +30,7 @@ namespace browser_sync {
BuildCommitCommand::BuildCommitCommand() {}
BuildCommitCommand::~BuildCommitCommand() {}
-void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
+void BuildCommitCommand::ExecuteImpl(SyncerSession* session) {
ClientToServerMessage message;
message.set_share(ToUTF8(session->account_name()).get_string());
message.set_message_contents(ClientToServerMessage::COMMIT);
@@ -49,7 +49,7 @@ void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
syncable::GET_BY_ID,
id);
CHECK(meta_entry.good());
- // this is the only change we make to the entry in this function.
+ // This is the only change we make to the entry in this function.
meta_entry.Put(syncable::SYNCING, true);
Name name = meta_entry.GetName();
@@ -64,8 +64,8 @@ void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
sync_entry->set_non_unique_name(
ToUTF8(name.non_unique_value()).get_string());
}
- // deleted items with negative parent ids can be a problem so we set the
- // parent to 0. (TODO(sync): Still true in protocol?
+ // Deleted items with negative parent ids can be a problem so we set the
+ // parent to 0. (TODO(sync): Still true in protocol?).
Id new_parent_id;
if (meta_entry.Get(syncable::IS_DEL) &&
!meta_entry.Get(syncable::PARENT_ID).ServerKnows()) {
@@ -77,10 +77,9 @@ void BuildCommitCommand::ExecuteImpl(SyncerSession *session) {
// TODO(sync): Investigate all places that think transactional commits
// actually exist.
//
- // This is the only logic we'll need when transactional commits are
- // moved to the server.
- // If our parent has changes, send up the old one so the server can
- // correctly deal with multiple parents.
+ // This is the only logic we'll need when transactional commits are moved
+ // to the server. If our parent has changes, send up the old one so the
+ // server can correctly deal with multiple parents.
if (new_parent_id != meta_entry.Get(syncable::SERVER_PARENT_ID) &&
0 != meta_entry.Get(syncable::BASE_VERSION) &&
syncable::CHANGES_VERSION != meta_entry.Get(syncable::BASE_VERSION)) {
diff --git a/chrome/browser/sync/engine/change_reorder_buffer.h b/chrome/browser/sync/engine/change_reorder_buffer.h
index ddea1b6..5194d6b 100644
--- a/chrome/browser/sync/engine/change_reorder_buffer.h
+++ b/chrome/browser/sync/engine/change_reorder_buffer.h
@@ -72,9 +72,9 @@ class ChangeReorderBuffer {
return operations_.empty();
}
- // Output a reordered list of changes to |changelist| using the items
- // that were pushed into the reorder buffer. |sync_trans| is used
- // to determine the ordering.
+ // Output a reordered list of changes to |changelist| using the items that
+ // were pushed into the reorder buffer. |sync_trans| is used to determine the
+ // ordering.
void GetAllChangesInTreeOrder(const BaseTransaction* sync_trans,
std::vector<ChangeRecord>* changelist);
@@ -88,8 +88,8 @@ class ChangeReorderBuffer {
};
typedef std::map<int64, Operation> OperationMap;
- // Stores the items that have been pushed into the buffer, and the
- // type of operation that was associated with them.
+ // Stores the items that have been pushed into the buffer, and the type of
+ // operation that was associated with them.
OperationMap operations_;
DISALLOW_COPY_AND_ASSIGN(ChangeReorderBuffer);
diff --git a/chrome/browser/sync/engine/client_command_channel.h b/chrome/browser/sync/engine/client_command_channel.h
index 2f91a9b..a13a5eb 100644
--- a/chrome/browser/sync/engine/client_command_channel.h
+++ b/chrome/browser/sync/engine/client_command_channel.h
@@ -10,11 +10,11 @@
namespace browser_sync {
-// Commands for the client come back in sync responses, which is kind
-// of inconvenient because some services (like the bandwidth throttler)
-// want to know about them. So to avoid explicit dependencies on this
-// protocol behavior, the syncer dumps all client commands onto a shared
-// client command channel.
+// Commands for the client come back in sync responses, which is kind of
+// inconvenient because some services (like the bandwidth throttler) want to
+// know about them. So to avoid explicit dependencies on this protocol
+// behavior, the syncer dumps all client commands onto a shared client command
+// channel.
struct ClientCommandChannelTraits {
typedef const sync_pb::ClientCommand* EventType;
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.cc b/chrome/browser/sync/engine/conflict_resolution_view.cc
index aca5d89..0b672a1 100644
--- a/chrome/browser/sync/engine/conflict_resolution_view.cc
+++ b/chrome/browser/sync/engine/conflict_resolution_view.cc
@@ -52,7 +52,7 @@ int64 ConflictResolutionView::servers_latest_timestamp() const {
return process_state_->servers_latest_timestamp();
}
- // True iff we're stuck. User should contact support.
+// True iff we're stuck. User should contact support.
bool ConflictResolutionView::syncer_stuck() const {
return process_state_->syncer_stuck();
}
@@ -155,12 +155,11 @@ ConflictResolutionView::BlockedItemsBegin() const {
}
set<syncable::Id>::iterator
- ConflictResolutionView::CommitConflictsEnd() const {
+ConflictResolutionView::CommitConflictsEnd() const {
return process_state_->ConflictingItemsEnd();
}
-set<syncable::Id>::iterator
- ConflictResolutionView::BlockedItemsEnd() const {
+set<syncable::Id>::iterator ConflictResolutionView::BlockedItemsEnd() const {
return process_state_->BlockedItemsEnd();
}
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.h b/chrome/browser/sync/engine/conflict_resolution_view.h
index a60af65..8ea0d2f 100644
--- a/chrome/browser/sync/engine/conflict_resolution_view.h
+++ b/chrome/browser/sync/engine/conflict_resolution_view.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Conflict resolution view is intended to provide a restricted
-// view of the sync cycle state for the conflict resolver. Since the
-// resolver doesn't get to see all of the SyncProcess, we can allow
-// it to operate on a subsection of the data.
+// Conflict resolution view is intended to provide a restricted view of the
+// sync cycle state for the conflict resolver. Since the resolver doesn't get
+// to see all of the SyncProcess, we can allow it to operate on a subsection of
+// the data.
#ifndef CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
#define CHROME_BROWSER_SYNC_ENGINE_CONFLICT_RESOLUTION_VIEW_H_
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
index 9bfe419..27ed93f 100644
--- a/chrome/browser/sync/engine/conflict_resolver.cc
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -244,8 +244,7 @@ bool NamesCollideWithChildrenOfFolder(BaseTransaction* trans,
return false;
}
-void GiveEntryNewName(WriteTransaction* trans,
- MutableEntry* entry) {
+void GiveEntryNewName(WriteTransaction* trans, MutableEntry* entry) {
using namespace syncable;
Name new_name =
FindNewName(trans, entry->Get(syncable::PARENT_ID), entry->GetName());
@@ -289,7 +288,7 @@ bool ConflictResolver::AttemptItemMerge(WriteTransaction* trans,
CHECK(child_entry.Put(syncable::PARENT_ID, desired_id));
CHECK(child_entry.Put(syncable::IS_UNSYNCED, true));
Id id = child_entry.Get(syncable::ID);
- // we only note new entries for quicker merging next round.
+ // We only note new entries for quicker merging next round.
if (!id.ServerKnows())
children_of_merged_dirs_.insert(id);
}
@@ -299,7 +298,7 @@ bool ConflictResolver::AttemptItemMerge(WriteTransaction* trans,
}
LOG(INFO) << "Identical client and server items merging server changes. " <<
- *locally_named << " server: " << *server_named;
+ *locally_named << " server: " << *server_named;
// Clear server_named's server data and mark it deleted so it goes away
// quietly because it's now identical to a deleted local entry.
@@ -323,20 +322,20 @@ ConflictResolver::ProcessServerClientNameClash(WriteTransaction* trans,
MutableEntry* server_named,
SyncerSession* session) {
if (!locally_named->ExistsOnClientBecauseDatabaseNameIsNonEmpty())
- return NO_CLASH; // locally_named is a server update.
+ return NO_CLASH; // Locally_named is a server update.
if (locally_named->Get(syncable::IS_DEL) ||
server_named->Get(syncable::SERVER_IS_DEL)) {
return NO_CLASH;
}
if (locally_named->Get(syncable::PARENT_ID) !=
server_named->Get(syncable::SERVER_PARENT_ID)) {
- return NO_CLASH; // different parents
+ return NO_CLASH; // Different parents.
}
PathString name = locally_named->GetSyncNameValue();
if (0 != syncable::ComparePathNames(name,
server_named->Get(syncable::SERVER_NAME))) {
- return NO_CLASH; // different names.
+ return NO_CLASH; // Different names.
}
// First try to merge.
@@ -387,7 +386,7 @@ ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
// TODO(sync): Come up with a better scheme for set hashing. This scheme
// will make debugging easy.
// If this call to sort is removed, we need to add one before we use
- // binary_search in ProcessConflictSet
+ // binary_search in ProcessConflictSet.
sort(set->begin(), set->end());
std::stringstream rv;
for(ConflictSet::iterator i = set->begin() ; i != set->end() ; ++i )
@@ -422,7 +421,7 @@ bool AttemptToFixCircularConflict(WriteTransaction* trans,
Entry parent(trans, syncable::GET_BY_ID, parentid);
CHECK(parent.good());
if (parentid == *i)
- break; // it's a loop
+ break; // It's a loop.
parentid = parent.Get(syncable::PARENT_ID);
}
if (parentid.IsRoot())
@@ -445,11 +444,11 @@ bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
Id parentid = entry.Get(syncable::PARENT_ID);
MutableEntry parent(trans, syncable::GET_BY_ID, parentid);
if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
- !parent.Get(syncable::SERVER_IS_DEL) ||
+ !parent.Get(syncable::SERVER_IS_DEL) ||
!binary_search(conflict_set->begin(), conflict_set->end(), parentid))
return false;
- // We're trying to commit into a directory tree that's been deleted.
- // To solve this we recreate the directory tree.
+ // We're trying to commit into a directory tree that's been deleted. To
+ // solve this we recreate the directory tree.
//
// We do this in two parts, first we ensure the tree is unaltered since the
// conflict was detected.
@@ -494,35 +493,32 @@ bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
!binary_search(conflict_set->begin(), conflict_set->end(), parent_id)) {
return false;
}
- // We've deleted a directory tree that's got contents on the server.
- // We recreate the directory to solve the problem.
+ // We've deleted a directory tree that's got contents on the server. We
+ // recreate the directory to solve the problem.
//
// We do this in two parts, first we ensure the tree is unaltered since
// the conflict was detected.
Id id = parent_id;
- // As we will be crawling the path of deleted entries there's a chance
- // we'll end up having to reparent an item as there will be an invalid
- // parent.
+ // As we will be crawling the path of deleted entries there's a chance we'll
+ // end up having to reparent an item as there will be an invalid parent.
Id reroot_id = syncable::kNullId;
- // similarly crawling deleted items means we risk loops.
+ // Similarly crawling deleted items means we risk loops.
int loop_detection = conflict_set->size();
while (!id.IsRoot() && --loop_detection >= 0) {
Entry parent(trans, syncable::GET_BY_ID, id);
- // If we get a bad parent, or a parent that's deleted on client and
- // server we recreate the hierarchy in the root.
+ // If we get a bad parent, or a parent that's deleted on client and server
+ // we recreate the hierarchy in the root.
if (!parent.good()) {
reroot_id = id;
break;
}
CHECK(parent.Get(syncable::IS_DIR));
if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
- // We've got to an entry that's not in the set. If it has been
- // deleted between set building and this point in time we
- // return false. If it had been deleted earlier it would have been
- // in the set.
- // TODO(sync): Revisit syncer code organization to see if
- // conflict resolution can be done in the same transaction as set
- // building.
+ // We've got to an entry that's not in the set. If it has been deleted
+ // between set building and this point in time we return false. If it had
+ // been deleted earlier it would have been in the set.
+ // TODO(sync): Revisit syncer code organization to see if conflict
+ // resolution can be done in the same transaction as set building.
if (parent.Get(syncable::IS_DEL))
return false;
break;
@@ -540,8 +536,7 @@ bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
reroot_id = entry.Get(syncable::PARENT_ID);
else
reroot_id = id;
- // Now we fix things up by undeleting all the folders in the item's
- // path.
+ // Now we fix things up by undeleting all the folders in the item's path.
id = parent_id;
while (!id.IsRoot() && id != reroot_id) {
if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
@@ -617,7 +612,6 @@ bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
return false;
}
-
template <typename InputIt>
bool ConflictResolver::LogAndSignalIfConflictStuck(
BaseTransaction* trans,
@@ -645,14 +639,14 @@ bool ConflictResolver::LogAndSignalIfConflictStuck(
view->set_syncer_stuck(true);
return true;
- // TODO(sync): If we're stuck for a while we need to alert the user,
- // clear cache or reset syncing. At the very least we should stop trying
- // something that's obviously not working.
+ // TODO(sync): If we're stuck for a while we need to alert the user, clear
+ // cache or reset syncing. At the very least we should stop trying something
+ // that's obviously not working.
}
bool ConflictResolver::ResolveSimpleConflicts(const ScopedDirLookup& dir,
ConflictResolutionView* view,
- SyncerSession *session) {
+ SyncerSession* session) {
WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
bool forward_progress = false;
// First iterate over simple conflict items (those that belong to no set).
@@ -693,7 +687,7 @@ bool ConflictResolver::ResolveSimpleConflicts(const ScopedDirLookup& dir,
bool ConflictResolver::ResolveConflicts(const ScopedDirLookup& dir,
ConflictResolutionView* view,
- SyncerSession *session) {
+ SyncerSession* session) {
if (view->HasBlockedItems()) {
LOG(INFO) << "Delaying conflict resolution, have " <<
view->BlockedItemsSize() << " blocked items.";
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
index 7959106..eb12210 100644
--- a/chrome/browser/sync/engine/conflict_resolver.h
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -39,7 +39,7 @@ class ConflictResolver {
// Returns true if the syncer should try to apply its updates again.
bool ResolveConflicts(const syncable::ScopedDirLookup& dir,
ConflictResolutionView* view,
- SyncerSession *session);
+ SyncerSession* session);
// Called by ProcessServerClientNameClash. Returns true if it's merged the
// items, false otherwise. Does not re-check preconditions covered in
@@ -69,11 +69,11 @@ class ConflictResolver {
BOGUS_SET,
};
- // Get a key for the given set. NB: May reorder set contents.
- // The key is currently not very efficient, but will ease debugging.
+ // Get a key for the given set. NOTE: May reorder set contents. The key is
+ // currently not very efficient, but will ease debugging.
ConflictSetCountMapKey GetSetKey(ConflictSet* conflict_set);
- void IgnoreLocalChanges(syncable::MutableEntry * entry);
+ void IgnoreLocalChanges(syncable::MutableEntry* entry);
void OverwriteServerChanges(syncable::WriteTransaction* trans,
syncable::MutableEntry* entry);
@@ -105,7 +105,7 @@ class ConflictResolver {
ConflictSet* conflict_set,
SyncerSession* session);
- // Returns true if we're stuck
+ // Returns true if we're stuck.
template <typename InputIt>
bool LogAndSignalIfConflictStuck(syncable::BaseTransaction* trans,
int attempt_count,
diff --git a/chrome/browser/sync/engine/download_updates_command.cc b/chrome/browser/sync/engine/download_updates_command.cc
index 0d84275..889567e 100644
--- a/chrome/browser/sync/engine/download_updates_command.cc
+++ b/chrome/browser/sync/engine/download_updates_command.cc
@@ -22,7 +22,7 @@ using std::string;
DownloadUpdatesCommand::DownloadUpdatesCommand() {}
DownloadUpdatesCommand::~DownloadUpdatesCommand() {}
-void DownloadUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+void DownloadUpdatesCommand::ExecuteImpl(SyncerSession* session) {
ClientToServerMessage client_to_server_message;
ClientToServerResponse update_response;
diff --git a/chrome/browser/sync/engine/download_updates_command.h b/chrome/browser/sync/engine/download_updates_command.h
index 2f48cb8..efd5468 100644
--- a/chrome/browser/sync/engine/download_updates_command.h
+++ b/chrome/browser/sync/engine/download_updates_command.h
@@ -16,7 +16,7 @@ class DownloadUpdatesCommand : public SyncerCommand {
public:
DownloadUpdatesCommand();
virtual ~DownloadUpdatesCommand();
- virtual void ExecuteImpl(SyncerSession *session);
+ virtual void ExecuteImpl(SyncerSession* session);
private:
DISALLOW_COPY_AND_ASSIGN(DownloadUpdatesCommand);
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.cc b/chrome/browser/sync/engine/get_commit_ids_command.cc
index 612b40c..5c1f015 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.cc
+++ b/chrome/browser/sync/engine/get_commit_ids_command.cc
@@ -8,8 +8,8 @@
#include <utility>
#include <vector>
-#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/browser/sync/util/sync_types.h"
@@ -23,9 +23,9 @@ GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
GetCommitIdsCommand::~GetCommitIdsCommand() {}
-void GetCommitIdsCommand::ExecuteImpl(SyncerSession *session) {
- // Gather the full set of unsynced items and store it in the session.
- // They are not in the correct order for commit.
+void GetCommitIdsCommand::ExecuteImpl(SyncerSession* session) {
+ // Gather the full set of unsynced items and store it in the session. They
+ // are not in the correct order for commit.
syncable::Directory::UnsyncedMetaHandles all_unsynced_handles;
SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
&all_unsynced_handles);
@@ -117,7 +117,7 @@ bool GetCommitIdsCommand::IsCommitBatchFull() {
return ordered_commit_set_.Size() >= requested_commit_batch_size_;
}
-void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession *session) {
+void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession* session) {
// Add moves and creates, and prepend their uncommitted parents.
for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
!IsCommitBatchFull() && iterator.Valid();
@@ -140,7 +140,7 @@ void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession *session) {
ordered_commit_set_.Truncate(requested_commit_batch_size_);
}
-void GetCommitIdsCommand::AddDeletes(SyncerSession *session) {
+void GetCommitIdsCommand::AddDeletes(SyncerSession* session) {
set<syncable::Id> legal_delete_parents;
for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
@@ -222,7 +222,7 @@ void GetCommitIdsCommand::AddDeletes(SyncerSession *session) {
}
}
-void GetCommitIdsCommand::BuildCommitIds(SyncerSession *session) {
+void GetCommitIdsCommand::BuildCommitIds(SyncerSession* session) {
// Commits follow these rules:
// 1. Moves or creates are preceded by needed folder creates, from
// root to leaf. For folders whose contents are ordered, moves
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.h b/chrome/browser/sync/engine/get_commit_ids_command.h
index 2d80a04..59b737f 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.h
+++ b/chrome/browser/sync/engine/get_commit_ids_command.h
@@ -5,12 +5,12 @@
#ifndef CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
#define CHROME_BROWSER_SYNC_ENGINE_GET_COMMIT_IDS_COMMAND_H_
-#include <vector>
#include <utility>
+#include <vector>
#include "chrome/browser/sync/engine/syncer_command.h"
-#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/util/sync_types.h"
using std::pair;
@@ -25,15 +25,15 @@ class GetCommitIdsCommand : public SyncerCommand {
explicit GetCommitIdsCommand(int commit_batch_size);
virtual ~GetCommitIdsCommand();
- virtual void ExecuteImpl(SyncerSession *session);
+ virtual void ExecuteImpl(SyncerSession* session);
// Returns a vector of IDs that should be committed.
void BuildCommitIds(SyncerSession *session);
// These classes are public for testing.
// TODO(ncarter): This code is more generic than just Commit and can
- // be reused elsewhere (e.g. PositionalRunBuilder, ChangeReorderBuffer
- // do similar things). Merge all these implementations.
+ // be reused elsewhere (e.g. ChangeReorderBuffer do similar things). Merge
+ // all these implementations.
class OrderedCommitSet {
public:
// TODO(chron): Reserve space according to batch size?
@@ -145,8 +145,8 @@ class GetCommitIdsCommand : public SyncerCommand {
return false;
// We should really not WRITE in this iterator, but we can fix that
- // later. ValidateCommitEntry writes to the DB, and we add the
- // blocked items. We should move that somewhere else later.
+ // later. ValidateCommitEntry writes to the DB, and we add the blocked
+ // items. We should move that somewhere else later.
syncable::MutableEntry entry(session_->write_transaction(),
syncable::GET_BY_HANDLE, metahandle);
VerifyCommitResult verify_result =
@@ -154,7 +154,7 @@ class GetCommitIdsCommand : public SyncerCommand {
if (verify_result == VERIFY_BLOCKED) {
session_->AddBlockedItem(entry.Get(syncable::ID)); // TODO(chron): Ew.
} else if (verify_result == VERIFY_UNSYNCABLE) {
- // drop unsyncable entries.
+ // Drop unsyncable entries.
entry.Put(syncable::IS_UNSYNCED, false);
}
return verify_result == VERIFY_OK;
@@ -173,8 +173,8 @@ class GetCommitIdsCommand : public SyncerCommand {
syncable::Id parent_id);
// OrderedCommitSet helpers for adding predecessors in order.
- // TODO(ncarter): Refactor these so that the |result| parameter goes
- // away, and AddItem doesn't need to consider two OrderedCommitSets.
+ // TODO(ncarter): Refactor these so that the |result| parameter goes away,
+ // and AddItem doesn't need to consider two OrderedCommitSets.
bool AddItem(syncable::Entry* item, OrderedCommitSet* result);
bool AddItemThenPredecessors(syncable::BaseTransaction* trans,
syncable::Entry* item,
@@ -186,9 +186,9 @@ class GetCommitIdsCommand : public SyncerCommand {
bool IsCommitBatchFull();
- void AddCreatesAndMoves(SyncerSession *session);
+ void AddCreatesAndMoves(SyncerSession* session);
- void AddDeletes(SyncerSession *session);
+ void AddDeletes(SyncerSession* session);
OrderedCommitSet ordered_commit_set_;
diff --git a/chrome/browser/sync/engine/model_changing_syncer_command.cc b/chrome/browser/sync/engine/model_changing_syncer_command.cc
index 09b0782..f549c89 100644
--- a/chrome/browser/sync/engine/model_changing_syncer_command.cc
+++ b/chrome/browser/sync/engine/model_changing_syncer_command.cc
@@ -10,7 +10,7 @@
namespace browser_sync {
-void ModelChangingSyncerCommand::ExecuteImpl(SyncerSession *session) {
+void ModelChangingSyncerCommand::ExecuteImpl(SyncerSession* session) {
work_session_ = session;
session->model_safe_worker()->DoWorkAndWaitUntilDone(
NewCallback(this, &ModelChangingSyncerCommand::StartChangingModel));
diff --git a/chrome/browser/sync/engine/post_commit_message_command.cc b/chrome/browser/sync/engine/post_commit_message_command.cc
index 3807607..02aac9f 100644
--- a/chrome/browser/sync/engine/post_commit_message_command.cc
+++ b/chrome/browser/sync/engine/post_commit_message_command.cc
@@ -19,9 +19,9 @@ namespace browser_sync {
PostCommitMessageCommand::PostCommitMessageCommand() {}
PostCommitMessageCommand::~PostCommitMessageCommand() {}
-void PostCommitMessageCommand::ExecuteImpl(SyncerSession *session) {
+void PostCommitMessageCommand::ExecuteImpl(SyncerSession* session) {
if (session->commit_ids_empty())
- return; // nothing to commit
+ return; // Nothing to commit.
ClientToServerResponse response;
syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
if (!dir.good())
diff --git a/chrome/browser/sync/engine/post_commit_message_command.h b/chrome/browser/sync/engine/post_commit_message_command.h
index 87aa4d7..85e9f71 100644
--- a/chrome/browser/sync/engine/post_commit_message_command.h
+++ b/chrome/browser/sync/engine/post_commit_message_command.h
@@ -16,7 +16,7 @@ class PostCommitMessageCommand : public SyncerCommand {
PostCommitMessageCommand();
virtual ~PostCommitMessageCommand();
- virtual void ExecuteImpl(SyncerSession *session);
+ virtual void ExecuteImpl(SyncerSession* session);
private:
DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
diff --git a/chrome/browser/sync/engine/process_commit_response_command.cc b/chrome/browser/sync/engine/process_commit_response_command.cc
index 6a2a177..379a292 100644
--- a/chrome/browser/sync/engine/process_commit_response_command.cc
+++ b/chrome/browser/sync/engine/process_commit_response_command.cc
@@ -8,10 +8,10 @@
#include <vector>
#include "base/basictypes.h"
-#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
#include "chrome/browser/sync/engine/syncer_status.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
@@ -56,7 +56,7 @@ ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
void ProcessCommitResponseCommand::ModelChangingExecuteImpl(
- SyncerSession *session) {
+ SyncerSession* session) {
// TODO(sync): This function returns if it sees problems. We probably want
// to flag the need for an update or similar.
ScopedDirLookup dir(session->dirman(), session->account_name());
@@ -104,7 +104,7 @@ void ProcessCommitResponseCommand::ModelChangingExecuteImpl(
set<syncable::Id> conflicting_new_folder_ids;
set<syncable::Id> deleted_folders;
bool truncated_commit_logged = false;
- { // Scope for WriteTransaction
+ { // Scope for WriteTransaction.
WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
for (int i = 0; i < cr.entryresponse_size(); i++) {
CommitResponse::RESPONSE_TYPE response_type =
@@ -226,7 +226,7 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
return CommitResponse::INVALID_MESSAGE;
}
- // implied by the IsValid call above, but here for clarity.
+ // Implied by the IsValid call above, but here for clarity.
DCHECK_EQ(CommitResponse::SUCCESS, response) << response;
// Check to see if we've been given the ID of an existing entry. If so treat
// it as an error response and retry later.
@@ -305,12 +305,12 @@ void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
if (syncing_was_set) {
PerformCommitTimeNameAside(trans, server_entry, local_entry);
} else {
- // IS_UNSYNCED will ensure that this entry gets committed again,
- // even if we skip this name aside. IS_UNSYNCED was probably previously
- // set, but let's just set it anyway.
+ // IS_UNSYNCED will ensure that this entry gets committed again, even if
+ // we skip this name aside. IS_UNSYNCED was probably previously set, but
+ // let's just set it anyway.
local_entry->Put(IS_UNSYNCED, true);
LOG(INFO) << "Skipping commit time name aside because" <<
- " entry was changed during commit.";
+ " entry was changed during commit.";
}
}
diff --git a/chrome/browser/sync/engine/process_commit_response_command.h b/chrome/browser/sync/engine/process_commit_response_command.h
index a025428..4e7c94b 100644
--- a/chrome/browser/sync/engine/process_commit_response_command.h
+++ b/chrome/browser/sync/engine/process_commit_response_command.h
@@ -49,6 +49,7 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
};
+
} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_ENGINE_PROCESS_COMMIT_RESPONSE_COMMAND_H_
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
index 6d5973c..31c204b 100644
--- a/chrome/browser/sync/engine/process_updates_command.cc
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -8,9 +8,9 @@
#include "base/basictypes.h"
#include "chrome/browser/sync/engine/syncer.h"
-#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
@@ -57,14 +57,13 @@ void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
}
if (0 == status.servers_latest_timestamp()) {
- // Hack since new server never gives us the server's latest
- // timestamp. But if a getupdates returns zero, then we know we
- // are up to date.
+ // Hack since new server never gives us the server's latest timestamp. But
+ // if a getupdates returns zero, then we know we are up to date.
status.set_servers_latest_timestamp(status.current_sync_timestamp());
}
- // If we have updates that are ALL supposed to be skipped, we don't want
- // to get them again. In fact, the account's final updates are all
- // supposed to be skipped and we DON'T step past them, we will sync forever
+ // If we have updates that are ALL supposed to be skipped, we don't want to
+ // get them again. In fact, the account's final updates are all supposed to
+ // be skipped and we DON'T step past them, we will sync forever.
int64 latest_skip_timestamp = 0;
bool any_non_skip_results = false;
vector<VerifiedUpdate>::iterator it;
@@ -75,7 +74,7 @@ void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
any_non_skip_results = (it->first != VERIFY_SKIP);
if (!any_non_skip_results) {
- // ALL updates were to be skipped, including this one
+ // ALL updates were to be skipped, including this one.
if (update.sync_timestamp() > latest_skip_timestamp) {
latest_skip_timestamp = update.sync_timestamp();
}
@@ -88,8 +87,8 @@ void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
switch (ProcessUpdate(dir, update)) {
case SUCCESS_PROCESSED:
case SUCCESS_STORED:
- // We can update the timestamp because we store the update
- // even if we can't apply it now.
+ // We can update the timestamp because we store the update even if we
+ // can't apply it now.
if (update.sync_timestamp() > new_timestamp)
new_timestamp = update.sync_timestamp();
break;
@@ -113,7 +112,7 @@ void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
}
namespace {
-// returns true if the entry is still ok to process
+// Returns true if the entry is still ok to process.
bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
syncable::MutableEntry* same_id) {
@@ -121,15 +120,14 @@ bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
const bool is_directory = entry.IsFolder();
const bool is_bookmark = entry.has_bookmarkdata();
- return VERIFY_SUCCESS ==
- SyncerUtil::VerifyUpdateConsistency(trans,
- entry,
- same_id,
- deleted,
- is_directory,
- is_bookmark);
+ return VERIFY_SUCCESS == SyncerUtil::VerifyUpdateConsistency(trans,
+ entry,
+ same_id,
+ deleted,
+ is_directory,
+ is_bookmark);
}
-} // anonymous namespace
+} // namespace
// TODO(sync): Refactor this code.
// Process a single update. Will avoid touching global state.
@@ -148,8 +146,8 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// We take a two step approach. First we store the entries data in the
// server fields of a local entry and then move the data to the local fields
MutableEntry update_entry(&trans, GET_BY_ID, id);
- // TODO(sync): do we need to run ALL these checks, or is a mere version
- // check good enough?
+ // TODO(sync): do we need to run ALL these checks, or is a mere version check
+ // good enough?
if (!ReverifyEntry(&trans, entry, &update_entry)) {
return SUCCESS_PROCESSED; // the entry has become irrelevant
}
diff --git a/chrome/browser/sync/engine/resolve_conflicts_command.h b/chrome/browser/sync/engine/resolve_conflicts_command.h
index a75c631..776326b 100644
--- a/chrome/browser/sync/engine/resolve_conflicts_command.h
+++ b/chrome/browser/sync/engine/resolve_conflicts_command.h
@@ -5,15 +5,15 @@
#ifndef CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
#define CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
-
-#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/model_changing_syncer_command.h"
namespace syncable {
class WriteTransaction;
class MutableEntry;
class Id;
}
+
namespace browser_sync {
class SyncerSession;
@@ -28,7 +28,7 @@ class ResolveConflictsCommand : public ModelChangingSyncerCommand {
private:
DISALLOW_COPY_AND_ASSIGN(ResolveConflictsCommand);
};
-} // namespace browser_sync
+} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_ENGINE_RESOLVE_CONFLICTS_COMMAND_H_
diff --git a/chrome/browser/sync/engine/sync_cycle_state.h b/chrome/browser/sync/engine/sync_cycle_state.h
index 7d38670c..375ca1f 100644
--- a/chrome/browser/sync/engine/sync_cycle_state.h
+++ b/chrome/browser/sync/engine/sync_cycle_state.h
@@ -1,7 +1,6 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
//
// The sync process consists of a sequence of sync cycles, each of which
// (hopefully) moves the client into closer synchronization with the server.
@@ -31,8 +30,8 @@ namespace browser_sync {
typedef std::pair<VerifyResult, sync_pb::SyncEntity> VerifiedUpdate;
typedef std::pair<UpdateAttemptResponse, syncable::Id> AppliedUpdate;
-// This is the type declaration for the eventsys channel that the syncer
-// uses to send events to other system components.
+// This is the type declaration for the eventsys channel that the syncer uses
+// to send events to other system components.
struct SyncerEvent;
// SyncCycleState holds the entire state of a single sync cycle;
@@ -101,15 +100,15 @@ class SyncCycleState {
return verified_updates_.end();
}
- // Returns the number of update application attempts. This includes
- // both failures and successes.
+ // Returns the number of update application attempts. This includes both
+ // failures and successes.
int AppliedUpdatesSize() const {
return applied_updates_.size();
}
- // Count the number of successful update applications that have happend
- // this cycle. Note that if an item is successfully applied twice,
- // it will be double counted here.
+ // Count the number of successful update applications that have happend this
+ // cycle. Note that if an item is successfully applied twice, it will be
+ // double counted here.
int SuccessfullyAppliedUpdateCount() const {
int count = 0;
for (std::vector<AppliedUpdate>::const_iterator it =
@@ -157,7 +156,7 @@ class SyncCycleState {
bool has_open_write_transaction() { return write_transaction_ != NULL; }
- // sets the write transaction to null, but doesn't free the memory.
+ // Sets the write transaction to null, but doesn't free the memory.
void ClearWriteTransaction() { write_transaction_ = NULL; }
ClientToServerMessage* commit_message() { return &commit_message_; }
@@ -197,11 +196,10 @@ class SyncCycleState {
bool items_committed() const { return items_committed_; }
-
- // Returns true if this object has been modified since last SetClean() call
+ // Returns true if this object has been modified since last SetClean() call.
bool IsDirty() const { return dirty_; }
- // Call to tell this status object that its new state has been seen
+ // Call to tell this status object that its new state has been seen.
void SetClean() { dirty_ = false; }
// Indicate that we've made a change to directory timestamp.
@@ -213,11 +211,10 @@ class SyncCycleState {
return timestamp_dirty_;
}
-
private:
void UpdateDirty(bool new_info) { dirty_ |= new_info; }
- // download updates supplies:
+ // Download updates supplies:
ClientToServerResponse update_response_;
ClientToServerResponse commit_response_;
ClientToServerMessage commit_message_;
@@ -238,7 +235,7 @@ class SyncCycleState {
bool dirty_;
- // some container for updates that failed verification
+ // Some container for updates that failed verification.
std::vector<VerifiedUpdate> verified_updates_;
// Stores the result of the various ApplyUpdate attempts we've made.
diff --git a/chrome/browser/sync/engine/sync_process_state.cc b/chrome/browser/sync/engine/sync_process_state.cc
index 6f76eee..369a109 100644
--- a/chrome/browser/sync/engine/sync_process_state.cc
+++ b/chrome/browser/sync/engine/sync_process_state.cc
@@ -130,7 +130,7 @@ SyncProcessState& SyncProcessState::operator=(const SyncProcessState& counts) {
return *this;
}
-// status maintenance functions
+// Status maintenance functions.
void SyncProcessState::set_invalid_store(const bool val) {
UpdateDirty(val != invalid_store_);
invalid_store_ = val;
@@ -181,7 +181,7 @@ void SyncProcessState::set_conflicting_commits(const int val) {
stalled_commits_ = val;
}
-// WEIRD COUNTER functions
+// WEIRD COUNTER functions.
void SyncProcessState::increment_consecutive_problem_get_updates() {
UpdateDirty(true);
++consecutive_problem_get_updates_;
@@ -233,7 +233,7 @@ void SyncProcessState::zero_successful_commits() {
successful_commits_ = 0;
}
-// Methods for managing error rate tracking
+// Methods for managing error rate tracking.
void SyncProcessState::TallyNewError() {
UpdateDirty(true);
error_rate_ += (65536 - error_rate_) >> 2;
@@ -260,7 +260,7 @@ void SyncProcessState::MergeSets(const syncable::Id& id1,
vector<syncable::Id>* set2 = id_to_conflict_set_[id2];
vector<syncable::Id>* rv = 0;
if (0 == set1 && 0 == set2) {
- // neither item currently has a set so we build one.
+ // Neither item currently has a set so we build one.
rv = new vector<syncable::Id>();
rv->push_back(id1);
if (id1 != id2) {
@@ -270,25 +270,25 @@ void SyncProcessState::MergeSets(const syncable::Id& id1,
}
conflict_sets_.insert(rv);
} else if (0 == set1) {
- // add the item to the existing set.
+ // Add the item to the existing set.
rv = set2;
rv->push_back(id1);
} else if (0 == set2) {
- // add the item to the existing set.
+ // Add the item to the existing set.
rv = set1;
rv->push_back(id2);
} else if (set1 == set2) {
- // It's the same set already
+ // It's the same set already.
return;
} else {
- // merge the two sets.
+ // Merge the two sets.
rv = set1;
- // point all the second sets id's back to the first.
+ // Point all the second sets id's back to the first.
vector<syncable::Id>::iterator i;
for (i = set2->begin() ; i != set2->end() ; ++i) {
id_to_conflict_set_[*i] = rv;
}
- // copy the second set to the first.
+ // Copy the second set to the first.
rv->insert(rv->end(), set2->begin(), set2->end());
conflict_sets_.erase(set2);
delete set2;
@@ -311,13 +311,13 @@ SyncProcessState::~SyncProcessState() {
}
void SyncProcessState::AuthFailed() {
- // dirty if the last one DIDN'T fail.
+ // Dirty if the last one DIDN'T fail.
UpdateAuthDirty(true != auth_failed_);
auth_failed_ = true;
}
void SyncProcessState::AuthSucceeded() {
- // dirty if the last one DID fail.
+ // Dirty if the last one DID fail.
UpdateAuthDirty(false != auth_failed_);
auth_failed_ = false;
}
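The TallyNewError() hunk above maintains error_rate_ with a shift-based moving average: error_rate_ += (65536 - error_rate_) >> 2 moves the rate a quarter of the way toward 65536 on every error. A standalone sketch of just that arithmetic, assuming (this is an interpretation, not stated in the diff) that 65536 is meant as the full-scale "100% errors" ceiling and starting from a rate of 0:

#include <cstdio>

int main() {
  int error_rate = 0;  // Assumed 16-bit fixed point: 65536 ~= 100% error rate.
  for (int i = 1; i <= 4; ++i) {
    error_rate += (65536 - error_rate) >> 2;  // Move 1/4 of the way to the cap.
    std::printf("after error %d: %d (%.1f%%)\n", i, error_rate,
                100.0 * error_rate / 65536);
  }
  return 0;
}

Successive errors yield 25.0%, 43.8%, 57.8%, 68.4%, so repeated failures push the rate up geometrically toward the cap.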
diff --git a/chrome/browser/sync/engine/sync_process_state.h b/chrome/browser/sync/engine/sync_process_state.h
index 32c6808..eeb0cef 100644
--- a/chrome/browser/sync/engine/sync_process_state.h
+++ b/chrome/browser/sync/engine/sync_process_state.h
@@ -1,7 +1,6 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
//
// The sync process consists of a sequence of sync cycles, each of which
// (hopefully) moves the client into closer synchronization with the server.
@@ -43,7 +42,7 @@ class SyncProcessState {
SyncerEventChannel* syncer_event_channel,
ModelSafeWorker* model_safe_worker);
- // intentionally not 'explicit' b/c it's a copy ctor:
+ // Intentionally not 'explicit' b/c it's a copy ctor:
SyncProcessState(const SyncProcessState& counts);
SyncProcessState& operator=(const SyncProcessState& that);
@@ -51,8 +50,7 @@ class SyncProcessState {
syncable::DirectoryManager* dirman() const { return dirman_; }
- ServerConnectionManager* connection_manager()
- const {
+ ServerConnectionManager* connection_manager() const {
return connection_manager_;
}
@@ -64,7 +62,7 @@ class SyncProcessState {
return syncer_event_channel_;
}
- // Functions that deal with conflict set stuff
+ // Functions that deal with conflict set stuff.
IdToConflictSetMap::const_iterator IdToConflictSetFind(
const syncable::Id& the_id) const {
return id_to_conflict_set_.find(the_id);
@@ -103,7 +101,7 @@ class SyncProcessState {
void CleanupSets();
// END conflict set functions
- // item id set manipulation functions
+ // Item id set manipulation functions.
bool HasConflictingItems() const {
return !conflicting_item_ids_.empty();
}
@@ -179,15 +177,16 @@ class SyncProcessState {
}
// END item id set manipulation functions
- // Assorted other state info
+ // Assorted other state info.
int conflicting_updates() const { return conflicting_item_ids_.size(); }
+ // TODO(sync): make these two members private and add getters/setters.
int num_sync_cycles_;
// When we're over bandwidth quota, we don't update until past this time.
time_t silenced_until_;
- // Info that is tracked purely for status reporting
+ // Info that is tracked purely for status reporting.
// During inital sync these two members can be used to measure sync progress.
int64 current_sync_timestamp() const { return current_sync_timestamp_; }
@@ -224,7 +223,7 @@ class SyncProcessState {
void set_stalled_commits(const int val);
- // WEIRD COUNTER manipulation functions
+ // WEIRD COUNTER manipulation functions.
int consecutive_problem_get_updates() const {
return consecutive_problem_get_updates_;
}
@@ -260,9 +259,9 @@ class SyncProcessState {
void increment_successful_commits();
void zero_successful_commits();
- // end WEIRD COUNTER manipulation functions
+  // End WEIRD COUNTER manipulation functions.
- // Methods for managing error rate tracking
+ // Methods for managing error rate tracking.
void TallyNewError();
void TallyBigNewError();
@@ -271,24 +270,24 @@ class SyncProcessState {
void CheckErrorRateTooHigh();
- // Methods for tracking authentication state
+ // Methods for tracking authentication state.
void AuthFailed();
void AuthSucceeded();
- // Returns true if this object has been modified since last SetClean() call
+ // Returns true if this object has been modified since last SetClean() call.
bool IsDirty() const { return dirty_; }
- // Call to tell this status object that its new state has been seen
+ // Call to tell this status object that its new state has been seen.
void SetClean() { dirty_ = false; }
- // Returns true if auth status has been modified since last SetClean() call
+ // Returns true if auth status has been modified since last SetClean() call.
bool IsAuthDirty() const { return auth_dirty_; }
- // Call to tell this status object that its auth state has been seen
+ // Call to tell this status object that its auth state has been seen.
void SetAuthClean() { auth_dirty_ = false; }
private:
- // for testing
+ // For testing.
SyncProcessState()
: account_name_(PSTR("")),
dirman_(NULL),
@@ -316,7 +315,7 @@ class SyncProcessState {
syncing_(false),
invalid_store_(false) {}
- ServerConnectionManager *connection_manager_;
+ ServerConnectionManager* connection_manager_;
const PathString account_name_;
syncable::DirectoryManager* const dirman_;
ConflictResolver* const resolver_;
@@ -336,7 +335,7 @@ class SyncProcessState {
// status reporting purposes.
static const int ERROR_THRESHOLD = 500;
int error_rate_; // A EMA in the range [0,65536)
- int64 current_sync_timestamp_; // During inital sync these two members
+  int64 current_sync_timestamp_;  // During initial sync these two members
int64 servers_latest_timestamp_; // can be used to measure sync progress.
// There remains sync state updating in:
@@ -357,15 +356,14 @@ class SyncProcessState {
// consecutive_problem_get_updates_ resets when we get any updates (not on
// pings) and increments whenever the request fails.
int consecutive_problem_get_updates_;
- // consecutive_problem_commits_ resets whenever we commit any number of
- // items and increments whenever all commits fail for any reason.
+ // consecutive_problem_commits_ resets whenever we commit any number of items
+ // and increments whenever all commits fail for any reason.
int consecutive_problem_commits_;
// number of commits hitting transient errors since the last successful
// commit.
int consecutive_transient_error_commits_;
- // Incremented when get_updates fails, commit fails, and when
- // hitting transient errors. When any of these succeed, this counter
- // is reset.
+ // Incremented when get_updates fails, commit fails, and when hitting
+ // transient errors. When any of these succeed, this counter is reset.
// TODO(chron): Reduce number of weird counters we use.
int consecutive_errors_;
int successful_commits_;
diff --git a/chrome/browser/sync/engine/syncapi.cc b/chrome/browser/sync/engine/syncapi.cc
index c30a5ea..db78281 100644
--- a/chrome/browser/sync/engine/syncapi.cc
+++ b/chrome/browser/sync/engine/syncapi.cc
@@ -68,12 +68,12 @@ static const int kSSLPort = 443;
// We shouldn't call InitLogFiles more than once since that will cause a crash.
// So we use a global state variable to avoid that. This doesn't work in case
// of multiple threads, and if some other part also tries to call InitLogFiles
-// apart from this file. But this is okay for now since this is the only
-// place we call InitLogFiles.
+// apart from this file. But this is okay for now since this is the only place
+// we call InitLogFiles.
namespace {
static bool g_log_files_initialized = false;
-static base::AtExitManager g_at_exit_manager; // Necessary for NewCallback
-} // empty namespace
+static base::AtExitManager g_at_exit_manager; // Necessary for NewCallback.
+} // namespace
struct ThreadParams {
browser_sync::ServerConnectionManager* conn_mgr;
@@ -82,9 +82,9 @@ struct ThreadParams {
#endif
};
-// This thread calls CheckServerReachable() whenever a change occurs
-// in the table that maps IP addresses to interfaces, for example when
-// the user unplugs his network cable.
+// This thread calls CheckServerReachable() whenever a change occurs in the
+// table that maps IP addresses to interfaces, for example when the user
+// unplugs his network cable.
void* AddressWatchThread(void* arg) {
NameCurrentThreadForDebugging("SyncEngine_AddressWatcher");
LOG(INFO) << "starting the address watch thread";
@@ -369,8 +369,8 @@ WriteNode::~WriteNode() {
delete entry_;
}
-// Find an existing node matching the ID |id|, and bind this WriteNode
-// to it. Return true on success.
+// Find an existing node matching the ID |id|, and bind this WriteNode to it.
+// Return true on success.
bool WriteNode::InitByIdLookup(int64 id) {
DCHECK(!entry_) << "Init called twice";
DCHECK_NE(id, kInvalidId);
@@ -790,32 +790,31 @@ class SyncManager::SyncInternal {
}
private:
// Try to authenticate using persisted credentials from a previous successful
- // authentication. If no such credentials exist, calls OnAuthError on
- // the client to collect credentials. Otherwise, there exist local
- // credentials that were once used for a successful auth, so we'll try to
- // re-use these.
- // Failure of that attempt will be communicated as normal using
- // OnAuthError. Since this entry point will bypass normal GAIA
- // authentication and try to authenticate directly with the sync service
- // using a cached token, authentication failure will generally occur due to
- // expired credentials, or possibly because of a password change.
+ // authentication. If no such credentials exist, calls OnAuthError on the
+ // client to collect credentials. Otherwise, there exist local credentials
+ // that were once used for a successful auth, so we'll try to re-use these.
+ // Failure of that attempt will be communicated as normal using OnAuthError.
+ // Since this entry point will bypass normal GAIA authentication and try to
+ // authenticate directly with the sync service using a cached token,
+ // authentication failure will generally occur due to expired credentials, or
+ // possibly because of a password change.
void AuthenticateForLastKnownUser();
- // Helper to call OnAuthError when no authentication credentials
- // are available.
+ // Helper to call OnAuthError when no authentication credentials are
+ // available.
void RaiseAuthNeededEvent();
- // Helper to set initialized_ to true and raise an event to clients to
- // notify that initialization is complete and it is safe to send us changes.
- // If already initialized, this is a no-op.
+ // Helper to set initialized_ to true and raise an event to clients to notify
+ // that initialization is complete and it is safe to send us changes. If
+ // already initialized, this is a no-op.
void MarkAndNotifyInitializationComplete();
// Determine if the parents or predecessors differ between the old and new
- // versions of an entry stored in |a| and |b|. Note that a node's index
- // may change without its NEXT_ID changing if the node at NEXT_ID also
- // moved (but the relative order is unchanged). To handle such cases,
- // we rely on the caller to treat a position update on any sibling as
- // updating the positions of all siblings.
+ // versions of an entry stored in |a| and |b|. Note that a node's index may
+ // change without its NEXT_ID changing if the node at NEXT_ID also moved (but
+ // the relative order is unchanged). To handle such cases, we rely on the
+ // caller to treat a position update on any sibling as updating the positions
+ // of all siblings.
static bool BookmarkPositionsDiffer(const syncable::EntryKernel& a,
const syncable::Entry& b) {
if (a.ref(syncable::NEXT_ID) != b.Get(syncable::NEXT_ID))
@@ -826,8 +825,8 @@ class SyncManager::SyncInternal {
}
// Determine if any of the fields made visible to clients of the Sync API
- // differ between the versions of an entry stored in |a| and |b|.
- // A return value of false means that it should be OK to ignore this change.
+ // differ between the versions of an entry stored in |a| and |b|. A return
+ // value of false means that it should be OK to ignore this change.
static bool BookmarkPropertiesDiffer(const syncable::EntryKernel& a,
const syncable::Entry& b) {
if (a.ref(syncable::NAME) != b.Get(syncable::NAME))
@@ -867,8 +866,8 @@ class SyncManager::SyncInternal {
// A sink for client commands from the syncer needed to create a SyncerThread.
ClientCommandChannel command_channel_;
- // The ServerConnectionManager used to abstract communication between
- // the client (the Syncer) and the sync server.
+ // The ServerConnectionManager used to abstract communication between the
+ // client (the Syncer) and the sync server.
scoped_ptr<SyncAPIServerConnectionManager> connection_manager_;
// The thread that runs the Syncer. Needs to be explicitly Start()ed.
@@ -1369,9 +1368,9 @@ SyncManager::Status SyncManager::SyncInternal::ComputeAggregatedStatus() {
void SyncManager::SyncInternal::HandleSyncerEvent(const SyncerEvent& event) {
if (!initialized()) {
- // We get here if A) We have successfully authenticated at least once (
- // because we attach HandleSyncerEvent only once we receive notification of
- // successful authentication [locally or otherwise]), but B) the initial
+    // We get here if A) we have successfully authenticated at least once
+ // (because we attach HandleSyncerEvent only once we receive notification
+ // of successful authentication [locally or otherwise]), but B) the initial
// sync had not completed at that time.
if (SyncerStatus(event.last_session).IsShareUsable())
MarkAndNotifyInitializationComplete();
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
index 0b02e2e..a297dca 100644
--- a/chrome/browser/sync/engine/syncer.cc
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -49,7 +49,7 @@ namespace browser_sync {
Syncer::Syncer(
syncable::DirectoryManager* dirman,
- const PathString &account_name,
+ const PathString& account_name,
ServerConnectionManager* connection_manager,
ModelSafeWorker* model_safe_worker)
: account_name_(account_name),
@@ -88,7 +88,7 @@ bool Syncer::SyncShare() {
return SyncShare(&state);
}
-bool Syncer::SyncShare(SyncProcessState *process_state) {
+bool Syncer::SyncShare(SyncProcessState* process_state) {
SyncCycleState cycle_state;
SyncerSession session(&cycle_state, process_state);
session.set_source(TestAndSetUpdatesSource());
@@ -107,11 +107,11 @@ bool Syncer::SyncShare(SyncerStep first_step, SyncerStep last_step) {
return session.ShouldSyncAgain();
}
-void Syncer::SyncShare(SyncerSession *session) {
+void Syncer::SyncShare(SyncerSession* session) {
SyncShare(session, SYNCER_BEGIN, SYNCER_END);
}
-void Syncer::SyncShare(SyncerSession *session,
+void Syncer::SyncShare(SyncerSession* session,
const SyncerStep first_step,
const SyncerStep last_step) {
SyncerStep current_step = first_step;
@@ -275,7 +275,7 @@ void Syncer::SyncShare(SyncerSession *session,
return;
}
-void Syncer::ProcessClientCommand(SyncerSession *session) {
+void Syncer::ProcessClientCommand(SyncerSession* session) {
if (!session->update_response().has_client_command())
return;
const ClientCommand command = session->update_response().client_command();
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
index f546f20..8c267c1 100644
--- a/chrome/browser/sync/engine/syncer.h
+++ b/chrome/browser/sync/engine/syncer.h
@@ -74,12 +74,9 @@ class Syncer {
// The constructor may be called from a thread that is not the Syncer's
// dedicated thread, to allow some flexibility in the setup.
- Syncer(
- syncable::DirectoryManager* dirman,
- const PathString &account_name,
+ Syncer(syncable::DirectoryManager* dirman, const PathString& account_name,
ServerConnectionManager* connection_manager,
ModelSafeWorker* model_safe_worker);
-
~Syncer();
// Called by other threads to tell the syncer to stop what it's doing
@@ -95,7 +92,7 @@ class Syncer {
// When |first_step| and |last_step| are provided, this means to perform
// a partial sync cycle, stopping after |last_step| is performed.
bool SyncShare();
- bool SyncShare(SyncProcessState *sync_process_state);
+ bool SyncShare(SyncProcessState* sync_process_state);
bool SyncShare(SyncerStep first_step, SyncerStep last_step);
// Limit the batch size of commit operations to a specified number of items.
@@ -200,10 +197,10 @@ class Syncer {
// Inline utility functions.
-// Given iterator ranges from two collections sorted according to a
-// common strict weak ordering, return true if the two ranges contain
-// any common items, and false if they do not.
-// This function is in this header so that it can be tested.
+// Given iterator ranges from two collections sorted according to a common
+// strict weak ordering, return true if the two ranges contain any common
+// items, and false if they do not. This function is in this header so that it
+// can be tested.
template <class Iterator1, class Iterator2>
bool SortedCollectionsIntersect(Iterator1 begin1, Iterator1 end1,
Iterator2 begin2, Iterator2 end2) {
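The reflowed comment above describes SortedCollectionsIntersect(): given two iterator ranges sorted under a common strict weak ordering, it reports whether they share any element, and it lives in this header so unit tests can reach it. A usage sketch under stated assumptions follows: it assumes the template is declared in namespace browser_sync alongside Syncer, and the caller name, vectors, and values are invented purely for illustration.

#include <vector>

#include "chrome/browser/sync/engine/syncer.h"

// Hypothetical caller; not part of the patch.
bool CommittedAndConflictingOverlap() {
  std::vector<int> committed;  // Must already be sorted.
  committed.push_back(1);
  committed.push_back(4);
  committed.push_back(9);

  std::vector<int> conflicting;  // Sorted under the same ordering.
  conflicting.push_back(2);
  conflicting.push_back(4);

  // True here, because both ranges contain 4; disjoint ranges return false.
  return browser_sync::SortedCollectionsIntersect(
      committed.begin(), committed.end(),
      conflicting.begin(), conflicting.end());
}
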
diff --git a/chrome/browser/sync/engine/syncer_command.cc b/chrome/browser/sync/engine/syncer_command.cc
index 9f05b64..61df463 100644
--- a/chrome/browser/sync/engine/syncer_command.cc
+++ b/chrome/browser/sync/engine/syncer_command.cc
@@ -16,12 +16,12 @@ namespace browser_sync {
SyncerCommand::SyncerCommand() {}
SyncerCommand::~SyncerCommand() {}
-void SyncerCommand::Execute(SyncerSession *session) {
+void SyncerCommand::Execute(SyncerSession* session) {
ExecuteImpl(session);
SendNotifications(session);
}
-void SyncerCommand::SendNotifications(SyncerSession *session) {
+void SyncerCommand::SendNotifications(SyncerSession* session) {
syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
if (!dir.good()) {
LOG(ERROR) << "Scoped dir lookup failed!";
diff --git a/chrome/browser/sync/engine/syncer_command.h b/chrome/browser/sync/engine/syncer_command.h
index 3fcff7d..718c4e3 100644
--- a/chrome/browser/sync/engine/syncer_command.h
+++ b/chrome/browser/sync/engine/syncer_command.h
@@ -12,17 +12,16 @@ namespace browser_sync {
class SyncerSession;
// Implementation of a simple command pattern intended to be driven by the
-// Syncer. SyncerCommand is abstract and all subclasses must
-// implement ExecuteImpl(). This is done so that chunks of syncer operation
-// can be unit tested.
+// Syncer. SyncerCommand is abstract and all subclasses must implement
+// ExecuteImpl(). This is done so that chunks of syncer operation can be unit
+// tested.
//
// Example Usage:
//
-// SyncerSession session = ...;
-// SyncerCommand *cmd = SomeCommandFactory.createCommand(...);
-// cmd->Execute(session);
-// delete cmd;
-//
+// SyncerSession session = ...;
+//   SyncerCommand* cmd = SomeCommandFactory.createCommand(...);
+// cmd->Execute(session);
+// delete cmd;
class SyncerCommand {
public:
@@ -30,12 +29,12 @@ class SyncerCommand {
virtual ~SyncerCommand();
// Execute dispatches to a derived class's ExecuteImpl.
- void Execute(SyncerSession *session);
+ void Execute(SyncerSession* session);
// ExecuteImpl is where derived classes actually do work.
- virtual void ExecuteImpl(SyncerSession *session) = 0;
+ virtual void ExecuteImpl(SyncerSession* session) = 0;
private:
- void SendNotifications(SyncerSession *session);
+ void SendNotifications(SyncerSession* session);
DISALLOW_COPY_AND_ASSIGN(SyncerCommand);
};
diff --git a/chrome/browser/sync/engine/syncer_end_command.h b/chrome/browser/sync/engine/syncer_end_command.h
index 904bac4..253907a 100644
--- a/chrome/browser/sync/engine/syncer_end_command.h
+++ b/chrome/browser/sync/engine/syncer_end_command.h
@@ -14,9 +14,9 @@ class SyncerSession;
// A syncer command for wrapping up a sync cycle.
//
-// Preconditions - syncing is complete
+// Preconditions - syncing is complete.
//
-// Postconditions - The UI has been told that we're done syncing
+// Postconditions - The UI has been told that we're done syncing.
class SyncerEndCommand : public SyncerCommand {
public:
@@ -27,6 +27,7 @@ class SyncerEndCommand : public SyncerCommand {
private:
DISALLOW_COPY_AND_ASSIGN(SyncerEndCommand);
};
+
} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_END_COMMAND_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util.cc b/chrome/browser/sync/engine/syncer_proto_util.cc
index 38ee50d..33aa14d 100644
--- a/chrome/browser/sync/engine/syncer_proto_util.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util.cc
@@ -93,11 +93,8 @@ void LogResponseProfilingData(const ClientToServerResponse& response) {
} // namespace
// static
-bool SyncerProtoUtil::PostClientToServerMessage(
- ClientToServerMessage* msg,
- ClientToServerResponse* response,
- SyncerSession *session) {
-
+bool SyncerProtoUtil::PostClientToServerMessage(ClientToServerMessage* msg,
+ ClientToServerResponse* response, SyncerSession* session) {
bool rv = false;
string tx, rx;
CHECK(response);
@@ -126,8 +123,8 @@ bool SyncerProtoUtil::PostClientToServerMessage(
SyncerStatus status(session);
if (rv) {
if (!VerifyResponseBirthday(dir, response)) {
- // TODO(ncarter): Add a unit test for the case where the syncer
- // becomes stuck due to a bad birthday.
+ // TODO(ncarter): Add a unit test for the case where the syncer becomes
+ // stuck due to a bad birthday.
status.set_syncer_stuck(true);
return false;
}
@@ -191,8 +188,8 @@ bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
return false;
}
- // These checks are somewhat prolix, but they're easier to debug than
- // a big boolean statement.
+ // These checks are somewhat prolix, but they're easier to debug than a big
+ // boolean statement.
SyncName client_name = local_entry.GetName();
if (client_name != name) {
LOG(WARNING) << "Client name mismatch";
@@ -244,7 +241,7 @@ void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
// static
syncable::SyncName SyncerProtoUtil::NameFromSyncEntity(
- const SyncEntity &entry) {
+ const SyncEntity& entry) {
SyncName result(PSTR(""));
AppendUTF8ToPathString(entry.name(), &result.value());
@@ -272,5 +269,4 @@ syncable::SyncName SyncerProtoUtil::NameFromCommitEntryResponse(
return result;
}
-
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_proto_util.h b/chrome/browser/sync/engine/syncer_proto_util.h
index ecee903..e3583cf 100644
--- a/chrome/browser/sync/engine/syncer_proto_util.h
+++ b/chrome/browser/sync/engine/syncer_proto_util.h
@@ -8,8 +8,8 @@
#include <string>
#include "chrome/browser/sync/engine/syncer_session.h"
-#include "chrome/browser/sync/util/sync_types.h"
#include "chrome/browser/sync/syncable/blob.h"
+#include "chrome/browser/sync/util/sync_types.h"
namespace syncable {
class Entry;
@@ -35,20 +35,20 @@ class SyncerProtoUtil {
// session->status()->syncer_stuck_ is set true if the birthday is
// incorrect. A false value will always be returned if birthday is bad.
static bool PostClientToServerMessage(ClientToServerMessage* msg,
- sync_pb::ClientToServerResponse* response, SyncerSession *session);
+ sync_pb::ClientToServerResponse* response, SyncerSession* session);
- // Compares a syncable Entry to SyncEntity, returns true iff
- // the data is identical.
+ // Compares a syncable Entry to SyncEntity, returns true iff the data is
+ // identical.
//
- // TODO(sync): The places where this function is used are arguable big
- // causes of the fragility, because there's a tendency to freak out
- // the moment the local and server values diverge. However, this almost
- // always indicates a sync bug somewhere earlier in the sync cycle.
+  // TODO(sync): The places where this function is used are arguably big causes
+ // of the fragility, because there's a tendency to freak out the moment the
+ // local and server values diverge. However, this almost always indicates a
+ // sync bug somewhere earlier in the sync cycle.
static bool Compare(const syncable::Entry& local_entry,
const SyncEntity& server_entry);
- // Utility methods for converting between syncable::Blobs and protobuf
- // byte fields.
+ // Utility methods for converting between syncable::Blobs and protobuf byte
+ // fields.
static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
syncable::Blob* blob);
static bool ProtoBytesEqualsBlob(const std::string& proto_bytes,
@@ -57,8 +57,7 @@ class SyncerProtoUtil {
std::string* proto_bytes);
// Extract the name fields from a sync entity.
- static syncable::SyncName NameFromSyncEntity(
- const SyncEntity& entry);
+ static syncable::SyncName NameFromSyncEntity(const SyncEntity& entry);
// Extract the name fields from a commit entry response.
static syncable::SyncName NameFromCommitEntryResponse(
@@ -68,6 +67,7 @@ class SyncerProtoUtil {
SyncerProtoUtil() {}
DISALLOW_COPY_AND_ASSIGN(SyncerProtoUtil);
};
+
} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_PROTO_UTIL_H_
diff --git a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
index c11a4ca..1682b05 100644
--- a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
@@ -69,8 +69,8 @@ TEST(SyncerProtoUtil, TestBlobToProtocolBufferBytesUtilityFunctions) {
test_blob2));
}
-// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the
-// name field is provided.
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when only the name
+// field is provided.
TEST(SyncerProtoUtil, NameExtractionOneName) {
SyncEntity one_name_entity;
CommitResponse_EntryResponse one_name_response;
@@ -91,8 +91,8 @@ TEST(SyncerProtoUtil, NameExtractionOneName) {
EXPECT_TRUE(name_a == name_b);
}
-// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the
-// name field and the non_unique_name fields are provided.
+// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the name
+// field and the non_unique_name fields are provided.
TEST(SyncerProtoUtil, NameExtractionTwoNames) {
SyncEntity two_name_entity;
CommitResponse_EntryResponse two_name_response;
diff --git a/chrome/browser/sync/engine/syncer_session.h b/chrome/browser/sync/engine/syncer_session.h
index e90930f..4e1ff26 100644
--- a/chrome/browser/sync/engine/syncer_session.h
+++ b/chrome/browser/sync/engine/syncer_session.h
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// SyncerSession holds the entire state of a single sync cycle;
-// GetUpdates, Commit, and Conflict Resolution. After said cycle, the
-// Session may contain items that were unable to be processed because of
-// errors.
+// SyncerSession holds the entire state of a single sync cycle; GetUpdates,
+// Commit, and Conflict Resolution. After said cycle, the Session may contain
+// items that were unable to be processed because of errors.
//
// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
@@ -38,8 +37,8 @@ class SyncerSession {
friend class ConflictResolutionView;
friend class SyncerStatus;
public:
- // A utility to set the session's write transaction member,
- // and later clear it when it the utility falls out of scope.
+ // A utility to set the session's write transaction member, and later clear
+  // it when the utility falls out of scope.
class ScopedSetWriteTransaction {
public:
ScopedSetWriteTransaction(SyncerSession* session,
@@ -190,8 +189,8 @@ class SyncerSession {
sync_process_state_->EraseBlockedItem(the_id);
}
- // Returns true if at least one update application failed due to
- // a conflict during this sync cycle.
+ // Returns true if at least one update application failed due to a conflict
+ // during this sync cycle.
bool HasConflictingUpdates() const {
std::vector<AppliedUpdate>::const_iterator it;
for (it = sync_cycle_state_->AppliedUpdatesBegin();
@@ -321,8 +320,8 @@ class SyncerSession {
}
// TODO(chron): Unit test for this method.
- // returns true iff this session contains data that should go through
- // the sync engine again.
+  // Returns true iff this session contains data that should go through the
+ // sync engine again.
bool ShouldSyncAgain() const {
return (HasRemainingItemsToCommit() &&
sync_process_state_->successful_commits() > 0) ||
@@ -341,7 +340,7 @@ class SyncerSession {
sync_cycle_state_->set_write_transaction(write_transaction);
}
- // sets the write transaction to null, but doesn't free the memory.
+ // Sets the write transaction to null, but doesn't free the memory.
void ClearWriteTransaction() {
sync_cycle_state_->ClearWriteTransaction();
}
diff --git a/chrome/browser/sync/engine/syncer_status.h b/chrome/browser/sync/engine/syncer_status.h
index 4f70ecd..8da52a8 100644
--- a/chrome/browser/sync/engine/syncer_status.h
+++ b/chrome/browser/sync/engine/syncer_status.h
@@ -4,11 +4,11 @@
//
// TODO(sync): We eventually want to fundamentally change how we represent
// status and inform the UI about the ways in which our status has changed.
-// Right now, we're just trying to keep the various command classes from
-// having to worry about this class.
+// Right now, we're just trying to keep the various command classes from having
+// to worry about this class.
//
-// The UI will request that we fill this struct so it can show the current
-// sync state.
+// The UI will request that we fill this struct so it can show the current sync
+// state.
//
// THIS CLASS PROVIDES NO SYNCHRONIZATION GUARANTEES.
@@ -21,11 +21,12 @@
#include "chrome/browser/sync/engine/sync_process_state.h"
namespace browser_sync {
+
class SyncerSession;
class SyncerStatus {
public:
- explicit SyncerStatus(SyncCycleState* cycle_state, SyncProcessState* state)
+ SyncerStatus(SyncCycleState* cycle_state, SyncProcessState* state)
: sync_process_state_(state),
sync_cycle_state_(cycle_state){}
explicit SyncerStatus(SyncerSession* s);
@@ -59,8 +60,8 @@ class SyncerStatus {
return sync_process_state_->IsShareUsable();
}
- // During initial sync these two members can be used to
- // measure sync progress.
+ // During initial sync these two members can be used to measure sync
+ // progress.
int64 current_sync_timestamp() const {
return sync_process_state_->current_sync_timestamp();
}
@@ -97,12 +98,10 @@ class SyncerStatus {
return sync_process_state_->BlockedItemsSize();
}
- // derive from sync_process_state blocked_item_ids_
int stalled_updates() const {
return sync_process_state_->BlockedItemsSize();
}
- // in sync_process_state
int error_commits() const {
return sync_process_state_->error_commits();
}
@@ -111,7 +110,7 @@ class SyncerStatus {
sync_process_state_->set_error_commits(val);
}
- // WEIRD COUNTER manipulation functions
+ // WEIRD COUNTER manipulation functions.
int consecutive_problem_get_updates() const {
return sync_process_state_->consecutive_problem_get_updates();
}
@@ -176,11 +175,11 @@ class SyncerStatus {
void zero_successful_commits() {
sync_process_state_->zero_successful_commits();
}
- // end WEIRD COUNTER manipulation functions
+ // End WEIRD COUNTER manipulation functions.
bool over_quota() const { return sync_cycle_state_->over_quota(); }
- // Methods for managing error rate tracking in sync_process_state
+ // Methods for managing error rate tracking in sync_process_state.
void TallyNewError() {
sync_process_state_->TallyNewError();
}
@@ -201,21 +200,21 @@ class SyncerStatus {
void AuthSucceeded() { sync_process_state_->AuthSucceeded(); }
- // Returns true if this object has been modified since last SetClean() call
+ // Returns true if this object has been modified since last SetClean() call.
bool IsDirty() const {
return sync_cycle_state_->IsDirty() || sync_process_state_->IsDirty();
}
- // Returns true if auth status has been modified since last SetClean() call
+ // Returns true if auth status has been modified since last SetClean() call.
bool IsAuthDirty() const { return sync_process_state_->IsAuthDirty(); }
- // Call to tell this status object that its new state has been seen
+ // Call to tell this status object that its new state has been seen.
void SetClean() {
sync_process_state_->SetClean();
sync_cycle_state_->SetClean();
}
- // Call to tell this status object that its auth state has been seen
+ // Call to tell this status object that its auth state has been seen.
void SetAuthClean() { sync_process_state_->SetAuthClean(); }
void DumpStatusInfo() const {
@@ -246,10 +245,10 @@ class SyncerStatus {
}
private:
-
- SyncCycleState *sync_cycle_state_;
- SyncProcessState *sync_process_state_;
-
+ SyncCycleState* sync_cycle_state_;
+ SyncProcessState* sync_process_state_;
};
+
} // namespace browser_sync
+
#endif // CHROME_BROWSER_SYNC_ENGINE_SYNCER_STATUS_H_
diff --git a/chrome/browser/sync/engine/syncer_thread.cc b/chrome/browser/sync/engine/syncer_thread.cc
index e0832a7..c96e587 100644
--- a/chrome/browser/sync/engine/syncer_thread.cc
+++ b/chrome/browser/sync/engine/syncer_thread.cc
@@ -31,14 +31,14 @@ static inline bool operator < (const timespec& a, const timespec& b) {
namespace {
-// returns the amount of time since the user last interacted with
-// the computer, in milliseconds
+// Returns the amount of time since the user last interacted with the computer,
+// in milliseconds.
int UserIdleTime() {
#ifdef OS_WINDOWS
LASTINPUTINFO last_input_info;
last_input_info.cbSize = sizeof(LASTINPUTINFO);
- // get time in windows ticks since system start of last activity
+ // Get time in windows ticks since system start of last activity.
BOOL b = ::GetLastInputInfo(&last_input_info);
if (b == TRUE)
return ::GetTickCount() - last_input_info.dwTime;
@@ -167,8 +167,8 @@ SyncerThread::~SyncerThread() {
}
// Creates and starts a syncer thread.
-// Returns true if it creates a thread or if there's currently a thread
-// running and false otherwise.
+// Returns true if it creates a thread or if there's currently a thread running
+// and false otherwise.
bool SyncerThread::Start() {
MutexLock lock(&mutex_);
if (thread_running_) {
@@ -183,14 +183,14 @@ bool SyncerThread::Start() {
}
// Stop processing. A max wait of at least 2*server RTT time is recommended.
-// returns true if we stopped, false otherwise.
+// Returns true if we stopped, false otherwise.
bool SyncerThread::Stop(int max_wait) {
MutexLock lock(&mutex_);
if (!thread_running_)
return true;
stop_syncer_thread_ = true;
if (NULL != syncer_) {
- // try to early exit the syncer
+ // Try to early exit the syncer.
syncer_->RequestEarlyExit();
}
pthread_cond_broadcast(&changed_.condvar_);
@@ -214,13 +214,12 @@ void SyncerThread::WatchClientCommands(ClientCommandChannel* channel) {
&SyncerThread::HandleClientCommand));
}
-void SyncerThread::HandleClientCommand(ClientCommandChannel::EventType
- event) {
+void SyncerThread::HandleClientCommand(ClientCommandChannel::EventType event) {
if (!event) {
return;
}
- // mutex not really necessary for these
+ // Mutex not really necessary for these.
if (event->has_set_sync_poll_interval()) {
syncer_short_poll_interval_seconds_ = event->set_sync_poll_interval();
}
@@ -287,8 +286,8 @@ void SyncerThread::ThreadMainLoop() {
}
}
-// We check how long the user's been idle and sync less often if the
-// machine is not in use. The aim is to reduce server load.
+// We check how long the user's been idle and sync less often if the machine is
+// not in use. The aim is to reduce server load.
int SyncerThread::CalculatePollingWaitTime(
const AllStatus::Status& status,
int last_poll_wait, // in s
@@ -479,7 +478,7 @@ SyncerEventChannel* SyncerThread::channel() {
return syncer_event_channel_.get();
}
-// inputs and return value in milliseconds
+// Inputs and return value in milliseconds.
int SyncerThread::CalculateSyncWaitTime(int last_interval, int user_idle_ms) {
// syncer_polling_interval_ is in seconds
int syncer_polling_interval_ms = syncer_polling_interval_ * 1000;
@@ -490,8 +489,8 @@ int SyncerThread::CalculateSyncWaitTime(int last_interval, int user_idle_ms) {
// Get idle time, bounded by max wait.
int idle = min(user_idle_ms, syncer_max_interval_);
- // If the user has been idle for a while,
- // we'll start decreasing the poll rate.
+ // If the user has been idle for a while, we'll start decreasing the poll
+ // rate.
if (idle >= kPollBackoffThresholdMultiplier * syncer_polling_interval_ms) {
next_wait = std::min(AllStatus::GetRecommendedDelaySeconds(
last_interval / 1000), syncer_max_interval_ / 1000) * 1000;
@@ -500,7 +499,7 @@ int SyncerThread::CalculateSyncWaitTime(int last_interval, int user_idle_ms) {
return next_wait;
}
-// Called with mutex_ already locked
+// Called with mutex_ already locked.
void SyncerThread::NudgeSyncImpl(int milliseconds_from_now,
NudgeSource source) {
const timespec nudge_time = GetPThreadAbsoluteTime(milliseconds_from_now);
diff --git a/chrome/browser/sync/engine/syncer_thread.h b/chrome/browser/sync/engine/syncer_thread.h
index 30172d0..ca22969e 100644
--- a/chrome/browser/sync/engine/syncer_thread.h
+++ b/chrome/browser/sync/engine/syncer_thread.h
@@ -3,7 +3,6 @@
// found in the LICENSE file.
//
// A class to run the syncer on a thread.
-//
#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_THREAD_H_
@@ -59,15 +58,13 @@ class SyncerThread {
static const int kDefaultShortPollIntervalSeconds = 60;
// Long poll is used when XMPP is on.
static const int kDefaultLongPollIntervalSeconds = 3600;
- // 30 minutes by default. If exponential backoff kicks in, this is
- // the longest possible poll interval.
+ // 30 minutes by default. If exponential backoff kicks in, this is the
+ // longest possible poll interval.
static const int kDefaultMaxPollIntervalMs = 30 * 60 * 1000;
- SyncerThread(
- ClientCommandChannel* command_channel,
+ SyncerThread(ClientCommandChannel* command_channel,
syncable::DirectoryManager* mgr,
- ServerConnectionManager* connection_manager,
- AllStatus* all_status,
+ ServerConnectionManager* connection_manager, AllStatus* all_status,
ModelSafeWorker* model_safe_worker);
~SyncerThread();
@@ -111,9 +108,8 @@ class SyncerThread {
}
};
- typedef std::priority_queue<NudgeObject,
- std::vector<NudgeObject>, IsTimeSpecGreater>
- NudgeQueue;
+ typedef std::priority_queue<NudgeObject, std::vector<NudgeObject>,
+ IsTimeSpecGreater> NudgeQueue;
// Threshold multipler for how long before user should be considered idle.
static const int kPollBackoffThresholdMultiplier = 10;
@@ -157,13 +153,13 @@ class SyncerThread {
void SetUpdatesSource(bool nudged, NudgeSource nudge_source,
bool* initial_sync);
- // for unit tests only
+ // For unit tests only.
void DisableIdleDetection() { disable_idle_detection_ = true; }
- // false when we want to stop the thread.
+ // False when we want to stop the thread.
bool stop_syncer_thread_;
- // we use one mutex for all members except the channel.
+ // We use one mutex for all members except the channel.
PThreadMutex mutex_;
typedef PThreadScopedLock<PThreadMutex> MutexLock;
@@ -171,11 +167,11 @@ class SyncerThread {
pthread_t thread_;
bool thread_running_;
- // Gets signaled whenever a thread outside of the syncer thread
- // changes a member variable.
+ // Gets signaled whenever a thread outside of the syncer thread changes a
+ // member variable.
PThreadCondVar changed_;
- // State of the server connection
+ // State of the server connection.
bool connected_;
// State of the notification framework is tracked by these values.
@@ -207,8 +203,8 @@ class SyncerThread {
scoped_ptr<SyncerEventChannel> syncer_event_channel_;
- // This causes syncer to start syncing ASAP. If the rate of requests is
- // too high the request will be silently dropped. mutex_ should be held when
+ // This causes syncer to start syncing ASAP. If the rate of requests is too
+ // high the request will be silently dropped. mutex_ should be held when
// this is called.
void NudgeSyncImpl(int milliseconds_from_now, NudgeSource source);
diff --git a/chrome/browser/sync/engine/syncer_thread_unittest.cc b/chrome/browser/sync/engine/syncer_thread_unittest.cc
index dd81176..6b758a7 100644
--- a/chrome/browser/sync/engine/syncer_thread_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_thread_unittest.cc
@@ -85,14 +85,14 @@ TEST_F(SyncerThreadTest, CalculateSyncWaitTime) {
}
TEST_F(SyncerThreadTest, CalculatePollingWaitTime) {
- // Set up the environment
+ // Set up the environment.
int user_idle_milliseconds_param = 0;
SyncerThread syncer_thread(NULL, NULL, NULL, NULL, NULL);
syncer_thread.DisableIdleDetection();
// Notifications disabled should result in a polling interval of
- // kDefaultShortPollInterval
+ // kDefaultShortPollInterval.
{
AllStatus::Status status = {};
status.notifications_enabled = 0;
@@ -123,7 +123,7 @@ TEST_F(SyncerThreadTest, CalculatePollingWaitTime) {
}
// Notifications enabled should result in a polling interval of
- // SyncerThread::kDefaultLongPollIntervalSeconds
+ // SyncerThread::kDefaultLongPollIntervalSeconds.
{
AllStatus::Status status = {};
status.notifications_enabled = 1;
@@ -229,8 +229,7 @@ TEST_F(SyncerThreadTest, CalculatePollingWaitTime) {
ASSERT_FALSE(continue_sync_cycle_param);
}
- // Regression for exponential backoff reset when the
- // syncer is nudged.
+ // Regression for exponential backoff reset when the syncer is nudged.
{
AllStatus::Status status = {};
status.unsynced_count = 1;
diff --git a/chrome/browser/sync/engine/syncer_types.h b/chrome/browser/sync/engine/syncer_types.h
index 0d61984..9d10357 100644
--- a/chrome/browser/sync/engine/syncer_types.h
+++ b/chrome/browser/sync/engine/syncer_types.h
@@ -1,7 +1,6 @@
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_TYPES_H_
@@ -16,9 +15,8 @@ class BaseTransaction;
class Id;
}
-// The intent of this is to keep all shared data types and enums
-// for the syncer in a single place without having dependencies between
-// other files.
+// The intent of this is to keep all shared data types and enums for the syncer
+// in a single place without having dependencies between other files.
namespace browser_sync {
class SyncProcessState;
@@ -27,16 +25,15 @@ class SyncerSession;
class Syncer;
enum UpdateAttemptResponse {
- // Update was applied or safely ignored
+ // Update was applied or safely ignored.
SUCCESS,
// This state is deprecated.
// TODO(sync): Remove this state.
BLOCKED,
- // Conflicts with the local data representation.
- // This can also mean that the entry doesn't currently make sense
- // if we applied it.
+ // Conflicts with the local data representation. This can also mean that the
+ // entry doesn't currently make sense if we applied it.
CONFLICT,
// This return value is only returned by AttemptToUpdateEntryWithoutMerge
@@ -65,9 +62,9 @@ enum ServerUpdateProcessingResult {
SUCCESS_VALID = SUCCESS_STORED
};
-// Different results from the verify phase will yield different
-// methods of processing in the ProcessUpdates phase. The SKIP
-// result means the entry doesn't go to the ProcessUpdates phase.
+// Different results from the verify phase will yield different methods of
+// processing in the ProcessUpdates phase. The SKIP result means the entry
+// doesn't go to the ProcessUpdates phase.
enum VerifyResult {
VERIFY_FAIL,
VERIFY_SUCCESS,
@@ -122,8 +119,8 @@ struct SyncerEvent {
int successful_commit_count;
- // How many milliseconds later should the syncer kick in?
- // for REQUEST_SYNC_NUDGE only.
+ // How many milliseconds later should the syncer kick in? For
+ // REQUEST_SYNC_NUDGE only.
int nudge_delay_milliseconds;
};
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 27bdb9b..1c5f6a8 100644
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -90,7 +90,6 @@ const int kTestDataLen = 12;
const int64 kTestLogRequestTimestamp = 123456;
} // namespace
-
class SyncerTest : public testing::Test {
protected:
SyncerTest() : client_command_channel_(0) {
@@ -198,17 +197,17 @@ class SyncerTest : public testing::Test {
EXPECT_FALSE(attr.is_deleted());
EXPECT_EQ(test_value, attr.value());
}
- bool SyncerStuck(SyncProcessState *state) {
+ bool SyncerStuck(SyncProcessState* state) {
SyncerStatus status(NULL, state);
return status.syncer_stuck();
}
- void SyncRepeatedlyToTriggerConflictResolution(SyncProcessState *state) {
+ void SyncRepeatedlyToTriggerConflictResolution(SyncProcessState* state) {
// We should trigger after less than 6 syncs, but we want to avoid brittle
// tests.
for (int i = 0 ; i < 6 ; ++i)
syncer_->SyncShare(state);
}
- void SyncRepeatedlyToTriggerStuckSignal(SyncProcessState *state) {
+ void SyncRepeatedlyToTriggerStuckSignal(SyncProcessState* state) {
// We should trigger after less than 10 syncs, but we want to avoid brittle
// tests.
for (int i = 0 ; i < 12 ; ++i)
@@ -419,7 +418,7 @@ TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
DoTruncationTest(dir, unsynced_handle_view, expected_order);
}
-// TODO(chron): More corner case unit tests around validation
+// TODO(chron): More corner case unit tests around validation.
TEST_F(SyncerTest, TestCommitMetahandleIterator) {
SyncCycleState cycle_state;
SyncerSession session(&cycle_state, state_.get());
@@ -665,8 +664,8 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
grandchild.Put(syncable::BASE_VERSION, 1);
}
{
- // Create three deleted items which deletions we expect to
- // be sent to the server.
+    // Create three deleted items whose deletions we expect to be sent to the
+ // server.
MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(),
PSTR("Pete"));
ASSERT_TRUE(parent.good());
@@ -1121,9 +1120,9 @@ TEST_F(SyncerTest, NameSanitizationWithClientRename) {
#endif
namespace {
-void VerifyExistsWithNameInRoot(syncable::Directory *dir,
- const PathString &name,
- const string &entry,
+void VerifyExistsWithNameInRoot(syncable::Directory* dir,
+ const PathString& name,
+ const string& entry,
int line) {
ReadTransaction tr(dir, __FILE__, __LINE__);
Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
@@ -1328,7 +1327,7 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
TEST_F(SyncerTest, CommitTimeRename) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
- // Create a folder and an entry
+ // Create a folder and an entry.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
@@ -1340,12 +1339,12 @@ TEST_F(SyncerTest, CommitTimeRename) {
WriteTestDataToEntry(&trans, &entry);
}
- // Mix in a directory creation too for later
+ // Mix in a directory creation too for later.
mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
mock_server_->SetCommitTimeRename("renamed_");
syncer_->SyncShare();
- // Verify it was correctly renamed
+ // Verify it was correctly renamed.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry entry_folder(&trans, GET_BY_PATH, PSTR("renamed_Folder"));
@@ -1356,7 +1355,7 @@ TEST_F(SyncerTest, CommitTimeRename) {
+ PSTR("renamed_new_entry"));
ASSERT_TRUE(entry_new.good());
- // And that the unrelated directory creation worked without a rename
+ // And that the unrelated directory creation worked without a rename.
Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
EXPECT_TRUE(new_dir.good());
}
@@ -1364,14 +1363,14 @@ TEST_F(SyncerTest, CommitTimeRename) {
TEST_F(SyncerTest, CommitTimeRenameI18N) {
- // This is utf-8 for the diacritized Internationalization
+ // This is utf-8 for the diacritized Internationalization.
const char* i18nString = "\xc3\x8e\xc3\xb1\x74\xc3\xa9\x72\xc3\xb1"
"\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1\xc3\xa5\x6c\xc3\xae"
"\xc2\x9e\xc3\xa5\x74\xc3\xae\xc3\xb6\xc3\xb1";
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
- // Create a folder and entry
+ // Create a folder and entry.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
@@ -1383,12 +1382,12 @@ TEST_F(SyncerTest, CommitTimeRenameI18N) {
WriteTestDataToEntry(&trans, &entry);
}
- // Mix in a directory creation too for later
+ // Mix in a directory creation too for later.
mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
mock_server_->SetCommitTimeRename(i18nString);
syncer_->SyncShare();
- // Verify it was correctly renamed
+ // Verify it was correctly renamed.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
PathString expectedFolder;
@@ -1403,7 +1402,7 @@ TEST_F(SyncerTest, CommitTimeRenameI18N) {
Entry entry_new(&trans, GET_BY_PATH, expected);
ASSERT_TRUE(entry_new.good());
- // And that the unrelated directory creation worked without a rename
+ // And that the unrelated directory creation worked without a rename.
Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
EXPECT_TRUE(new_dir.good());
}
@@ -1412,7 +1411,7 @@ TEST_F(SyncerTest, CommitTimeRenameI18N) {
TEST_F(SyncerTest, CommitTimeRenameCollision) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
- // Create a folder to collide with
+ // Create a folder to collide with.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry collider(&trans, CREATE, root_id_, PSTR("renamed_Folder"));
@@ -1470,7 +1469,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
entry.Put(IS_UNSYNCED, true);
}
- // Verify it and pull the ID out of the folder
+ // Verify it and pull the ID out of the folder.
syncable::Id folder_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -1488,7 +1487,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
WriteTestDataToEntry(&trans, &entry);
}
- // Verify it and pull the ID out of the entry
+ // Verify it and pull the ID out of the entry.
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -1501,14 +1500,14 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
}
// Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value
+ int64 new_version = 150; // any larger value.
int64 timestamp = 20; // arbitrary value.
int64 size = 20; // arbitrary.
syncable::Id new_folder_id =
syncable::Id::CreateFromServerId("folder_server_id");
// the following update should cause the folder to both apply the update, as
- // well as reassociate the id
+ // well as reassociate the id.
mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
"new_folder", new_version, timestamp);
mock_server_->SetLastUpdateOriginatorFields(
@@ -1554,7 +1553,7 @@ TEST_F(SyncerTest, CommitReuniteUpdate) {
ASSERT_TRUE(entry.good());
WriteTestDataToEntry(&trans, &entry);
}
- // Verify it and pull the ID out
+ // Verify it and pull the ID out.
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -1566,7 +1565,7 @@ TEST_F(SyncerTest, CommitReuniteUpdate) {
}
// Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value
+ int64 new_version = 150; // any larger value.
int64 timestamp = 20; // arbitrary value.
syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
@@ -1590,12 +1589,11 @@ TEST_F(SyncerTest, CommitReuniteUpdate) {
}
}
-// A commit with a lost response must work even if the local entry
-// was deleted before the update is applied. We should not duplicate the local
-// entry in this case, but just create another one alongside.
-// We may wish to examine this behavior in the future as it can create hanging
-// uploads that never finish, that must be cleaned up on the server side
-// after some time.
+// A commit with a lost response must work even if the local entry was deleted
+// before the update is applied. We should not duplicate the local entry in
+// this case, but just create another one alongside. We may wish to examine
+// this behavior in the future as it can create hanging uploads that never
+// finish and must be cleaned up on the server side after some time.
TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
@@ -1606,7 +1604,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
ASSERT_TRUE(entry.good());
WriteTestDataToEntry(&trans, &entry);
}
- // Verify it and pull the ID out
+ // Verify it and pull the ID out.
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -1618,7 +1616,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
}
// Now, to emulate a commit response failure, we just don't commit it.
- int64 new_version = 150; // any larger value
+ int64 new_version = 150; // any larger value.
int64 timestamp = 20; // arbitrary value.
int64 size = 20; // arbitrary.
syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
@@ -1655,7 +1653,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
}
}
-// TODO(chron): Add more unsanitized name tests
+// TODO(chron): Add more unsanitized name tests.
TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -1797,7 +1795,7 @@ bool TouchFredAndGingerInRoot(Directory* dir) {
MutableEntry fred(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
PSTR("fred"));
CHECK(fred.good());
- // Equivalent to touching the entry
+ // Equivalent to touching the entry.
fred.Put(syncable::IS_UNSYNCED, true);
fred.Put(syncable::SYNCING, false);
MutableEntry ginger(&trans, syncable::GET_BY_PARENTID_AND_NAME,
@@ -2076,9 +2074,9 @@ TEST_F(SyncerTest, ThreeNamesClashWithResolver) {
}
/**
- * In the event that we have a double changed entry, that is
- * changed on both the client and the server, the conflict resolver
- * should just drop one of them and accept the other.
+ * In the event that we have a doubly changed entry, that is, changed on both
+ * the client and the server, the conflict resolver should just drop one of
+ * them and accept the other.
*/
TEST_F(SyncerTest, DoublyChangedWithResolver) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
@@ -2115,9 +2113,8 @@ TEST_F(SyncerTest, DoublyChangedWithResolver) {
syncer_events_.clear();
}
-// We got this repro case when someone was editing entries
-// while sync was occuring. The entry had changed out underneath
-// the user.
+// We got this repro case when someone was editing entries while sync was
+// occurring. The entry had changed out underneath the user.
TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2215,7 +2212,7 @@ TEST_F(SyncerTest, CommittingNewDeleted) {
// We apply unapplied updates again before we get the update about the deletion.
// This means we have an unapplied update where server_version < base_version.
TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
- // This test is a little fake
+ // This test is a little fake.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
{
@@ -2224,7 +2221,7 @@ TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
entry.Put(ID, ids_.FromNumber(20));
entry.Put(BASE_VERSION, 1);
entry.Put(SERVER_VERSION, 1);
- entry.Put(SERVER_PARENT_ID, ids_.FromNumber(9999)); // bad parent
+ entry.Put(SERVER_PARENT_ID, ids_.FromNumber(9999)); // Bad parent.
entry.Put(IS_UNSYNCED, true);
entry.Put(IS_UNAPPLIED_UPDATE, true);
entry.Put(IS_DEL, false);
@@ -2247,7 +2244,7 @@ TEST_F(SyncerTest, UnappliedUpdateDuringCommit) {
// remove fred
// if no syncing occured midway, bob will have an illegal parent
TEST_F(SyncerTest, DeletingEntryInFolder) {
- // This test is a little fake
+ // This test is a little fake.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
{
@@ -2371,8 +2368,8 @@ TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
syncer_events_.clear();
}
-// TODO(chron): New set of folder swap commit tests that don't rely
-// on transactional commits.
+// TODO(chron): New set of folder swap commit tests that don't rely on
+// transactional commits.
TEST_F(SyncerTest, DISABLED_FolderSwapCommit) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2413,8 +2410,8 @@ TEST_F(SyncerTest, DISABLED_FolderSwapCommit) {
syncer_events_.clear();
}
-// TODO(chron): New set of folder swap commit tests that don't rely
-// on transactional commits.
+// TODO(chron): New set of folder swap commit tests that don't rely on
+// transactional commits.
TEST_F(SyncerTest, DISABLED_DualFolderSwapCommit) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2479,8 +2476,8 @@ TEST_F(SyncerTest, DISABLED_DualFolderSwapCommit) {
syncer_events_.clear();
}
-// TODO(chron): New set of folder swap commit tests that don't rely
-// on transactional commits.
+// TODO(chron): New set of folder swap commit tests that don't rely on
+// transactional commits.
TEST_F(SyncerTest, DISABLED_TripleFolderRotateCommit) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2531,8 +2528,8 @@ TEST_F(SyncerTest, DISABLED_TripleFolderRotateCommit) {
syncer_events_.clear();
}
-// TODO(chron): New set of folder swap commit tests that don't rely
-// on transactional commits.
+// TODO(chron): New set of folder swap commit tests that don't rely on
+// transactional commits.
TEST_F(SyncerTest, DISABLED_ServerAndClientSwap) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2763,7 +2760,7 @@ TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
// Circular links should be resolved by the server.
TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
- // we don't currently resolve this. This test ensures we don't
+  // We don't currently resolve this. This test ensures we don't.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
@@ -2818,7 +2815,7 @@ TEST_F(SyncerTest, ConflictSetClassificationError) {
}
TEST_F(SyncerTest, SwapEntryNames) {
- // Simple transaction test
+ // Simple transaction test.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
@@ -3581,8 +3578,8 @@ TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted2) {
syncer_events_.clear();
}
-// This test is to reproduce a check failure. Sometimes we would get a
-// bad ID back when creating an entry.
+// This test is to reproduce a check failure. Sometimes we would get a bad ID
+// back when creating an entry.
TEST_F(SyncerTest, DuplicateIDReturn) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
@@ -3608,8 +3605,8 @@ TEST_F(SyncerTest, DuplicateIDReturn) {
syncer_events_.clear();
}
-// This test is not very useful anymore. It used to trigger
-// a more interesting condition.
+// This test is not very useful anymore. It used to trigger a more interesting
+// condition.
TEST_F(SyncerTest, SimpleConflictOnAnEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -3656,8 +3653,8 @@ TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
}
TEST_F(SyncerTest, ConflictResolverMergeOverwritesLocalEntry) {
- // This test would die because it would rename
- // a entry to a name that was taken in the namespace
+ // This test would die because it would rename an entry to a name that was
+ // taken in the namespace.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -3711,7 +3708,7 @@ TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
- // We don't care about actually committing, just the resolution
+ // We don't care about actually committing, just the resolution.
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
@@ -3747,7 +3744,7 @@ TEST_F(SyncerTest, UpdateFlipsTheFolderBit) {
// Server update: entry-type object (not a container), revision 10.
mock_server_->AddUpdateBookmark(ids_.FromNumber(1), root_id_, "name", 10, 10);
- // Don't attempt to commit
+ // Don't attempt to commit.
mock_server_->set_conflict_all_commits(true);
// The syncer should not attempt to apply the invalid update.
@@ -3787,7 +3784,7 @@ TEST(SyncerSyncProcessState, MergeSetsTest) {
EXPECT_EQ(c.IdToConflictSetGet(id[1]), c.IdToConflictSetGet(id[i]));
}
- // Check dupes don't cause double sets
+ // Check dupes don't cause double sets.
SyncProcessState identical_set;
identical_set.MergeSets(id[1], id[1]);
EXPECT_EQ(identical_set.IdToConflictSetSize(), 1);
@@ -3795,8 +3792,8 @@ TEST(SyncerSyncProcessState, MergeSetsTest) {
}
// Bug Synopsis:
-// Merge conflict resolution will merge a new local entry
-// with another entry that needs updates, resulting in CHECK.
+// Merge conflict resolution will merge a new local entry with another entry
+// that needs updates, resulting in a CHECK failure.
TEST_F(SyncerTest, MergingExistingItems) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -3896,8 +3893,8 @@ TEST_F(SyncerTest, TestUndeleteUpdate) {
mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4);
mock_server_->SetLastUpdateDeleted();
syncer_->SyncShare();
- // This used to be rejected as it's an undeletion.
- // Now, it results in moving the delete path aside.
+ // This used to be rejected as it's an undeletion. Now, it results in moving
+ // the delete path aside.
mock_server_->AddUpdateDirectory(2, 1, "bar", 3, 5);
syncer_->SyncShare();
{
@@ -4170,10 +4167,10 @@ TEST_F(SyncerTest, TestSimpleUndelete) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
mock_server_->set_conflict_all_commits(true);
- // let there be an entry from the server.
+ // Let there be an entry from the server.
mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
syncer_->SyncShare();
- // check it out and delete it
+ // Check it out and delete it.
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&wtrans, GET_BY_ID, id);
@@ -4181,7 +4178,7 @@ TEST_F(SyncerTest, TestSimpleUndelete) {
EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
EXPECT_FALSE(entry.Get(IS_UNSYNCED));
EXPECT_FALSE(entry.Get(IS_DEL));
- // delete it locally
+ // Delete it locally.
entry.Put(IS_DEL, true);
}
syncer_->SyncShare();
@@ -4196,7 +4193,7 @@ TEST_F(SyncerTest, TestSimpleUndelete) {
EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
}
syncer_->SyncShare();
- // Update from server confirming deletion
+ // Update from server confirming deletion.
mock_server_->AddUpdateBookmark(id, root, "foo", 2, 11);
mock_server_->SetLastUpdateDeleted();
syncer_->SyncShare();
@@ -4210,7 +4207,7 @@ TEST_F(SyncerTest, TestSimpleUndelete) {
EXPECT_TRUE(entry.Get(IS_DEL));
EXPECT_TRUE(entry.Get(SERVER_IS_DEL));
}
- // Undelete from server
+ // Undelete from server.
mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
syncer_->SyncShare();
// IS_DEL and SERVER_IS_DEL now both false.
@@ -4229,11 +4226,11 @@ TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
- // let there be a entry, from the server.
+ // Let there be an entry from the server.
mock_server_->set_conflict_all_commits(true);
mock_server_->AddUpdateBookmark(id, root, "foo", 1, 10);
syncer_->SyncShare();
- // check it out and delete it
+ // Check it out and delete it.
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&wtrans, GET_BY_ID, id);
@@ -4241,7 +4238,7 @@ TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
EXPECT_FALSE(entry.Get(IS_UNAPPLIED_UPDATE));
EXPECT_FALSE(entry.Get(IS_UNSYNCED));
EXPECT_FALSE(entry.Get(IS_DEL));
- // delete it locally
+ // Delete it locally.
entry.Put(IS_DEL, true);
}
syncer_->SyncShare();
@@ -4256,8 +4253,8 @@ TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
}
syncer_->SyncShare();
- // Say we do not get an update from server confirming deletion.
- // Undelete from server
+ // Say we do not get an update from server confirming deletion. Undelete
+ // from server.
mock_server_->AddUpdateBookmark(id, root, "foo", 2, 12);
syncer_->SyncShare();
// IS_DEL and SERVER_IS_DEL now both false.
@@ -4277,7 +4274,7 @@ TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
Id root = ids_.root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
- // duplicate! expect path clashing!
+ // Duplicate! Expect path clashing!
mock_server_->set_conflict_all_commits(true);
mock_server_->AddUpdateBookmark(id1, root, "foo", 1, 10);
mock_server_->AddUpdateBookmark(id2, root, "foo", 1, 10);
@@ -4310,8 +4307,8 @@ TEST_F(SyncerTest, CopySyncProcessState) {
TEST_F(SyncerTest, SingletonTagUpdates) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
- // As a hurdle, introduce an item whose name is the same as the
- // tag value we'll use later.
+ // As a hurdle, introduce an item whose name is the same as the tag value
+ // we'll use later.
int64 hurdle_handle = CreateUnsyncedDirectory(PSTR("bob"), "id_bob");
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -4413,8 +4410,8 @@ class SyncerPositionUpdateTest : public SyncerTest {
} // namespace
TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
- // Add a bunch of items in increasing order, starting with just
- // positive position values.
+ // Add a bunch of items in increasing order, starting with just positive
+ // position values.
AddRootItemWithPosition(100);
AddRootItemWithPosition(199);
AddRootItemWithPosition(200);
@@ -4453,8 +4450,8 @@ TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
}
TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
- // Mix it all up, interleaving position values,
- // and try multiple batches of updates.
+ // Mix it all up, interleaving position values, and try multiple batches of
+ // updates.
AddRootItemWithPosition(400);
AddRootItemWithPosition(201);
AddRootItemWithPosition(-400);
@@ -4585,4 +4582,5 @@ TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
const SyncerTest::CommitOrderingTest
SyncerTest::CommitOrderingTest::LAST_COMMIT_ITEM = {-1, TestIdFactory::root()};
+
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
index 75f7b82..e3bca85 100644
--- a/chrome/browser/sync/engine/syncer_util.cc
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -102,7 +102,7 @@ syncable::Id SyncerUtil::GetNameConflictingItemId(
// Returns the number of unsynced entries.
// static
int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
- vector<int64> *handles) {
+ vector<int64> *handles) {
trans->directory()->GetUnsyncedMetaHandles(trans, handles);
LOG_IF(INFO, handles->size() > 0)
<< "Have " << handles->size() << " unsynced items.";
@@ -124,7 +124,7 @@ void SyncerUtil::ChangeEntryIDAndUpdateChildren(
<< *entry << "\n\n" << old_entry;
}
if (entry->Get(IS_DIR)) {
- // Get all child entries of the old id
+ // Get all child entries of the old id.
trans->directory()->GetChildHandles(trans, old_id, children);
Directory::ChildHandles::iterator i = children->begin();
while (i != children->end()) {
@@ -196,7 +196,7 @@ void SyncerUtil::AttemptReuniteLostCommitResponses(
// An entry should never be version 0 and SYNCED.
CHECK(local_entry.Get(IS_UNSYNCED));
- // just a quick sanity check
+ // Just a quick sanity check.
CHECK(!local_entry.Get(ID).ServerKnows());
LOG(INFO) << "Reuniting lost commit response IDs" <<
@@ -251,7 +251,7 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
CHECK(entry->good());
if (!entry->Get(IS_UNAPPLIED_UPDATE))
- return SUCCESS; // No work to do
+ return SUCCESS; // No work to do.
syncable::Id id = entry->Get(ID);
if (entry->Get(IS_UNSYNCED)) {
@@ -312,8 +312,8 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
const SyncEntity& server_entry,
const SyncName& name) {
if (server_entry.deleted()) {
- // The server returns very lightweight replies for deletions, so
- // we don't clobber a bunch of fields on delete.
+ // The server returns very lightweight replies for deletions, so we don't
+ // clobber a bunch of fields on delete.
local_entry->Put(SERVER_IS_DEL, true);
local_entry->Put(SERVER_VERSION,
std::max(local_entry->Get(SERVER_VERSION),
@@ -358,9 +358,9 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
}
local_entry->Put(SERVER_IS_DEL, server_entry.deleted());
- // We only mark the entry as unapplied if its version is greater than
- // the local data. If we're processing the update that corresponds to one of
- // our commit we don't apply it as time differences may occur.
+ // We only mark the entry as unapplied if its version is greater than the
+ // local data. If we're processing the update that corresponds to one of our
+ // commits, we don't apply it, as time differences may occur.
if (server_entry.version() > local_entry->Get(BASE_VERSION)) {
local_entry->Put(IS_UNAPPLIED_UPDATE, true);
}
@@ -392,7 +392,7 @@ void SyncerUtil::ApplyExtendedAttributes(
// Creates a new Entry iff no Entry exists with the given id.
// static
void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
- const syncable::Id& id) {
+ const syncable::Id& id) {
syncable::MutableEntry entry(trans, syncable::GET_BY_ID, id);
if (!entry.good()) {
syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
@@ -412,8 +412,8 @@ bool SyncerUtil::ServerAndLocalOrdersMatch(syncable::Entry* entry) {
break;
local_up_to_date_predecessor = local_prev.Get(PREV_ID);
}
- // Now find the closest up-to-date sibling in the server order.
+ // Now find the closest up-to-date sibling in the server order.
syncable::Id server_up_to_date_predecessor =
ComputePrevIdFromServerPosition(entry->trans(), entry,
entry->Get(SERVER_PARENT_ID));
@@ -493,8 +493,8 @@ void SyncerUtil::UpdateLocalDataFromServerData(
CHECK(entry->Get(IS_UNAPPLIED_UPDATE));
LOG(INFO) << "Updating entry : " << *entry;
entry->Put(IS_BOOKMARK_OBJECT, entry->Get(SERVER_IS_BOOKMARK_OBJECT));
- // This strange dance around the IS_DEL flag
- // avoids problems when setting the name.
+ // This strange dance around the IS_DEL flag avoids problems when setting
+ // the name.
if (entry->Get(SERVER_IS_DEL)) {
entry->Put(IS_DEL, true);
} else {
@@ -506,9 +506,9 @@ void SyncerUtil::UpdateLocalDataFromServerData(
entry);
bool was_doctored = name.HasBeenSanitized();
if (was_doctored) {
- // If we're changing the name of entry, either its name
- // should be illegal, or some other entry should have an unsanitized
- // name. There's should be a CHECK in every code path.
+ // If we're changing the name of the entry, either its name should be
+ // illegal, or some other entry should have an unsanitized name.
+ // There should be a CHECK in every code path.
Entry blocking_entry(trans, GET_BY_PARENTID_AND_DBNAME,
entry->Get(SERVER_PARENT_ID),
name.value());
@@ -552,7 +552,7 @@ VerifyCommitResult SyncerUtil::ValidateCommitEntry(
return VERIFY_UNSYNCABLE;
}
if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
- // drop deleted uncommitted entries.
+ // Drop deleted uncommitted entries.
return VERIFY_UNSYNCABLE;
}
return VERIFY_OK;
@@ -593,7 +593,6 @@ void SyncerUtil::AddPredecessorsThenItem(
syncable::IndexedBitField inclusion_filter,
syncable::MetahandleSet* inserted_items,
vector<syncable::Id>* commit_ids) {
-
vector<syncable::Id>::size_type initial_size = commit_ids->size();
if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
commit_ids))
@@ -654,7 +653,7 @@ void SyncerUtil::MarkDeletedChildrenSynced(
syncable::Id id = entry.Get(PARENT_ID);
while (id != trans.root_id()) {
if (deleted_folders->find(id) != deleted_folders->end()) {
- // We've synced the deletion of this deleted entries parent
+ // We've synced the deletion of this deleted entry's parent.
entry.Put(IS_UNSYNCED, false);
break;
}
@@ -704,7 +703,7 @@ VerifyResult SyncerUtil::VerifyUpdateConsistency(
// Then we've had an update for this entry before.
if (is_directory != same_id->Get(SERVER_IS_DIR) ||
has_bookmark_data != same_id->Get(SERVER_IS_BOOKMARK_OBJECT)) {
- if (same_id->Get(IS_DEL)) { // if we've deleted the item, we don't care.
+ if (same_id->Get(IS_DEL)) { // If we've deleted the item, we don't care.
return VERIFY_SKIP;
} else {
LOG(ERROR) << "Server update doesn't agree with previous updates. ";
@@ -762,14 +761,13 @@ VerifyResult SyncerUtil::VerifyUpdateConsistency(
// expressing an 'undelete'
// static
VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
- const SyncEntity& entry,
- syncable::MutableEntry* same_id) {
+ const SyncEntity& entry,
+ syncable::MutableEntry* same_id) {
CHECK(same_id->good());
LOG(INFO) << "Server update is attempting undelete. " << *same_id
<< "Update:" << SyncEntityDebugString(entry);
- // Move the old one aside and start over. It's too tricky to
- // get the old one back into a state that would pass
- // CheckTreeInvariants().
+ // Move the old one aside and start over. It's too tricky to get the old one
+ // back into a state that would pass CheckTreeInvariants().
if (same_id->Get(IS_DEL)) {
same_id->Put(ID, trans->directory()->NextId());
same_id->Put(BASE_VERSION, CHANGES_VERSION);
@@ -834,8 +832,8 @@ syncable::Id SyncerUtil::ComputePrevIdFromServerPosition(
continue;
}
- // |update_entry| is considered to be somewhere after |candidate|, so
- // store it as the upper bound.
+ // |update_entry| is considered to be somewhere after |candidate|, so store
+ // it as the upper bound.
closest_sibling = candidate.Get(ID);
}
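
The hunk above shows only the tail of SyncerUtil::ComputePrevIdFromServerPosition. As a rough sketch of the idea the rewrapped comment describes -- scan the siblings and remember the closest one that still sorts before the entry being positioned -- here is a simplified, hypothetical version; the Sibling struct and the single server_position ordering are stand-ins, not the real syncable schema.

#include <vector>

namespace sketch {

struct Sibling {
  long long server_position;  // Stand-in for the server-assigned ordering.
  int id;                     // Stand-in for syncable::Id.
};

// Returns the id of the sibling closest to, but not after, |target_position|,
// or -1 when the entry should be placed first among its siblings.
int ComputePrevIdSketch(const std::vector<Sibling>& siblings,
                        long long target_position) {
  int closest_sibling = -1;
  long long closest_position = 0;
  bool found = false;
  for (const Sibling& candidate : siblings) {
    if (candidate.server_position > target_position)
      continue;  // Candidate sorts after the entry; it cannot be the prev id.
    // The entry is somewhere after |candidate|, so keep the candidate as the
    // best upper bound on the predecessor seen so far.
    if (!found || candidate.server_position > closest_position) {
      closest_sibling = candidate.id;
      closest_position = candidate.server_position;
      found = true;
    }
  }
  return closest_sibling;
}

}  // namespace sketch
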
diff --git a/chrome/browser/sync/engine/syncer_util.h b/chrome/browser/sync/engine/syncer_util.h
index 91e0c814..a6e2f08 100644
--- a/chrome/browser/sync/engine/syncer_util.h
+++ b/chrome/browser/sync/engine/syncer_util.h
@@ -1,9 +1,9 @@
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// Utility functions manipulating syncable::Entries, intended for use by
-// the syncer.
+//
+// Utility functions manipulating syncable::Entries, intended for use by the
+// syncer.
#ifndef CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
#define CHROME_BROWSER_SYNC_ENGINE_SYNCER_UTIL_H_
@@ -14,8 +14,8 @@
#include "chrome/browser/sync/engine/syncer.h"
#include "chrome/browser/sync/engine/syncer_types.h"
-#include "chrome/browser/sync/syncable/syncable_id.h"
#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
#include "chrome/browser/sync/util/path_helpers.h"
#include "chrome/browser/sync/util/sync_types.h"
@@ -24,7 +24,6 @@ namespace browser_sync {
class SyncerSession;
class SyncEntity;
-
class SyncerUtil {
public:
// TODO(ncarter): Remove unique-in-parent title support and name conflicts.
diff --git a/chrome/browser/sync/engine/syncproto.h b/chrome/browser/sync/engine/syncproto.h
index fe05a75..25901dc 100644
--- a/chrome/browser/sync/engine/syncproto.h
+++ b/chrome/browser/sync/engine/syncproto.h
@@ -23,8 +23,8 @@ class IdWrapper : public Base {
}
};
-// These wrapper classes contain no data, so their super
-// classes can be cast to them directly.
+// These wrapper classes contain no data, so their super classes can be cast to
+// them directly.
class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
public:
void set_parent_id(const syncable::Id& id) {
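
The wrapper-cast comment above is easier to see with a tiny, self-contained sketch. Everything here is a hypothetical stand-in (BaseMessage is not a real sync_pb type); the point is only that a subclass adding no data members and no virtual functions has the same layout as its base, which is what lets a base object be viewed through the wrapper's interface.

#include <string>

namespace sketch {

// Hypothetical stand-in for a generated protobuf message.
struct BaseMessage {
  std::string id_string;
};

// Adds accessors only: no data members, no virtuals, so the object layout is
// identical to BaseMessage.
class IdWrapperSketch : public BaseMessage {
 public:
  const std::string& id() const { return id_string; }
};

// The direct cast the comment above refers to: a base object handed out by
// the protobuf API is treated as the accessor-only wrapper type.
inline const IdWrapperSketch* AsWrapper(const BaseMessage* base) {
  return static_cast<const IdWrapperSketch*>(base);
}

}  // namespace sketch
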
diff --git a/chrome/browser/sync/engine/update_applicator.cc b/chrome/browser/sync/engine/update_applicator.cc
index 17e6b36..59ea303 100644
--- a/chrome/browser/sync/engine/update_applicator.cc
+++ b/chrome/browser/sync/engine/update_applicator.cc
@@ -25,7 +25,7 @@ UpdateApplicator::UpdateApplicator(SyncerSession* session,
successful_ids_.reserve(item_count);
}
-// returns true if there's more to do.
+// Returns true if there's more to do.
bool UpdateApplicator::AttemptOneApplication(
syncable::WriteTransaction* trans) {
// If there are no updates left to consider, we're done.
diff --git a/chrome/browser/sync/engine/update_applicator.h b/chrome/browser/sync/engine/update_applicator.h
index 3d500171..8675b81 100644
--- a/chrome/browser/sync/engine/update_applicator.h
+++ b/chrome/browser/sync/engine/update_applicator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// An UpdateApplicator is used to iterate over a number of unapplied
-// updates, applying them to the client using the given syncer session.
+// An UpdateApplicator is used to iterate over a number of unapplied updates,
+// applying them to the client using the given syncer session.
//
// UpdateApplicator might resemble an iterator, but it actually keeps retrying
// failed updates until no remaining updates can be successfully applied.
@@ -11,8 +11,8 @@
#ifndef CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
#define CHROME_BROWSER_SYNC_ENGINE_UPDATE_APPLICATOR_H_
-#include <vector>
#include <set>
+#include <vector>
#include "base/basictypes.h"
#include "base/port.h"
@@ -38,9 +38,9 @@ class UpdateApplicator {
bool AllUpdatesApplied() const;
// This class does not automatically save its progress into the
- // SyncerSession -- to get that to happen, call this method after
- // update application is finished (i.e., when AttemptOneAllocation
- // stops returning true).
+ // SyncerSession -- to get that to happen, call this method after update
+ // application is finished (i.e., when AttemptOneApplication stops returning
+ // true).
void SaveProgressIntoSessionState();
private:
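
A hypothetical driver loop for the retry behaviour described in the comments above. Only AttemptOneApplication() and SaveProgressIntoSessionState() come from this header; the namespace placement and the way the applicator and transaction are constructed are assumptions and are left out.

#include "chrome/browser/sync/engine/update_applicator.h"
#include "chrome/browser/sync/syncable/syncable.h"

namespace browser_sync {

void ApplyAllUpdatesSketch(UpdateApplicator* applicator,
                           syncable::WriteTransaction* trans) {
  // AttemptOneApplication() returns true while there is more to do; updates
  // that fail are retried on later passes until no further progress is made.
  while (applicator->AttemptOneApplication(trans)) {
  }
  // Progress is not saved into the SyncerSession automatically -- do it once
  // the application loop has finished.
  applicator->SaveProgressIntoSessionState();
}

}  // namespace browser_sync
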
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
index dee544d..7a23477 100644
--- a/chrome/browser/sync/engine/verify_updates_command.cc
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -6,9 +6,9 @@
#include "chrome/browser/sync/engine/verify_updates_command.h"
#include "chrome/browser/sync/engine/syncer.h"
-#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
#include "chrome/browser/sync/engine/syncer_types.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
@@ -27,7 +27,7 @@ using syncable::SYNCER;
VerifyUpdatesCommand::VerifyUpdatesCommand() {}
VerifyUpdatesCommand::~VerifyUpdatesCommand() {}
-void VerifyUpdatesCommand::ExecuteImpl(SyncerSession *session) {
+void VerifyUpdatesCommand::ExecuteImpl(SyncerSession* session) {
LOG(INFO) << "Beginning Update Verification";
ScopedDirLookup dir(session->dirman(), session->account_name());
if (!dir.good()) {
diff --git a/chrome/browser/sync/engine/verify_updates_command.h b/chrome/browser/sync/engine/verify_updates_command.h
index c7970e9..87c6ff7 100644
--- a/chrome/browser/sync/engine/verify_updates_command.h
+++ b/chrome/browser/sync/engine/verify_updates_command.h
@@ -18,19 +18,20 @@ class WriteTransaction;
namespace browser_sync {
-// Verifies the response from a GetUpdates request. All invalid updates
-// will be noted in the SyncerSession after this command is executed.
+// Verifies the response from a GetUpdates request. All invalid updates will be
+// noted in the SyncerSession after this command is executed.
class VerifyUpdatesCommand : public SyncerCommand {
public:
VerifyUpdatesCommand();
virtual ~VerifyUpdatesCommand();
- virtual void ExecuteImpl(SyncerSession *session);
+ virtual void ExecuteImpl(SyncerSession* session);
VerifyResult VerifyUpdate(syncable::WriteTransaction* trans,
const SyncEntity& entry);
private:
DISALLOW_COPY_AND_ASSIGN(VerifyUpdatesCommand);
};
+
} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_ENGINE_VERIFY_UPDATES_COMMAND_H_
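
A hypothetical call site for the command described above; obtaining a populated SyncerSession is outside the scope of this sketch, and in the real engine the command is presumably driven by the syncer rather than invoked directly like this.

#include "chrome/browser/sync/engine/verify_updates_command.h"

namespace browser_sync {

void VerifyDownloadedUpdatesSketch(SyncerSession* session) {
  VerifyUpdatesCommand verify_updates;
  // Invalid entries from the GetUpdates response are noted on the session for
  // later stages to skip; nothing is applied here.
  verify_updates.ExecuteImpl(session);
}

}  // namespace browser_sync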