summaryrefslogtreecommitdiffstats
path: root/sync/engine
diff options
context:
space:
mode:
Diffstat (limited to 'sync/engine')
-rw-r--r--sync/engine/build_commit_command.cc64
-rw-r--r--sync/engine/build_commit_command.h25
-rw-r--r--sync/engine/build_commit_command_unittest.cc11
-rw-r--r--sync/engine/commit.cc100
-rw-r--r--sync/engine/commit.h37
-rw-r--r--sync/engine/get_commit_ids_command.cc40
-rw-r--r--sync/engine/get_commit_ids_command.h25
-rw-r--r--sync/engine/net/server_connection_manager.h2
-rw-r--r--sync/engine/post_commit_message_command.cc49
-rw-r--r--sync/engine/post_commit_message_command.h28
-rw-r--r--sync/engine/process_commit_response_command.cc132
-rw-r--r--sync/engine/process_commit_response_command.h41
-rw-r--r--sync/engine/process_commit_response_command_unittest.cc102
-rw-r--r--sync/engine/syncer.cc60
-rw-r--r--sync/engine/syncer.h4
-rw-r--r--sync/engine/syncer_unittest.cc185
16 files changed, 548 insertions, 357 deletions
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
index 86d1d8b..f384c18 100644
--- a/sync/engine/build_commit_command.cc
+++ b/sync/engine/build_commit_command.cc
@@ -12,9 +12,10 @@
#include "base/string_util.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/sessions/ordered_commit_set.h"
#include "sync/sessions/sync_session.h"
-#include "sync/syncable/syncable.h"
#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable.h"
#include "sync/util/time.h"
using std::set;
@@ -49,7 +50,12 @@ int64 BuildCommitCommand::GetGap() {
return 1LL << 20;
}
-BuildCommitCommand::BuildCommitCommand() {}
+BuildCommitCommand::BuildCommitCommand(
+ const sessions::OrderedCommitSet& batch_commit_set,
+ ClientToServerMessage* commit_message)
+ : batch_commit_set_(batch_commit_set), commit_message_(commit_message) {
+}
+
BuildCommitCommand::~BuildCommitCommand() {}
void BuildCommitCommand::AddExtensionsActivityToMessage(
@@ -57,21 +63,29 @@ void BuildCommitCommand::AddExtensionsActivityToMessage(
// We only send ExtensionsActivity to the server if bookmarks are being
// committed.
ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
- if (!session->status_controller().HasBookmarkCommitActivity()) {
- // Return the records to the activity monitor.
- monitor->PutRecords(session->extensions_activity());
- session->mutable_extensions_activity()->clear();
- return;
- }
- const ExtensionsActivityMonitor::Records& records =
- session->extensions_activity();
- for (ExtensionsActivityMonitor::Records::const_iterator it = records.begin();
- it != records.end(); ++it) {
- sync_pb::ChromiumExtensionsActivity* activity_message =
- message->add_extensions_activity();
- activity_message->set_extension_id(it->second.extension_id);
- activity_message->set_bookmark_writes_since_last_commit(
- it->second.bookmark_write_count);
+ if (batch_commit_set_.HasBookmarkCommitId()) {
+ // This isn't perfect, since the set of extensions activity may not
+ // correlate exactly with the items being committed. That's OK as
+ // long as we're looking for a rough estimate of extensions activity,
+ // not a precise mapping of which commits were triggered by which
+ // extension.
+ //
+ // We will push this list of extensions activity back into the
+ // ExtensionsActivityMonitor if this commit fails. That's why we must keep
+ // a copy of these records in the session.
+ monitor->GetAndClearRecords(session->mutable_extensions_activity());
+
+ const ExtensionsActivityMonitor::Records& records =
+ session->extensions_activity();
+ for (ExtensionsActivityMonitor::Records::const_iterator it =
+ records.begin();
+ it != records.end(); ++it) {
+ sync_pb::ChromiumExtensionsActivity* activity_message =
+ message->add_extensions_activity();
+ activity_message->set_extension_id(it->second.extension_id);
+ activity_message->set_bookmark_writes_since_last_commit(
+ it->second.bookmark_write_count);
+ }
}
}
@@ -86,16 +100,15 @@ void SetEntrySpecifics(MutableEntry* meta_entry, SyncEntity* sync_entry) {
} // namespace
SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
- ClientToServerMessage message;
- message.set_share(session->context()->account_name());
- message.set_message_contents(ClientToServerMessage::COMMIT);
+ commit_message_->set_share(session->context()->account_name());
+ commit_message_->set_message_contents(ClientToServerMessage::COMMIT);
- CommitMessage* commit_message = message.mutable_commit();
+ CommitMessage* commit_message = commit_message_->mutable_commit();
commit_message->set_cache_guid(
session->write_transaction()->directory()->cache_guid());
AddExtensionsActivityToMessage(session, commit_message);
SyncerProtoUtil::AddRequestBirthday(
- session->write_transaction()->directory(), &message);
+ session->write_transaction()->directory(), commit_message_);
// Cache previously computed position values. Because |commit_ids|
// is already in sibling order, we should always hit this map after
@@ -105,9 +118,8 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
// whose ID is the map's key.
std::map<Id, std::pair<int64, int64> > position_map;
- const vector<Id>& commit_ids = session->status_controller().commit_ids();
- for (size_t i = 0; i < commit_ids.size(); i++) {
- Id id = commit_ids[i];
+ for (size_t i = 0; i < batch_commit_set_.Size(); i++) {
+ Id id = batch_commit_set_.GetCommitIdAt(i);
SyncEntity* sync_entry =
static_cast<SyncEntity*>(commit_message->add_entries());
sync_entry->set_id(id);
@@ -208,8 +220,6 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
SetEntrySpecifics(&meta_entry, sync_entry);
}
}
- session->mutable_status_controller()->
- mutable_commit_message()->CopyFrom(message);
return SYNCER_OK;
}
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
index d18c94b..91eb3203 100644
--- a/sync/engine/build_commit_command.h
+++ b/sync/engine/build_commit_command.h
@@ -14,9 +14,26 @@
namespace browser_sync {
+namespace sessions {
+class OrderedCommitSet;
+}
+
+// A class that contains the code used to serialize a set of sync items into a
+// protobuf commit message. This conversion process references the
+// syncable::Directory, which is why it must be called within the same
+// transaction as the GetCommitIdsCommand that fetched the set of items to be
+// committed.
+//
+// See SyncerCommand documentation for more info.
class BuildCommitCommand : public SyncerCommand {
public:
- BuildCommitCommand();
+ // The batch_commit_set parameter contains a set of references to the items
+ // that should be committed.
+ //
+ // The commit_message parameter is an output parameter which will contain the
+ // fully initialized commit message once ExecuteImpl() has been called.
+ BuildCommitCommand(const sessions::OrderedCommitSet& batch_commit_set,
+ ClientToServerMessage* commit_message);
virtual ~BuildCommitCommand();
// SyncerCommand implementation.
@@ -44,6 +61,12 @@ class BuildCommitCommand : public SyncerCommand {
int64 InterpolatePosition(int64 lo, int64 hi);
DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
+
+ // Input parameter; see constructor comment.
+ const sessions::OrderedCommitSet& batch_commit_set_;
+
+ // Output parameter; see constructor comment.
+ ClientToServerMessage* commit_message_;
};
} // namespace browser_sync
diff --git a/sync/engine/build_commit_command_unittest.cc b/sync/engine/build_commit_command_unittest.cc
index f0a5bea..6263615 100644
--- a/sync/engine/build_commit_command_unittest.cc
+++ b/sync/engine/build_commit_command_unittest.cc
@@ -10,7 +10,16 @@ namespace browser_sync {
// A test fixture for tests exercising ClearDataCommandTest.
class BuildCommitCommandTest : public SyncerCommandTest {
protected:
- BuildCommitCommandTest() {}
+ BuildCommitCommandTest()
+ : batch_commit_set_(ModelSafeRoutingInfo()),
+ command_(batch_commit_set_, &commit_message_) {
+ }
+
+ private:
+ sessions::OrderedCommitSet batch_commit_set_;
+ ClientToServerMessage commit_message_;
+
+ protected:
BuildCommitCommand command_;
private:
diff --git a/sync/engine/commit.cc b/sync/engine/commit.cc
new file mode 100644
index 0000000..ae6b8ae
--- /dev/null
+++ b/sync/engine/commit.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/commit.h"
+
+#include "base/debug/trace_event.h"
+#include "sync/engine/build_commit_command.h"
+#include "sync/engine/get_commit_ids_command.h"
+#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/sessions/sync_session.h"
+
+using syncable::SYNCER;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::StatusController;
+
+// Helper function that finds sync items that are ready to be committed to the
+// server and serializes them into a commit message protobuf. It will return
+// false iff there are no entries ready to be committed at this time.
+//
+// The OrderedCommitSet parameter is an output parameter which will contain
+// the set of all items which are to be committed. The number of items in
+// the set shall not exceed the maximum batch size. (The default batch size
+// is currently 25, though it can be overwritten by the server.)
+//
+// The ClientToServerMessage parameter is an output parameter which will contain
+// the commit message which should be sent to the server. It is valid iff the
+// return value of this function is true.
+bool PrepareCommitMessage(sessions::SyncSession* session,
+ sessions::OrderedCommitSet* commit_set,
+ ClientToServerMessage* commit_message) {
+ TRACE_EVENT0("sync", "PrepareCommitMessage");
+
+ commit_set->Clear();
+ commit_message->Clear();
+
+ WriteTransaction trans(FROM_HERE, SYNCER, session->context()->directory());
+ sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);
+
+ // Fetch the items to commit.
+ const size_t batch_size = session->context()->max_commit_batch_size();
+ GetCommitIdsCommand get_commit_ids_command(batch_size, commit_set);
+ get_commit_ids_command.Execute(session);
+
+ DVLOG(1) << "Commit message will contain " << commit_set->Size() << " items.";
+ if (commit_set->Empty())
+ return false;
+
+ // Serialize the message.
+ BuildCommitCommand build_commit_command(*commit_set, commit_message);
+ build_commit_command.Execute(session);
+
+ return true;
+}
+
+SyncerError BuildAndPostCommits(Syncer* syncer,
+ sessions::SyncSession* session) {
+ StatusController* status_controller = session->mutable_status_controller();
+
+ sessions::OrderedCommitSet commit_set(session->routing_info());
+ ClientToServerMessage commit_message;
+ while (PrepareCommitMessage(session, &commit_set, &commit_message)
+ && !syncer->ExitRequested()) {
+ ClientToServerResponse commit_response;
+
+ DVLOG(1) << "Sending commit message.";
+ TRACE_EVENT_BEGIN0("sync", "PostCommit");
+ status_controller->set_last_post_commit_result(
+ SyncerProtoUtil::PostClientToServerMessage(commit_message,
+ &commit_response,
+ session));
+ TRACE_EVENT_END0("sync", "PostCommit");
+
+ // ProcessCommitResponse includes some code that cleans up after a failure
+ // to post a commit message, so we must run it regardless of whether or not
+ // the commit succeeds.
+
+ TRACE_EVENT_BEGIN0("sync", "ProcessCommitResponse");
+ ProcessCommitResponseCommand process_response_command(
+ commit_set, commit_message, commit_response);
+ status_controller->set_last_process_commit_response_result(
+ process_response_command.Execute(session));
+ TRACE_EVENT_END0("sync", "ProcessCommitResponse");
+
+ // Exit early if either the commit or the response processing failed.
+ if (status_controller->last_post_commit_result() != SYNCER_OK)
+ return status_controller->last_post_commit_result();
+ if (status_controller->last_process_commit_response_result() != SYNCER_OK)
+ return status_controller->last_process_commit_response_result();
+ }
+
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/commit.h b/sync/engine/commit.h
new file mode 100644
index 0000000..f5ccd79
--- /dev/null
+++ b/sync/engine/commit.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_COMMIT_H_
+#define SYNC_ENGINE_COMMIT_H_
+#pragma once
+
+#include "sync/util/syncer_error.h"
+
+namespace browser_sync {
+
+namespace sessions {
+class SyncSession;
+}
+
+class Syncer;
+
+// This function will commit batches of unsynced items to the server until the
+// number of unsynced and ready to commit items reaches zero or an error is
+// encountered. A request to exit early will be treated as an error and will
+// abort any blocking operations.
+//
+// The Syncer parameter is provided only for access to its ExitRequested()
+// method. This is technically unnecessary since an early exit request should
+// be detected as we attempt to contact the sync server.
+//
+// The SyncSession parameter contains pointers to various bits of state,
+// including the syncable::Directory that contains all sync items and the
+// ServerConnectionManager used to contact the server.
+SyncerError BuildAndPostCommits(
+ Syncer* syncer,
+ sessions::SyncSession* session);
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_COMMIT_H_
diff --git a/sync/engine/get_commit_ids_command.cc b/sync/engine/get_commit_ids_command.cc
index ecc5cf9..a09d5d7 100644
--- a/sync/engine/get_commit_ids_command.cc
+++ b/sync/engine/get_commit_ids_command.cc
@@ -22,8 +22,12 @@ using sessions::OrderedCommitSet;
using sessions::SyncSession;
using sessions::StatusController;
-GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
- : requested_commit_batch_size_(commit_batch_size) {}
+GetCommitIdsCommand::GetCommitIdsCommand(
+ const size_t commit_batch_size,
+ sessions::OrderedCommitSet* commit_set)
+ : requested_commit_batch_size_(commit_batch_size),
+ commit_set_(commit_set) {
+}
GetCommitIdsCommand::~GetCommitIdsCommand() {}
@@ -61,17 +65,12 @@ SyncerError GetCommitIdsCommand::ExecuteImpl(SyncSession* session) {
session->routing_info(),
ready_unsynced_set);
- StatusController* status = session->mutable_status_controller();
- syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
- ready_unsynced_set.begin(), ready_unsynced_set.end());
- status->set_unsynced_handles(ready_unsynced_vector);
const vector<syncable::Id>& verified_commit_ids =
- ordered_commit_set_->GetAllCommitIds();
+ commit_set_->GetAllCommitIds();
for (size_t i = 0; i < verified_commit_ids.size(); i++)
DVLOG(1) << "Debug commit batch result:" << verified_commit_ids[i];
- status->set_commit_set(*ordered_commit_set_.get());
return SYNCER_OK;
}
@@ -180,7 +179,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
CHECK(parent.good()) << "Bad user-only parent in item path.";
int64 handle = parent.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(handle)) {
+ if (commit_set_->HaveCommitItem(handle)) {
// We've already added this parent (and therefore all of its parents).
// We can return early.
break;
@@ -188,7 +187,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
if (!AddItemThenPredecessors(trans, ready_unsynced_set, parent,
&item_dependencies)) {
// There was a parent/predecessor in conflict. We return without adding
- // anything to |ordered_commit_set_|.
+ // anything to |commit_set|.
DVLOG(1) << "Parent or parent's predecessor was in conflict, omitting "
<< item;
return false;
@@ -226,7 +225,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors(
const syncable::Entry& item,
OrderedCommitSet* result) const {
int64 item_handle = item.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(item_handle)) {
+ if (commit_set_->HaveCommitItem(item_handle)) {
// We've already added this item to the commit set, and so must have
// already added the predecessors as well.
return true;
@@ -243,7 +242,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors(
if (!prev.Get(syncable::IS_UNSYNCED))
break;
int64 handle = prev.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(handle)) {
+ if (commit_set_->HaveCommitItem(handle)) {
// We've already added this item to the commit set, and so must have
// already added the predecessors as well.
return true;
@@ -276,7 +275,7 @@ bool GetCommitIdsCommand::AddPredecessorsThenItem(
}
bool GetCommitIdsCommand::IsCommitBatchFull() const {
- return ordered_commit_set_->Size() >= requested_commit_batch_size_;
+ return commit_set_->Size() >= requested_commit_batch_size_;
}
void GetCommitIdsCommand::AddCreatesAndMoves(
@@ -287,7 +286,7 @@ void GetCommitIdsCommand::AddCreatesAndMoves(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::Entry entry(write_transaction,
@@ -308,14 +307,14 @@ void GetCommitIdsCommand::AddCreatesAndMoves(
ready_unsynced_set,
entry,
&item_dependencies)) {
- ordered_commit_set_->Append(item_dependencies);
+ commit_set_->Append(item_dependencies);
}
}
}
// It's possible that we overcommitted while trying to expand dependent
// items. If so, truncate the set down to the allowed size.
- ordered_commit_set_->Truncate(requested_commit_batch_size_);
+ commit_set_->Truncate(requested_commit_batch_size_);
}
void GetCommitIdsCommand::AddDeletes(
@@ -326,7 +325,7 @@ void GetCommitIdsCommand::AddDeletes(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::Entry entry(write_transaction, syncable::GET_BY_HANDLE,
@@ -357,7 +356,7 @@ void GetCommitIdsCommand::AddDeletes(
DVLOG(1) << "Inserting moved and deleted entry, will be missed by "
<< "delete roll." << entry.Get(syncable::ID);
- ordered_commit_set_->AddCommitItem(metahandle,
+ commit_set_->AddCommitItem(metahandle,
entry.Get(syncable::ID),
entry.GetModelType());
}
@@ -388,14 +387,14 @@ void GetCommitIdsCommand::AddDeletes(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::MutableEntry entry(write_transaction, syncable::GET_BY_HANDLE,
metahandle);
if (entry.Get(syncable::IS_DEL)) {
syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
if (legal_delete_parents.count(parent_id)) {
- ordered_commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID),
+ commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID),
entry.GetModelType());
}
}
@@ -406,7 +405,6 @@ void GetCommitIdsCommand::BuildCommitIds(
syncable::WriteTransaction* write_transaction,
const ModelSafeRoutingInfo& routes,
const std::set<int64>& ready_unsynced_set) {
- ordered_commit_set_.reset(new OrderedCommitSet(routes));
// Commits follow these rules:
// 1. Moves or creates are preceded by needed folder creates, from
// root to leaf. For folders whose contents are ordered, moves
diff --git a/sync/engine/get_commit_ids_command.h b/sync/engine/get_commit_ids_command.h
index 334fb6b..9aca7cd 100644
--- a/sync/engine/get_commit_ids_command.h
+++ b/sync/engine/get_commit_ids_command.h
@@ -20,11 +20,23 @@ using std::vector;
namespace browser_sync {
+// A class that contains the code used to search the syncable::Directory for
+// locally modified items that are ready to be committed to the server.
+//
+// See SyncerCommand documentation for more info.
class GetCommitIdsCommand : public SyncerCommand {
friend class SyncerTest;
public:
- explicit GetCommitIdsCommand(int commit_batch_size);
+ // The batch_size parameter is the maximum number of entries we are allowed
+ // to commit in a single batch. This value can be modified by the server.
+ //
+ // The ordered_commit_set parameter is an output parameter that will contain a
+ // set of items that are ready to commit. Its size shall not exceed the
+ // provided batch_size. The contents of this "set" will be ordered; see the
+ // comments in this class' implementation for details.
+ GetCommitIdsCommand(const size_t commit_batch_size,
+ sessions::OrderedCommitSet* ordered_commit_set);
virtual ~GetCommitIdsCommand();
// SyncerCommand implementation.
@@ -93,8 +105,7 @@ class GetCommitIdsCommand : public SyncerCommand {
sessions::OrderedCommitSet* result) const;
// Appends all commit ready predecessors of |item|, followed by |item| itself,
- // to |ordered_commit_set_|, iff item and all its predecessors not in
- // conflict.
+ // to |commit_set|, iff the item and all its predecessors are not in conflict.
// Return values:
// False: if there was an entry in conflict.
// True: if all entries were checked for commit readiness and added to
@@ -103,7 +114,7 @@ class GetCommitIdsCommand : public SyncerCommand {
const ModelSafeRoutingInfo& routes,
const std::set<int64>& ready_unsynced_set,
const syncable::Entry& item,
- sessions::OrderedCommitSet* result) const;
+ sessions::OrderedCommitSet* commit_set) const;
bool IsCommitBatchFull() const;
@@ -114,9 +125,11 @@ class GetCommitIdsCommand : public SyncerCommand {
void AddDeletes(syncable::WriteTransaction* write_transaction,
const std::set<int64>& ready_unsynced_set);
- scoped_ptr<sessions::OrderedCommitSet> ordered_commit_set_;
+ // Input parameter; see constructor comment.
+ const size_t requested_commit_batch_size_;
- int requested_commit_batch_size_;
+ // Output parameter; see constructor comment.
+ sessions::OrderedCommitSet* commit_set_;
DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
};
diff --git a/sync/engine/net/server_connection_manager.h b/sync/engine/net/server_connection_manager.h
index 2121d8e..e6e18da 100644
--- a/sync/engine/net/server_connection_manager.h
+++ b/sync/engine/net/server_connection_manager.h
@@ -28,8 +28,6 @@ class ClientToServerMessage;
namespace browser_sync {
-class ClientToServerMessage;
-
static const int32 kUnsetResponseCode = -1;
static const int32 kUnsetContentLength = -1;
static const int32 kUnsetPayloadLength = -1;
diff --git a/sync/engine/post_commit_message_command.cc b/sync/engine/post_commit_message_command.cc
deleted file mode 100644
index 8f51f20..0000000
--- a/sync/engine/post_commit_message_command.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/post_commit_message_command.h"
-
-#include <vector>
-
-#include "base/location.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncproto.h"
-#include "sync/sessions/sync_session.h"
-
-using std::vector;
-
-namespace browser_sync {
-
-PostCommitMessageCommand::PostCommitMessageCommand() {}
-PostCommitMessageCommand::~PostCommitMessageCommand() {}
-
-SyncerError PostCommitMessageCommand::ExecuteImpl(
- sessions::SyncSession* session) {
- if (session->status_controller().commit_ids().empty())
- return SYNCER_OK; // Nothing to commit.
- ClientToServerResponse response;
- syncable::Directory* dir = session->context()->directory();
- sessions::StatusController* status = session->mutable_status_controller();
- SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
- status->commit_message(), &response, session);
- if (result != SYNCER_OK) {
- // None of our changes got through. Clear the SYNCING bit which was
- // set to true during BuildCommitCommand, and which may still be true.
- // Not to be confused with IS_UNSYNCED, this bit is used to detect local
- // changes to items that happen during the server Commit operation.
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
- const vector<syncable::Id>& commit_ids = status->commit_ids();
- for (size_t i = 0; i < commit_ids.size(); i++) {
- syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
- entry.Put(syncable::SYNCING, false);
- }
- return result;
- }
-
- status->set_items_committed();
- status->mutable_commit_response()->CopyFrom(response);
- return SYNCER_OK;
-}
-
-} // namespace browser_sync
diff --git a/sync/engine/post_commit_message_command.h b/sync/engine/post_commit_message_command.h
deleted file mode 100644
index 50fae39..0000000
--- a/sync/engine/post_commit_message_command.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
-#define SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
-#pragma once
-
-#include "base/compiler_specific.h"
-#include "sync/engine/syncer_command.h"
-
-namespace browser_sync {
-
-class PostCommitMessageCommand : public SyncerCommand {
- public:
- PostCommitMessageCommand();
- virtual ~PostCommitMessageCommand();
-
- // SyncerCommand implementation.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
-};
-
-} // namespace browser_sync
-
-#endif // SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/process_commit_response_command.cc
index 6b2f633..6514bc1 100644
--- a/sync/engine/process_commit_response_command.cc
+++ b/sync/engine/process_commit_response_command.cc
@@ -48,7 +48,15 @@ using sessions::StatusController;
using sessions::SyncSession;
using sessions::ConflictProgress;
-ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
+ProcessCommitResponseCommand::ProcessCommitResponseCommand(
+ const sessions::OrderedCommitSet& commit_set,
+ const ClientToServerMessage& commit_message,
+ const ClientToServerResponse& commit_response)
+ : commit_set_(commit_set),
+ commit_message_(commit_message),
+ commit_response_(commit_response) {
+}
+
ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
@@ -57,10 +65,9 @@ std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
syncable::Directory* dir = session.context()->directory();
syncable::ReadTransaction trans(FROM_HERE, dir);
- const StatusController& status = session.status_controller();
- for (size_t i = 0; i < status.commit_ids().size(); ++i) {
+ for (size_t i = 0; i < commit_set_.Size(); ++i) {
groups_with_commits.insert(
- GetGroupForModelType(status.GetUnrestrictedCommitModelTypeAt(i),
+ GetGroupForModelType(commit_set_.GetModelTypeAt(i),
session.routing_info()));
}
@@ -68,19 +75,18 @@ std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
}
SyncerError ProcessCommitResponseCommand::ModelNeutralExecuteImpl(
- sessions::SyncSession* session) {
- const StatusController& status = session->status_controller();
- const ClientToServerResponse& response(status.commit_response());
- const vector<syncable::Id>& commit_ids(status.commit_ids());
+ SyncSession* session) {
+ syncable::Directory* dir = session->context()->directory();
+ const vector<syncable::Id>& commit_ids = commit_set_.GetAllCommitIds();
- if (!response.has_commit()) {
- // TODO(sync): What if we didn't try to commit anything?
+ if (!commit_response_.has_commit()) {
LOG(WARNING) << "Commit response has no commit body!";
+ ClearSyncingBits(dir, commit_ids);
return SERVER_RESPONSE_VALIDATION_FAILED;
}
- const CommitResponse& cr = response.commit();
- int commit_count = commit_ids.size();
+ const CommitResponse& cr = commit_response_.commit();
+ int commit_count = commit_set_.Size();
if (cr.entryresponse_size() != commit_count) {
LOG(ERROR) << "Commit response has wrong number of entries! Expected:" <<
commit_count << " Got:" << cr.entryresponse_size();
@@ -90,6 +96,7 @@ SyncerError ProcessCommitResponseCommand::ModelNeutralExecuteImpl(
if (cr.entryresponse(i).has_error_message())
LOG(ERROR) << " " << cr.entryresponse(i).error_message();
}
+ ClearSyncingBits(dir, commit_ids);
return SERVER_RESPONSE_VALIDATION_FAILED;
}
return SYNCER_OK;
@@ -99,60 +106,60 @@ SyncerError ProcessCommitResponseCommand::ModelChangingExecuteImpl(
SyncSession* session) {
SyncerError result = ProcessCommitResponse(session);
ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
- if (session->status_controller().HasBookmarkCommitActivity() &&
- session->status_controller().syncer_status()
+
+ // This is to be run on one model only: the bookmark model.
+ if (session->status_controller().HasBookmarkCommitActivity()) {
+ // If the commit failed, return the data to the ExtensionsActivityMonitor.
+ if (session->status_controller().syncer_status()
.num_successful_bookmark_commits == 0) {
- monitor->PutRecords(session->extensions_activity());
+ monitor->PutRecords(session->extensions_activity());
+ }
+ // Clear our cached data in either case.
session->mutable_extensions_activity()->clear();
}
+
return result;
}
SyncerError ProcessCommitResponseCommand::ProcessCommitResponse(
SyncSession* session) {
syncable::Directory* dir = session->context()->directory();
-
StatusController* status = session->mutable_status_controller();
- const ClientToServerResponse& response(status->commit_response());
- const CommitResponse& cr = response.commit();
- const sync_pb::CommitMessage& commit_message =
- status->commit_message().commit();
-
- // If we try to commit a parent and child together and the parent conflicts
- // the child will have a bad parent causing an error. As this is not a
- // critical error, we trap it and don't LOG(ERROR). To enable this we keep
- // a map of conflicting new folders.
+ const CommitResponse& cr = commit_response_.commit();
+ const sync_pb::CommitMessage& commit_message = commit_message_.commit();
+
int transient_error_commits = 0;
int conflicting_commits = 0;
int error_commits = 0;
int successes = 0;
- set<syncable::Id> conflicting_new_folder_ids;
+
set<syncable::Id> deleted_folders;
ConflictProgress* conflict_progress = status->mutable_conflict_progress();
- OrderedCommitSet::Projection proj = status->commit_id_projection();
+ OrderedCommitSet::Projection proj = status->commit_id_projection(
+ commit_set_);
+
if (!proj.empty()) { // Scope for WriteTransaction.
WriteTransaction trans(FROM_HERE, SYNCER, dir);
for (size_t i = 0; i < proj.size(); i++) {
- CommitResponse::ResponseType response_type =
- ProcessSingleCommitResponse(&trans, cr.entryresponse(proj[i]),
- commit_message.entries(proj[i]),
- status->GetCommitIdAt(proj[i]),
- &conflicting_new_folder_ids,
- &deleted_folders);
+ CommitResponse::ResponseType response_type = ProcessSingleCommitResponse(
+ &trans,
+ cr.entryresponse(proj[i]),
+ commit_message.entries(proj[i]),
+ commit_set_.GetCommitIdAt(proj[i]),
+ &deleted_folders);
switch (response_type) {
case CommitResponse::INVALID_MESSAGE:
++error_commits;
break;
case CommitResponse::CONFLICT:
++conflicting_commits;
- // Only server CONFLICT responses will activate conflict resolution.
conflict_progress->AddServerConflictingItemById(
- status->GetCommitIdAt(proj[i]));
+ commit_set_.GetCommitIdAt(proj[i]));
break;
case CommitResponse::SUCCESS:
// TODO(sync): worry about sync_rate_ rate calc?
++successes;
- if (status->GetCommitModelTypeAt(proj[i]) == syncable::BOOKMARKS)
+ if (commit_set_.GetModelTypeAt(proj[i]) == syncable::BOOKMARKS)
status->increment_num_successful_bookmark_commits();
status->increment_num_successful_commits();
break;
@@ -171,16 +178,36 @@ SyncerError ProcessCommitResponseCommand::ProcessCommitResponse(
SyncerUtil::MarkDeletedChildrenSynced(dir, &deleted_folders);
int commit_count = static_cast<int>(proj.size());
- if (commit_count == (successes + conflicting_commits)) {
- // We consider conflicting commits as a success because things will work out
- // on their own when we receive them. Flags will be set so that
- // HasMoreToSync() will cause SyncScheduler to enter another sync cycle to
- // handle this condition.
+ if (commit_count == successes) {
return SYNCER_OK;
} else if (error_commits > 0) {
return SERVER_RETURN_UNKNOWN_ERROR;
} else if (transient_error_commits > 0) {
return SERVER_RETURN_TRANSIENT_ERROR;
+ } else if (conflicting_commits > 0) {
+ // This means that the server already has an item with this version, but
+ // we haven't seen that update yet.
+ //
+ // A well-behaved client should respond to this by proceeding to the
+ // download updates phase, fetching the conflicting items, then attempting
+ // to resolve the conflict. That's not what this client does.
+ //
+ // We don't currently have any code to support that exceptional control
+ // flow. We don't intend to add any because this response code will be
+ // deprecated soon. Instead, we handle this in the same way that we handle
+ // transient errors. We abort the current sync cycle, wait a little while,
+ // then try again. The retry sync cycle will attempt to download updates
+ // which should be sufficient to trigger client-side conflict resolution.
+ //
+ // Not treating this as an error would be dangerous. There's a risk that
+ // the commit loop would loop indefinitely. The loop won't exit until the
+ // number of unsynced items hits zero or an error is detected. If we're
+ // constantly receiving conflict responses and we don't treat them as
+ // errors, there would be no reason to leave that loop.
+ //
+ // TODO: Remove this option when the CONFLICT return value is fully
+ // deprecated.
+ return SERVER_RETURN_TRANSIENT_ERROR;
} else {
LOG(FATAL) << "Inconsistent counts when processing commit response";
return SYNCER_OK;
@@ -200,7 +227,6 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
const sync_pb::SyncEntity& commit_request_entry,
const syncable::Id& pre_commit_id,
- std::set<syncable::Id>* conflicting_new_folder_ids,
set<syncable::Id>* deleted_folders) {
const CommitResponse_EntryResponse& server_entry =
@@ -229,10 +255,6 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
}
if (CommitResponse::CONFLICT == response) {
DVLOG(1) << "Conflict Committing: " << local_entry;
- // TODO(nick): conflicting_new_folder_ids is a purposeless anachronism.
- if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
- conflicting_new_folder_ids->insert(pre_commit_id);
- }
return response;
}
if (CommitResponse::RETRY == response) {
@@ -478,4 +500,22 @@ void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
}
}
+void ProcessCommitResponseCommand::ClearSyncingBits(
+ syncable::Directory *dir,
+ const vector<syncable::Id>& commit_ids) {
+ // This is part of the cleanup in the case of a failed commit. Normally we
+ // would unset the SYNCING bit when processing the commit response. In the
+ // failure case we don't process the response, so we need to clear those bits
+ // here.
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
+ if (entry.good()) {
+ entry.Put(syncable::SYNCING, false);
+ } else {
+ LOG(WARNING) << "Id: " << commit_ids[i] << " disappeared";
+ }
+ }
+}
+
} // namespace browser_sync
diff --git a/sync/engine/process_commit_response_command.h b/sync/engine/process_commit_response_command.h
index 8e288de..0680cc4 100644
--- a/sync/engine/process_commit_response_command.h
+++ b/sync/engine/process_commit_response_command.h
@@ -18,14 +18,43 @@ namespace syncable {
class Id;
class WriteTransaction;
class MutableEntry;
+class Directory;
}
namespace browser_sync {
+namespace sessions {
+class OrderedCommitSet;
+}
+
+// A class that processes the server's response to our commit attempt. Note
+// that some of the preliminary processing is performed in
+// PostClientToServerMessage command.
+//
+// As part of processing the commit response, this command will modify sync
+// entries. It can rename items, update their versions, etc.
+//
+// This command will return a non-SYNCER_OK value if an error occurred while
+// processing the response, or if the server's response indicates that it had
+// trouble processing the request.
+//
+// See SyncerCommand documentation for more info.
class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
public:
- ProcessCommitResponseCommand();
+ // The commit_set parameter contains references to all the items which were
+ // to be committed in this batch.
+ //
+ // The commit_message parameter contains the message that was sent to the
+ // server.
+ //
+ // The commit_response parameter contains the response received from the
+ // server. This may be uninitialized if we were unable to contact the server
+ // or a serious error was encountered.
+ ProcessCommitResponseCommand(
+ const sessions::OrderedCommitSet& commit_set,
+ const ClientToServerMessage& commit_message,
+ const ClientToServerResponse& commit_response);
virtual ~ProcessCommitResponseCommand();
protected:
@@ -43,7 +72,6 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
const sync_pb::SyncEntity& pb_committed_entry,
const syncable::Id& pre_commit_id,
- std::set<syncable::Id>* conflicting_new_directory_ids,
std::set<syncable::Id>* deleted_folders);
// Actually does the work of execute.
@@ -93,6 +121,15 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
const sync_pb::SyncEntity& committed_entry,
const CommitResponse_EntryResponse& entry_response);
+ // Helper to clean up in case of failure.
+ void ClearSyncingBits(
+ syncable::Directory *dir,
+ const std::vector<syncable::Id>& commit_ids);
+
+ const sessions::OrderedCommitSet& commit_set_;
+ const ClientToServerMessage& commit_message_;
+ const ClientToServerResponse& commit_response_;
+
DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
};
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
index 517bc51..195d58a 100644
--- a/sync/engine/process_commit_response_command_unittest.cc
+++ b/sync/engine/process_commit_response_command_unittest.cc
@@ -47,11 +47,7 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
(*mutable_routing_info())[syncable::PREFERENCES] = GROUP_UI;
(*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
- commit_set_.reset(new sessions::OrderedCommitSet(routing_info()));
SyncerCommandTest::SetUp();
- // Need to explicitly use this-> to avoid obscure template
- // warning.
- this->ExpectNoGroupsToChange(command_);
}
protected:
@@ -113,12 +109,14 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
// record and a commit response for it in the syncer session. If item_id
// is a local ID, the item will be a create operation. Otherwise, it
// will be an edit.
- void CreateUnprocessedCommitResult(const Id& item_id,
- const Id& parent_id,
- const string& name,
- syncable::ModelType model_type) {
- sessions::StatusController* sync_state =
- session()->mutable_status_controller();
+ void CreateUnprocessedCommitResult(
+ const Id& item_id,
+ const Id& parent_id,
+ const string& name,
+ syncable::ModelType model_type,
+ sessions::OrderedCommitSet *commit_set,
+ browser_sync::ClientToServerMessage *commit,
+ browser_sync::ClientToServerResponse *response) {
bool is_folder = true;
int64 metahandle = 0;
CreateUnsyncedItem(item_id, parent_id, name, is_folder, model_type,
@@ -126,18 +124,14 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
// ProcessCommitResponseCommand consumes commit_ids from the session
// state, so we need to update that. O(n^2) because it's a test.
- commit_set_->AddCommitItem(metahandle, item_id, model_type);
- sync_state->set_commit_set(*commit_set_.get());
+ commit_set->AddCommitItem(metahandle, item_id, model_type);
WriteTransaction trans(FROM_HERE, UNITTEST, directory());
MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
ASSERT_TRUE(entry.good());
entry.Put(syncable::SYNCING, true);
- // ProcessCommitResponseCommand looks at both the commit message as well
- // as the commit response, so we need to synthesize both here.
- sync_pb::ClientToServerMessage* commit =
- sync_state->mutable_commit_message();
+ // Add to the commit message.
commit->set_message_contents(ClientToServerMessage::COMMIT);
SyncEntity* entity = static_cast<SyncEntity*>(
commit->mutable_commit()->add_entries());
@@ -148,8 +142,7 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
entity->mutable_specifics()->CopyFrom(entry.Get(syncable::SPECIFICS));
entity->set_id(item_id);
- sync_pb::ClientToServerResponse* response =
- sync_state->mutable_commit_response();
+ // Add to the response message.
response->set_error_code(sync_pb::SyncEnums::SUCCESS);
sync_pb::CommitResponse_EntryResponse* entry_response =
response->mutable_commit()->add_entryresponse();
@@ -176,20 +169,15 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
}
}
- void SetLastErrorCode(CommitResponse::ResponseType error_code) {
- sessions::StatusController* sync_state =
- session()->mutable_status_controller();
- sync_pb::ClientToServerResponse* response =
- sync_state->mutable_commit_response();
+ void SetLastErrorCode(CommitResponse::ResponseType error_code,
+ sync_pb::ClientToServerResponse* response) {
sync_pb::CommitResponse_EntryResponse* entry_response =
response->mutable_commit()->mutable_entryresponse(
response->mutable_commit()->entryresponse_size() - 1);
entry_response->set_response_type(error_code);
}
- ProcessCommitResponseCommand command_;
TestIdFactory id_factory_;
- scoped_ptr<sessions::OrderedCommitSet> commit_set_;
private:
int64 next_old_revision_;
int64 next_new_revision_;
@@ -198,6 +186,10 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
};
TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
Id bookmark_folder_id = id_factory_.NewLocalId();
Id bookmark_id1 = id_factory_.NewLocalId();
Id bookmark_id2 = id_factory_.NewLocalId();
@@ -205,22 +197,30 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Id autofill_id1 = id_factory_.NewLocalId();
Id autofill_id2 = id_factory_.NewLocalId();
CreateUnprocessedCommitResult(bookmark_folder_id, id_factory_.root(),
- "A bookmark folder", syncable::BOOKMARKS);
+ "A bookmark folder", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(bookmark_id1, bookmark_folder_id,
- "bookmark 1", syncable::BOOKMARKS);
+ "bookmark 1", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(bookmark_id2, bookmark_folder_id,
- "bookmark 2", syncable::BOOKMARKS);
+ "bookmark 2", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(pref_id1, id_factory_.root(),
- "Pref 1", syncable::PREFERENCES);
+ "Pref 1", syncable::PREFERENCES,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(pref_id2, id_factory_.root(),
- "Pref 2", syncable::PREFERENCES);
+ "Pref 2", syncable::PREFERENCES,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(autofill_id1, id_factory_.root(),
- "Autofill 1", syncable::AUTOFILL);
+ "Autofill 1", syncable::AUTOFILL,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(autofill_id2, id_factory_.root(),
- "Autofill 2", syncable::AUTOFILL);
+ "Autofill 2", syncable::AUTOFILL,
+ &commit_set, &request, &response);
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
+ command.ExecuteImpl(session());
ReadTransaction trans(FROM_HERE, directory());
Id new_fid;
@@ -270,10 +270,15 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
// how this scenario used to fail, reversing the order for the second half
// of the children.
TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
// Create the parent folder, a new item whose ID will change on commit.
Id folder_id = id_factory_.NewLocalId();
CreateUnprocessedCommitResult(folder_id, id_factory_.root(), "A",
- syncable::BOOKMARKS);
+ syncable::BOOKMARKS,
+ &commit_set, &request, &response);
// Verify that the item is reachable.
{
@@ -292,7 +297,8 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
CreateUnprocessedCommitResult(
- id, folder_id, base::StringPrintf("Item %d", i), syncable::BOOKMARKS);
+ id, folder_id, base::StringPrintf("Item %d", i), syncable::BOOKMARKS,
+ &commit_set, &request, &response);
}
// The second 25 children will be unsynced items but NOT part of the commit
// batch. When the ID of the parent folder changes during the commit,
@@ -309,8 +315,9 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// 25 items. This should apply the values indicated by
// each CommitResponse_EntryResponse to the syncable Entries. All new
// items in the commit batch should have their IDs changed to server IDs.
- ExpectGroupToChange(command_, GROUP_UI);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ ExpectGroupToChange(command, GROUP_UI);
+ command.ExecuteImpl(session());
ReadTransaction trans(FROM_HERE, directory());
// Lookup the parent folder by finding a child of the root. We can't use
@@ -391,20 +398,26 @@ INSTANTIATE_TEST_CASE_P(ProcessCommitResponse,
// happens to the extensions activity records. Commits could fail or succeed,
// depending on the test parameter.
TEST_P(MixedResult, ExtensionActivity) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
EXPECT_NE(routing_info().find(syncable::BOOKMARKS)->second,
routing_info().find(syncable::AUTOFILL)->second)
<< "To not be lame, this test requires more than one active group.";
// Bookmark item setup.
CreateUnprocessedCommitResult(id_factory_.NewServerId(),
- id_factory_.root(), "Some bookmark", syncable::BOOKMARKS);
+ id_factory_.root(), "Some bookmark", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
if (ShouldFailBookmarkCommit())
- SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR, &response);
// Autofill item setup.
CreateUnprocessedCommitResult(id_factory_.NewServerId(),
- id_factory_.root(), "Some autofill", syncable::AUTOFILL);
+ id_factory_.root(), "Some autofill", syncable::AUTOFILL,
+ &commit_set, &request, &response);
if (ShouldFailAutofillCommit())
- SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR, &response);
// Put some extensions activity in the session.
{
@@ -415,8 +428,9 @@ TEST_P(MixedResult, ExtensionActivity) {
(*records)["xyz"].extension_id = "xyz";
(*records)["xyz"].bookmark_write_count = 4U;
}
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ command.ExecuteImpl(session());
+ ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
ExtensionsActivityMonitor::Records final_monitor_records;
context()->extensions_monitor()->GetAndClearRecords(&final_monitor_records);
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
index 5de4ae5..ac6b5eb7 100644
--- a/sync/engine/syncer.cc
+++ b/sync/engine/syncer.cc
@@ -14,11 +14,10 @@
#include "sync/engine/build_commit_command.h"
#include "sync/engine/cleanup_disabled_types_command.h"
#include "sync/engine/clear_data_command.h"
+#include "sync/engine/commit.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/download_updates_command.h"
-#include "sync/engine/get_commit_ids_command.h"
#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/post_commit_message_command.h"
#include "sync/engine/process_commit_response_command.h"
#include "sync/engine/process_updates_command.h"
#include "sync/engine/resolve_conflicts_command.h"
@@ -65,9 +64,7 @@ const char* SyncerStepToString(const SyncerStep step)
ENUM_CASE(PROCESS_UPDATES);
ENUM_CASE(STORE_TIMESTAMPS);
ENUM_CASE(APPLY_UPDATES);
- ENUM_CASE(BUILD_COMMIT_REQUEST);
- ENUM_CASE(POST_COMMIT_MESSAGE);
- ENUM_CASE(PROCESS_COMMIT_RESPONSE);
+ ENUM_CASE(COMMIT);
ENUM_CASE(RESOLVE_CONFLICTS);
ENUM_CASE(APPLY_UPDATES_TO_RESOLVE_CONFLICTS);
ENUM_CASE(CLEAR_PRIVATE_DATA);
@@ -180,59 +177,12 @@ void Syncer::SyncShare(sessions::SyncSession* session,
last_step = SYNCER_END;
next_step = SYNCER_END;
} else {
- next_step = BUILD_COMMIT_REQUEST;
+ next_step = COMMIT;
}
break;
}
- // These two steps are combined since they are executed within the same
- // write transaction.
- case BUILD_COMMIT_REQUEST: {
- syncable::Directory* dir = session->context()->directory();
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);
-
- DVLOG(1) << "Getting the Commit IDs";
- GetCommitIdsCommand get_commit_ids_command(
- session->context()->max_commit_batch_size());
- get_commit_ids_command.Execute(session);
-
- if (!session->status_controller().commit_ids().empty()) {
- DVLOG(1) << "Building a commit message";
-
- // This isn't perfect, since the set of extensions activity may not
- // correlate exactly with the items being committed. That's OK as
- // long as we're looking for a rough estimate of extensions activity,
- // not an precise mapping of which commits were triggered by which
- // extension.
- //
- // We will push this list of extensions activity back into the
- // ExtensionsActivityMonitor if this commit turns out to not contain
- // any bookmarks, or if the commit fails.
- session->context()->extensions_monitor()->GetAndClearRecords(
- session->mutable_extensions_activity());
-
- BuildCommitCommand build_commit_command;
- build_commit_command.Execute(session);
-
- next_step = POST_COMMIT_MESSAGE;
- } else {
- next_step = RESOLVE_CONFLICTS;
- }
-
- break;
- }
- case POST_COMMIT_MESSAGE: {
- PostCommitMessageCommand post_commit_command;
- session->mutable_status_controller()->set_last_post_commit_result(
- post_commit_command.Execute(session));
- next_step = PROCESS_COMMIT_RESPONSE;
- break;
- }
- case PROCESS_COMMIT_RESPONSE: {
- ProcessCommitResponseCommand process_response_command;
- session->mutable_status_controller()->
- set_last_process_commit_response_result(
- process_response_command.Execute(session));
+ case COMMIT: {
+ BuildAndPostCommits(this, session);
next_step = RESOLVE_CONFLICTS;
break;
}
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
index 86af488..a5b7fc8 100644
--- a/sync/engine/syncer.h
+++ b/sync/engine/syncer.h
@@ -35,9 +35,7 @@ enum SyncerStep {
PROCESS_UPDATES,
STORE_TIMESTAMPS,
APPLY_UPDATES,
- BUILD_COMMIT_REQUEST,
- POST_COMMIT_MESSAGE,
- PROCESS_COMMIT_RESPONSE,
+ COMMIT,
RESOLVE_CONFLICTS,
APPLY_UPDATES_TO_RESOLVE_CONFLICTS,
CLEAR_PRIVATE_DATA, // TODO(tim): Rename 'private' to 'user'.
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index 9253bd3..245c685 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -391,37 +391,31 @@ class SyncerTest : public testing::Test,
void DoTruncationTest(const vector<int64>& unsynced_handle_view,
const vector<syncable::Id>& expected_id_order) {
for (size_t limit = expected_id_order.size() + 2; limit > 0; --limit) {
- StatusController* status = session_->mutable_status_controller();
WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
ScopedSetSessionWriteTransaction set_trans(session_.get(), &wtrans);
ModelSafeRoutingInfo routes;
GetModelSafeRoutingInfo(&routes);
- GetCommitIdsCommand command(limit);
+ sessions::OrderedCommitSet output_set(routes);
+ GetCommitIdsCommand command(limit, &output_set);
std::set<int64> ready_unsynced_set;
command.FilterUnreadyEntries(&wtrans, syncable::ModelTypeSet(),
syncable::ModelTypeSet(), false,
unsynced_handle_view, &ready_unsynced_set);
command.BuildCommitIds(session_->write_transaction(), routes,
ready_unsynced_set);
- syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
- ready_unsynced_set.begin(), ready_unsynced_set.end());
- status->set_unsynced_handles(ready_unsynced_vector);
- vector<syncable::Id> output =
- command.ordered_commit_set_->GetAllCommitIds();
size_t truncated_size = std::min(limit, expected_id_order.size());
- ASSERT_EQ(truncated_size, output.size());
+ ASSERT_EQ(truncated_size, output_set.Size());
for (size_t i = 0; i < truncated_size; ++i) {
- ASSERT_EQ(expected_id_order[i], output[i])
+ ASSERT_EQ(expected_id_order[i], output_set.GetCommitIdAt(i))
<< "At index " << i << " with batch size limited to " << limit;
}
sessions::OrderedCommitSet::Projection proj;
- proj = command.ordered_commit_set_->GetCommitIdProjection(GROUP_PASSIVE);
+ proj = output_set.GetCommitIdProjection(GROUP_PASSIVE);
ASSERT_EQ(truncated_size, proj.size());
for (size_t i = 0; i < truncated_size; ++i) {
SCOPED_TRACE(::testing::Message("Projection mismatch with i = ") << i);
- syncable::Id projected =
- command.ordered_commit_set_->GetCommitIdAt(proj[i]);
+ syncable::Id projected = output_set.GetCommitIdAt(proj[i]);
ASSERT_EQ(expected_id_order[proj[i]], projected);
// Since this projection is the identity, the following holds.
ASSERT_EQ(expected_id_order[i], projected);
@@ -734,9 +728,6 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // We remove any unready entries from the status controller's unsynced
- // handles, so this should remain 0 even though the entries didn't commit.
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Nothing should have commited due to bookmarks being encrypted and
// the cryptographer having pending keys. A would have been resolved
// as a simple conflict, but still be unsynced until the next sync cycle.
@@ -751,8 +742,6 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // 2 unsynced handles to reflect the items that committed succesfully.
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
// All properly encrypted and non-conflicting items should commit. "A" was
// conflicting, but last sync cycle resolved it as simple conflict, so on
// this sync cycle it committed succesfullly.
@@ -780,9 +769,9 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // We attempted to commit two items.
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
+ const StatusController& status_controller = session_->status_controller();
+ // Expect success.
+ EXPECT_EQ(status_controller.last_post_commit_result(), SYNCER_OK);
// None should be unsynced anymore.
ReadTransaction rtrans(FROM_HERE, directory());
VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
@@ -831,7 +820,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Initial state. Everything is normal.
ReadTransaction rtrans(FROM_HERE, directory());
VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
@@ -852,7 +840,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// All should be unapplied due to being undecryptable and have a valid
// BASE_SERVER_SPECIFICS.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -873,7 +860,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Items 1, 2, and 4 should have newer server versions, 3 remains the same.
// All should remain unapplied due to be undecryptable.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -892,7 +878,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_bookmark);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
// Items 1 is now unencrypted, so should have applied normally.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -928,7 +913,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
}
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Item 1 remains unsynced due to there being pending keys.
// Items 2, 3, 4 should remain unsynced since they were not up to date.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -945,31 +929,23 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
}
// First cycle resolves conflicts, second cycle commits changes.
SyncShareNudge();
- EXPECT_EQ(2, session_->status_controller().syncer_status().
- num_server_overwrites);
- EXPECT_EQ(1, session_->status_controller().syncer_status().
- num_local_overwrites);
- // We attempted to commit item 1.
- EXPECT_EQ(1U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
- SyncShareNudge();
- {
- // Everything should be resolved now. The local changes should have
- // overwritten the server changes for 2 and 4, while the server changes
- // overwrote the local for entry 3.
- // We attempted to commit two handles.
- EXPECT_EQ(0, session_->status_controller().syncer_status().
- num_server_overwrites);
- EXPECT_EQ(0, session_->status_controller().syncer_status().
- num_local_overwrites);
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
- ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
- VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
- VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
- VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
- }
+ EXPECT_EQ(2, status().syncer_status().num_server_overwrites);
+ EXPECT_EQ(1, status().syncer_status().num_local_overwrites);
+ // We successfully committed item(s).
+ EXPECT_EQ(status().last_post_commit_result(), SYNCER_OK);
+ SyncShareNudge();
+
+ // Everything should be resolved now. The local changes should have
+ // overwritten the server changes for 2 and 4, while the server changes
+ // overwrote the local for entry 3.
+ EXPECT_EQ(0, status().syncer_status().num_server_overwrites);
+ EXPECT_EQ(0, status().syncer_status().num_local_overwrites);
+ EXPECT_EQ(status().last_post_commit_result(), SYNCER_OK);
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
}
#undef VERIFY_ENTRY
@@ -1198,7 +1174,6 @@ TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
}
SyncShareNudge();
- EXPECT_EQ(2u, status().unsynced_handles().size());
ASSERT_EQ(2u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1242,7 +1217,6 @@ TEST_F(SyncerTest, TestPurgeWhileUnsynced) {
syncable::ModelTypeSet(syncable::PREFERENCES));
SyncShareNudge();
- EXPECT_EQ(2U, status().unsynced_handles().size());
ASSERT_EQ(2U, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1481,7 +1455,6 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
}
SyncShareNudge();
- EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(6u, mock_server_->committed_ids().size());
// This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
// It will treat these like moves.
@@ -1549,7 +1522,6 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
}
SyncShareNudge();
- EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(6u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1588,7 +1560,6 @@ TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1634,7 +1605,6 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1705,7 +1675,6 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -2332,8 +2301,9 @@ TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
mock_server_->SetMidCommitCallback(
base::Bind(&EntryCreatedInNewFolderTest::CreateFolderInBob,
base::Unretained(this)));
- syncer_->SyncShare(session_.get(), BUILD_COMMIT_REQUEST, SYNCER_END);
- EXPECT_EQ(1u, mock_server_->committed_ids().size());
+ syncer_->SyncShare(session_.get(), COMMIT, SYNCER_END);
+ // We loop until no unsynced handles remain, so we will commit both ids.
+ EXPECT_EQ(2u, mock_server_->committed_ids().size());
{
ReadTransaction trans(FROM_HERE, directory());
Entry parent_entry(&trans, syncable::GET_BY_ID,
@@ -2345,7 +2315,7 @@ TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
Entry child(&trans, syncable::GET_BY_ID, child_id);
ASSERT_TRUE(child.good());
EXPECT_EQ(parent_entry.Get(ID), child.Get(PARENT_ID));
-}
+ }
}
TEST_F(SyncerTest, NegativeIDInUpdate) {
@@ -2691,9 +2661,12 @@ TEST_F(SyncerTest, NameCollidingFolderSwapWorksFine) {
saw_syncer_event_ = false;
}
-TEST_F(SyncerTest, CommitManyItemsInOneGo) {
- uint32 max_batches = 3;
- uint32 items_to_commit = kDefaultMaxCommitBatchSize * max_batches;
+// Committing more than kDefaultMaxCommitBatchSize items requires that
+// we post more than one commit command to the server. This test makes
+// sure that scenario works as expected.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_Success) {
+ uint32 num_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
{
WriteTransaction trans(FROM_HERE, UNITTEST, directory());
for (uint32 i = 0; i < items_to_commit; i++) {
@@ -2705,12 +2678,71 @@ TEST_F(SyncerTest, CommitManyItemsInOneGo) {
e.Put(SPECIFICS, DefaultBookmarkSpecifics());
}
}
- uint32 num_loops = 0;
- while (SyncShareNudge()) {
- num_loops++;
- ASSERT_LT(num_loops, max_batches * 2);
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ EXPECT_FALSE(SyncShareNudge());
+ EXPECT_EQ(num_batches, mock_server_->commit_messages().size());
+ EXPECT_EQ(0, directory()->unsynced_entity_count());
+}
+
+// Test that a single failure to contact the server will cause us to exit the
+// commit loop immediately.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_PostBufferFail) {
+ uint32 num_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = base::StringPrintf("%d", i);
+ string name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ e.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
}
- EXPECT_GE(mock_server_->commit_messages().size(), max_batches);
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ // The second commit should fail. It will be preceded by one successful
+ // GetUpdate and one successful commit.
+ mock_server_->FailNthPostBufferToPathCall(3);
+ SyncShareNudge();
+
+ EXPECT_EQ(1U, mock_server_->commit_messages().size());
+ EXPECT_FALSE(session_->Succeeded());
+ EXPECT_EQ(SYNC_SERVER_ERROR,
+ session_->status_controller().error().last_post_commit_result);
+ EXPECT_EQ(items_to_commit - kDefaultMaxCommitBatchSize,
+ directory()->unsynced_entity_count());
+}
+
+// Test that a single conflict response from the server will cause us to exit
+// the commit loop immediately.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_CommitConflict) {
+ uint32 num_batches = 2;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = base::StringPrintf("%d", i);
+ string name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ e.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
+ }
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ // Return a CONFLICT response for the first item.
+ mock_server_->set_conflict_n_commits(1);
+ SyncShareNudge();
+
+ // We should stop looping at the first sign of trouble.
+ EXPECT_EQ(1U, mock_server_->commit_messages().size());
+ EXPECT_FALSE(session_->Succeeded());
+ EXPECT_EQ(items_to_commit - (kDefaultMaxCommitBatchSize - 1),
+ directory()->unsynced_entity_count());
}
TEST_F(SyncerTest, HugeConflict) {
@@ -4266,18 +4298,27 @@ TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
base::Bind(&SyncerUndeletionTest::Undelete, base::Unretained(this)));
SyncShareNudge();
- // The item ought to exist as an unsynced undeletion (meaning,
- // we think that the next commit ought to be a recreation commit).
+ // We will continue to commit until all nodes are synced, so we expect
+ // that both the delete and following undelete were committed. We haven't
+ // downloaded any updates, though, so the SERVER fields will be the same
+ // as they were at the start of the cycle.
EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectUnsyncedUndeletion();
+
+ // Server fields lag behind.
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+
+ // We have committed the second (undelete) update.
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
// Now, encounter a GetUpdates corresponding to the deletion from
// the server. The undeletion should prevail again and be committed.
// None of this should trigger any conflict detection -- it is perfectly
// normal to receive updates from our own commits.
mock_server_->SetMidCommitCallback(base::Closure());
- mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ mock_server_->AddUpdateFromLastCommit();
SyncShareNudge();
EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());