Diffstat (limited to 'chrome/browser')
-rwxr-xr-x  chrome/browser/sync/engine/syncapi.cc | 149
-rwxr-xr-x [-rw-r--r--]  chrome/browser/sync/engine/syncapi.h | 40
-rwxr-xr-x  chrome/browser/sync/engine/syncapi_unittest.cc | 169
-rwxr-xr-x  chrome/browser/sync/engine/syncer_unittest.cc | 185
-rwxr-xr-x  chrome/browser/sync/engine/syncer_util.cc | 71
-rwxr-xr-x  chrome/browser/sync/engine/syncer_util.h | 6
-rw-r--r--  chrome/browser/sync/engine/syncproto.h | 2
-rwxr-xr-x  chrome/browser/sync/engine/verify_updates_command.cc | 24
-rwxr-xr-x [-rw-r--r--]  chrome/browser/sync/protocol/sync.proto | 46
-rwxr-xr-x  chrome/browser/sync/syncable/directory_backing_store.cc | 27
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.h | 2
-rwxr-xr-x  chrome/browser/sync/syncable/directory_backing_store_unittest.cc | 177
-rwxr-xr-x  chrome/browser/sync/syncable/syncable.cc | 99
-rwxr-xr-x  chrome/browser/sync/syncable/syncable.h | 28
-rwxr-xr-x  chrome/browser/sync/syncable/syncable_columns.h | 3
-rwxr-xr-x  chrome/browser/sync/syncable/syncable_unittest.cc | 195
16 files changed, 1136 insertions(+), 87 deletions(-)
diff --git a/chrome/browser/sync/engine/syncapi.cc b/chrome/browser/sync/engine/syncapi.cc
index 03c86ff..0fdf3a2 100755
--- a/chrome/browser/sync/engine/syncapi.cc
+++ b/chrome/browser/sync/engine/syncapi.cc
@@ -382,23 +382,6 @@ static void ServerNameToSyncAPIName(const std::string& server_name,
}
}
-// A UserShare encapsulates the syncable pieces that represent an authenticated
-// user and their data (share).
-// This encompasses all pieces required to build transaction objects on the
-// syncable share.
-struct UserShare {
- // The DirectoryManager itself, which is the parent of Transactions and can
- // be shared across multiple threads (unlike Directory).
- scoped_ptr<DirectoryManager> dir_manager;
-
- // The username of the sync user. This is empty until we have performed at
- // least one successful GAIA authentication with this username, which means
- // on first-run it is empty until an AUTH_SUCCEEDED event and on future runs
- // it is set as soon as the client instructs us to authenticate for the last
- // known valid user (AuthenticateForLastKnownUser()).
- std::string authenticated_name;
-};
-
////////////////////////////////////
// BaseNode member definitions.
@@ -570,6 +553,35 @@ bool WriteNode::InitByIdLookup(int64 id) {
return (entry_->good() && !entry_->Get(syncable::IS_DEL));
}
+// Find a node by client tag, and bind this WriteNode to it.
+// Return true if the write node was found, and was not deleted.
+// A deleted node can be undeleted via InitUniqueByCreation with the same tag.
+bool WriteNode::InitByClientTagLookup(const std::string& tag) {
+ DCHECK(!entry_) << "Init called twice";
+ if (tag.empty())
+ return false;
+ entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+ syncable::GET_BY_CLIENT_TAG, tag);
+ return (entry_->good() && !entry_->Get(syncable::IS_DEL));
+}
+
+void WriteNode::PutModelType(syncable::ModelType model_type) {
+ // Set an empty specifics of the appropriate datatype. The presence
+ // of the specific extension will identify the model type.
+ switch (model_type) {
+ case syncable::BOOKMARKS:
+ PutBookmarkSpecificsAndMarkForSyncing(
+ sync_pb::BookmarkSpecifics::default_instance());
+ break;
+ case syncable::PREFERENCES:
+ PutPreferenceSpecificsAndMarkForSyncing(
+ sync_pb::PreferenceSpecifics::default_instance());
+      break;
+    default:
+ NOTREACHED();
+ }
+ DCHECK(GetModelType() == model_type);
+}
+
// Create a new node with default properties, and bind this WriteNode to it.
// Return true on success.
bool WriteNode::InitByCreation(syncable::ModelType model_type,
@@ -597,21 +609,7 @@ bool WriteNode::InitByCreation(syncable::ModelType model_type,
// Entries are untitled folders by default.
entry_->Put(syncable::IS_DIR, true);
- // Set an empty specifics of the appropriate datatype. The presence
- // of the specific extension will identify the model type.
- switch (model_type) {
- case syncable::BOOKMARKS:
- PutBookmarkSpecificsAndMarkForSyncing(
- sync_pb::BookmarkSpecifics::default_instance());
- break;
- case syncable::PREFERENCES:
- PutPreferenceSpecificsAndMarkForSyncing(
- sync_pb::PreferenceSpecifics::default_instance());
- break;
- default:
- NOTREACHED();
- }
- DCHECK(GetModelType() == model_type);
+ PutModelType(model_type);
// Now set the predecessor, which sets IS_UNSYNCED as necessary.
PutPredecessor(predecessor);
@@ -619,6 +617,82 @@ bool WriteNode::InitByCreation(syncable::ModelType model_type,
return true;
}
+// Create a new node with default properties and a client defined unique tag,
+// and bind this WriteNode to it.
+// Return true on success. If the tag exists in the database, then
+// we will attempt to undelete the node.
+// TODO(chron): Code datatype into hash tag.
+// TODO(chron): Is model type ever lost?
+bool WriteNode::InitUniqueByCreation(syncable::ModelType model_type,
+ const BaseNode& parent,
+ const std::string& tag) {
+ DCHECK(!entry_) << "Init called twice";
+
+ syncable::Id parent_id = parent.GetEntry()->Get(syncable::ID);
+
+ // Start out with a dummy name. We expect
+ // the caller to set a meaningful name after creation.
+ string dummy(kDefaultNameForNewNodes);
+
+ // Check if we have this locally and need to undelete it.
+ scoped_ptr<syncable::MutableEntry> existing_entry(
+ new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+ syncable::GET_BY_CLIENT_TAG, tag));
+
+ if (existing_entry->good()) {
+ if (existing_entry->Get(syncable::IS_DEL)) {
+ // Rules for undelete:
+ // BASE_VERSION: Must keep the same.
+ // ID: Essential to keep the same.
+ // META_HANDLE: Must be the same, so we can't "split" the entry.
+ // IS_DEL: Must be set to false, will cause reindexing.
+ // This one is weird because IS_DEL is true for "update only"
+    //              items. It should be OK to undelete an update-only item.
+ // MTIME/CTIME: Seems reasonable to just leave them alone.
+ // IS_UNSYNCED: Must set this to true or face database insurrection.
+ // We do this below this block.
+ // IS_UNAPPLIED_UPDATE: Either keep it the same or also set BASE_VERSION
+ // to SERVER_VERSION. We keep it the same here.
+ // IS_DIR: We'll leave it the same.
+ // SPECIFICS: Reset it.
+
+ existing_entry->Put(syncable::IS_DEL, false);
+
+ // Client tags are immutable and must be paired with the ID.
+ // If a server update comes down with an ID and client tag combo,
+ // and it already exists, always overwrite it and store only one copy.
+ // We have to undelete entries because we can't disassociate IDs from
+ // tags and updates.
+
+ existing_entry->Put(syncable::NON_UNIQUE_NAME, dummy);
+ existing_entry->Put(syncable::PARENT_ID, parent_id);
+ entry_ = existing_entry.release();
+ } else {
+ return false;
+ }
+ } else {
+ entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
+ syncable::CREATE, parent_id, dummy);
+ if (!entry_->good()) {
+ return false;
+ }
+
+ // Only set IS_DIR for new entries. Don't bitflip undeleted ones.
+ entry_->Put(syncable::UNIQUE_CLIENT_TAG, tag);
+ }
+
+ // We don't support directory and tag combinations.
+ entry_->Put(syncable::IS_DIR, false);
+
+ // Will clear specifics data.
+ PutModelType(model_type);
+
+ // Now set the predecessor, which sets IS_UNSYNCED as necessary.
+ PutPredecessor(NULL);
+
+ return true;
+}
+
bool WriteNode::SetPosition(const BaseNode& new_parent,
const BaseNode* predecessor) {
// |predecessor| must be a child of |new_parent| or NULL.
@@ -715,6 +789,15 @@ bool ReadNode::InitByIdLookup(int64 id) {
return true;
}
+bool ReadNode::InitByClientTagLookup(const std::string& tag) {
+ DCHECK(!entry_) << "Init called twice";
+ if (tag.empty())
+ return false;
+ entry_ = new syncable::Entry(transaction_->GetWrappedTrans(),
+ syncable::GET_BY_CLIENT_TAG, tag);
+ return (entry_->good() && !entry_->Get(syncable::IS_DEL));
+}
+
const syncable::Entry* ReadNode::GetEntry() const {
return entry_;
}
@@ -728,7 +811,7 @@ bool ReadNode::InitByTagLookup(const std::string& tag) {
if (tag.empty())
return false;
syncable::BaseTransaction* trans = transaction_->GetWrappedTrans();
- entry_ = new syncable::Entry(trans, syncable::GET_BY_TAG, tag);
+ entry_ = new syncable::Entry(trans, syncable::GET_BY_SERVER_TAG, tag);
if (!entry_->good())
return false;
if (entry_->Get(syncable::IS_DEL))
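
For orientation, a minimal usage sketch (illustrative only, not part of this change) of the client-tag entry points added above, mirroring the new syncapi_unittest.cc later in this diff; the tag value "pref:homepage" and the helper function names are hypothetical.

  // Create-or-undelete a client-tagged, non-folder item under the root.
  void WriteTaggedItem(sync_api::UserShare* share) {
    sync_api::WriteTransaction trans(share);
    sync_api::ReadNode root(&trans);
    root.InitByRootLookup();

    sync_api::WriteNode node(&trans);
    // Creates the item, or undeletes a previously deleted item with this tag.
    if (!node.InitUniqueByCreation(syncable::PREFERENCES, root, "pref:homepage"))
      return;
    node.SetTitle(L"homepage");
  }

  // Read the item back; fails if it is missing or deleted.
  bool ReadTaggedItem(sync_api::UserShare* share) {
    sync_api::ReadTransaction trans(share);
    sync_api::ReadNode node(&trans);
    return node.InitByClientTagLookup("pref:homepage");
  }
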
diff --git a/chrome/browser/sync/engine/syncapi.h b/chrome/browser/sync/engine/syncapi.h
index c10f05c..96b869b 100644..100755
--- a/chrome/browser/sync/engine/syncapi.h
+++ b/chrome/browser/sync/engine/syncapi.h
@@ -43,6 +43,7 @@
#include "base/basictypes.h"
#include "base/file_path.h"
+#include "base/scoped_ptr.h"
#include "build/build_config.h"
#include "chrome/browser/google_service_auth_error.h"
#include "chrome/browser/sync/syncable/model_type.h"
@@ -77,7 +78,23 @@ class BaseTransaction;
class HttpPostProviderFactory;
class SyncManager;
class WriteTransaction;
-struct UserShare;
+
+// A UserShare encapsulates the syncable pieces that represent an authenticated
+// user and their data (share).
+// This encompasses all pieces required to build transaction objects on the
+// syncable share.
+struct UserShare {
+ // The DirectoryManager itself, which is the parent of Transactions and can
+ // be shared across multiple threads (unlike Directory).
+ scoped_ptr<syncable::DirectoryManager> dir_manager;
+
+ // The username of the sync user. This is empty until we have performed at
+ // least one successful GAIA authentication with this username, which means
+ // on first-run it is empty until an AUTH_SUCCEEDED event and on future runs
+ // it is set as soon as the client instructs us to authenticate for the last
+ // known valid user (AuthenticateForLastKnownUser()).
+ std::string authenticated_name;
+};
// A valid BaseNode will never have an ID of zero.
static const int64 kInvalidId = 0;
@@ -94,6 +111,11 @@ class BaseNode {
// ID will result in failure.
virtual bool InitByIdLookup(int64 id) = 0;
+ // All subclasses of BaseNode must also provide a way to initialize themselves
+ // by doing a client tag lookup. Returns false on failure. A deleted node
+  // will return false.
+ virtual bool InitByClientTagLookup(const std::string& tag) = 0;
+
// Each object is identified by a 64-bit id (internally, the syncable
// metahandle). These ids are strictly local handles. They will persist
// on this client, but the same object on a different client may have a
@@ -183,6 +205,7 @@ class WriteNode : public BaseNode {
// BaseNode implementation.
virtual bool InitByIdLookup(int64 id);
+ virtual bool InitByClientTagLookup(const std::string& tag);
// Create a new node with the specified parent and predecessor. |model_type|
// dictates the type of the item, and controls which EntitySpecifics proto
@@ -194,6 +217,16 @@ class WriteNode : public BaseNode {
const BaseNode& parent,
const BaseNode* predecessor);
+ // Create nodes using this function if they're unique items that
+ // you want to fetch using client_tag. Note that the behavior of these
+  // items is slightly different from that of normal items.
+  // Most importantly, if an item with the same tag already exists locally,
+  // this function will actually undelete it.
+ // Client unique tagged nodes must NOT be folders.
+ bool InitUniqueByCreation(syncable::ModelType model_type,
+ const BaseNode& parent,
+ const std::string& client_tag);
+
// These Set() functions correspond to the Get() functions of BaseNode.
void SetIsFolder(bool folder);
void SetTitle(const std::wstring& title);
@@ -232,6 +265,9 @@ class WriteNode : public BaseNode {
private:
void* operator new(size_t size); // Node is meant for stack use only.
+ // Helper to set model type. This will clear any specifics data.
+ void PutModelType(syncable::ModelType model_type);
+
// Helper to set the previous node.
void PutPredecessor(const BaseNode* predecessor);
@@ -274,6 +310,7 @@ class ReadNode : public BaseNode {
// BaseNode implementation.
virtual bool InitByIdLookup(int64 id);
+ virtual bool InitByClientTagLookup(const std::string& tag);
// There is always a root node, so this can't fail. The root node is
// never mutable, so root lookup is only possible on a ReadNode.
@@ -283,6 +320,7 @@ class ReadNode : public BaseNode {
// Look up the node with the particular tag. If it does not exist,
// return false. Since these nodes are special, lookup is only
// provided through ReadNode.
+ // TODO(chron): Rename this function.
bool InitByTagLookup(const std::string& tag);
// Implementation of BaseNode's abstract virtual accessors.
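
By contrast, InitByTagLookup (slated for a rename per the TODO above) still resolves server-defined tags; a short illustrative sketch, using the "bookmark_bar" server tag that appears in the migration test data later in this change:

  // Server tags identify server-created permanent folders; client tags
  // identify unique items created through InitUniqueByCreation.
  bool HasBookmarkBar(sync_api::UserShare* share) {
    sync_api::ReadTransaction trans(share);
    sync_api::ReadNode node(&trans);
    return node.InitByTagLookup("bookmark_bar");  // server-defined tag lookup
  }
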
diff --git a/chrome/browser/sync/engine/syncapi_unittest.cc b/chrome/browser/sync/engine/syncapi_unittest.cc
new file mode 100755
index 0000000..e6c6f27
--- /dev/null
+++ b/chrome/browser/sync/engine/syncapi_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Unit tests for the SyncApi. Note that a lot of the underlying
+// functionality is provided by the Syncable layer, which has its own
+// unit tests. We'll test SyncApi specific things in this harness.
+
+#include "base/scoped_ptr.h"
+#include "base/scoped_temp_dir.h"
+#include "chrome/browser/sync/engine/syncapi.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/test/sync/engine/test_directory_setter_upper.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sync_api {
+
+class SyncApiTest : public testing::Test {
+ public:
+ virtual void SetUp() {
+ setter_upper_.SetUp();
+ share_.dir_manager.reset(setter_upper_.manager());
+ share_.authenticated_name = setter_upper_.name();
+ }
+
+ virtual void TearDown() {
+ share_.dir_manager.release();
+ setter_upper_.TearDown();
+ }
+
+ protected:
+ UserShare share_;
+ browser_sync::TestDirectorySetterUpper setter_upper_;
+};
+
+TEST_F(SyncApiTest, SanityCheckTest) {
+ {
+ ReadTransaction trans(&share_);
+ EXPECT_TRUE(trans.GetWrappedTrans() != NULL);
+ }
+ {
+ WriteTransaction trans(&share_);
+ EXPECT_TRUE(trans.GetWrappedTrans() != NULL);
+ }
+ {
+    // No entries yet; only the root should exist.
+ ReadTransaction trans(&share_);
+ ReadNode node(&trans);
+    // Metahandle 1 may be the root; sanity check that metahandle 2 is absent.
+ EXPECT_FALSE(node.InitByIdLookup(2));
+ }
+}
+
+TEST_F(SyncApiTest, BasicTagWrite) {
+ {
+ WriteTransaction trans(&share_);
+ ReadNode root_node(&trans);
+ root_node.InitByRootLookup();
+ EXPECT_EQ(root_node.GetFirstChildId(), 0);
+
+ WriteNode wnode(&trans);
+ EXPECT_TRUE(wnode.InitUniqueByCreation(syncable::BOOKMARKS,
+ root_node, "testtag"));
+ wnode.SetIsFolder(false);
+ }
+ {
+ ReadTransaction trans(&share_);
+ ReadNode node(&trans);
+ EXPECT_TRUE(node.InitByClientTagLookup("testtag"));
+
+ ReadNode root_node(&trans);
+ root_node.InitByRootLookup();
+ EXPECT_NE(node.GetId(), 0);
+ EXPECT_EQ(node.GetId(), root_node.GetFirstChildId());
+ }
+}
+
+TEST_F(SyncApiTest, ReadMissingTagsFails) {
+ {
+ ReadTransaction trans(&share_);
+ ReadNode node(&trans);
+ EXPECT_FALSE(node.InitByClientTagLookup("testtag"));
+ }
+ {
+ WriteTransaction trans(&share_);
+ WriteNode node(&trans);
+ EXPECT_FALSE(node.InitByClientTagLookup("testtag"));
+ }
+}
+
+// TODO(chron): Hook this all up to the server and write full integration tests
+// for update->undelete behavior.
+TEST_F(SyncApiTest, TestDeleteBehavior) {
+
+ int64 node_id;
+ int64 folder_id;
+ std::wstring test_title(L"test1");
+
+ {
+ WriteTransaction trans(&share_);
+ ReadNode root_node(&trans);
+ root_node.InitByRootLookup();
+
+    // We'll use this spare folder later.
+ WriteNode folder_node(&trans);
+ EXPECT_TRUE(folder_node.InitByCreation(syncable::BOOKMARKS,
+ root_node, NULL));
+ folder_id = folder_node.GetId();
+
+ WriteNode wnode(&trans);
+ EXPECT_TRUE(wnode.InitUniqueByCreation(syncable::BOOKMARKS,
+ root_node, "testtag"));
+ wnode.SetIsFolder(false);
+ wnode.SetTitle(test_title);
+
+ node_id = wnode.GetId();
+ }
+
+ // Ensure we can delete something with a tag.
+ {
+ WriteTransaction trans(&share_);
+ WriteNode wnode(&trans);
+ EXPECT_TRUE(wnode.InitByClientTagLookup("testtag"));
+ EXPECT_FALSE(wnode.GetIsFolder());
+ EXPECT_EQ(wnode.GetTitle(), test_title);
+
+ wnode.Remove();
+ }
+
+  // Lookup of a deleted node should return failure,
+  // but should still have loaded some data about the node.
+ {
+ ReadTransaction trans(&share_);
+ ReadNode node(&trans);
+ EXPECT_FALSE(node.InitByClientTagLookup("testtag"));
+    // Note that for proper functioning of this API this doesn't need to be
+    // filled; we're checking just to make sure the DB worked in this test.
+ EXPECT_EQ(node.GetTitle(), test_title);
+ }
+
+ {
+ WriteTransaction trans(&share_);
+ ReadNode folder_node(&trans);
+ EXPECT_TRUE(folder_node.InitByIdLookup(folder_id));
+
+ WriteNode wnode(&trans);
+ // This will undelete the tag.
+ EXPECT_TRUE(wnode.InitUniqueByCreation(syncable::BOOKMARKS,
+ folder_node, "testtag"));
+ EXPECT_EQ(wnode.GetIsFolder(), false);
+ EXPECT_EQ(wnode.GetParentId(), folder_node.GetId());
+ EXPECT_EQ(wnode.GetId(), node_id);
+ EXPECT_NE(wnode.GetTitle(), test_title); // Title should be cleared
+ wnode.SetTitle(test_title);
+ }
+
+ // Now look up should work.
+ {
+ ReadTransaction trans(&share_);
+ ReadNode node(&trans);
+ EXPECT_TRUE(node.InitByClientTagLookup("testtag"));
+ EXPECT_EQ(node.GetTitle(), test_title);
+ EXPECT_EQ(node.GetModelType(), syncable::BOOKMARKS);
+ }
+}
+
+}  // namespace sync_api
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 2177e5b..0d78832 100755
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -64,7 +64,8 @@ using syncable::CREATE;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
-using syncable::GET_BY_TAG;
+using syncable::GET_BY_CLIENT_TAG;
+using syncable::GET_BY_SERVER_TAG;
using syncable::ID;
using syncable::IS_DEL;
using syncable::IS_DIR;
@@ -82,7 +83,8 @@ using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
-using syncable::SINGLETON_TAG;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::UNITTEST;
@@ -3525,11 +3527,14 @@ TEST_F(SyncerTest, TestUndeleteUpdate) {
mock_server_->AddUpdateDirectory(2, 1, "bar", 2, 3);
mock_server_->SetLastUpdateDeleted();
syncer_->SyncShare(this);
+
+ int64 metahandle;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(entry.good());
EXPECT_TRUE(entry.Get(IS_DEL));
+ metahandle = entry.Get(META_HANDLE);
}
mock_server_->AddUpdateDirectory(1, 0, "foo", 2, 4);
mock_server_->SetLastUpdateDeleted();
@@ -3545,6 +3550,7 @@ TEST_F(SyncerTest, TestUndeleteUpdate) {
EXPECT_TRUE(entry.Get(IS_DEL));
EXPECT_FALSE(entry.Get(SERVER_IS_DEL));
EXPECT_TRUE(entry.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_NE(entry.Get(META_HANDLE), metahandle);
}
}
@@ -3907,7 +3913,160 @@ TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
syncer_->SyncShare(this); // Now just don't explode.
}
-TEST_F(SyncerTest, SingletonTagUpdates) {
+TEST_F(SyncerTest, ClientTagServerCreatedUpdatesWork) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10);
+ mock_server_->SetLastUpdateClientTag("permfolder");
+
+ syncer_->SyncShare(this);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+ EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem1");
+ }
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+ mock_server_->SetLastUpdateClientTag("permfolder");
+ syncer_->SyncShare(this);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+ EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem_renamed");
+ }
+}
+
+TEST_F(SyncerTest, ClientTagIllegalUpdateIgnored) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem1", 1, 10);
+ mock_server_->SetLastUpdateClientTag("permfolder");
+
+ syncer_->SyncShare(this);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "permfolder");
+ EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem1");
+ EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
+ }
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+ mock_server_->SetLastUpdateClientTag("wrongtag");
+ syncer_->SyncShare(this);
+
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ // This update is rejected because it has the same ID, but a
+ // different tag than one that is already on the client.
+ // The client has a ServerKnows ID, which cannot be overwritten.
+ Entry rejected_update(&trans, GET_BY_CLIENT_TAG, "wrongtag");
+ EXPECT_FALSE(rejected_update.good());
+
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "permfolder");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(NON_UNIQUE_NAME), "permitem1");
+ }
+}
+
+TEST_F(SyncerTest, ClientTagClientCreatedConflictUpdate) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ int64 original_metahandle;
+
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+ ASSERT_TRUE(perm_folder.good());
+ perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
+ perm_folder.Put(IS_UNSYNCED, true);
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(ID).ServerKnows());
+ original_metahandle = perm_folder.Get(META_HANDLE);
+ }
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+ mock_server_->SetLastUpdateClientTag("clientperm");
+ mock_server_->set_conflict_all_commits(true);
+
+ syncer_->SyncShare(this);
+ // This should cause client tag overwrite.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(BASE_VERSION), 10);
+ // Entry should have been moved aside.
+ EXPECT_NE(perm_folder.Get(META_HANDLE), original_metahandle);
+ EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "clientperm");
+ EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem_renamed");
+
+ Entry moved_aside(&trans, GET_BY_HANDLE, original_metahandle);
+ EXPECT_TRUE(moved_aside.good());
+ EXPECT_TRUE(moved_aside.Get(IS_DEL));
+ }
+}
+
+TEST_F(SyncerTest, ClientTagOverwritesDeletedClientEntry) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+
+ {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+ ASSERT_TRUE(perm_folder.good());
+ perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
+ perm_folder.Put(IS_UNSYNCED, true);
+ perm_folder.Put(IS_DEL, true);
+ }
+
+ mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+ mock_server_->SetLastUpdateClientTag("clientperm");
+ mock_server_->set_conflict_all_commits(true);
+
+ syncer_->SyncShare(this);
+ // This should cause client tag overwrite.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(perm_folder.Get(BASE_VERSION), 10);
+ EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "clientperm");
+ EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem_renamed");
+ }
+}
+
+TEST_F(SyncerTest, UniqueServerTagUpdates) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
// As a hurdle, introduce an item whose name is the same as the tag value
@@ -3918,21 +4077,21 @@ TEST_F(SyncerTest, SingletonTagUpdates) {
Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
ASSERT_TRUE(hurdle.good());
ASSERT_TRUE(!hurdle.Get(IS_DEL));
- ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+ ASSERT_TRUE(hurdle.Get(UNIQUE_SERVER_TAG).empty());
ASSERT_TRUE(hurdle.Get(NON_UNIQUE_NAME) == "bob");
// Try to lookup by the tagname. These should fail.
- Entry tag_alpha(&trans, GET_BY_TAG, "alpha");
+ Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
EXPECT_FALSE(tag_alpha.good());
- Entry tag_bob(&trans, GET_BY_TAG, "bob");
+ Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
EXPECT_FALSE(tag_bob.good());
}
// Now download some tagged items as updates.
mock_server_->AddUpdateDirectory(1, 0, "update1", 1, 10);
- mock_server_->SetLastUpdateSingletonTag("alpha");
+ mock_server_->SetLastUpdateServerTag("alpha");
mock_server_->AddUpdateDirectory(2, 0, "update2", 2, 20);
- mock_server_->SetLastUpdateSingletonTag("bob");
+ mock_server_->SetLastUpdateServerTag("bob");
syncer_->SyncShare(this);
{
@@ -3940,21 +4099,21 @@ TEST_F(SyncerTest, SingletonTagUpdates) {
// The new items should be applied as new entries, and we should be able
// to look them up by their tag values.
- Entry tag_alpha(&trans, GET_BY_TAG, "alpha");
+ Entry tag_alpha(&trans, GET_BY_SERVER_TAG, "alpha");
ASSERT_TRUE(tag_alpha.good());
ASSERT_TRUE(!tag_alpha.Get(IS_DEL));
- ASSERT_TRUE(tag_alpha.Get(SINGLETON_TAG) == "alpha");
+ ASSERT_TRUE(tag_alpha.Get(UNIQUE_SERVER_TAG) == "alpha");
ASSERT_TRUE(tag_alpha.Get(NON_UNIQUE_NAME) == "update1");
- Entry tag_bob(&trans, GET_BY_TAG, "bob");
+ Entry tag_bob(&trans, GET_BY_SERVER_TAG, "bob");
ASSERT_TRUE(tag_bob.good());
ASSERT_TRUE(!tag_bob.Get(IS_DEL));
- ASSERT_TRUE(tag_bob.Get(SINGLETON_TAG) == "bob");
+ ASSERT_TRUE(tag_bob.Get(UNIQUE_SERVER_TAG) == "bob");
ASSERT_TRUE(tag_bob.Get(NON_UNIQUE_NAME) == "update2");
// The old item should be unchanged.
Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
ASSERT_TRUE(hurdle.good());
ASSERT_TRUE(!hurdle.Get(IS_DEL));
- ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
+ ASSERT_TRUE(hurdle.Get(UNIQUE_SERVER_TAG).empty());
ASSERT_TRUE(hurdle.Get(NON_UNIQUE_NAME) == "bob");
}
}
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
index 4ae201d..d42305f 100755
--- a/chrome/browser/sync/engine/syncer_util.cc
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -55,7 +55,8 @@ using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
-using syncable::SINGLETON_TAG;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::SYNCER;
using syncable::WriteTransaction;
@@ -129,6 +130,61 @@ void SyncerUtil::ChangeEntryIDAndUpdateChildren(
}
// static
+void SyncerUtil::AttemptReuniteClientTag(syncable::WriteTransaction* trans,
+ const SyncEntity& server_entry) {
+
+  // Expected preconditions of this function:
+ // SyncEntity has NOT been applied to SERVER fields.
+ // SyncEntity has NOT been applied to LOCAL fields.
+ // DB has not yet been modified, no entries created for this update.
+
+ // When a server sends down a client tag, the following cases can occur:
+ // 1) Client has entry for tag already, ID is server style, matches
+ // 2) Client has entry for tag already, ID is server, doesn't match.
+ // 3) Client has entry for tag already, ID is local, (never matches)
+ // 4) Client has no entry for tag
+
+ // Case 1, we don't have to do anything since the update will
+ // work just fine. Update will end up in the proper entry, via ID lookup.
+  // Case 2 - Should never happen. We'd need to change the
+  // ID of the local entry, which we refuse to do. We'll skip this in VERIFY.
+ // Case 3 - We need to replace the local ID with the server ID. Conflict
+ // resolution must occur, but this is prior to update application! This case
+ // should be rare. For now, clobber client changes entirely.
+ // Case 4 - Perfect. Same as case 1.
+
+ syncable::MutableEntry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
+ server_entry.client_defined_unique_tag());
+
+ // The SyncAPI equivalent of this function will return !good if IS_DEL.
+ // The syncable version will return good even if IS_DEL.
+ // TODO(chron): Unit test the case with IS_DEL and make sure.
+ if (local_entry.good()) {
+ if (local_entry.Get(ID).ServerKnows()) {
+ // In release config, this will just continue and we'll
+ // throw VERIFY_FAIL later.
+ // This is Case 1 on success, Case 2 if it fails.
+ DCHECK(local_entry.Get(ID) == server_entry.id());
+ } else {
+ // Case 3: We have a local entry with the same client tag.
+ // We can't have two updates with the same client tag though.
+ // One of these has to go. Let's delete the client entry and move it
+ // aside. This will cause a delete + create. The client API user must
+ // handle this correctly. In this situation the client must have created
+ // this entry but not yet committed it for the first time. Usually the
+ // client probably wants the server data for this instead.
+ // Other strategies to handle this are a bit flaky.
+ DCHECK(local_entry.Get(IS_UNAPPLIED_UPDATE) == false);
+ local_entry.Put(IS_UNSYNCED, false);
+ local_entry.Put(IS_DEL, true);
+ // Needs to get out of the index before our update can be put in.
+ local_entry.Put(UNIQUE_CLIENT_TAG, "");
+ }
+ }
+ // Case 4: Client has no entry for tag, all green.
+}
+
+// static
void SyncerUtil::AttemptReuniteLostCommitResponses(
syncable::WriteTransaction* trans,
const SyncEntity& server_entry,
@@ -288,9 +344,13 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
local_entry->Put(SERVER_MTIME,
ServerTimeToClientTime(server_entry.mtime()));
local_entry->Put(SERVER_IS_DIR, server_entry.IsFolder());
- if (server_entry.has_singleton_tag()) {
- const string& tag = server_entry.singleton_tag();
- local_entry->Put(SINGLETON_TAG, tag);
+ if (server_entry.has_server_defined_unique_tag()) {
+ const string& tag = server_entry.server_defined_unique_tag();
+ local_entry->Put(UNIQUE_SERVER_TAG, tag);
+ }
+ if (server_entry.has_client_defined_unique_tag()) {
+ const string& tag = server_entry.client_defined_unique_tag();
+ local_entry->Put(UNIQUE_CLIENT_TAG, tag);
}
// Store the datatype-specific part as a protobuf.
if (server_entry.has_specifics()) {
@@ -300,7 +360,7 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
} else if (server_entry.has_bookmarkdata()) {
// Legacy protocol response for bookmark data.
const SyncEntity::BookmarkData& bookmark = server_entry.bookmarkdata();
- UpdateBookmarkSpecifics(server_entry.singleton_tag(),
+ UpdateBookmarkSpecifics(server_entry.server_defined_unique_tag(),
bookmark.bookmark_url(),
bookmark.bookmark_favicon(),
local_entry);
@@ -707,6 +767,7 @@ VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
// back into a state that would pass CheckTreeInvariants().
if (same_id->Get(IS_DEL)) {
same_id->Put(ID, trans->directory()->NextId());
+ same_id->Put(UNIQUE_CLIENT_TAG, "");
same_id->Put(BASE_VERSION, CHANGES_VERSION);
same_id->Put(SERVER_VERSION, 0);
return VERIFY_SUCCESS;
diff --git a/chrome/browser/sync/engine/syncer_util.h b/chrome/browser/sync/engine/syncer_util.h
index 3f8d371..eaba33e 100755
--- a/chrome/browser/sync/engine/syncer_util.h
+++ b/chrome/browser/sync/engine/syncer_util.h
@@ -39,6 +39,12 @@ class SyncerUtil {
syncable::MutableEntry* entry,
const syncable::Id& new_id);
+ // If the server sent down a client tagged entry, we have to affix it to
+ // the correct local entry.
+ static void AttemptReuniteClientTag(
+ syncable::WriteTransaction* trans,
+ const SyncEntity& server_entry);
+
static void AttemptReuniteLostCommitResponses(
syncable::WriteTransaction* trans,
const SyncEntity& server_entry,
diff --git a/chrome/browser/sync/engine/syncproto.h b/chrome/browser/sync/engine/syncproto.h
index ba087df..c34a709 100644
--- a/chrome/browser/sync/engine/syncproto.h
+++ b/chrome/browser/sync/engine/syncproto.h
@@ -68,7 +68,7 @@ class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
// Loose check for server-created top-level folders that aren't
// bound to a particular model type.
- if (!singleton_tag().empty() && IsFolder())
+ if (!server_defined_unique_tag().empty() && IsFolder())
return syncable::TOP_LEVEL_FOLDER;
// This is an item of a datatype we can't understand. Maybe it's
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
index 76b3851..d84c38c 100755
--- a/chrome/browser/sync/engine/verify_updates_command.cc
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -48,11 +48,31 @@ void VerifyUpdatesCommand::ExecuteImpl(sessions::SyncSession* session) {
// ID in fact, there isn't actually a need for server_knows and not IDs.
SyncerUtil::AttemptReuniteLostCommitResponses(&trans, entry,
trans.directory()->cache_guid());
+
+ // Affix IDs properly prior to inputting data into server entry.
+ SyncerUtil::AttemptReuniteClientTag(&trans, entry);
+
VerifyResult result = VerifyUpdate(&trans, entry);
status->mutable_update_progress()->AddVerifyResult(result, entry);
}
}
+namespace {
+// In the event that IDs match but tags differ, AttemptReuniteClientTag
+// will have refused to unify the update.
+// We should not attempt to apply it at all since it violates consistency
+// rules.
+VerifyResult VerifyTagConsistency(const SyncEntity& entry,
+ const syncable::MutableEntry& same_id) {
+ if (entry.has_client_defined_unique_tag() &&
+ entry.client_defined_unique_tag() !=
+ same_id.Get(syncable::UNIQUE_CLIENT_TAG)) {
+ return VERIFY_FAIL;
+ }
+ return VERIFY_UNDECIDED;
+}
+} // namespace
+
VerifyResult VerifyUpdatesCommand::VerifyUpdate(
syncable::WriteTransaction* trans, const SyncEntity& entry) {
syncable::Id id = entry.id();
@@ -82,6 +102,10 @@ VerifyResult VerifyUpdatesCommand::VerifyUpdate(
result = SyncerUtil::VerifyNewEntry(entry, &same_id, deleted);
if (VERIFY_UNDECIDED == result) {
+ result = VerifyTagConsistency(entry, same_id);
+ }
+
+ if (VERIFY_UNDECIDED == result) {
if (deleted)
result = VERIFY_SUCCESS;
}
diff --git a/chrome/browser/sync/protocol/sync.proto b/chrome/browser/sync/protocol/sync.proto
index ffc69a7..0b25a23 100644..100755
--- a/chrome/browser/sync/protocol/sync.proto
+++ b/chrome/browser/sync/protocol/sync.proto
@@ -121,14 +121,23 @@ message SyncEntity {
// Present only in GetUpdatesResponse.
optional int64 sync_timestamp = 9;
- // If present, singleton_tag identifies this item as being a uniquely
+ // If present, this tag identifies this item as being a uniquely
// instanced item. The server ensures that there is never more
- // than one entity in a user's store with the same singleton_tag value.
+ // than one entity in a user's store with the same tag value.
// This value is used to identify and find e.g. the "Google Chrome" settings
// folder without relying on it existing at a particular path, or having
// a particular name, in the data store.
+ //
+ // This variant of the tag is created by the server, so clients can't create
+ // an item with a tag using this field.
+ //
+ // Use client_defined_unique_tag if you want to create one from the client.
+ //
+ // An item can't have both a client_defined_unique_tag and
+ // a server_defined_unique_tag.
+ //
// Present only in GetUpdatesResponse.
- optional string singleton_tag = 10;
+ optional string server_defined_unique_tag = 10;
// If this group is present, it implies that this SyncEntity corresponds to
// a bookmark or a bookmark folder.
@@ -197,6 +206,35 @@ message SyncEntity {
// Indicate whether this is a folder or not. Available in version 23+.
optional bool folder = 22 [default = false];
+
+ // A client defined unique hash for this entity.
+ // Similar to server_defined_unique_tag.
+ //
+ // When initially committing an entity, a client can request that the entity
+ // is unique per that account. To do so, the client should specify a
+  // client_defined_unique_tag. At most one entity per tag value may exist
+ // per account. The server will enforce uniqueness on this tag
+ // and fail attempts to create duplicates of this tag.
+ // Will be returned in any updates for this entity.
+ //
+ // The difference between server_defined_unique_tag and
+ // client_defined_unique_tag is the creator of the entity. Server defined
+ // tags are entities created by the server at account creation,
+ // while client defined tags are entities created by the client at any time.
+ //
+ // During GetUpdates, a sync entity update will come back with ONE of:
+ // a) Originator and cache id - If client committed the item as non "unique"
+ // b) Server tag - If server committed the item as unique
+ // c) Client tag - If client committed the item as unique
+ //
+ // May be present in CommitMessages for the initial creation of an entity.
+ // If present in Commit updates for the entity, it will be ignored.
+ //
+ // Available in version 24+.
+ //
+ // May be returned in GetUpdatesMessage and sent up in CommitMessage.
+ //
+ optional string client_defined_unique_tag = 23;
};
message CommitMessage {
@@ -252,7 +290,7 @@ message AuthenticateMessage {
message ClientToServerMessage {
required string share = 1;
- optional int32 protocol_version = 2 [default = 23];
+ optional int32 protocol_version = 2 [default = 24];
enum Contents {
COMMIT = 1;
GET_UPDATES = 2;
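
For reference, a hedged sketch (not part of this change) of how a committing client might populate the new field through the generated C++ protobuf API; the tag value is hypothetical.

  sync_pb::SyncEntity entity;
  entity.set_folder(false);  // the client-side API forbids folders with client tags
  entity.set_client_defined_unique_tag("pref:homepage");  // hypothetical value
  // server_defined_unique_tag is only ever assigned by the server (e.g. the
  // "google_chrome" folder); an entity carries at most one of the two tags.
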
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index 216e785..20c6581 100755
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -38,7 +38,7 @@ namespace syncable {
static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
-extern const int32 kCurrentDBVersion = 69; // Extern only for our unittest.
+extern const int32 kCurrentDBVersion = 70; // Extern only for our unittest.
namespace {
@@ -315,6 +315,11 @@ DirOpenResult DirectoryBackingStore::InitializeTables() {
version_on_disk = 69;
}
+ if (version_on_disk == 69) {
+ if (MigrateVersion69To70())
+ version_on_disk = 70;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -659,6 +664,26 @@ bool DirectoryBackingStore::MigrateVersion67To68() {
return true;
}
+bool DirectoryBackingStore::MigrateVersion69To70() {
+ // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
+ SetVersion(70);
+  // We use the current metas column names here, but if we rename these
+  // columns again in the future, we will need to inline the old
+  // intermediate name / column spec.
+ if (!AddColumn(&g_metas_columns[UNIQUE_SERVER_TAG])) {
+ return false;
+ }
+ if (!AddColumn(&g_metas_columns[UNIQUE_CLIENT_TAG])) {
+ return false;
+ }
+ needs_column_refresh_ = true;
+
+ SQLStatement statement;
+ statement.prepare(load_dbhandle_,
+ "UPDATE metas SET unique_server_tag = singleton_tag");
+ return statement.step() == SQLITE_DONE;
+}
+
namespace {
// Callback passed to MigrateToSpecifics for the v68->v69 migration. See
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
index a4294ae..65bf1c8 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.h
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -77,6 +77,7 @@ class DirectoryBackingStore {
private:
FRIEND_TEST(DirectoryBackingStoreTest, MigrateVersion67To68);
FRIEND_TEST(DirectoryBackingStoreTest, MigrateVersion68To69);
+ FRIEND_TEST(DirectoryBackingStoreTest, MigrateVersion69To70);
FRIEND_TEST(MigrationTest, ToCurrentVersion);
// General Directory initialization and load helpers.
@@ -138,6 +139,7 @@ class DirectoryBackingStore {
// Individual version migrations.
bool MigrateVersion67To68();
bool MigrateVersion68To69();
+ bool MigrateVersion69To70();
// The handle to our sqlite on-disk store for initialization and loading, and
// for saving changes periodically via SaveChanges, respectively.
diff --git a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
index 2bdded7..70cd23c 100755
--- a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
@@ -43,6 +43,7 @@ class MigrationTest : public testing::TestWithParam<int> {
}
void SetUpVersion67Database();
void SetUpVersion68Database();
+ void SetUpVersion69Database();
private:
ScopedTempDir temp_dir_;
@@ -282,6 +283,127 @@ void MigrationTest::SetUpVersion68Database() {
ASSERT_TRUE(connection.CommitTransaction());
}
+void MigrationTest::SetUpVersion69Database() {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ ASSERT_TRUE(connection.BeginTransaction());
+ ASSERT_TRUE(connection.Execute(
+ "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
+ "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
+ "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
+ "base_version bigint default -1,server_version bigint default 0,"
+ "mtime bigint default 0,server_mtime bigint default 0,"
+ "ctime bigint default 0,server_ctime bigint default 0,"
+ "server_position_in_parent bigint default 0,"
+ "local_external_id bigint default 0,id varchar(255) default 'r',"
+ "parent_id varchar(255) default 'r',"
+ "server_parent_id varchar(255) default 'r',"
+ "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
+ "is_unsynced bit default 0,is_unapplied_update bit default 0,"
+ "is_del bit default 0,is_dir bit default 0,"
+ "is_bookmark_object bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,"
+ "server_is_bookmark_object bit default 0,"
+ "non_unique_name varchar,server_non_unique_name varchar(255),"
+ "bookmark_url varchar,server_bookmark_url varchar,"
+ "singleton_tag varchar,bookmark_favicon blob,"
+ "server_bookmark_favicon blob, specifics blob, "
+ "server_specifics blob);"
+ "INSERT INTO metas VALUES(1,-1,0,129079956640320000,0,129079956640320000,"
+ "0,0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,NULL,NULL,NULL,NULL,"
+ "NULL,NULL,X'',X'');"
+ "INSERT INTO metas VALUES(2,669,669,128976886618480000,"
+ "128976886618480000,128976886618480000,128976886618480000,-2097152,"
+ "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
+ "'Deleted Item','Deleted Item','http://www.google.com/',"
+ "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG',"
+ "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841415"
+ "34741534741',X'C28810260A17687474703A2F2F7777772E676F6F676C652E636F"
+ "6D2F32120B4153414447414447414447');"
+ "INSERT INTO metas VALUES(4,681,681,129002163642690000,"
+ "129002163642690000,129002163642690000,129002163642690000,-3145728,"
+ "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
+ "'Welcome to Chromium','Welcome to Chromium',"
+ "'http://www.google.com/chrome/intl/en/welcome.html',"
+ "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,NULL,"
+ "X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
+ "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A3168"
+ "7474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F6"
+ "56E2F77656C636F6D652E68746D6C1200');"
+ "INSERT INTO metas VALUES(5,677,677,129001555500000000,"
+ "129001555500000000,129001555500000000,129001555500000000,1048576,7,"
+ "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
+ "'Google','Google','http://www.google.com/',"
+ "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG',X'C28810220A166"
+ "87474703A2F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'"
+ "C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464"
+ "447415347');"
+ "INSERT INTO metas VALUES(6,694,694,129053976170000000,"
+ "129053976170000000,129053976170000000,129053976170000000,-4194304,6"
+ ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,'The Internet',"
+ "'The Internet',NULL,NULL,NULL,NULL,NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO metas VALUES(7,663,663,128976864758480000,"
+ "128976864758480000,128976864758480000,128976864758480000,1048576,0,"
+ "'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,'Google Chrome',"
+ "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL,NULL,NULL);"
+ "INSERT INTO metas VALUES(8,664,664,128976864758480000,"
+ "128976864758480000,128976864758480000,128976864758480000,1048576,0,"
+ "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
+ "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL,"
+ "X'C2881000',X'C2881000');"
+ "INSERT INTO metas VALUES(9,665,665,128976864758480000,"
+ "128976864758480000,128976864758480000,128976864758480000,1048576,1,"
+ "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,1,"
+ "'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,NULL,"
+ "X'C2881000',X'C2881000');"
+ "INSERT INTO metas VALUES(10,666,666,128976864758480000,"
+ "128976864758480000,128976864758480000,128976864758480000,2097152,2,"
+ "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
+ "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
+ "NULL,NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO metas VALUES(11,683,683,129079956948440000,"
+ "129079956948440000,129079956948440000,129079956948440000,-1048576,"
+ "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
+ "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
+ "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
+ "'AGATWA','AFAGVASF',X'C28810220A18687474703A2F2F6465762E6368726F6D6"
+ "9756D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F646576"
+ "2E6368726F6D69756D2E6F72672F6F7468657212084146414756415346');"
+ "INSERT INTO metas VALUES(12,685,685,129079957513650000,"
+ "129079957513650000,129079957513650000,129079957513650000,0,9,"
+ "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
+ "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL,"
+ "X'C2881000',X'C2881000');"
+ "INSERT INTO metas VALUES(13,687,687,129079957985300000,"
+ "129079957985300000,129079957985300000,129079957985300000,-917504,"
+ "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
+ "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
+ "'ICANN | Internet Corporation for Assigned Names and Numbers',"
+ "'http://www.icann.com/','http://www.icann.com/',NULL,'PNGAXF0AAFF',"
+ "'DAAFASF',X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
+ "20B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963"
+ "616E6E2E636F6D2F120744414146415346');"
+ "INSERT INTO metas VALUES(14,692,692,129079958383000000,"
+ "129079958383000000,129079958383000000,129079958383000000,1048576,11,"
+ "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
+ "'The WebKit Open Source Project','The WebKit Open Source Project',"
+ "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y',"
+ "X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',X'C2"
+ "88101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259');"
+ "CREATE TABLE share_info (id VARCHAR(128) primary key, "
+ "last_sync_timestamp INT, name VARCHAR(128), "
+ "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
+ "db_create_version VARCHAR(128), db_create_time int, "
+ "next_id bigint default -2, cache_guid VARCHAR(32));"
+ "INSERT INTO share_info VALUES('nick@chromium.org',694,"
+ "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
+ "'Unknown',1263522064,-65542,"
+ "'9010788312004066376x-6609234393368420856x');"
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO share_version VALUES('nick@chromium.org',69);"
+ ));
+ ASSERT_TRUE(connection.CommitTransaction());
+}
TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
SetUpVersion67Database();
@@ -289,7 +411,7 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
sql::Connection connection;
ASSERT_TRUE(connection.Open(GetDatabasePath()));
- // Columns existing befor version 67.
+ // Columns existing before version 67.
ASSERT_TRUE(connection.DoesColumnExist("metas", "name"));
ASSERT_TRUE(connection.DoesColumnExist("metas", "unsanitized_name"));
ASSERT_TRUE(connection.DoesColumnExist("metas", "server_name"));
@@ -345,6 +467,39 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion68To69) {
ASSERT_FALSE(s.Step());
}
+TEST_F(DirectoryBackingStoreTest, MigrateVersion69To70) {
+ SetUpVersion69Database();
+
+ {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "singleton_tag"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_client_tag"));
+ }
+
+ scoped_ptr<DirectoryBackingStore> dbs(
+ new DirectoryBackingStore(GetUsername(), GetDatabasePath()));
+
+ dbs->BeginLoad();
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion69To70());
+ ASSERT_EQ(70, dbs->GetVersion());
+ dbs->EndLoad();
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
+ sql::Statement s(connection.GetUniqueStatement("SELECT id"
+ " FROM metas WHERE unique_server_tag = 'google_chrome'"));
+ ASSERT_TRUE(s.Step());
+ EXPECT_EQ("s_ID_7", s.ColumnString(0));
+}
+
TEST_P(MigrationTest, ToCurrentVersion) {
switch (GetParam()) {
case 67:
@@ -353,6 +508,9 @@ TEST_P(MigrationTest, ToCurrentVersion) {
case 68:
SetUpVersion68Database();
break;
+ case 69:
+ SetUpVersion69Database();
+ break;
default:
FAIL() << "Need to supply database dump for version " << GetParam();
}
@@ -385,6 +543,11 @@ TEST_P(MigrationTest, ToCurrentVersion) {
ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_favicon"));
ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_url"));
ASSERT_FALSE(connection.DoesColumnExist("metas", "server_bookmark_url"));
+
+  // Columns renamed and added in version 70.
+ ASSERT_FALSE(connection.DoesColumnExist("metas", "singleton_tag"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
}
MetahandlesIndex index;
@@ -410,7 +573,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
(*it)->ref(SERVER_SPECIFICS).GetExtension(sync_pb::bookmark).url());
EXPECT_EQ("ASADGADGADG",
(*it)->ref(SERVER_SPECIFICS).GetExtension(sync_pb::bookmark).favicon());
- EXPECT_EQ("", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_EQ("Deleted Item", (*it)->ref(NON_UNIQUE_NAME));
EXPECT_EQ("Deleted Item", (*it)->ref(SERVER_NON_UNIQUE_NAME));
@@ -439,19 +602,19 @@ TEST_P(MigrationTest, ToCurrentVersion) {
ASSERT_TRUE(++it != index.end());
ASSERT_EQ(7, (*it)->ref(META_HANDLE));
- EXPECT_EQ("google_chrome", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("google_chrome", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_FALSE((*it)->ref(SPECIFICS).HasExtension(sync_pb::bookmark));
EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).HasExtension(sync_pb::bookmark));
ASSERT_TRUE(++it != index.end());
ASSERT_EQ(8, (*it)->ref(META_HANDLE));
- EXPECT_EQ("google_chrome_bookmarks", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("google_chrome_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_TRUE((*it)->ref(SPECIFICS).HasExtension(sync_pb::bookmark));
EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).HasExtension(sync_pb::bookmark));
ASSERT_TRUE(++it != index.end());
ASSERT_EQ(9, (*it)->ref(META_HANDLE));
- EXPECT_EQ("bookmark_bar", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("bookmark_bar", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_TRUE((*it)->ref(SPECIFICS).HasExtension(sync_pb::bookmark));
EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).HasExtension(sync_pb::bookmark));
@@ -467,7 +630,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
(*it)->ref(SERVER_SPECIFICS).GetExtension(sync_pb::bookmark).has_url());
EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).
GetExtension(sync_pb::bookmark).has_favicon());
- EXPECT_EQ("other_bookmarks", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("other_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_EQ("Other Bookmarks", (*it)->ref(NON_UNIQUE_NAME));
EXPECT_EQ("Other Bookmarks", (*it)->ref(SERVER_NON_UNIQUE_NAME));
@@ -485,7 +648,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
(*it)->ref(SERVER_SPECIFICS).GetExtension(sync_pb::bookmark).url());
EXPECT_EQ("AFAGVASF",
(*it)->ref(SERVER_SPECIFICS).GetExtension(sync_pb::bookmark).favicon());
- EXPECT_EQ("", (*it)->ref(SINGLETON_TAG));
+ EXPECT_EQ("", (*it)->ref(UNIQUE_SERVER_TAG));
EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(NON_UNIQUE_NAME));
EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(SERVER_NON_UNIQUE_NAME));
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
index 09f75b5..f656e05 100755
--- a/chrome/browser/sync/syncable/syncable.cc
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -161,6 +161,7 @@ Directory::Kernel::Kernel(const FilePath& db_path,
metahandles_index(new Directory::MetahandlesIndex),
ids_index(new Directory::IdsIndex),
parent_id_child_index(new Directory::ParentIdChildIndex),
+ client_tag_index(new Directory::ClientTagIndex),
extended_attributes(new ExtendedAttributes),
unapplied_update_metahandles(new MetahandleSet),
unsynced_metahandles(new MetahandleSet),
@@ -196,6 +197,7 @@ Directory::Kernel::~Kernel() {
delete unapplied_update_metahandles;
delete extended_attributes;
delete parent_id_child_index;
+ delete client_tag_index;
delete ids_index;
for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry);
delete metahandles_index;
@@ -222,6 +224,9 @@ void Directory::InitializeIndices() {
if (!entry->ref(IS_DEL))
kernel_->parent_id_child_index->insert(entry);
kernel_->ids_index->insert(entry);
+ if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
+ kernel_->client_tag_index->insert(entry);
+ }
if (entry->ref(IS_UNSYNCED))
kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
if (entry->ref(IS_UNAPPLIED_UPDATE))
@@ -278,17 +283,29 @@ EntryKernel* Directory::GetEntryById(const Id& id) {
EntryKernel* Directory::GetEntryById(const Id& id,
ScopedKernelLock* const lock) {
DCHECK(kernel_);
- // First look up in memory
+  // Find it in the in-memory ID index.
kernel_->needle.put(ID, id);
IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
if (id_found != kernel_->ids_index->end()) {
- // Found it in memory. Easy.
return *id_found;
}
return NULL;
}
-EntryKernel* Directory::GetEntryByTag(const string& tag) {
+EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
+ ScopedKernelLock lock(this);
+ DCHECK(kernel_);
+ // Find it in the ClientTagIndex.
+ kernel_->needle.put(UNIQUE_CLIENT_TAG, tag);
+ ClientTagIndex::iterator found = kernel_->client_tag_index->find(
+ &kernel_->needle);
+ if (found != kernel_->client_tag_index->end()) {
+ return *found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
ScopedKernelLock lock(this);
DCHECK(kernel_);
// We don't currently keep a separate index for the tags. Since tags
@@ -298,7 +315,7 @@ EntryKernel* Directory::GetEntryByTag(const string& tag) {
// looking for a match.
MetahandlesIndex& set = *kernel_->metahandles_index;
for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
- if ((*i)->ref(SINGLETON_TAG) == tag) {
+ if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) {
return *i;
}
}
@@ -417,6 +434,9 @@ void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
CHECK(kernel_->parent_id_child_index->insert(entry).second) << error;
}
CHECK(kernel_->ids_index->insert(entry).second) << error;
+
+ // New entries should NEVER be inserted with a client tag already set.
+ CHECK(entry->ref(UNIQUE_CLIENT_TAG).empty());
}
void Directory::Undelete(EntryKernel* const entry) {
@@ -548,6 +568,9 @@ void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
DCHECK(1 == num_erased);
num_erased = kernel_->metahandles_index->erase(entry);
DCHECK(1 == num_erased);
+
+ num_erased = kernel_->client_tag_index->erase(entry); // Might not be in it
+ DCHECK(!entry->ref(UNIQUE_CLIENT_TAG).empty() == num_erased);
delete entry;
}
}
@@ -798,6 +821,7 @@ void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
++entries_done;
continue;
}
+
if (!e.Get(IS_DEL)) {
CHECK(id != parentid) << e;
CHECK(!e.Get(NON_UNIQUE_NAME).empty()) << e;
@@ -823,6 +847,11 @@ void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
CHECK(e.Get(IS_DEL)) << e;
CHECK(id.ServerKnows()) << e;
} else {
+ if (e.Get(IS_DIR)) {
+ // TODO(chron): Implement this mode if clients ever need it.
+ // For now, you can't combine a client tag and a directory.
+ CHECK(e.Get(UNIQUE_CLIENT_TAG).empty()) << e;
+ }
// Uncommitted item.
if (!e.Get(IS_DEL)) {
CHECK(e.Get(IS_UNSYNCED)) << e;
@@ -967,9 +996,14 @@ Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
kernel_ = trans->directory()->GetEntryById(id);
}
-Entry::Entry(BaseTransaction* trans, GetByTag, const string& tag)
+Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
: basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByTag(tag);
+ kernel_ = trans->directory()->GetEntryByClientTag(tag);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByServerTag(tag);
}
Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
@@ -1007,7 +1041,7 @@ syncable::ModelType Entry::GetServerModelType() const {
return TOP_LEVEL_FOLDER;
// Loose check for server-created top-level folders that aren't
// bound to a particular model type.
- if (!Get(SINGLETON_TAG).empty() && Get(SERVER_IS_DIR))
+ if (!Get(UNIQUE_SERVER_TAG).empty() && Get(SERVER_IS_DIR))
return TOP_LEVEL_FOLDER;
// Otherwise, we don't have a server type yet. That should only happen
@@ -1032,7 +1066,7 @@ syncable::ModelType Entry::GetModelType() const {
return TOP_LEVEL_FOLDER;
// Loose check for server-created top-level folders that aren't
// bound to a particular model type.
- if (!Get(SINGLETON_TAG).empty() && Get(IS_DIR))
+ if (!Get(UNIQUE_SERVER_TAG).empty() && Get(IS_DIR))
return TOP_LEVEL_FOLDER;
// Otherwise, we don't have a local type yet. That should only happen
@@ -1081,7 +1115,7 @@ void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
const Id& id)
- : Entry(trans), write_transaction_(trans) {
+ : Entry(trans), write_transaction_(trans) {
Entry same_id(trans, GET_BY_ID, id);
if (same_id.good()) {
kernel_ = NULL; // already have an item with this ID.
@@ -1100,13 +1134,19 @@ MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
- : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
+ : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
trans->SaveOriginal(kernel_);
}
MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
int64 metahandle)
- : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
+ : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
+ const std::string& tag)
+ : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
trans->SaveOriginal(kernel_);
}
@@ -1170,8 +1210,45 @@ bool MutableEntry::Put(StringField field, const string& value) {
return PutImpl(field, value);
}
+bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
+ // There is no SERVER_UNIQUE_CLIENT_TAG. This field is similar to ID.
+ string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
+ if (old_tag == new_tag) {
+ return true;
+ }
+
+ if (!new_tag.empty()) {
+ // Make sure the new tag is not already in use by another entry.
+ EntryKernel lookup_kernel_ = *kernel_;
+ lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
+ bool new_tag_conflicts =
+ (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
+ if (new_tag_conflicts) {
+ return false;
+ }
+
+ // The new tag is free to use. Erase this entry from the index under its
+ // old tag (a no-op if the old tag was empty), then re-insert it under the
+ // new tag.
+ dir()->kernel_->client_tag_index->erase(kernel_);
+ kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
+ kernel_->mark_dirty();
+ CHECK(dir()->kernel_->client_tag_index->insert(kernel_).second);
+ } else {
+ // The new tag is empty. Since the old tag differs from the new tag, the
+ // old tag is non-empty and must therefore already be in the index.
+ CHECK(dir()->kernel_->client_tag_index->erase(kernel_));
+ kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
+ kernel_->mark_dirty();
+ }
+ return true;
+}
+
bool MutableEntry::PutImpl(StringField field, const string& value) {
DCHECK(kernel_);
+ if (field == UNIQUE_CLIENT_TAG) {
+ return PutUniqueClientTag(value);
+ }
+
if (kernel_->ref(field) != value) {
kernel_->put(field, value);
kernel_->mark_dirty();
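
The syncable.cc hunks above add the client-tag lookup path (GetEntryByClientTag and the GET_BY_CLIENT_TAG Entry/MutableEntry constructors) and route Put(UNIQUE_CLIENT_TAG, ...) through PutUniqueClientTag(), which enforces uniqueness against the new in-memory index. A rough usage sketch, mirroring the unit tests added later in this patch; the entry name and tag string are illustrative only:

// Sketch only: create an entry, give it a client-unique tag, then find it
// again by that tag.  "Example" and "example-tag" are made-up values.
void TagAndFindSketch(syncable::Directory* dir) {
  using namespace syncable;
  {
    WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
    MutableEntry entry(&wtrans, CREATE, wtrans.root_id(), "Example");
    CHECK(entry.good());
    entry.Put(IS_UNSYNCED, true);
    // Routed through PutUniqueClientTag(); returns false if another entry
    // (even a deleted one) already owns the tag.
    CHECK(entry.Put(UNIQUE_CLIENT_TAG, "example-tag"));
  }
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    Entry by_tag(&trans, GET_BY_CLIENT_TAG, "example-tag");
    CHECK(by_tag.good());  // Looked up via the new client tag index.
  }
}

Clearing the tag later with Put(UNIQUE_CLIENT_TAG, "") removes the entry from the index again, as exercised by TestClientTagClear below.
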
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index be091d0..fd328b3 100755
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -136,7 +136,8 @@ enum StringField {
// A tag string which identifies this node as a particular top-level
// permanent object. The tag can be thought of as a unique key that
// identifies a singleton instance.
- SINGLETON_TAG,
+ UNIQUE_SERVER_TAG, // Tagged by the server
+ UNIQUE_CLIENT_TAG, // Tagged by the client
STRING_FIELDS_END,
};
@@ -192,8 +193,12 @@ enum GetById {
GET_BY_ID
};
-enum GetByTag {
- GET_BY_TAG
+enum GetByClientTag {
+ GET_BY_CLIENT_TAG
+};
+
+enum GetByServerTag {
+ GET_BY_SERVER_TAG
};
enum GetByHandle {
@@ -325,7 +330,8 @@ class Entry {
// succeeded.
Entry(BaseTransaction* trans, GetByHandle, int64 handle);
Entry(BaseTransaction* trans, GetById, const Id& id);
- Entry(BaseTransaction* trans, GetByTag, const std::string& tag);
+ Entry(BaseTransaction* trans, GetByServerTag, const std::string& tag);
+ Entry(BaseTransaction* trans, GetByClientTag, const std::string& tag);
bool good() const { return 0 != kernel_; }
@@ -431,6 +437,7 @@ class MutableEntry : public Entry {
MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
MutableEntry(WriteTransaction* trans, GetByHandle, int64);
MutableEntry(WriteTransaction* trans, GetById, const Id&);
+ MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
inline WriteTransaction* write_transaction() const {
return write_transaction_;
@@ -510,6 +517,7 @@ class MutableEntry : public Entry {
void* operator new(size_t size) { return (::operator new)(size); }
bool PutImpl(StringField field, const std::string& value);
+ bool PutUniqueClientTag(const std::string& value);
// Adjusts the successor and predecessor entries so that they no longer
// refer to this entry.
@@ -723,7 +731,8 @@ class Directory {
EntryKernel* GetEntryByHandle(const int64 handle);
EntryKernel* GetEntryByHandle(const int64 metahandle, ScopedKernelLock* lock);
EntryKernel* GetEntryById(const Id& id);
- EntryKernel* GetEntryByTag(const std::string& tag);
+ EntryKernel* GetEntryByServerTag(const std::string& tag);
+ EntryKernel* GetEntryByClientTag(const std::string& tag);
EntryKernel* GetRootEntry();
bool ReindexId(EntryKernel* const entry, const Id& new_id);
void ReindexParentId(EntryKernel* const entry, const Id& new_parent_id);
@@ -845,6 +854,7 @@ class Directory {
// processed |snapshot| failed, for example, due to no disk space.
void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
+ // For new entry creation only
void InsertEntry(EntryKernel* entry, ScopedKernelLock* lock);
void InsertEntry(EntryKernel* entry);
@@ -874,6 +884,13 @@ class Directory {
// This index contains EntryKernels ordered by parent ID and metahandle.
// It allows efficient lookup of the children of a given parent.
typedef std::set<EntryKernel*, LessParentIdAndHandle> ParentIdChildIndex;
+
+ // Contains both deleted and existing entries that carry client tags.
+ // We can't index only existing entries: the client could then create
+ // items that end up with duplicate IDs, resulting in a DB key violation,
+ // and ID reassociation would fail after an attempted commit.
+ typedef std::set<EntryKernel*,
+ LessField<StringField, UNIQUE_CLIENT_TAG> > ClientTagIndex;
typedef std::vector<int64> MetahandlesToPurge;
private:
@@ -907,6 +924,7 @@ class Directory {
MetahandlesIndex* metahandles_index; // Entries indexed by metahandle
IdsIndex* ids_index; // Entries indexed by id
ParentIdChildIndex* parent_id_child_index;
+ ClientTagIndex* client_tag_index;
// So we don't have to create an EntryKernel every time we want to
// look something up in an index. Needle in haystack metaphor.
EntryKernel needle;
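
The ClientTagIndex declared above is a std::set of EntryKernel pointers ordered by the UNIQUE_CLIENT_TAG string field, and lookups reuse the kernel's shared "needle" object instead of allocating a temporary key. A minimal, self-contained sketch of that pattern with simplified stand-in types (not the real EntryKernel or LessField definitions):

#include <set>
#include <string>

// Stand-in for EntryKernel: only the field the index orders by.
struct Kernel {
  std::string client_tag;
};

// Stand-in for LessField<StringField, UNIQUE_CLIENT_TAG>.
struct LessByClientTag {
  bool operator()(const Kernel* a, const Kernel* b) const {
    return a->client_tag < b->client_tag;
  }
};

typedef std::set<Kernel*, LessByClientTag> ClientTagIndex;

// Mirrors Directory::GetEntryByClientTag(): point the needle at the wanted
// tag and probe the set; no per-lookup allocation.
Kernel* FindByClientTag(ClientTagIndex* index, Kernel* needle,
                        const std::string& tag) {
  needle->client_tag = tag;
  ClientTagIndex::iterator it = index->find(needle);
  return it != index->end() ? *it : NULL;
}

Because the comparator looks only at the tag field, every entry placed in the index must have a non-empty, unique tag; that is why InitializeIndices() and PutUniqueClientTag() skip entries whose tag is empty.
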
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
index 56f0212..398a00f 100755
--- a/chrome/browser/sync/syncable/syncable_columns.h
+++ b/chrome/browser/sync/syncable/syncable_columns.h
@@ -50,7 +50,8 @@ static const ColumnSpec g_metas_columns[] = {
// Strings
{"non_unique_name", "varchar"},
{"server_non_unique_name", "varchar(255)"},
- {"singleton_tag", "varchar"},
+ {"unique_server_tag", "varchar"},
+ {"unique_client_tag", "varchar"},
//////////////////////////////////////
// Blobs.
{"specifics", "blob"},
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
index 407508a..d30296a 100755
--- a/chrome/browser/sync/syncable/syncable_unittest.cc
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -34,6 +34,7 @@
#include "base/logging.h"
#include "base/platform_thread.h"
#include "base/scoped_ptr.h"
+#include "base/scoped_temp_dir.h"
#include "chrome/browser/sync/protocol/bookmark_specifics.pb.h"
#include "chrome/browser/sync/syncable/directory_backing_store.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
@@ -77,11 +78,24 @@ void ExpectDataFromExtendedAttributeEquals(BaseTransaction* trans,
}
} // namespace
-TEST(Syncable, General) {
- remove("SimpleTest.sqlite3");
+class SyncableGeneralTest : public testing::Test {
+ public:
+ virtual void SetUp() {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ db_path_ = temp_dir_.path().Append(
+ FILE_PATH_LITERAL("SyncableTest.sqlite3"));
+ }
+
+ virtual void TearDown() {
+ }
+ protected:
+ ScopedTempDir temp_dir_;
+ FilePath db_path_;
+};
+
+TEST_F(SyncableGeneralTest, General) {
Directory dir;
- FilePath test_db(FILE_PATH_LITERAL("SimpleTest.sqlite3"));
- dir.Open(test_db, "SimpleTest");
+ dir.Open(db_path_, "SimpleTest");
int64 written_metahandle;
const Id id = TestIdFactory::FromNumber(99);
@@ -157,9 +171,85 @@ TEST(Syncable, General) {
}
dir.SaveChanges();
- remove("SimpleTest.sqlite3");
}
+TEST_F(SyncableGeneralTest, ClientIndexRebuildsProperly) {
+ int64 written_metahandle;
+ TestIdFactory factory;
+ const Id id = factory.NewServerId();
+ string name = "cheesepuffs";
+ string tag = "dietcoke";
+
+ // Test creating a new meta entry.
+ {
+ Directory dir;
+ dir.Open(db_path_, "IndexTest");
+ {
+ WriteTransaction wtrans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ me.Put(UNIQUE_CLIENT_TAG, tag);
+ written_metahandle = me.Get(META_HANDLE);
+ }
+ dir.SaveChanges();
+ }
+
+ // The DB was closed. Now reopen it. This will cause index regeneration.
+ {
+ Directory dir;
+ dir.Open(db_path_, "IndexTest");
+
+ ReadTransaction trans(&dir, __FILE__, __LINE__);
+ Entry me(&trans, GET_BY_CLIENT_TAG, tag);
+ ASSERT_TRUE(me.good());
+ EXPECT_EQ(me.Get(ID), id);
+ EXPECT_EQ(me.Get(BASE_VERSION), 1);
+ EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
+ EXPECT_EQ(me.Get(META_HANDLE), written_metahandle);
+ }
+}
+
+TEST_F(SyncableGeneralTest, ClientIndexRebuildsDeletedProperly) {
+ TestIdFactory factory;
+ const Id id = factory.NewServerId();
+ string tag = "dietcoke";
+
+ // Test creating a deleted, unsynced, server meta entry.
+ {
+ Directory dir;
+ dir.Open(db_path_, "IndexTest");
+ {
+ WriteTransaction wtrans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "deleted");
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ me.Put(UNIQUE_CLIENT_TAG, tag);
+ me.Put(IS_DEL, true);
+ me.Put(IS_UNSYNCED, true); // Or it might be purged.
+ }
+ dir.SaveChanges();
+ }
+
+ // The DB was closed. Now reopen it. This will cause index regeneration.
+ // Should still be present and valid in the client tag index.
+ {
+ Directory dir;
+ dir.Open(db_path_, "IndexTest");
+
+ ReadTransaction trans(&dir, __FILE__, __LINE__);
+ Entry me(&trans, GET_BY_CLIENT_TAG, tag);
+ ASSERT_TRUE(me.good());
+ EXPECT_EQ(me.Get(ID), id);
+ EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
+ EXPECT_TRUE(me.Get(IS_DEL));
+ EXPECT_TRUE(me.Get(IS_UNSYNCED));
+ }
+}
+
+
namespace {
// A Directory whose backing store always fails SaveChanges by returning false.
@@ -1131,5 +1221,100 @@ TEST_F(SyncableDirectoryTest, Bug1509232) {
dir_.get()->SaveChanges();
}
+class SyncableClientTagTest : public SyncableDirectoryTest {
+ public:
+ static const int kBaseVersion = 1;
+ const char* test_name_;
+ const char* test_tag_;
+
+ SyncableClientTagTest() : test_name_("test_name"), test_tag_("dietcoke") {}
+
+ bool CreateWithDefaultTag(Id id, bool deleted) {
+ return CreateWithTag(test_tag_, id, deleted);
+ }
+
+ // Attempt to create an entry with the given tag.
+ bool CreateWithTag(const char* tag, Id id, bool deleted) {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), test_name_);
+ CHECK(me.good());
+ me.Put(ID, id);
+ if (id.ServerKnows()) {
+ me.Put(BASE_VERSION, kBaseVersion);
+ }
+ me.Put(IS_DEL, deleted);
+ me.Put(IS_UNSYNCED, true);
+ me.Put(IS_DIR, false);
+ return me.Put(UNIQUE_CLIENT_TAG, tag);
+ }
+
+ // Verify an entry exists with the default tag.
+ void VerifyTag(Id id, bool deleted) {
+ // Should still be present and valid in the client tag index.
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ CHECK(me.good());
+ EXPECT_EQ(me.Get(ID), id);
+ EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), test_tag_);
+ EXPECT_EQ(me.Get(IS_DEL), deleted);
+ EXPECT_EQ(me.Get(IS_UNSYNCED), true);
+ }
+
+ protected:
+ TestIdFactory factory_;
+};
+
+TEST_F(SyncableClientTagTest, TestClientTagClear) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ EXPECT_TRUE(me.good());
+ me.Put(UNIQUE_CLIENT_TAG, "");
+ }
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry by_tag(&trans, GET_BY_CLIENT_TAG, test_tag_);
+ EXPECT_FALSE(by_tag.good());
+
+ Entry by_id(&trans, GET_BY_ID, server_id);
+ EXPECT_TRUE(by_id.good());
+ EXPECT_TRUE(by_id.Get(UNIQUE_CLIENT_TAG).empty());
+ }
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexServerId) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
+ VerifyTag(server_id, false);
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexClientId) {
+ Id client_id = factory_.NewLocalId();
+ EXPECT_TRUE(CreateWithDefaultTag(client_id, false));
+ VerifyTag(client_id, false);
+}
+
+TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexClientId) {
+ Id client_id = factory_.NewLocalId();
+ EXPECT_TRUE(CreateWithDefaultTag(client_id, true));
+ VerifyTag(client_id, true);
+}
+
+TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexServerId) {
+ Id server_id = factory_.NewServerId();
+ EXPECT_TRUE(CreateWithDefaultTag(server_id, true));
+ VerifyTag(server_id, true);
+}
+
+TEST_F(SyncableClientTagTest, TestClientTagIndexDuplicateServer) {
+ EXPECT_TRUE(CreateWithDefaultTag(factory_.NewServerId(), true));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), true));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), false));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), false));
+ EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), true));
+}
+
} // namespace
} // namespace syncable