summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sync/sync.gyp2
-rw-r--r--sync/syncable/delete_journal.cc144
-rw-r--r--sync/syncable/delete_journal.h103
-rw-r--r--sync/syncable/directory.cc34
-rw-r--r--sync/syncable/directory.h20
-rw-r--r--sync/syncable/directory_backing_store.cc155
-rw-r--r--sync/syncable/directory_backing_store.h27
-rw-r--r--sync/syncable/directory_backing_store_unittest.cc32
-rw-r--r--sync/syncable/entry_kernel.h13
-rw-r--r--sync/syncable/in_memory_directory_backing_store.cc3
-rw-r--r--sync/syncable/in_memory_directory_backing_store.h1
-rw-r--r--sync/syncable/invalid_directory_backing_store.cc1
-rw-r--r--sync/syncable/invalid_directory_backing_store.h1
-rw-r--r--sync/syncable/mutable_entry.cc13
-rw-r--r--sync/syncable/on_disk_directory_backing_store.cc12
-rw-r--r--sync/syncable/on_disk_directory_backing_store.h2
-rw-r--r--sync/syncable/syncable_base_transaction.h3
-rw-r--r--sync/syncable/syncable_unittest.cc130
-rw-r--r--sync/syncable/syncable_write_transaction.cc1
-rw-r--r--sync/test/test_directory_backing_store.cc3
-rw-r--r--sync/test/test_directory_backing_store.h1
21 files changed, 603 insertions, 98 deletions
diff --git a/sync/sync.gyp b/sync/sync.gyp
index 17671bc..6b93e6d 100644
--- a/sync/sync.gyp
+++ b/sync/sync.gyp
@@ -127,6 +127,8 @@
'sessions/sync_session_context.cc',
'sessions/sync_session_context.h',
'syncable/blob.h',
+ 'syncable/delete_journal.cc',
+ 'syncable/delete_journal.h',
'syncable/dir_open_result.h',
'syncable/directory.cc',
'syncable/directory.h',
diff --git a/sync/syncable/delete_journal.cc b/sync/syncable/delete_journal.cc
new file mode 100644
index 0000000..9450670
--- /dev/null
+++ b/sync/syncable/delete_journal.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/delete_journal.h"
+
+#include "base/stl_util.h"
+#include "sync/internal_api/public/base/model_type.h"
+
+namespace syncer {
+namespace syncable {
+
+DeleteJournal::DeleteJournal(JournalIndex* initial_journal) {
+ CHECK(initial_journal);
+ delete_journals_.swap(*initial_journal);
+}
+
+DeleteJournal::~DeleteJournal() {
+ STLDeleteElements(&delete_journals_);
+}
+
+size_t DeleteJournal::GetDeleteJournalSize(BaseTransaction* trans) const {
+ DCHECK(trans);
+ return delete_journals_.size();
+}
+
+void DeleteJournal::UpdateDeleteJournalForServerDelete(
+ BaseTransaction* trans, bool was_deleted, const EntryKernel& entry) {
+ DCHECK(trans);
+
+ // Should be sufficient to check server type only but check for local
+ // type too because of incomplete test setup.
+ if (!(IsDeleteJournalEnabled(entry.GetServerModelType()) ||
+ IsDeleteJournalEnabled(
+ GetModelTypeFromSpecifics(entry.ref(SPECIFICS))))) {
+ return;
+ }
+
+ JournalIndex::iterator it = delete_journals_.find(&entry);
+
+ if (entry.ref(SERVER_IS_DEL)) {
+ if (it == delete_journals_.end()) {
+ // New delete.
+ EntryKernel* t = new EntryKernel(entry);
+ delete_journals_.insert(t);
+ delete_journals_to_purge_.erase(t->ref(META_HANDLE));
+ }
+ } else {
+ // Undelete. This could happen in two cases:
+ // * An entry was deleted then undeleted, i.e. server delete was
+  //   overwritten because the entry has unsynced data locally.
+ // * A data type was broken, i.e. encountered unrecoverable error, in last
+ // sync session and all its entries were duplicated in delete journals.
+ // On restart, entries are recreated from downloads and recreation calls
+ // UpdateDeleteJournals() to remove live entries from delete journals,
+ // thus only deleted entries remain in journals.
+ if (it != delete_journals_.end()) {
+ delete_journals_to_purge_.insert((*it)->ref(META_HANDLE));
+ delete *it;
+ delete_journals_.erase(it);
+ } else if (was_deleted) {
+ delete_journals_to_purge_.insert(entry.ref(META_HANDLE));
+ }
+ }
+}
+
+void DeleteJournal::GetDeleteJournals(BaseTransaction* trans,
+ ModelType type,
+ EntryKernelSet* deleted_entries) {
+ DCHECK(trans);
+ DCHECK(!passive_delete_journal_types_.Has(type));
+ for (JournalIndex::const_iterator it = delete_journals_.begin();
+ it != delete_journals_.end(); ++it) {
+ if ((*it)->GetServerModelType() == type ||
+ GetModelTypeFromSpecifics((*it)->ref(SPECIFICS)) == type) {
+ deleted_entries->insert(*it);
+ }
+ }
+ passive_delete_journal_types_.Put(type);
+}
+
+void DeleteJournal::PurgeDeleteJournals(BaseTransaction* trans,
+ const MetahandleSet& to_purge) {
+ DCHECK(trans);
+ JournalIndex::iterator it = delete_journals_.begin();
+ while (it != delete_journals_.end()) {
+ int64 handle = (*it)->ref(META_HANDLE);
+ if (to_purge.count(handle)) {
+ delete *it;
+ delete_journals_.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ delete_journals_to_purge_.insert(to_purge.begin(), to_purge.end());
+}
+
+void DeleteJournal::TakeSnapshotAndClear(BaseTransaction* trans,
+ EntryKernelSet* journal_entries,
+ MetahandleSet* journals_to_purge) {
+ DCHECK(trans);
+ // Move passive delete journals to snapshot. Will copy back if snapshot fails
+ // to save.
+ JournalIndex::iterator it = delete_journals_.begin();
+ while (it != delete_journals_.end()) {
+ if (passive_delete_journal_types_.Has((*it)->GetServerModelType()) ||
+ passive_delete_journal_types_.Has(GetModelTypeFromSpecifics(
+ (*it)->ref(SPECIFICS)))) {
+ journal_entries->insert(*it);
+ delete_journals_.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ *journals_to_purge = delete_journals_to_purge_;
+ delete_journals_to_purge_.clear();
+}
+
+void DeleteJournal::AddJournalBatch(BaseTransaction* trans,
+ const EntryKernelSet& entries) {
+ DCHECK(trans);
+ EntryKernel needle;
+ for (EntryKernelSet::const_iterator i = entries.begin();
+ i != entries.end(); ++i) {
+ needle.put(ID, (*i)->ref(ID));
+ if (delete_journals_.find(&needle) == delete_journals_.end()) {
+ delete_journals_.insert(new EntryKernel(**i));
+ }
+ delete_journals_to_purge_.erase((*i)->ref(META_HANDLE));
+ }
+}
+
+/* static */
+bool DeleteJournal::IsDeleteJournalEnabled(ModelType type) {
+ switch (type) {
+ case BOOKMARKS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/sync/syncable/delete_journal.h b/sync/syncable/delete_journal.h
new file mode 100644
index 0000000..5b9d044
--- /dev/null
+++ b/sync/syncable/delete_journal.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_DELETE_JOURNAL_H_
+#define SYNC_SYNCABLE_DELETE_JOURNAL_H_
+
+#include <set>
+
+#include "base/gtest_prod_util.h"
+#include "base/synchronization/lock.h"
+#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/syncable-inl.h"
+
+namespace syncer {
+namespace syncable {
+
+class BaseTransaction;
+struct EntryKernel;
+
+typedef std::set<const EntryKernel*, LessField<IdField, ID> > JournalIndex;
+
+// DeleteJournal manages deleted entries that are not in sync directory until
+// it's safe to drop them after the deletion is confirmed with native models.
+// DeleteJournal is thread-safe and can be accessed on any thread. Has to hold
+// a valid transaction object when calling methods of DeleteJournal, thus each
+// method requires a non-null |trans| parameter.
+class DeleteJournal {
+ public:
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
+
+  // Initialize |delete_journals_| using |initial_journal|, whose content is
+ // destroyed during initialization.
+ explicit DeleteJournal(JournalIndex* initial_journal);
+ ~DeleteJournal();
+
+ // For testing only.
+ size_t GetDeleteJournalSize(BaseTransaction* trans) const;
+
+ // Add/remove |entry| to/from |delete_journals_| according to its
+ // SERVER_IS_DEL field and |was_deleted|. Called on sync thread.
+ void UpdateDeleteJournalForServerDelete(BaseTransaction* trans,
+ bool was_deleted,
+ const EntryKernel& entry);
+
+ // Return entries of specified type in |delete_journals_|. This should be
+ // called ONCE in model association. |deleted_entries| can be used to
+ // detect deleted sync data that's not persisted in native model to
+ // prevent back-from-dead problem. |deleted_entries| are only valid during
+ // lifetime of |trans|. |type| is added to |passive_delete_journal_types_| to
+ // enable periodically saving/clearing of delete journals of |type| because
+ // new journals added later are not needed until next model association.
+ // Can be called on any thread.
+ void GetDeleteJournals(BaseTransaction* trans, ModelType type,
+ EntryKernelSet* deleted_entries);
+
+ // Purge entries of specified type in |delete_journals_| if their handles are
+ // in |to_purge|. This should be called after model association and
+ // |to_purge| should contain handles of the entries whose deletions are
+ // confirmed in native model. Can be called on any thread.
+ void PurgeDeleteJournals(BaseTransaction* trans,
+ const MetahandleSet& to_purge);
+
+ // Move entries in |delete_journals_| whose types are in
+ // |passive_delete_journal_types_| to |journal_entries|. Move handles in
+ // |delete_journals_to_purge_| to |journals_to_purge|. Called on sync thread.
+ void TakeSnapshotAndClear(BaseTransaction* trans,
+ EntryKernelSet* journal_entries,
+ MetahandleSet* journals_to_purge);
+
+ // Add |entries| to |delete_journals_| regardless of their SERVER_IS_DEL
+ // value. This is used to:
+ // * restore delete journals from snapshot if snapshot failed to save.
+ // * batch add entries of a data type with unrecoverable error to delete
+ // journal before purging them.
+ // Called on sync thread.
+ void AddJournalBatch(BaseTransaction* trans, const EntryKernelSet& entries);
+
+ // Return true if delete journals of |type| are maintained.
+ static bool IsDeleteJournalEnabled(ModelType type);
+
+ private:
+ // Contains deleted entries that may not be persisted in native models. And
+ // in case of unrecoverable error, all purged entries are moved here for
+ // bookkeeping to prevent back-from-dead entries that are deleted elsewhere
+ // when sync's down.
+ JournalIndex delete_journals_;
+
+ // Contains meta handles of deleted entries that have been persisted or
+ // undeleted, thus can be removed from database.
+ MetahandleSet delete_journals_to_purge_;
+
+ // Delete journals of these types can be cleared from memory after being
+ // saved to database.
+ ModelTypeSet passive_delete_journal_types_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeleteJournal);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_DELETE_JOURNAL_H_
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index c9b5132..3400c85 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -10,6 +10,7 @@
#include "base/string_number_conversions.h"
#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
+#include "sync/syncable/delete_journal.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
@@ -101,7 +102,10 @@ Directory::SaveChangesSnapshot::SaveChangesSnapshot()
: kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
-Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
+Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
+ STLDeleteElements(&dirty_metas);
+ STLDeleteElements(&delete_journals);
+}
Directory::Kernel::Kernel(
const std::string& name,
@@ -196,17 +200,19 @@ DirOpenResult Directory::OpenImpl(
DirectoryChangeDelegate* delegate,
const WeakHandle<TransactionObserver>&
transaction_observer) {
-
KernelLoadInfo info;
// Temporary indices before kernel_ initialized in case Load fails. We 0(1)
// swap these later.
MetahandlesIndex metas_bucket;
- DirOpenResult result = store_->Load(&metas_bucket, &info);
+ JournalIndex delete_journals;
+
+ DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info);
if (OPENED != result)
return result;
kernel_ = new Kernel(name, info, delegate, transaction_observer);
kernel_->metahandles_index->swap(metas_bucket);
+ delete_journal_.reset(new DeleteJournal(&delete_journals));
InitializeIndices();
// Write back the share info to reserve some space in 'next_id'. This will
@@ -219,6 +225,11 @@ DirOpenResult Directory::OpenImpl(
return OPENED;
}
+DeleteJournal* Directory::delete_journal() {
+ DCHECK(delete_journal_.get());
+ return delete_journal_.get();
+}
+
void Directory::Close() {
store_.reset();
if (kernel_) {
@@ -236,7 +247,6 @@ void Directory::OnUnrecoverableError(const BaseTransaction* trans,
message);
}
-
EntryKernel* Directory::GetEntryById(const Id& id) {
ScopedKernelLock lock(this);
return GetEntryById(id, &lock);
@@ -466,7 +476,8 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
// Skip over false positives; it happens relatively infrequently.
if (!entry->is_dirty())
continue;
- snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
+ snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
+ new EntryKernel(*entry));
DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
// We don't bother removing from the index here as we blow the entire thing
// in a moment, and it unnecessarily complicates iteration.
@@ -488,6 +499,9 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
snapshot->kernel_info_status = kernel_->info_status;
// This one we reset on failure.
kernel_->info_status = KERNEL_SHARE_INFO_VALID;
+
+ delete_journal_->TakeSnapshotAndClear(
+ &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
bool Directory::SaveChanges() {
@@ -518,7 +532,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
// Now drop everything we can out of memory.
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+ kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
MetahandlesIndex::iterator found =
kernel_->metahandles_index->find(&kernel_->needle);
EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
@@ -605,6 +619,7 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types) {
}
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
+ WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
ScopedKernelLock lock(this);
kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
@@ -615,7 +630,7 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
// that SaveChanges will at least try again later.
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
+ kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
MetahandlesIndex::iterator found =
kernel_->metahandles_index->find(&kernel_->needle);
if (found != kernel_->metahandles_index->end()) {
@@ -625,6 +640,11 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
snapshot.metahandles_to_purge.end());
+
+ // Restore delete journals.
+ delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
+ delete_journal_->PurgeDeleteJournals(&trans,
+ snapshot.delete_journals_to_purge);
}
void Directory::GetDownloadProgress(
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index 3b88112..fdb1b8a 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -14,6 +14,7 @@
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/util/report_unrecoverable_error_function.h"
#include "sync/internal_api/public/util/weak_handle.h"
+#include "sync/syncable/delete_journal.h"
#include "sync/syncable/dir_open_result.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/metahandle_set.h"
@@ -113,16 +114,6 @@ enum UnlinkReason {
DATA_TYPE_PURGE // To be used when purging a dataype.
};
-class EntryKernelLessByMetaHandle {
- public:
- inline bool operator()(const EntryKernel& a,
- const EntryKernel& b) const {
- return a.ref(META_HANDLE) < b.ref(META_HANDLE);
- }
-};
-
-typedef std::set<EntryKernel, EntryKernelLessByMetaHandle> EntryKernelSet;
-
enum InvariantCheckLevel {
OFF = 0, // No checking.
VERIFY_CHANGES = 1, // Checks only mutated entries. Does not check hierarchy.
@@ -139,6 +130,7 @@ class SYNC_EXPORT Directory {
friend class ScopedKernelUnlock;
friend class WriteTransaction;
friend class SyncableDirectoryTest;
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
TakeSnapshotGetsAllDirtyHandlesTest);
FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
@@ -211,6 +203,8 @@ class SYNC_EXPORT Directory {
PersistedKernelInfo kernel_info;
EntryKernelSet dirty_metas;
MetahandleSet metahandles_to_purge;
+ EntryKernelSet delete_journals;
+ MetahandleSet delete_journals_to_purge;
};
// Does not take ownership of |encryptor|.
@@ -339,6 +333,8 @@ class SYNC_EXPORT Directory {
DirectoryChangeDelegate* delegate,
const WeakHandle<TransactionObserver>& transaction_observer);
+ DeleteJournal* delete_journal();
+
private:
// These private versions expect the kernel lock to already be held
// before calling.
@@ -634,6 +630,10 @@ class SYNC_EXPORT Directory {
Cryptographer* const cryptographer_;
InvariantCheckLevel invariant_check_level_;
+
+ // Maintain deleted entries not in |kernel_| until it's verified that they
+ // are deleted in native models as well.
+ scoped_ptr<DeleteJournal> delete_journal_;
};
} // namespace syncable
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index f18bada..442c9e7 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -171,12 +171,24 @@ DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
DirectoryBackingStore::~DirectoryBackingStore() {
}
-bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
+bool DirectoryBackingStore::DeleteEntries(EntryTable from,
+ const MetahandleSet& handles) {
if (handles.empty())
return true;
- sql::Statement statement(db_->GetCachedStatement(
+ sql::Statement statement;
+ // Call GetCachedStatement() separately to get different statements for
+ // different tables.
+ switch (from) {
+ case METAS_TABLE:
+ statement.Assign(db_->GetCachedStatement(
SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
+ break;
+ case DELETE_JOURNAL_TABLE:
+ statement.Assign(db_->GetCachedStatement(
+ SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
+ break;
+ }
for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
++i) {
@@ -196,9 +208,9 @@ bool DirectoryBackingStore::SaveChanges(
// Back out early if there is nothing to write.
bool save_info =
(Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
- if (snapshot.dirty_metas.empty()
- && snapshot.metahandles_to_purge.empty()
- && !save_info) {
+ if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
+ snapshot.delete_journals.empty() &&
+ snapshot.delete_journals_to_purge.empty() && !save_info) {
return true;
}
@@ -206,14 +218,26 @@ bool DirectoryBackingStore::SaveChanges(
if (!transaction.Begin())
return false;
+ PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- DCHECK(i->is_dirty());
- if (!SaveEntryToDB(*i))
+ DCHECK((*i)->is_dirty());
+ if (!SaveEntryToDB(&save_meta_statment_, **i))
+ return false;
+ }
+
+ if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
+ return false;
+
+ PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
+ &save_delete_journal_statment_);
+ for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
+ i != snapshot.delete_journals.end(); ++i) {
+ if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
return false;
}
- if (!DeleteEntries(snapshot.metahandles_to_purge))
+ if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
return false;
if (save_info) {
@@ -465,22 +489,12 @@ bool DirectoryBackingStore::RefreshColumns() {
}
bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
- string select;
- select.reserve(kUpdateStatementBufferSize);
- select.append("SELECT ");
- AppendColumnList(&select);
- select.append(" FROM metas ");
-
- sql::Statement s(db_->GetUniqueStatement(select.c_str()));
+ return LoadEntriesInternal("metas", entry_bucket);
+}
- while (s.Step()) {
- scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
- // A null kernel is evidence of external data corruption.
- if (!kernel.get())
- return false;
- entry_bucket->insert(kernel.release());
- }
- return s.Succeeded();
+bool DirectoryBackingStore::LoadDeleteJournals(
+ JournalIndex* delete_journals) {
+ return LoadEntriesInternal("deleted_metas", delete_journals);
}
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
@@ -538,38 +552,12 @@ bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
return true;
}
-bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
- // This statement is constructed at runtime, so we can't use
- // GetCachedStatement() to let the Connection cache it. We will construct
- // and cache it ourselves the first time this function is called.
- if (!save_entry_statement_.is_valid()) {
- string query;
- query.reserve(kUpdateStatementBufferSize);
- query.append("INSERT OR REPLACE INTO metas ");
- string values;
- values.reserve(kUpdateStatementBufferSize);
- values.append("VALUES ");
- const char* separator = "( ";
- int i = 0;
- for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
- query.append(separator);
- values.append(separator);
- separator = ", ";
- query.append(ColumnName(i));
- values.append("?");
- }
- query.append(" ) ");
- values.append(" )");
- query.append(values);
-
- save_entry_statement_.Assign(
- db_->GetUniqueStatement(query.c_str()));
- } else {
- save_entry_statement_.Reset(true);
- }
-
- BindFields(entry, &save_entry_statement_);
- return save_entry_statement_.Run();
+/* static */
+bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
+ const EntryKernel& entry) {
+ save_statement->Reset(true);
+ BindFields(entry, save_statement);
+ return save_statement->Run();
}
bool DirectoryBackingStore::DropDeletedEntries() {
@@ -1093,7 +1081,6 @@ bool DirectoryBackingStore::MigrateVersion83To84() {
query.append(ComposeCreateTableColumnSpecs());
if (!db_->Execute(query.c_str()))
return false;
-
SetVersion(84);
return true;
}
@@ -1314,5 +1301,61 @@ bool DirectoryBackingStore::VerifyReferenceIntegrity(
return is_ok;
}
+template<class T>
+bool DirectoryBackingStore::LoadEntriesInternal(const std::string& table,
+ T* bucket) {
+ string select;
+ select.reserve(kUpdateStatementBufferSize);
+ select.append("SELECT ");
+ AppendColumnList(&select);
+ select.append(" FROM " + table);
+
+ sql::Statement s(db_->GetUniqueStatement(select.c_str()));
+
+ while (s.Step()) {
+ scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
+ // A null kernel is evidence of external data corruption.
+ if (!kernel.get())
+ return false;
+ bucket->insert(kernel.release());
+ }
+ return s.Succeeded();
+}
+
+void DirectoryBackingStore::PrepareSaveEntryStatement(
+ EntryTable table, sql::Statement* save_statement) {
+ if (save_statement->is_valid())
+ return;
+
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ switch (table) {
+ case METAS_TABLE:
+ query.append("INSERT OR REPLACE INTO metas ");
+ break;
+ case DELETE_JOURNAL_TABLE:
+ query.append("INSERT OR REPLACE INTO deleted_metas ");
+ break;
+ }
+
+ string values;
+ values.reserve(kUpdateStatementBufferSize);
+ values.append(" VALUES ");
+ const char* separator = "( ";
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
+ query.append(separator);
+ values.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ values.append("?");
+ }
+ query.append(" ) ");
+ values.append(" )");
+ query.append(values);
+ save_statement->Assign(db_->GetUniqueStatement(
+ base::StringPrintf(query.c_str(), "metas").c_str()));
+}
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
index b7a97d3..3ee8844 100644
--- a/sync/syncable/directory_backing_store.h
+++ b/sync/syncable/directory_backing_store.h
@@ -59,6 +59,7 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
// NOTE: On success (return value of OPENED), the buckets are populated with
// newly allocated items, meaning ownership is bestowed upon the caller.
virtual DirOpenResult Load(MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) = 0;
// Updates the on-disk store with the input |snapshot| as a database
@@ -97,10 +98,12 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
// Load helpers for entries and attributes.
bool LoadEntries(MetahandlesIndex* entry_bucket);
+ bool LoadDeleteJournals(JournalIndex* delete_journals);
bool LoadInfo(Directory::KernelLoadInfo* info);
// Save/update helpers for entries. Return false if sqlite commit fails.
- bool SaveEntryToDB(const EntryKernel& entry);
+ static bool SaveEntryToDB(sql::Statement* save_statement,
+ const EntryKernel& entry);
bool SaveNewEntryToDB(const EntryKernel& entry);
bool UpdateEntryToDB(const EntryKernel& entry);
@@ -110,9 +113,13 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
// Close save_dbhandle_. Broken out for testing.
void EndSave();
- // Removes each entry whose metahandle is in |handles| from the database.
- // Does synchronous I/O. Returns false on error.
- bool DeleteEntries(const MetahandleSet& handles);
+ enum EntryTable {
+ METAS_TABLE,
+ DELETE_JOURNAL_TABLE,
+ };
+ // Removes each entry whose metahandle is in |handles| from the table
+  // specified by |from|. Does synchronous I/O. Returns false on error.
+ bool DeleteEntries(EntryTable from, const MetahandleSet& handles);
// Drop all tables in preparation for reinitialization.
void DropAllTables();
@@ -167,13 +174,23 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
bool MigrateVersion84To85();
scoped_ptr<sql::Connection> db_;
- sql::Statement save_entry_statement_;
+ sql::Statement save_meta_statment_;
+ sql::Statement save_delete_journal_statment_;
std::string dir_name_;
// Set to true if migration left some old columns around that need to be
// discarded.
bool needs_column_refresh_;
+ private:
+ // Helper function for loading entries from specified table.
+ template<class T>
+ bool LoadEntriesInternal(const std::string& table, T* bucket);
+
+ // Prepares |save_statement| for saving entries in |table|.
+ void PrepareSaveEntryStatement(EntryTable table,
+ sql::Statement* save_statement);
+
DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
};
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
index bfa4cfb..917e874 100644
--- a/sync/syncable/directory_backing_store_unittest.cc
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -47,9 +47,10 @@ class MigrationTest : public testing::TestWithParam<int> {
static bool LoadAndIgnoreReturnedData(DirectoryBackingStore *dbs) {
MetahandlesIndex metas;
+ JournalIndex delete_journals;;
STLElementDeleter<MetahandlesIndex> index_deleter(&metas);
Directory::KernelLoadInfo kernel_load_info;
- return dbs->Load(&metas, &kernel_load_info) == OPENED;
+ return dbs->Load(&metas, &delete_journals, &kernel_load_info) == OPENED;
}
void SetUpVersion67Database(sql::Connection* connection);
@@ -2217,8 +2218,8 @@ void MigrationTest::SetUpVersion84Database(sql::Connection* connection) {
"server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
"e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
"har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_verion bigint default 0"
- ");"
+ ", server_ordinal_in_parent blob, transaction_version bigint default "
+ "0);"
"CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
"y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
"ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
@@ -2339,8 +2340,8 @@ void MigrationTest::SetUpVersion85Database(sql::Connection* connection) {
"server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
"e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
"har,specifics blob,server_specifics blob, base_server_specifics BLOB"
- ", server_ordinal_in_parent blob, transaction_verion bigint default 0"
- ");"
+ ", server_ordinal_in_parent blob, transaction_version bigint default "
+ "0);"
"CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
"y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
"ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
@@ -2747,11 +2748,12 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion78To79) {
// Ensure the next_id has been incremented.
MetahandlesIndex entry_bucket;
+ JournalIndex delete_journals;;
STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
Directory::KernelLoadInfo load_info;
s.Clear();
- ASSERT_TRUE(dbs->Load(&entry_bucket, &load_info));
+ ASSERT_TRUE(dbs->Load(&entry_bucket, &delete_journals, &load_info));
EXPECT_LE(load_info.kernel_info.next_id, kInitialNextId - 65536);
}
@@ -2769,10 +2771,11 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion79To80) {
// Ensure the bag_of_chips has been set.
MetahandlesIndex entry_bucket;
+ JournalIndex delete_journals;;
STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
Directory::KernelLoadInfo load_info;
- ASSERT_TRUE(dbs->Load(&entry_bucket, &load_info));
+ ASSERT_TRUE(dbs->Load(&entry_bucket, &delete_journals, &load_info));
// Check that the initial value is the serialization of an empty ChipBag.
sync_pb::ChipBag chip_bag;
std::string serialized_chip_bag;
@@ -2830,10 +2833,11 @@ TEST_F(DirectoryBackingStoreTest, DetectInvalidOrdinal) {
// Trying to unpack this entry should signal that the DB is corrupted.
MetahandlesIndex entry_bucket;
+ JournalIndex delete_journals;;
STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
Directory::KernelLoadInfo kernel_load_info;
ASSERT_EQ(FAILED_DATABASE_CORRUPT,
- dbs->Load(&entry_bucket, &kernel_load_info));
+ dbs->Load(&entry_bucket, &delete_journals, &kernel_load_info));
}
TEST_F(DirectoryBackingStoreTest, MigrateVersion81To82) {
@@ -2973,12 +2977,13 @@ TEST_P(MigrationTest, ToCurrentVersion) {
syncable::Directory::KernelLoadInfo dir_info;
MetahandlesIndex index;
+ JournalIndex delete_journals;;
STLElementDeleter<MetahandlesIndex> index_deleter(&index);
{
scoped_ptr<TestDirectoryBackingStore> dbs(
new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(OPENED, dbs->Load(&index, &dir_info));
+ ASSERT_EQ(OPENED, dbs->Load(&index, &delete_journals, &dir_info));
ASSERT_FALSE(dbs->needs_column_refresh_);
ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
}
@@ -3250,16 +3255,18 @@ TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
scoped_ptr<TestDirectoryBackingStore> dbs(
new TestDirectoryBackingStore(GetUsername(), &connection));
MetahandlesIndex index;
+ JournalIndex delete_journals;
Directory::KernelLoadInfo kernel_load_info;
STLElementDeleter<MetahandlesIndex> index_deleter(&index);
- dbs->Load(&index, &kernel_load_info);
+ dbs->Load(&index, &delete_journals, &kernel_load_info);
size_t initial_size = index.size();
ASSERT_LT(0U, initial_size) << "Test requires entries to delete.";
int64 first_to_die = (*index.begin())->ref(META_HANDLE);
MetahandleSet to_delete;
to_delete.insert(first_to_die);
- EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+ EXPECT_TRUE(dbs->DeleteEntries(TestDirectoryBackingStore::METAS_TABLE,
+ to_delete));
STLDeleteElements(&index);
dbs->LoadEntries(&index);
@@ -3281,7 +3288,8 @@ TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
to_delete.insert((*it)->ref(META_HANDLE));
}
- EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+ EXPECT_TRUE(dbs->DeleteEntries(TestDirectoryBackingStore::METAS_TABLE,
+ to_delete));
STLDeleteElements(&index);
dbs->LoadEntries(&index);
diff --git a/sync/syncable/entry_kernel.h b/sync/syncable/entry_kernel.h
index 6dc88a7..e9cf828 100644
--- a/sync/syncable/entry_kernel.h
+++ b/sync/syncable/entry_kernel.h
@@ -5,6 +5,8 @@
#ifndef SYNC_SYNCABLE_ENTRY_KERNEL_H_
#define SYNC_SYNCABLE_ENTRY_KERNEL_H_
+#include <set>
+
#include "base/time.h"
#include "base/values.h"
#include "sync/base/sync_export.h"
@@ -323,6 +325,17 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
bool dirty_;
};
+class EntryKernelLessByMetaHandle {
+ public:
+ inline bool operator()(const EntryKernel* a,
+ const EntryKernel* b) const {
+ return a->ref(META_HANDLE) < b->ref(META_HANDLE);
+ }
+};
+
+typedef std::set<const EntryKernel*, EntryKernelLessByMetaHandle>
+ EntryKernelSet;
+
struct EntryKernelMutation {
EntryKernel original, mutated;
};
diff --git a/sync/syncable/in_memory_directory_backing_store.cc b/sync/syncable/in_memory_directory_backing_store.cc
index ec09d45..9451354 100644
--- a/sync/syncable/in_memory_directory_backing_store.cc
+++ b/sync/syncable/in_memory_directory_backing_store.cc
@@ -13,6 +13,7 @@ InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
DirOpenResult InMemoryDirectoryBackingStore::Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) {
if (!db_->is_open()) {
if (!db_->OpenInMemory())
@@ -26,6 +27,8 @@ DirOpenResult InMemoryDirectoryBackingStore::Load(
return FAILED_DATABASE_CORRUPT;
if (!LoadEntries(entry_bucket))
return FAILED_DATABASE_CORRUPT;
+ if (!LoadDeleteJournals(delete_journals))
+ return FAILED_DATABASE_CORRUPT;
if (!LoadInfo(kernel_load_info))
return FAILED_DATABASE_CORRUPT;
if (!VerifyReferenceIntegrity(*entry_bucket))
diff --git a/sync/syncable/in_memory_directory_backing_store.h b/sync/syncable/in_memory_directory_backing_store.h
index 79dbed8..846d96a 100644
--- a/sync/syncable/in_memory_directory_backing_store.h
+++ b/sync/syncable/in_memory_directory_backing_store.h
@@ -26,6 +26,7 @@ class SYNC_EXPORT_PRIVATE InMemoryDirectoryBackingStore
explicit InMemoryDirectoryBackingStore(const std::string& dir_name);
virtual DirOpenResult Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
private:
diff --git a/sync/syncable/invalid_directory_backing_store.cc b/sync/syncable/invalid_directory_backing_store.cc
index e655098..f57231b 100644
--- a/sync/syncable/invalid_directory_backing_store.cc
+++ b/sync/syncable/invalid_directory_backing_store.cc
@@ -16,6 +16,7 @@ InvalidDirectoryBackingStore::~InvalidDirectoryBackingStore() {
DirOpenResult InvalidDirectoryBackingStore::Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) {
return FAILED_OPEN_DATABASE;
}
diff --git a/sync/syncable/invalid_directory_backing_store.h b/sync/syncable/invalid_directory_backing_store.h
index 9d5f139..13cbb96 100644
--- a/sync/syncable/invalid_directory_backing_store.h
+++ b/sync/syncable/invalid_directory_backing_store.h
@@ -19,6 +19,7 @@ class SYNC_EXPORT_PRIVATE InvalidDirectoryBackingStore
virtual ~InvalidDirectoryBackingStore();
virtual DirOpenResult Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
private:
DISALLOW_COPY_AND_ASSIGN(InvalidDirectoryBackingStore);
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 143881d..75e51f1 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -273,9 +273,20 @@ bool MutableEntry::Put(ProtoField field,
bool MutableEntry::Put(BitField field, bool value) {
DCHECK(kernel_);
write_transaction_->SaveOriginal(kernel_);
- if (kernel_->ref(field) != value) {
+ bool old_value = kernel_->ref(field);
+ if (old_value != value) {
kernel_->put(field, value);
kernel_->mark_dirty(GetDirtyIndexHelper());
+
+ // Update delete journal for existence status change on server side here
+ // instead of in PutIsDel() because IS_DEL may not be updated due to
+ // early returns when processing updates. And because
+ // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
+ // to be called on sync thread.
+ if (field == SERVER_IS_DEL) {
+ dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
+ write_transaction(), old_value, *kernel_);
+ }
}
return true;
}
diff --git a/sync/syncable/on_disk_directory_backing_store.cc b/sync/syncable/on_disk_directory_backing_store.cc
index 4201210..7264e0c 100644
--- a/sync/syncable/on_disk_directory_backing_store.cc
+++ b/sync/syncable/on_disk_directory_backing_store.cc
@@ -36,6 +36,7 @@ OnDiskDirectoryBackingStore::~OnDiskDirectoryBackingStore() { }
DirOpenResult OnDiskDirectoryBackingStore::TryLoad(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) {
DCHECK(CalledOnValidThread());
if (!db_->is_open()) {
@@ -50,6 +51,8 @@ DirOpenResult OnDiskDirectoryBackingStore::TryLoad(
return FAILED_DATABASE_CORRUPT;
if (!LoadEntries(entry_bucket))
return FAILED_DATABASE_CORRUPT;
+ if (!LoadDeleteJournals(delete_journals))
+ return FAILED_DATABASE_CORRUPT;
if (!LoadInfo(kernel_load_info))
return FAILED_DATABASE_CORRUPT;
if (!VerifyReferenceIntegrity(*entry_bucket))
@@ -61,8 +64,10 @@ DirOpenResult OnDiskDirectoryBackingStore::TryLoad(
DirOpenResult OnDiskDirectoryBackingStore::Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) {
- DirOpenResult result = TryLoad(entry_bucket, kernel_load_info);
+ DirOpenResult result = TryLoad(entry_bucket, delete_journals,
+ kernel_load_info);
if (result == OPENED) {
UMA_HISTOGRAM_ENUMERATION(
"Sync.DirectoryOpenResult", FIRST_TRY_SUCCESS, RESULT_COUNT);
@@ -72,12 +77,13 @@ DirOpenResult OnDiskDirectoryBackingStore::Load(
ReportFirstTryOpenFailure();
// The fallback: delete the current database and return a fresh one. We can
- // fetch the user's data from the could.
+ // fetch the user's data from the cloud.
STLDeleteElements(entry_bucket);
+ STLDeleteElements(delete_journals);
db_.reset(new sql::Connection);
file_util::Delete(backing_filepath_, false);
- result = TryLoad(entry_bucket, kernel_load_info);
+ result = TryLoad(entry_bucket, delete_journals, kernel_load_info);
if (result == OPENED) {
UMA_HISTOGRAM_ENUMERATION(
"Sync.DirectoryOpenResult", SECOND_TRY_SUCCESS, RESULT_COUNT);
diff --git a/sync/syncable/on_disk_directory_backing_store.h b/sync/syncable/on_disk_directory_backing_store.h
index 95a61e1..06e14b8 100644
--- a/sync/syncable/on_disk_directory_backing_store.h
+++ b/sync/syncable/on_disk_directory_backing_store.h
@@ -22,12 +22,14 @@ class SYNC_EXPORT_PRIVATE OnDiskDirectoryBackingStore
virtual ~OnDiskDirectoryBackingStore();
virtual DirOpenResult Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
// A helper function that will make one attempt to load the directory.
// Unlike Load(), it does not attempt to recover from failure.
DirOpenResult TryLoad(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info);
protected:
diff --git a/sync/syncable/syncable_base_transaction.h b/sync/syncable/syncable_base_transaction.h
index c8652b6..763f68d 100644
--- a/sync/syncable/syncable_base_transaction.h
+++ b/sync/syncable/syncable_base_transaction.h
@@ -23,8 +23,9 @@ enum WriterTag {
AUTHWATCHER,
UNITTEST,
VACUUM_AFTER_SAVE,
+ HANDLE_SAVE_FAILURE,
PURGE_ENTRIES,
- SYNCAPI
+ SYNCAPI,
};
// Make sure to update this if you update WriterTag.
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index 89ea175..7c72ed4b 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -13,6 +13,7 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop.h"
+#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/test/values_test_util.h"
@@ -599,7 +600,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
ASSERT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- ASSERT_TRUE(i->is_dirty());
+ ASSERT_TRUE((*i)->is_dirty());
}
dir_->VacuumAfterSaveChanges(snapshot);
}
@@ -632,7 +633,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
EXPECT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE(i->is_dirty());
+ EXPECT_TRUE((*i)->is_dirty());
}
dir_->VacuumAfterSaveChanges(snapshot);
}
@@ -721,12 +722,135 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
EXPECT_EQ(number_changed, snapshot.dirty_metas.size());
for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE(i->is_dirty());
+ EXPECT_TRUE((*i)->is_dirty());
}
dir_->VacuumAfterSaveChanges(snapshot);
}
}
+// Test delete journals management.
+TEST_F(SyncableDirectoryTest, ManageDeleteJournals) {
+ sync_pb::EntitySpecifics bookmark_specifics;
+ AddDefaultFieldValue(BOOKMARKS, &bookmark_specifics);
+ bookmark_specifics.mutable_bookmark()->set_url("url");
+
+ Id id1 = TestIdFactory::FromNumber(-1);
+ Id id2 = TestIdFactory::FromNumber(-2);
+ int64 handle1 = 0;
+ int64 handle2 = 0;
+ {
+ // Create two bookmark entries and save in database.
+ CreateEntry("item1", id1);
+ CreateEntry("item2", id2);
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ MutableEntry item1(&trans, GET_BY_ID, id1);
+ ASSERT_TRUE(item1.good());
+ handle1 = item1.Get(META_HANDLE);
+ item1.Put(SPECIFICS, bookmark_specifics);
+ item1.Put(SERVER_SPECIFICS, bookmark_specifics);
+ MutableEntry item2(&trans, GET_BY_ID, id2);
+ ASSERT_TRUE(item2.good());
+ handle2 = item2.Get(META_HANDLE);
+ item2.Put(SPECIFICS, bookmark_specifics);
+ item2.Put(SERVER_SPECIFICS, bookmark_specifics);
+ }
+ ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
+ }
+
+ { // Test adding and saving delete journals.
+ DeleteJournal* delete_journal = dir_->delete_journal();
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ EntryKernelSet journal_entries;
+ delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
+ ASSERT_EQ(0u, journal_entries.size());
+
+ // Set SERVER_IS_DEL of the entries to true and they should be added to
+ // delete journals.
+ MutableEntry item1(&trans, GET_BY_ID, id1);
+ ASSERT_TRUE(item1.good());
+ item1.Put(SERVER_IS_DEL, true);
+ MutableEntry item2(&trans, GET_BY_ID, id2);
+ ASSERT_TRUE(item2.good());
+ item2.Put(SERVER_IS_DEL, true);
+ EntryKernel tmp;
+ tmp.put(ID, id1);
+ EXPECT_TRUE(delete_journal->delete_journals_.count(&tmp));
+ tmp.put(ID, id2);
+ EXPECT_TRUE(delete_journal->delete_journals_.count(&tmp));
+ }
+
+ // Save delete journals in database and verify memory clearing.
+ ASSERT_TRUE(dir_->SaveChanges());
+ {
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ EXPECT_EQ(0u, delete_journal->GetDeleteJournalSize(&trans));
+ }
+ ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
+ }
+
+ {
+ {
+ // Test reading delete journals from database.
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ DeleteJournal* delete_journal = dir_->delete_journal();
+ EntryKernelSet journal_entries;
+ delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
+ ASSERT_EQ(2u, journal_entries.size());
+ EntryKernel tmp;
+ tmp.put(META_HANDLE, handle1);
+ EXPECT_TRUE(journal_entries.count(&tmp));
+ tmp.put(META_HANDLE, handle2);
+ EXPECT_TRUE(journal_entries.count(&tmp));
+
+ // Purge item2.
+ MetahandleSet to_purge;
+ to_purge.insert(handle2);
+ delete_journal->PurgeDeleteJournals(&trans, to_purge);
+
+ // Verify that item2 is purged from journals in memory and will be
+ // purged from database.
+ tmp.put(ID, id2);
+ EXPECT_FALSE(delete_journal->delete_journals_.count(&tmp));
+ EXPECT_EQ(1u, delete_journal->delete_journals_to_purge_.size());
+ EXPECT_TRUE(delete_journal->delete_journals_to_purge_.count(handle2));
+ }
+ ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
+ }
+
+ {
+ {
+ // Verify purged entry is gone in database.
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+ DeleteJournal* delete_journal = dir_->delete_journal();
+ EntryKernelSet journal_entries;
+ delete_journal->GetDeleteJournals(&trans, BOOKMARKS, &journal_entries);
+ ASSERT_EQ(1u, journal_entries.size());
+ EntryKernel tmp;
+ tmp.put(ID, id1);
+ tmp.put(META_HANDLE, handle1);
+ EXPECT_TRUE(journal_entries.count(&tmp));
+
+ // Undelete item1.
+ MutableEntry item1(&trans, GET_BY_ID, id1);
+ ASSERT_TRUE(item1.good());
+ item1.Put(SERVER_IS_DEL, false);
+ EXPECT_TRUE(delete_journal->delete_journals_.empty());
+ EXPECT_EQ(1u, delete_journal->delete_journals_to_purge_.size());
+ EXPECT_TRUE(delete_journal->delete_journals_to_purge_.count(handle1));
+ }
+ ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());
+ }
+
+ {
+ // Verify undeleted entry is gone from database.
+ ReadTransaction trans(FROM_HERE, dir_.get());
+ DeleteJournal* delete_journal = dir_->delete_journal();
+ ASSERT_EQ(0u, delete_journal->GetDeleteJournalSize(&trans));
+ }
+}
+
const char SyncableDirectoryTest::kName[] = "Foo";
namespace {
diff --git a/sync/syncable/syncable_write_transaction.cc b/sync/syncable/syncable_write_transaction.cc
index 71fb289..788d922 100644
--- a/sync/syncable/syncable_write_transaction.cc
+++ b/sync/syncable/syncable_write_transaction.cc
@@ -170,6 +170,7 @@ std::string WriterTagToString(WriterTag writer_tag) {
ENUM_CASE(AUTHWATCHER);
ENUM_CASE(UNITTEST);
ENUM_CASE(VACUUM_AFTER_SAVE);
+ ENUM_CASE(HANDLE_SAVE_FAILURE);
ENUM_CASE(PURGE_ENTRIES);
ENUM_CASE(SYNCAPI);
};
diff --git a/sync/test/test_directory_backing_store.cc b/sync/test/test_directory_backing_store.cc
index 81d0590..4235993 100644
--- a/sync/test/test_directory_backing_store.cc
+++ b/sync/test/test_directory_backing_store.cc
@@ -23,6 +23,7 @@ TestDirectoryBackingStore::~TestDirectoryBackingStore() {
DirOpenResult TestDirectoryBackingStore::Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) {
DCHECK(db_->is_open());
@@ -33,6 +34,8 @@ DirOpenResult TestDirectoryBackingStore::Load(
return FAILED_DATABASE_CORRUPT;
if (!LoadEntries(entry_bucket))
return FAILED_DATABASE_CORRUPT;
+ if (!LoadDeleteJournals(delete_journals))
+ return FAILED_DATABASE_CORRUPT;
if (!LoadInfo(kernel_load_info))
return FAILED_DATABASE_CORRUPT;
if (!VerifyReferenceIntegrity(*entry_bucket))
diff --git a/sync/test/test_directory_backing_store.h b/sync/test/test_directory_backing_store.h
index 033e99e..cbbe16e 100644
--- a/sync/test/test_directory_backing_store.h
+++ b/sync/test/test_directory_backing_store.h
@@ -27,6 +27,7 @@ class TestDirectoryBackingStore : public DirectoryBackingStore {
virtual ~TestDirectoryBackingStore();
virtual DirOpenResult Load(
MetahandlesIndex* entry_bucket,
+ JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion67To68);