diff options
author | haitaol@chromium.org <haitaol@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-01-05 01:15:51 +0000 |
---|---|---|
committer | haitaol@chromium.org <haitaol@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-01-05 01:15:51 +0000 |
commit | 6add02aaf5d93a32bec7e32e1b0ff527f1bb3d18 (patch) | |
tree | b2a8fbec180d9a6457ed0856a5cb213e9e2c751a /sync/syncable/on_disk_directory_backing_store.cc | |
parent | 0e11e9fa745c6174f709773346368c4f771ddb2f (diff) | |
download | chromium_src-6add02aaf5d93a32bec7e32e1b0ff527f1bb3d18.zip chromium_src-6add02aaf5d93a32bec7e32e1b0ff527f1bb3d18.tar.gz chromium_src-6add02aaf5d93a32bec7e32e1b0ff527f1bb3d18.tar.bz2 |
[Sync] Add support for loading, updating and querying delete journals in
Directory.
Delete journals keep deleted metas until the persistence of the deletes in
native model is confirmed. When an entry is deleted in sync model, a copy is
added to the delete journals in memory and saved in the database later. The next time
the client restarts, if some native data doesn't match the sync data but
matches a delete journal, it's safe to assume that this is because the native
delete was not persisted and the native data should be deleted. This helps
prevent the back-from-dead problem caused by the native model and sync model
getting out of sync.
BUG=121928
Review URL: https://chromiumcodereview.appspot.com/11441026
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@175248 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync/syncable/on_disk_directory_backing_store.cc')
-rw-r--r-- | sync/syncable/on_disk_directory_backing_store.cc | 12 |
1 files changed, 9 insertions, 3 deletions
diff --git a/sync/syncable/on_disk_directory_backing_store.cc b/sync/syncable/on_disk_directory_backing_store.cc index 4201210..7264e0c 100644 --- a/sync/syncable/on_disk_directory_backing_store.cc +++ b/sync/syncable/on_disk_directory_backing_store.cc @@ -36,6 +36,7 @@ OnDiskDirectoryBackingStore::~OnDiskDirectoryBackingStore() { } DirOpenResult OnDiskDirectoryBackingStore::TryLoad( MetahandlesIndex* entry_bucket, + JournalIndex* delete_journals, Directory::KernelLoadInfo* kernel_load_info) { DCHECK(CalledOnValidThread()); if (!db_->is_open()) { @@ -50,6 +51,8 @@ DirOpenResult OnDiskDirectoryBackingStore::TryLoad( return FAILED_DATABASE_CORRUPT; if (!LoadEntries(entry_bucket)) return FAILED_DATABASE_CORRUPT; + if (!LoadDeleteJournals(delete_journals)) + return FAILED_DATABASE_CORRUPT; if (!LoadInfo(kernel_load_info)) return FAILED_DATABASE_CORRUPT; if (!VerifyReferenceIntegrity(*entry_bucket)) @@ -61,8 +64,10 @@ DirOpenResult OnDiskDirectoryBackingStore::TryLoad( DirOpenResult OnDiskDirectoryBackingStore::Load( MetahandlesIndex* entry_bucket, + JournalIndex* delete_journals, Directory::KernelLoadInfo* kernel_load_info) { - DirOpenResult result = TryLoad(entry_bucket, kernel_load_info); + DirOpenResult result = TryLoad(entry_bucket, delete_journals, + kernel_load_info); if (result == OPENED) { UMA_HISTOGRAM_ENUMERATION( "Sync.DirectoryOpenResult", FIRST_TRY_SUCCESS, RESULT_COUNT); @@ -72,12 +77,13 @@ DirOpenResult OnDiskDirectoryBackingStore::Load( ReportFirstTryOpenFailure(); // The fallback: delete the current database and return a fresh one. We can - // fetch the user's data from the could. + // fetch the user's data from the cloud. 
STLDeleteElements(entry_bucket); + STLDeleteElements(delete_journals); db_.reset(new sql::Connection); file_util::Delete(backing_filepath_, false); - result = TryLoad(entry_bucket, kernel_load_info); + result = TryLoad(entry_bucket, delete_journals, kernel_load_info); if (result == OPENED) { UMA_HISTOGRAM_ENUMERATION( "Sync.DirectoryOpenResult", SECOND_TRY_SUCCESS, RESULT_COUNT); |