summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--chrome/browser/sync/about_sync_util.cc2
-rw-r--r--chrome/browser/sync/glue/sync_backend_host.cc23
-rw-r--r--chrome/browser/sync/glue/sync_backend_host_unittest.cc46
-rw-r--r--chrome/browser/sync/profile_sync_service_unittest.cc12
-rw-r--r--chrome/browser/sync/test_profile_sync_service.cc18
-rw-r--r--chrome/browser/sync/test_profile_sync_service.h2
-rw-r--r--sync/engine/all_status.cc4
-rw-r--r--sync/engine/apply_control_data_updates.cc11
-rw-r--r--sync/engine/apply_control_data_updates_unittest.cc22
-rw-r--r--sync/engine/apply_updates_and_resolve_conflicts_command.cc14
-rw-r--r--sync/engine/syncer_unittest.cc12
-rw-r--r--sync/internal_api/js_sync_manager_observer_unittest.cc2
-rw-r--r--sync/internal_api/public/engine/sync_status.cc1
-rw-r--r--sync/internal_api/public/engine/sync_status.h2
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot.cc18
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot.h6
-rw-r--r--sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc13
-rw-r--r--sync/internal_api/public/test/test_entry_factory.h5
-rw-r--r--sync/internal_api/sync_manager_impl.cc4
-rw-r--r--sync/internal_api/sync_manager_impl_unittest.cc127
-rw-r--r--sync/internal_api/test/test_entry_factory.cc22
-rw-r--r--sync/internal_api/test/test_user_share.cc22
-rw-r--r--sync/sessions/sync_session.cc10
-rw-r--r--sync/syncable/directory.cc46
-rw-r--r--sync/syncable/directory.h9
-rw-r--r--sync/syncable/directory_backing_store.cc77
-rw-r--r--sync/syncable/directory_backing_store.h2
-rw-r--r--sync/syncable/directory_backing_store_unittest.cc275
-rw-r--r--sync/syncable/syncable_unittest.cc29
-rw-r--r--sync/test/engine/test_syncable_utils.cc27
-rw-r--r--sync/test/engine/test_syncable_utils.h8
-rw-r--r--sync/test/test_directory_backing_store.h1
32 files changed, 565 insertions, 307 deletions
diff --git a/chrome/browser/sync/about_sync_util.cc b/chrome/browser/sync/about_sync_util.cc
index ad32c06..e696273 100644
--- a/chrome/browser/sync/about_sync_util.cc
+++ b/chrome/browser/sync/about_sync_util.cc
@@ -189,7 +189,6 @@ scoped_ptr<DictionaryValue> ConstructAboutInformation(
"Sync First-Time Setup Complete");
BoolSyncStat is_backend_initialized(section_local,
"Sync Backend Initialized");
- BoolSyncStat is_download_complete(section_local, "Initial Download Complete");
BoolSyncStat is_syncing(section_local, "Syncing");
ListValue* section_network = AddSection(stats_list, "Network");
@@ -299,7 +298,6 @@ scoped_ptr<DictionaryValue> ConstructAboutInformation(
is_setup_complete.SetValue(service->HasSyncSetupCompleted());
is_backend_initialized.SetValue(sync_initialized);
if (is_status_valid) {
- is_download_complete.SetValue(full_status.initial_sync_ended);
is_syncing.SetValue(full_status.syncing);
}
diff --git a/chrome/browser/sync/glue/sync_backend_host.cc b/chrome/browser/sync/glue/sync_backend_host.cc
index 96dd732..22acf72 100644
--- a/chrome/browser/sync/glue/sync_backend_host.cc
+++ b/chrome/browser/sync/glue/sync_backend_host.cc
@@ -676,10 +676,10 @@ void SyncBackendHost::ConfigureDataTypes(
// downloaded if they are enabled.
//
// The SyncBackendRegistrar's state was initially derived from the types
- // marked initial_sync_ended when the sync database was loaded. Afterwards it
- // is modified only by this function. We expect it to remain in sync with the
+ // detected to have been downloaded in the database. Afterwards it is
+ // modified only by this function. We expect it to remain in sync with the
// backend because configuration requests are never aborted; they are retried
- // until they succeed or the browser is closed.
+ // until they succeed or the backend is shut down.
syncer::ModelTypeSet types_to_download = registrar_->ConfigureDataTypes(
types_to_add, types_to_remove);
@@ -692,10 +692,10 @@ void SyncBackendHost::ConfigureDataTypes(
// prepared to handle a migration during a configure, so we must ensure that
// all our types_to_download actually contain no data before we sync them.
//
- // The most common way to end up in this situation used to be types which had
- // !initial_sync_ended, but did have some progress markers. We avoid problems
- // with those types by purging the data of any such partially synced types
- // soon after we load the directory.
+ // One common way to end up in this situation used to be types which
+// downloaded some or all of their data but had not yet applied it. We avoid
+ // problems with those types by purging the data of any such partially synced
+ // types soon after we load the directory.
//
// Another possible scenario is that we have newly supported or newly enabled
// data types being downloaded here but the nigori type, which is always
@@ -1241,6 +1241,15 @@ void SyncBackendHost::Core::DoInitialProcessControlTypes() {
return;
}
+ if (!sync_manager_->InitialSyncEndedTypes().HasAll(syncer::ControlTypes())) {
+ LOG(ERROR) << "Failed to download control types";
+ host_.Call(
+ FROM_HERE,
+ &SyncBackendHost::HandleInitializationCompletedOnFrontendLoop,
+ false);
+ return;
+ }
+
// Initialize device info. This is asynchronous on some platforms, so we
// provide a callback for when it finishes.
synced_device_tracker_.reset(
diff --git a/chrome/browser/sync/glue/sync_backend_host_unittest.cc b/chrome/browser/sync/glue/sync_backend_host_unittest.cc
index c5f518a..0893cea 100644
--- a/chrome/browser/sync/glue/sync_backend_host_unittest.cc
+++ b/chrome/browser/sync/glue/sync_backend_host_unittest.cc
@@ -185,8 +185,8 @@ class SyncBackendHostTest : public testing::Test {
}
// Synchronously initializes the backend.
- void InitializeBackend() {
- EXPECT_CALL(mock_frontend_, OnBackendInitialized(_, _, true)).
+ void InitializeBackend(bool expect_success) {
+ EXPECT_CALL(mock_frontend_, OnBackendInitialized(_, _, expect_success)).
WillOnce(InvokeWithoutArgs(QuitMessageLoop));
backend_->Initialize(&mock_frontend_,
syncer::WeakHandle<syncer::JsEventHandler>(),
@@ -250,7 +250,7 @@ class SyncBackendHostTest : public testing::Test {
// Test basic initialization with no initial types (first time initialization).
// Only the nigori should be configured.
TEST_F(SyncBackendHostTest, InitShutdown) {
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
syncer::ControlTypes()));
EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(
@@ -261,7 +261,7 @@ TEST_F(SyncBackendHostTest, InitShutdown) {
// Test first time sync scenario. All types should be properly configured.
TEST_F(SyncBackendHostTest, FirstTimeSync) {
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
syncer::ControlTypes()));
EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(
@@ -287,7 +287,7 @@ TEST_F(SyncBackendHostTest, Restart) {
syncer::ModelTypeSet all_but_nigori = enabled_types_;
fake_manager_factory_.set_progress_marker_types(enabled_types_);
fake_manager_factory_.set_initial_sync_ended_types(enabled_types_);
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty());
EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(),
enabled_types_).Empty());
@@ -321,7 +321,7 @@ TEST_F(SyncBackendHostTest, PartialTypes) {
// Bringing up the backend should purge all partial types, then proceed to
// download the Nigori.
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
syncer::ModelTypeSet(syncer::NIGORI)));
EXPECT_TRUE(fake_manager_->GetAndResetCleanedTypes().HasAll(partial_types));
@@ -351,7 +351,7 @@ TEST_F(SyncBackendHostTest, LostDB) {
sync_prefs_->SetSyncSetupCompleted();
// Initialization should fetch the Nigori node. Everything else should be
// left untouched.
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
syncer::ModelTypeSet(syncer::ControlTypes())));
EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(
@@ -380,7 +380,7 @@ TEST_F(SyncBackendHostTest, LostDB) {
TEST_F(SyncBackendHostTest, DisableTypes) {
// Simulate first time sync.
- InitializeBackend();
+ InitializeBackend(true);
fake_manager_->GetAndResetCleanedTypes();
ConfigureDataTypes(enabled_types_,
Difference(syncer::ModelTypeSet::All(),
@@ -415,7 +415,7 @@ TEST_F(SyncBackendHostTest, DisableTypes) {
TEST_F(SyncBackendHostTest, AddTypes) {
// Simulate first time sync.
- InitializeBackend();
+ InitializeBackend(true);
fake_manager_->GetAndResetCleanedTypes();
ConfigureDataTypes(enabled_types_,
Difference(syncer::ModelTypeSet::All(),
@@ -451,7 +451,7 @@ TEST_F(SyncBackendHostTest, AddTypes) {
// And and disable in the same configuration.
TEST_F(SyncBackendHostTest, AddDisableTypes) {
// Simulate first time sync.
- InitializeBackend();
+ InitializeBackend(true);
fake_manager_->GetAndResetCleanedTypes();
ConfigureDataTypes(enabled_types_,
Difference(syncer::ModelTypeSet::All(),
@@ -502,7 +502,7 @@ TEST_F(SyncBackendHostTest, NewlySupportedTypes) {
enabled_types_.PutAll(new_types);
// Does nothing.
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty());
EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(),
old_types).Empty());
@@ -543,7 +543,7 @@ TEST_F(SyncBackendHostTest, NewlySupportedTypesWithPartialTypes) {
// Purge the partial types. The nigori will be among the purged types, but
// the syncer will re-download it by the time the initialization is complete.
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
syncer::ModelTypeSet(syncer::NIGORI)));
EXPECT_TRUE(fake_manager_->GetAndResetCleanedTypes().HasAll(partial_types));
@@ -571,7 +571,7 @@ TEST_F(SyncBackendHostTest, NewlySupportedTypesWithPartialTypes) {
// Register for some IDs and trigger an invalidation. This should
// propagate all the way to the frontend.
TEST_F(SyncBackendHostTest, Invalidate) {
- InitializeBackend();
+ InitializeBackend(true);
syncer::ObjectIdSet ids;
ids.insert(invalidation::ObjectId(1, "id1"));
@@ -594,7 +594,7 @@ TEST_F(SyncBackendHostTest, Invalidate) {
// Register for some IDs and update the invalidator state. This
// should propagate all the way to the frontend.
TEST_F(SyncBackendHostTest, UpdateInvalidatorState) {
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_CALL(mock_frontend_,
OnInvalidatorStateChange(syncer::INVALIDATIONS_ENABLED))
@@ -613,7 +613,7 @@ TEST_F(SyncBackendHostTest, UpdateInvalidatorState) {
// before calling Shutdown(). Then start up and shut down the backend again.
// Those notifications shouldn't propagate to the frontend.
TEST_F(SyncBackendHostTest, InvalidationsAfterStopSyncingForShutdown) {
- InitializeBackend();
+ InitializeBackend(true);
syncer::ObjectIdSet ids;
ids.insert(invalidation::ObjectId(5, "id5"));
@@ -642,7 +642,7 @@ TEST_F(SyncBackendHostTest, InvalidationsAfterStopSyncingForShutdown) {
TEST_F(SyncBackendHostTest, InitializeDeviceInfo) {
ASSERT_EQ(NULL, backend_->GetSyncedDeviceTracker());
- InitializeBackend();
+ InitializeBackend(true);
const SyncedDeviceTracker* device_tracker =
backend_->GetSyncedDeviceTracker();
ASSERT_TRUE(device_tracker->ReadLocalDeviceInfo());
@@ -663,7 +663,7 @@ TEST_F(SyncBackendHostTest, DownloadControlTypes) {
// Bringing up the backend should download the new types without downloading
// any old types.
- InitializeBackend();
+ InitializeBackend(true);
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(new_types));
EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(),
enabled_types_).Empty());
@@ -672,6 +672,18 @@ TEST_F(SyncBackendHostTest, DownloadControlTypes) {
enabled_types_).Empty());
}
+// Fail to download control types. It's believed that there is a server bug
+// which can allow this to happen (crbug.com/164288). The sync backend host
+// should detect this condition and fail to initialize the backend.
+//
+// The failure is "silent" in the sense that the GetUpdates request appears to
+// be successful, but returns no results. This means that the usual
+// download retry logic will not be invoked.
+TEST_F(SyncBackendHostTest, SilentlyFailToDownloadControlTypes) {
+ fake_manager_factory_.set_configure_fail_types(syncer::ModelTypeSet::All());
+ InitializeBackend(false);
+}
+
} // namespace
} // namespace browser_sync
diff --git a/chrome/browser/sync/profile_sync_service_unittest.cc b/chrome/browser/sync/profile_sync_service_unittest.cc
index 74040bdc..a125be1 100644
--- a/chrome/browser/sync/profile_sync_service_unittest.cc
+++ b/chrome/browser/sync/profile_sync_service_unittest.cc
@@ -421,6 +421,18 @@ TEST_F(ProfileSyncServiceTest, FailToOpenDatabase) {
EXPECT_FALSE(harness_.service->sync_initialized());
}
+// This setup will allow the database to exist, but leave it empty. The attempt
+// to download control types will silently fail (no downloads have any effect in
+// these tests). The sync_backend_host will notice this and inform the profile
+// sync service of the failure to initialize the backend.
+TEST_F(ProfileSyncServiceTest, FailToDownloadControlTypes) {
+ harness_.StartSyncServiceAndSetInitialSyncEnded(false, true, true, true,
+ syncer::STORAGE_IN_MEMORY);
+
+ // The backend is not ready. Ensure the PSS knows this.
+ EXPECT_FALSE(harness_.service->sync_initialized());
+}
+
// Register a handler with the ProfileSyncService, and disable and
// reenable sync. The handler should get notified of the state
// changes.
diff --git a/chrome/browser/sync/test_profile_sync_service.cc b/chrome/browser/sync/test_profile_sync_service.cc
index ceaa540..978b7ef 100644
--- a/chrome/browser/sync/test_profile_sync_service.cc
+++ b/chrome/browser/sync/test_profile_sync_service.cc
@@ -133,22 +133,21 @@ void SyncBackendHostForProfileSyncTest
UserShare* user_share = GetUserShare();
Directory* directory = user_share->directory.get();
- if (!directory->initial_sync_ended_for_type(NIGORI)) {
+ if (!directory->InitialSyncEndedForType(NIGORI)) {
syncer::TestUserShare::CreateRoot(NIGORI, user_share);
// A side effect of adding the NIGORI mode (normally done by the
// syncer) is a decryption attempt, which will fail the first time.
}
- if (!directory->initial_sync_ended_for_type(DEVICE_INFO)) {
+ if (!directory->InitialSyncEndedForType(DEVICE_INFO)) {
syncer::TestUserShare::CreateRoot(DEVICE_INFO, user_share);
}
- if (!directory->initial_sync_ended_for_type(EXPERIMENTS)) {
+ if (!directory->InitialSyncEndedForType(EXPERIMENTS)) {
syncer::TestUserShare::CreateRoot(EXPERIMENTS, user_share);
}
- SetInitialSyncEndedForAllTypes();
restored_types = syncer::ModelTypeSet::All();
}
@@ -168,17 +167,6 @@ void SyncBackendHostForProfileSyncTest
}
}
-void SyncBackendHostForProfileSyncTest::SetInitialSyncEndedForAllTypes() {
- UserShare* user_share = GetUserShare();
- Directory* directory = user_share->directory.get();
-
- for (int i = syncer::FIRST_REAL_MODEL_TYPE;
- i < syncer::MODEL_TYPE_COUNT; ++i) {
- directory->set_initial_sync_ended_for_type(
- syncer::ModelTypeFromInt(i), true);
- }
-}
-
void SyncBackendHostForProfileSyncTest::EmitOnInvalidatorStateChange(
syncer::InvalidatorState state) {
frontend()->OnInvalidatorStateChange(state);
diff --git a/chrome/browser/sync/test_profile_sync_service.h b/chrome/browser/sync/test_profile_sync_service.h
index 9ec0c51..136f949 100644
--- a/chrome/browser/sync/test_profile_sync_service.h
+++ b/chrome/browser/sync/test_profile_sync_service.h
@@ -67,8 +67,6 @@ class SyncBackendHostForProfileSyncTest : public SyncBackendHost {
static void SetHistoryServiceExpectations(ProfileMock* profile);
- void SetInitialSyncEndedForAllTypes();
-
void EmitOnInvalidatorStateChange(syncer::InvalidatorState state);
void EmitOnIncomingInvalidation(
const syncer::ObjectIdInvalidationMap& invalidation_map,
diff --git a/sync/engine/all_status.cc b/sync/engine/all_status.cc
index ff85d57..24ab03a 100644
--- a/sync/engine/all_status.cc
+++ b/sync/engine/all_status.cc
@@ -14,7 +14,6 @@
namespace syncer {
AllStatus::AllStatus() {
- status_.initial_sync_ended = true;
status_.notifications_enabled = false;
status_.cryptographer_ready = false;
status_.crypto_has_pending_keys = false;
@@ -32,7 +31,6 @@ SyncStatus AllStatus::CreateBlankStatus() const {
status.hierarchy_conflicts = 0;
status.server_conflicts = 0;
status.committed_count = 0;
- status.initial_sync_ended = false;
status.updates_available = 0;
return status;
}
@@ -52,8 +50,6 @@ SyncStatus AllStatus::CalcSyncing(const SyncEngineEvent &event) const {
status.syncing = false;
}
- status.initial_sync_ended |= snapshot.is_share_usable();
-
status.updates_available += snapshot.num_server_changes_remaining();
status.sync_protocol_error =
snapshot.model_neutral_state().sync_protocol_error;
diff --git a/sync/engine/apply_control_data_updates.cc b/sync/engine/apply_control_data_updates.cc
index 6277365..09778c3 100644
--- a/sync/engine/apply_control_data_updates.cc
+++ b/sync/engine/apply_control_data_updates.cc
@@ -79,17 +79,6 @@ void ApplyControlDataUpdates(sessions::SyncSession* session) {
&entry,
dir->GetCryptographer(&trans));
}
-
- // Set initial sync ended bits for all control types requested.
- for (ModelTypeSet::Iterator it =
- session->status_controller().updates_request_types().First();
- it.Good(); it.Inc()) {
- if (!IsControlType(it.Get()))
- continue;
-
- // This gets persisted to the directory's backing store.
- dir->set_initial_sync_ended_for_type(it.Get(), true);
- }
}
// Update the nigori handler with the server's nigori node.
diff --git a/sync/engine/apply_control_data_updates_unittest.cc b/sync/engine/apply_control_data_updates_unittest.cc
index 09c5e11..754bac3 100644
--- a/sync/engine/apply_control_data_updates_unittest.cc
+++ b/sync/engine/apply_control_data_updates_unittest.cc
@@ -67,11 +67,6 @@ TEST_F(ApplyControlDataUpdatesTest, NigoriUpdate) {
ModelTypeSet encrypted_types;
encrypted_types.PutAll(SyncEncryptionHandler::SensitiveTypes());
- // We start with initial_sync_ended == false. This is wrong, since we would
- // have no nigori node if that were the case. However, it makes it easier to
- // verify that ApplyControlDataUpdates sets initial_sync_ended correctly.
- EXPECT_FALSE(directory()->initial_sync_ended_types().Has(NIGORI));
-
{
syncable::ReadTransaction trans(FROM_HERE, directory());
cryptographer = directory()->GetCryptographer(&trans);
@@ -100,7 +95,6 @@ TEST_F(ApplyControlDataUpdatesTest, NigoriUpdate) {
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -383,7 +377,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -462,7 +455,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -534,7 +526,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -617,7 +608,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -701,7 +691,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -781,7 +770,6 @@ TEST_F(ApplyControlDataUpdatesTest,
syncable::ReadTransaction trans(FROM_HERE, directory());
EXPECT_TRUE(directory()->GetNigoriHandler()->GetEncryptedTypes(&trans)
.Equals(ModelTypeSet::All()));
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
@@ -866,14 +854,11 @@ TEST_F(ApplyControlDataUpdatesTest,
nigori().passphrase_type());
{
syncable::ReadTransaction trans(FROM_HERE, directory());
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(NIGORI));
}
}
// Check that we can apply a simple control datatype node successfully.
TEST_F(ApplyControlDataUpdatesTest, ControlApply) {
- EXPECT_FALSE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
-
std::string experiment_id = "experiment";
sync_pb::EntitySpecifics specifics;
specifics.mutable_experiments()->mutable_keystore_encryption()->
@@ -882,7 +867,6 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApply) {
experiment_id, specifics, false);
ApplyControlDataUpdates(session());
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
EXPECT_TRUE(
entry_factory_->GetLocalSpecificsForItem(experiment_handle).
@@ -891,8 +875,6 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApply) {
// Verify that we apply top level folders before their children.
TEST_F(ApplyControlDataUpdatesTest, ControlApplyParentBeforeChild) {
- EXPECT_FALSE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
-
std::string parent_id = "parent";
std::string experiment_id = "experiment";
sync_pb::EntitySpecifics specifics;
@@ -904,7 +886,6 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApplyParentBeforeChild) {
parent_id, specifics, true);
ApplyControlDataUpdates(session());
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(parent_handle));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
EXPECT_TRUE(
@@ -915,8 +896,6 @@ TEST_F(ApplyControlDataUpdatesTest, ControlApplyParentBeforeChild) {
// Verify that we handle control datatype conflicts by preserving the server
// data.
TEST_F(ApplyControlDataUpdatesTest, ControlConflict) {
- EXPECT_FALSE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
-
std::string experiment_id = "experiment";
sync_pb::EntitySpecifics local_specifics, server_specifics;
server_specifics.mutable_experiments()->mutable_keystore_encryption()->
@@ -931,7 +910,6 @@ TEST_F(ApplyControlDataUpdatesTest, ControlConflict) {
local_specifics);
ApplyControlDataUpdates(session());
- EXPECT_TRUE(directory()->initial_sync_ended_types().Has(EXPERIMENTS));
EXPECT_FALSE(entry_factory_->GetIsUnappliedForItem(experiment_handle));
EXPECT_TRUE(
entry_factory_->GetLocalSpecificsForItem(experiment_handle).
diff --git a/sync/engine/apply_updates_and_resolve_conflicts_command.cc b/sync/engine/apply_updates_and_resolve_conflicts_command.cc
index 454ba5a..971f90d 100644
--- a/sync/engine/apply_updates_and_resolve_conflicts_command.cc
+++ b/sync/engine/apply_updates_and_resolve_conflicts_command.cc
@@ -128,20 +128,6 @@ SyncerError ApplyUpdatesAndResolveConflictsCommand::ModelChangingExecuteImpl(
DCHECK(conflict_applicator.simple_conflict_ids().empty());
}
- // This might be the first time we've fully completed a sync cycle, for
- // some subset of the currently synced datatypes.
- if (status->ServerSaysNothingMoreToDownload()) {
- for (ModelTypeSet::Iterator it =
- status->updates_request_types().First(); it.Good(); it.Inc()) {
- // Don't set the flag for control types. We didn't process them here.
- if (IsControlType(it.Get()))
- continue;
-
- // This gets persisted to the directory's backing store.
- dir->set_initial_sync_ended_for_type(it.Get(), true);
- }
- }
-
return SYNCER_OK;
}
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index d92cc0e..df41960 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -297,10 +297,6 @@ class SyncerTest : public testing::Test,
EXPECT_FALSE(client_status.has_hierarchy_conflict_detected());
}
- bool initial_sync_ended_for_type(ModelType type) {
- return directory()->initial_sync_ended_for_type(type);
- }
-
void SyncRepeatedlyToTriggerConflictResolution(SyncSession* session) {
// We should trigger after less than 6 syncs, but extra does no harm.
for (int i = 0 ; i < 6 ; ++i)
@@ -3952,8 +3948,6 @@ TEST_F(SyncerTest, UpdateFailsThenDontCommit) {
// Downloads two updates and applies them successfully.
// This is the "happy path" alternative to ConfigureFailsDontApplyUpdates.
TEST_F(SyncerTest, ConfigureDownloadsTwoBatchesSuccess) {
- EXPECT_FALSE(initial_sync_ended_for_type(BOOKMARKS));
-
syncable::Id node1 = ids_.NewServerId();
syncable::Id node2 = ids_.NewServerId();
@@ -3977,14 +3971,10 @@ TEST_F(SyncerTest, ConfigureDownloadsTwoBatchesSuccess) {
Entry n2(&trans, GET_BY_ID, node2);
ASSERT_TRUE(n2.good());
EXPECT_FALSE(n2.Get(IS_UNAPPLIED_UPDATE));
-
- EXPECT_TRUE(initial_sync_ended_for_type(BOOKMARKS));
}
// Same as the above case, but this time the second batch fails to download.
TEST_F(SyncerTest, ConfigureFailsDontApplyUpdates) {
- EXPECT_FALSE(initial_sync_ended_for_type(BOOKMARKS));
-
syncable::Id node1 = ids_.NewServerId();
syncable::Id node2 = ids_.NewServerId();
@@ -4017,8 +4007,6 @@ TEST_F(SyncerTest, ConfigureFailsDontApplyUpdates) {
// One update remains undownloaded.
mock_server_->ClearUpdatesQueue();
-
- EXPECT_FALSE(initial_sync_ended_for_type(BOOKMARKS));
}
TEST_F(SyncerTest, GetKeySuccess) {
diff --git a/sync/internal_api/js_sync_manager_observer_unittest.cc b/sync/internal_api/js_sync_manager_observer_unittest.cc
index 3668400..ce09797 100644
--- a/sync/internal_api/js_sync_manager_observer_unittest.cc
+++ b/sync/internal_api/js_sync_manager_observer_unittest.cc
@@ -77,8 +77,6 @@ TEST_F(JsSyncManagerObserverTest, OnInitializationComplete) {
TEST_F(JsSyncManagerObserverTest, OnSyncCycleCompleted) {
sessions::SyncSessionSnapshot snapshot(
sessions::ModelNeutralState(),
- false,
- ModelTypeSet(),
ProgressMarkerMap(),
false,
5,
diff --git a/sync/internal_api/public/engine/sync_status.cc b/sync/internal_api/public/engine/sync_status.cc
index f4037da..6291b29 100644
--- a/sync/internal_api/public/engine/sync_status.cc
+++ b/sync/internal_api/public/engine/sync_status.cc
@@ -14,7 +14,6 @@ SyncStatus::SyncStatus()
server_conflicts(0),
committed_count(0),
syncing(false),
- initial_sync_ended(false),
updates_available(0),
updates_received(0),
reflected_updates_received(0),
diff --git a/sync/internal_api/public/engine/sync_status.h b/sync/internal_api/public/engine/sync_status.h
index 5afc356..31cbf4c 100644
--- a/sync/internal_api/public/engine/sync_status.h
+++ b/sync/internal_api/public/engine/sync_status.h
@@ -48,8 +48,6 @@ struct SYNC_EXPORT SyncStatus {
int committed_count;
bool syncing;
- // True after a client has done a first sync.
- bool initial_sync_ended;
// Total updates available. If zero, nothing left to download.
int64 updates_available;
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot.cc b/sync/internal_api/public/sessions/sync_session_snapshot.cc
index d74ffa7..b0e9fac 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot.cc
+++ b/sync/internal_api/public/sessions/sync_session_snapshot.cc
@@ -12,8 +12,7 @@ namespace syncer {
namespace sessions {
SyncSessionSnapshot::SyncSessionSnapshot()
- : is_share_usable_(false),
- is_silenced_(false),
+ : is_silenced_(false),
num_encryption_conflicts_(0),
num_hierarchy_conflicts_(0),
num_server_conflicts_(0),
@@ -26,8 +25,6 @@ SyncSessionSnapshot::SyncSessionSnapshot()
SyncSessionSnapshot::SyncSessionSnapshot(
const ModelNeutralState& model_neutral_state,
- bool is_share_usable,
- ModelTypeSet initial_sync_ended,
const ProgressMarkerMap& download_progress_markers,
bool is_silenced,
int num_encryption_conflicts,
@@ -41,8 +38,6 @@ SyncSessionSnapshot::SyncSessionSnapshot(
const std::vector<int>& num_entries_by_type,
const std::vector<int>& num_to_delete_entries_by_type)
: model_neutral_state_(model_neutral_state),
- is_share_usable_(is_share_usable),
- initial_sync_ended_(initial_sync_ended),
download_progress_markers_(download_progress_markers),
is_silenced_(is_silenced),
num_encryption_conflicts_(num_encryption_conflicts),
@@ -79,9 +74,6 @@ DictionaryValue* SyncSessionSnapshot::ToValue() const {
value->SetInteger(
"numServerChangesRemaining",
static_cast<int>(model_neutral_state_.num_server_changes_remaining));
- value->SetBoolean("isShareUsable", is_share_usable_);
- value->Set("initialSyncEnded",
- ModelTypeSetToValue(initial_sync_ended_));
value->Set("downloadProgressMarkers",
ProgressMarkerMapToValue(download_progress_markers_).release());
value->SetBoolean("isSilenced", is_silenced_);
@@ -130,14 +122,6 @@ int64 SyncSessionSnapshot::num_server_changes_remaining() const {
return model_neutral_state().num_server_changes_remaining;
}
-bool SyncSessionSnapshot::is_share_usable() const {
- return is_share_usable_;
-}
-
-ModelTypeSet SyncSessionSnapshot::initial_sync_ended() const {
- return initial_sync_ended_;
-}
-
const ProgressMarkerMap&
SyncSessionSnapshot::download_progress_markers() const {
return download_progress_markers_;
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot.h b/sync/internal_api/public/sessions/sync_session_snapshot.h
index 25195a7..be29d35 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot.h
+++ b/sync/internal_api/public/sessions/sync_session_snapshot.h
@@ -32,8 +32,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
SyncSessionSnapshot();
SyncSessionSnapshot(
const ModelNeutralState& model_neutral_state,
- bool is_share_usable,
- ModelTypeSet initial_sync_ended,
const ProgressMarkerMap& download_progress_markers,
bool is_silenced,
int num_encryption_conflicts,
@@ -57,8 +55,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
return model_neutral_state_;
}
int64 num_server_changes_remaining() const;
- bool is_share_usable() const;
- ModelTypeSet initial_sync_ended() const;
const ProgressMarkerMap& download_progress_markers() const;
bool is_silenced() const;
int num_encryption_conflicts() const;
@@ -77,8 +73,6 @@ class SYNC_EXPORT SyncSessionSnapshot {
private:
ModelNeutralState model_neutral_state_;
- bool is_share_usable_;
- ModelTypeSet initial_sync_ended_;
ProgressMarkerMap download_progress_markers_;
bool is_silenced_;
int num_encryption_conflicts_;
diff --git a/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc b/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
index 5a97a16..9301ffd 100644
--- a/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
+++ b/sync/internal_api/public/sessions/sync_session_snapshot_unittest.cc
@@ -33,12 +33,6 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
model_neutral.num_local_overwrites = 15;
model_neutral.num_server_overwrites = 18;
- const bool kIsShareUsable = true;
-
- const ModelTypeSet initial_sync_ended(BOOKMARKS, PREFERENCES);
- scoped_ptr<ListValue> expected_initial_sync_ended_value(
- ModelTypeSetToValue(initial_sync_ended));
-
ProgressMarkerMap download_progress_markers;
download_progress_markers[BOOKMARKS] = "test";
download_progress_markers[APPS] = "apps";
@@ -59,8 +53,6 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
expected_sources_list_value->Append(source.ToValue());
SyncSessionSnapshot snapshot(model_neutral,
- kIsShareUsable,
- initial_sync_ended,
download_progress_markers,
kIsSilenced,
kNumEncryptionConflicts,
@@ -74,7 +66,7 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
std::vector<int>(MODEL_TYPE_COUNT,0),
std::vector<int>(MODEL_TYPE_COUNT, 0));
scoped_ptr<DictionaryValue> value(snapshot.ToValue());
- EXPECT_EQ(20u, value->size());
+ EXPECT_EQ(18u, value->size());
ExpectDictIntegerValue(model_neutral.num_successful_commits,
*value, "numSuccessfulCommits");
ExpectDictIntegerValue(model_neutral.num_successful_bookmark_commits,
@@ -91,9 +83,6 @@ TEST_F(SyncSessionSnapshotTest, SyncSessionSnapshotToValue) {
*value, "numServerOverwrites");
ExpectDictIntegerValue(model_neutral.num_server_changes_remaining,
*value, "numServerChangesRemaining");
- ExpectDictBooleanValue(kIsShareUsable, *value, "isShareUsable");
- ExpectDictListValue(*expected_initial_sync_ended_value, *value,
- "initialSyncEnded");
ExpectDictDictionaryValue(*expected_download_progress_markers_value,
*value, "downloadProgressMarkers");
ExpectDictBooleanValue(kIsSilenced, *value, "isSilenced");
diff --git a/sync/internal_api/public/test/test_entry_factory.h b/sync/internal_api/public/test/test_entry_factory.h
index d5bc911..071a501 100644
--- a/sync/internal_api/public/test/test_entry_factory.h
+++ b/sync/internal_api/public/test/test_entry_factory.h
@@ -56,6 +56,11 @@ class TestEntryFactory {
int64 CreateSyncedItem(const std::string& name,
ModelType model_type, bool is_folder);
+ // Creates a root node that IS_UNAPPLIED. Similar to what one would find in
+ // the database between the ProcessUpdates of an initial datatype configure
+ // cycle and the ApplyUpdates step of the same sync cycle.
+ int64 CreateUnappliedRootNode(ModelType model_type);
+
// Looks up the item referenced by |meta_handle|. If successful, overwrites
// the server specifics with |specifics|, sets
// IS_UNAPPLIED_UPDATES/IS_UNSYNCED appropriately, and returns true.
diff --git a/sync/internal_api/sync_manager_impl.cc b/sync/internal_api/sync_manager_impl.cc
index ddab0bc..fe5e33b 100644
--- a/sync/internal_api/sync_manager_impl.cc
+++ b/sync/internal_api/sync_manager_impl.cc
@@ -277,7 +277,7 @@ void SyncManagerImpl::ThrowUnrecoverableError() {
}
ModelTypeSet SyncManagerImpl::InitialSyncEndedTypes() {
- return directory()->initial_sync_ended_types();
+ return directory()->InitialSyncEndedTypes();
}
ModelTypeSet SyncManagerImpl::GetTypesWithEmptyProgressMarkerToken(
@@ -572,6 +572,8 @@ bool SyncManagerImpl::PurgePartiallySyncedTypes() {
partially_synced_types.RemoveAll(GetTypesWithEmptyProgressMarkerToken(
ModelTypeSet::All()));
+ DVLOG(1) << "Purging partially synced types "
+ << ModelTypeSetToString(partially_synced_types);
UMA_HISTOGRAM_COUNTS("Sync.PartiallySyncedTypes",
partially_synced_types.Size());
if (partially_synced_types.Empty())
diff --git a/sync/internal_api/sync_manager_impl_unittest.cc b/sync/internal_api/sync_manager_impl_unittest.cc
index e77eb71..edacea0 100644
--- a/sync/internal_api/sync_manager_impl_unittest.cc
+++ b/sync/internal_api/sync_manager_impl_unittest.cc
@@ -32,6 +32,7 @@
#include "sync/internal_api/public/http_post_provider_interface.h"
#include "sync/internal_api/public/read_node.h"
#include "sync/internal_api/public/read_transaction.h"
+#include "sync/internal_api/public/test/test_entry_factory.h"
#include "sync/internal_api/public/test/test_internal_components_factory.h"
#include "sync/internal_api/public/test/test_user_share.h"
#include "sync/internal_api/public/write_node.h"
@@ -60,6 +61,7 @@
#include "sync/syncable/entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/nigori_util.h"
+#include "sync/syncable/read_transaction.h"
#include "sync/syncable/syncable_id.h"
#include "sync/syncable/write_transaction.h"
#include "sync/test/callback_counter.h"
@@ -84,11 +86,12 @@ using testing::StrictMock;
namespace syncer {
using sessions::SyncSessionSnapshot;
+using syncable::GET_BY_HANDLE;
using syncable::IS_DEL;
using syncable::IS_UNSYNCED;
-using syncable::kEncryptedString;
using syncable::NON_UNIQUE_NAME;
using syncable::SPECIFICS;
+using syncable::kEncryptedString;
namespace {
@@ -888,7 +891,6 @@ class SyncManagerTest : public testing::Test,
bool SetUpEncryption(NigoriStatus nigori_status,
EncryptionStatus encryption_status) {
UserShare* share = sync_manager_.GetUserShare();
- share->directory->set_initial_sync_ended_for_type(NIGORI, true);
// We need to create the nigori node as if it were an applied server update.
int64 nigori_id = GetIdForDataType(NIGORI);
@@ -1012,10 +1014,6 @@ class SyncManagerTest : public testing::Test,
}
}
- void SetInitialSyncEndedForType(ModelType type, bool value) {
- sync_manager_.directory()->set_initial_sync_ended_for_type(type, value);
- }
-
InternalComponentsFactory::Switches GetSwitches() const {
return switches_;
}
@@ -2855,7 +2853,6 @@ TEST_F(SyncManagerTestWithMockScheduler, MAYBE_BasicConfiguration) {
for (ModelTypeSet::Iterator iter = ModelTypeSet::All().First(); iter.Good();
iter.Inc()) {
SetProgressMarkerForType(iter.Get(), true);
- SetInitialSyncEndedForType(iter.Get(), true);
}
CallbackCounter ready_task_counter, retry_task_counter;
@@ -2906,10 +2903,8 @@ TEST_F(SyncManagerTestWithMockScheduler, ReConfiguration) {
iter.Inc()) {
if (!disabled_types.Has(iter.Get())) {
SetProgressMarkerForType(iter.Get(), true);
- SetInitialSyncEndedForType(iter.Get(), true);
} else {
SetProgressMarkerForType(iter.Get(), false);
- SetInitialSyncEndedForType(iter.Get(), false);
}
}
@@ -2933,8 +2928,6 @@ TEST_F(SyncManagerTestWithMockScheduler, ReConfiguration) {
EXPECT_EQ(new_routing_info, params.routing_info);
// Verify only the recently disabled types were purged.
- EXPECT_TRUE(sync_manager_.InitialSyncEndedTypes().Equals(
- Difference(ModelTypeSet::All(), disabled_types)));
EXPECT_TRUE(sync_manager_.GetTypesWithEmptyProgressMarkerToken(
ModelTypeSet::All()).Equals(disabled_types));
}
@@ -2968,51 +2961,81 @@ TEST_F(SyncManagerTestWithMockScheduler, ConfigurationRetry) {
EXPECT_EQ(new_routing_info, params.routing_info);
}
-// Test that PurgePartiallySyncedTypes purges only those types that don't
-// have empty progress marker and don't have initial sync ended set.
+// Test that PurgePartiallySyncedTypes purges only those types that have not
+// fully completed their initial download and apply.
TEST_F(SyncManagerTest, PurgePartiallySyncedTypes) {
+ ModelSafeRoutingInfo routing_info;
+ GetModelSafeRoutingInfo(&routing_info);
+ ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
+
UserShare* share = sync_manager_.GetUserShare();
- // Set Nigori and Bookmarks to be partial types.
- sync_pb::DataTypeProgressMarker nigori_marker;
- nigori_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(NIGORI));
- nigori_marker.set_token("token");
- sync_pb::DataTypeProgressMarker bookmark_marker;
- bookmark_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(BOOKMARKS));
- bookmark_marker.set_token("token");
- share->directory->SetDownloadProgress(NIGORI, nigori_marker);
- share->directory->SetDownloadProgress(BOOKMARKS, bookmark_marker);
-
- // Set Preferences to be a full type.
- sync_pb::DataTypeProgressMarker pref_marker;
- pref_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(PREFERENCES));
- pref_marker.set_token("token");
- share->directory->SetDownloadProgress(PREFERENCES, pref_marker);
- share->directory->set_initial_sync_ended_for_type(PREFERENCES, true);
+ // The test harness automatically initializes all types in the routing info.
+ // Check that autofill is not among them.
+ ASSERT_FALSE(enabled_types.Has(AUTOFILL));
- ModelTypeSet partial_types =
- sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All());
- EXPECT_FALSE(partial_types.Has(NIGORI));
- EXPECT_FALSE(partial_types.Has(BOOKMARKS));
- EXPECT_FALSE(partial_types.Has(PREFERENCES));
+ // Further ensure that the test harness did not create its root node.
+ {
+ syncable::ReadTransaction trans(FROM_HERE, share->directory.get());
+ syncable::Entry autofill_root_node(&trans, syncable::GET_BY_SERVER_TAG,
+ ModelTypeToRootTag(AUTOFILL));
+ ASSERT_FALSE(autofill_root_node.good());
+ }
+
+ // One more redundant check.
+ ASSERT_FALSE(sync_manager_.InitialSyncEndedTypes().Has(AUTOFILL));
+
+ // Give autofill a progress marker.
+ sync_pb::DataTypeProgressMarker autofill_marker;
+ autofill_marker.set_data_type_id(
+ GetSpecificsFieldNumberFromModelType(AUTOFILL));
+ autofill_marker.set_token("token");
+ share->directory->SetDownloadProgress(AUTOFILL, autofill_marker);
+
+ // Also add a pending autofill root node update from the server.
+ TestEntryFactory factory_(share->directory.get());
+ int autofill_meta = factory_.CreateUnappliedRootNode(AUTOFILL);
+
+ // Preferences is an enabled type. Check that the harness initialized it.
+ ASSERT_TRUE(enabled_types.Has(PREFERENCES));
+ ASSERT_TRUE(sync_manager_.InitialSyncEndedTypes().Has(PREFERENCES));
+ // Give preferences a progress marker.
+ sync_pb::DataTypeProgressMarker prefs_marker;
+ prefs_marker.set_data_type_id(
+ GetSpecificsFieldNumberFromModelType(PREFERENCES));
+ prefs_marker.set_token("token");
+ share->directory->SetDownloadProgress(PREFERENCES, prefs_marker);
+
+ // Add a fully synced preferences node under the root.
+ std::string pref_client_tag = "prefABC";
+ std::string pref_hashed_tag = "hashXYZ";
+ sync_pb::EntitySpecifics pref_specifics;
+ AddDefaultFieldValue(PREFERENCES, &pref_specifics);
+ int pref_meta = MakeServerNode(
+ share, PREFERENCES, pref_client_tag, pref_hashed_tag, pref_specifics);
+
+ // And now, the purge.
EXPECT_TRUE(sync_manager_.PurgePartiallySyncedTypes());
- // Ensure only bookmarks and nigori lost their progress marker. Preferences
- // should still have it.
- partial_types =
+ // Ensure that autofill lost its progress marker, but preferences did not.
+ ModelTypeSet empty_tokens =
sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All());
- EXPECT_TRUE(partial_types.Has(NIGORI));
- EXPECT_TRUE(partial_types.Has(BOOKMARKS));
- EXPECT_FALSE(partial_types.Has(PREFERENCES));
+ EXPECT_TRUE(empty_tokens.Has(AUTOFILL));
+ EXPECT_FALSE(empty_tokens.Has(PREFERENCES));
+
+ // Ensure that autofill lost its node, but preferences did not.
+ {
+ syncable::ReadTransaction trans(FROM_HERE, share->directory.get());
+ syncable::Entry autofill_node(&trans, GET_BY_HANDLE, autofill_meta);
+ syncable::Entry pref_node(&trans, GET_BY_HANDLE, pref_meta);
+ EXPECT_FALSE(autofill_node.good());
+ EXPECT_TRUE(pref_node.good());
+ }
}
// Test CleanupDisabledTypes properly purges all disabled types as specified
-// by the previous and current enabled params. Enabled partial types should not
-// be purged.
+// by the previous and current enabled params.
// Fails on Windows: crbug.com/139726
#if defined(OS_WIN)
#define MAYBE_PurgeDisabledTypes DISABLED_PurgeDisabledTypes
@@ -3024,21 +3047,20 @@ TEST_F(SyncManagerTest, MAYBE_PurgeDisabledTypes) {
GetModelSafeRoutingInfo(&routing_info);
ModelTypeSet enabled_types = GetRoutingInfoTypes(routing_info);
ModelTypeSet disabled_types = Difference(ModelTypeSet::All(), enabled_types);
- ModelTypeSet partial_enabled_types(PASSWORDS);
- // Set data for all non-partial types.
+ // The harness should have initialized the enabled_types for us.
+ EXPECT_TRUE(enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
+
+ // Set progress markers for all types.
for (ModelTypeSet::Iterator iter = ModelTypeSet::All().First(); iter.Good();
iter.Inc()) {
SetProgressMarkerForType(iter.Get(), true);
- if (!partial_enabled_types.Has(iter.Get()))
- SetInitialSyncEndedForType(iter.Get(), true);
}
// Verify all the enabled types remain after cleanup, and all the disabled
// types were purged.
sync_manager_.PurgeDisabledTypes(ModelTypeSet::All(), enabled_types);
- EXPECT_TRUE(enabled_types.Equals(
- Union(sync_manager_.InitialSyncEndedTypes(), partial_enabled_types)));
+ EXPECT_TRUE(enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
EXPECT_TRUE(disabled_types.Equals(
sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All())));
@@ -3050,8 +3072,7 @@ TEST_F(SyncManagerTest, MAYBE_PurgeDisabledTypes) {
// Verify only the non-disabled types remain after cleanup.
sync_manager_.PurgeDisabledTypes(enabled_types, new_enabled_types);
- EXPECT_TRUE(new_enabled_types.Equals(
- Union(sync_manager_.InitialSyncEndedTypes(), partial_enabled_types)));
+ EXPECT_TRUE(new_enabled_types.Equals(sync_manager_.InitialSyncEndedTypes()));
EXPECT_TRUE(disabled_types.Equals(
sync_manager_.GetTypesWithEmptyProgressMarkerToken(ModelTypeSet::All())));
}
diff --git a/sync/internal_api/test/test_entry_factory.cc b/sync/internal_api/test/test_entry_factory.cc
index 3cf0dcf..f578412 100644
--- a/sync/internal_api/test/test_entry_factory.cc
+++ b/sync/internal_api/test/test_entry_factory.cc
@@ -164,6 +164,28 @@ int64 TestEntryFactory::CreateSyncedItem(
return entry.Get(syncable::META_HANDLE);
}
+int64 TestEntryFactory::CreateUnappliedRootNode(
+ ModelType model_type) {
+ syncable::WriteTransaction trans(FROM_HERE, syncable::UNITTEST, directory_);
+ sync_pb::EntitySpecifics specifics;
+ AddDefaultFieldValue(model_type, &specifics);
+ syncable::Id node_id = TestIdFactory::MakeServer("xyz");
+ syncable::MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
+ node_id);
+ DCHECK(entry.good());
+ // Make it look sort of like a pending creation from the server.
+ // The SERVER_PARENT_ID and UNIQUE_CLIENT_TAG aren't quite right, but
+ // it's good enough for our purposes.
+ entry.Put(syncable::SERVER_VERSION, 1);
+ entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
+ entry.Put(syncable::SERVER_IS_DIR, false);
+ entry.Put(syncable::SERVER_PARENT_ID, TestIdFactory::root());
+ entry.Put(syncable::SERVER_SPECIFICS, specifics);
+ entry.Put(syncable::NON_UNIQUE_NAME, "xyz");
+
+ return entry.Get(syncable::META_HANDLE);
+}
+
bool TestEntryFactory::SetServerSpecificsForItem(
int64 meta_handle,
const sync_pb::EntitySpecifics specifics) {
diff --git a/sync/internal_api/test/test_user_share.cc b/sync/internal_api/test/test_user_share.cc
index 5c7856e..a6147c4 100644
--- a/sync/internal_api/test/test_user_share.cc
+++ b/sync/internal_api/test/test_user_share.cc
@@ -10,6 +10,7 @@
#include "sync/syncable/write_transaction.h"
#include "sync/test/engine/test_directory_setter_upper.h"
#include "sync/test/engine/test_id_factory.h"
+#include "sync/test/engine/test_syncable_utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
@@ -53,27 +54,8 @@ syncable::TestTransactionObserver* TestUserShare::transaction_observer() {
/* static */
bool TestUserShare::CreateRoot(ModelType model_type, UserShare* user_share) {
syncer::syncable::Directory* directory = user_share->directory.get();
-
- std::string tag_name = syncer::ModelTypeToRootTag(model_type);
-
syncable::WriteTransaction wtrans(FROM_HERE, syncable::UNITTEST, directory);
- syncable::MutableEntry node(&wtrans,
- syncable::CREATE,
- wtrans.root_id(),
- tag_name);
- node.Put(syncable::UNIQUE_SERVER_TAG, tag_name);
- node.Put(syncable::IS_DIR, true);
- node.Put(syncable::SERVER_IS_DIR, false);
- node.Put(syncable::IS_UNSYNCED, false);
- node.Put(syncable::IS_UNAPPLIED_UPDATE, false);
- node.Put(syncable::SERVER_VERSION, 20);
- node.Put(syncable::BASE_VERSION, 20);
- node.Put(syncable::IS_DEL, false);
- node.Put(syncable::ID, syncer::TestIdFactory::MakeServer(tag_name));
- sync_pb::EntitySpecifics specifics;
- syncer::AddDefaultFieldValue(model_type, &specifics);
- node.Put(syncable::SPECIFICS, specifics);
-
+ CreateTypeRoot(&wtrans, directory, model_type);
return true;
}
diff --git a/sync/sessions/sync_session.cc b/sync/sessions/sync_session.cc
index 0732329..f9241d9 100644
--- a/sync/sessions/sync_session.cc
+++ b/sync/sessions/sync_session.cc
@@ -150,17 +150,9 @@ void SyncSession::RebaseRoutingInfoWithLatest(
SyncSessionSnapshot SyncSession::TakeSnapshot() const {
syncable::Directory* dir = context_->directory();
- bool is_share_useable = true;
- ModelTypeSet initial_sync_ended;
ProgressMarkerMap download_progress_markers;
for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
ModelType type(ModelTypeFromInt(i));
- if (routing_info_.count(type) != 0) {
- if (dir->initial_sync_ended_for_type(type))
- initial_sync_ended.Put(type);
- else
- is_share_useable = false;
- }
dir->GetDownloadProgressAsString(type, &download_progress_markers[type]);
}
@@ -171,8 +163,6 @@ SyncSessionSnapshot SyncSession::TakeSnapshot() const {
SyncSessionSnapshot snapshot(
status_controller_->model_neutral_state(),
- is_share_useable,
- initial_sync_ended,
download_progress_markers,
delegate_->IsSyncingCurrentlySilenced(),
status_controller_->num_encryption_conflicts(),
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index 21e05eb..82eb709c 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -596,7 +596,6 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types) {
// Ensure meta tracking for these data types reflects the deleted state.
for (ModelTypeSet::Iterator it = types.First();
it.Good(); it.Inc()) {
- set_initial_sync_ended_for_type_unsafe(it.Get(), false);
kernel_->persisted_info.reset_download_progress(it.Get());
kernel_->persisted_info.transaction_version[it.Get()] = 0;
}
@@ -667,14 +666,30 @@ void Directory::IncrementTransactionVersion(ModelType type) {
kernel_->persisted_info.transaction_version[type]++;
}
-ModelTypeSet Directory::initial_sync_ended_types() const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.initial_sync_ended;
+ModelTypeSet Directory::InitialSyncEndedTypes() {
+ syncable::ReadTransaction trans(FROM_HERE, this);
+ const ModelTypeSet all_types = ModelTypeSet::All();
+ ModelTypeSet initial_sync_ended_types;
+ for (ModelTypeSet::Iterator i = all_types.First(); i.Good(); i.Inc()) {
+ if (InitialSyncEndedForType(&trans, i.Get())) {
+ initial_sync_ended_types.Put(i.Get());
+ }
+ }
+ return initial_sync_ended_types;
}
-bool Directory::initial_sync_ended_for_type(ModelType type) const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.initial_sync_ended.Has(type);
+bool Directory::InitialSyncEndedForType(ModelType type) {
+ syncable::ReadTransaction trans(FROM_HERE, this);
+ return InitialSyncEndedForType(&trans, type);
+}
+
+bool Directory::InitialSyncEndedForType(
+ BaseTransaction* trans, ModelType type) {
+ // True iff the type's root node has been received and applied.
+ syncable::Entry entry(trans,
+ syncable::GET_BY_SERVER_TAG,
+ ModelTypeToRootTag(type));
+ return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}
template <class T> void Directory::TestAndSet(
@@ -685,23 +700,6 @@ template <class T> void Directory::TestAndSet(
}
}
-void Directory::set_initial_sync_ended_for_type(ModelType type, bool x) {
- ScopedKernelLock lock(this);
- set_initial_sync_ended_for_type_unsafe(type, x);
-}
-
-void Directory::set_initial_sync_ended_for_type_unsafe(ModelType type,
- bool x) {
- if (kernel_->persisted_info.initial_sync_ended.Has(type) == x)
- return;
- if (x) {
- kernel_->persisted_info.initial_sync_ended.Put(type);
- } else {
- kernel_->persisted_info.initial_sync_ended.Remove(type);
- }
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
void Directory::SetNotificationStateUnsafe(
const std::string& notification_state) {
if (notification_state == kernel_->persisted_info.notification_state)
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index 2b0bd3c4..0eb0ae7 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -169,8 +169,6 @@ class SYNC_EXPORT Directory {
// TODO(hatiaol): implement detection and fixing of out-of-sync models.
// Bug 154858.
int64 transaction_version[MODEL_TYPE_COUNT];
- // true iff we ever reached the end of the changelog.
- ModelTypeSet initial_sync_ended;
// The store birthday we were given by the server. Contents are opaque to
// the client.
std::string store_birthday;
@@ -265,9 +263,9 @@ class SYNC_EXPORT Directory {
int64 GetTransactionVersion(ModelType type) const;
void IncrementTransactionVersion(ModelType type);
- ModelTypeSet initial_sync_ended_types() const;
- bool initial_sync_ended_for_type(ModelType type) const;
- void set_initial_sync_ended_for_type(ModelType type, bool value);
+ ModelTypeSet InitialSyncEndedTypes();
+ bool InitialSyncEndedForType(ModelType type);
+ bool InitialSyncEndedForType(BaseTransaction* trans, ModelType type);
const std::string& name() const { return kernel_->name; }
@@ -491,7 +489,6 @@ class SYNC_EXPORT Directory {
// Internal setters that do not acquire a lock internally. These are unsafe
// on their own; caller must guarantee exclusive access manually by holding
// a ScopedKernelLock.
- void set_initial_sync_ended_for_type_unsafe(ModelType type, bool x);
void SetNotificationStateUnsafe(const std::string& notification_state);
Directory& operator = (const Directory&);
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index d072011..a609527 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -40,7 +40,7 @@ static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
-const int32 kCurrentDBVersion = 84;
+const int32 kCurrentDBVersion = 85;
// Iterate over the fields of |entry| and bind each to |statement| for
// updating. Returns the number of args bound.
@@ -197,8 +197,11 @@ bool DirectoryBackingStore::SaveChanges(
// Back out early if there is nothing to write.
bool save_info =
(Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
- if (snapshot.dirty_metas.size() < 1 && !save_info)
+ if (snapshot.dirty_metas.empty()
+ && snapshot.metahandles_to_purge.empty()
+ && !save_info) {
return true;
+ }
sql::Transaction transaction(db_.get());
if (!transaction.Begin())
@@ -236,9 +239,8 @@ bool DirectoryBackingStore::SaveChanges(
sql::Statement s2(db_->GetCachedStatement(
SQL_FROM_HERE,
"INSERT OR REPLACE "
- "INTO models (model_id, progress_marker, initial_sync_ended, "
- " transaction_version) "
- "VALUES (?, ?, ?, ?)"));
+ "INTO models (model_id, progress_marker, transaction_version) "
+ "VALUES (?, ?, ?)"));
for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
// We persist not ModelType but rather a protobuf-derived ID.
@@ -247,8 +249,7 @@ bool DirectoryBackingStore::SaveChanges(
info.download_progress[i].SerializeToString(&progress_marker);
s2.BindBlob(0, model_id.data(), model_id.length());
s2.BindBlob(1, progress_marker.data(), progress_marker.length());
- s2.BindBool(2, info.initial_sync_ended.Has(ModelTypeFromInt(i)));
- s2.BindInt64(3, info.transaction_version[i]);
+ s2.BindInt64(2, info.transaction_version[i]);
if (!s2.Run())
return false;
DCHECK_EQ(db_->GetLastChangeCount(), 1);
@@ -370,6 +371,12 @@ bool DirectoryBackingStore::InitializeTables() {
version_on_disk = 84;
}
+ // Version 85 migration removes the initial_sync_ended bits.
+ if (version_on_disk == 84) {
+ if (MigrateVersion84To85())
+ version_on_disk = 85;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -379,13 +386,6 @@ bool DirectoryBackingStore::InitializeTables() {
}
// A final, alternative catch-all migration to simply re-sync everything.
- //
- // TODO(rlarocque): It's wrong to recreate the database here unless the higher
- // layers were expecting us to do so. See crbug.com/103824. We must leave
- // this code as is for now because this is the code that ends up creating the
- // database in the first time sync case, where the higher layers are expecting
- // us to create a fresh database. The solution to this should be to implement
- // crbug.com/105018.
if (version_on_disk != kCurrentDBVersion) {
if (version_on_disk > kCurrentDBVersion)
return false;
@@ -508,7 +508,7 @@ bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
{
sql::Statement s(
db_->GetUniqueStatement(
- "SELECT model_id, progress_marker, initial_sync_ended, "
+ "SELECT model_id, progress_marker, "
"transaction_version FROM models"));
while (s.Step()) {
@@ -517,9 +517,7 @@ bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
info->kernel_info.download_progress[type].ParseFromArray(
s.ColumnBlob(1), s.ColumnByteLength(1));
- if (s.ColumnBool(2))
- info->kernel_info.initial_sync_ended.Put(type);
- info->kernel_info.transaction_version[type] = s.ColumnInt64(3);
+ info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
}
}
if (!s.Succeeded())
@@ -914,7 +912,7 @@ bool DirectoryBackingStore::MigrateVersion74To75() {
// Move aside the old table and create a new empty one at the current schema.
if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
return false;
- if (!CreateModelsTable())
+ if (!CreateV75ModelsTable())
return false;
sql::Statement query(db_->GetUniqueStatement(
@@ -1065,14 +1063,6 @@ bool DirectoryBackingStore::MigrateVersion80To81() {
}
bool DirectoryBackingStore::MigrateVersion81To82() {
- // Version 82 added transaction_version to kernel info. But if user is
- // migrating from 74 or before, 74->75 migration would recreate models table
- // that already has transaction_version column.
- if (db_->DoesColumnExist("models", "transaction_version")) {
- SetVersion(82);
- return true;
- }
-
if (!db_->Execute(
"ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
return false;
@@ -1104,10 +1094,28 @@ bool DirectoryBackingStore::MigrateVersion83To84() {
query.append(ComposeCreateTableColumnSpecs());
if (!db_->Execute(query.c_str()))
return false;
+
SetVersion(84);
return true;
}
+bool DirectoryBackingStore::MigrateVersion84To85() {
+ // Version 85 removes the initial_sync_ended flag.
+ if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
+ return false;
+ if (!CreateModelsTable())
+ return false;
+ if (!db_->Execute("INSERT INTO models SELECT "
+ "model_id, progress_marker, transaction_version "
+ "FROM temp_models")) {
+ return false;
+ }
+ SafeDropTable("temp_models");
+
+ SetVersion(85);
+ return true;
+}
+
bool DirectoryBackingStore::CreateTables() {
DVLOG(1) << "First run, creating tables";
// Create two little tables share_version and share_info
@@ -1213,10 +1221,22 @@ bool DirectoryBackingStore::CreateV71ModelsTable() {
"initial_sync_ended BOOLEAN default 0)");
}
+bool DirectoryBackingStore::CreateV75ModelsTable() {
+ // This is an old schema for the Models table, used from versions 75 to 80.
+ return db_->Execute(
+ "CREATE TABLE models ("
+ "model_id BLOB primary key, "
+ "progress_marker BLOB, "
+ // Gets set if the syncer ever gets updates from the
+ // server and the server returns 0. Lets us detect the
+ // end of the initial sync.
+ "initial_sync_ended BOOLEAN default 0)");
+}
+
bool DirectoryBackingStore::CreateModelsTable() {
// This is the current schema for the Models table, from version 81
// onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v74-v75 migration.
+ // the use of this function in the v84-v85 migration.
return db_->Execute(
"CREATE TABLE models ("
"model_id BLOB primary key, "
@@ -1224,7 +1244,6 @@ bool DirectoryBackingStore::CreateModelsTable() {
// Gets set if the syncer ever gets updates from the
// server and the server returns 0. Lets us detect the
// end of the initial sync.
- "initial_sync_ended BOOLEAN default 0, "
"transaction_version BIGINT default 0)");
}
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
index 4b604ab..8170733 100644
--- a/sync/syncable/directory_backing_store.h
+++ b/sync/syncable/directory_backing_store.h
@@ -84,6 +84,7 @@ class DirectoryBackingStore : public base::NonThreadSafe {
bool CreateMetasTable(bool is_temporary);
bool CreateModelsTable();
bool CreateV71ModelsTable();
+ bool CreateV75ModelsTable();
// We don't need to load any synced and applied deleted entries, we can
// in fact just purge them forever on startup.
@@ -160,6 +161,7 @@ class DirectoryBackingStore : public base::NonThreadSafe {
bool MigrateVersion81To82();
bool MigrateVersion82To83();
bool MigrateVersion83To84();
+ bool MigrateVersion84To85();
scoped_ptr<sql::Connection> db_;
sql::Statement save_entry_statement_;
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
index fe8e923..1b89ae0 100644
--- a/sync/syncable/directory_backing_store_unittest.cc
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -68,9 +68,11 @@ class MigrationTest : public testing::TestWithParam<int> {
void SetUpVersion81Database(sql::Connection* connection);
void SetUpVersion82Database(sql::Connection* connection);
void SetUpVersion83Database(sql::Connection* connection);
+ void SetUpVersion84Database(sql::Connection* connection);
+ void SetUpVersion85Database(sql::Connection* connection);
void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
- SetUpVersion77Database(connection); // Prepopulates data.
+ SetUpVersion85Database(connection); // Prepopulates data.
scoped_ptr<TestDirectoryBackingStore> dbs(
new TestDirectoryBackingStore(GetUsername(), connection));
@@ -1968,7 +1970,7 @@ void MigrationTest::SetUpVersion82Database(sql::Connection* connection) {
ASSERT_TRUE(connection->BeginTransaction());
ASSERT_TRUE(connection->Execute(
"CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',81);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',82);"
"CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
"itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
"default 0);"
@@ -2074,7 +2076,7 @@ void MigrationTest::SetUpVersion83Database(sql::Connection* connection) {
ASSERT_TRUE(connection->BeginTransaction());
ASSERT_TRUE(connection->Execute(
"CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',81);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',83);"
"CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
"itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
"default 0);"
@@ -2091,6 +2093,251 @@ void MigrationTest::SetUpVersion83Database(sql::Connection* connection) {
"server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
"e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
"har,specifics blob,server_specifics blob, base_server_specifics BLOB"
+ ", server_ordinal_in_parent blob, transaction_version bigint default "
+ "0);"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
+ "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
+ "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
+ "blob);"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+ "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
+ "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
+
+ const char* insert_stmts[V80_ROW_COUNT] = {
+ "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
+ "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(2,669,669,4,"
+ META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
+ "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
+ "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
+ "14447414447414447',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(4,681,681,3,"
+ META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
+ "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
+ ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
+ "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
+ "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
+ "E2F77656C636F6D652E68746D6C1200',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(5,677,677,7,"
+ META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
+ "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
+ "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
+ "ULL,?,0);",
+ "INSERT INTO 'metas' VALUES(6,694,694,6,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
+ ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
+ ",NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(7,663,663,0,"
+ META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
+ "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?,0);"
+ "",
+ "INSERT INTO 'metas' VALUES(8,664,664,0,"
+ META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
+ ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
+ "00',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(9,665,665,1,"
+ META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
+ ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
+ "000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(10,666,666,2,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
+ "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
+ "LL,X'C2881000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(11,683,683,8,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
+ ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
+ "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
+ "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
+ "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(12,685,685,9,"
+ META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
+ "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
+ "2881000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(13,687,687,10,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
+ "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
+ "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
+ "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
+ "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
+ "E6963616E6E2E636F6D2F120744414146415346',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(14,692,692,11,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
+ ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
+ "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
+ "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
+ "81205504E473259',NULL,?,0);" };
+
+ for (int i = 0; i < V80_ROW_COUNT; i++) {
+ sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
+ std::string ord = V81_Ordinal(i);
+ s.BindBlob(0, ord.data(), ord.length());
+ ASSERT_TRUE(s.Run());
+ s.Reset(true);
+ }
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
+void MigrationTest::SetUpVersion84Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',84);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
+ "itial_sync_ended BOOLEAN default 0, transaction_version BIGINT "
+ "default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1, 1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0, "
+ "local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
+ ", server_ordinal_in_parent blob, transaction_version bigint default "
+ "0);"
+ "CREATE TABLE 'deleted_metas'"
+ "(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0, "
+ "local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
+ ", server_ordinal_in_parent blob, transaction_verion bigint default 0"
+ ");"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
+ "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
+ "ult -2, cache_guid TEXT , notification_state BLOB, bag_of_chips "
+ "blob);"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+ "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,"
+ "-131078,'9010788312004066376x-6609234393368420856x',NULL, NULL);"));
+
+ const char* insert_stmts[V80_ROW_COUNT] = {
+ "INSERT INTO 'metas' VALUES(1,-1,0,0," META_PROTO_TIMES_VALS(1) ",'r','"
+ "r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(2,669,669,4,"
+ META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
+ "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
+ "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
+ "14447414447414447',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(4,681,681,3,"
+ META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
+ "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
+ ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
+ "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
+ "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
+ "E2F77656C636F6D652E68746D6C1200',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(5,677,677,7,"
+ META_PROTO_TIMES_VALS(5) ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_"
+ "5',0,0,1,0,0,1,'Google','Google',NULL,NULL,X'C28810220A16687474703A2"
+ "F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464447415347',N"
+ "ULL,?,0);",
+ "INSERT INTO 'metas' VALUES(6,694,694,6,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
+ ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
+ ",NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(7,663,663,0,"
+ META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Goog"
+ "le Chrome','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,?,0);"
+ "",
+ "INSERT INTO 'metas' VALUES(8,664,664,0,"
+ META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1"
+ ",1,0,'Bookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C28810"
+ "00',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(9,665,665,1,"
+ META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0"
+ ",0,0,1,1,0,'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881"
+ "000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(10,666,666,2,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
+ "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
+ "LL,X'C2881000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(11,683,683,8,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
+ ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
+ "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
+ "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
+ "8726F6D69756D2E6F72672F6F7468657212084146414756415346',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(12,685,685,9,"
+ META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_"
+ "ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C"
+ "2881000',X'C2881000',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(13,687,687,10,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
+ "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
+ "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
+ "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
+ "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
+ "E6963616E6E2E636F6D2F120744414146415346',NULL,?,0);",
+ "INSERT INTO 'metas' VALUES(14,692,692,11,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
+ ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
+ "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
+ "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
+ "81205504E473259',NULL,?,0);" };
+
+ for (int i = 0; i < V80_ROW_COUNT; i++) {
+ sql::Statement s(connection->GetUniqueStatement(insert_stmts[i]));
+ std::string ord = V81_Ordinal(i);
+ s.BindBlob(0, ord.data(), ord.length());
+ ASSERT_TRUE(s.Run());
+ s.Reset(true);
+ }
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
+void MigrationTest::SetUpVersion85Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',85);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, "
+ "transaction_version BIGINT default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605', 1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0, "
+ "local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
+ ", server_ordinal_in_parent blob, transaction_version bigint default "
+ "0);"
+ "CREATE TABLE 'deleted_metas'"
+ "(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0, "
+ "local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob, base_server_specifics BLOB"
", server_ordinal_in_parent blob, transaction_verion bigint default 0"
");"
"CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
@@ -2632,6 +2879,19 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion83To84) {
ASSERT_TRUE(connection.DoesTableExist("deleted_metas"));
}
+TEST_F(DirectoryBackingStoreTest, MigrateVersion84To85) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion84Database(&connection);
+ ASSERT_TRUE(connection.DoesColumnExist("models", "initial_sync_ended"));
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_TRUE(dbs->MigrateVersion84To85());
+ ASSERT_EQ(85, dbs->GetVersion());
+ ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
+}
+
TEST_P(MigrationTest, ToCurrentVersion) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2687,6 +2947,9 @@ TEST_P(MigrationTest, ToCurrentVersion) {
case 83:
SetUpVersion83Database(&connection);
break;
+ case 84:
+ SetUpVersion84Database(&connection);
+ break;
default:
// If you see this error, it may mean that you've increased the
// database version number but you haven't finished adding unit tests
@@ -2772,6 +3035,12 @@ TEST_P(MigrationTest, ToCurrentVersion) {
// Column added in version 83.
ASSERT_TRUE(connection.DoesColumnExist("metas", "transaction_version"));
+ // Table added in version 84.
+ ASSERT_TRUE(connection.DoesTableExist("deleted_metas"));
+
+ // Column removed in version 85.
+ ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
+
// Check download_progress state (v75 migration)
ASSERT_EQ(694,
dir_info.kernel_info.download_progress[BOOKMARKS]
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index 2a089ac..077afcd 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -460,9 +460,9 @@ class SyncableDirectoryTest : public testing::Test {
ReadTransaction trans(FROM_HERE, dir_.get());
MetahandleSet all_set;
dir_->GetAllMetaHandles(&trans, &all_set);
- EXPECT_EQ(3U, all_set.size());
+ EXPECT_EQ(4U, all_set.size());
if (before_reload)
- EXPECT_EQ(4U, dir_->kernel_->metahandles_to_purge->size());
+ EXPECT_EQ(6U, dir_->kernel_->metahandles_to_purge->size());
for (MetahandleSet::iterator iter = all_set.begin();
iter != all_set.end(); ++iter) {
Entry e(&trans, GET_BY_HANDLE, *iter);
@@ -481,10 +481,10 @@ class SyncableDirectoryTest : public testing::Test {
for (ModelTypeSet::Iterator it = types_to_purge.First();
it.Good(); it.Inc()) {
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(it.Get()));
+ EXPECT_FALSE(dir_->InitialSyncEndedForType(it.Get()));
}
EXPECT_FALSE(types_to_purge.Has(BOOKMARKS));
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(BOOKMARKS));
+ EXPECT_TRUE(dir_->InitialSyncEndedForType(BOOKMARKS));
}
FakeEncryptor encryptor_;
@@ -1550,9 +1550,6 @@ TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
AddDefaultFieldValue(PREFERENCES, &preference_specs);
AddDefaultFieldValue(AUTOFILL, &autofill_specs);
- dir_->set_initial_sync_ended_for_type(BOOKMARKS, true);
- dir_->set_initial_sync_ended_for_type(PREFERENCES, true);
- dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
ModelTypeSet types_to_purge(PREFERENCES, AUTOFILL);
@@ -1560,6 +1557,15 @@ TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
// Create some items for each type.
{
WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+ // Make it look like these types have completed initial sync.
+ CreateTypeRoot(&trans, dir_.get(), BOOKMARKS);
+ CreateTypeRoot(&trans, dir_.get(), PREFERENCES);
+ CreateTypeRoot(&trans, dir_.get(), AUTOFILL);
+
+ // Add more nodes for this type. Technically, they should be placed under
+ // the proper type root nodes but the assertions in this test won't notice
+ // if their parent isn't quite right.
MutableEntry item1(&trans, CREATE, trans.root_id(), "Item");
ASSERT_TRUE(item1.good());
item1.Put(SPECIFICS, bookmark_specs);
@@ -1602,7 +1608,7 @@ TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
ReadTransaction trans(FROM_HERE, dir_.get());
MetahandleSet all_set;
GetAllMetaHandles(&trans, &all_set);
- ASSERT_EQ(7U, all_set.size());
+ ASSERT_EQ(10U, all_set.size());
}
dir_->PurgeEntriesWithTypeIn(types_to_purge);
@@ -1615,7 +1621,6 @@ TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
}
TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
- dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
dir_->set_store_birthday("Jan 31st");
dir_->SetNotificationState("notification_state");
const char* const bag_of_chips_array = "\0bag of chips";
@@ -1624,8 +1629,6 @@ TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
dir_->set_bag_of_chips(bag_of_chips_string);
{
ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
EXPECT_EQ("Jan 31st", dir_->store_birthday());
EXPECT_EQ("notification_state", dir_->GetNotificationState());
EXPECT_EQ(bag_of_chips_string, dir_->bag_of_chips());
@@ -1639,8 +1642,6 @@ TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
dir_->SaveChanges();
{
ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
EXPECT_EQ("April 10th", dir_->store_birthday());
EXPECT_EQ("notification_state2", dir_->GetNotificationState());
EXPECT_EQ(bag_of_chips2_string, dir_->bag_of_chips());
@@ -1650,8 +1651,6 @@ TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
SaveAndReloadDir();
{
ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
EXPECT_EQ("April 10th", dir_->store_birthday());
EXPECT_EQ("notification_state2", dir_->GetNotificationState());
EXPECT_EQ(bag_of_chips2_string, dir_->bag_of_chips());
diff --git a/sync/test/engine/test_syncable_utils.cc b/sync/test/engine/test_syncable_utils.cc
index 3873420..e5ff285 100644
--- a/sync/test/engine/test_syncable_utils.cc
+++ b/sync/test/engine/test_syncable_utils.cc
@@ -9,6 +9,9 @@
#include "sync/syncable/base_transaction.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
+#include "sync/syncable/mutable_entry.h"
+#include "sync/syncable/write_transaction.h"
+#include "sync/test/engine/test_id_factory.h"
using std::string;
@@ -62,5 +65,29 @@ Id GetOnlyEntryWithName(BaseTransaction* rtrans,
return GetFirstEntryWithName(rtrans, parent_id, name);
}
+void CreateTypeRoot(WriteTransaction* trans,
+ syncable::Directory *dir,
+ ModelType type) {
+ std::string tag_name = syncer::ModelTypeToRootTag(type);
+ syncable::MutableEntry node(trans,
+ syncable::CREATE,
+ TestIdFactory::root(),
+ tag_name);
+ DCHECK(node.good());
+ node.Put(syncable::UNIQUE_SERVER_TAG, tag_name);
+ node.Put(syncable::IS_DIR, true);
+ node.Put(syncable::SERVER_IS_DIR, false);
+ node.Put(syncable::IS_UNSYNCED, false);
+ node.Put(syncable::IS_UNAPPLIED_UPDATE, false);
+ node.Put(syncable::SERVER_VERSION, 20);
+ node.Put(syncable::BASE_VERSION, 20);
+ node.Put(syncable::IS_DEL, false);
+ node.Put(syncable::ID, syncer::TestIdFactory::MakeServer(tag_name));
+ sync_pb::EntitySpecifics specifics;
+ syncer::AddDefaultFieldValue(type, &specifics);
+ node.Put(syncable::SERVER_SPECIFICS, specifics);
+ node.Put(syncable::SPECIFICS, specifics);
+}
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/test/engine/test_syncable_utils.h b/sync/test/engine/test_syncable_utils.h
index 5076ebb..7f162f7 100644
--- a/sync/test/engine/test_syncable_utils.h
+++ b/sync/test/engine/test_syncable_utils.h
@@ -10,11 +10,15 @@
#include <string>
+#include "sync/internal_api/public/base/model_type.h"
+
namespace syncer {
namespace syncable {
class BaseTransaction;
+class Directory;
class Id;
+class WriteTransaction;
// Count the number of entries with a given name inside of a parent.
// Useful to check folder structure and for porting older tests that
@@ -34,6 +38,10 @@ Id GetOnlyEntryWithName(BaseTransaction* rtrans,
const syncable::Id& parent_id,
const std::string& name);
+void CreateTypeRoot(WriteTransaction* trans,
+ syncable::Directory *dir,
+ ModelType type);
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/test/test_directory_backing_store.h b/sync/test/test_directory_backing_store.h
index d58e7b2..033e99e 100644
--- a/sync/test/test_directory_backing_store.h
+++ b/sync/test/test_directory_backing_store.h
@@ -46,6 +46,7 @@ class TestDirectoryBackingStore : public DirectoryBackingStore {
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion81To82);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion82To83);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion83To84);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion84To85);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DetectInvalidOrdinal);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);