diff options
author | zork@chromium.org <zork@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-03-26 00:36:43 +0000 |
---|---|---|
committer | zork@chromium.org <zork@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-03-26 00:36:43 +0000 |
commit | f2b02f0659ec13b12156b11defba7f7ec816e0ca (patch) | |
tree | 16128097dbc378e7fde0f15836349bd58ae896f7 /chrome | |
parent | 9db5f4e61b9bdfb4b60804f6465d44c6771d073d (diff) | |
download | chromium_src-f2b02f0659ec13b12156b11defba7f7ec816e0ca.zip chromium_src-f2b02f0659ec13b12156b11defba7f7ec816e0ca.tar.gz chromium_src-f2b02f0659ec13b12156b11defba7f7ec816e0ca.tar.bz2 |
Scope the WriteTransactions during model association so that we don't lock the UI thread
BUG=34206
TEST=none
Review URL: http://codereview.chromium.org/1361002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42708 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome')
3 files changed, 153 insertions, 136 deletions
diff --git a/chrome/browser/sync/glue/autofill_model_associator.cc b/chrome/browser/sync/glue/autofill_model_associator.cc index 57d1bae..06d3427 100644 --- a/chrome/browser/sync/glue/autofill_model_associator.cc +++ b/chrome/browser/sync/glue/autofill_model_associator.cc @@ -43,93 +43,100 @@ bool AutofillModelAssociator::AssociateModels() { return false; } - sync_api::WriteTransaction trans( - sync_service_->backend()->GetUserShareHandle()); - sync_api::ReadNode autofill_root(&trans); - if (!autofill_root.InitByTagLookup(kAutofillTag)) { - error_handler_->OnUnrecoverableError(); - LOG(ERROR) << "Server did not create the top-level autofill node. We " - << "might be running against an out-of-date server."; - return false; - } - std::set<AutofillKey> current_entries; std::vector<AutofillEntry> new_entries; - for (std::vector<AutofillEntry>::iterator ix = entries.begin(); - ix != entries.end(); ++ix) { - if (id_map_.find(ix->key()) != id_map_.end()) { - // It seems that name/value pairs are not unique in the web database. - // As a result, we have to filter out duplicates here. This is probably - // a bug in the database. - continue; + { + sync_api::WriteTransaction trans( + sync_service_->backend()->GetUserShareHandle()); + sync_api::ReadNode autofill_root(&trans); + if (!autofill_root.InitByTagLookup(kAutofillTag)) { + error_handler_->OnUnrecoverableError(); + LOG(ERROR) << "Server did not create the top-level autofill node. 
We " + << "might be running against an out-of-date server."; + return false; } - std::string tag = KeyToTag(ix->key().name(), ix->key().value()); - - sync_api::ReadNode node(&trans); - if (node.InitByClientTagLookup(syncable::AUTOFILL, tag)) { - const sync_pb::AutofillSpecifics& autofill(node.GetAutofillSpecifics()); - DCHECK_EQ(tag, KeyToTag(UTF8ToUTF16(autofill.name()), - UTF8ToUTF16(autofill.value()))); + for (std::vector<AutofillEntry>::iterator ix = entries.begin(); + ix != entries.end(); ++ix) { + if (id_map_.find(ix->key()) != id_map_.end()) { + // It seems that name/value pairs are not unique in the web database. + // As a result, we have to filter out duplicates here. This is probably + // a bug in the database. + continue; + } - std::vector<base::Time> timestamps; - if (MergeTimestamps(autofill, ix->timestamps(), ×tamps)) { - AutofillEntry new_entry(ix->key(), timestamps); - new_entries.push_back(new_entry); + std::string tag = KeyToTag(ix->key().name(), ix->key().value()); + + sync_api::ReadNode node(&trans); + if (node.InitByClientTagLookup(syncable::AUTOFILL, tag)) { + const sync_pb::AutofillSpecifics& autofill(node.GetAutofillSpecifics()); + DCHECK_EQ(tag, KeyToTag(UTF8ToUTF16(autofill.name()), + UTF8ToUTF16(autofill.value()))); + + std::vector<base::Time> timestamps; + if (MergeTimestamps(autofill, ix->timestamps(), ×tamps)) { + AutofillEntry new_entry(ix->key(), timestamps); + new_entries.push_back(new_entry); + + sync_api::WriteNode write_node(&trans); + if (!write_node.InitByClientTagLookup(syncable::AUTOFILL, tag)) { + LOG(ERROR) << "Failed to write autofill sync node."; + return false; + } + AutofillChangeProcessor::WriteAutofill(&write_node, new_entry); + } - sync_api::WriteNode write_node(&trans); - if (!write_node.InitByClientTagLookup(syncable::AUTOFILL, tag)) { - LOG(ERROR) << "Failed to write autofill sync node."; + Associate(&(ix->key()), node.GetId()); + } else { + sync_api::WriteNode node(&trans); + if 
(!node.InitUniqueByCreation(syncable::AUTOFILL, + autofill_root, tag)) { + LOG(ERROR) << "Failed to create autofill sync node."; error_handler_->OnUnrecoverableError(); return false; } - AutofillChangeProcessor::WriteAutofill(&write_node, new_entry); + node.SetTitle(UTF16ToWide(ix->key().name() + ix->key().value())); + AutofillChangeProcessor::WriteAutofill(&node, *ix); + Associate(&(ix->key()), node.GetId()); } - Associate(&(ix->key()), node.GetId()); - } else { - sync_api::WriteNode node(&trans); - if (!node.InitUniqueByCreation(syncable::AUTOFILL, autofill_root, tag)) { - LOG(ERROR) << "Failed to create autofill sync node."; + current_entries.insert(ix->key()); + } + + int64 sync_child_id = autofill_root.GetFirstChildId(); + while (sync_child_id != sync_api::kInvalidId) { + sync_api::ReadNode sync_child_node(&trans); + if (!sync_child_node.InitByIdLookup(sync_child_id)) { + LOG(ERROR) << "Failed to fetch child node."; error_handler_->OnUnrecoverableError(); return false; } - node.SetTitle(UTF16ToWide(ix->key().name() + ix->key().value())); - AutofillChangeProcessor::WriteAutofill(&node, *ix); - Associate(&(ix->key()), node.GetId()); - } - - current_entries.insert(ix->key()); - } - - int64 sync_child_id = autofill_root.GetFirstChildId(); - while (sync_child_id != sync_api::kInvalidId) { - sync_api::ReadNode sync_child_node(&trans); - if (!sync_child_node.InitByIdLookup(sync_child_id)) { - LOG(ERROR) << "Failed to fetch child node."; - error_handler_->OnUnrecoverableError(); - return false; - } - const sync_pb::AutofillSpecifics& autofill( - sync_child_node.GetAutofillSpecifics()); - AutofillKey key(UTF8ToUTF16(autofill.name()), - UTF8ToUTF16(autofill.value())); - - if (current_entries.find(key) == current_entries.end()) { - std::vector<base::Time> timestamps; - int timestamps_count = autofill.usage_timestamp_size(); - for (int c = 0; c < timestamps_count; ++c) { - timestamps.push_back(base::Time::FromInternalValue( - autofill.usage_timestamp(c))); + const 
sync_pb::AutofillSpecifics& autofill( + sync_child_node.GetAutofillSpecifics()); + AutofillKey key(UTF8ToUTF16(autofill.name()), + UTF8ToUTF16(autofill.value())); + + if (current_entries.find(key) == current_entries.end()) { + std::vector<base::Time> timestamps; + int timestamps_count = autofill.usage_timestamp_size(); + for (int c = 0; c < timestamps_count; ++c) { + timestamps.push_back(base::Time::FromInternalValue( + autofill.usage_timestamp(c))); + } + Associate(&key, sync_child_node.GetId()); + new_entries.push_back(AutofillEntry(key, timestamps)); } - Associate(&key, sync_child_node.GetId()); - new_entries.push_back(AutofillEntry(key, timestamps)); - } - sync_child_id = sync_child_node.GetSuccessorId(); + sync_child_id = sync_child_node.GetSuccessorId(); + } } + // Since we're on the DB thread, we don't have to worry about updating + // the autofill database after closing the write transaction, since + // this is the only thread that writes to the database. We also don't have + // to worry about the sync model getting out of sync, because changes are + // propogated to the ChangeProcessor on this thread. if (new_entries.size() && !web_database_->UpdateAutofillEntries(new_entries)) { LOG(ERROR) << "Failed to update autofill entries."; diff --git a/chrome/browser/sync/glue/typed_url_change_processor.cc b/chrome/browser/sync/glue/typed_url_change_processor.cc index 709c5e6..4701b3a 100644 --- a/chrome/browser/sync/glue/typed_url_change_processor.cc +++ b/chrome/browser/sync/glue/typed_url_change_processor.cc @@ -62,6 +62,9 @@ void TypedUrlChangeProcessor::HandleURLsModified( history::URLsModifiedDetails* details) {
sync_api::WriteTransaction trans(share_handle());
+ // TODO(sync): Get visits without holding the write transaction.
+ // See issue 34206
+
sync_api::ReadNode typed_url_root(&trans);
if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
error_handler()->OnUnrecoverableError();
diff --git a/chrome/browser/sync/glue/typed_url_model_associator.cc b/chrome/browser/sync/glue/typed_url_model_associator.cc index 080a598..1415ffe 100644 --- a/chrome/browser/sync/glue/typed_url_model_associator.cc +++ b/chrome/browser/sync/glue/typed_url_model_associator.cc @@ -42,94 +42,101 @@ bool TypedUrlModelAssociator::AssociateModels() { return false;
}
- sync_api::WriteTransaction trans(
- sync_service_->backend()->GetUserShareHandle());
- sync_api::ReadNode typed_url_root(&trans);
- if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
- LOG(ERROR) << "Server did not create the top-level typed_url node. We "
- << "might be running against an out-of-date server.";
- return false;
- }
-
- std::set<std::string> current_urls;
TypedUrlTitleVector titles;
TypedUrlVector new_urls;
TypedUrlUpdateVector updated_urls;
- for (std::vector<history::URLRow>::iterator ix = typed_urls.begin();
- ix != typed_urls.end(); ++ix) {
- std::string tag = ix->url().spec();
-
- sync_api::ReadNode node(&trans);
- if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
- const sync_pb::TypedUrlSpecifics& typed_url(node.GetTypedUrlSpecifics());
- DCHECK_EQ(tag, typed_url.url());
+ {
+ sync_api::WriteTransaction trans(
+ sync_service_->backend()->GetUserShareHandle());
+ sync_api::ReadNode typed_url_root(&trans);
+ if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
+ LOG(ERROR) << "Server did not create the top-level typed_url node. We "
+ << "might be running against an out-of-date server.";
+ return false;
+ }
- history::URLRow new_url(ix->url());
+ std::set<std::string> current_urls;
+ for (std::vector<history::URLRow>::iterator ix = typed_urls.begin();
+ ix != typed_urls.end(); ++ix) {
+ std::string tag = ix->url().spec();
+
+ sync_api::ReadNode node(&trans);
+ if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
+ const sync_pb::TypedUrlSpecifics& typed_url(node.GetTypedUrlSpecifics());
+ DCHECK_EQ(tag, typed_url.url());
+
+ history::URLRow new_url(ix->url());
+
+ int difference = MergeUrls(typed_url, *ix, &new_url);
+ if (difference & DIFF_NODE_CHANGED) {
+ sync_api::WriteNode write_node(&trans);
+ if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
+ LOG(ERROR) << "Failed to edit typed_url sync node.";
+ return false;
+ }
+ WriteToSyncNode(new_url, &write_node);
+ }
+ if (difference & DIFF_TITLE_CHANGED) {
+ titles.push_back(std::pair<GURL, std::wstring>(new_url.url(),
+ new_url.title()));
+ }
+ if (difference & DIFF_ROW_CHANGED) {
+ updated_urls.push_back(
+ std::pair<history::URLID, history::URLRow>(ix->id(), new_url));
+ }
- int difference = MergeUrls(typed_url, *ix, &new_url);
- if (difference & DIFF_NODE_CHANGED) {
- sync_api::WriteNode write_node(&trans);
- if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
- LOG(ERROR) << "Failed to edit typed_url sync node.";
+ Associate(&tag, node.GetId());
+ } else {
+ sync_api::WriteNode node(&trans);
+ if (!node.InitUniqueByCreation(syncable::TYPED_URLS,
+ typed_url_root, tag)) {
+ LOG(ERROR) << "Failed to create typed_url sync node.";
return false;
}
- WriteToSyncNode(new_url, &write_node);
- }
- if (difference & DIFF_TITLE_CHANGED) {
- titles.push_back(std::pair<GURL, std::wstring>(new_url.url(),
- new_url.title()));
- }
- if (difference & DIFF_ROW_CHANGED) {
- updated_urls.push_back(
- std::pair<history::URLID, history::URLRow>(ix->id(), new_url));
- }
- Associate(&tag, node.GetId());
- } else {
- sync_api::WriteNode node(&trans);
- if (!node.InitUniqueByCreation(syncable::TYPED_URLS,
- typed_url_root, tag)) {
- LOG(ERROR) << "Failed to create typed_url sync node.";
- return false;
- }
+ node.SetTitle(UTF8ToWide(tag));
+ WriteToSyncNode(*ix, &node);
- node.SetTitle(UTF8ToWide(tag));
- WriteToSyncNode(*ix, &node);
+ Associate(&tag, node.GetId());
+ }
- Associate(&tag, node.GetId());
+ current_urls.insert(tag);
}
- current_urls.insert(tag);
- }
+ int64 sync_child_id = typed_url_root.GetFirstChildId();
+ while (sync_child_id != sync_api::kInvalidId) {
+ sync_api::ReadNode sync_child_node(&trans);
+ if (!sync_child_node.InitByIdLookup(sync_child_id)) {
+ LOG(ERROR) << "Failed to fetch child node.";
+ return false;
+ }
+ const sync_pb::TypedUrlSpecifics& typed_url(
+ sync_child_node.GetTypedUrlSpecifics());
- int64 sync_child_id = typed_url_root.GetFirstChildId();
- while (sync_child_id != sync_api::kInvalidId) {
- sync_api::ReadNode sync_child_node(&trans);
- if (!sync_child_node.InitByIdLookup(sync_child_id)) {
- LOG(ERROR) << "Failed to fetch child node.";
- return false;
- }
- const sync_pb::TypedUrlSpecifics& typed_url(
- sync_child_node.GetTypedUrlSpecifics());
+ if (current_urls.find(typed_url.url()) == current_urls.end()) {
+ history::URLRow new_url(GURL(typed_url.url()));
- if (current_urls.find(typed_url.url()) == current_urls.end()) {
- history::URLRow new_url(GURL(typed_url.url()));
+ new_url.set_title(UTF8ToWide(typed_url.title()));
+ new_url.set_visit_count(typed_url.visit_count());
+ new_url.set_typed_count(typed_url.typed_count());
+ new_url.set_last_visit(
+ base::Time::FromInternalValue(typed_url.last_visit()));
+ new_url.set_hidden(typed_url.hidden());
- new_url.set_title(UTF8ToWide(typed_url.title()));
- new_url.set_visit_count(typed_url.visit_count());
- new_url.set_typed_count(typed_url.typed_count());
- new_url.set_last_visit(
- base::Time::FromInternalValue(typed_url.last_visit()));
- new_url.set_hidden(typed_url.hidden());
+ Associate(&typed_url.url(), sync_child_node.GetId());
+ new_urls.push_back(new_url);
+ }
- Associate(&typed_url.url(), sync_child_node.GetId());
- new_urls.push_back(new_url);
+ sync_child_id = sync_child_node.GetSuccessorId();
}
-
- sync_child_id = sync_child_node.GetSuccessorId();
}
+ // Since we're on the history thread, we don't have to worry about updating + // the history database after closing the write transaction, since + // this is the only thread that writes to the database. We also don't have + // to worry about the sync model getting out of sync, because changes are + // propagated to the ChangeProcessor on this thread. WriteToHistoryBackend(&titles, &new_urls, &updated_urls);
return true;
|