author     shess@chromium.org <shess@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-02 23:30:43 +0000
committer  shess@chromium.org <shess@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-02 23:30:43 +0000
commit     b57c07779e9f1e6af2d45d89102d8772d6b72819 (patch)
tree       0ea75672b21996dff5330515ac2f04a7a59f60e2
parent     44e6e916d72360059c8ea044a3a11f76211196d5 (diff)
Final fixes for new safe-browsing storage.
In safe_browsing_database.cc, when inserting subs which have no prefixes, calculate the add chunk based on the entry's chunk, not the sub chunk being processed. This matches the old code.

Revise safe_browsing_store* to trim deleted chunks as part of processing rather than as part of reading. This means that more data will be in memory, but it follows the old code, which allowed subs to cancel adds before the add's chunk was deleted. Without this, those subs will stay around until the sub chunk is deleted (because their adds are not seen).

In safe_browsing_store_sqlite.cc, the previous code attempted to trick SQLite into writing data efficiently by locking down the old tables while writing the new ones. Due to our use of SQLITE_SECURE_DELETE, this actually resulted in bigger files and more overall I/O. Modify things to simply delete the data instead.

BUG=28647
TEST=none

Review URL: http://codereview.chromium.org/3244012

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@58426 0039d316-1c4b-4281-b951-d872f2087c98
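The ordering point above is the core behavioral change: subs are matched against adds first, and only then are items from deleted chunks dropped. The following is a minimal standalone sketch of that ordering, not the Chromium code; the types Add, Sub, and ProcessSubs are invented for illustration, and matching is simplified to (add_chunk_id, prefix). The real logic is in SBProcessSubs() and RemoveDeleted() in the diff below.

    // Sketch only: shows why deleted-chunk trimming happens after sub/add
    // knockout, so a sub can cancel an add whose chunk is being deleted.
    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <vector>

    struct Add { int32_t chunk_id; uint32_t prefix; };
    struct Sub { int32_t chunk_id; int32_t add_chunk_id; uint32_t add_prefix; };

    void ProcessSubs(std::vector<Add>* adds, std::vector<Sub>* subs,
                     const std::set<int32_t>& add_chunks_deleted,
                     const std::set<int32_t>& sub_chunks_deleted) {
      // 1) Knock out adds that have a matching sub; the matching sub is
      //    consumed as well.
      std::vector<Add> kept_adds;
      std::vector<Sub> kept_subs;
      for (const Add& add : *adds) {
        bool knocked_out = false;
        for (const Sub& sub : *subs) {
          if (sub.add_chunk_id == add.chunk_id && sub.add_prefix == add.prefix) {
            knocked_out = true;
            break;
          }
        }
        if (!knocked_out)
          kept_adds.push_back(add);
      }
      for (const Sub& sub : *subs) {
        bool matched = false;
        for (const Add& add : *adds) {
          if (sub.add_chunk_id == add.chunk_id && sub.add_prefix == add.prefix) {
            matched = true;
            break;
          }
        }
        if (!matched)
          kept_subs.push_back(sub);
      }

      // 2) Only now drop items whose chunk is in the deleted set, mirroring
      //    the RemoveDeleted() calls added to SBProcessSubs() in this patch.
      adds->clear();
      for (const Add& add : kept_adds)
        if (!add_chunks_deleted.count(add.chunk_id))
          adds->push_back(add);
      subs->clear();
      for (const Sub& sub : kept_subs)
        if (!sub_chunks_deleted.count(sub.chunk_id))
          subs->push_back(sub);
    }

    int main() {
      std::vector<Add> adds = {{1, 0x1111}, {1, 0x2222}};
      std::vector<Sub> subs = {{2, 1, 0x1111}};
      // Add chunk 1 is being deleted in this update.
      ProcessSubs(&adds, &subs, /*add_chunks_deleted=*/{1},
                  /*sub_chunks_deleted=*/{});
      // The sub cancelled add 0x1111 and was consumed; the remaining add was
      // dropped with its deleted chunk. Prints "0 adds, 0 subs left".
      std::cout << adds.size() << " adds, " << subs.size() << " subs left\n";
      return 0;
    }

With the old ordering (the removed ReadToVectorAndDelete() path in safe_browsing_store_file.cc), the add from the deleted chunk would never have been read, so the sub would find no match and linger until its own chunk was deleted.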
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_database.cc        2
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store.cc          29
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store.h            8
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store_file.cc     79
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store_sqlite.cc   65
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store_sqlite.h     5
-rw-r--r--  chrome/browser/safe_browsing/safe_browsing_store_unittest.cc 92
7 files changed, 164 insertions, 116 deletions
diff --git a/chrome/browser/safe_browsing/safe_browsing_database.cc b/chrome/browser/safe_browsing/safe_browsing_database.cc
index f7e2bb3..76b40db 100644
--- a/chrome/browser/safe_browsing/safe_browsing_database.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_database.cc
@@ -373,7 +373,7 @@ void SafeBrowsingDatabaseNew::InsertSub(int chunk_id, SBPrefix host,
if (!count) {
// No prefixes, use host instead.
STATS_COUNTER("SB.PrefixSub", 1);
- const int add_chunk_id = EncodeChunkId(chunk_id, list_id);
+ const int add_chunk_id = EncodeChunkId(entry->chunk_id(), list_id);
store_->WriteSubPrefix(encoded_chunk_id, add_chunk_id, host);
} else if (entry->IsPrefix()) {
// Prefixes only.
diff --git a/chrome/browser/safe_browsing/safe_browsing_store.cc b/chrome/browser/safe_browsing/safe_browsing_store.cc
index b90e98d..ea5d4f5 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_store.cc
@@ -102,12 +102,31 @@ void RemoveMatchingPrefixes(const std::vector<SBAddPrefix>& removes,
full_hashes->erase(out, hash_iter);
}
+// Remove deleted items (|chunk_id| in |del_set|) from the vector.
+template <class T>
+void RemoveDeleted(std::vector<T>* vec, const base::hash_set<int32>& del_set) {
+ DCHECK(vec);
+
+ // Scan through the items read, dropping the items in |del_set|.
+ typename std::vector<T>::iterator add_iter = vec->begin();
+ for (typename std::vector<T>::iterator iter = add_iter;
+ iter != vec->end(); ++iter) {
+ if (del_set.count(iter->chunk_id) == 0) {
+ *add_iter = *iter;
+ ++add_iter;
+ }
+ }
+ vec->erase(add_iter, vec->end());
+}
+
} // namespace
void SBProcessSubs(std::vector<SBAddPrefix>* add_prefixes,
std::vector<SBSubPrefix>* sub_prefixes,
std::vector<SBAddFullHash>* add_full_hashes,
- std::vector<SBSubFullHash>* sub_full_hashes) {
+ std::vector<SBSubFullHash>* sub_full_hashes,
+ const base::hash_set<int32>& add_chunks_deleted,
+ const base::hash_set<int32>& sub_chunks_deleted) {
// It is possible to structure templates and template
// specializations such that the following calls work without having
// to qualify things. It becomes very arbitrary, though, and less
@@ -152,4 +171,12 @@ void SBProcessSubs(std::vector<SBAddPrefix>* add_prefixes,
SBAddPrefixHashLess<SBSubFullHash,SBAddFullHash>,
&removed_full_adds);
}
+
+ // Remove items from the deleted chunks. This is done after other
+ // processing to allow subs to knock out adds (and be removed) even
+ // if the add's chunk is deleted.
+ RemoveDeleted(add_prefixes, add_chunks_deleted);
+ RemoveDeleted(sub_prefixes, sub_chunks_deleted);
+ RemoveDeleted(add_full_hashes, add_chunks_deleted);
+ RemoveDeleted(sub_full_hashes, sub_chunks_deleted);
}
diff --git a/chrome/browser/safe_browsing/safe_browsing_store.h b/chrome/browser/safe_browsing/safe_browsing_store.h
index 92d6e93..b6b44bd 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store.h
+++ b/chrome/browser/safe_browsing/safe_browsing_store.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/callback.h"
+#include "base/hash_tables.h"
#include "base/task.h"
#include "base/time.h"
#include "chrome/browser/safe_browsing/safe_browsing_util.h"
@@ -124,7 +125,8 @@ bool SBAddPrefixHashLess(const T& a, const U& b) {
// Process the lists for subs which knock out adds. For any item in
// |sub_prefixes| which has a match in |add_prefixes|, knock out the
-// matched items from all vectors.
+// matched items from all vectors. Additionally remove items from
+// deleted chunks.
//
// TODO(shess): Since the prefixes are uniformly-distributed hashes,
// there aren't many ways to organize the inputs for efficient
@@ -139,7 +141,9 @@ bool SBAddPrefixHashLess(const T& a, const U& b) {
void SBProcessSubs(std::vector<SBAddPrefix>* add_prefixes,
std::vector<SBSubPrefix>* sub_prefixes,
std::vector<SBAddFullHash>* add_full_hashes,
- std::vector<SBSubFullHash>* sub_full_hashes);
+ std::vector<SBSubFullHash>* sub_full_hashes,
+ const base::hash_set<int32>& add_chunks_deleted,
+ const base::hash_set<int32>& sub_chunks_deleted);
// TODO(shess): This uses int32 rather than int because it's writing
// specifically-sized items to files. SBPrefix should likewise be
diff --git a/chrome/browser/safe_browsing/safe_browsing_store_file.cc b/chrome/browser/safe_browsing/safe_browsing_store_file.cc
index 207a607..31a4337 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store_file.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_store_file.cc
@@ -108,39 +108,6 @@ bool WriteVector(const std::vector<T>& values, FILE* fp, MD5Context* context) {
return WriteArray(ptr, values.size(), fp, context);
}
-// Remove deleted items (|chunk_id| in |del_set|) from the vector
-// starting at |offset| running to |end()|.
-template <class T>
-void RemoveDeleted(std::vector<T>* vec, size_t offset,
- const base::hash_set<int32>& del_set) {
- DCHECK(vec);
-
- // Scan through the items read, dropping the items in |del_set|.
- typename std::vector<T>::iterator add_iter = vec->begin() + offset;
- for (typename std::vector<T>::iterator iter = add_iter;
- iter != vec->end(); ++iter) {
- if (del_set.count(iter->chunk_id) == 0) {
- *add_iter = *iter;
- ++add_iter;
- }
- }
- vec->erase(add_iter, vec->end());
-}
-
-// Combine |ReadToVector()| and |RemoveDeleted()|. Returns true on
-// success.
-template <class T>
-bool ReadToVectorAndDelete(std::vector<T>* values, size_t count,
- FILE* fp, MD5Context* context,
- const base::hash_set<int32>& del_set) {
- const size_t original_size = values->size();
- if (!ReadToVector(values, count, fp, context))
- return false;
-
- RemoveDeleted(values, original_size, del_set);
- return true;
-}
-
// Read an array of |count| integers and add them to |values|.
// Returns true on success.
bool ReadToChunkSet(std::set<int32>* values, size_t count,
@@ -431,14 +398,14 @@ bool SafeBrowsingStoreFile::DoUpdate(
file_.get(), &context))
return OnCorruptDatabase();
- if (!ReadToVectorAndDelete(&add_prefixes, header.add_prefix_count,
- file_.get(), &context, add_del_cache_) ||
- !ReadToVectorAndDelete(&sub_prefixes, header.sub_prefix_count,
- file_.get(), &context, sub_del_cache_) ||
- !ReadToVectorAndDelete(&add_full_hashes, header.add_hash_count,
- file_.get(), &context, add_del_cache_) ||
- !ReadToVectorAndDelete(&sub_full_hashes, header.sub_hash_count,
- file_.get(), &context, sub_del_cache_))
+ if (!ReadToVector(&add_prefixes, header.add_prefix_count,
+ file_.get(), &context) ||
+ !ReadToVector(&sub_prefixes, header.sub_prefix_count,
+ file_.get(), &context) ||
+ !ReadToVector(&add_full_hashes, header.add_hash_count,
+ file_.get(), &context) ||
+ !ReadToVector(&sub_full_hashes, header.sub_hash_count,
+ file_.get(), &context))
return OnCorruptDatabase();
// Calculate the digest to this point.
@@ -475,27 +442,25 @@ bool SafeBrowsingStoreFile::DoUpdate(
// some sort of recursive binary merge might be in order (merge
// chunks pairwise, merge those chunks pairwise, and so on, then
// merge the result with the main list).
- if (!ReadToVectorAndDelete(&add_prefixes, header.add_prefix_count,
- new_file_.get(), NULL, add_del_cache_) ||
- !ReadToVectorAndDelete(&sub_prefixes, header.sub_prefix_count,
- new_file_.get(), NULL, sub_del_cache_) ||
- !ReadToVectorAndDelete(&add_full_hashes, header.add_hash_count,
- new_file_.get(), NULL, add_del_cache_) ||
- !ReadToVectorAndDelete(&sub_full_hashes, header.sub_hash_count,
- new_file_.get(), NULL, sub_del_cache_))
+ if (!ReadToVector(&add_prefixes, header.add_prefix_count,
+ new_file_.get(), NULL) ||
+ !ReadToVector(&sub_prefixes, header.sub_prefix_count,
+ new_file_.get(), NULL) ||
+ !ReadToVector(&add_full_hashes, header.add_hash_count,
+ new_file_.get(), NULL) ||
+ !ReadToVector(&sub_full_hashes, header.sub_hash_count,
+ new_file_.get(), NULL))
return false;
}
- // Append items from |pending_adds| which haven't been deleted.
- for (std::vector<SBAddFullHash>::const_iterator iter = pending_adds.begin();
- iter != pending_adds.end(); ++iter) {
- if (add_del_cache_.count(iter->chunk_id) == 0)
- add_full_hashes.push_back(*iter);
- }
+ // Append items from |pending_adds|.
+ add_full_hashes.insert(add_full_hashes.end(),
+ pending_adds.begin(), pending_adds.end());
- // Knock the subs from the adds.
+ // Knock the subs from the adds and process deleted chunks.
SBProcessSubs(&add_prefixes, &sub_prefixes,
- &add_full_hashes, &sub_full_hashes);
+ &add_full_hashes, &sub_full_hashes,
+ add_del_cache_, sub_del_cache_);
// We no longer need to track deleted chunks.
DeleteChunksFromSet(add_del_cache_, &add_chunks_cache_);
diff --git a/chrome/browser/safe_browsing/safe_browsing_store_sqlite.cc b/chrome/browser/safe_browsing/safe_browsing_store_sqlite.cc
index 0ac8ec4..b312f15 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store_sqlite.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_store_sqlite.cc
@@ -350,9 +350,6 @@ bool SafeBrowsingStoreSqlite::ReadAddPrefixes(
int rv;
while ((rv = statement->step()) == SQLITE_ROW) {
const int32 chunk_id = statement->column_int(0);
- if (add_del_cache_.count(chunk_id) > 0)
- continue;
-
const SBPrefix prefix = statement->column_int(1);
add_prefixes->push_back(SBAddPrefix(chunk_id, prefix));
}
@@ -402,9 +399,6 @@ bool SafeBrowsingStoreSqlite::ReadSubPrefixes(
int rv;
while ((rv = statement->step()) == SQLITE_ROW) {
const int32 chunk_id = statement->column_int(0);
- if (sub_del_cache_.count(chunk_id) > 0)
- continue;
-
const int32 add_chunk_id = statement->column_int(1);
const SBPrefix add_prefix = statement->column_int(2);
sub_prefixes->push_back(SBSubPrefix(chunk_id, add_chunk_id, add_prefix));
@@ -456,9 +450,6 @@ bool SafeBrowsingStoreSqlite::ReadAddHashes(
int rv;
while ((rv = statement->step()) == SQLITE_ROW) {
const int32 chunk_id = statement->column_int(0);
- if (add_del_cache_.count(chunk_id) > 0)
- continue;
-
// NOTE: Legacy format duplicated |hash.prefix| in column 1.
const SBPrefix prefix = statement->column_int(1);
const int32 received = statement->column_int(2);
@@ -517,9 +508,6 @@ bool SafeBrowsingStoreSqlite::ReadSubHashes(
int rv;
while ((rv = statement->step()) == SQLITE_ROW) {
const int32 chunk_id = statement->column_int(0);
- if (sub_del_cache_.count(chunk_id) > 0)
- continue;
-
// NOTE: Legacy format duplicated |hash.prefix| in column 2.
const int32 add_chunk_id = statement->column_int(1);
const SBPrefix add_prefix = statement->column_int(2);
@@ -563,29 +551,15 @@ bool SafeBrowsingStoreSqlite::WriteSubHashes(
return true;
}
-bool SafeBrowsingStoreSqlite::RenameTables() {
+bool SafeBrowsingStoreSqlite::ResetTables() {
DCHECK(db_);
- if (!ExecSql("ALTER TABLE add_prefix RENAME TO add_prefix_old") ||
- !ExecSql("ALTER TABLE sub_prefix RENAME TO sub_prefix_old") ||
- !ExecSql("ALTER TABLE add_full_hash RENAME TO add_full_hash_old") ||
- !ExecSql("ALTER TABLE sub_full_hash RENAME TO sub_full_hash_old") ||
- !ExecSql("ALTER TABLE add_chunks RENAME TO add_chunks_old") ||
- !ExecSql("ALTER TABLE sub_chunks RENAME TO sub_chunks_old"))
- return false;
-
- return CreateTables();
-}
-
-bool SafeBrowsingStoreSqlite::DeleteOldTables() {
- DCHECK(db_);
-
- if (!ExecSql("DROP TABLE add_prefix_old") ||
- !ExecSql("DROP TABLE sub_prefix_old") ||
- !ExecSql("DROP TABLE add_full_hash_old") ||
- !ExecSql("DROP TABLE sub_full_hash_old") ||
- !ExecSql("DROP TABLE add_chunks_old") ||
- !ExecSql("DROP TABLE sub_chunks_old"))
+ if (!ExecSql("DELETE FROM add_prefix") ||
+ !ExecSql("DELETE FROM sub_prefix") ||
+ !ExecSql("DELETE FROM add_full_hash") ||
+ !ExecSql("DELETE FROM sub_full_hash") ||
+ !ExecSql("DELETE FROM add_chunks") ||
+ !ExecSql("DELETE FROM sub_chunks"))
return false;
return true;
@@ -627,23 +601,22 @@ bool SafeBrowsingStoreSqlite::DoUpdate(
!ReadSubHashes(&sub_full_hashes))
return false;
- // Add the pending adds which haven't since been deleted.
- for (std::vector<SBAddFullHash>::const_iterator iter = pending_adds.begin();
- iter != pending_adds.end(); ++iter) {
- if (add_del_cache_.count(iter->chunk_id) == 0)
- add_full_hashes.push_back(*iter);
- }
+ // Append items from |pending_adds|.
+ add_full_hashes.insert(add_full_hashes.end(),
+ pending_adds.begin(), pending_adds.end());
+ // Knock the subs from the adds and process deleted chunks.
SBProcessSubs(&add_prefixes, &sub_prefixes,
- &add_full_hashes, &sub_full_hashes);
-
- // Move the existing tables aside and prepare to write fresh tables.
- if (!RenameTables())
- return false;
+ &add_full_hashes, &sub_full_hashes,
+ add_del_cache_, sub_del_cache_);
DeleteChunksFromSet(add_del_cache_, &add_chunks_cache_);
DeleteChunksFromSet(sub_del_cache_, &sub_chunks_cache_);
+ // Clear the existing tables before rewriting them.
+ if (!ResetTables())
+ return false;
+
if (!WriteAddChunks() ||
!WriteSubChunks() ||
!WriteAddPrefixes(add_prefixes) ||
@@ -652,10 +625,6 @@ bool SafeBrowsingStoreSqlite::DoUpdate(
!WriteSubHashes(sub_full_hashes))
return false;
- // Delete the old tables.
- if (!DeleteOldTables())
- return false;
-
// Commit all the changes to the database.
int rv = insert_transaction_->Commit();
if (rv != SQLITE_OK) {
diff --git a/chrome/browser/safe_browsing/safe_browsing_store_sqlite.h b/chrome/browser/safe_browsing/safe_browsing_store_sqlite.h
index f32507f..6f684c1d 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store_sqlite.h
+++ b/chrome/browser/safe_browsing/safe_browsing_store_sqlite.h
@@ -127,8 +127,9 @@ class SafeBrowsingStoreSqlite : public SafeBrowsingStore {
bool CheckCompatibleVersion();
bool CreateTables();
- bool RenameTables();
- bool DeleteOldTables();
+
+ // Clear the old safe-browsing data from the tables.
+ bool ResetTables();
// Read and write the chunks-seen data from |*_chunks_cache_|.
// Chunk deletions are not accounted for.
diff --git a/chrome/browser/safe_browsing/safe_browsing_store_unittest.cc b/chrome/browser/safe_browsing/safe_browsing_store_unittest.cc
index 76f0e91..4284625 100644
--- a/chrome/browser/safe_browsing/safe_browsing_store_unittest.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_store_unittest.cc
@@ -109,20 +109,25 @@ TEST(SafeBrowsingStoreTest, SBSubFullHashLess) {
SBSubFullHash(9, 10, one)));
}
-TEST(SafeBrowsingStoreTest, SBProcessSubs) {
+// SBProcessSubs does a lot of iteration, run through empty just to
+// make sure degenerate cases work.
+TEST(SafeBrowsingStoreTest, SBProcessSubsEmpty) {
std::vector<SBAddPrefix> add_prefixes;
std::vector<SBAddFullHash> add_hashes;
std::vector<SBSubPrefix> sub_prefixes;
std::vector<SBSubFullHash> sub_hashes;
- // SBProcessSubs does a lot of iteration, run through empty just to
- // make sure degenerate cases work.
- SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes);
+ const base::hash_set<int32> no_deletions;
+ SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes,
+ no_deletions, no_deletions);
EXPECT_TRUE(add_prefixes.empty());
EXPECT_TRUE(sub_prefixes.empty());
EXPECT_TRUE(add_hashes.empty());
EXPECT_TRUE(sub_hashes.empty());
+}
+// Test that subs knock out adds.
+TEST(SafeBrowsingStoreTest, SBProcessSubsKnockout) {
const base::Time kNow = base::Time::Now();
const SBFullHash kHash1(SBFullHashFromString("one"));
const SBFullHash kHash2(SBFullHashFromString("two"));
@@ -138,6 +143,11 @@ TEST(SafeBrowsingStoreTest, SBProcessSubs) {
SBFullHash kHash1mod3 = kHash1mod2;
kHash1mod3.full_hash[sizeof(kHash1mod3.full_hash) - 1] ++;
+ std::vector<SBAddPrefix> add_prefixes;
+ std::vector<SBAddFullHash> add_hashes;
+ std::vector<SBSubPrefix> sub_prefixes;
+ std::vector<SBSubFullHash> sub_hashes;
+
// An add with prefix and a couple hashes, plus a sub for the prefix
// and a couple sub hashes. The sub should knock all of them out.
add_prefixes.push_back(SBAddPrefix(kAddChunk1, kHash1.prefix));
@@ -155,7 +165,9 @@ TEST(SafeBrowsingStoreTest, SBProcessSubs) {
sub_hashes.push_back(SBSubFullHash(kSubChunk1, kAddChunk1, kHash3));
sub_prefixes.push_back(SBSubPrefix(kSubChunk1, kAddChunk1, kHash3.prefix));
- SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes);
+ const base::hash_set<int32> no_deletions;
+ SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes,
+ no_deletions, no_deletions);
EXPECT_EQ(1U, add_prefixes.size());
EXPECT_EQ(kAddChunk1, add_prefixes[0].chunk_id);
@@ -176,6 +188,76 @@ TEST(SafeBrowsingStoreTest, SBProcessSubs) {
EXPECT_TRUE(SBFullHashEq(kHash3, sub_hashes[0].full_hash));
}
+// Test chunk deletions, and ordering of deletions WRT subs knocking
+// out adds.
+TEST(SafeBrowsingStoreTest, SBProcessSubsDeleteChunk) {
+ const base::Time kNow = base::Time::Now();
+ const SBFullHash kHash1(SBFullHashFromString("one"));
+ const SBFullHash kHash2(SBFullHashFromString("two"));
+ const SBFullHash kHash3(SBFullHashFromString("three"));
+ const int kAddChunk1 = 1; // Use different chunk numbers just in case.
+ const int kSubChunk1 = 2;
+
+ // Construct some full hashes which share prefix with another.
+ SBFullHash kHash1mod1 = kHash1;
+ kHash1mod1.full_hash[sizeof(kHash1mod1.full_hash) - 1] ++;
+ SBFullHash kHash1mod2 = kHash1mod1;
+ kHash1mod2.full_hash[sizeof(kHash1mod2.full_hash) - 1] ++;
+ SBFullHash kHash1mod3 = kHash1mod2;
+ kHash1mod3.full_hash[sizeof(kHash1mod3.full_hash) - 1] ++;
+
+ std::vector<SBAddPrefix> add_prefixes;
+ std::vector<SBAddFullHash> add_hashes;
+ std::vector<SBSubPrefix> sub_prefixes;
+ std::vector<SBSubFullHash> sub_hashes;
+
+ // An add with prefix and a couple hashes, plus a sub for the prefix
+ // and a couple sub hashes. The sub should knock all of them out.
+ add_prefixes.push_back(SBAddPrefix(kAddChunk1, kHash1.prefix));
+ add_hashes.push_back(SBAddFullHash(kAddChunk1, kNow, kHash1));
+ add_hashes.push_back(SBAddFullHash(kAddChunk1, kNow, kHash1mod1));
+ sub_prefixes.push_back(SBSubPrefix(kSubChunk1, kAddChunk1, kHash1.prefix));
+ sub_hashes.push_back(SBSubFullHash(kSubChunk1, kAddChunk1, kHash1mod2));
+ sub_hashes.push_back(SBSubFullHash(kSubChunk1, kAddChunk1, kHash1mod3));
+
+ // An add with no corresponding sub. Both items should be retained.
+ add_hashes.push_back(SBAddFullHash(kAddChunk1, kNow, kHash2));
+ add_prefixes.push_back(SBAddPrefix(kAddChunk1, kHash2.prefix));
+
+ // A sub with no corresponding add. Both items should be retained.
+ sub_hashes.push_back(SBSubFullHash(kSubChunk1, kAddChunk1, kHash3));
+ sub_prefixes.push_back(SBSubPrefix(kSubChunk1, kAddChunk1, kHash3.prefix));
+
+ const base::hash_set<int32> no_deletions;
+ base::hash_set<int32> add_deletions;
+ add_deletions.insert(kAddChunk1);
+ SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes,
+ add_deletions, no_deletions);
+
+ EXPECT_TRUE(add_prefixes.empty());
+ EXPECT_TRUE(add_hashes.empty());
+
+ EXPECT_EQ(1U, sub_prefixes.size());
+ EXPECT_EQ(kSubChunk1, sub_prefixes[0].chunk_id);
+ EXPECT_EQ(kAddChunk1, sub_prefixes[0].add_chunk_id);
+ EXPECT_EQ(kHash3.prefix, sub_prefixes[0].add_prefix);
+
+ EXPECT_EQ(1U, sub_hashes.size());
+ EXPECT_EQ(kSubChunk1, sub_hashes[0].chunk_id);
+ EXPECT_EQ(kAddChunk1, sub_hashes[0].add_chunk_id);
+ EXPECT_TRUE(SBFullHashEq(kHash3, sub_hashes[0].full_hash));
+
+ base::hash_set<int32> sub_deletions;
+ sub_deletions.insert(kSubChunk1);
+ SBProcessSubs(&add_prefixes, &sub_prefixes, &add_hashes, &sub_hashes,
+ no_deletions, sub_deletions);
+
+ EXPECT_TRUE(add_prefixes.empty());
+ EXPECT_TRUE(add_hashes.empty());
+ EXPECT_TRUE(sub_prefixes.empty());
+ EXPECT_TRUE(sub_hashes.empty());
+}
+
TEST(SafeBrowsingStoreTest, Y2K38) {
const base::Time now = base::Time::Now();
const base::Time future = now + base::TimeDelta::FromDays(3*365);