// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_SAFE_BROWSING_SAFE_BROWSING_STORE_FILE_H_
#define CHROME_BROWSER_SAFE_BROWSING_SAFE_BROWSING_STORE_FILE_H_
#include <set>
#include <vector>
#include "chrome/browser/safe_browsing/safe_browsing_store.h"
#include "base/file_util.h"
// Implement SafeBrowsingStore in terms of a flat file. The file
// format is pretty literal:
//
//   int32 magic;             // magic number "validating" file
//   int32 version;           // format version
//
//   // Counts for the various data which follows the header.
//   int32 add_chunk_count;   // Chunks seen, including empties.
//   int32 sub_chunk_count;   // Ditto.
//   int32 add_prefix_count;
//   int32 sub_prefix_count;
//   int32 add_hash_count;
//   int32 sub_hash_count;
//
//   array[add_chunk_count] {
//     int32 chunk_id;
//   }
//   array[sub_chunk_count] {
//     int32 chunk_id;
//   }
//   array[add_prefix_count] {
//     int32 chunk_id;
//     int32 prefix;
//   }
//   array[sub_prefix_count] {
//     int32 chunk_id;
//     int32 add_chunk_id;
//     int32 add_prefix;
//   }
//   array[add_hash_count] {
//     int32 chunk_id;
//     // TODO(shess): This duplicates first four bytes of full_hash!
//     int32 prefix;
//     // From base::Time::ToTimeT().
//     // TODO(shess): an int32 probably has enough resolution.
//     int64 received_time;
//     char[32] full_hash;
//   }
//   array[sub_hash_count] {
//     int32 chunk_id;
//     int32 add_chunk_id;
//     int32 add_prefix;
//     char[32] add_full_hash;
//   }
// TODO(shess): Would a checksum be worthwhile? If so, check at open,
// or at commit?
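//
// For orientation only, the fixed-size portion of the header above maps
// onto a struct along these lines (a sketch; FileHeaderSketch, kFileMagic,
// kFileVersion, and |fp| are illustrative names, not part of this
// interface, and the implementation may read fields individually):
//
//   struct FileHeaderSketch {
//     int32 magic, version;
//     int32 add_chunk_count, sub_chunk_count;
//     int32 add_prefix_count, sub_prefix_count;
//     int32 add_hash_count, sub_hash_count;
//   };
//   FileHeaderSketch header;
//   if (fread(&header, sizeof(header), 1, fp) != 1 ||
//       header.magic != kFileMagic || header.version != kFileVersion)
//     return OnCorruptDatabase();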
//
// During the course of an update, uncommitted data is stored in a
// temporary file (which is later re-used to commit). This is an
// array of chunks, with the count kept in memory until the end of the
// transaction. The format of this file is like the main file, with
// the list of chunks seen omitted, as that data is tracked in-memory:
//
//   array[] {
//     int32 add_prefix_count;
//     int32 sub_prefix_count;
//     int32 add_hash_count;
//     int32 sub_hash_count;
//     array[add_prefix_count] {
//       int32 chunk_id;
//       int32 prefix;
//     }
//     array[sub_prefix_count] {
//       int32 chunk_id;
//       int32 add_chunk_id;
//       int32 add_prefix;
//     }
//     array[add_hash_count] {
//       int32 chunk_id;
//       int32 prefix;
//       int64 received_time;
//       char[32] full_hash;
//     }
//     array[sub_hash_count] {
//       int32 chunk_id;
//       int32 add_chunk_id;
//       int32 add_prefix;
//       char[32] add_full_hash;
//     }
//   }
//
// The overall transaction works like this:
// - Open the original file to get the chunks-seen data.
// - Open a temp file for storing new chunk info.
// - Write new chunks to the temp file.
// - When the transaction is finished:
//   - Read the rest of the original file's data into buffers.
//   - Rewind the temp file and merge the new data into buffers.
//   - Process buffers for deletions and apply subs.
//   - Rewind and write the buffers out to temp file.
//   - Delete original file.
//   - Rename temp file to original filename.
//
// TODO(shess): Does there need to be an fsync() before the rename?
// important_file_writer.h seems to think that
// http://valhenson.livejournal.com/37921.html means you don't, but I
// don't think it follows (and, besides, this needs to run on other
// operating systems).
//
// TODO(shess): Using a checksum to validate the file would allow
// correctness without fsync, at the cost of periodically needing to
// regenerate the database from scratch.
// TODO(shess): Regeneration could be moderated by saving the previous
// file, if valid, as a checkpoint. During update, if the current
// file is found to be invalid, rollback to the checkpoint and run the
// update forward from there. This would require that the current file
// be validated at BeginUpdate() rather than FinishUpdate(), because
// the chunks-seen data may have changed. [Does this have
// implications for the pending_hashes, which were generated while
// using a newer bloom filter?]
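//
// Typical update sequence, for illustration only (error handling and the
// real call sites are omitted; |path|, |corruption_callback|, |chunk_id|,
// |prefix|, and the result vectors are placeholders):
//
//   SafeBrowsingStoreFile store;
//   store.Init(path, corruption_callback);
//   if (store.BeginUpdate()) {
//     // Repeated for each chunk received:
//     store.BeginChunk();
//     store.SetAddChunk(chunk_id);
//     store.WriteAddPrefix(chunk_id, prefix);
//     store.FinishChunk();
//     ...
//     // Or CancelUpdate() to abandon the transaction.
//     store.FinishUpdate(pending_adds, &add_prefixes, &add_full_hashes);
//   }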
class SafeBrowsingStoreFile : public SafeBrowsingStore {
public:
SafeBrowsingStoreFile();
virtual ~SafeBrowsingStoreFile();
virtual void Init(const FilePath& filename,
Callback0::Type* corruption_callback);
// Delete any on-disk files, including the permanent storage.
virtual bool Delete();
virtual bool BeginChunk() {
return ClearChunkBuffers();
}
virtual bool WriteAddPrefix(int32 chunk_id, SBPrefix prefix) {
add_prefixes_.push_back(SBAddPrefix(chunk_id, prefix));
return true;
}
virtual bool WriteAddHash(int32 chunk_id, SBPrefix prefix,
base::Time receive_time, SBFullHash full_hash) {
add_hashes_.push_back(
SBAddFullHash(chunk_id, prefix, receive_time, full_hash));
return true;
}
virtual bool WriteSubPrefix(int32 chunk_id,
int32 add_chunk_id, SBPrefix prefix) {
sub_prefixes_.push_back(SBSubPrefix(chunk_id, add_chunk_id, prefix));
return true;
}
virtual bool WriteSubHash(int32 chunk_id, int32 add_chunk_id,
SBPrefix prefix, SBFullHash full_hash) {
sub_hashes_.push_back(
SBSubFullHash(chunk_id, add_chunk_id, prefix, full_hash));
return true;
}
virtual bool FinishChunk();
virtual bool BeginUpdate();
virtual bool DoUpdate(const std::vector<SBAddFullHash>& pending_adds,
std::vector<SBAddPrefix>* add_prefixes_result,
std::vector<SBAddFullHash>* add_full_hashes_result);
virtual bool FinishUpdate(const std::vector<SBAddFullHash>& pending_adds,
std::vector<SBAddPrefix>* add_prefixes_result,
std::vector<SBAddFullHash>* add_full_hashes_result);
virtual bool CancelUpdate();
virtual void SetAddChunk(int32 chunk_id) {
add_chunks_cache_.insert(chunk_id);
}
virtual bool CheckAddChunk(int32 chunk_id) {
return add_chunks_cache_.count(chunk_id) > 0;
}
virtual void GetAddChunks(std::vector<int32>* out) {
out->clear();
out->insert(out->end(), add_chunks_cache_.begin(), add_chunks_cache_.end());
}
virtual void SetSubChunk(int32 chunk_id) {
sub_chunks_cache_.insert(chunk_id);
}
virtual bool CheckSubChunk(int32 chunk_id) {
return sub_chunks_cache_.count(chunk_id) > 0;
}
virtual void GetSubChunks(std::vector<int32>* out) {
out->clear();
out->insert(out->end(), sub_chunks_cache_.begin(), sub_chunks_cache_.end());
}
virtual void DeleteAddChunk(int32 chunk_id) {
add_del_cache_.insert(chunk_id);
}
virtual void DeleteSubChunk(int32 chunk_id) {
sub_del_cache_.insert(chunk_id);
}
// Returns the name of the temporary file used to buffer data for
// |filename|. Exported for unit tests.
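// For example, a store at |filename| buffers updates in "|filename|_new",
// which FinishUpdate() renames over |filename| on commit.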
static const FilePath TemporaryFileForFilename(const FilePath& filename) {
return FilePath(filename.value() + FILE_PATH_LITERAL("_new"));
}
private:
// Close all files and clear all buffers.
bool Close();
// Helpers to read/write the various data sets. Excepting
// ReadChunksToSet(), which is called too early, the readers skip
// items from deleted chunks (listed in add_del_cache_ and
// sub_del_cache_).
bool ReadChunksToSet(FILE* fp, std::set<int32>* chunks, int count);
bool WriteChunksFromSet(const std::set<int32>& chunks);
bool ReadAddPrefixes(FILE* fp,
std::vector<SBAddPrefix>* add_prefixes, int count);
bool WriteAddPrefixes(const std::vector<SBAddPrefix>& add_prefixes);
bool ReadSubPrefixes(FILE* fp,
std::vector<SBSubPrefix>* sub_prefixes, int count);
bool WriteSubPrefixes(std::vector<SBSubPrefix>& sub_prefixes);
bool ReadAddHashes(FILE* fp,
std::vector<SBAddFullHash>* add_hashes, int count);
bool WriteAddHashes(const std::vector<SBAddFullHash>& add_hashes);
bool ReadSubHashes(FILE* fp,
std::vector<SBSubFullHash>* sub_hashes, int count);
bool WriteSubHashes(std::vector<SBSubFullHash>& sub_hashes);
// Calls |corruption_callback_| if non-NULL; always returns false as
// a convenience to the caller.
bool OnCorruptDatabase();
// Clear temporary buffers used to accumulate chunk data.
bool ClearChunkBuffers() {
// NOTE: .clear() doesn't release memory.
// TODO(shess): Figure out if this is overkill. Some amount of
// pre-reserved space is probably reasonable between each chunk
// collected.
std::vector<SBAddPrefix>().swap(add_prefixes_);
std::vector<SBSubPrefix>().swap(sub_prefixes_);
std::vector<SBAddFullHash>().swap(add_hashes_);
std::vector<SBSubFullHash>().swap(sub_hashes_);
return true;
}
// Clear all buffers used during update.
void ClearUpdateBuffers() {
ClearChunkBuffers();
chunks_written_ = 0;
std::set<int32>().swap(add_chunks_cache_);
std::set<int32>().swap(sub_chunks_cache_);
base::hash_set<int32>().swap(add_del_cache_);
base::hash_set<int32>().swap(sub_del_cache_);
}
// Buffers for collecting data between BeginChunk() and
// FinishChunk().
std::vector<SBAddPrefix> add_prefixes_;
std::vector<SBSubPrefix> sub_prefixes_;
std::vector<SBAddFullHash> add_hashes_;
std::vector<SBSubFullHash> sub_hashes_;
// Count of chunks collected in |new_file_|.
int chunks_written_;
// Name of the main database file.
FilePath filename_;
// Handles to the main and scratch files. |empty_| is true if the
// main file didn't exist when the update was started.
file_util::ScopedFILE file_;
file_util::ScopedFILE new_file_;
bool empty_;
// Cache of chunks which have been seen. Loaded from the database
// on BeginUpdate() so that it can be queried during the
// transaction.
std::set<int32> add_chunks_cache_;
std::set<int32> sub_chunks_cache_;
// Cache the set of deleted chunks during a transaction, applied on
// FinishUpdate().
// TODO(shess): If the set is small enough, hash_set<> might be
// slower than plain set<>.
base::hash_set<int32> add_del_cache_;
base::hash_set<int32> sub_del_cache_;
scoped_ptr<Callback0::Type> corruption_callback_;
DISALLOW_COPY_AND_ASSIGN(SafeBrowsingStoreFile);
};
#endif // CHROME_BROWSER_SAFE_BROWSING_SAFE_BROWSING_STORE_FILE_H_