author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-08-06 18:48:37 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-08-06 18:48:37 +0000
commit     0bcd33eacb640e709f36abf3f8d3e88ecd34efef (patch)
tree       c389c0f7763648e1241f457e8af4980901cc85c1 /net/disk_cache/entry_impl.cc
parent     a0f200ea6032c2a1aa05f53482b1f272234339f1 (diff)
Disk Cache: Don't depend on the backend being enabled to be able to return
the key of an open entry.

Whenever a critical corruption is detected by the disk cache, the backend
disables itself and starts failing all requests until it's able to re-create
the backing store. Keys longer than 928 bytes are not stored inside the entry
itself, so a file object is required to access them. The backend will reject
any request for a file object after it is disabled, so a user's request for
the key of an open entry will also fail. Now we keep a pointer to the related
file object (if needed) so that we don't have to ask the backend for it when
the user requests the current key.

BUG=9952
TEST=unittest

Review URL: http://codereview.chromium.org/165030

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@22637 0039d316-1c4b-4281-b951-d872f2087c98
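
The caching pattern this change describes can be illustrated outside the cache
code. The sketch below is not the Chromium implementation; Backend, BackingFile
and Entry are hypothetical stand-ins for BackendImpl, File and EntryImpl, and it
only shows the idea: the entry asks the backend for the key's file object once,
keeps the pointer, and can therefore still answer GetKey() after the backend has
disabled itself:

#include <cstdio>
#include <string>

// Stand-in for disk_cache::File: hands back an in-memory copy of the key.
class BackingFile {
 public:
  explicit BackingFile(const std::string& key) : stored_key_(key) {}
  bool Read(std::string* out) const {
    *out = stored_key_;
    return true;
  }

 private:
  std::string stored_key_;
};

// Stand-in for BackendImpl: hands out file objects only while it is enabled.
class Backend {
 public:
  explicit Backend(const std::string& key) : key_file_(key) {}
  void Disable() { enabled_ = false; }  // e.g. critical corruption detected
  BackingFile* GetBackingFile() { return enabled_ ? &key_file_ : nullptr; }

 private:
  bool enabled_ = true;
  BackingFile key_file_;
};

// Stand-in for EntryImpl: the first successful lookup of the backing file is
// cached, so GetKey() keeps working after the backend starts failing requests.
class Entry {
 public:
  explicit Entry(Backend* backend) : backend_(backend) {}

  std::string GetKey() const {
    if (!key_file_)                            // ask the backend only once
      key_file_ = backend_->GetBackingFile();

    std::string key;
    if (!key_file_ || !key_file_->Read(&key))
      key.clear();                             // never cached and backend gone
    return key;
  }

 private:
  Backend* backend_;
  mutable BackingFile* key_file_ = nullptr;    // cached pointer, as in the CL
};

int main() {
  Backend backend("a key too long to live inside the entry block");
  Entry entry(&backend);

  std::printf("before disable: %s\n", entry.GetKey().c_str());  // caches file
  backend.Disable();
  std::printf("after disable:  %s\n", entry.GetKey().c_str());  // still works
}

The design choice mirrors the CL: once the entry has seen the file object (at
create time or on the first key read), it is self-sufficient for key lookups
instead of asking the backend on every call.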
Diffstat (limited to 'net/disk_cache/entry_impl.cc')
-rw-r--r--  net/disk_cache/entry_impl.cc  |  43
1 files changed, 24 insertions, 19 deletions
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 3493152..0f25684 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -84,6 +84,7 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
   for (int i = 0; i < kNumStreams; i++) {
     unreported_size_[i] = 0;
   }
+  key_file_ = NULL;
 }
 
 // When an entry is deleted from the cache, we clean up all the data associated
@@ -139,25 +140,29 @@ void EntryImpl::Close() {
 
 std::string EntryImpl::GetKey() const {
   CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
-  if (entry->Data()->key_len > kMaxInternalKeyLength) {
-    Addr address(entry->Data()->long_key);
-    DCHECK(address.is_initialized());
-    COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
-    File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
-                                                              kKeyFileIndex);
+  if (entry->Data()->key_len <= kMaxInternalKeyLength)
+    return std::string(entry->Data()->key);
 
-    size_t offset = 0;
-    if (address.is_block_file())
-      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+  Addr address(entry->Data()->long_key);
+  DCHECK(address.is_initialized());
+  size_t offset = 0;
+  if (address.is_block_file())
+    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
 
-    std::string key;
-    if (!file || !file->Read(WriteInto(&key, entry->Data()->key_len + 1),
-                             entry->Data()->key_len + 1, offset))
-      key.clear();
-    return key;
-  } else {
-    return std::string(entry->Data()->key);
+  if (!key_file_) {
+    // We keep a copy of the file needed to access the key so that we can
+    // always return this object's key, even if the backend is disabled.
+    COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
+    key_file_ = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+                                                             kKeyFileIndex);
   }
+
+  std::string key;
+  if (!key_file_ ||
+      !key_file_->Read(WriteInto(&key, entry->Data()->key_len + 1),
+                       entry->Data()->key_len + 1, offset))
+    key.clear();
+  return key;
 }
 
 Time EntryImpl::GetLastUsed() const {
@@ -402,19 +407,19 @@ bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
       return false;
 
     entry_store->long_key = address.value();
-    File* file = GetBackingFile(address, kKeyFileIndex);
+    key_file_ = GetBackingFile(address, kKeyFileIndex);
 
     size_t offset = 0;
     if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
 
-    if (!file || !file->Write(key.data(), key.size(), offset)) {
+    if (!key_file_ || !key_file_->Write(key.data(), key.size(), offset)) {
       DeleteData(address, kKeyFileIndex);
       return false;
     }
 
     if (address.is_separate_file())
-      file->SetLength(key.size() + 1);
+      key_file_->SetLength(key.size() + 1);
   } else {
     memcpy(entry_store->key, key.data(), key.size());
     entry_store->key[key.size()] = '\0';
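
The diffstat is limited to entry_impl.cc, so the matching entry_impl.h change is
not shown on this page. For the assignment inside the const GetKey() hunk above
to compile, the cached pointer presumably has to be declared mutable (or assigned
through the const_cast'd pointer); a hypothetical sketch of such a declaration,
not the actual header:

class File;  // disk_cache::File, defined elsewhere in the real code

class EntryImplSketch {  // hypothetical stand-in, not entry_impl.h
 private:
  // Lazily cached file object backing an external (long) key. Assigned from
  // the const GetKey() path, hence mutable; set to NULL in the constructor.
  mutable File* key_file_;
};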