path: root/net/disk_cache/entry_impl.cc
author     initial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>  2008-07-26 22:42:52 +0000
committer  initial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>  2008-07-26 22:42:52 +0000
commit     586acc5fe142f498261f52c66862fa417c3d52d2 (patch)
tree       c98b3417a883f2477029c8cd5888f4078681e24e /net/disk_cache/entry_impl.cc
parent     a814a8d55429605fe6d7045045cd25b6bf624580 (diff)
Add net to the repository.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@14 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache/entry_impl.cc')
-rw-r--r--  net/disk_cache/entry_impl.cc | 779
1 file changed, 779 insertions(+), 0 deletions(-)
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
new file mode 100644
index 0000000..d2a4b0f
--- /dev/null
+++ b/net/disk_cache/entry_impl.cc
@@ -0,0 +1,779 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/entry_impl.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+
+namespace {
+
+// A simple Task that runs the completion callback from the message loop
+// (rather than from the APC that performed the IO).
+class InvokeCallback : public Task {
+ public:
+ InvokeCallback(net::CompletionCallback* callback, int argument)
+ : callback_(callback), argument_(argument) {}
+
+ virtual void Run() {
+ callback_->Run(argument_);
+ }
+
+ private:
+ net::CompletionCallback* callback_;
+ int argument_;
+ DISALLOW_EVIL_CONSTRUCTORS(InvokeCallback);
+};
+
+// This class implements FileIOCallback to relay the result of an IO operation
+// on the actual IO class back to the entry that requested it.
+class SyncCallback : public disk_cache::FileIOCallback {
+ public:
+  SyncCallback(disk_cache::EntryImpl* entry,
+               net::CompletionCallback* callback)
+ : entry_(entry), callback_(callback) {
+ entry->AddRef();
+ entry->IncrementIoCount();
+ }
+ ~SyncCallback() {}
+
+ virtual void OnFileIOComplete(int bytes_copied);
+ void Discard();
+ private:
+ disk_cache::EntryImpl* entry_;
+ net::CompletionCallback* callback_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SyncCallback);
+};
+
+void SyncCallback::OnFileIOComplete(int bytes_copied) {
+ entry_->DecrementIoCount();
+ entry_->Release();
+ if (callback_) {
+ InvokeCallback* task = new InvokeCallback(callback_, bytes_copied);
+ MessageLoop::current()->PostTask(FROM_HERE, task);
+ }
+ delete this;
+}
+
+void SyncCallback::Discard() {
+ callback_ = NULL;
+ OnFileIOComplete(0);
+}
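+
+// Lifetime sketch for SyncCallback (illustrative only; it mirrors the call
+// sites later in this file): the object pins the entry and the IO count for
+// one operation, and always frees itself through OnFileIOComplete(), whether
+// the IO completes asynchronously, finishes synchronously, or fails.
+//
+//   SyncCallback* cb = new SyncCallback(entry, completion_callback);
+//   bool completed;
+//   if (!file->Read(buf, len, offset, cb, &completed))
+//     cb->Discard();    // IO failed: drop the user callback and clean up.
+//   else if (completed)
+//     cb->Discard();    // Finished synchronously: same cleanup path.
+//   // Otherwise OnFileIOComplete() runs later and posts the callback.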
+
+// Clears the part of |buffer| before |offset| and after |offset + valid_len|,
+// knowing that the size of the buffer is kMaxBlockSize.
+void ClearInvalidData(char* buffer, int offset, int valid_len) {
+ DCHECK(offset >= 0);
+ DCHECK(valid_len >= 0);
+ DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len);
+ if (offset)
+ memset(buffer, 0, offset);
+ int end = disk_cache::kMaxBlockSize - offset - valid_len;
+ if (end)
+ memset(buffer + offset + valid_len, 0, end);
+}
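+
+// Worked example (a sketch, using a hypothetical 16-byte block in place of
+// kMaxBlockSize): clearing around a 3-byte window at offset 4 zeroes
+// everything except that window.
+//
+//   char buf[16];
+//   memset(buf, 'x', 16);
+//   ClearInvalidData(buf, 4, 3);   // pretend kMaxBlockSize == 16
+//   // buf[0..3]  == 0    cleared before |offset|
+//   // buf[4..6]  == 'x'  the window the caller is about to overwrite
+//   // buf[7..15] == 0    cleared after |offset| + |valid_len|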
+
+} // namespace
+
+namespace disk_cache {
+
+EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
+ : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) {
+ entry_.LazyInit(backend->File(address), address);
+ doomed_ = false;
+ backend_ = backend;
+ unreported_size_[0] = unreported_size_[1] = 0;
+}
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that
+// any unused block is filled with zeros), and to simplify partial reads and
+// writes of an entry (we don't have to worry about returning data that belongs
+// to a previous cache entry because its range was never fully written).
+EntryImpl::~EntryImpl() {
+ if (doomed_) {
+ for (int index = 0; index < kKeyFileIndex; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ DeleteData(address, index);
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ }
+ }
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ memset(node_.buffer(), 0, node_.size());
+ memset(entry_.buffer(), 0, entry_.size());
+ node_.Store();
+ entry_.Store();
+
+ backend_->DeleteBlock(node_.address(), false);
+ backend_->DeleteBlock(entry_.address(), false);
+ } else {
+ bool ret = true;
+ for (int index = 0; index < kKeyFileIndex; index++) {
+ if (user_buffers_[index].get()) {
+ if (!(ret = Flush(index, entry_.Data()->data_size[index], false)))
+ LOG(ERROR) << "Failed to save user data";
+ } else if (unreported_size_[index]) {
+ backend_->ModifyStorageSize(
+ entry_.Data()->data_size[index] - unreported_size_[index],
+ entry_.Data()->data_size[index]);
+ }
+ }
+ if (node_.HasData() && this == node_.Data()->pointer) {
+ // We have to do this after Flush because we may trigger a cache trim from
+ // there, and technically this entry should be "in use".
+ node_.Data()->pointer = NULL;
+ node_.set_modified();
+ }
+
+ if (!ret) {
+ // There was a failure writing the actual data. Mark the entry as dirty.
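+      // Any nonzero id different from the current one reads back as dirty
+      // (see IsDirty()); -1 covers the case where current_id - 1 would be 0.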
+ int current_id = backend_->GetCurrentEntryId();
+ node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+ node_.Store();
+ } else if (node_.HasData() && node_.Data()->dirty) {
+ node_.Data()->dirty = 0;
+ node_.Store();
+ }
+ }
+
+ backend_->CacheEntryDestroyed();
+}
+
+void EntryImpl::DeleteData(Addr address, int index) {
+ if (!address.is_initialized())
+ return;
+ if (address.is_separate_file()) {
+ if (files_[index])
+ files_[index] = NULL; // Releases the object.
+
+ if (!DeleteFile(backend_->GetFileName(address).c_str()))
+ LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) <<
+ " from the cache.";
+ } else {
+ backend_->DeleteBlock(address, true);
+ }
+}
+
+bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
+ uint32 hash) {
+ Trace("Create entry In");
+ EntryStore* entry_store = entry_.Data();
+ RankingsNode* node = node_.Data();
+ memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
+ memset(node, 0, sizeof(RankingsNode));
+ if (!node_.LazyInit(backend_->File(node_address), node_address))
+ return false;
+
+ entry_store->rankings_node = node_address.value();
+ node->contents = entry_.address().value();
+ node->pointer = this;
+
+ entry_store->hash = hash;
+ entry_store->key_len = static_cast<int32>(key.size());
+ if (entry_store->key_len > kMaxInternalKeyLength) {
+ Addr address(0);
+ if (!CreateBlock(entry_store->key_len + 1, &address))
+ return false;
+
+ entry_store->long_key = address.value();
+ File* file = GetBackingFile(address, kKeyFileIndex);
+
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!file || !file->Write(key.data(), key.size(), offset)) {
+ DeleteData(address, kKeyFileIndex);
+ return false;
+ }
+
+ if (address.is_separate_file())
+ file->SetLength(key.size() + 1);
+ } else {
+ memcpy(entry_store->key, key.data(), key.size());
+ entry_store->key[key.size()] = '\0';
+ }
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ node->dirty = backend_->GetCurrentEntryId();
+ Log("Create Entry ");
+ return true;
+}
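+
+// A note on the offset computation used above and elsewhere in this file:
+// data kept in a block file lives after the file header, at the byte position
+// of its first block. A sketch with hypothetical numbers (the real values
+// come from Addr and kBlockHeaderSize):
+//
+//   // start_block() == 12, BlockSize() == 256, kBlockHeaderSize == 8192
+//   size_t offset = 12 * 256 + 8192;   // == 11264, first byte of the record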
+
+void EntryImpl::Close() {
+ Release();
+}
+
+void EntryImpl::Doom() {
+ if (doomed_)
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
+void EntryImpl::InternalDoom() {
+ DCHECK(node_.HasData());
+ if (!node_.Data()->dirty) {
+ node_.Data()->dirty = backend_->GetCurrentEntryId();
+ node_.Store();
+ }
+ doomed_ = true;
+}
+
+std::string EntryImpl::GetKey() const {
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ if (entry->Data()->key_len > kMaxInternalKeyLength) {
+ Addr address(entry->Data()->long_key);
+ DCHECK(address.is_initialized());
+ File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+ kKeyFileIndex);
+
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ std::string key;
+ if (!file || !file->Read(WriteInto(&key, entry->Data()->key_len + 1),
+ entry->Data()->key_len + 1, offset))
+ key.clear();
+ return key;
+ } else {
+ return std::string(entry->Data()->key);
+ }
+}
+
+Time EntryImpl::GetLastUsed() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_used);
+}
+
+Time EntryImpl::GetLastModified() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_modified);
+}
+
+int32 EntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index > 1)
+ return 0;
+
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ return entry->Data()->data_size[index];
+}
+
+int EntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index > 1)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ backend_->OnEvent(Stats::READ_DATA);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ DCHECK(kMaxBlockSize >= offset + buf_len);
+    memcpy(buf, user_buffers_[index].get() + offset, buf_len);
+ return buf_len;
+ }
+
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(address.is_initialized());
+ if (!address.is_initialized())
+ return net::ERR_FAILED;
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FAILED;
+
+ size_t file_offset = offset;
+ if (address.is_block_file())
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+
+ SyncCallback* io_callback = NULL;
+ if (completion_callback)
+ io_callback = new SyncCallback(this, completion_callback);
+
+ bool completed;
+ if (!file->Read(buf, buf_len, file_offset, io_callback, &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_FAILED;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
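+
+// Caller-side sketch for ReadData (|entry|, |buffer| and |my_callback| are
+// hypothetical names): the return value distinguishes synchronous completion
+// from a pending asynchronous read.
+//
+//   int rv = entry->ReadData(0, 0, buffer, buffer_len, &my_callback);
+//   if (rv == net::ERR_IO_PENDING) {
+//     // my_callback.Run(bytes_read) will be posted to this message loop
+//     // when the file IO completes; |buffer| must stay valid until then.
+//   } else if (rv >= 0) {
+//     // Completed synchronously; rv is the number of bytes read.
+//   }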
+
+int EntryImpl::WriteData(int index, int offset, const char* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate) {
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index > 1)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+  // offset + buf_len could overflow to a negative number.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ backend_->TooMuchStorageRequested(size);
+ return net::ERR_FAILED;
+ }
+
+ // Read the size at this point (it may change inside prepare).
+ int entry_size = entry_.Data()->data_size[index];
+ if (!PrepareTarget(index, offset, buf_len, truncate))
+ return net::ERR_FAILED;
+
+ if (entry_size < offset + buf_len) {
+ unreported_size_[index] += offset + buf_len - entry_size;
+ entry_.Data()->data_size[index] = offset + buf_len;
+ entry_.set_modified();
+ } else if (truncate) {
+ // If the size was modified inside PrepareTarget, we should not do
+ // anything here.
+ if ((entry_size > offset + buf_len) &&
+ (entry_size == entry_.Data()->data_size[index])) {
+ unreported_size_[index] += offset + buf_len - entry_size;
+ entry_.Data()->data_size[index] = offset + buf_len;
+ entry_.set_modified();
+ } else {
+ // Nothing to truncate.
+ truncate = false;
+ }
+ }
+
+ UpdateRank(true);
+
+ backend_->OnEvent(Stats::WRITE_DATA);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ DCHECK(kMaxBlockSize >= offset + buf_len);
+ memcpy(user_buffers_[index].get() + offset, buf, buf_len);
+ return buf_len;
+ }
+
+ Addr address(entry_.Data()->data_addr[index]);
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FAILED;
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ } else if (truncate) {
+ if (!file->SetLength(offset + buf_len))
+ return net::ERR_FAILED;
+ }
+
+ if (!buf_len)
+ return 0;
+
+ SyncCallback* io_callback = NULL;
+ if (completion_callback)
+ io_callback = new SyncCallback(this, completion_callback);
+
+ bool completed;
+ if (!file->Write(buf, buf_len, file_offset, io_callback, &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_FAILED;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
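+
+// A note on the size check at the top of WriteData: with 32-bit ints,
+// offset + buf_len can wrap around to a negative value, so a sum that comes
+// out not greater than max_file_size is treated as overflow and reported as
+// kint32max. A sketch with hypothetical values:
+//
+//   // offset = 0x7fffff00, buf_len = 0x200, max_file_size = 0x100000
+//   int size = offset + buf_len;   // wraps to a negative number
+//   if (size <= max_file_size)     // true, so the wrap is detected
+//     size = kint32max;            // report "as much as possible"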
+
+bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
+ bool truncate) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized() || user_buffers_[index].get())
+ return GrowUserBuffer(index, offset, buf_len, truncate);
+
+ if (offset + buf_len > kMaxBlockSize)
+ return CreateDataBlock(index, offset + buf_len);
+
+ user_buffers_[index].reset(new char[kMaxBlockSize]);
+
+ // Overwrite the parts of the buffer that are not going to be written
+ // by the current operation (and yes, let's assume that nothing is going
+ // to fail, and we'll actually write over the part that we are not cleaning
+ // here). The point is to avoid writing random stuff to disk later on.
+ ClearInvalidData(user_buffers_[index].get(), offset, buf_len);
+
+ return true;
+}
+
+// We get to this function with some data already stored. If there is a
+// truncation that results in data stored internally, we'll explicitly
+// handle the case here.
+bool EntryImpl::GrowUserBuffer(int index, int offset, int buf_len,
+ bool truncate) {
+ Addr address(entry_.Data()->data_addr[index]);
+
+ if (offset + buf_len > kMaxBlockSize) {
+ // The data has to be stored externally.
+ if (address.is_initialized()) {
+ if (address.is_separate_file())
+ return true;
+ if (!MoveToLocalBuffer(index))
+ return false;
+ }
+ return Flush(index, offset + buf_len, true);
+ }
+
+ if (!address.is_initialized()) {
+ DCHECK(user_buffers_[index].get());
+ if (truncate)
+ ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+ return true;
+ }
+ if (address.is_separate_file()) {
+ if (!truncate)
+ return true;
+ return ImportSeparateFile(index, offset, buf_len);
+ }
+
+ // At this point we are dealing with data stored on disk, inside a block file.
+ if (offset + buf_len <= address.BlockSize() * address.num_blocks())
+ return true;
+
+ // ... and the allocated block has to change.
+ if (!MoveToLocalBuffer(index))
+ return false;
+
+ int clear_start = entry_.Data()->data_size[index];
+ if (truncate)
+ clear_start = std::min(clear_start, offset + buf_len);
+ else if (offset < clear_start)
+ clear_start = std::max(offset + buf_len, clear_start);
+
+ // Clear the end of the buffer.
+ ClearInvalidData(user_buffers_[index].get(), 0, clear_start);
+ return true;
+}
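+
+// A sketch of the clear_start computation with hypothetical numbers: say the
+// stored size is 100 bytes and the caller writes 10 bytes at offset 40, so
+// offset + buf_len == 50.
+//
+//   // truncate == true:  clear_start = min(100, 50) == 50, so bytes
+//   //                    [50, kMaxBlockSize) are zeroed: the old tail goes.
+//   // truncate == false: offset (40) < 100, so
+//   //                    clear_start = max(50, 100) == 100; only the bytes
+//   //                    past the preserved data, [100, kMaxBlockSize),
+//   //                    are zeroed.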
+
+bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) {
+  if (entry_.Data()->data_size[index] > offset + buf_len) {
+    // Account for the shrinkage before overwriting the stored size; computing
+    // the delta after the assignment would always add zero.
+    unreported_size_[index] += offset + buf_len -
+        entry_.Data()->data_size[index];
+    entry_.Data()->data_size[index] = offset + buf_len;
+  }
+
+ if (!MoveToLocalBuffer(index))
+ return false;
+
+ // Clear the end of the buffer.
+ ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+ return true;
+}
+
+bool EntryImpl::MoveToLocalBuffer(int index) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+ scoped_ptr<char> buffer(new char[kMaxBlockSize]);
+
+ File* file = GetBackingFile(address, index);
+ size_t len = entry_.Data()->data_size[index];
+ size_t offset = 0;
+
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!file || !file->Read(buffer.get(), len, offset, NULL, NULL))
+ return false;
+
+ DeleteData(address, index);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Store();
+
+ // If we lose this entry we'll see it as zero sized.
+ backend_->ModifyStorageSize(static_cast<int>(len) - unreported_size_[index],
+ 0);
+ unreported_size_[index] = static_cast<int>(len);
+
+ user_buffers_[index].swap(buffer);
+ return true;
+}
+
+// The common scenario is that this is called from the destructor of the entry,
+// to write to disk what we have buffered. We don't want to stall the
+// destructor until the actual IO finishes, so we'll send an asynchronous write
+// that will free up the memory containing the data. To be consistent, this
+// method always returns with the buffer freed up (on success).
+bool EntryImpl::Flush(int index, int size, bool async) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(user_buffers_[index].get());
+ DCHECK(!address.is_initialized());
+
+ if (!size)
+ return true;
+
+ if (!CreateDataBlock(index, size))
+ return false;
+
+ address.set_value(entry_.Data()->data_addr[index]);
+
+ File* file = GetBackingFile(address, index);
+ size_t len = entry_.Data()->data_size[index];
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ // We just told the backend to store len bytes for real.
+ DCHECK(len == unreported_size_[index]);
+ backend_->ModifyStorageSize(0, static_cast<int>(len));
+ unreported_size_[index] = 0;
+
+ if (!file)
+ return false;
+
+ if (async) {
+ if (!file->PostWrite(user_buffers_[index].get(), len, offset))
+ return false;
+ } else {
+ if (!file->Write(user_buffers_[index].get(), len, offset, NULL, NULL))
+ return false;
+ user_buffers_[index].reset(NULL);
+ }
+
+  // In the async case the buffer is owned (and deleted) by the PostWrite
+  // operation, so give up our pointer without freeing the memory; in the
+  // sync case the buffer was already freed above and release() is a no-op.
+  user_buffers_[index].release();
+
+ return true;
+}
+
+bool EntryImpl::LoadNodeAddress() {
+ Addr address(entry_.Data()->rankings_node);
+ if (!node_.LazyInit(backend_->File(address), address))
+ return false;
+ return node_.Load();
+}
+
+EntryImpl* EntryImpl::Update(EntryImpl* entry) {
+ DCHECK(entry->rankings()->HasData());
+
+ RankingsNode* rankings = entry->rankings()->Data();
+ if (rankings->pointer) {
+ // Already in memory. Prevent clearing the dirty flag on the destructor.
+ rankings->dirty = 0;
+ EntryImpl* real_node = reinterpret_cast<EntryImpl*>(rankings->pointer);
+ real_node->AddRef();
+ entry->Release();
+ return real_node;
+ } else {
+ rankings->dirty = entry->backend_->GetCurrentEntryId();
+ rankings->pointer = entry;
+ if (!entry->rankings()->Store()) {
+ entry->Release();
+ return NULL;
+ }
+ return entry;
+ }
+}
+
+bool EntryImpl::CreateDataBlock(int index, int size) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(0 == index || 1 == index);
+
+ if (!CreateBlock(size, &address))
+ return false;
+
+ entry_.Data()->data_addr[index] = address.value();
+ entry_.Store();
+ return true;
+}
+
+bool EntryImpl::CreateBlock(int size, Addr* address) {
+ DCHECK(!address->is_initialized());
+
+ FileType file_type = Addr::RequiredFileType(size);
+ if (EXTERNAL == file_type) {
+ if (size > backend_->MaxFileSize())
+ return false;
+ if (!backend_->CreateExternalFile(address))
+ return false;
+ } else {
+ int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) /
+ Addr::BlockSizeForFileType(file_type);
+
+ if (!backend_->CreateBlock(file_type, num_blocks, address))
+ return false;
+ }
+ return true;
+}
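+
+// The num_blocks expression above is the usual integer ceiling division; a
+// sketch with hypothetical numbers:
+//
+//   // size == 3000, Addr::BlockSizeForFileType(file_type) == 1024
+//   int num_blocks = (3000 + 1024 - 1) / 1024;   // == 3 blocks, 3072 bytes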
+
+bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
+ if (entry_.Data()->hash != hash || entry_.Data()->key_len != key.size())
+ return false;
+
+ std::string my_key = GetKey();
+  return key == my_key;
+}
+
+CacheAddr EntryImpl::GetNextAddress() {
+ return entry_.Data()->next;
+}
+
+void EntryImpl::SetNextAddress(Addr address) {
+ entry_.Data()->next = address.value();
+ bool success = entry_.Store();
+ DCHECK(success);
+}
+
+void EntryImpl::UpdateRank(bool modified) {
+ if (!doomed_) {
+ // Everything is handled by the backend.
+    backend_->UpdateRank(&node_, modified);
+ return;
+ }
+
+ Time current = Time::Now();
+ node_.Data()->last_used = current.ToInternalValue();
+
+ if (modified)
+ node_.Data()->last_modified = current.ToInternalValue();
+}
+
+File* EntryImpl::GetBackingFile(Addr address, int index) {
+ File* file;
+ if (address.is_separate_file())
+ file = GetExternalFile(address, index);
+ else
+ file = backend_->File(address);
+ return file;
+}
+
+File* EntryImpl::GetExternalFile(Addr address, int index) {
+ DCHECK(index >= 0 && index <= 2);
+ if (!files_[index].get()) {
+ // For a key file, use mixed mode IO.
+ scoped_refptr<File> file(new File(2 == index));
+ if (file->Init(backend_->GetFileName(address)))
+ files_[index].swap(file);
+ }
+ return files_[index].get();
+}
+
+uint32 EntryImpl::GetHash() {
+ return entry_.Data()->hash;
+}
+
+bool EntryImpl::IsDirty(int32 current_id) {
+ DCHECK(node_.HasData());
+ return node_.Data()->dirty && current_id != node_.Data()->dirty;
+}
+
+void EntryImpl::ClearDirtyFlag() {
+ node_.Data()->dirty = 0;
+}
+
+void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
+ node_.Data()->dirty = new_id;
+ node_.Data()->pointer = this;
+ node_.Store();
+}
+
+bool EntryImpl::SanityCheck() {
+ if (!entry_.Data()->rankings_node || !entry_.Data()->key_len)
+ return false;
+
+ Addr rankings_addr(entry_.Data()->rankings_node);
+ if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() ||
+ rankings_addr.file_type() != RANKINGS)
+ return false;
+
+ Addr next_addr(entry_.Data()->next);
+ if (next_addr.is_initialized() &&
+ (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256))
+ return false;
+
+ return true;
+}
+
+void EntryImpl::IncrementIoCount() {
+ backend_->IncrementIoCount();
+}
+
+void EntryImpl::DecrementIoCount() {
+ backend_->DecrementIoCount();
+}
+
+void EntryImpl::Log(const char* msg) {
+ void* pointer = NULL;
+ int dirty = 0;
+ if (node_.HasData()) {
+ pointer = node_.Data()->pointer;
+ dirty = node_.Data()->dirty;
+ }
+
+ Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+ entry_.address().value(), node_.address().value());
+
+ Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
+ entry_.Data()->data_addr[1], entry_.Data()->long_key);
+
+ Trace(" doomed: %d 0x%p 0x%x", doomed_, pointer, dirty);
+}
+
+} // namespace disk_cache