summaryrefslogtreecommitdiffstats
path: root/net/disk_cache
diff options
context:
space:
mode:
authorinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 22:42:52 +0000
committerinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 22:42:52 +0000
commit586acc5fe142f498261f52c66862fa417c3d52d2 (patch)
treec98b3417a883f2477029c8cd5888f4078681e24e /net/disk_cache
parenta814a8d55429605fe6d7045045cd25b6bf624580 (diff)
downloadchromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.zip
chromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.tar.gz
chromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.tar.bz2
Add net to the repository.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@14 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--net/disk_cache/addr.h178
-rw-r--r--net/disk_cache/addr_unittest.cc61
-rw-r--r--net/disk_cache/backend_impl.cc1169
-rw-r--r--net/disk_cache/backend_impl.h218
-rw-r--r--net/disk_cache/backend_unittest.cc944
-rw-r--r--net/disk_cache/block_files.cc441
-rw-r--r--net/disk_cache/block_files.h105
-rw-r--r--net/disk_cache/block_files_unittest.cc125
-rw-r--r--net/disk_cache/disk_cache.h181
-rw-r--r--net/disk_cache/disk_cache_perftest.cc236
-rw-r--r--net/disk_cache/disk_cache_test_base.cc126
-rw-r--r--net/disk_cache/disk_cache_test_base.h92
-rw-r--r--net/disk_cache/disk_cache_test_util.cc163
-rw-r--r--net/disk_cache/disk_cache_test_util.h127
-rw-r--r--net/disk_cache/disk_format.h192
-rw-r--r--net/disk_cache/entry_impl.cc779
-rw-r--r--net/disk_cache/entry_impl.h168
-rw-r--r--net/disk_cache/entry_unittest.cc713
-rw-r--r--net/disk_cache/errors.h52
-rw-r--r--net/disk_cache/file.cc313
-rw-r--r--net/disk_cache/file.h112
-rw-r--r--net/disk_cache/file_block.h56
-rw-r--r--net/disk_cache/file_lock.cc52
-rw-r--r--net/disk_cache/file_lock.h70
-rw-r--r--net/disk_cache/hash.cc67
-rw-r--r--net/disk_cache/hash.h55
-rw-r--r--net/disk_cache/mapped_file.cc78
-rw-r--r--net/disk_cache/mapped_file.h77
-rw-r--r--net/disk_cache/mapped_file_unittest.cc139
-rw-r--r--net/disk_cache/mem_backend_impl.cc276
-rw-r--r--net/disk_cache/mem_backend_impl.h104
-rw-r--r--net/disk_cache/mem_entry_impl.cc200
-rw-r--r--net/disk_cache/mem_entry_impl.h110
-rw-r--r--net/disk_cache/mem_rankings.cc87
-rw-r--r--net/disk_cache/mem_rankings.h69
-rw-r--r--net/disk_cache/rankings.cc697
-rw-r--r--net/disk_cache/rankings.h178
-rw-r--r--net/disk_cache/stats.cc258
-rw-r--r--net/disk_cache/stats.h98
-rw-r--r--net/disk_cache/storage_block-inl.h152
-rw-r--r--net/disk_cache/storage_block.h107
-rw-r--r--net/disk_cache/storage_block_unittest.cc96
-rw-r--r--net/disk_cache/stress_cache.cc221
-rw-r--r--net/disk_cache/trace.cc146
-rw-r--r--net/disk_cache/trace.h67
45 files changed, 9955 insertions, 0 deletions
diff --git a/net/disk_cache/addr.h b/net/disk_cache/addr.h
new file mode 100644
index 0000000..3c6923a
--- /dev/null
+++ b/net/disk_cache/addr.h
@@ -0,0 +1,178 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is an internal class that handles the address of a cache record.
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_ADDR_H__
+#define NET_DISK_CACHE_ADDR_H__
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "net/disk_cache/disk_format.h"
+
+namespace disk_cache {
+
// Kind of backing store a cache address points at. EXTERNAL means a
// stand-alone file on disk; the others are block files holding fixed-size
// records (the rankings file uses 36-byte blocks, see BlockSizeForFileType).
enum FileType {
  EXTERNAL = 0,
  RANKINGS = 1,
  BLOCK_256,
  BLOCK_1K,
  BLOCK_4K,
};

// Largest payload that still fits in a block file: 4 contiguous 4 KB blocks.
const int kMaxBlockSize = 4096 * 4;
// File selector field is 8 bits wide, so block-file numbers run 0-255.
const int kMaxBlockFile = 255;
// At most 4 contiguous blocks can back a single record.
const int kMaxNumBlocks = 4;
// NOTE(review): name has a typo ("Additionl") but it is part of the public
// interface of this header, so it is kept for compatibility. Presumably the
// first file number available for overflow block files — confirm with callers.
const int kFirstAdditionlBlockFile = 4;
+
// Defines a storage address for a cache record
//
// Header:
//   1000 0000 0000 0000 0000 0000 0000 0000 : initialized bit
//   0111 0000 0000 0000 0000 0000 0000 0000 : file type
//
// File type values:
//   0 = separate file on disk
//   1 = rankings block file
//   2 = 256 byte block file
//   3 = 1k byte block file
//   4 = 4k byte block file
//
// If separate file:
//   0000 1111 1111 1111 1111 1111 1111 1111 : file# 0 - 268,435,455 (2^28 - 1)
//
// If block file:
//   0000 1100 0000 0000 0000 0000 0000 0000 : reserved bits
//   0000 0011 0000 0000 0000 0000 0000 0000 : number of contiguous blocks 1-4
//   0000 0000 1111 1111 0000 0000 0000 0000 : file selector 0 - 255
//   0000 0000 0000 0000 1111 1111 1111 1111 : block# 0 - 65,535 (2^16 - 1)
class Addr {
 public:
  // Wraps a raw 32-bit address as read from disk; no validation is done.
  explicit Addr(CacheAddr address) : value_(address) {}
  // Builds a block-file address. Fields are masked to their bit widths, so
  // out-of-range arguments are silently truncated (see CacheAddr_InvalidValues
  // unit test, which relies on this).
  Addr(FileType file_type, int max_blocks, int block_file, int index) {
    value_ = ((file_type << kFileTypeOffset) & kFileTypeMask) |
             (((max_blocks - 1) << kNumBlocksOffset) & kNumBlocksMask) |
             ((block_file << kFileSelectorOffset) & kFileSelectorMask) |
             (index & kStartBlockMask) | kInitializedMask;
  }

  CacheAddr value() const { return value_; }
  void set_value(CacheAddr address) {
    value_ = address;
  }

  // An address of 0 (or any value without the top bit) is "not set".
  bool is_initialized() const {
    return (value_ & kInitializedMask) != 0;
  }

  // File type 0 (EXTERNAL) means the data lives in its own file on disk.
  bool is_separate_file() const {
    return (value_ & kFileTypeMask) == 0;
  }

  bool is_block_file() const {
    return !is_separate_file();
  }

  FileType file_type() const {
    return static_cast<FileType>((value_ & kFileTypeMask) >> kFileTypeOffset);
  }

  // For a separate file this is the 28-bit file name number; for a block file
  // it is the 8-bit file selector.
  int FileNumber() const {
    if (is_separate_file())
      return value_ & kFileNameMask;
    else
      return ((value_ & kFileSelectorMask) >> kFileSelectorOffset);
  }

  // Index of the first block inside the block file. Only meaningful for
  // block-file addresses.
  int start_block() const {
    DCHECK(is_block_file());
    return value_ & kStartBlockMask;
  }

  // Number of contiguous blocks (1-4). The field stores count - 1, hence the
  // + 1. A zero (uninitialized) address is tolerated by the DCHECK.
  int num_blocks() const {
    DCHECK(is_block_file() || !value_);
    return ((value_ & kNumBlocksMask) >> kNumBlocksOffset) + 1;
  }

  // Turns this into a separate-file address for |file_number|. Fails (and
  // leaves value_ untouched) if the number does not fit in 28 bits.
  bool SetFileNumber(int file_number) {
    DCHECK(is_separate_file());
    if (file_number & ~kFileNameMask)
      return false;
    value_ = kInitializedMask | file_number;
    return true;
  }

  int BlockSize() const {
    return BlockSizeForFileType(file_type());
  }

  // Size in bytes of one block of the given file type; 0 for EXTERNAL (or
  // anything unrecognized), since separate files have no block size.
  static int BlockSizeForFileType(FileType file_type) {
    switch (file_type) {
      case RANKINGS:
        return 36;
      case BLOCK_256:
        return 256;
      case BLOCK_1K:
        return 1024;
      case BLOCK_4K:
        return 4096;
      default:
        return 0;
    }
  }

  // Picks the smallest block file able to store |size| bytes, given that up
  // to 4 contiguous blocks may be used (e.g. 4 x 256 covers anything < 1024).
  // Anything above 16 KB goes to a separate file.
  static FileType RequiredFileType(int size) {
    if (size < 1024)
      return BLOCK_256;
    else if (size < 4096)
      return BLOCK_1K;
    else if (size <= 4096 * 4)
      return BLOCK_4K;
    else
      return EXTERNAL;
  }

 private:
  static const uint32 kInitializedMask = 0x80000000;
  static const uint32 kFileTypeMask = 0x70000000;
  static const uint32 kFileTypeOffset = 28;
  static const uint32 kNumBlocksMask = 0x03000000;
  static const uint32 kNumBlocksOffset = 24;
  static const uint32 kFileSelectorMask = 0x00ff0000;
  static const uint32 kFileSelectorOffset = 16;
  static const uint32 kStartBlockMask = 0x0000FFFF;
  static const uint32 kFileNameMask = 0x0FFFFFFF;

  CacheAddr value_;
};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ADDR_H__
diff --git a/net/disk_cache/addr_unittest.cc b/net/disk_cache/addr_unittest.cc
new file mode 100644
index 0000000..a06cf35
--- /dev/null
+++ b/net/disk_cache/addr_unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/addr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
// Addr must be a zero-overhead wrapper: exactly the size of the raw address.
TEST(DiskCacheTest, CacheAddr_Size) {
  Addr addr1(0);
  EXPECT_FALSE(addr1.is_initialized());

  // The object should not be more expensive than the actual address.
  EXPECT_EQ(sizeof(uint32), sizeof(addr1));
}
+
// In-range constructor arguments must round-trip through the accessors.
TEST(DiskCacheTest, CacheAddr_ValidValues) {
  Addr addr2(BLOCK_1K, 3, 5, 25);
  EXPECT_EQ(BLOCK_1K, addr2.file_type());
  EXPECT_EQ(3, addr2.num_blocks());
  EXPECT_EQ(5, addr2.FileNumber());
  EXPECT_EQ(25, addr2.start_block());
  EXPECT_EQ(1024, addr2.BlockSize());
}
+
// Out-of-range arguments are silently truncated to their bit fields:
// 0x44 -> 4 blocks (2-bit field), 0x41508 -> selector 8, 0x952536 -> 0x2536.
TEST(DiskCacheTest, CacheAddr_InvalidValues) {
  Addr addr3(BLOCK_4K, 0x44, 0x41508, 0x952536);
  EXPECT_EQ(BLOCK_4K, addr3.file_type());
  EXPECT_EQ(4, addr3.num_blocks());
  EXPECT_EQ(8, addr3.FileNumber());
  EXPECT_EQ(0x2536, addr3.start_block());
  EXPECT_EQ(4096, addr3.BlockSize());
}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
new file mode 100644
index 0000000..59dd8e1
--- /dev/null
+++ b/net/disk_cache/backend_impl.cc
@@ -0,0 +1,1169 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/backend_impl.h"
+
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/scoped_handle.h"
+#include "base/string_util.h"
+#include "base/timer.h"
+#include "base/worker_pool.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/hash.h"
+
+namespace {
+
// Name of the index file inside the cache folder.
const wchar_t* kIndexName = L"index";
// Eviction frees this much below the high-water mark (see LowWaterAdjust).
const int kCleanUpMargin = 1024 * 1024;
// Maximum number of "old_<name>_NNN" folders kept for delayed deletion.
const int kMaxOldFolders = 100;

// Seems like ~160 MB correspond to ~50k entries.
const int k64kEntriesStore = 160 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
// NOTE(review): not referenced in this chunk — presumably used where the
// cache size defaults elsewhere in this file; confirm before removing.
const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexSize(int table_len) {
+ size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
+ return sizeof(disk_cache::IndexHeader) + table_size;
+}
+
// Deletes all the files on path that match search_name pattern.
// Do not call this function with "*" as search_name.
// Returns true on full success, and also when no file matched the pattern;
// returns false on the first file that could not be deleted.
bool DeleteFiles(const wchar_t* path, const wchar_t* search_name) {
  std::wstring name(path);
  name += search_name;
  // The pattern is expected to begin with a path separator.
  DCHECK(search_name[0] == L'\\');

  WIN32_FIND_DATA data;
  ScopedFindFileHandle handle(FindFirstFile(name.c_str(), &data));
  if (!handle.IsValid()) {
    // "No matches" is success; any other find error is a failure.
    DWORD error = GetLastError();
    return ERROR_FILE_NOT_FOUND == error;
  }
  std::wstring adjusted_path(path);
  adjusted_path += L'\\';
  do {
    // cFileName is relative, so rebuild the full path for each match.
    std::wstring current(adjusted_path);
    current += data.cFileName;
    if (!DeleteFile(current.c_str()))
      return false;
  } while (FindNextFile(handle, &data));
  return true;
}
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+// ------------------------------------------------------------------------
+
+// Returns a fully qualified name from path and name, using a given name prefix
+// and index number. For instance, if the arguments are "/foo", "bar" and 5, it
+// will return "/foo/old_bar_005".
+std::wstring GetPrefixedName(const std::wstring& path, const std::wstring& name,
+ int index) {
+ std::wstring prefixed(path);
+ std::wstring tmp = StringPrintf(L"%s%s_%03d", L"old_", name.c_str(), index);
+ file_util::AppendToPath(&prefixed, tmp);
+ return prefixed;
+}
+
// This is a simple Task to cleanup old caches.
// It runs on a worker-pool thread (see DelayedCacheCleanup) and deletes every
// possible "old_<name>_NNN" folder under |path_|.
class CleanupTask : public Task {
 public:
  CleanupTask(const std::wstring& path, const std::wstring& name)
      : path_(path), name_(name) {}

  virtual void Run();

 private:
  std::wstring path_;   // Parent folder of the renamed cache folders.
  std::wstring name_;   // Original cache folder name (without the prefix).
  DISALLOW_EVIL_CONSTRUCTORS(CleanupTask);
};

void CleanupTask::Run() {
  // Try every candidate folder name; Delete is a no-op for ones that don't
  // exist, so no existence check is needed.
  for (int i = 0; i < kMaxOldFolders; i++) {
    std::wstring to_delete = GetPrefixedName(path_, name_, i);

    // We do not create subfolders on the cache. If there is any subfolder, it
    // was created by someone else so we don't want to delete it.
    file_util::Delete(to_delete, false);
  }
}
+
// Returns a full path to rename the current cache, in order to delete it. path
// is the current folder location, and name is the current folder name.
// Returns an empty string when all kMaxOldFolders candidate names are taken.
std::wstring GetTempCacheName(const std::wstring& path,
                              const std::wstring& name) {
  // We'll attempt to have up to kMaxOldFolders folders for deletion.
  for (int i = 0; i < kMaxOldFolders; i++) {
    std::wstring to_delete = GetPrefixedName(path, name, i);
    if (!file_util::PathExists(to_delete))
      return to_delete;
  }
  return std::wstring();
}
+
+// Moves the cache files to a new folder and creates a task to delete them.
+bool DelayedCacheCleanup(const std::wstring& full_path) {
+ std::wstring path(full_path);
+ file_util::TrimTrailingSeparator(&path);
+
+ std::wstring name = file_util::GetFilenameFromPath(path);
+ file_util::TrimFilename(&path);
+
+ std::wstring to_delete = GetTempCacheName(path, name);
+ if (to_delete.empty()) {
+ LOG(ERROR) << "Unable to get another cache folder";
+ return false;
+ }
+
+ // I don't want to use the shell version of move because if something goes
+ // wrong, that version will attempt to move file by file and fail at the end.
+ if (!MoveFileEx(full_path.c_str(), to_delete.c_str(), 0)) {
+ DWORD error = GetLastError();
+ LOG(ERROR) << "Unable to rename cache folder";
+ return false;
+ }
+
+ WorkerPool::Run(new CleanupTask(path, name), true);
+ return true;
+}
+
// ------------------------------------------------------------------------

// Recurring task that forwards the statistics timer tick to the backend.
// The backend owns the timer and this task (see BackendImpl::Init and the
// destructor); |backend_| therefore outlives the task.
class TimerTask : public Task {
 public:
  explicit TimerTask(disk_cache::BackendImpl* backend) : backend_(backend) {}
  ~TimerTask() {}

  virtual void Run() {
    backend_->OnStatsTimer();
  }

 private:
  disk_cache::BackendImpl* backend_;  // Not owned.
};
+
+} // namespace
+
+namespace disk_cache {
+
+// If the initialization of the cache fails, and force is true, we will discard
+// the whole cache and create a new one. In order to process a potentially large
+// number of files, we'll rename the cache folder to old_ + original_name +
+// number, (located on the same parent folder), and spawn a worker thread to
+// delete all the files on all the stale cache folders. The whole process can
+// still fail if we are not able to rename the cache folder (for instance due to
+// a sharing violation), and in that case a cache for this profile (on the
+// desired path) cannot be created.
+Backend* CreateCacheBackend(const std::wstring& full_path, bool force,
+ int max_bytes) {
+ BackendImpl* cache = new BackendImpl(full_path);
+ cache->SetMaxSize(max_bytes);
+ if (cache->Init())
+ return cache;
+
+ delete cache;
+ if (!force)
+ return NULL;
+
+ if (!DelayedCacheCleanup(full_path))
+ return NULL;
+
+ // The worker thread will start deleting files soon, but the original folder
+ // is not there anymore... let's create a new set of files.
+ cache = new BackendImpl(full_path);
+ cache->SetMaxSize(max_bytes);
+ if (cache->Init())
+ return cache;
+
+ delete cache;
+ LOG(ERROR) << "Unable to create cache";
+ return NULL;
+}
+
// ------------------------------------------------------------------------

// Brings the backend up: maps the index, starts the stats timer, bumps the
// backend id used for dirty detection, and initializes block files, stats
// and rankings. Returns false (leaving the backend disabled) on any failure.
bool BackendImpl::Init() {
  DCHECK(!init_);
  if (init_)
    return false;

  bool create_files = false;
  if (!InitBackingStore(&create_files))
    return false;

  num_refs_ = num_pending_io_ = max_refs_ = 0;

  if (!restarted_) {
    // Create a recurrent timer of 30 secs.
    // Unit tests use a 1 second period so they can observe timer behavior.
    int timer_delay = unit_test_ ? 1000 : 30000;
    TimerTask* task = new TimerTask(this);
    timer_task_ = task;
    timer_ = MessageLoop::current()->timer_manager()->StartTimer(timer_delay,
                                                                 task, true);
  }

  init_ = true;

  if (!CheckIndex())
    return false;

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before a we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  if (!block_files_.Init(create_files))
    return false;

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!stats_.Init(this, &data_->header.stats))
    return false;

  disabled_ = !rankings_.Init(this);

  return !disabled_;
}
+
// Stops the stats timer and waits for in-flight async IO to drain before
// tearing the object down. Nothing to do if Init() never succeeded.
BackendImpl::~BackendImpl() {
  Trace("Backend destructor");
  if (!init_)
    return;

  MessageLoop::current()->timer_manager()->StopTimer(timer_);
  delete timer_;
  delete timer_task_;

  while (num_pending_io_) {
    // Asynchronous IO operations may be in flight and the completion may end
    // up calling us back so let's wait for them (we need an alertable wait).
    // The idea is to let other threads do usefull work and at the same time
    // allow more than one IO to finish... 20 mS later, we process all queued
    // APCs and see if we have to repeat the wait.
    // (SleepEx(0, TRUE) is the alertable call that runs the queued APCs.)
    Sleep(20);
    SleepEx(0, TRUE);
  }
  DCHECK(!num_refs_);
}
+
+bool BackendImpl::InitBackingStore(bool* file_created) {
+ // This call fails if the folder exists.
+ file_util::CreateDirectory(path_);
+
+ std::wstring index_name(path_);
+ file_util::AppendToPath(&index_name, kIndexName);
+
+ HANDLE file = CreateFile(index_name.c_str(), GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ, NULL, OPEN_ALWAYS, 0, NULL);
+
+ if (INVALID_HANDLE_VALUE == file)
+ return false;
+
+ bool ret = true;
+ if (ERROR_ALREADY_EXISTS != GetLastError()) {
+ *file_created = true;
+ ret = CreateBackingStore(file);
+ } else {
+ *file_created = false;
+ }
+
+ CloseHandle(file);
+ if (!ret)
+ return false;
+
+ index_ = new MappedFile();
+ data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
+ return true;
+}
+
// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
// Returns false on any Win32 write/seek failure; the caller deletes nothing,
// so a partially-written index is detected later by CheckIndex().
bool BackendImpl::CreateBackingStore(HANDLE file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  DWORD actual;
  if (!WriteFile(file, &header, sizeof(header), &actual, NULL) ||
      sizeof(header) != actual)
    return false;

  // Extending the file with SetEndOfFile leaves the table region zero-filled.
  LONG size = static_cast<LONG>(GetIndexSize(header.table_len));

  if (INVALID_SET_FILE_POINTER == SetFilePointer(file, size, NULL, FILE_BEGIN))
    return false;

  if (!SetEndOfFile(file))
    return false;

  return true;
}
+
+bool BackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ max_size_ = max_bytes;
+ return true;
+}
+
+int32 BackendImpl::GetEntryCount() const {
+ if (!index_)
+ return 0;
+ return data_->header.num_entries;
+}
+
+bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ if (disabled_)
+ return false;
+
+ uint32 hash = Hash(key);
+
+ EntryImpl* cache_entry = MatchEntry(key, hash, false);
+ if (!cache_entry) {
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return false;
+ }
+
+ DCHECK(entry);
+ *entry = cache_entry;
+
+ stats_.OnEvent(Stats::OPEN_HIT);
+ return true;
+}
+
// Creates a new entry for |key|: allocates the entry and rankings blocks,
// links the entry into its hash bucket (either as the bucket head or chained
// after the existing tail), and returns a referenced entry to the caller.
// Fails if an entry with the same key already exists (MatchEntry miss path).
bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
  if (disabled_ || key.empty())
    return false;

  uint32 hash = Hash(key);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // The bucket is occupied: find the chain tail to hang the new entry from.
    EntryImpl* parent_entry = MatchEntry(key, hash, true);
    if (!parent_entry) {
      stats_.OnEvent(Stats::CREATE_MISS);
      Trace("create entry miss ");
      return false;
    }
    // Transfer the raw reference into the scoped pointer without an extra
    // AddRef/Release pair.
    parent.swap(&parent_entry);
  }

  // How many contiguous 256-byte blocks the record needs: 1 for short keys
  // (the key fits in the block's inline key field) and for external keys,
  // more for keys that spill past the first block.
  int num_blocks;
  size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
  if (key.size() < key1_len || key.size() > kMaxInternalKeyLength)
    num_blocks = 1;
  else
    num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);

  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    // Roll back the entry block allocated above.
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, entry_address));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    // Roll back both allocations.
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return false;
  }

  if (parent.get())
    parent->SetNextAddress(entry_address);

  // Flush both records to their backing block files.
  block_files_.GetFile(entry_address)->Store(cache_entry->entry());
  block_files_.GetFile(node_address)->Store(cache_entry->rankings());

  data_->header.num_entries++;
  DCHECK(data_->header.num_entries > 0);
  rankings_.Insert(cache_entry->rankings(), true);
  if (!parent.get())
    data_->table[hash & mask_] = entry_address.value();

  // Hand the caller our reference (swap avoids an AddRef/Release pair).
  DCHECK(entry);
  *entry = NULL;
  cache_entry.swap(reinterpret_cast<EntryImpl**>(entry));

  stats_.OnEvent(Stats::CREATE_HIT);
  Trace("create entry hit ");
  return true;
}
+
// Walks the hash-bucket chain for |hash| looking for |key|. With
// find_parent == false returns the matching entry (referenced) or NULL; with
// find_parent == true returns the entry *preceding* the match — or the chain
// tail when there is no match — so the caller can link a new entry after it.
// Dirty or unreadable entries found along the way are unlinked and destroyed,
// and the walk restarts from the bucket head.
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent) {
  Addr address(data_->table[hash & mask_]);
  EntryImpl* cache_entry = NULL;
  EntryImpl* parent_entry = NULL;
  bool found = false;

  for (;;) {
    if (disabled_)
      break;

    if (!address.is_initialized()) {
      // End of chain: for find_parent this is still a "success" (the current
      // parent_entry is the tail).
      if (find_parent)
        found = true;
      break;
    }

    bool dirty;
    int error = NewEntry(address, &cache_entry, &dirty);

    if (error || dirty) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      // Unlink the bad entry: patch either the previous entry or the table.
      if (parent_entry) {
        parent_entry->SetNextAddress(child);
        parent_entry->Release();
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(address, cache_entry);
        cache_entry->Release();
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      continue;
    }

    if (cache_entry->IsSameEntry(key, hash)) {
      // Update refreshes the entry's rankings position and returns a
      // referenced entry.
      cache_entry = EntryImpl::Update(cache_entry);
      found = true;
      break;
    }
    // Not a match: this entry becomes the new parent and the walk continues.
    cache_entry = EntryImpl::Update(cache_entry);
    if (parent_entry)
      parent_entry->Release();
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry)
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  // Drop whichever reference the caller did not ask for.
  if (parent_entry && (!find_parent || !found)) {
    parent_entry->Release();
    parent_entry = NULL;
  }

  if (cache_entry && (find_parent || !found)) {
    cache_entry->Release();
    cache_entry = NULL;
  }

  return find_parent ? parent_entry : cache_entry;
}
+
+bool BackendImpl::DoomEntry(const std::string& key) {
+ if (disabled_)
+ return false;
+
+ EntryImpl* entry;
+ if (!OpenEntry(key, reinterpret_cast<Entry**>(&entry)))
+ return false;
+
+ entry->Doom();
+ entry->Release();
+ return true;
+}
+
// Removes |entry| from the rankings list and from its hash-bucket chain,
// splicing the entry's successor into its place, and updates the entry count.
// Called by the entry itself when it is doomed.
void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  // Find the entry that points at us, if any (NULL means we are the bucket
  // head).
  EntryImpl* parent_entry = MatchEntry(key, hash, true);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  rankings_.Remove(entry->rankings());

  entry->InternalDoom();

  // Splice our successor into the chain.
  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else {
    data_->table[hash & mask_] = child;
  }

  data_->header.num_entries--;
  DCHECK(data_->header.num_entries >= 0);
  stats_.OnEvent(Stats::DOOM_ENTRY);
}
+
+bool BackendImpl::DoomAllEntries() {
+ if (!num_refs_) {
+ index_ = NULL;
+ block_files_.CloseFiles();
+ rankings_.Reset();
+ DeleteFiles(path_.c_str(), L"\\f_*");
+ DeleteFiles(path_.c_str(), L"\\data_*");
+
+ std::wstring index(path_);
+ file_util::AppendToPath(&index, kIndexName);
+ DeleteFile(index.c_str());
+ init_ = false;
+ restarted_ = true;
+ return Init();
+ } else {
+ if (disabled_)
+ return false;
+
+ TrimCache(true);
+ stats_.OnEvent(Stats::DOOM_CACHE);
+ return true;
+ }
+}
+
// Dooms every entry whose last-used time falls in [initial_time, end_time).
// Enumeration runs from most- to least-recently used, so hitting an entry
// older than initial_time means no later entry can match and we can stop.
bool BackendImpl::DoomEntriesBetween(const Time initial_time,
                                     const Time end_time) {
  // A null end_time means "everything since initial_time".
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return false;

  // Keep one entry of look-ahead (|next|) so dooming |node| cannot invalidate
  // the enumeration position.
  Entry* node, *next;
  void* iter = NULL;
  if (!OpenNextEntry(&iter, &next))
    return true;

  while (next) {
    node = next;
    if (!OpenNextEntry(&iter, &next))
      next = NULL;

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->Doom();
    } else if (node->GetLastUsed() < initial_time) {
      // Entries are ordered by recency: nothing further can be in range.
      if (next)
        next->Close();
      next = NULL;
      EndEnumeration(&iter);
    }

    node->Close();
  }

  return true;
}
+
// We use OpenNextEntry to retrieve elements from the cache, until we get
// entries that are too old.
// The enumeration is restarted from scratch after every doom because dooming
// an entry invalidates the iterator.
bool BackendImpl::DoomEntriesSince(const Time initial_time) {
  if (disabled_)
    return false;

  for (;;) {
    Entry* entry;
    void* iter = NULL;
    if (!OpenNextEntry(&iter, &entry))
      return true;

    if (initial_time > entry->GetLastUsed()) {
      // First entry older than the cutoff: everything after it is older too.
      entry->Close();
      EndEnumeration(&iter);
      return true;
    }

    entry->Doom();
    entry->Close();
    EndEnumeration(&iter);  // Dooming the entry invalidates the iterator.
  }
}
+
// Advances the enumeration: *iter is the caller's opaque position (a
// CacheRankingsBlock, or NULL to start) and receives the new position;
// *next_entry receives a referenced entry. Returns false at the end of the
// list or when the next entry cannot be trusted.
bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
  if (disabled_)
    return false;

  // Both scoped blocks release their rankings node automatically on every
  // early return below.
  Rankings::ScopedRankingsBlock rankings(&rankings_,
      reinterpret_cast<CacheRankingsBlock*>(*iter));
  Rankings::ScopedRankingsBlock next(&rankings_,
      rankings_.GetNext(rankings.get()));
  *next_entry = NULL;
  *iter = NULL;
  if (!next.get())
    return false;

  scoped_refptr<EntryImpl> entry;
  if (next->Data()->pointer) {
    // The entry is already in memory; reuse the live object.
    entry = reinterpret_cast<EntryImpl*>(next->Data()->pointer);
  } else {
    bool dirty;
    EntryImpl* temp = NULL;
    if (NewEntry(Addr(next->Data()->contents), &temp, &dirty))
      return false;
    // Take ownership of NewEntry's reference without an extra AddRef.
    entry.swap(&temp);

    if (dirty) {
      // We cannot trust this entry. Call MatchEntry to go through the regular
      // path and take the appropriate action.
      std::string key = entry->GetKey();
      uint32 hash = entry->GetHash();
      entry = NULL;  // Release the entry.
      temp = MatchEntry(key, hash, false);
      if (temp)
        temp->Release();

      return false;
    }

    // Swap the raw pointer back out so Update (which consumes a raw entry and
    // returns a referenced one) can be applied, then re-adopt the result.
    entry.swap(&temp);
    temp = EntryImpl::Update(temp);  // Update returns an adref'd entry.
    entry.swap(&temp);
    if (!entry.get())
      return false;
  }

  // Transfer our reference to the caller and hand back the new position.
  entry.swap(reinterpret_cast<EntryImpl**>(next_entry));
  *iter = next.release();
  return true;
}
+
// Releases the enumeration position held in *iter (the ScopedRankingsBlock
// destructor frees the node) and clears the caller's iterator.
void BackendImpl::EndEnumeration(void** iter) {
  Rankings::ScopedRankingsBlock rankings(&rankings_,
      reinterpret_cast<CacheRankingsBlock*>(*iter));
  *iter = NULL;
}
+
+void BackendImpl::GetStats(StatsItems* stats) {
+ if (disabled_)
+ return;
+
+ std::pair<std::string, std::string> item;
+
+ item.first = "Entries";
+ item.second = StringPrintf("%d", data_->header.num_entries);
+ stats->push_back(item);
+
+ item.first = "Pending IO";
+ item.second = StringPrintf("%d", num_pending_io_);
+ stats->push_back(item);
+
+ item.first = "Max size";
+ item.second = StringPrintf("%d", max_size_);
+ stats->push_back(item);
+
+ item.first = "Current size";
+ item.second = StringPrintf("%d", data_->header.num_bytes);
+ stats->push_back(item);
+
+ stats_.GetItems(stats);
+}
+
// Evicts least-recently-used entries until the stored bytes drop to the
// low-water target (or to zero when |empty| is set, which also evicts
// entries that are currently in use).
void BackendImpl::TrimCache(bool empty) {
  Trace("*** Trim Cache ***");
  if (disabled_)
    return;

  // Walk the rankings list from the LRU end (GetPrev from the list head).
  Rankings::ScopedRankingsBlock node(&rankings_);
  Rankings::ScopedRankingsBlock next(&rankings_, rankings_.GetPrev(node.get()));
  DCHECK(next.get());
  int target_size = empty ? 0 : LowWaterAdjust(max_size_);
  while (data_->header.num_bytes > target_size && next.get()) {
    // Advance before dooming: dooming |node| detaches it from the list.
    node.reset(next.release());
    next.reset(rankings_.GetPrev(node.get()));
    if (!node->Data()->pointer || empty) {
      // This entry is not being used by anybody.
      EntryImpl* entry;
      bool dirty;
      if (NewEntry(Addr(node->Data()->contents), &entry, &dirty)) {
        Trace("NewEntry failed on Trim 0x%x", node->address().value());
        continue;
      }

      if (node->Data()->pointer) {
        // In-use entry (only reachable when empty == true): refresh through
        // Update so we doom the live object.
        entry = EntryImpl::Update(entry);
      }
      entry->Doom();
      entry->Release();
      if (!empty)
        stats_.OnEvent(Stats::TRIM_ENTRY);
    }
  }

  Trace("*** Trim Cache end ***");
  return;
}
+
// Removes a corrupt entry from the rankings list and dooms it, updating the
// bookkeeping counters. |address| identifies where the entry lives; |entry|
// is the in-memory object for it.
void BackendImpl::DestroyInvalidEntry(Addr address, EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  rankings_.Remove(entry->rankings());
  // Mark the entry with the current id so it is not reported dirty again.
  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  entry->InternalDoom();

  data_->header.num_entries--;
  DCHECK(data_->header.num_entries >= 0);
  stats_.OnEvent(Stats::INVALID_ENTRY);
}
+
// Creates the in-memory object for the entry stored at |address| and runs
// basic consistency checks on it. Returns 0 on success, in which case |entry|
// receives a referenced EntryImpl and |dirty| says whether the entry was not
// properly closed on a previous run; otherwise returns a disk_cache error
// code (see errors.h) and leaves |*entry| NULL.
int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
  scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, address));
  IncreaseNumRefs();
  *entry = NULL;

  // Entries are always stored on a block file of 256-byte blocks.
  if (!address.is_initialized() || address.is_separate_file() ||
      address.file_type() != BLOCK_256) {
    LOG(WARNING) << "Wrong entry address.";
    return ERR_INVALID_ADDRESS;
  }

  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    return ERR_INVALID_ENTRY;
  }

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  *dirty = cache_entry->IsDirty(GetCurrentEntryId());

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->ClearDirtyFlag();

  if (!rankings_.SanityCheck(cache_entry->rankings(), false))
    return ERR_INVALID_LINKS;

  // Transfer our reference to the caller. On the early returns above the
  // scoped_refptr destructor releases the entry automatically.
  cache_entry.swap(entry);
  return 0;
}
+
// Creates a new storage block of |block_count| blocks of type |block_type|,
// delegating to the block files manager. Returns true on success.
bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}
+
// Releases a storage block. When |deep| is true the related storage is also
// zero-filled.
void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}
+
// Called when an entry is released for the last time; keeps the count of
// live entry objects in sync.
void BackendImpl::CacheEntryDestroyed() {
  DecreaseNumRefs();
}
+
// Accounts for |bytes| of new stored data, triggering eviction when the cache
// grows past its maximum size.
void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK(data_->header.num_bytes >= 0);

  if (data_->header.num_bytes > max_size_)
    TrimCache(false);
}
+
// Accounts for |bytes| of released storage. (Name misspelling of "subtract"
// is historical and part of the interface.)
void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK(data_->header.num_bytes >= 0);
}
+
+std::wstring BackendImpl::GetFileName(Addr address) const {
+ if (!address.is_separate_file() || !address.is_initialized()) {
+ NOTREACHED();
+ return std::wstring();
+ }
+
+ std::wstring name = StringPrintf(L"%s\\f_%06x", path_.c_str(),
+ address.FileNumber());
+ return name;
+}
+
+bool BackendImpl::CreateExternalFile(Addr* address) {
+ int file_number = data_->header.last_file + 1;
+ Addr file_address(0);
+ bool success = false;
+ for (int i = 0; (i < 0x0fffffff) && !success; i++) {
+ if (!file_address.SetFileNumber(file_number)) {
+ file_number = 1;
+ continue;
+ }
+ std::wstring name = GetFileName(file_address);
+ ScopedHandle file(CreateFile(name.c_str(), GENERIC_WRITE | GENERIC_READ,
+ FILE_SHARE_READ, NULL, CREATE_ALWAYS, 0,
+ NULL));
+ if (!file.IsValid())
+ continue;
+
+ success = true;
+ }
+
+ DCHECK(success);
+ if (!success)
+ return false;
+
+ data_->header.last_file = file_number;
+ address->set_value(file_address.value());
+ return true;
+}
+
+int BackendImpl::SelfCheck() {
+ if (!init_) {
+ LOG(ERROR) << "Init failed";
+ return ERR_INIT_FAILED;
+ }
+
+ int num_entries = rankings_.SelfCheck();
+ if (num_entries < 0) {
+ LOG(ERROR) << "Invalid rankings list, error " << num_entries;
+ return num_entries;
+ }
+
+ if (num_entries != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return CheckAllEntries();
+}
+
// Reports an unrecoverable error and disables the cache. The cache files are
// recreated once the last reference to an entry goes away.
void BackendImpl::CriticalError(int error) {
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  LogStats();

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  // If entries are still referenced, DecreaseNumRefs restarts the cache when
  // the last one goes away.
  if (!num_refs_)
    RestartCache();
}
+
// Performs basic validation of the memory-mapped index file: size, magic,
// version, table length and header counters. Also derives max_size_ and
// mask_ when they were not set explicitly. Returns false if the index
// cannot be trusted.
bool BackendImpl::CheckIndex() {
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (kIndexMagic != data_->header.magic ||
      kCurrentVersion != data_->header.version) {
    LOG(ERROR) << "Invalid file version or magic";
    return false;
  }

  if (data_->header.table_len) {
    // The file must be big enough for the declared table, and the table
    // length must be a multiple of kBaseTableLen.
    if (current_size < GetIndexSize(data_->header.table_len) ||
        data_->header.table_len & (kBaseTableLen - 1)) {
      LOG(ERROR) << "Corrupt Index file";
      return false;
    }

    AdjustMaxCacheSize(data_->header.table_len);
  } else {
    max_size_ = kDefaultCacheSize;
  }

  // Allow up to 10% overshoot before declaring the recorded size invalid.
  if (data_->header.num_bytes < 0 ||
      data_->header.num_bytes > max_size_ * 11 / 10) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = DesiredIndexTableLen(max_size_) - 1;

  return true;
}
+
// Part of the self test: walks every bucket of the hash table and validates
// each entry on the chain. Returns the number of dirty entries found, or a
// negative error code on the first corrupt entry.
int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (int i = 0; i <= static_cast<int>(mask_); i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    // Follow the chain of entries that share this bucket.
    for (;;) {
      bool dirty;
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp, &dirty);
      if (ret)
        return ret;
      // Adopt the reference returned by NewEntry so it is released on every
      // exit path.
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (dirty)
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  // Every entry (clean or dirty) must be accounted for by the header.
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}
+
+bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
+ RankingsNode* rankings = cache_entry->rankings()->Data();
+ return !rankings->pointer;
+}
+
+void BackendImpl::LogStats() {
+ StatsItems stats;
+ GetStats(&stats);
+
+ for (size_t index = 0; index < stats.size(); index++) {
+ LOG(INFO) << stats[index].first << ": " << stats[index].second;
+ }
+}
+
// Deletes the cache files and re-initializes from scratch. Used to recover
// after a critical error.
void BackendImpl::RestartCache() {
  // Drop everything that references the old files before deleting them.
  index_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();

  DelayedCacheCleanup(path_);

  init_ = false;
  restarted_ = true;
  // Stats are recreated by Init(); carry over the fatal-error count,
  // recording the error that caused this restart.
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  if (Init())
    stats_.SetCounter(Stats::FATAL_ERROR, errors + 1);
}
+
// A rankings node was recovered after a crash; makes sure the entry it points
// to is reachable from the index, re-inserting it into its hash bucket when
// missing.
void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  bool dirty;
  if (NewEntry(address, &cache_entry, &dirty))
    return;

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
}
+
// Updates the ranking information for |node|; |modified| tells whether the
// entry data changed (not just its access time).
void BackendImpl::UpdateRank(CacheRankingsBlock* node, bool modified) {
  rankings_.UpdateRank(node, modified);
}
+
// Tracks one more pending asynchronous IO operation.
void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}
+
// Tracks completion of one pending asynchronous IO operation. No underflow
// check: callers must pair this with IncrementIoCount.
void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}
+
// Returns the id assigned to this run of the cache (stored in the index
// header); used to detect entries left dirty by a previous run.
int32 BackendImpl::GetCurrentEntryId() {
  return data_->header.this_id;
}
+
// Test-only: resets the count of referenced entries so corruption handling
// can be exercised.
void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}
+
+void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (disabled_)
+ return;
+ if (old_size > new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+
+ // Update the usage statistics.
+ stats_.ModifyStorageStats(old_size, new_size);
+}
+
// Records one occurrence of |an_event| in the usage statistics.
void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}
+
// Logs a storage request that was denied for being too big, so the size
// distribution statistics still account for it.
void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}
+
// A single stream may use at most one eighth of the total cache size.
int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}
+
// Timer callback: maintains a running average of the number of open entries.
void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
  int64 time = stats_.GetCounter(Stats::TIMER);

  // Incremental average: OPEN_ENTRIES = (old_avg * (n - 1) + sample) / n.
  // TIMER was just incremented above, so |time| is at least 1.
  current = current * (time - 1) + num_refs_;
  current /= time;
  stats_.SetCounter(Stats::OPEN_ENTRIES, current);
  stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
}
+
+void BackendImpl::IncreaseNumRefs() {
+ num_refs_++;
+ if (max_refs_ < num_refs_)
+ max_refs_ = num_refs_;
+}
+
// One cache entry reference went away. If the cache was disabled by a
// critical error, the last reference triggers the deferred restart.
void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    RestartCache();
}
+
// Enables unit-test behavior for this instance.
void BackendImpl::SetUnitTestMode() {
  unit_test_ = true;
}
+
// Picks a maximum cache size when the user did not set one explicitly,
// based on the free disk space, and caps it to what the existing hash table
// (of |table_len| entries) can address.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // The user is not setting the size, let's figure it out.
  ULARGE_INTEGER available, total, free;
  if (!GetDiskFreeSpaceExW(path_.c_str(), &available, &total, &free)) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  // Attempt to use 1% of the disk available for this user.
  available.QuadPart /= 100;

  // Clamp to [kDefaultCacheSize, kint32max] so max_size_ fits in an int32.
  if (available.QuadPart < static_cast<uint32>(kDefaultCacheSize))
    max_size_ = kDefaultCacheSize;
  else if (available.QuadPart > static_cast<uint32>(kint32max))
    max_size_ = kint32max;
  else
    max_size_ = static_cast<int32>(available.LowPart);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize)
    max_size_ = kDefaultCacheSize;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_= current_max_size;
}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
new file mode 100644
index 0000000..b891487
--- /dev/null
+++ b/net/disk_cache/backend_impl.h
@@ -0,0 +1,218 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H__
+#define NET_DISK_CACHE_BACKEND_IMPL_H__
+
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/rankings.h"
+#include "net/disk_cache/stats.h"
+#include "net/disk_cache/trace.h"
+
+class Timer;
+
+namespace disk_cache {
+
+// This class implements the Backend interface. An object of this
+// class handles the operations of the cache for a particular profile.
// This class implements the Backend interface. An object of this
// class handles the operations of the cache for a particular profile.
class BackendImpl : public Backend {
 public:
  explicit BackendImpl(const std::wstring& path)
      : path_(path), init_(false), mask_(0), block_files_(path),
        unit_test_(false), restarted_(false), max_size_(0) {}
  // mask can be used to limit the usable size of the hash table, for testing.
  BackendImpl(const std::wstring& path, uint32 mask)
      : path_(path), init_(false), mask_(mask), block_files_(path),
        unit_test_(false), restarted_(false), max_size_(0) {}
  ~BackendImpl();

  // Performs general initialization for this current instance of the cache.
  bool Init();

  // Backend interface.
  virtual int32 GetEntryCount() const;
  virtual bool OpenEntry(const std::string& key, Entry** entry);
  virtual bool CreateEntry(const std::string& key, Entry** entry);
  virtual bool DoomEntry(const std::string& key);
  virtual bool DoomAllEntries();
  virtual bool DoomEntriesBetween(const Time initial_time,
                                  const Time end_time);
  virtual bool DoomEntriesSince(const Time initial_time);
  virtual bool OpenNextEntry(void** iter, Entry** next_entry);
  virtual void EndEnumeration(void** iter);
  virtual void GetStats(StatsItems* stats);

  // Sets the maximum size for the total amount of data stored by this instance.
  bool SetMaxSize(int max_bytes);

  // Returns the actual file used to store a given (non-external) address.
  MappedFile* File(Addr address) {
    if (disabled_)
      return NULL;
    return block_files_.GetFile(address);
  }

  // Creates a new storage block of size block_count.
  bool CreateBlock(FileType block_type, int block_count,
                   Addr* block_address);

  // Deletes a given storage block. deep set to true can be used to zero-fill
  // the related storage in addition to releasing the related block.
  void DeleteBlock(Addr block_address, bool deep);

  // Permanently deletes an entry.
  void InternalDoomEntry(EntryImpl* entry);

  // Returns the full name for an external storage file.
  std::wstring GetFileName(Addr address) const;

  // Creates an external storage file.
  bool CreateExternalFile(Addr* address);

  // Updates the ranking information for an entry.
  void UpdateRank(CacheRankingsBlock* node, bool modified);

  // This method must be called whenever an entry is released for the last time.
  void CacheEntryDestroyed();

  // Handles the pending asynchronous IO count.
  void IncrementIoCount();
  void DecrementIoCount();

  // Returns the id being used on this run of the cache.
  int32 GetCurrentEntryId();

  // A node was recovered from a crash, it may not be on the index, so this
  // method checks it and takes the appropriate action.
  void RecoveredEntry(CacheRankingsBlock* rankings);

  // Clears the counter of references to test handling of corruptions.
  void ClearRefCountForTest();

  // Sets internal parameters to enable unit testing mode.
  void SetUnitTestMode();

  // A user data block is being created, extended or truncated.
  void ModifyStorageSize(int32 old_size, int32 new_size);

  // Returns the maximum size for a file to reside on the cache.
  int MaxFileSize() const;

  // Logs requests that are denied due to being too big.
  void TooMuchStorageRequested(int32 size);

  // Called when an interesting event should be logged (counted).
  void OnEvent(Stats::Counters an_event);

  // Timer callback to calculate usage statistics.
  void OnStatsTimer();

  // Performs a simple self-check, and returns the number of dirty items
  // or an error code (negative value).
  int SelfCheck();

  // Reports a critical error (and disables the cache).
  void CriticalError(int error);

 private:
  // Creates a new backing file for the cache index.
  bool CreateBackingStore(HANDLE file);
  bool InitBackingStore(bool* file_created);

  // Returns a given entry from the cache. The entry to match is determined by
  // key and hash, and the returned entry may be the matched one or its parent
  // on the list of entries with the same hash (or bucket).
  EntryImpl* MatchEntry(const std::string& key, uint32 hash,
                        bool find_parent);

  // Deletes entries from the cache until the current size is below the limit.
  // If empty is true, the whole cache will be trimmed, regardless of being in
  // use.
  void TrimCache(bool empty);

  // Removes an invalid entry from the index and the rankings list.
  void DestroyInvalidEntry(Addr address, EntryImpl* entry);

  // Creates a new entry object and checks to see if it is dirty. Returns zero
  // on success, or a disk_cache error on failure.
  int NewEntry(Addr address, EntryImpl** entry, bool* dirty);

  // Part of the self test. Returns the number of dirty entries, or an error.
  int CheckAllEntries();

  // Part of the self test. Returns false if the entry is corrupt.
  bool CheckEntry(EntryImpl* cache_entry);

  // Performs basic checks on the index file. Returns false on failure.
  bool CheckIndex();

  // Dumps current cache statistics to the log.
  void LogStats();

  // Deletes the cache and starts again.
  void RestartCache();

  // Handles the used storage count.
  void AddStorageSize(int32 bytes);
  void SubstractStorageSize(int32 bytes);

  // Update the number of referenced cache entries.
  void IncreaseNumRefs();
  void DecreaseNumRefs();

  // Picks the maximum cache size when none was set (and caps it to the table).
  void AdjustMaxCacheSize(int table_len);

  scoped_refptr<MappedFile> index_;  // The main cache index.
  std::wstring path_;  // Path to the folder used as backing storage.
  Index* data_;  // Pointer to the index data.
  BlockFiles block_files_;  // Set of files used to store all data.
  Rankings rankings_;  // Rankings to be able to trim the cache.
  uint32 mask_;  // Binary mask to map a hash to the hash table.
  int32 max_size_;  // Maximum data size for this instance.
  int num_refs_;  // Number of referenced cache entries.
  int max_refs_;  // Max number of referenced cache entries.
  int num_pending_io_;  // Number of pending IO operations.
  bool init_;  // Controls the initialization of the system.
  bool restarted_;  // True after the cache was recreated by RestartCache.
  bool unit_test_;  // True when running under unit tests.
  bool disabled_;  // True after a critical error, until restart.

  Stats stats_;  // Usage statistics.
  Task* timer_task_;
  Timer* timer_;  // Usage timer.
  TraceObject trace_object_;  // Inits and destroys internal tracing.

  DISALLOW_EVIL_CONSTRUCTORS(BackendImpl);
};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H__
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
new file mode 100644
index 0000000..0514758
--- /dev/null
+++ b/net/disk_cache/backend_unittest.cc
@@ -0,0 +1,944 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "base/file_util.h"
+#include "base/path_service.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
namespace {

// Copies a set of cache files from the data folder to the test folder.
bool CopyTestCache(const std::wstring& name) {
  std::wstring path;
  PathService::Get(base::DIR_SOURCE_ROOT, &path);
  file_util::AppendToPath(&path, L"net");
  file_util::AppendToPath(&path, L"data");
  file_util::AppendToPath(&path, L"cache_tests");
  file_util::AppendToPath(&path, name);

  // Start from a clean destination folder.
  std::wstring dest = GetCachePath();
  if (!DeleteCache(dest.c_str()))
    return false;
  return file_util::CopyDirectory(path, dest, false);
}

// Verifies that we can recover a transaction (insert or remove on the rankings
// list) that is interrupted. |name| is the checked-in cache snapshot to use,
// |num_entries| the count expected after recovery, and |load| exercises the
// small-table (heavy load) configuration. Returns 0 on success, or a small
// code identifying the failed step.
int TestTransaction(const std::wstring& name, int num_entries, bool load) {
  if (!CopyTestCache(name))
    return 1;
  std::wstring path = GetCachePath();
  scoped_ptr<disk_cache::Backend> cache;

  if (!load) {
    cache.reset(disk_cache::CreateCacheBackend(path, false, 0));
  } else {
    // A 16-bucket table with a 1 MB limit simulates a heavily loaded cache.
    disk_cache::BackendImpl* cache2 = new disk_cache::BackendImpl(path, 0xf);
    if (!cache2 || !cache2->SetMaxSize(0x100000) || !cache2->Init())
      return 2;
    cache.reset(cache2);
  }
  if (!cache.get())
    return 2;

  // The interrupted transaction still counts as one entry at this point.
  if (num_entries + 1 != cache->GetEntryCount())
    return 3;

  // Opening the affected entry must fail and clean up the leftover.
  std::string key("the first key");
  disk_cache::Entry* entry1;
  if (cache->OpenEntry(key, &entry1))
    return 4;

  int actual = cache->GetEntryCount();
  if (num_entries != actual) {
    if (!load)
      return 5;
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    if (actual != num_entries - 1)
      return 5;
  }

  cache.reset();

  if (!CheckCacheIntegrity(path))
    return 6;

  return 0;
}

}  // namespace
+
+// Tests that can run with different types of caches.
// Tests that can run with different types of caches. Each Backend* method
// holds the shared body of a test, driven by the TEST_F wrappers below for
// the disk-backed and memory-only configurations.
class DiskCacheBackendTest : public DiskCacheTestBase {
 protected:
  void BackendBasics();
  void BackendSetSize();
  void BackendLoad();
  void BackendKeying();
  void BackendEnumerations();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendDoomAll();
};
+
// Exercises the basic create/open/doom operations and the entry count.
void DiskCacheBackendTest::BackendBasics() {
  disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
  // Opening a missing key fails; creating and re-opening it succeeds.
  EXPECT_FALSE(cache_->OpenEntry("the first key", &entry1));
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Creating an existing key fails; open returns the same object.
  EXPECT_FALSE(cache_->CreateEntry("the first key", &entry1));
  ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
  EXPECT_FALSE(cache_->OpenEntry("some other key", &entry2));
  ASSERT_TRUE(cache_->CreateEntry("some other key", &entry2));
  ASSERT_TRUE(NULL != entry1);
  ASSERT_TRUE(NULL != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  disk_cache::Entry* entry3 = NULL;
  ASSERT_TRUE(cache_->OpenEntry("some other key", &entry3));
  ASSERT_TRUE(NULL != entry3);
  EXPECT_TRUE(entry2 == entry3);
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Dooming works on open entries; the count drops immediately.
  EXPECT_TRUE(cache_->DoomEntry("some other key"));
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_TRUE(cache_->DoomEntry("the first key"));
  EXPECT_EQ(0, cache_->GetEntryCount());

  // Doom through the entry itself, then through the backend.
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
  ASSERT_TRUE(cache_->CreateEntry("some other key", &entry2));
  entry1->Doom();
  entry1->Close();
  EXPECT_TRUE(cache_->DoomEntry("some other key"));
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}
+
// Basic operations against the disk-backed cache.
TEST_F(DiskCacheBackendTest, Basics) {
  InitCache();
  BackendBasics();
}
+
// Basic operations against the memory-only cache.
TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  InitCache();
  BackendBasics();
}
+
// Verifies key handling: case sensitivity, key buffer alignment, and keys
// long enough to spill to a block file or an external file.
void DiskCacheBackendTest::BackendKeying() {
  const char* kName1 = "the first key";
  const char* kName2 = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_TRUE(cache_->CreateEntry(kName1, &entry1));

  ASSERT_TRUE(cache_->CreateEntry(kName2, &entry2));
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  // The same key from differently-aligned buffers must match the entry.
  char buffer[30];
  EXPECT_EQ(0, strcpy_s(buffer, kName1));
  ASSERT_TRUE(cache_->OpenEntry(buffer, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  EXPECT_EQ(0, strcpy_s(buffer + 1, sizeof(buffer) - 1 , kName1));
  ASSERT_TRUE(cache_->OpenEntry(buffer + 1, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  EXPECT_EQ(0, strcpy_s(buffer + 3, sizeof(buffer) - 3, kName1));
  ASSERT_TRUE(cache_->OpenEntry(buffer + 3, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_TRUE(cache_->CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_TRUE(cache_->CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();
}
+
// Key handling on the disk-backed cache.
TEST_F(DiskCacheBackendTest, Keying) {
  InitCache();
  BackendKeying();
}
+
// Key handling on the memory-only cache.
TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  InitCache();
  BackendKeying();
}
+
// Verifies enforcement of the maximum cache size: per-file limits, growth
// after raising the limit, and eviction of the oldest entry when trimming.
void DiskCacheBackendTest::BackendSetSize() {
  SetDirectMode();
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(first, &entry));

  char buffer[cache_size] = {0};
  EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
                                              NULL, false)) << "normal file";

  // A single file may not exceed 1/8 of the cache size (MaxFileSize).
  EXPECT_EQ(net::ERR_FAILED, entry->WriteData(1, 0, buffer, cache_size / 5,
                                              NULL, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5, entry->WriteData(1, 0, buffer, cache_size / 5,
                                             NULL, false));

  // Let's fill up the cache!.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4, entry->WriteData(0, 0, buffer,
                                                 cache_size * 3 / 4, NULL,
                                                 false));
  entry->Close();

  SetMaxSize(cache_size);

  // Verify that the cache is 95% full.
  ASSERT_TRUE(cache_->OpenEntry(first, &entry));
  EXPECT_EQ(cache_size * 3 / 4, entry->GetDataSize(0));
  EXPECT_EQ(cache_size / 5, entry->GetDataSize(1));
  entry->Close();

  // Writing a new entry must evict the first one to make room.
  ASSERT_TRUE(cache_->CreateEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
                                              NULL, false))
      << "trim the cache";
  entry->Close();

  EXPECT_FALSE(cache_->OpenEntry(first, &entry));
  ASSERT_TRUE(cache_->OpenEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}
+
// Size limits on the disk-backed cache.
TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}
+
// Size limits on the memory-only cache.
TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}
+
// Creates 100 entries, shuffles them, and verifies that each one can be
// re-opened and doomed, ending with an empty cache.
void DiskCacheBackendTest::BackendLoad() {
  // Seed from the clock; the seed is only used to shuffle, so any value works.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  disk_cache::Entry* entries[100];
  for (int i = 0; i < 100; i++) {
    std::string key = GenerateKey(true);
    ASSERT_TRUE(cache_->CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(100, cache_->GetEntryCount());

  // Shuffle the array with random pairwise swaps.
  for (int i = 0; i < 100; i++) {
    int source1 = rand() % 100;
    int source2 = rand() % 100;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (int i = 0; i < 100; i++) {
    disk_cache::Entry* entry;
    ASSERT_TRUE(cache_->OpenEntry(entries[i]->GetKey(), &entry));
    EXPECT_TRUE(entry == entries[i]);
    entry->Close();
    entries[i]->Doom();
    entries[i]->Close();
  }
  EXPECT_EQ(0, cache_->GetEntryCount());
}
+
// Load test on the disk-backed cache.
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();
  BackendLoad();
}
+
// Load test on the memory-only cache.
TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  InitCache();
  BackendLoad();
}
+
+// Before looking for invalid entries, let's check a valid entry.
// Before looking for invalid entries, let's check a valid entry: data written
// and closed before a crash must survive it.
TEST_F(DiskCacheBackendTest, ValidEntry) {
  SetDirectMode();
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry1;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry1));

  char data[] = "And the data to save";
  EXPECT_EQ(sizeof(data), entry1->WriteData(0, 0, data, sizeof(data), NULL,
                                            false));
  // The entry is closed cleanly, so the crash must not lose it.
  entry1->Close();
  SimulateCrash();

  ASSERT_TRUE(cache_->OpenEntry(key, &entry1));

  char buffer[40];
  memset(buffer, 0, sizeof(buffer));
  EXPECT_EQ(sizeof(data), entry1->ReadData(0, 0, buffer, sizeof(data), NULL));
  entry1->Close();
  EXPECT_STREQ(data, buffer);
}
+
+// The same logic of the previous test (ValidEntry), but this time force the
+// entry to be invalid, simulating a crash in the middle.
+// We'll be leaking memory from this test.
// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  // Use the implementation directly... we need to simulate a crash.
  SetDirectMode();
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry1;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry1));

  char data[] = "And the data to save";
  EXPECT_EQ(sizeof(data), entry1->WriteData(0, 0, data, sizeof(data), NULL,
                                            false));
  // The entry is still open when we crash, so it must be discarded.
  SimulateCrash();

  EXPECT_FALSE(cache_->OpenEntry(key, &entry1));
  EXPECT_EQ(0, cache_->GetEntryCount());
}
+
+// Almost the same test, but this time crash the cache after reading an entry.
+// We'll be leaking memory from this test.
// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  // Use the implementation directly... we need to simulate a crash.
  SetDirectMode();
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry1;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry1));

  char data[] = "And the data to save";
  EXPECT_EQ(sizeof(data), entry1->WriteData(0, 0, data, sizeof(data), NULL,
                                            false));
  entry1->Close();
  // Re-open and read so the entry is open (dirty) at crash time.
  ASSERT_TRUE(cache_->OpenEntry(key, &entry1));
  EXPECT_EQ(sizeof(data), entry1->ReadData(0, 0, data, sizeof(data), NULL));

  SimulateCrash();

  EXPECT_FALSE(cache_->OpenEntry(key, &entry1));
  EXPECT_EQ(0, cache_->GetEntryCount());
}
+
+// We'll be leaking memory from this test.
// Crash with many entries: the half left open at crash time must be gone
// after recovery, the half closed before the crash must survive.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_TRUE(cache_->CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle so the open/closed split is not correlated with creation order.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Close the first half; the second half stays open through the crash.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_FALSE(cache_->OpenEntry(keys[i], &entry));
  }

  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    EXPECT_TRUE(cache_->OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
+  // Use the implementation directly... we need to simulate a crash.
+  SetDirectMode();
+
+  const int cache_size = 0x4000;  // 16 kB
+  SetMaxSize(cache_size * 10);
+  InitCache();
+
+  std::string first("some key");
+  std::string second("something else");
+  disk_cache::Entry* entry;
+  ASSERT_TRUE(cache_->CreateEntry(first, &entry));
+
+  // Fill almost the whole cache budget from a single still-open entry.
+  char buffer[cache_size] = {0};
+  EXPECT_EQ(cache_size * 19 / 20, entry->WriteData(0, 0, buffer,
+            cache_size * 19 / 20, NULL, false));
+
+  // Simulate a crash.
+  SimulateCrash();
+
+  // This write must trigger eviction, which has to trim the (now invalid)
+  // first entry instead of failing on it.
+  ASSERT_TRUE(cache_->CreateEntry(second, &entry));
+  EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
+            NULL, false)) << "trim the cache";
+  entry->Close();
+
+  EXPECT_FALSE(cache_->OpenEntry(first, &entry));
+  EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// Creates kNumEntries entries and enumerates the cache twice, checking that
+// the timestamps are inside [initial, final] and that enumerating does not
+// itself modify the timestamps.
+void DiskCacheBackendTest::BackendEnumerations() {
+  Time initial = Time::Now();
+  int seed = static_cast<int>(initial.ToInternalValue());
+  srand(seed);
+
+  const int kNumEntries = 100;
+  for (int i = 0; i < kNumEntries; i++) {
+    std::string key = GenerateKey(true);
+    disk_cache::Entry* entry;
+    ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+    entry->Close();
+  }
+  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+  Time final = Time::Now();
+
+  disk_cache::Entry* entry;
+  void* iter = NULL;
+  int count = 0;
+  Time last_modified[kNumEntries];
+  Time last_used[kNumEntries];
+  while (cache_->OpenNextEntry(&iter, &entry)) {
+    ASSERT_TRUE(NULL != entry);
+    if (count < kNumEntries) {
+      last_modified[count] = entry->GetLastModified();
+      last_used[count] = entry->GetLastUsed();
+      // Only validate the timestamps we actually stored: the original code
+      // indexed last_modified[count] out of bounds if the enumeration
+      // returned more than kNumEntries entries.
+      EXPECT_TRUE(initial <= last_modified[count]);
+      EXPECT_TRUE(final >= last_modified[count]);
+    }
+    entry->Close();
+    count++;
+  }
+  EXPECT_EQ(kNumEntries, count);
+
+  iter = NULL;
+  count = 0;
+  // The previous enumeration should not have changed the timestamps.
+  while (cache_->OpenNextEntry(&iter, &entry)) {
+    ASSERT_TRUE(NULL != entry);
+    if (count < kNumEntries) {
+      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
+      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
+    }
+    entry->Close();
+    count++;
+  }
+  EXPECT_EQ(kNumEntries, count);
+}
+
+// Runs the shared enumeration checks against the disk backend.
+TEST_F(DiskCacheBackendTest, Enumerations) {
+  InitCache();
+  BackendEnumerations();
+}
+
+// Same enumeration checks against the in-memory backend.
+TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
+  SetMemoryOnlyMode();
+  InitCache();
+  BackendEnumerations();
+}
+
+// Verify handling of invalid entries while doing enumerations.
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
+  // Use the implementation directly... we need to simulate a crash.
+  SetDirectMode();
+  InitCache();
+
+  std::string key("Some key");
+  disk_cache::Entry *entry, *entry1, *entry2;
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry1));
+
+  char data[] = "And the data to save";
+  // WriteData/ReadData return int, so cast sizeof() to keep EXPECT_EQ's
+  // comparison signed on both sides.
+  EXPECT_EQ(static_cast<int>(sizeof(data)),
+            entry1->WriteData(0, 0, data, sizeof(data), NULL, false));
+  entry1->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key, &entry1));
+  EXPECT_EQ(static_cast<int>(sizeof(data)),
+            entry1->ReadData(0, 0, data, sizeof(data), NULL));
+
+  std::string key2("Another key");
+  ASSERT_TRUE(cache_->CreateEntry(key2, &entry2));
+  entry2->Close();
+  ASSERT_EQ(2, cache_->GetEntryCount());
+
+  SimulateCrash();
+
+  // Only the cleanly closed second entry should survive the enumeration.
+  void* iter = NULL;
+  int count = 0;
+  while (cache_->OpenNextEntry(&iter, &entry)) {
+    ASSERT_TRUE(NULL != entry);
+    EXPECT_EQ(key2, entry->GetKey());
+    entry->Close();
+    count++;
+  }
+  EXPECT_EQ(1, count);
+  EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// Tests that if for some reason entries are modified close to existing cache
+// iterators, we don't generate fatal errors or reset the cache.
+TEST_F(DiskCacheBackendTest, FixEnumerators) {
+  InitCache();
+
+  int seed = static_cast<int>(Time::Now().ToInternalValue());
+  srand(seed);
+
+  const int kNumEntries = 10;
+  for (int i = 0; i < kNumEntries; i++) {
+    std::string key = GenerateKey(true);
+    disk_cache::Entry* entry;
+    ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+    entry->Close();
+  }
+  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+
+  disk_cache::Entry *entry1, *entry2;
+  void* iter1 = NULL;
+  void* iter2 = NULL;
+  ASSERT_TRUE(cache_->OpenNextEntry(&iter1, &entry1));
+  ASSERT_TRUE(NULL != entry1);
+  entry1->Close();
+  entry1 = NULL;
+
+  // Let's go to the middle of the list.
+  for (int i = 0; i < kNumEntries / 2; i++) {
+    if (entry1)
+      entry1->Close();
+    ASSERT_TRUE(cache_->OpenNextEntry(&iter1, &entry1));
+    ASSERT_TRUE(NULL != entry1);
+
+    // Advance a second iterator in lockstep with the first one.
+    ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+    ASSERT_TRUE(NULL != entry2);
+    entry2->Close();
+  }
+
+  // Messing up with entry1 will modify entry2->next.
+  entry1->Doom();
+  ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+  ASSERT_TRUE(NULL != entry2);
+
+  // The link entry2->entry1 should be broken.
+  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
+  entry1->Close();
+  entry2->Close();
+
+  // And the second iterator should keep working.
+  ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+  ASSERT_TRUE(NULL != entry2);
+  entry2->Close();
+
+  cache_->EndEnumeration(&iter1);
+  cache_->EndEnumeration(&iter2);
+}
+
+// Creates two entries, waits, creates two more, and then checks that
+// DoomEntriesSince() removes exactly the entries newer than the given time.
+// (The unused 'initial' timestamp of the original code was removed.)
+void DiskCacheBackendTest::BackendDoomRecent() {
+  disk_cache::Entry *entry;
+  ASSERT_TRUE(cache_->CreateEntry("first", &entry));
+  entry->Close();
+  ASSERT_TRUE(cache_->CreateEntry("second", &entry));
+  entry->Close();
+
+  Sleep(20);
+  Time middle = Time::Now();
+
+  ASSERT_TRUE(cache_->CreateEntry("third", &entry));
+  entry->Close();
+  ASSERT_TRUE(cache_->CreateEntry("fourth", &entry));
+  entry->Close();
+
+  Sleep(20);
+  Time final = Time::Now();
+
+  ASSERT_EQ(4, cache_->GetEntryCount());
+  // Nothing is newer than 'final', so this must be a no-op.
+  EXPECT_TRUE(cache_->DoomEntriesSince(final));
+  ASSERT_EQ(4, cache_->GetEntryCount());
+
+  // Dooming since 'middle' must remove only the last two entries.
+  EXPECT_TRUE(cache_->DoomEntriesSince(middle));
+  ASSERT_EQ(2, cache_->GetEntryCount());
+
+  ASSERT_TRUE(cache_->OpenEntry("second", &entry));
+  entry->Close();
+}
+
+// Creates four entries in three time bands (first | second, third | fourth)
+// and checks that DoomEntriesBetween() removes exactly the in-range entries.
+// (The unused 'initial' timestamp of the original code was removed.)
+void DiskCacheBackendTest::BackendDoomBetween() {
+  disk_cache::Entry *entry;
+  ASSERT_TRUE(cache_->CreateEntry("first", &entry));
+  entry->Close();
+
+  Sleep(20);
+  Time middle_start = Time::Now();
+
+  ASSERT_TRUE(cache_->CreateEntry("second", &entry));
+  entry->Close();
+  ASSERT_TRUE(cache_->CreateEntry("third", &entry));
+  entry->Close();
+
+  Sleep(20);
+  Time middle_end = Time::Now();
+
+  ASSERT_TRUE(cache_->CreateEntry("fourth", &entry));
+  entry->Close();
+
+  Sleep(20);
+  Time final = Time::Now();
+
+  ASSERT_EQ(4, cache_->GetEntryCount());
+  // Only "second" and "third" fall inside [middle_start, middle_end).
+  EXPECT_TRUE(cache_->DoomEntriesBetween(middle_start, middle_end));
+  ASSERT_EQ(2, cache_->GetEntryCount());
+
+  ASSERT_TRUE(cache_->OpenEntry("fourth", &entry));
+  entry->Close();
+
+  // Doom everything from middle_start on; only "first" should remain.
+  EXPECT_TRUE(cache_->DoomEntriesBetween(middle_start, final));
+  ASSERT_EQ(1, cache_->GetEntryCount());
+
+  ASSERT_TRUE(cache_->OpenEntry("first", &entry));
+  entry->Close();
+}
+
+// Instantiations of the doom-by-time checks for both backends.
+TEST_F(DiskCacheBackendTest, DoomRecent) {
+  InitCache();
+  BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, DoomBetween) {
+  InitCache();
+  BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
+  SetMemoryOnlyMode();
+  InitCache();
+  BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
+  SetMemoryOnlyMode();
+  InitCache();
+  BackendDoomBetween();
+}
+
+// Each TestTransaction() call replays a canned cache snapshot captured in the
+// middle of an insert and expects a clean recovery (0 on success).
+TEST(DiskCacheTest, Backend_RecoverInsert) {
+  // Tests with an empty cache.
+  EXPECT_EQ(0, TestTransaction(L"insert_empty1", 0, false));
+  EXPECT_EQ(0, TestTransaction(L"insert_empty2", 0, false));
+  EXPECT_EQ(0, TestTransaction(L"insert_empty3", 0, false));
+
+  // Tests with one entry on the cache.
+  EXPECT_EQ(0, TestTransaction(L"insert_one1", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"insert_one2", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"insert_one3", 1, false));
+
+  // Tests with one hundred entries on the cache, tiny index.
+  EXPECT_EQ(0, TestTransaction(L"insert_load1", 100, true));
+  EXPECT_EQ(0, TestTransaction(L"insert_load2", 100, true));
+}
+
+// Each TestTransaction() call replays a snapshot captured in the middle of a
+// removal and expects a clean recovery (0 on success).
+TEST(DiskCacheTest, Backend_RecoverRemove) {
+  // Removing the only element.
+  EXPECT_EQ(0, TestTransaction(L"remove_one1", 0, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_one2", 0, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_one3", 0, false));
+
+  // Removing the head.
+  EXPECT_EQ(0, TestTransaction(L"remove_head1", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_head2", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_head3", 1, false));
+
+  // Removing the tail.
+  EXPECT_EQ(0, TestTransaction(L"remove_tail1", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_tail2", 1, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_tail3", 1, false));
+
+  // Removing with one hundred entries on the cache, tiny index.
+  EXPECT_EQ(0, TestTransaction(L"remove_load1", 100, true));
+  EXPECT_EQ(0, TestTransaction(L"remove_load2", 100, true));
+  EXPECT_EQ(0, TestTransaction(L"remove_load3", 100, true));
+
+#ifdef NDEBUG
+  // This case cannot be reverted, so it will assert on debug builds.
+  EXPECT_EQ(0, TestTransaction(L"remove_one4", 0, false));
+  EXPECT_EQ(0, TestTransaction(L"remove_head4", 1, false));
+#endif
+}
+
+// Tests dealing with cache files that cannot be recovered.
+TEST(DiskCacheTest, Backend_DeleteOld) {
+  // A cache with the wrong on-disk version must be discarded and recreated
+  // empty.
+  ASSERT_TRUE(CopyTestCache(L"wrong_version"));
+  std::wstring path = GetCachePath();
+  scoped_ptr<disk_cache::Backend> cache;
+  cache.reset(disk_cache::CreateCacheBackend(path, true, 0));
+
+  MessageLoopHelper helper;
+
+  ASSERT_TRUE(NULL != cache.get());
+  ASSERT_EQ(0, cache->GetEntryCount());
+
+  // Wait for a callback that never comes... about 2 secs :). The message loop
+  // has to run to allow destruction of the cleaner thread.
+  helper.WaitUntilCacheIoFinished(1);
+}
+
+// We want to be able to deal with messed up entries on disk.
+TEST(DiskCacheTest, Backend_InvalidEntry) {
+  // Open a canned cache that contains one corrupt entry.
+  ASSERT_TRUE(CopyTestCache(L"bad_entry"));
+  std::wstring path = GetCachePath();
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+  ASSERT_TRUE(NULL != cache);
+
+  // The good entry opens; the corrupt one is reported as missing.
+  disk_cache::Entry *entry1, *entry2;
+  ASSERT_TRUE(cache->OpenEntry("the first key", &entry1));
+  EXPECT_FALSE(cache->OpenEntry("some other key", &entry2));
+  entry1->Close();
+
+  // CheckCacheIntegrity will fail at this point.
+  delete cache;
+}
+
+// We want to be able to deal with messed up entries on disk.
+TEST(DiskCacheTest, Backend_InvalidRankings) {
+  // This snapshot has a corrupt rankings (LRU) node for the first entry.
+  ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+  std::wstring path = GetCachePath();
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+  ASSERT_TRUE(NULL != cache);
+
+  // The entry with the bad rankings node fails to open; the other succeeds.
+  disk_cache::Entry *entry1, *entry2;
+  EXPECT_FALSE(cache->OpenEntry("the first key", &entry1));
+  ASSERT_TRUE(cache->OpenEntry("some other key", &entry2));
+  entry2->Close();
+
+  // CheckCacheIntegrity will fail at this point.
+  delete cache;
+}
+
+// If the LRU is corrupt, we delete the cache.
+TEST(DiskCacheTest, Backend_InvalidRankings2) {
+  ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+  std::wstring path = GetCachePath();
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+  ASSERT_TRUE(NULL != cache);
+
+  disk_cache::Entry* entry;
+  void* iter = NULL;
+  ASSERT_TRUE(cache->OpenNextEntry(&iter, &entry));
+  entry->Close();
+  EXPECT_EQ(2, cache->GetEntryCount());
+
+  // Hitting the corrupt LRU node ends the enumeration and empties the cache.
+  EXPECT_FALSE(cache->OpenNextEntry(&iter, &entry));
+  EXPECT_EQ(0, cache->GetEntryCount());
+
+  delete cache;
+  EXPECT_TRUE(CheckCacheIntegrity(path));
+}
+
+// If the LRU is corrupt and we have open entries, we disable the cache.
+TEST(DiskCacheTest, Backend_Disable) {
+  ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+  std::wstring path = GetCachePath();
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+  ASSERT_TRUE(NULL != cache);
+
+  disk_cache::Entry *entry1, *entry2;
+  void* iter = NULL;
+  ASSERT_TRUE(cache->OpenNextEntry(&iter, &entry1));
+
+  // With entry1 still open the backend cannot restart itself, so it is
+  // disabled: enumeration stops and no new entries can be created.
+  EXPECT_FALSE(cache->OpenNextEntry(&iter, &entry2));
+  EXPECT_EQ(2, cache->GetEntryCount());
+  EXPECT_FALSE(cache->CreateEntry("Something new", &entry2));
+
+  entry1->Close();
+
+  EXPECT_EQ(0, cache->GetEntryCount());
+
+  delete cache;
+  EXPECT_TRUE(CheckCacheIntegrity(path));
+}
+
+// This is another type of corruption on the LRU; disable the cache.
+TEST(DiskCacheTest, Backend_Disable2) {
+  // The rankings list of this snapshot contains a loop.
+  ASSERT_TRUE(CopyTestCache(L"list_loop"));
+  std::wstring path = GetCachePath();
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+  ASSERT_TRUE(NULL != cache);
+
+  EXPECT_EQ(8, cache->GetEntryCount());
+
+  disk_cache::Entry* entry;
+  void* iter = NULL;
+  int count = 0;
+  while (cache->OpenNextEntry(&iter, &entry)) {
+    ASSERT_TRUE(NULL != entry);
+    entry->Close();
+    count++;
+    // Guard against the enumeration following the loop forever.
+    ASSERT_LT(count, 9);
+  };
+
+  EXPECT_EQ(0, cache->GetEntryCount());
+
+  delete cache;
+  EXPECT_TRUE(CheckCacheIntegrity(path));
+}
+
+// Exercises the usage-statistics timer of the backend.
+TEST(DiskCacheTest, Backend_UsageStats) {
+  MessageLoopHelper helper;
+
+  std::wstring path = GetCachePath();
+  ASSERT_TRUE(DeleteCache(path.c_str()));
+  scoped_ptr<disk_cache::BackendImpl> cache;
+  cache.reset(new disk_cache::BackendImpl(path));
+  ASSERT_TRUE(NULL != cache.get());
+  cache->SetUnitTestMode();
+  ASSERT_TRUE(cache->Init());
+
+  // Wait for a callback that never comes... about 2 secs :). The message loop
+  // has to run to allow invocation of the usage timer.
+  helper.WaitUntilCacheIoFinished(1);
+}
+
+// Checks DoomAllEntries() with closed entries, with open entries (doomed
+// entries must stay usable through their handles), and after releasing all
+// references. (The unused 'initial' timestamp of the original code was
+// removed.)
+void DiskCacheBackendTest::BackendDoomAll() {
+  disk_cache::Entry *entry1, *entry2;
+  ASSERT_TRUE(cache_->CreateEntry("first", &entry1));
+  ASSERT_TRUE(cache_->CreateEntry("second", &entry2));
+  entry1->Close();
+  entry2->Close();
+
+  // These two are left open across the doom.
+  ASSERT_TRUE(cache_->CreateEntry("third", &entry1));
+  ASSERT_TRUE(cache_->CreateEntry("fourth", &entry2));
+
+  ASSERT_EQ(4, cache_->GetEntryCount());
+  EXPECT_TRUE(cache_->DoomAllEntries());
+  ASSERT_EQ(0, cache_->GetEntryCount());
+
+  disk_cache::Entry *entry3, *entry4;
+  ASSERT_TRUE(cache_->CreateEntry("third", &entry3));
+  ASSERT_TRUE(cache_->CreateEntry("fourth", &entry4));
+
+  EXPECT_TRUE(cache_->DoomAllEntries());
+  ASSERT_EQ(0, cache_->GetEntryCount());
+
+  entry1->Close();
+  entry2->Close();
+  entry3->Doom();  // The entry should be already doomed, but this must work.
+  entry3->Close();
+  entry4->Close();
+
+  // Now try with all references released.
+  ASSERT_TRUE(cache_->CreateEntry("third", &entry1));
+  ASSERT_TRUE(cache_->CreateEntry("fourth", &entry2));
+  entry1->Close();
+  entry2->Close();
+
+  ASSERT_EQ(2, cache_->GetEntryCount());
+  EXPECT_TRUE(cache_->DoomAllEntries());
+  ASSERT_EQ(0, cache_->GetEntryCount());
+}
+
+// Instantiations of the doom-all checks for both backends.
+TEST_F(DiskCacheBackendTest, DoomAll) {
+  InitCache();
+  BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
+  SetMemoryOnlyMode();
+  InitCache();
+  BackendDoomAll();
+}
diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc
new file mode 100644
index 0000000..e985765
--- /dev/null
+++ b/net/disk_cache/block_files.cc
@@ -0,0 +1,441 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/block_files.h"
+
+#include "base/scoped_handle.h"
+#include "base/string_util.h"
+#include "net/disk_cache/file_lock.h"
+
+namespace {
+
+const wchar_t* kBlockName = L"\\data_";
+
+// This array is used to perform a fast lookup of the nibble bit pattern to the
+// type of entry that can be stored there (number of consecutive blocks).
+const char s_types[16] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+
+// Returns the type of block (number of consecutive blocks that can be stored)
+// for a given nibble of the bitmap.
+inline int GetMapBlockType(uint8 value) {
+  return s_types[value & 0xf];
+}
+
+void FixAllocationCounters(disk_cache::BlockFileHeader* header);
+
+// Creates a new entry on the allocation map, updating the appropriate
+// counters. target is the type of block to use (number of empty blocks), and
+// size is the actual number of blocks to use.
+bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header,
+                    int* index) {
+  if (target <= 0 || target > disk_cache::kMaxNumBlocks ||
+      size <= 0 || size > disk_cache::kMaxNumBlocks) {
+    NOTREACHED();
+    return false;
+  }
+
+  // We are going to process the map on 32-block chunks (32 bits), and on every
+  // chunk, iterate through the 8 nibbles where the new block can be located.
+  // Start from the last saved hint for this block type, wrapping around.
+  int current = header->hints[target - 1];
+  for (int i = 0; i < header->max_entries / 32; i++, current++) {
+    if (current == header->max_entries / 32)
+      current = 0;
+    uint32 map_block = header->allocation_map[current];
+
+    for (int j = 0; j < 8; j++, map_block >>= 4) {
+      if (GetMapBlockType(map_block) != target)
+        continue;
+
+      // Found a cell of the desired type; mark 'size' blocks as used while
+      // the FileLock flags the header as being updated.
+      disk_cache::FileLock lock(header);
+      int index_offset = j * 4 + 4 - target;
+      *index = current * 32 + index_offset;
+      uint32 to_add = ((1 << size) - 1) << index_offset;
+      header->allocation_map[current] |= to_add;
+
+      header->hints[target - 1] = current;
+      header->empty[target - 1]--;
+      DCHECK(header->empty[target - 1] >= 0);
+      header->num_entries++;
+      if (target != size) {
+        // The unused tail of the cell becomes a smaller free cell.
+        header->empty[target - size - 1]++;
+      }
+      return true;
+    }
+  }
+
+  // It is possible to have an undetected corruption (for example when the OS
+  // crashes), fix it here.
+  LOG(ERROR) << "Failing CreateMapBlock";
+  FixAllocationCounters(header);
+  return false;
+}
+
+// Deletes the block pointed by index from allocation_map, and updates the
+// relevant counters on the header.
+void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
+  if (size < 0 || size > disk_cache::kMaxNumBlocks) {
+    NOTREACHED();
+    return;
+  }
+  // Each map bit represents one block; a byte covers two 4-block nibbles.
+  int byte_index = index / 8;
+  uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map);
+  uint8 map_block = byte_map[byte_index];
+
+  // Normalize map_block so the nibble of interest sits on the low bits.
+  if (index % 8 >= 4)
+    map_block >>= 4;
+
+  // See what type of block will be available after we delete this one.
+  int bits_at_end = 4 - size - index % 4;
+  uint8 end_mask = (0xf << (4 - bits_at_end)) & 0xf;
+  bool update_counters = (map_block & end_mask) == 0;
+  uint8 new_value = map_block & ~(((1 << size) - 1) << (index % 4));
+  int new_type = GetMapBlockType(new_value);
+
+  disk_cache::FileLock lock(header);
+  DCHECK((((1 << size) - 1) << (index % 8)) < 0x100);
+  uint8 to_clear = ((1 << size) - 1) << (index % 8);
+  DCHECK((byte_map[byte_index] & to_clear) == to_clear);
+  byte_map[byte_index] &= ~to_clear;
+
+  if (update_counters) {
+    if (bits_at_end) {
+      header->empty[bits_at_end - 1]--;
+      // Only check the counter we actually decremented: with bits_at_end == 0
+      // the original code evaluated empty[-1], reading out of bounds.
+      DCHECK(header->empty[bits_at_end - 1] >= 0);
+    }
+    header->empty[new_type - 1]++;
+  }
+  header->num_entries--;
+  DCHECK(header->num_entries >= 0);
+}
+
+// Restores the "empty counters" and allocation hints.
+void FixAllocationCounters(disk_cache::BlockFileHeader* header) {
+  // Start from a clean slate...
+  for (int type = 0; type < disk_cache::kMaxNumBlocks; type++) {
+    header->hints[type] = 0;
+    header->empty[type] = 0;
+  }
+
+  // ... and rebuild the per-type counters by scanning every nibble of the
+  // allocation bitmap.
+  const int num_words = header->max_entries / 32;
+  for (int word = 0; word < num_words; word++) {
+    uint32 map_block = header->allocation_map[word];
+    for (int nibble = 0; nibble < 8; nibble++, map_block >>= 4) {
+      const int type = GetMapBlockType(map_block);
+      if (type)
+        header->empty[type - 1]++;
+    }
+  }
+}
+
+// Returns true when no free cell of block_count blocks or bigger is left in
+// this file, so it has to be extended (or chained to the next file).
+bool NeedToGrowBlockFile(const disk_cache::BlockFileHeader* header,
+                         int block_count) {
+  for (int i = disk_cache::kMaxNumBlocks; i >= block_count; i--) {
+    if (header->empty[i - 1])
+      return false;  // A cell of this size can hold the new block.
+  }
+  return true;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+BlockFiles::~BlockFiles() {
+  // delete[] on a NULL pointer is a well-defined no-op, so no guard needed.
+  delete[] zero_buffer_;
+  CloseFiles();
+}
+
+// Opens (and optionally creates) the first kFirstAdditionlBlockFile backing
+// files, one per fixed block size.
+bool BlockFiles::Init(bool create_files) {
+  // Initializing twice is not supported.
+  DCHECK(!init_);
+  if (init_)
+    return false;
+
+  block_files_.resize(kFirstAdditionlBlockFile);
+  for (int i = 0; i < kFirstAdditionlBlockFile; i++) {
+    if (create_files && !CreateBlockFile(i, static_cast<FileType>(i + 1), true))
+      return false;
+    if (!OpenBlockFile(i))
+      return false;
+  }
+
+  init_ = true;
+  return true;
+}
+
+// Drops our reference to every mapped file still open and resets the state
+// so Init() can run again.
+void BlockFiles::CloseFiles() {
+  init_ = false;
+  for (size_t i = 0; i < block_files_.size(); ++i) {
+    MappedFile* file = block_files_[i];
+    if (file) {
+      file->Release();
+      block_files_[i] = NULL;
+    }
+  }
+  block_files_.clear();
+}
+
+// Returns the filename for a given file index.
+std::wstring BlockFiles::Name(int index) {
+  // The file format allows for 256 files. The original DCHECK used ||, which
+  // made the condition a tautology; the index must be inside [0, 256).
+  DCHECK(index < 256 && index >= 0);
+  std::wstring name = StringPrintf(L"%s%s%d", path_.c_str(), kBlockName, index);
+
+  return name;
+}
+
+bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) {
+  std::wstring name = Name(index);
+  // CREATE_NEW fails if the file already exists, so 'force' decides whether
+  // an existing file is overwritten.
+  DWORD disposition = force ? CREATE_ALWAYS : CREATE_NEW;
+
+  ScopedHandle file(CreateFile(name.c_str(), GENERIC_WRITE, FILE_SHARE_READ,
+                               NULL, disposition, 0, NULL));
+  if (!file.IsValid())
+    return false;
+
+  // Write a fresh header; the data area grows later on demand.
+  BlockFileHeader header;
+  header.entry_size = Addr::BlockSizeForFileType(file_type);
+  header.this_file = static_cast<int16>(index);
+  DCHECK(index <= kint16max && index >= 0);
+
+  DWORD actual;
+  if (!WriteFile(file.Get(), &header, sizeof(header), &actual, NULL) ||
+      sizeof(header) != actual)
+    return false;
+
+  return true;
+}
+
+bool BlockFiles::OpenBlockFile(int index) {
+  // Make sure the vector has a slot for this index.
+  // NOTE(review): size() - 1 underflows when the vector is empty; the huge
+  // unsigned value still triggers the resize, but the expression deserves a
+  // cleanup -- confirm.
+  if (block_files_.size() - 1 < static_cast<unsigned int>(index)) {
+    DCHECK(index > 0);
+    int to_add = index - static_cast<int>(block_files_.size()) + 1;
+    block_files_.resize(block_files_.size() + to_add);
+  }
+
+  std::wstring name = Name(index);
+  // The file is reference counted; the vector keeps one reference, released
+  // by CloseFiles().
+  MappedFile* file = new MappedFile();
+  file->AddRef();
+
+  if (!file->Init(name, kBlockHeaderSize)) {
+    NOTREACHED();
+    LOG(ERROR) << "Failed to open " << name;
+    file->Release();
+    return false;
+  }
+
+  block_files_[index] = file;
+
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+  if (kBlockMagic != header->magic || kCurrentVersion != header->version) {
+    LOG(ERROR) << "Invalid file version or magic";
+    return false;
+  }
+
+  if (header->updating) {
+    // Last instance was not properly shutdown.
+    if (!FixBlockFileHeader(file))
+      return false;
+  }
+  return true;
+}
+
+MappedFile* BlockFiles::GetFile(Addr address) {
+  // The four preallocated block files must always be there.
+  CHECK(block_files_.size() >= 4);
+
+  int file_index = address.FileNumber();
+  if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+      !block_files_[file_index]) {
+    // We need to open the file
+    if (!OpenBlockFile(file_index))
+      return NULL;
+  }
+  // NOTE(review): this DCHECK looks like it should use '>' (the vector must
+  // have more than file_index elements) -- confirm.
+  DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+  return block_files_[file_index];
+}
+
+bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
+  if (kMaxBlocks == header->max_entries)
+    return false;  // This file is already at its maximum size.
+
+  DCHECK(!header->empty[3]);
+  // Grow in chunks of 1024 entries, capped at kMaxBlocks.
+  int new_size = header->max_entries + 1024;
+  if (new_size > kMaxBlocks)
+    new_size = kMaxBlocks;
+
+  int new_size_bytes = new_size * header->entry_size + sizeof(*header);
+
+  FileLock lock(header);
+  if (!file->SetLength(new_size_bytes)) {
+    // Most likely we are trying to truncate the file, so the header is wrong.
+    if (header->updating < 10 && !FixBlockFileHeader(file)) {
+      // If we can't fix the file increase the lock guard so we'll pick it on
+      // the next start and replace it.
+      header->updating = 100;
+      return false;
+    }
+    return (header->max_entries >= new_size);
+  }
+
+  // The new space is accounted for as free 4-block cells.
+  header->empty[3] = (new_size - header->max_entries) / 4;  // 4 blocks entries
+  header->max_entries = new_size;
+
+  return true;
+}
+
+MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
+  // block_type - 1 indexes the preallocated files, so RANKINGS must be 1.
+  COMPILE_ASSERT(RANKINGS == 1, invalid_fily_type);
+  MappedFile* file = block_files_[block_type - 1];
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+  while (NeedToGrowBlockFile(header, block_count)) {
+    if (kMaxBlocks == header->max_entries) {
+      // This file is full; move on to the next file of the chain.
+      file = NextFile(file);
+      if (!file)
+        return NULL;
+      header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+      continue;
+    }
+
+    if (!GrowBlockFile(file, header))
+      return NULL;
+    break;
+  }
+  return file;
+}
+
+MappedFile* BlockFiles::NextFile(const MappedFile* file) {
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+  int new_file = header->next_file;
+  if (!new_file) {
+    // RANKINGS is not reported as a type for small entries, but we may be
+    // extending the rankings block file.
+    FileType type = Addr::RequiredFileType(header->entry_size);
+    if (header->entry_size == Addr::BlockSizeForFileType(RANKINGS))
+      type = RANKINGS;
+
+    new_file = CreateNextBlockFile(type);
+    if (!new_file)
+      return NULL;
+
+    // Link the new file into the chain while the header is flagged as
+    // being updated.
+    FileLock lock(header);
+    header->next_file = new_file;
+  }
+
+  // Only the block_file argument is relevant for what we want.
+  Addr address(BLOCK_256, 1, new_file, 0);
+  return GetFile(address);
+}
+
+// Scans the extra block-file slots for a name that is not in use yet and
+// creates it; returns the new index, or 0 if every slot is taken.
+int BlockFiles::CreateNextBlockFile(FileType block_type) {
+  int index = kFirstAdditionlBlockFile;
+  while (index <= kMaxBlockFile) {
+    if (CreateBlockFile(index, block_type, false))
+      return index;
+    index++;
+  }
+  return 0;
+}
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+                             Addr* block_address) {
+  // Only block-file types and 1..4 consecutive blocks are supported.
+  if (block_type < RANKINGS || block_type > BLOCK_4K ||
+      block_count < 1 || block_count > 4)
+    return false;
+  if (!init_)
+    return false;
+
+  MappedFile* file = FileForNewBlock(block_type, block_count);
+  if (!file)
+    return false;
+
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+  // Pick the smallest free cell type that can hold block_count blocks.
+  int target_size = 0;
+  for (int i = block_count; i <= 4; i++) {
+    if (header->empty[i - 1]) {
+      target_size = i;
+      break;
+    }
+  }
+
+  DCHECK(target_size);
+  int index;
+  if (!CreateMapBlock(target_size, block_count, header, &index))
+    return false;
+
+  Addr address(block_type, block_count, header->this_file, index);
+  block_address->set_value(address.value());
+  return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+  if (!address.is_initialized() || address.is_separate_file())
+    return;
+
+  // Lazily build the zero buffer used to wipe deleted blocks (deep deletes);
+  // it is sized for the largest possible entry (4 blocks of BLOCK_4K).
+  if (!zero_buffer_) {
+    zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+    memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+  }
+  MappedFile* file = GetFile(address);
+  if (!file)
+    return;
+
+  size_t size = address.BlockSize() * address.num_blocks();
+  size_t offset = address.start_block() * address.BlockSize() +
+                  kBlockHeaderSize;
+  if (deep)
+    file->Write(zero_buffer_, size, offset);
+
+  // Release the blocks on the allocation bitmap.
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+  DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+}
+
+bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+  int file_size = static_cast<int>(file->GetLength());
+  if (file_size < static_cast<int>(sizeof(*header)))
+    return false;  // file_size > 2GB is also an error.
+
+  // The real file length must match what the header claims; a mismatch means
+  // we probably crashed in the middle of GrowBlockFile().
+  int expected = header->entry_size * header->max_entries + sizeof(*header);
+  if (file_size != expected) {
+    int max_expected = header->entry_size * kMaxBlocks + sizeof(*header);
+    if (file_size < expected || header->empty[3] || file_size > max_expected) {
+      NOTREACHED();
+      LOG(ERROR) << "Unexpected file size";
+      return false;
+    }
+    // We were in the middle of growing the file.
+    int num_entries = (file_size - sizeof(*header)) / header->entry_size;
+    header->max_entries = num_entries;
+  }
+
+  // Rebuild the free-cell counters from the bitmap and clear the dirty flag.
+  FixAllocationCounters(header);
+  header->updating = 0;
+  return true;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/block_files.h b/net/disk_cache/block_files.h
new file mode 100644
index 0000000..f0f27ec
--- /dev/null
+++ b/net/disk_cache/block_files.h
@@ -0,0 +1,105 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_BLOCK_FILES_H__
+#define NET_DISK_CACHE_BLOCK_FILES_H__
+
+#include <vector>
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace disk_cache {
+
+class EntryImpl;
+
+// This class handles the set of block-files open by the disk cache.
+class BlockFiles {
+ public:
+  // The initializer list follows the member declaration order (init_,
+  // zero_buffer_, path_); the original order triggered -Wreorder.
+  explicit BlockFiles(const std::wstring& path)
+      : init_(false), zero_buffer_(NULL), path_(path) {}
+  ~BlockFiles();
+
+  // Performs the object initialization. create_files indicates if the backing
+  // files should be created or just opened.
+  bool Init(bool create_files);
+
+  // Returns the file that stores a given address.
+  MappedFile* GetFile(Addr address);
+
+  // Creates a new entry on a block file. block_type indicates the size of block
+  // to be used (as defined on addr.h), block_count is the number of blocks to
+  // allocate, and block_address is the address of the new entry.
+  bool CreateBlock(FileType block_type, int block_count, Addr* block_address);
+
+  // Removes an entry from the block files. If deep is true, the storage is zero
+  // filled; otherwise the entry is removed but the data is not altered (must be
+  // already zeroed).
+  void DeleteBlock(Addr address, bool deep);
+
+  // Close all the files and set the internal state to be initialized again. The
+  // cache is being purged.
+  void CloseFiles();
+
+ private:
+  // Set force to true to overwrite the file if it exists.
+  bool CreateBlockFile(int index, FileType file_type, bool force);
+  bool OpenBlockFile(int index);
+
+  // Attempt to grow this file. Fails if the file cannot be extended anymore.
+  bool GrowBlockFile(MappedFile* file, BlockFileHeader* header);
+
+  // Returns the appropriate file to use for a new block.
+  MappedFile* FileForNewBlock(FileType block_type, int block_count);
+
+  // Returns the next block file on this chain, creating new files if needed.
+  MappedFile* NextFile(const MappedFile* file);
+
+  // Creates an empty block file and returns its index.
+  int CreateNextBlockFile(FileType block_type);
+
+  // Restores the header of a potentially inconsistent file.
+  bool FixBlockFileHeader(MappedFile* file);
+
+  // Returns the filename for a given file index.
+  std::wstring Name(int index);
+
+  bool init_;
+  char* zero_buffer_;  // Buffer to speed-up cleaning deleted entries.
+  std::wstring path_;  // Path to the backing folder.
+  std::vector<MappedFile*> block_files_;  // The actual files.
+
+  DISALLOW_EVIL_CONSTRUCTORS(BlockFiles);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BLOCK_FILES_H__
diff --git a/net/disk_cache/block_files_unittest.cc b/net/disk_cache/block_files_unittest.cc
new file mode 100644
index 0000000..804ee3b
--- /dev/null
+++ b/net/disk_cache/block_files_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(DiskCacheTest, BlockFiles_Grow) {
+ std::wstring path = GetCachePath();
+ ASSERT_TRUE(DeleteCache(path.c_str()));
+
+ disk_cache::BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < 35000; i++) {
+ disk_cache::Addr address(0);
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, 4, &address));
+ }
+}
+
+// Handling of block files not properly closed.
+TEST(DiskCacheTest, BlockFiles_Recover) {
+ std::wstring path = GetCachePath();
+ ASSERT_TRUE(DeleteCache(path.c_str()));
+
+ disk_cache::BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kNumEntries = 2000;
+ disk_cache::CacheAddr entries[kNumEntries];
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ for (int i = 0; i < kNumEntries; i++) {
+ disk_cache::Addr address(0);
+ int size = (rand() % 4) + 1;
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, size, &address));
+ entries[i] = address.value();
+ }
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ disk_cache::CacheAddr temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ disk_cache::Addr address(entries[i]);
+ files.DeleteBlock(address, false);
+ }
+
+ // At this point, there are kNumEntries / 2 entries on the file, randomly
+ // distributed both on location and size.
+
+ disk_cache::Addr address(entries[kNumEntries / 2]);
+ disk_cache::MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ disk_cache::BlockFileHeader* header =
+ reinterpret_cast<disk_cache::BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ int max_entries = header->max_entries;
+ int empty_1 = header->empty[0];
+ int empty_2 = header->empty[1];
+ int empty_3 = header->empty[2];
+ int empty_4 = header->empty[3];
+
+ // Corrupt the file.
+ header->max_entries = header->empty[0] = 0;
+ header->empty[1] = header->empty[2] = header->empty[3] = 0;
+ header->updating = -1;
+
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+
+ // The file must have been fixed.
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ header = reinterpret_cast<disk_cache::BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ EXPECT_EQ(max_entries, header->max_entries);
+ EXPECT_EQ(empty_1, header->empty[0]);
+ EXPECT_EQ(empty_2, header->empty[1]);
+ EXPECT_EQ(empty_3, header->empty[2]);
+ EXPECT_EQ(empty_4, header->empty[3]);
+}
+
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
new file mode 100644
index 0000000..f2c90b7
--- /dev/null
+++ b/net/disk_cache/disk_cache.h
@@ -0,0 +1,181 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Defines the public interface of the disk cache. For more details see
+// http://wiki/Main/ChromeDiskCacheBackend
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_H__
+#define NET_DISK_CACHE_DISK_CACHE_H__
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/time.h"
+#include "net/base/completion_callback.h"
+
+namespace disk_cache {
+
+class Entry;
+class Backend;
+
+// Returns an instance of the Backend. path points to a folder where
+// the cached data will be stored. This cache instance must be the only object
+// that will be reading or writing files to that folder. The returned object
+// should be deleted when not needed anymore. If force is true, and there is
+// a problem with the cache initialization, the files will be deleted and a
+// new set will be created. max_bytes is the maximum size the cache can grow to.
+// If zero is passed in as max_bytes, the cache will determine the value to use
+// based on the available disk space. The returned pointer can be NULL if a
+// fatal error is found.
+Backend* CreateCacheBackend(const std::wstring& path, bool force,
+ int max_bytes);
+
+// Returns an instance of a Backend implemented only in memory. The returned
+// object should be deleted when not needed anymore. max_bytes is the maximum
+// size the cache can grow to. If zero is passed in as max_bytes, the cache will
+// determine the value to use based on the available memory. The returned
+// pointer can be NULL if a fatal error is found.
+Backend* CreateInMemoryCacheBackend(int max_bytes);
+
+// The root interface for a disk cache instance.
+class Backend {
+ public:
+ virtual ~Backend() {}
+
+ // Returns the number of entries in the cache.
+ virtual int32 GetEntryCount() const = 0;
+
+ // Opens an existing entry. Upon success, the out param holds a pointer
+ // to a Entry object representing the specified disk cache entry.
+ // When the entry pointer is no longer needed, the Close method
+ // should be called.
+ virtual bool OpenEntry(const std::string& key, Entry** entry) = 0;
+
+ // Creates a new entry. Upon success, the out param holds a pointer
+ // to a Entry object representing the newly created disk cache
+ // entry. When the entry pointer is no longer needed, the Close
+ // method should be called.
+ virtual bool CreateEntry(const std::string& key, Entry** entry) = 0;
+
+ // Marks the entry, specified by the given key, for deletion.
+ virtual bool DoomEntry(const std::string& key) = 0;
+
+ // Marks all entries for deletion.
+ virtual bool DoomAllEntries() = 0;
+
+ // Marks a range of entries for deletion. This supports unbounded deletes in
+ // either direction by using null Time values for either argument.
+ virtual bool DoomEntriesBetween(const Time initial_time,
+ const Time end_time) = 0;
+
+ // Marks all entries accessed since initial_time for deletion.
+ virtual bool DoomEntriesSince(const Time initial_time) = 0;
+
+ // Enumerate the cache. Initialize iter to NULL before calling this method
+ // the first time. That will cause the enumeration to start at the head of
+ // the cache. For subsequent calls, pass the same iter pointer again without
+ // changing its value. This method returns false when there are no more
+ // entries to enumerate. When the entry pointer is no longer needed, the
+ // Close method should be called.
+ //
+ // NOTE: This method does not modify the last_used field of the entry,
+ // and therefore it does not impact the eviction ranking of the entry.
+ virtual bool OpenNextEntry(void** iter, Entry** next_entry) = 0;
+
+ // Releases iter without returning the next entry. Whenever OpenNextEntry()
+ // returns true, but the caller is not interested in continuing the
+ // enumeration by calling OpenNextEntry() again, the enumeration must be
+ // ended by calling this method with iter returned by OpenNextEntry().
+ virtual void EndEnumeration(void** iter) = 0;
+
+ // Return a list of cache statistics.
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) = 0;
+};
+
+// This interface represents an entry in the disk cache.
+class Entry {
+ public:
+ // Marks this cache entry for deletion.
+ virtual void Doom() = 0;
+
+ // Releases this entry. Calling this method does not cancel pending IO
+ // operations on this entry. Even after the last reference to this object has
+ // been released, pending completion callbacks may be invoked.
+ virtual void Close() = 0;
+
+ // Returns the key associated with this cache entry.
+ virtual std::string GetKey() const = 0;
+
+ // Returns the time when this cache entry was last used.
+ virtual Time GetLastUsed() const = 0;
+
+ // Returns the time when this cache entry was last modified.
+ virtual Time GetLastModified() const = 0;
+
+ // Returns the size of the cache data with the given index.
+ virtual int32 GetDataSize(int index) const = 0;
+
+ // Copies cache data into the given buffer of length |buf_len|. If
+ // completion_callback is null, then this call blocks until the read
+ // operation is complete. Otherwise, completion_callback will be
+ // called on the current thread once the read completes. Returns the
+ // number of bytes read or a network error code. If a completion callback is
+ // provided then it will be called if this function returns ERR_IO_PENDING.
+ // Note that the callback will be invoked in any case, even after Close has
+ // been called; in other words, the caller may close this entry without
+ // having to wait for all the callbacks, and still rely on the cleanup
+ // performed from the callback code.
+ virtual int ReadData(int index, int offset, char* buf, int buf_len,
+ net::CompletionCallback* completion_callback) = 0;
+
+ // Copies cache data from the given buffer of length |buf_len|. If
+ // completion_callback is null, then this call blocks until the write
+ // operation is complete. Otherwise, completion_callback will be
+ // called on the current thread once the write completes. Returns the
+ // number of bytes written or a network error code. If a completion callback
+ // is provided then it will be called if this function returns ERR_IO_PENDING.
+ // Note that the callback will be invoked in any case, even after Close has
+ // been called; in other words, the caller may close this entry without
+ // having to wait for all the callbacks, and still rely on the cleanup
+ // performed from the callback code.
+ // If truncate is true, this call will truncate the stored data at the end of
+ // what we are writing here.
+ virtual int WriteData(int index, int offset, const char* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate) = 0;
+
+ protected:
+ virtual ~Entry() {}
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_CACHE_H__
diff --git a/net/disk_cache/disk_cache_perftest.cc b/net/disk_cache/disk_cache_perftest.cc
new file mode 100644
index 0000000..6e23471
--- /dev/null
+++ b/net/disk_cache/disk_cache_perftest.cc
@@ -0,0 +1,236 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "base/perftimer.h"
+#include "base/scoped_handle.h"
+#include "base/timer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/hash.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+extern int g_cache_tests_max_id;
+extern volatile int g_cache_tests_received;
+extern volatile bool g_cache_tests_error;
+
+namespace {
+
+bool EvictFileFromSystemCache(const wchar_t* name) {
+ // Overwrite it with no buffering.
+ ScopedHandle file(CreateFile(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+ OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, NULL));
+ if (!file.IsValid())
+ return false;
+
+ // Execute in chunks. It could be optimized. We want to do few of these since
+  // these operations will be slow without the cache.
+ char buffer[128 * 1024];
+ int total_bytes = 0;
+ DWORD bytes_read;
+ for (;;) {
+ if (!ReadFile(file, buffer, sizeof(buffer), &bytes_read, NULL))
+ return false;
+ if (bytes_read == 0)
+ break;
+
+ bool final = false;
+ if (bytes_read < sizeof(buffer))
+ final = true;
+
+ DWORD to_write = final ? sizeof(buffer) : bytes_read;
+
+ DWORD actual;
+ SetFilePointer(file, total_bytes, 0, FILE_BEGIN);
+ if (!WriteFile(file, buffer, to_write, &actual, NULL))
+ return false;
+ total_bytes += bytes_read;
+
+ if (final) {
+ SetFilePointer(file, total_bytes, 0, FILE_BEGIN);
+ SetEndOfFile(file);
+ break;
+ }
+ }
+ return true;
+}
+
+struct TestEntry {
+ std::string key;
+ int data_len;
+};
+typedef std::vector<TestEntry> TestEntries;
+
+const int kMaxSize = 16 * 1024 - 1;
+
+// Creates num_entries on the cache, and writes 200 bytes of metadata and up
+// to kMaxSize of data to each entry.
+int TimeWrite(int num_entries, disk_cache::Backend* cache,
+ TestEntries* entries) {
+ char buffer1[200];
+ char buffer2[kMaxSize];
+
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+
+ CallbackTest callback(1);
+ g_cache_tests_error = false;
+ g_cache_tests_max_id = 1;
+ g_cache_tests_received = 0;
+ int expected = 0;
+
+ MessageLoopHelper helper;
+
+ PerfTimeLogger timer("Write disk cache entries");
+
+ for (int i = 0; i < num_entries; i++) {
+ TestEntry entry;
+ entry.key = GenerateKey(true);
+ entry.data_len = rand() % sizeof(buffer2);
+ entries->push_back(entry);
+
+ disk_cache::Entry* cache_entry;
+ if (!cache->CreateEntry(entry.key, &cache_entry))
+ break;
+ int ret = cache_entry->WriteData(0, 0, buffer1, sizeof(buffer1), &callback,
+ false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (sizeof(buffer1) != ret)
+ break;
+
+ ret = cache_entry->WriteData(1, 0, buffer2, entry.data_len, &callback,
+ false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entry.data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return expected;
+}
+
+// Reads the data and metadata from each entry listed on |entries|.
+int TimeRead(int num_entries, disk_cache::Backend* cache,
+ const TestEntries& entries, bool cold) {
+ char buffer1[200];
+ char buffer2[kMaxSize];
+
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+
+ CallbackTest callback(1);
+ g_cache_tests_error = false;
+ g_cache_tests_max_id = 1;
+ g_cache_tests_received = 0;
+ int expected = 0;
+
+ MessageLoopHelper helper;
+
+ const char* message = cold ? "Read disk cache entries (cold)" :
+ "Read disk cache entries (warm)";
+ PerfTimeLogger timer(message);
+
+ for (int i = 0; i < num_entries; i++) {
+ disk_cache::Entry* cache_entry;
+ if (!cache->OpenEntry(entries[i].key, &cache_entry))
+ break;
+ int ret = cache_entry->ReadData(0, 0, buffer1, sizeof(buffer1), &callback);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (sizeof(buffer1) != ret)
+ break;
+
+ ret = cache_entry->ReadData(1, 0, buffer2, entries[i].data_len, &callback);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entries[i].data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return expected;
+}
+
+} // namespace
+
+TEST(DiskCacheTest, Hash) {
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ PerfTimeLogger timer("Hash disk cache keys");
+ for (int i = 0; i < 300000; i++) {
+ std::string key = GenerateKey(true);
+ uint32 hash = disk_cache::Hash(key);
+ }
+ timer.Done();
+}
+
+TEST(DiskCacheTest, CacheBackendPerformance) {
+ std::wstring path = GetCachePath();
+ ASSERT_TRUE(DeleteCache(path.c_str()));
+ disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false, 0);
+ ASSERT_TRUE(NULL != cache);
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ TestEntries entries;
+ int num_entries = 1000;
+
+ int ret = TimeWrite(num_entries, cache, &entries);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ delete cache;
+
+ ASSERT_TRUE(EvictFileFromSystemCache((path + L"\\index").c_str()));
+ ASSERT_TRUE(EvictFileFromSystemCache((path + L"\\data_0").c_str()));
+ ASSERT_TRUE(EvictFileFromSystemCache((path + L"\\data_1").c_str()));
+ ASSERT_TRUE(EvictFileFromSystemCache((path + L"\\data_2").c_str()));
+ ASSERT_TRUE(EvictFileFromSystemCache((path + L"\\data_3").c_str()));
+
+ cache = disk_cache::CreateCacheBackend(path, false, 0);
+ ASSERT_TRUE(NULL != cache);
+
+ ret = TimeRead(num_entries, cache, entries, true);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ ret = TimeRead(num_entries, cache, entries, false);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ delete cache;
+}
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
new file mode 100644
index 0000000..57ab31f
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -0,0 +1,126 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/disk_cache_test_base.h"
+
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mem_backend_impl.h"
+
+void DiskCacheTestBase::SetMaxSize(int size) {
+ size_ = size;
+ if (cache_impl_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size));
+
+ if (mem_cache_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+}
+
+void DiskCacheTestBase::InitCache() {
+ if (mask_)
+ implementation_ = true;
+
+ if (memory_only_)
+ InitMemoryCache();
+ else
+ InitDiskCache();
+
+ ASSERT_TRUE(NULL != cache_);
+ ASSERT_EQ(0, cache_->GetEntryCount());
+}
+
+void DiskCacheTestBase::InitMemoryCache() {
+ if (!implementation_) {
+ cache_ = disk_cache::CreateInMemoryCacheBackend(size_);
+ return;
+ }
+
+ mem_cache_ = new disk_cache::MemBackendImpl();
+ cache_ = mem_cache_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
+
+ ASSERT_TRUE(mem_cache_->Init());
+}
+
+void DiskCacheTestBase::InitDiskCache() {
+ std::wstring path = GetCachePath();
+ ASSERT_TRUE(DeleteCache(path.c_str()));
+
+ if (!implementation_) {
+ cache_ = disk_cache::CreateCacheBackend(path, force_creation_, size_);
+ return;
+ }
+
+ if (mask_)
+ cache_impl_ = new disk_cache::BackendImpl(path, mask_);
+ else
+ cache_impl_ = new disk_cache::BackendImpl(path);
+
+ cache_ = cache_impl_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
+
+ ASSERT_TRUE(cache_impl_->Init());
+}
+
+
+void DiskCacheTestBase::TearDown() {
+ delete cache_;
+
+ if (!memory_only_) {
+ std::wstring path = GetCachePath();
+ EXPECT_TRUE(CheckCacheIntegrity(path));
+ }
+}
+
+// We are expected to leak memory when simulating crashes.
+void DiskCacheTestBase::SimulateCrash() {
+ ASSERT_TRUE(implementation_ && !memory_only_);
+ cache_impl_->ClearRefCountForTest();
+
+ delete cache_impl_;
+ std::wstring path = GetCachePath();
+ EXPECT_TRUE(CheckCacheIntegrity(path));
+
+ if (mask_)
+ cache_impl_ = new disk_cache::BackendImpl(path, mask_);
+ else
+ cache_impl_ = new disk_cache::BackendImpl(path);
+ cache_ = cache_impl_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ cache_impl_->SetMaxSize(size_);
+ ASSERT_TRUE(cache_impl_->Init());
+}
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
new file mode 100644
index 0000000..feb22b2
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -0,0 +1,92 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H__
+#define NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H__
+
+#include "base/basictypes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+class Backend;
+class BackendImpl;
+class MemBackendImpl;
+
+}
+
+// Provides basic support for cache related tests.
+class DiskCacheTestBase : public testing::Test {
+ protected:
+ DiskCacheTestBase()
+ : cache_(NULL), cache_impl_(NULL), mem_cache_(NULL), mask_(0), size_(0),
+ memory_only_(false), implementation_(false), force_creation_(false) {}
+
+ void InitCache();
+ virtual void TearDown();
+ void SimulateCrash();
+
+ void SetMemoryOnlyMode() {
+ memory_only_ = true;
+ }
+
+ // Use the implementation directly instead of the factory provided object.
+ void SetDirectMode() {
+ implementation_ = true;
+ }
+
+ void SetMask(uint32 mask) {
+ mask_ = mask;
+ }
+
+ void SetMaxSize(int size);
+
+ // Deletes and re-creates the files on initialization errors.
+ void SetForceCreation() {
+ force_creation_ = true;
+ }
+
+ // cache_ will always have a valid object, regardless of how the cache was
+ // initialized. The implementation pointers can be NULL.
+ disk_cache::Backend* cache_;
+ disk_cache::BackendImpl* cache_impl_;
+ disk_cache::MemBackendImpl* mem_cache_;
+
+ uint32 mask_;
+ int size_;
+ bool memory_only_;
+ bool implementation_;
+ bool force_creation_;
+
+ private:
+ void InitMemoryCache();
+ void InitDiskCache();
+};
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H__
diff --git a/net/disk_cache/disk_cache_test_util.cc b/net/disk_cache/disk_cache_test_util.cc
new file mode 100644
index 0000000..5ad5d9e
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_util.cc
@@ -0,0 +1,163 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/disk_cache_test_util.h"
+
+#include "base/file_util.h"
+#include "base/path_service.h"
+#include "base/scoped_handle.h"
+#include "net/disk_cache/backend_impl.h"
+
+std::string GenerateKey(bool same_length) {
+ char key[200];
+ CacheTestFillBuffer(key, sizeof(key), same_length);
+
+ key[199] = '\0';
+ return std::string(key);
+}
+
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls) {
+ static bool called = false;
+ if (!called) {
+ called = true;
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ }
+
+ for (size_t i = 0; i < len; i++) {
+ buffer[i] = static_cast<char>(rand());
+ if (!buffer[i] && no_nulls)
+ buffer[i] = 'g';
+ }
+ if (len && !buffer[0])
+ buffer[0] = 'g';
+}
+
+std::wstring GetCachePath() {
+ std::wstring path;
+ PathService::Get(base::DIR_TEMP, &path);
+ file_util::AppendToPath(&path, L"cache_test");
+ if (!file_util::PathExists(path))
+ file_util::CreateDirectory(path);
+
+ return path;
+}
+
+bool CreateCacheTestFile(const wchar_t* name) {
+ ScopedHandle file(CreateFile(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+ CREATE_ALWAYS, 0, NULL));
+ if (!file.IsValid())
+ return false;
+
+ SetFilePointer(file, 4 * 1024 * 1024, 0, FILE_BEGIN);
+ SetEndOfFile(file);
+ return true;
+}
+
+bool DeleteCache(const wchar_t* path) {
+ std::wstring my_path(path);
+ file_util::AppendToPath(&my_path, L"*.*");
+ return file_util::Delete(my_path, false);
+}
+
+bool CheckCacheIntegrity(const std::wstring& path) {
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(path));
+ if (!cache.get())
+ return false;
+ if (!cache->Init())
+ return false;
+ return cache->SelfCheck() >= 0;
+}
+
+// -----------------------------------------------------------------------
+
+int g_cache_tests_max_id = 0;
+volatile int g_cache_tests_received = 0;
+volatile bool g_cache_tests_error = 0;
+
+// On the actual callback, increase the number of tests received and check for
+// errors (an unexpected test received)
+void CallbackTest::RunWithParams(const Tuple1<int>& params) {
+ if (id_ > g_cache_tests_max_id) {
+ NOTREACHED();
+ g_cache_tests_error = true;
+ } else if (reuse_) {
+ DCHECK(1 == reuse_);
+ if (2 == reuse_)
+ g_cache_tests_error = true;
+ reuse_++;
+ }
+
+ g_cache_tests_received++;
+}
+
+// -----------------------------------------------------------------------
+
+// Quits the message loop when all callbacks are called or we've been waiting
+// too long for them (2 secs without a callback).
+void TimerTask::Run() {
+ if (g_cache_tests_received > num_callbacks_) {
+ NOTREACHED();
+ } else if (g_cache_tests_received == num_callbacks_) {
+ completed_ = true;
+ MessageLoop::current()->Quit();
+ } else {
+ // Not finished yet. See if we have to abort.
+ if (last_ == g_cache_tests_received)
+ num_iterations_++;
+ else
+ last_ = g_cache_tests_received;
+ if (40 == num_iterations_)
+ MessageLoop::current()->Quit();
+ }
+}
+
+// -----------------------------------------------------------------------
+
+MessageLoopHelper::MessageLoopHelper() {
+ message_loop_ = MessageLoop::current();
+  // Create a recurrent timer of 50 ms.
+ timer_ = message_loop_->timer_manager()->StartTimer(50, &timer_task_, true);
+}
+
+MessageLoopHelper::~MessageLoopHelper() {
+ message_loop_->timer_manager()->StopTimer(timer_);
+ delete timer_;
+}
+
+bool MessageLoopHelper::WaitUntilCacheIoFinished(int num_callbacks) {
+ if (num_callbacks == g_cache_tests_received)
+ return true;
+
+ timer_task_.ExpectCallbacks(num_callbacks);
+ message_loop_->Run();
+ return timer_task_.GetSate();
+}
+
diff --git a/net/disk_cache/disk_cache_test_util.h b/net/disk_cache/disk_cache_test_util.h
new file mode 100644
index 0000000..ee41059
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_util.h
@@ -0,0 +1,127 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H__
+#define NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H__
+
+#include <string>
+
+#include "base/message_loop.h"
+#include "base/task.h"
+
+// Re-creates a given test file inside the cache test folder.
+bool CreateCacheTestFile(const wchar_t* name);
+
+// Deletes all files in the cache folder.
+bool DeleteCache(const wchar_t* path);
+
+// Gets the path to the cache test folder.
+std::wstring GetCachePath();
+
+// Fills buffer with random values (may contain nulls unless no_nulls is true).
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls);
+
+// Deletes all files matching a pattern.
+// Do not call this function with "*" as search_name.
+bool DeleteFiles(const wchar_t* path, const wchar_t* search_name);
+
+// Generates a random key of up to 200 bytes.
+std::string GenerateKey(bool same_length);
+
+// Returns true if the cache is not corrupt.
+bool CheckCacheIntegrity(const std::wstring& path);
+
+// -----------------------------------------------------------------------
+
+// Simple callback to process IO completions from the cache.
+class CallbackTest : public CallbackRunner< Tuple1<int> > {
+ public:
+  explicit CallbackTest(int id) : id_(id), reuse_(0) {}
+  // NOTE(review): reuse == true yields reuse_ == 0, which disables the reuse
+  // checks in RunWithParams, while reuse == false arms them (reuse_ == 1).
+  // This mapping looks inverted relative to the parameter name -- confirm.
+  explicit CallbackTest(int id, bool reuse) : id_(id), reuse_(reuse ? 0 : 1) {}
+  ~CallbackTest() {}
+
+  virtual void RunWithParams(const Tuple1<int>& params);
+
+ private:
+  int id_;     // Identifier compared against g_cache_tests_max_id.
+  int reuse_;  // 0: no tracking; 1: must fire once; 2: fired a second time.
+  DISALLOW_EVIL_CONSTRUCTORS(CallbackTest);
+};
+
+// -----------------------------------------------------------------------
+
+// We'll use a timer to fire from time to time to check the number of IO
+// operations finished so far.
+class TimerTask : public Task {
+ public:
+  // Initialize every member: Run() can fire off the recurring timer before
+  // ExpectCallbacks() is called, and previously read last_ / completed_
+  // uninitialized in that window.
+  TimerTask()
+      : num_callbacks_(0), num_iterations_(0), last_(0), completed_(false) {}
+  ~TimerTask() {}
+
+  virtual void Run();
+
+  // Sets the number of callbacks that can be received so far.
+  void ExpectCallbacks(int num_callbacks) {
+    num_callbacks_ = num_callbacks;
+    num_iterations_ = last_ = 0;
+    completed_ = false;
+  }
+
+  // Returns true if all callbacks were invoked.
+  // (The misspelled name is kept as-is; callers already use GetSate().)
+  bool GetSate() {
+    return completed_;
+  }
+
+ private:
+  int num_callbacks_;   // Number of callbacks we are waiting for.
+  int num_iterations_;  // Consecutive timer ticks without progress.
+  int last_;            // Value of g_cache_tests_received at the last tick.
+  bool completed_;      // True once all expected callbacks arrived.
+  DISALLOW_EVIL_CONSTRUCTORS(TimerTask);
+};
+
+// -----------------------------------------------------------------------
+
+// Simple helper to deal with the message loop on a test.
+class MessageLoopHelper {
+ public:
+  MessageLoopHelper();
+  ~MessageLoopHelper();
+
+  // Run the message loop and wait for num_callbacks before returning. Returns
+  // false if we are waiting too long.
+  bool WaitUntilCacheIoFinished(int num_callbacks);
+
+ private:
+  MessageLoop* message_loop_;  // Loop of the thread that created the helper.
+  Timer* timer_;               // Recurring 50 ms watchdog timer (owned).
+  TimerTask timer_task_;       // Task fired by timer_.
+  DISALLOW_EVIL_CONSTRUCTORS(MessageLoopHelper);
+};
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H__
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
new file mode 100644
index 0000000..4546c88
--- /dev/null
+++ b/net/disk_cache/disk_format.h
@@ -0,0 +1,192 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The cache is stored on disk as a collection of block-files, plus an index
+// file plus a collection of external files.
+//
+// Any data blob bigger than kMaxBlockSize (net/addr.h) will be stored on a
+// separate file named f_xxx where x is a hexadecimal number. Shorter data will
+// be stored as a series of blocks on a block-file. In any case, CacheAddr
+// represents the address of the data inside the cache.
+//
+// The index file is just a simple hash table that maps a particular entry to
+// a CacheAddr value. Linking for a given hash bucket is handled internally
+// by the cache entry.
+//
+// The last element of the cache is the block-file. A block file is a file
+// designed to store blocks of data of a given size. It is able to store data
+// that spans from one to four consecutive "blocks", and it grows as needed to
+// store up to approximately 65000 blocks. It has a fixed size header used for
+// bookkeeping, such as tracking free blocks in the file. For example, a
+// block-file for 1KB blocks will grow from 8KB when totally empty to about 64MB
+// when completely full. At that point, data blocks of 1KB will be stored on a
+// second block file that will store the next set of 65000 blocks. The first
+// file contains the number of the second file, and the second file contains the
+// number of a third file, created when the second file reaches its limit. It is
+// important to remember that no matter how long the chain of files is, any
+// given block can be located directly by its address, which contains the file
+// number and starting block inside the file.
+//
+// A new cache is initialized with four block files (named data_0 through
+// data_3), each one dedicated to store blocks of a given size. The number at
+// the end of the file name is the block file number (in decimal).
+//
+// There are two "special" types of blocks: an entry and a rankings node. An
+// entry keeps track of all the information related to the same cache entry,
+// such as the key, hash value, data pointers etc. A rankings node keeps track
+// of the information that is updated frequently for a given entry, such as its
+// location on the LRU list, last access time etc.
+//
+// The files that store internal information for the cache (blocks and index)
+// are at least partially memory mapped. They have a location that is signaled
+// every time the internal structures are modified, so it is possible to detect
+// (most of the time) when the process dies in the middle of an update.
+//
+// In order to prevent dirty data from being used as valid (after a crash),
+// cache entry has a dirty identifier. Each running instance of the cache keeps
+// a separate identifier (maintained on the "this_id" header field) that is used
+// to mark every entry that is created or modified. When the entry is closed,
+// and all the data can be trusted, the dirty flag is cleared from the entry.
+// When the cache encounters an entry whose identifier is different than the one
+// being currently used, it means that the entry was not properly closed on a
+// previous run, so it is discarded.
+
+#ifndef NET_DISK_CACHE_DISK_FORMAT_H__
+#define NET_DISK_CACHE_DISK_FORMAT_H__
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+// Address of a record inside the cache backing store (block file or external
+// file); see the file-format description above.
+typedef uint32 CacheAddr;
+
+const int kIndexTablesize = 0x10000;
+const uint32 kIndexMagic = 0xC103CAC3;
+const uint32 kCurrentVersion = 0x10002; // Version 1.2.
+
+// Header for the master index file.
+struct IndexHeader {
+  uint32 magic;
+  uint32 version;
+  int32 num_entries;   // Number of entries currently stored.
+  int32 num_bytes;     // Total size of the stored data.
+  int32 last_file;     // Last external file created.
+  int32 this_id;       // Id for all entries being changed (dirty flag).
+  CacheAddr stats;     // Storage for usage data.
+  int32 table_len;     // Actual size of the table (0 == kIndexTablesize).
+  int32 pad[8];
+  // Zero-fill the whole struct (including padding, since it goes to disk)
+  // before stamping the magic/version fields.
+  IndexHeader() {
+    memset(this, 0, sizeof(*this));
+    magic = kIndexMagic;
+    version = kCurrentVersion;
+  };
+};
+
+// The structure of the whole index file: a fixed header followed by the hash
+// table of entry addresses.
+struct Index {
+  IndexHeader header;
+  CacheAddr table[kIndexTablesize];  // Default size. Actual size controlled
+                                     // by header.table_len.
+};
+
+// Main structure for an entry on the backing storage. If the key is longer than
+// what can be stored on this structure, it will be extended on consecutive
+// blocks (adding 256 bytes each time), up to 4 blocks (1024 - 32 - 1 chars).
+// After that point, the whole key will be stored as a data block or external
+// file.
+struct EntryStore {
+  uint32 hash;               // Full hash of the key.
+  CacheAddr next;            // Next entry with the same hash or bucket.
+  CacheAddr rankings_node;   // Rankings node for this entry.
+  int32 key_len;
+  CacheAddr long_key;        // Optional address of a long key.
+  int32 data_size[2];        // We can store up to 2 data chunks for each
+  CacheAddr data_addr[2];    // entry.
+  char key[256 - 9 * 4];     // null terminated
+};
+
+// Fixed on-disk record size; the assert tag was previously misspelled
+// ("bad_EntyStore").
+COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntryStore);
+const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) -
+                                  offsetof(EntryStore, key) - 1;
+
+#pragma pack(push, old, 4)
+// Rankings information for a given entry.
+struct RankingsNode {
+  uint64 last_used;          // LRU info.
+  uint64 last_modified;      // LRU info.
+  CacheAddr next;            // LRU list.
+  CacheAddr prev;            // LRU list.
+  CacheAddr contents;        // Address of the EntryStore.
+  int32 dirty;               // The entry is being modified.
+  void* pointer;             // Pointer to the in-memory entry.
+};
+#pragma pack(pop, old)
+
+// The assert below only holds with 4-byte pointers, so this on-disk layout
+// assumes a 32-bit build.
+COMPILE_ASSERT(sizeof(RankingsNode) == 36, bad_RankingsNode);
+
+const uint32 kBlockMagic = 0xC104CAC3;
+const int kBlockHeaderSize = 8192;  // Two pages: almost 64k entries
+const int kMaxBlocks = (kBlockHeaderSize - 80) * 8;
+
+// Bitmap to track used blocks on a block-file (one bit per block).
+typedef uint32 AllocBitmap[kMaxBlocks / 32];
+
+// A block-file is the file used to store information in blocks (could be
+// EntryStore blocks, RankingsNode blocks or user-data blocks).
+// We store entries that can expand for up to 4 consecutive blocks, and keep
+// counters of the number of blocks available for each type of entry. For
+// instance, an entry of 3 blocks is an entry of type 3. We also keep track of
+// where did we find the last entry of that type (to avoid searching the bitmap
+// from the beginning every time).
+// This structure is the header of a block-file:
+struct BlockFileHeader {
+  uint32 magic;
+  uint32 version;
+  int16 this_file;          // Index of this file.
+  int16 next_file;          // Next file when this one is full.
+  int32 entry_size;         // Size of the blocks of this file.
+  int32 num_entries;        // Number of stored entries.
+  int32 max_entries;        // Current maximum number of entries.
+  int32 empty[4];           // Counters of empty entries for each type.
+  int32 hints[4];           // Last used position for each entry type.
+  volatile int32 updating;  // Keep track of updates to the header.
+  int32 user[5];
+  AllocBitmap allocation_map;
+  // Zero-fill the header (it is written to disk) and stamp magic/version.
+  BlockFileHeader() {
+    memset(this, 0, sizeof(BlockFileHeader));
+    magic = kBlockMagic;
+    version = kCurrentVersion;
+  };
+};
+
+COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_FORMAT_H__
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
new file mode 100644
index 0000000..d2a4b0f
--- /dev/null
+++ b/net/disk_cache/entry_impl.cc
@@ -0,0 +1,779 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/entry_impl.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+
+namespace {
+
+// This is a simple Task to execute the callback (from the message loop instead
+// of the APC).
+class InvokeCallback : public Task {
+ public:
+  // Does not take ownership of |callback|; the pointer must stay valid until
+  // Run() executes on the message loop.
+  InvokeCallback(net::CompletionCallback* callback, int argument)
+      : callback_(callback), argument_(argument) {}
+
+  virtual void Run() {
+    callback_->Run(argument_);
+  }
+
+ private:
+  net::CompletionCallback* callback_;  // Not owned.
+  int argument_;                       // Result passed to the callback.
+  DISALLOW_EVIL_CONSTRUCTORS(InvokeCallback);
+};
+
+// This class implements FileIOCallback to buffer the callback from an IO
+// operation from the actual IO class.
+// Lifecycle: heap-allocated by the caller; deletes itself inside
+// OnFileIOComplete (or via Discard). The constructor takes a reference on
+// the entry and bumps its pending-IO count; both are undone on completion.
+class SyncCallback: public disk_cache::FileIOCallback {
+ public:
+  SyncCallback(disk_cache::EntryImpl* entry,
+               net::CompletionCallback* callback )
+      : entry_(entry), callback_(callback) {
+    entry->AddRef();
+    entry->IncrementIoCount();
+  }
+  ~SyncCallback() {}
+
+  virtual void OnFileIOComplete(int bytes_copied);
+  // Drops the user callback and completes immediately (used when the IO
+  // finished synchronously or failed to start).
+  void Discard();
+ private:
+  disk_cache::EntryImpl* entry_;        // Referenced while the IO is pending.
+  net::CompletionCallback* callback_;   // Not owned; NULL after Discard().
+
+  DISALLOW_EVIL_CONSTRUCTORS(SyncCallback);
+};
+
+// Balances the constructor: releases the IO count and the entry reference
+// (which may destroy the entry), posts the user callback to the message loop
+// if one is set, then destroys this object.
+void SyncCallback::OnFileIOComplete(int bytes_copied) {
+  entry_->DecrementIoCount();
+  entry_->Release();
+  if (callback_) {
+    InvokeCallback* task = new InvokeCallback(callback_, bytes_copied);
+    MessageLoop::current()->PostTask(FROM_HERE, task);
+  }
+  delete this;
+}
+
+// Suppresses the user callback and runs the completion path (which releases
+// the entry and deletes this object).
+void SyncCallback::Discard() {
+  callback_ = NULL;
+  OnFileIOComplete(0);
+}
+
+// Clears buffer before offset and after valid_len, knowing that the size of
+// buffer is kMaxBlockSize. Only the [offset, offset + valid_len) range is
+// left untouched.
+void ClearInvalidData(char* buffer, int offset, int valid_len) {
+  DCHECK(offset >= 0);
+  DCHECK(valid_len >= 0);
+  DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len);
+  // Zero the prefix that precedes the valid range.
+  if (offset > 0)
+    memset(buffer, 0, offset);
+  // Zero the suffix that follows the valid range.
+  int tail = disk_cache::kMaxBlockSize - (offset + valid_len);
+  if (tail > 0)
+    memset(buffer + offset + valid_len, 0, tail);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+// Binds this in-memory entry to the EntryStore record at |address|; the
+// rankings node (node_) is initialized lazily via LoadNodeAddress().
+EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
+    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) {
+  entry_.LazyInit(backend->File(address), address);
+  doomed_ = false;
+  backend_ = backend;
+  unreported_size_[0] = unreported_size_[1] = 0;
+}
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify the handling of write /
+// read partial information from an entry (don't have to worry about returning
+// data related to a previous cache entry because the range was not fully
+// written before).
+EntryImpl::~EntryImpl() {
+  if (doomed_) {
+    // Doomed entry: delete every piece of stored data, zero and release the
+    // on-disk records.
+    for (int index = 0; index < kKeyFileIndex; index++) {
+      Addr address(entry_.Data()->data_addr[index]);
+      if (address.is_initialized()) {
+        DeleteData(address, index);
+        backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+                                        unreported_size_[index], 0);
+      }
+    }
+    // DeleteData is a no-op if long_key is not initialized.
+    Addr address(entry_.Data()->long_key);
+    DeleteData(address, kKeyFileIndex);
+    backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+    memset(node_.buffer(), 0, node_.size());
+    memset(entry_.buffer(), 0, entry_.size());
+    node_.Store();
+    entry_.Store();
+
+    backend_->DeleteBlock(node_.address(), false);
+    backend_->DeleteBlock(entry_.address(), false);
+  } else {
+    // Live entry going away: flush any locally buffered data and reconcile
+    // the storage accounting.
+    bool ret = true;
+    for (int index = 0; index < kKeyFileIndex; index++) {
+      if (user_buffers_[index].get()) {
+        if (!(ret = Flush(index, entry_.Data()->data_size[index], false)))
+          LOG(ERROR) << "Failed to save user data";
+      } else if (unreported_size_[index]) {
+        backend_->ModifyStorageSize(
+            entry_.Data()->data_size[index] - unreported_size_[index],
+            entry_.Data()->data_size[index]);
+      }
+    }
+    if (node_.HasData() && this == node_.Data()->pointer) {
+      // We have to do this after Flush because we may trigger a cache trim from
+      // there, and technically this entry should be "in use".
+      node_.Data()->pointer = NULL;
+      node_.set_modified();
+    }
+
+    if (!ret) {
+      // There was a failure writing the actual data. Mark the entry as dirty.
+      int current_id = backend_->GetCurrentEntryId();
+      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+      node_.Store();
+    } else if (node_.HasData() && node_.Data()->dirty) {
+      // Clean shutdown of the entry: clear the dirty flag.
+      node_.Data()->dirty = 0;
+      node_.Store();
+    }
+  }
+
+  backend_->CacheEntryDestroyed();
+}
+
+// Deletes the storage behind |address| (block-file blocks or an external
+// file). No-op when the address was never assigned.
+void EntryImpl::DeleteData(Addr address, int index) {
+  if (!address.is_initialized())
+    return;
+
+  if (!address.is_separate_file()) {
+    backend_->DeleteBlock(address, true);
+    return;
+  }
+
+  // Drop our handle to the external file before removing it from disk.
+  if (files_[index])
+    files_[index] = NULL;  // Releases the object.
+
+  if (!DeleteFile(backend_->GetFileName(address).c_str()))
+    LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) <<
+        " from the cache.";
+}
+
+// Initializes the on-disk records for a brand new entry: zeroes the entry and
+// rankings blocks, links them to each other, and stores the key (inline when
+// it fits, otherwise in its own block or external file). Returns false on
+// allocation or IO failure.
+bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
+                            uint32 hash) {
+  Trace("Create entry In");
+  EntryStore* entry_store = entry_.Data();
+  RankingsNode* node = node_.Data();
+  // The entry may span several consecutive blocks; clear all of them.
+  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
+  memset(node, 0, sizeof(RankingsNode));
+  if (!node_.LazyInit(backend_->File(node_address), node_address))
+    return false;
+
+  // Cross-link the entry and its rankings node.
+  entry_store->rankings_node = node_address.value();
+  node->contents = entry_.address().value();
+  node->pointer = this;
+
+  entry_store->hash = hash;
+  entry_store->key_len = static_cast<int32>(key.size());
+  if (entry_store->key_len > kMaxInternalKeyLength) {
+    // Key does not fit inline: store it in its own block / external file.
+    Addr address(0);
+    if (!CreateBlock(entry_store->key_len + 1, &address))
+      return false;
+
+    entry_store->long_key = address.value();
+    File* file = GetBackingFile(address, kKeyFileIndex);
+
+    size_t offset = 0;
+    if (address.is_block_file())
+      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+    if (!file || !file->Write(key.data(), key.size(), offset)) {
+      DeleteData(address, kKeyFileIndex);
+      return false;
+    }
+
+    if (address.is_separate_file())
+      file->SetLength(key.size() + 1);
+  } else {
+    memcpy(entry_store->key, key.data(), key.size());
+    entry_store->key[key.size()] = '\0';
+  }
+  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+  // New entries start out dirty; the flag is cleared on clean close.
+  node->dirty = backend_->GetCurrentEntryId();
+  Log("Create Entry ");
+  return true;
+}
+
+// Entries are ref-counted; closing just drops the caller's reference (the
+// destructor performs the real cleanup when the count reaches zero).
+void EntryImpl::Close() {
+  Release();
+}
+
+// Marks this entry for deletion. The backend unlinks it; actual storage
+// removal happens in the destructor once all references are released.
+void EntryImpl::Doom() {
+  if (doomed_)
+    return;
+
+  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+  backend_->InternalDoomEntry(this);
+}
+
+// Backend-side part of dooming: stamp the rankings node dirty (if it is not
+// already) so a crash before destruction is detected, then flag the entry.
+void EntryImpl::InternalDoom() {
+  DCHECK(node_.HasData());
+  if (!node_.Data()->dirty) {
+    node_.Data()->dirty = backend_->GetCurrentEntryId();
+    node_.Store();
+  }
+  doomed_ = true;
+}
+
+// Returns the entry's key. Short keys live inline in the EntryStore; long
+// keys are read back from their own block / external file (an empty string is
+// returned if that read fails). The const_casts are needed because the
+// storage blocks only expose non-const accessors.
+std::string EntryImpl::GetKey() const {
+  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+  if (entry->Data()->key_len > kMaxInternalKeyLength) {
+    Addr address(entry->Data()->long_key);
+    DCHECK(address.is_initialized());
+    File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+                                                              kKeyFileIndex);
+
+    size_t offset = 0;
+    if (address.is_block_file())
+      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+    std::string key;
+    // key_len + 1 includes the stored null terminator.
+    if (!file || !file->Read(WriteInto(&key, entry->Data()->key_len + 1),
+                             entry->Data()->key_len + 1, offset))
+      key.clear();
+    return key;
+  } else {
+    return std::string(entry->Data()->key);
+  }
+}
+
+// Last-access time, as recorded on the rankings node.
+Time EntryImpl::GetLastUsed() const {
+  CacheRankingsBlock* rankings = const_cast<CacheRankingsBlock*>(&node_);
+  return Time::FromInternalValue(rankings->Data()->last_used);
+}
+
+// Last-modification time, as recorded on the rankings node.
+Time EntryImpl::GetLastModified() const {
+  CacheRankingsBlock* rankings = const_cast<CacheRankingsBlock*>(&node_);
+  return Time::FromInternalValue(rankings->Data()->last_modified);
+}
+
+// Returns the stored size of data stream |index| (0 for invalid indices).
+int32 EntryImpl::GetDataSize(int index) const {
+  if (index < 0 || index > 1)
+    return 0;
+
+  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+  return entry->Data()->data_size[index];
+}
+
+// Reads up to |buf_len| bytes of stream |index| starting at |offset|.
+// Returns the number of bytes read, net::ERR_IO_PENDING when the read
+// completes asynchronously (|completion_callback| fires later), or a net
+// error code.
+int EntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
+                        net::CompletionCallback* completion_callback) {
+  DCHECK(node_.Data()->dirty);
+  if (index < 0 || index > 1)
+    return net::ERR_INVALID_ARGUMENT;
+
+  int entry_size = entry_.Data()->data_size[index];
+  // Reading past the end, or zero bytes, is a successful no-op.
+  if (offset >= entry_size || offset < 0 || !buf_len)
+    return 0;
+
+  if (buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  // Clip the request to the data actually stored.
+  if (offset + buf_len > entry_size)
+    buf_len = entry_size - offset;
+
+  UpdateRank(false);
+
+  backend_->OnEvent(Stats::READ_DATA);
+
+  if (user_buffers_[index].get()) {
+    // Complete the operation locally.
+    DCHECK(kMaxBlockSize >= offset + buf_len);
+    memcpy(buf , user_buffers_[index].get() + offset, buf_len);
+    return buf_len;
+  }
+
+  Addr address(entry_.Data()->data_addr[index]);
+  DCHECK(address.is_initialized());
+  if (!address.is_initialized())
+    return net::ERR_FAILED;
+
+  File* file = GetBackingFile(address, index);
+  if (!file)
+    return net::ERR_FAILED;
+
+  // Data in a block-file sits after the file header, at the entry's blocks.
+  size_t file_offset = offset;
+  if (address.is_block_file())
+    file_offset += address.start_block() * address.BlockSize() +
+                   kBlockHeaderSize;
+
+  SyncCallback* io_callback = NULL;
+  if (completion_callback)
+    io_callback = new SyncCallback(this, completion_callback);
+
+  bool completed;
+  if (!file->Read(buf, buf_len, file_offset, io_callback, &completed)) {
+    // The IO never started; tear down the wrapper without firing the user
+    // callback.
+    if (io_callback)
+      io_callback->Discard();
+    return net::ERR_FAILED;
+  }
+
+  // Synchronous completion: the wrapper will not be called back, discard it.
+  if (io_callback && completed)
+    io_callback->Discard();
+
+  return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
+// Writes |buf_len| bytes to stream |index| at |offset|, optionally truncating
+// the stream to offset + buf_len. Returns bytes written, ERR_IO_PENDING for
+// an asynchronous completion, or a net error code.
+int EntryImpl::WriteData(int index, int offset, const char* buf, int buf_len,
+                         net::CompletionCallback* completion_callback,
+                         bool truncate) {
+  DCHECK(node_.Data()->dirty);
+  if (index < 0 || index > 1)
+    return net::ERR_INVALID_ARGUMENT;
+
+  if (offset < 0 || buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  int max_file_size = backend_->MaxFileSize();
+
+  // offset + buf_len could overflow to a negative number, so check each term
+  // as well as the sum.
+  if (offset > max_file_size || buf_len > max_file_size ||
+      offset + buf_len > max_file_size) {
+    int size = offset + buf_len;
+    if (size <= max_file_size)
+      size = kint32max;  // The sum overflowed; report the maximum.
+    backend_->TooMuchStorageRequested(size);
+    return net::ERR_FAILED;
+  }
+
+  // Read the size at this point (it may change inside prepare).
+  int entry_size = entry_.Data()->data_size[index];
+  if (!PrepareTarget(index, offset, buf_len, truncate))
+    return net::ERR_FAILED;
+
+  if (entry_size < offset + buf_len) {
+    // Growing the stream: account for the extra bytes.
+    unreported_size_[index] += offset + buf_len - entry_size;
+    entry_.Data()->data_size[index] = offset + buf_len;
+    entry_.set_modified();
+  } else if (truncate) {
+    // If the size was modified inside PrepareTarget, we should not do
+    // anything here.
+    if ((entry_size > offset + buf_len) &&
+        (entry_size == entry_.Data()->data_size[index])) {
+      unreported_size_[index] += offset + buf_len - entry_size;
+      entry_.Data()->data_size[index] = offset + buf_len;
+      entry_.set_modified();
+    } else {
+      // Nothing to truncate.
+      truncate = false;
+    }
+  }
+
+  UpdateRank(true);
+
+  backend_->OnEvent(Stats::WRITE_DATA);
+
+  if (user_buffers_[index].get()) {
+    // Complete the operation locally.
+    DCHECK(kMaxBlockSize >= offset + buf_len);
+    memcpy(user_buffers_[index].get() + offset, buf, buf_len);
+    return buf_len;
+  }
+
+  Addr address(entry_.Data()->data_addr[index]);
+  File* file = GetBackingFile(address, index);
+  if (!file)
+    return net::ERR_FAILED;
+
+  size_t file_offset = offset;
+  if (address.is_block_file()) {
+    file_offset += address.start_block() * address.BlockSize() +
+                   kBlockHeaderSize;
+  } else if (truncate) {
+    // External files can be truncated directly.
+    if (!file->SetLength(offset + buf_len))
+      return net::ERR_FAILED;
+  }
+
+  if (!buf_len)
+    return 0;
+
+  SyncCallback* io_callback = NULL;
+  if (completion_callback)
+    io_callback = new SyncCallback(this, completion_callback);
+
+  bool completed;
+  if (!file->Write(buf, buf_len, file_offset, io_callback, &completed)) {
+    if (io_callback)
+      io_callback->Discard();
+    return net::ERR_FAILED;
+  }
+
+  // Synchronous completion: the wrapper will not be called back, discard it.
+  if (io_callback && completed)
+    io_callback->Discard();
+
+  return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
+// Makes sure there is storage ready for a write of |buf_len| bytes at
+// |offset| on stream |index|: either an in-memory buffer or a disk block.
+bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
+                              bool truncate) {
+  Addr address(entry_.Data()->data_addr[index]);
+
+  // Existing data (buffered or on disk) is handled by the grow/relocate path.
+  if (user_buffers_[index].get() || address.is_initialized())
+    return GrowUserBuffer(index, offset, buf_len, truncate);
+
+  // First write for this stream: large writes go straight to a disk block,
+  // small ones start out in a memory buffer.
+  if (offset + buf_len > kMaxBlockSize)
+    return CreateDataBlock(index, offset + buf_len);
+
+  user_buffers_[index].reset(new char[kMaxBlockSize]);
+
+  // Zero the parts of the buffer this write will not cover, so stale heap
+  // contents never reach the disk later on.
+  ClearInvalidData(user_buffers_[index].get(), offset, buf_len);
+
+  return true;
+}
+
+// We get to this function with some data already stored. If there is a
+// truncation that results on data stored internally, we'll explicitly
+// handle the case here.
+bool EntryImpl::GrowUserBuffer(int index, int offset, int buf_len,
+                               bool truncate) {
+  Addr address(entry_.Data()->data_addr[index]);
+
+  if (offset + buf_len > kMaxBlockSize) {
+    // The data has to be stored externally.
+    if (address.is_initialized()) {
+      if (address.is_separate_file())
+        return true;
+      // Currently in a block file: stage through memory before re-flushing.
+      if (!MoveToLocalBuffer(index))
+        return false;
+    }
+    // Flush the buffered data to a new (larger) block / external file.
+    return Flush(index, offset + buf_len, true);
+  }
+
+  if (!address.is_initialized()) {
+    // Data is only in the local buffer; for a truncation, zero everything
+    // past what the upcoming write will cover.
+    DCHECK(user_buffers_[index].get());
+    if (truncate)
+      ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+    return true;
+  }
+  if (address.is_separate_file()) {
+    if (!truncate)
+      return true;
+    // Shrinking an external file down to buffer size: pull it into memory.
+    return ImportSeparateFile(index, offset, buf_len);
+  }
+
+  // At this point we are dealing with data stored on disk, inside a block file.
+  if (offset + buf_len <= address.BlockSize() * address.num_blocks())
+    return true;
+
+  // ... and the allocated block has to change.
+  if (!MoveToLocalBuffer(index))
+    return false;
+
+  int clear_start = entry_.Data()->data_size[index];
+  if (truncate)
+    clear_start = std::min(clear_start, offset + buf_len);
+  else if (offset < clear_start)
+    clear_start = std::max(offset + buf_len, clear_start);
+
+  // Clear the end of the buffer.
+  ClearInvalidData(user_buffers_[index].get(), 0, clear_start);
+  return true;
+}
+
+// Truncates an externally stored stream to offset + buf_len and pulls it into
+// the local buffer, zeroing everything past the new size.
+bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) {
+  if (entry_.Data()->data_size[index] > offset + buf_len) {
+    // Record the (negative) size delta BEFORE overwriting data_size: the
+    // original code updated data_size first, which made this adjustment
+    // always evaluate to zero.
+    unreported_size_[index] += offset + buf_len -
+                               entry_.Data()->data_size[index];
+    entry_.Data()->data_size[index] = offset + buf_len;
+  }
+
+  if (!MoveToLocalBuffer(index))
+    return false;
+
+  // Clear the end of the buffer.
+  ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+  return true;
+}
+
+// Copies the stream's on-disk data into a freshly allocated local buffer and
+// releases the disk storage, adjusting the storage accounting so the entry
+// looks zero-sized if it is lost before the next flush.
+bool EntryImpl::MoveToLocalBuffer(int index) {
+  Addr address(entry_.Data()->data_addr[index]);
+  DCHECK(!user_buffers_[index].get());
+  DCHECK(address.is_initialized());
+  // NOTE(review): scoped_ptr releases with delete, but this is new[] --
+  // mismatched deallocation; scoped_array would match. Confirm against the
+  // declaration of user_buffers_ in the header.
+  scoped_ptr<char> buffer(new char[kMaxBlockSize]);
+
+  File* file = GetBackingFile(address, index);
+  size_t len = entry_.Data()->data_size[index];
+  size_t offset = 0;
+
+  if (address.is_block_file())
+    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+  if (!file || !file->Read(buffer.get(), len, offset, NULL, NULL))
+    return false;
+
+  DeleteData(address, index);
+  entry_.Data()->data_addr[index] = 0;
+  entry_.Store();
+
+  // If we lose this entry we'll see it as zero sized.
+  backend_->ModifyStorageSize(static_cast<int>(len) - unreported_size_[index],
+                              0);
+  unreported_size_[index] = static_cast<int>(len);
+
+  user_buffers_[index].swap(buffer);
+  return true;
+}
+
+// The common scenario is that this is called from the destructor of the entry,
+// to write to disk what we have buffered. We don't want to hold the destructor
+// until the actual IO finishes, so we'll send an asynchronous write that will
+// free up the memory containing the data. To be consistent, this method always
+// returns with the buffer freed up (on success).
+bool EntryImpl::Flush(int index, int size, bool async) {
+  Addr address(entry_.Data()->data_addr[index]);
+  DCHECK(user_buffers_[index].get());
+  DCHECK(!address.is_initialized());
+
+  if (!size)
+    return true;
+
+  if (!CreateDataBlock(index, size))
+    return false;
+
+  address.set_value(entry_.Data()->data_addr[index]);
+
+  File* file = GetBackingFile(address, index);
+  size_t len = entry_.Data()->data_size[index];
+  size_t offset = 0;
+  if (address.is_block_file())
+    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+  // We just told the backend to store len bytes for real.
+  DCHECK(len == unreported_size_[index]);
+  backend_->ModifyStorageSize(0, static_cast<int>(len));
+  unreported_size_[index] = 0;
+
+  if (!file)
+    return false;
+
+  if (async) {
+    // PostWrite takes ownership of the buffer and deletes it when done.
+    if (!file->PostWrite(user_buffers_[index].get(), len, offset))
+      return false;
+  } else {
+    if (!file->Write(user_buffers_[index].get(), len, offset, NULL, NULL))
+      return false;
+    // Synchronous path: the buffer can be freed right away.
+    user_buffers_[index].reset(NULL);
+  }
+
+  // The buffer is deleted from the PostWrite operation.
+  user_buffers_[index].release();
+
+  return true;
+}
+
+// Initializes and reads the rankings node block for this entry from the
+// address recorded on disk. Returns false on any failure.
+bool EntryImpl::LoadNodeAddress() {
+  Addr node_address(entry_.Data()->rankings_node);
+  if (node_.LazyInit(backend_->File(node_address), node_address))
+    return node_.Load();
+  return false;
+}
+
+// Deduplicates in-memory entries: if the rankings node already points at a
+// live EntryImpl, releases |entry| and returns the existing object (with an
+// extra reference); otherwise registers |entry| as the in-memory instance.
+// Returns NULL if the rankings node cannot be stored.
+EntryImpl* EntryImpl::Update(EntryImpl* entry) {
+  DCHECK(entry->rankings()->HasData());
+
+  RankingsNode* rankings = entry->rankings()->Data();
+  if (rankings->pointer) {
+    // Already in memory. Prevent clearing the dirty flag on the destructor.
+    rankings->dirty = 0;
+    EntryImpl* real_node = reinterpret_cast<EntryImpl*>(rankings->pointer);
+    real_node->AddRef();
+    entry->Release();
+    return real_node;
+  } else {
+    // Mark the node dirty for this session so a crash can be detected later.
+    rankings->dirty = entry->backend_->GetCurrentEntryId();
+    rankings->pointer = entry;
+    if (!entry->rankings()->Store()) {
+      entry->Release();
+      return NULL;
+    }
+    return entry;
+  }
+}
+
+// Allocates backing storage of |size| bytes for data stream |index| and
+// persists the new address on the entry record.
+bool EntryImpl::CreateDataBlock(int index, int size) {
+  DCHECK(0 == index || 1 == index);
+
+  Addr new_address(entry_.Data()->data_addr[index]);
+  if (!CreateBlock(size, &new_address))
+    return false;
+
+  entry_.Data()->data_addr[index] = new_address.value();
+  entry_.Store();
+  return true;
+}
+
+// Allocates a generic storage block for |size| bytes. Small payloads get the
+// required number of fixed-size blocks from a block file; anything that needs
+// an external file is rejected if it exceeds the backend's file-size cap.
+// On success, |address| holds the new location of the data.
+bool EntryImpl::CreateBlock(int size, Addr* address) {
+  DCHECK(!address->is_initialized());
+
+  FileType file_type = Addr::RequiredFileType(size);
+  if (EXTERNAL != file_type) {
+    int block_size = Addr::BlockSizeForFileType(file_type);
+    // Round up to whole blocks.
+    int num_blocks = (size + block_size - 1) / block_size;
+    return backend_->CreateBlock(file_type, num_blocks, address);
+  }
+
+  if (size > backend_->MaxFileSize())
+    return false;
+  return backend_->CreateExternalFile(address);
+}
+
+// Returns true if this entry matches the lookup arguments. The cheap checks
+// (stored hash and key length) run first; the stored key is only read from
+// disk when those match.
+bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
+  if (entry_.Data()->hash != hash || entry_.Data()->key_len != key.size())
+    return false;
+
+  // Idiom fix: direct equality instead of "compare() ? false : true".
+  return key == GetKey();
+}
+
+// Returns the raw address of the next entry on this hash bucket's chain
+// (0 when this entry is the last one).
+CacheAddr EntryImpl::GetNextAddress() {
+  return entry_.Data()->next;
+}
+
+// Updates the chain link to |address| and writes the entry block back to
+// disk immediately; a store failure here is a programming/corruption error.
+void EntryImpl::SetNextAddress(Addr address) {
+  entry_.Data()->next = address.value();
+  bool success = entry_.Store();
+  DCHECK(success);
+}
+
+// Refreshes the usage timestamps for this entry. Live entries delegate to the
+// backend, which also reorders the eviction (rankings) lists; doomed entries
+// are off the lists, so only the local node data is updated.
+void EntryImpl::UpdateRank(bool modified) {
+  if (!doomed_) {
+    // Everything is handled by the backend. Bug fix: forward |modified|
+    // instead of the hard-coded true, so read-only accesses are not reported
+    // to the backend as modifications.
+    backend_->UpdateRank(&node_, modified);
+    return;
+  }
+
+  Time current = Time::Now();
+  node_.Data()->last_used = current.ToInternalValue();
+
+  if (modified)
+    node_.Data()->last_modified = current.ToInternalValue();
+}
+
+// Returns the File object that backs |address|: a dedicated external file
+// when the address names a separate file, otherwise the shared block file
+// owned by the backend. May return NULL.
+File* EntryImpl::GetBackingFile(Addr address, int index) {
+  if (address.is_separate_file())
+    return GetExternalFile(address, index);
+  return backend_->File(address);
+}
+
+// Lazily opens and caches the external file used by stream |index| (index 2
+// is the key file). Returns NULL if the file could not be initialized.
+File* EntryImpl::GetExternalFile(Addr address, int index) {
+  DCHECK(index >= 0 && index <= 2);
+  if (!files_[index].get()) {
+    // For a key file, use mixed mode IO.
+    scoped_refptr<File> file(new File(2 == index));
+    if (file->Init(backend_->GetFileName(address)))
+      files_[index].swap(file);
+  }
+  return files_[index].get();
+}
+
+// Returns the key hash stored on the entry block.
+uint32 EntryImpl::GetHash() {
+  return entry_.Data()->hash;
+}
+
+// An entry is dirty when its rankings node carries a nonzero session id that
+// does not match the current one (i.e. it was left behind by another run).
+bool EntryImpl::IsDirty(int32 current_id) {
+  DCHECK(node_.HasData());
+  int32 stored_id = node_.Data()->dirty;
+  if (!stored_id)
+    return false;
+  return stored_id != current_id;
+}
+
+// Clears the in-memory dirty marker; note this does not store the node.
+void EntryImpl::ClearDirtyFlag() {
+  node_.Data()->dirty = 0;
+}
+
+// Fixes up the rankings node of a broken entry so it looks valid enough to
+// be deleted: stamps the current session id and this object's pointer, then
+// writes the node back to disk.
+void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
+  node_.Data()->dirty = new_id;
+  node_.Data()->pointer = this;
+  node_.Store();
+}
+
+// Cheap consistency check of the on-disk entry record. Returns false when a
+// required field is missing or a stored address is of the wrong kind.
+bool EntryImpl::SanityCheck() {
+  if (!entry_.Data()->rankings_node)
+    return false;
+  if (!entry_.Data()->key_len)
+    return false;
+
+  // The rankings node must live in the rankings block file.
+  Addr rankings_addr(entry_.Data()->rankings_node);
+  bool rankings_ok = rankings_addr.is_initialized() &&
+                     !rankings_addr.is_separate_file() &&
+                     rankings_addr.file_type() == RANKINGS;
+  if (!rankings_ok)
+    return false;
+
+  // The chain link, when present, must point into the 256-byte block file.
+  Addr next_addr(entry_.Data()->next);
+  if (!next_addr.is_initialized())
+    return true;
+  return !next_addr.is_separate_file() &&
+         next_addr.file_type() == BLOCK_256;
+}
+
+// Forwards pending-async-IO bookkeeping to the backend.
+void EntryImpl::IncrementIoCount() {
+  backend_->IncrementIoCount();
+}
+
+// Forwards pending-async-IO bookkeeping to the backend.
+void EntryImpl::DecrementIoCount() {
+  backend_->DecrementIoCount();
+}
+
+// Dumps this entry's addresses, data pointers and dirty state to the internal
+// trace buffer, prefixed with |msg|.
+void EntryImpl::Log(const char* msg) {
+  // The rankings node may not be loaded yet; report zeros in that case.
+  void* pointer = NULL;
+  int dirty = 0;
+  if (node_.HasData()) {
+    pointer = node_.Data()->pointer;
+    dirty = node_.Data()->dirty;
+  }
+
+  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+        entry_.address().value(), node_.address().value());
+
+  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
+        entry_.Data()->data_addr[1], entry_.Data()->long_key);
+
+  Trace("  doomed: %d 0x%p 0x%x", doomed_, pointer, dirty);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
new file mode 100644
index 0000000..8000f37
--- /dev/null
+++ b/net/disk_cache/entry_impl.h
@@ -0,0 +1,168 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_ENTRY_IMPL_H__
+#define NET_DISK_CACHE_ENTRY_IMPL_H__
+
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+
+// This class implements the Entry interface. An object of this
+// class represents a single entry on the cache.
+class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
+  friend class base::RefCounted<EntryImpl>;
+ public:
+  EntryImpl(BackendImpl* backend, Addr address);
+
+  // Entry interface.
+  virtual void Doom();
+  virtual void Close();
+  virtual std::string GetKey() const;
+  virtual Time GetLastUsed() const;
+  virtual Time GetLastModified() const;
+  virtual int32 GetDataSize(int index) const;
+  virtual int ReadData(int index, int offset, char* buf, int buf_len,
+                       net::CompletionCallback* completion_callback);
+  virtual int WriteData(int index, int offset, const char* buf, int buf_len,
+                        net::CompletionCallback* completion_callback,
+                        bool truncate);
+
+  // Low-level accessor for the on-disk entry block.
+  inline CacheEntryBlock* entry() {
+    return &entry_;
+  }
+
+  // Low-level accessor for the on-disk rankings block.
+  inline CacheRankingsBlock* rankings() {
+    return &node_;
+  }
+
+  uint32 GetHash();
+
+  // Performs the initialization of a EntryImpl that will be added to the
+  // cache.
+  bool CreateEntry(Addr node_address, const std::string& key,
+                   uint32 hash);
+
+  // Returns true if this entry matches the lookup arguments.
+  bool IsSameEntry(const std::string& key, uint32 hash);
+
+  // Permanently destroys this entry.
+  void InternalDoom();
+
+  // Returns the address of the next entry on the list of entries with the same
+  // hash.
+  CacheAddr GetNextAddress();
+
+  // Sets the address of the next entry on the list of entries with the same
+  // hash.
+  void SetNextAddress(Addr address);
+
+  // Reloads the rankings node information.
+  bool LoadNodeAddress();
+
+  // Reloads the data for this entry. If there is already an object in memory
+  // for the entry, the returned value is a pointer to that entry, otherwise
+  // it is the passed in entry. On failure returns NULL.
+  static EntryImpl* Update(EntryImpl* entry);
+
+  // Returns true if this entry is marked as dirty on disk.
+  bool IsDirty(int32 current_id);
+  void ClearDirtyFlag();
+
+  // Fixes this entry so it can be treated as valid (to delete it).
+  void SetPointerForInvalidEntry(int32 new_id);
+
+  // Returns false if the entry is clearly invalid.
+  bool SanityCheck();
+
+  // Handle the pending asynchronous IO count.
+  void IncrementIoCount();
+  void DecrementIoCount();
+
+ private:
+  ~EntryImpl();
+
+  // Index for the file used to store the key, if any (files_[kKeyFileIndex]).
+  static const int kKeyFileIndex = 2;
+
+  // Initializes the storage for an internal or external data block.
+  bool CreateDataBlock(int index, int size);
+
+  // Initializes the storage for an internal or external generic block.
+  bool CreateBlock(int size, Addr* address);
+
+  // Deletes the data pointed by address, maybe backed by files_[index].
+  void DeleteData(Addr address, int index);
+
+  // Updates ranking information.
+  void UpdateRank(bool modified);
+
+  // Returns a pointer to the file that stores the given address.
+  File* GetBackingFile(Addr address, int index);
+
+  // Returns a pointer to the file that stores external data.
+  File* GetExternalFile(Addr address, int index);
+
+  // Prepares the target file or buffer for a write of buf_len bytes at the
+  // given offset.
+  bool PrepareTarget(int index, int offset, int buf_len, bool truncate);
+
+  // Grows the size of the storage used to store user data, if needed.
+  bool GrowUserBuffer(int index, int offset, int buf_len, bool truncate);
+
+  // Reads from a block data file to this object's memory buffer.
+  bool MoveToLocalBuffer(int index);
+
+  // Loads the external file to this object's memory buffer.
+  bool ImportSeparateFile(int index, int offset, int buf_len);
+
+  // Flush the in-memory data to the backing storage.
+  bool Flush(int index, int size, bool async);
+
+  // Logs this entry to the internal trace buffer.
+  void Log(const char* msg);
+
+  CacheEntryBlock entry_;  // Key related information for this entry.
+  CacheRankingsBlock node_;  // Rankings related information for this entry.
+  BackendImpl* backend_;  // Back pointer to the cache.
+  scoped_ptr<char> user_buffers_[2];  // Store user data.
+  scoped_refptr<File> files_[3];  // Files to store external user data and key.
+  int unreported_size_[2];  // Bytes not reported yet to the backend.
+  bool doomed_;  // True if this entry was removed from the cache.
+
+  DISALLOW_EVIL_CONSTRUCTORS(EntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ENTRY_IMPL_H__
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
new file mode 100644
index 0000000..d2eea79
--- /dev/null
+++ b/net/disk_cache/entry_unittest.cc
@@ -0,0 +1,713 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "base/timer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+extern int g_cache_tests_max_id;
+extern volatile int g_cache_tests_received;
+extern volatile bool g_cache_tests_error;
+
+// Tests that can run with different types of caches.
+// Tests that can run with different types of caches.
+// Each helper below holds the test body; the TEST_F wrappers run it against
+// the disk-backed cache and against the memory-only cache.
+class DiskCacheEntryTest : public DiskCacheTestBase {
+ protected:
+  void InternalSyncIO();
+  void InternalAsyncIO();
+  void ExternalSyncIO();
+  void ExternalAsyncIO();
+  void GetKey();
+  void GrowData();
+  void TruncateData();
+  void InvalidData();
+  void DoomEntry();
+  void DoomedEntry();
+};
+
+// Exercises synchronous reads and writes on data sizes that stay within the
+// cache's internal block files (stream 0 small, stream 1 a few KB).
+void DiskCacheEntryTest::InternalSyncIO() {
+  disk_cache::Entry *entry1 = NULL;
+  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+  ASSERT_TRUE(NULL != entry1);
+
+  char buffer1[10];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  EXPECT_EQ(0, entry1->ReadData(0, 0, buffer1, sizeof(buffer1), NULL));
+  strcpy_s(buffer1, "the data");
+  EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, sizeof(buffer1), NULL, false));
+  memset(buffer1, 0, sizeof(buffer1));
+  EXPECT_EQ(10, entry1->ReadData(0, 0, buffer1, sizeof(buffer1), NULL));
+  EXPECT_STREQ("the data", buffer1);
+
+  char buffer2[5000];
+  char buffer3[10000] = {0};
+  CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+  strcpy_s(buffer2, "The really big data goes here");
+  EXPECT_EQ(5000, entry1->WriteData(1, 1500, buffer2, sizeof(buffer2), NULL,
+                                    false));
+  memset(buffer2, 0, sizeof(buffer2));
+  EXPECT_EQ(4989, entry1->ReadData(1, 1511, buffer2, sizeof(buffer2), NULL));
+  EXPECT_STREQ("big data goes here", buffer2);
+  EXPECT_EQ(5000, entry1->ReadData(1, 0, buffer2, sizeof(buffer2), NULL));
+  // The first 1500 bytes were never written and must read back as zeros.
+  EXPECT_EQ(0, memcmp(buffer2, buffer3, 1500));
+  EXPECT_EQ(1500, entry1->ReadData(1, 5000, buffer2, sizeof(buffer2), NULL));
+
+  // Reads at or past the end return 0 bytes.
+  EXPECT_EQ(0, entry1->ReadData(1, 6500, buffer2, sizeof(buffer2), NULL));
+  EXPECT_EQ(6500, entry1->ReadData(1, 0, buffer3, sizeof(buffer3), NULL));
+  EXPECT_EQ(8192, entry1->WriteData(1, 0, buffer3, 8192, NULL, false));
+  EXPECT_EQ(8192, entry1->ReadData(1, 0, buffer3, sizeof(buffer3), NULL));
+  EXPECT_EQ(8192, entry1->GetDataSize(1));
+
+  entry1->Doom();
+  entry1->Close();
+  EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// Runs the sync internal-IO body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, InternalSyncIO) {
+  InitCache();
+  InternalSyncIO();
+}
+
+// Runs the sync internal-IO body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
+  SetMemoryOnlyMode();
+  InitCache();
+  InternalSyncIO();
+}
+
+// Same coverage as InternalSyncIO, but with completion callbacks. Each IO may
+// complete inline (returning the byte count) or asynchronously (returning
+// net::ERR_IO_PENDING), in which case the test waits for the callback.
+void DiskCacheEntryTest::InternalAsyncIO() {
+  disk_cache::Entry *entry1 = NULL;
+  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+  ASSERT_TRUE(NULL != entry1);
+
+  // Let's verify that each IO goes to the right callback object.
+  CallbackTest callback1(1, false);
+  CallbackTest callback2(2, false);
+  CallbackTest callback3(3, false);
+  CallbackTest callback4(4, false);
+  CallbackTest callback5(5, false);
+  CallbackTest callback6(6, false);
+  CallbackTest callback7(7, false);
+  CallbackTest callback8(8, false);
+  CallbackTest callback9(9, false);
+  CallbackTest callback10(10, false);
+  CallbackTest callback11(11, false);
+  CallbackTest callback12(12, false);
+  CallbackTest callback13(13, false);
+
+  g_cache_tests_error = false;
+  g_cache_tests_max_id = 0;
+  g_cache_tests_received = 0;
+
+  MessageLoopHelper helper;
+
+  char buffer1[10];
+  char buffer2[5000];
+  char buffer3[10000];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+  CacheTestFillBuffer(buffer3, sizeof(buffer3), false);
+
+  EXPECT_EQ(0, entry1->ReadData(0, 0, buffer1, sizeof(buffer1), &callback1));
+  strcpy_s(buffer1, "the data");
+  // |expected| counts the IOs that went asynchronous and must be waited on.
+  int expected = 0;
+  int ret = entry1->WriteData(0, 0, buffer1, sizeof(buffer1), &callback2,
+                              false);
+  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  memset(buffer2, 0, sizeof(buffer1));
+  ret = entry1->ReadData(0, 0, buffer2, sizeof(buffer1), &callback3);
+  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 3;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  EXPECT_STREQ("the data", buffer2);
+
+  strcpy_s(buffer2, sizeof(buffer2), "The really big data goes here");
+  ret = entry1->WriteData(1, 1500, buffer2, sizeof(buffer2), &callback4, false);
+  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  memset(buffer3, 0, sizeof(buffer2));
+  ret = entry1->ReadData(1, 1511, buffer3, sizeof(buffer2), &callback5);
+  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 5;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  EXPECT_STREQ("big data goes here", buffer3);
+  ret = entry1->ReadData(1, 0, buffer2, sizeof(buffer2), &callback6);
+  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  memset(buffer3, 0, sizeof(buffer3));
+
+  g_cache_tests_max_id = 6;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  // The unwritten head of the stream must read back as zeros.
+  EXPECT_EQ(0, memcmp(buffer2, buffer3, 1500));
+  ret = entry1->ReadData(1, 5000, buffer2, sizeof(buffer2), &callback7);
+  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  EXPECT_EQ(0, entry1->ReadData(1, 6500, buffer2, sizeof(buffer2), &callback8));
+  ret = entry1->ReadData(1, 0, buffer3, sizeof(buffer3), &callback9);
+  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  ret = entry1->WriteData(1, 0, buffer3, 8192, &callback10, false);
+  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  ret = entry1->ReadData(1, 0, buffer3, sizeof(buffer3), &callback11);
+  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  EXPECT_EQ(8192, entry1->GetDataSize(1));
+
+  ret = entry1->ReadData(0, 0, buffer1, sizeof(buffer1), &callback12);
+  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  ret = entry1->ReadData(1, 0, buffer2, sizeof(buffer2), &callback13);
+  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 13;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+  EXPECT_FALSE(g_cache_tests_error);
+  EXPECT_EQ(expected, g_cache_tests_received);
+
+  entry1->Doom();
+  entry1->Close();
+  EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// Runs the async internal-IO body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
+  InitCache();
+  InternalAsyncIO();
+}
+
+// Runs the async internal-IO body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
+  SetMemoryOnlyMode();
+  InitCache();
+  InternalAsyncIO();
+}
+
+// Exercises synchronous reads and writes with payloads big enough (17 KB /
+// 25 KB) to force the cache into external files rather than block files.
+void DiskCacheEntryTest::ExternalSyncIO() {
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+
+  char buffer1[17000], buffer2[25000];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+  strcpy_s(buffer1, "the data");
+  EXPECT_EQ(17000, entry1->WriteData(0, 0, buffer1, sizeof(buffer1), NULL,
+                                     false));
+  memset(buffer1, 0, sizeof(buffer1));
+  EXPECT_EQ(17000, entry1->ReadData(0, 0, buffer1, sizeof(buffer1), NULL));
+  EXPECT_STREQ("the data", buffer1);
+
+  strcpy_s(buffer2, "The really big data goes here");
+  EXPECT_EQ(25000, entry1->WriteData(1, 10000, buffer2, sizeof(buffer2), NULL,
+                                     false));
+  memset(buffer2, 0, sizeof(buffer2));
+  EXPECT_EQ(24989, entry1->ReadData(1, 10011, buffer2, sizeof(buffer2), NULL));
+  EXPECT_STREQ("big data goes here", buffer2);
+  EXPECT_EQ(25000, entry1->ReadData(1, 0, buffer2, sizeof(buffer2), NULL));
+  // Bug fix: the original compared buffer2 against itself, which always
+  // passes. The first 10000 bytes were never written, so they must read back
+  // as zeros (same contract the InvalidData test verifies).
+  char zeros[10000] = {0};
+  EXPECT_EQ(0, memcmp(buffer2, zeros, sizeof(zeros)));
+  EXPECT_EQ(5000, entry1->ReadData(1, 30000, buffer2, sizeof(buffer2), NULL));
+
+  EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, sizeof(buffer2), NULL));
+  EXPECT_EQ(17000, entry1->ReadData(1, 0, buffer1, sizeof(buffer1), NULL));
+  EXPECT_EQ(17000, entry1->WriteData(1, 20000, buffer1, sizeof(buffer1), NULL,
+                                     false));
+  EXPECT_EQ(37000, entry1->GetDataSize(1));
+
+  entry1->Doom();
+  entry1->Close();
+  EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// Runs the sync external-file IO body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
+  InitCache();
+  ExternalSyncIO();
+}
+
+// Runs the sync external-file IO body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
+  SetMemoryOnlyMode();
+  InitCache();
+  ExternalSyncIO();
+}
+
+// Async variant of ExternalSyncIO: large payloads through completion
+// callbacks; each IO may complete inline or return net::ERR_IO_PENDING.
+void DiskCacheEntryTest::ExternalAsyncIO() {
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+
+  // Let's verify that each IO goes to the right callback object.
+  CallbackTest callback1(1, false);
+  CallbackTest callback2(2, false);
+  CallbackTest callback3(3, false);
+  CallbackTest callback4(4, false);
+  CallbackTest callback5(5, false);
+  CallbackTest callback6(6, false);
+  CallbackTest callback7(7, false);
+  CallbackTest callback8(8, false);
+  CallbackTest callback9(9, false);
+
+  g_cache_tests_error = false;
+  g_cache_tests_max_id = 0;
+  g_cache_tests_received = 0;
+  int expected = 0;
+
+  MessageLoopHelper helper;
+
+  char buffer1[17000], buffer2[25000], buffer3[25000];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  CacheTestFillBuffer(buffer2, sizeof(buffer2), false);
+  CacheTestFillBuffer(buffer3, sizeof(buffer3), false);
+  strcpy_s(buffer1, "the data");
+  int ret = entry1->WriteData(0, 0, buffer1, sizeof(buffer1), &callback1,
+                              false);
+  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 1;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+  memset(buffer2, 0, sizeof(buffer1));
+  ret = entry1->ReadData(0, 0, buffer2, sizeof(buffer1), &callback2);
+  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 2;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  // Bug fix: the read above went into buffer2, but the original asserted on
+  // buffer1 (which trivially still held "the data" from the strcpy_s).
+  EXPECT_STREQ("the data", buffer2);
+
+  strcpy_s(buffer2, "The really big data goes here");
+  ret = entry1->WriteData(1, 10000, buffer2, sizeof(buffer2), &callback3,
+                          false);
+  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 3;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+  memset(buffer3, 0, sizeof(buffer3));
+  ret = entry1->ReadData(1, 10011, buffer3, sizeof(buffer3), &callback4);
+  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 4;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  EXPECT_STREQ("big data goes here", buffer3);
+  ret = entry1->ReadData(1, 0, buffer2, sizeof(buffer2), &callback5);
+  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  g_cache_tests_max_id = 5;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  // Bug fix: the original compared buffer2 against itself, which always
+  // passes. The first 10000 bytes were never written and must be zeros.
+  char zeros[10000] = {0};
+  EXPECT_EQ(0, memcmp(buffer2, zeros, sizeof(zeros)));
+  ret = entry1->ReadData(1, 30000, buffer2, sizeof(buffer2), &callback6);
+  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, sizeof(buffer2),
+                                &callback7));
+  ret = entry1->ReadData(1, 0, buffer1, sizeof(buffer1), &callback8);
+  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+  ret = entry1->WriteData(1, 20000, buffer1, sizeof(buffer1), &callback9,
+                          false);
+  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+  EXPECT_EQ(37000, entry1->GetDataSize(1));
+
+  g_cache_tests_max_id = 9;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+  EXPECT_FALSE(g_cache_tests_error);
+  EXPECT_EQ(expected, g_cache_tests_received);
+
+  entry1->Doom();
+  entry1->Close();
+  EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// Runs the async external-file IO body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
+  InitCache();
+  ExternalAsyncIO();
+}
+
+// Runs the async external-file IO body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
+  SetMemoryOnlyMode();
+  InitCache();
+  ExternalAsyncIO();
+}
+
+// Verifies that GetKey() round-trips keys of increasing length: short,
+// 1000 bytes, ~3000 bytes (stored in a block), and ~20000 bytes (stored in
+// an external key file).
+void DiskCacheEntryTest::GetKey() {
+  std::string key1("the first key");
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  EXPECT_EQ(key1, entry1->GetKey()) << "short key";
+  entry1->Close();
+
+  int seed = static_cast<int>(Time::Now().ToInternalValue());
+  srand(seed);
+  char key_buffer[20000];
+
+  CacheTestFillBuffer(key_buffer, 3000, true);
+  key_buffer[1000] = '\0';
+
+  key1 = key_buffer;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  EXPECT_TRUE(key1 == entry1->GetKey()) << "1000 bytes key";
+  entry1->Close();
+
+  key_buffer[1000] = 'p';
+  key_buffer[3000] = '\0';
+  key1 = key_buffer;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  EXPECT_TRUE(key1 == entry1->GetKey()) << "medium size key";
+  entry1->Close();
+
+  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
+  key_buffer[19999] = '\0';
+
+  key1 = key_buffer;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  EXPECT_TRUE(key1 == entry1->GetKey()) << "long key";
+  entry1->Close();
+}
+
+// Runs the key round-trip body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, GetKey) {
+  InitCache();
+  GetKey();
+}
+
+// Runs the key round-trip body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
+  SetMemoryOnlyMode();
+  InitCache();
+  GetKey();
+}
+
+// Verifies that an entry's data stream grows correctly through the storage
+// tiers: small internal block -> larger block size -> external file, both
+// within one open entry and across close/reopen.
+void DiskCacheEntryTest::GrowData() {
+  std::string key1("the first key");
+  disk_cache::Entry *entry1, *entry2;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+  char buffer1[20000];
+  char buffer2[20000];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  memset(buffer2, 0, sizeof(buffer2));
+
+  strcpy_s(buffer1, "the data");
+  EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, 10, NULL, false));
+  EXPECT_EQ(10, entry1->ReadData(0, 0, buffer2, 10, NULL));
+  EXPECT_STREQ("the data", buffer2);
+  EXPECT_EQ(10, entry1->GetDataSize(0));
+
+  EXPECT_EQ(2000, entry1->WriteData(0, 0, buffer1, 2000, NULL, false));
+  EXPECT_EQ(2000, entry1->GetDataSize(0));
+  EXPECT_EQ(2000, entry1->ReadData(0, 0, buffer2, 2000, NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, 2000));
+
+  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, sizeof(buffer1), NULL,
+                                     false));
+  EXPECT_EQ(20000, entry1->GetDataSize(0));
+  EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, sizeof(buffer2), NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, sizeof(buffer1)));
+  entry1->Close();
+
+  memset(buffer2, 0, sizeof(buffer2));
+  ASSERT_TRUE(cache_->CreateEntry("Second key", &entry2));
+  EXPECT_EQ(10, entry2->WriteData(0, 0, buffer1, 10, NULL, false));
+  EXPECT_EQ(10, entry2->GetDataSize(0));
+  entry2->Close();
+
+  // Go from an internal address to a bigger block size.
+  ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
+  EXPECT_EQ(2000, entry2->WriteData(0, 0, buffer1, 2000, NULL, false));
+  EXPECT_EQ(2000, entry2->GetDataSize(0));
+  EXPECT_EQ(2000, entry2->ReadData(0, 0, buffer2, 2000, NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, 2000));
+  entry2->Close();
+  memset(buffer2, 0, sizeof(buffer2));
+
+  // Go from an internal address to an external one.
+  ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
+  EXPECT_EQ(20000, entry2->WriteData(0, 0, buffer1, sizeof(buffer1), NULL,
+                                     false));
+  EXPECT_EQ(20000, entry2->GetDataSize(0));
+  EXPECT_EQ(20000, entry2->ReadData(0, 0, buffer2, sizeof(buffer2), NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, sizeof(buffer1)));
+  entry2->Close();
+}
+
+// Runs the data-growth body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, GrowData) {
+  InitCache();
+  GrowData();
+}
+
+// Runs the data-growth body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
+  SetMemoryOnlyMode();
+  InitCache();
+  GrowData();
+}
+
+// Verifies truncating writes: a write with truncate=false never shrinks the
+// stream, truncate=true cuts it at offset+len, including shrinking from an
+// external file back to an internal block and down to zero length.
+void DiskCacheEntryTest::TruncateData() {
+  std::string key1("the first key");
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+  char buffer1[20000];
+  char buffer2[20000];
+
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  memset(buffer2, 0, sizeof(buffer2));
+
+  // Simple truncation:
+  EXPECT_EQ(200, entry1->WriteData(0, 0, buffer1, 200, NULL, false));
+  EXPECT_EQ(200, entry1->GetDataSize(0));
+  EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, false));
+  EXPECT_EQ(200, entry1->GetDataSize(0));
+  EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, true));
+  EXPECT_EQ(100, entry1->GetDataSize(0));
+  EXPECT_EQ(0, entry1->WriteData(0, 50, buffer1, 0, NULL, true));
+  EXPECT_EQ(50, entry1->GetDataSize(0));
+  EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
+  EXPECT_EQ(0, entry1->GetDataSize(0));
+  entry1->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+  // Go to an external file.
+  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
+  EXPECT_EQ(20000, entry1->GetDataSize(0));
+  EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, 20000, NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, 20000));
+  memset(buffer2, 0, sizeof(buffer2));
+
+  // External file truncation
+  EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, false));
+  EXPECT_EQ(20000, entry1->GetDataSize(0));
+  EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, true));
+  EXPECT_EQ(18000, entry1->GetDataSize(0));
+  EXPECT_EQ(0, entry1->WriteData(0, 17500, buffer1, 0, NULL, true));
+  EXPECT_EQ(17500, entry1->GetDataSize(0));
+
+  // And back to an internal block.
+  EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
+  EXPECT_EQ(1600, entry1->GetDataSize(0));
+  EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer2, 600, NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, 600));
+  EXPECT_EQ(1000, entry1->ReadData(0, 0, buffer2, 1000, NULL));
+  EXPECT_TRUE(!memcmp(buffer1, buffer2, 1000)) << "Preserves previous data";
+
+  // Go from external file to zero length.
+  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
+  EXPECT_EQ(20000, entry1->GetDataSize(0));
+  EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
+  EXPECT_EQ(0, entry1->GetDataSize(0));
+
+  entry1->Close();
+}
+
+// Runs the truncation body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, TruncateData) {
+  InitCache();
+  TruncateData();
+}
+
+// Runs the truncation body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
+  SetMemoryOnlyMode();
+  InitCache();
+  TruncateData();
+}
+
+// Reading somewhere that was not written should return zeros.
+// Grows the stream with writes past the current end (with and without the
+// truncate flag, in blocks and in an external file) and checks the gaps.
+void DiskCacheEntryTest::InvalidData() {
+  std::string key1("the first key");
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+  char buffer1[20000];
+  char buffer2[20000];
+  char buffer3[20000];
+
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  memset(buffer2, 0, sizeof(buffer2));
+
+  // Simple data grow:
+  EXPECT_EQ(200, entry1->WriteData(0, 400, buffer1, 200, NULL, false));
+  EXPECT_EQ(600, entry1->GetDataSize(0));
+  EXPECT_EQ(100, entry1->ReadData(0, 300, buffer3, 100, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 100));
+  entry1->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+  // The entry is now on disk. Load it and extend it.
+  EXPECT_EQ(200, entry1->WriteData(0, 800, buffer1, 200, NULL, false));
+  EXPECT_EQ(1000, entry1->GetDataSize(0));
+  EXPECT_EQ(100, entry1->ReadData(0, 700, buffer3, 100, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 100));
+  entry1->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+  // This time using truncate.
+  EXPECT_EQ(200, entry1->WriteData(0, 1800, buffer1, 200, NULL, true));
+  EXPECT_EQ(2000, entry1->GetDataSize(0));
+  EXPECT_EQ(100, entry1->ReadData(0, 1500, buffer3, 100, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 100));
+
+  // Go to an external file.
+  EXPECT_EQ(200, entry1->WriteData(0, 19800, buffer1, 200, NULL, false));
+  EXPECT_EQ(20000, entry1->GetDataSize(0));
+  EXPECT_EQ(4000, entry1->ReadData(0, 14000, buffer3, 4000, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 4000));
+
+  // And back to an internal block.
+  EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
+  EXPECT_EQ(1600, entry1->GetDataSize(0));
+  EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer3, 600, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer1, 600));
+
+  // Extend it again.
+  EXPECT_EQ(600, entry1->WriteData(0, 2000, buffer1, 600, NULL, false));
+  EXPECT_EQ(2600, entry1->GetDataSize(0));
+  EXPECT_EQ(200, entry1->ReadData(0, 1800, buffer3, 200, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 200));
+
+  // And again (with truncation flag).
+  EXPECT_EQ(600, entry1->WriteData(0, 3000, buffer1, 600, NULL, true));
+  EXPECT_EQ(3600, entry1->GetDataSize(0));
+  EXPECT_EQ(200, entry1->ReadData(0, 2800, buffer3, 200, NULL));
+  EXPECT_TRUE(!memcmp(buffer3, buffer2, 200));
+
+  entry1->Close();
+}
+
+// Runs the zero-fill body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, InvalidData) {
+  InitCache();
+  InvalidData();
+}
+
+// Runs the zero-fill body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
+  SetMemoryOnlyMode();
+  InitCache();
+  InvalidData();
+}
+
+// Dooms entries (one empty, one with a 20000-byte key and data on both
+// streams) and checks the cache ends up empty.
+void DiskCacheEntryTest::DoomEntry() {
+  std::string key1("the first key");
+  disk_cache::Entry *entry1;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  entry1->Doom();
+  entry1->Close();
+
+  char key_buffer[20000];
+  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
+  key_buffer[19999] = '\0';
+
+  key1 = key_buffer;
+  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+  EXPECT_EQ(20000, entry1->WriteData(0, 0, key_buffer, 20000, NULL, false));
+  EXPECT_EQ(20000, entry1->WriteData(1, 0, key_buffer, 20000, NULL, false));
+  entry1->Doom();
+  entry1->Close();
+
+  EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// Runs the doom body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, DoomEntry) {
+  InitCache();
+  DoomEntry();
+}
+
+// Runs the doom body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
+  SetMemoryOnlyMode();
+  InitCache();
+  DoomEntry();
+}
+
+// Verify that basic operations work as expected with doomed entries.
+// A doomed entry is gone from the index immediately, but IO through the
+// still-open handle must keep working and the timestamps must advance.
+void DiskCacheEntryTest::DoomedEntry() {
+  std::string key("the first key");
+  disk_cache::Entry *entry;
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+  entry->Doom();
+
+  EXPECT_EQ(0, cache_->GetEntryCount());
+  Time initial = Time::Now();
+  // Give the clock a chance to move before the next IO.
+  Sleep(20);
+
+  char buffer1[2000];
+  char buffer2[2000];
+  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+  memset(buffer2, 0, sizeof(buffer2));
+
+  EXPECT_EQ(2000, entry->WriteData(0, 0, buffer1, 2000, NULL, false));
+  EXPECT_EQ(2000, entry->ReadData(0, 0, buffer2, 2000, NULL));
+  EXPECT_EQ(0, memcmp(buffer1, buffer2, sizeof(buffer1)));
+  EXPECT_TRUE(initial < entry->GetLastModified());
+  EXPECT_TRUE(initial < entry->GetLastUsed());
+
+  entry->Close();
+}
+
+// Runs the doomed-entry body against the disk-backed cache.
+TEST_F(DiskCacheEntryTest, DoomedEntry) {
+  InitCache();
+  // Bug fix: this test was calling DoomEntry() instead of DoomedEntry(),
+  // leaving DoomedEntry() without any coverage.
+  DoomedEntry();
+}
+
+// Runs the doomed-entry body against the memory-only cache.
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
+  SetMemoryOnlyMode();
+  InitCache();
+  // Bug fix: this test was calling DoomEntry() instead of DoomedEntry(),
+  // leaving the memory-only DoomedEntry() path without any coverage.
+  DoomedEntry();
+}
diff --git a/net/disk_cache/errors.h b/net/disk_cache/errors.h
new file mode 100644
index 0000000..553e6d14
--- /dev/null
+++ b/net/disk_cache/errors.h
@@ -0,0 +1,52 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Error codes reported by self tests.
+
+#ifndef NET_DISK_CACHE_ERRORS_H__
+#define NET_DISK_CACHE_ERRORS_H__
+
+namespace disk_cache {
+
+// All values are negative; zero is implicitly reserved for "no error".
+enum {
+  ERR_INIT_FAILED = -1,
+  ERR_INVALID_TAIL = -2,
+  ERR_INVALID_HEAD = -3,
+  ERR_INVALID_PREV = -4,
+  ERR_INVALID_NEXT = -5,
+  ERR_INVALID_ENTRY = -6,
+  ERR_INVALID_ADDRESS = -7,
+  ERR_INVALID_LINKS = -8,
+  ERR_NUM_ENTRIES_MISMATCH = -9,
+  ERR_READ_FAILURE = -10
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ERRORS_H__
diff --git a/net/disk_cache/file.cc b/net/disk_cache/file.cc
new file mode 100644
index 0000000..36b023b
--- /dev/null
+++ b/net/disk_cache/file.cc
@@ -0,0 +1,313 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/file.h"
+
+#include "net/disk_cache/disk_cache.h"
+
+namespace {
+
+// This class implements FileIOCallback to perform IO operations
+// when the callback parameter of the operation is NULL. It turns an
+// asynchronous operation into a synchronous one: WaitForResult() sleeps
+// (alertably) until the completion APC has run on this thread.
+class SyncCallback: public disk_cache::FileIOCallback {
+ public:
+  SyncCallback() : called_(false) {}
+  ~SyncCallback() {}
+
+  virtual void OnFileIOComplete(int bytes_copied);
+  void WaitForResult(int* bytes_copied);
+ private:
+  bool called_;  // Set once the completion has been delivered.
+  int actual_;   // Byte count reported by the completion.
+};
+
+void SyncCallback::OnFileIOComplete(int bytes_copied) {
+  actual_ = bytes_copied;
+  called_ = true;
+}
+
+// Waits for the IO operation to complete. SleepEx with alertable == TRUE
+// lets queued APCs (such as IoCompletion below) run on this thread.
+void SyncCallback::WaitForResult(int* bytes_copied) {
+  for (;;) {
+    SleepEx(INFINITE, TRUE);
+    if (called_)
+      break;
+  }
+  *bytes_copied = actual_;
+}
+
+// Structure used for asynchronous operations.
+struct MyOverlapped {
+  OVERLAPPED overlapped;  // Must be the first member; see COMPILE_ASSERT.
+  disk_cache::File* file;
+  disk_cache::FileIOCallback* callback;
+  const void* buffer;
+  DWORD actual_bytes;
+  bool async;  // Invoke the callback from the completion.
+  bool called;  // Completion received.
+  bool delete_buffer;  // Delete the user buffer at completion.
+};
+
+// IoCompletion casts the OVERLAPPED* back to a MyOverlapped*, so the
+// OVERLAPPED member must live at offset 0.
+COMPILE_ASSERT(!offsetof(MyOverlapped, overlapped), starts_with_overlapped);
+
+}  // namespace
+
+namespace disk_cache {
+
+// APC routine invoked (on the thread that queued the IO) when an asynchronous
+// operation completes. |overlapped| is really the MyOverlapped record that
+// describes the operation; its flags decide who owns and frees what.
+void CALLBACK IoCompletion(DWORD error, DWORD actual_bytes,
+                           OVERLAPPED* overlapped) {
+  MyOverlapped* data = reinterpret_cast<MyOverlapped*>(overlapped);
+
+  if (error) {
+    DCHECK(!actual_bytes);
+    actual_bytes = static_cast<DWORD>(-1);  // Signal failure to the callback.
+    NOTREACHED();
+  }
+
+  if (data->delete_buffer) {
+    // Fire-and-forget write (PostWrite): release our reference and free both
+    // the caller's buffer and the bookkeeping record.
+    DCHECK(!data->callback);
+    data->file->Release();
+    // NOTE(review): |buffer| is a const void*, so this delete cannot invoke
+    // the deallocation matching the original allocation (undefined behavior
+    // for non-trivial cases) -- confirm how PostWrite buffers are allocated
+    // and delete them as that type instead.
+    delete data->buffer;
+    delete data;
+    return;
+  }
+
+  if (data->async) {
+    data->callback->OnFileIOComplete(static_cast<int>(actual_bytes));
+    data->file->Release();
+    delete data;
+  } else {
+    // Somebody is waiting for this so don't delete data and instead notify
+    // that we were called.
+    data->actual_bytes = actual_bytes;
+    data->file->Release();
+    data->called = true;
+  }
+}
+
+// Opens an existing file for overlapped (asynchronous) IO. In mixed mode a
+// second, non-overlapped handle to the same file is opened for synchronous
+// operations. Returns false on failure or if already initialized.
+bool File::Init(const std::wstring name) {
+  DCHECK(!init_);
+  if (init_)
+    return false;
+
+  handle_ = CreateFile(name.c_str(), GENERIC_READ | GENERIC_WRITE,
+                       FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING,
+                       FILE_FLAG_OVERLAPPED, NULL);
+
+  if (INVALID_HANDLE_VALUE == handle_)
+    return false;
+
+  // From here on the destructor is responsible for closing the handle(s).
+  init_ = true;
+  if (mixed_) {
+    sync_handle_ = CreateFile(name.c_str(), GENERIC_READ | GENERIC_WRITE,
+                              FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+                              OPEN_EXISTING, 0, NULL);
+
+    if (INVALID_HANDLE_VALUE == sync_handle_)
+      return false;
+  }
+
+  return true;
+}
+
+File::~File() {
+  // Only close handles if Init() actually opened them.
+  if (!init_)
+    return;
+
+  CloseHandle(handle_);
+  if (mixed_ && INVALID_HANDLE_VALUE != sync_handle_)
+    CloseHandle(sync_handle_);
+}
+
+// Synchronous read at |offset| through the non-overlapped handle; only
+// available in mixed mode. |offset| is limited to LONG_MAX because
+// SetFilePointer takes a signed LONG distance.
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+  if (!mixed_ || buffer_len > ULONG_MAX || offset > LONG_MAX)
+    return false;
+
+  DWORD ret = SetFilePointer(sync_handle_, static_cast<LONG>(offset), NULL,
+                             FILE_BEGIN);
+  if (INVALID_SET_FILE_POINTER == ret)
+    return false;
+
+  DWORD actual;
+  DWORD size = static_cast<DWORD>(buffer_len);
+  if (!ReadFile(sync_handle_, buffer, size, &actual, NULL))
+    return false;
+  return actual == size;
+}
+
+// Synchronous write at |offset| through the non-overlapped handle; only
+// available in mixed mode.
+// Fix: bound |offset| by LONG_MAX instead of ULONG_MAX -- SetFilePointer
+// takes a signed LONG, so larger offsets turned into negative seeks. This
+// also makes the check consistent with the synchronous Read() above.
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+  if (!mixed_ || buffer_len > ULONG_MAX || offset > LONG_MAX)
+    return false;
+
+  DWORD ret = SetFilePointer(sync_handle_, static_cast<LONG>(offset), NULL,
+                             FILE_BEGIN);
+  if (INVALID_SET_FILE_POINTER == ret)
+    return false;
+
+  DWORD actual;
+  DWORD size = static_cast<DWORD>(buffer_len);
+  if (!WriteFile(sync_handle_, buffer, size, &actual, NULL))
+    return false;
+  return actual == size;
+}
+
+// We have to increase the ref counter of the file before performing the IO to
+// prevent the completion to happen with an invalid handle (if the file is
+// closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+                FileIOCallback* callback, bool* completed) {
+  if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+    return false;
+
+  MyOverlapped* data = new MyOverlapped;
+  memset(data, 0, sizeof(*data));
+
+  // With no caller-supplied callback, block on a local one so the call
+  // behaves synchronously.
+  SyncCallback local_callback;
+  data->overlapped.Offset = static_cast<DWORD>(offset);
+  data->callback = callback ? callback : &local_callback;
+  data->file = this;
+
+  DWORD size = static_cast<DWORD>(buffer_len);
+  AddRef();
+
+  if (!ReadFileEx(handle_, buffer, size, &data->overlapped, &IoCompletion)) {
+    Release();
+    delete data;
+    return false;
+  }
+
+  if (callback) {
+    *completed = false;
+    // Let's check if the operation is already finished.
+    SleepEx(0, TRUE);
+    if (data->called) {
+      // The APC already ran; since |async| was still false it left |data|
+      // alive for us, so report synchronous completion and clean up here.
+      *completed = (data->actual_bytes == size);
+      DCHECK(data->actual_bytes == size);
+      delete data;
+      return *completed;
+    }
+    // From now on the APC owns |data| and will notify |callback|.
+    data->async = true;
+  } else {
+    // Invoke the callback and perform cleanup on the APC.
+    data->async = true;
+    int bytes_copied;
+    local_callback.WaitForResult(&bytes_copied);
+    if (static_cast<int>(buffer_len) != bytes_copied) {
+      NOTREACHED();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Asynchronous write with completion notification; see AsyncWrite.
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+                 FileIOCallback* callback, bool* completed) {
+  return AsyncWrite(buffer, buffer_len, offset, true, callback, completed);
+}
+
+// Fire-and-forget write: no completion notification, and |buffer| is freed
+// by the completion routine when the write finishes (see IoCompletion).
+bool File::PostWrite(const void* buffer, size_t buffer_len, size_t offset) {
+  return AsyncWrite(buffer, buffer_len, offset, false, NULL, NULL);
+}
+
+// Performs the actual asynchronous write. If |callback| is NULL and |notify|
+// is true the call is re-synchronized through a local SyncCallback; if both
+// are NULL/false this is a fire-and-forget write (PostWrite) that deletes
+// |buffer| on completion.
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+                      bool notify, FileIOCallback* callback, bool* completed) {
+  if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+    return false;
+
+  MyOverlapped* data = new MyOverlapped;
+  memset(data, 0, sizeof(*data));
+
+  SyncCallback local_callback;
+  data->overlapped.Offset = static_cast<DWORD>(offset);
+  data->callback = callback ? callback : &local_callback;
+  data->file = this;
+  if (!callback && !notify) {
+    // Fire-and-forget: the completion routine owns (and deletes) |buffer|.
+    data->delete_buffer = true;
+    data->callback = NULL;
+    data->buffer = buffer;
+  }
+
+  DWORD size = static_cast<DWORD>(buffer_len);
+  AddRef();
+
+  if (!WriteFileEx(handle_, buffer, size, &data->overlapped, &IoCompletion)) {
+    Release();
+    delete data;
+    return false;
+  }
+
+  if (callback) {
+    *completed = false;
+    // Check whether the operation already finished.
+    SleepEx(0, TRUE);
+    if (data->called) {
+      *completed = (data->actual_bytes == size);
+      DCHECK(data->actual_bytes == size);
+      delete data;
+      return *completed;
+    }
+    // From now on the APC owns |data| and will notify |callback|.
+    data->async = true;
+  } else if (notify) {
+    // Re-synchronize: wait here until the APC delivers the completion.
+    data->async = true;
+    int bytes_copied;
+    local_callback.WaitForResult(&bytes_copied);
+    if (static_cast<int>(buffer_len) != bytes_copied) {
+      NOTREACHED();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Truncates or extends (with zeros) the file to |length| bytes.
+// Fix: bound |length| by LONG_MAX instead of ULONG_MAX -- SetFilePointer's
+// distance parameter is a signed LONG, so larger values were implicitly
+// converted to negative seeks. The cast is now explicit as well.
+bool File::SetLength(size_t length) {
+  if (length > LONG_MAX)
+    return false;
+
+  LONG size = static_cast<LONG>(length);
+  if (INVALID_SET_FILE_POINTER == SetFilePointer(handle_, size, NULL,
+                                                 FILE_BEGIN))
+    return false;
+
+  return TRUE == SetEndOfFile(handle_);
+}
+
+// Returns the file size in bytes, 0 on failure, or ULONG_MAX if the size
+// does not fit in 32 bits.
+size_t File::GetLength() {
+  LARGE_INTEGER size;
+  if (!GetFileSizeEx(handle_, &size))
+    return 0;
+  if (size.HighPart)
+    return ULONG_MAX;
+
+  return static_cast<size_t>(size.LowPart);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/file.h b/net/disk_cache/file.h
new file mode 100644
index 0000000..db42026
--- /dev/null
+++ b/net/disk_cache/file.h
@@ -0,0 +1,112 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_H__
+#define NET_DISK_CACHE_FILE_H__
+
+#include "base/ref_counted.h"
+
+namespace disk_cache {
+
+// This interface is used to support asynchronous ReadData and WriteData calls.
+class FileIOCallback {
+ public:
+  // Notified of the actual number of bytes read or written. This value is
+  // negative if an error occurred.
+  virtual void OnFileIOComplete(int bytes_copied) = 0;
+
+ protected:
+  // Implementations are not owned (or deleted) through this interface, so
+  // the destructor is protected to prevent deletion through a base pointer.
+  ~FileIOCallback() {}
+};
+
+// Simple wrapper around a file that allows asynchronous operations.
+class File : public base::RefCounted<File> {
+  friend class base::RefCounted<File>;
+ public:
+  File() : init_(false), mixed_(false) {}
+  // mixed_mode set to true enables regular synchronous operations for the file.
+  explicit File(bool mixed_mode) : init_(false), mixed_(mixed_mode) {}
+
+  // Initializes the object to point to a given file. The file must already
+  // exist on disk, and allow shared read and write.
+  bool Init(const std::wstring name);
+
+#ifdef WIN32
+  HANDLE handle() const {
+    return handle_;
+  }
+#else
+  int file_descriptor() const {
+    return file_descriptor_;
+  }
+#endif
+
+  // Performs synchronous IO (only available when mixed_mode was requested).
+  bool Read(void* buffer, size_t buffer_len, size_t offset);
+  bool Write(const void* buffer, size_t buffer_len, size_t offset);
+
+  // Performs asynchronous IO. callback will be called when the IO completes,
+  // as an APC on the thread that queued the operation. If callback is NULL
+  // the call blocks until the operation completes.
+  bool Read(void* buffer, size_t buffer_len, size_t offset,
+            FileIOCallback* callback, bool* completed);
+  bool Write(const void* buffer, size_t buffer_len, size_t offset,
+             FileIOCallback* callback, bool* completed);
+
+  // Performs asynchronous writes, but doesn't notify when done. Automatically
+  // deletes buffer when done.
+  bool PostWrite(const void* buffer, size_t buffer_len, size_t offset);
+
+  // Sets the file's length. The file is truncated or extended with zeros to
+  // the new length.
+  bool SetLength(size_t length);
+  size_t GetLength();
+
+ protected:
+  virtual ~File();
+
+  // Performs the actual asynchronous write. If notify is set and there is no
+  // callback, the call will be re-synchronized.
+  bool AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+                  bool notify, FileIOCallback* callback, bool* completed);
+
+ private:
+  bool init_;   // True once Init() succeeded in opening the file.
+  bool mixed_;  // True if synchronous (non-overlapped) IO is also allowed.
+#ifdef WIN32
+  HANDLE handle_;  // Regular, asynchronous IO handle.
+  HANDLE sync_handle_;  // Synchronous IO handle.
+#else
+  int file_descriptor_;
+#endif
+
+  DISALLOW_EVIL_CONSTRUCTORS(File);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_H__
diff --git a/net/disk_cache/file_block.h b/net/disk_cache/file_block.h
new file mode 100644
index 0000000..50574b3
--- /dev/null
+++ b/net/disk_cache/file_block.h
@@ -0,0 +1,56 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_BLOCK_H__
+#define NET_DISK_CACHE_FILE_BLOCK_H__
+
+namespace disk_cache {
+
+// This interface exposes common functionality for a single block of data
+// stored on a file-block, regardless of the real type or size of the block.
+// Used to simplify loading / storing the block from disk.
+class FileBlock {
+ public:
+  virtual ~FileBlock() {}
+
+  // Returns a pointer to the actual data.
+  virtual void* buffer() const = 0;
+
+  // Returns the size of the block.
+  virtual size_t size() const = 0;
+
+  // Returns the file offset of this block.
+  virtual DWORD offset() const = 0;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_BLOCK_H__
diff --git a/net/disk_cache/file_lock.cc b/net/disk_cache/file_lock.cc
new file mode 100644
index 0000000..e78d567
--- /dev/null
+++ b/net/disk_cache/file_lock.cc
@@ -0,0 +1,52 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/file_lock.h"
+
+namespace disk_cache {
+
+// Acquires the lock: increments the header's |updating| counter so that a
+// crash mid-update leaves the counter non-zero and can be detected later.
+FileLock::FileLock(BlockFileHeader* header) {
+  updating_ = &header->updating;
+  (*updating_)++;
+  acquired_ = true;
+}
+
+// Re-acquires the lock after an explicit Unlock().
+// Fix: record that the lock is held again. Previously acquired_ was never
+// set here, so a Lock() following an Unlock() incremented the counter but
+// could never be released (Unlock() and the destructor saw acquired_ ==
+// false), permanently marking the file as being updated.
+void FileLock::Lock() {
+  if (acquired_)
+    return;
+  (*updating_)++;
+  acquired_ = true;
+}
+
+// Releases the lock.
+// Fix: clear acquired_ so the release is not performed twice. Previously an
+// explicit Unlock() followed by the destructor (which also calls Unlock())
+// decremented the header's |updating| counter twice, corrupting its value.
+void FileLock::Unlock() {
+  if (!acquired_)
+    return;
+  (*updating_)--;
+  acquired_ = false;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/file_lock.h b/net/disk_cache/file_lock.h
new file mode 100644
index 0000000..4134ad9
--- /dev/null
+++ b/net/disk_cache/file_lock.h
@@ -0,0 +1,70 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_LOCK_H__
+#define NET_DISK_CACHE_FILE_LOCK_H__
+
+#include "net/disk_cache/disk_format.h"
+
+namespace disk_cache {
+
+// This class implements a file lock that lives on the header of a memory mapped
+// file. This is NOT a thread related lock, it is a lock to detect corruption
+// of the file when the process crashes in the middle of an update.
+// The lock is acquired on the constructor and released on the destructor.
+// The typical use of the class is:
+// {
+// BlockFileHeader* header = GetFileHeader();
+// FileLock lock(header);
+// header->max_entries = num_entries;
+// // At this point the destructor is going to release the lock.
+// }
+// It is important to perform Lock() and Unlock() operations in the right order,
+// because otherwise the desired effect of the "lock" will not be achieved. If
+// the operations are inlined / optimized, the "locked" operations can happen
+// outside the lock.
+class FileLock {
+ public:
+ explicit FileLock(BlockFileHeader* header);
+ ~FileLock() {
+ Unlock();
+ }
+ // Virtual to make sure the compiler never inlines the calls.
+ virtual void Lock();
+ virtual void Unlock();
+ private:
+ bool acquired_;
+ volatile int32* updating_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_LOCK_H__
diff --git a/net/disk_cache/hash.cc b/net/disk_cache/hash.cc
new file mode 100644
index 0000000..1e83913
--- /dev/null
+++ b/net/disk_cache/hash.cc
@@ -0,0 +1,67 @@
+// From http://www.azillionmonkeys.com/qed/hash.html
+
+#include "net/disk_cache/hash.h"
+
+typedef uint32 uint32_t;
+typedef uint16 uint16_t;
+
+namespace disk_cache {
+
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+
+// Paul Hsieh's SuperFastHash (see URL at the top of this file). Consumes
+// |data| one 32-bit chunk (two 16-bit reads) at a time, mixing each chunk
+// into |hash|, then folds in the 1-3 leftover bytes and applies a final
+// avalanche so small input differences spread across all output bits.
+uint32 SuperFastHash(const char * data, int len) {
+  uint32_t hash = len, tmp;
+  int rem;
+
+  if (len <= 0 || data == NULL)
+    return 0;
+
+  rem = len & 3;
+  len >>= 2;
+
+  /* Main loop */
+  for (;len > 0; len--) {
+    hash += get16bits(data);
+    tmp = (get16bits(data + 2) << 11) ^ hash;
+    hash = (hash << 16) ^ tmp;
+    data += 2 * sizeof(uint16_t);
+    hash += hash >> 11;
+  }
+
+  /* Handle end cases */
+  switch (rem) {
+    case 3: hash += get16bits(data);
+            hash ^= hash << 16;
+            hash ^= data[sizeof(uint16_t)] << 18;
+            hash += hash >> 11;
+            break;
+    case 2: hash += get16bits(data);
+            hash ^= hash << 11;
+            hash += hash >> 17;
+            break;
+    case 1: hash += *data;
+            hash ^= hash << 10;
+            hash += hash >> 1;
+  }
+
+  /* Force "avalanching" of final 127 bits */
+  hash ^= hash << 3;
+  hash += hash >> 5;
+  hash ^= hash << 4;
+  hash += hash >> 17;
+  hash ^= hash << 25;
+  hash += hash >> 6;
+
+  return hash;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/hash.h b/net/disk_cache/hash.h
new file mode 100644
index 0000000..e71a678
--- /dev/null
+++ b/net/disk_cache/hash.h
@@ -0,0 +1,55 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_HASH_H__
+#define NET_DISK_CACHE_HASH_H__
+
+#include <string>
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+// From http://www.azillionmonkeys.com/qed/hash.html
+// This is the hash used on WebCore/platform/stringhash
+uint32 SuperFastHash(const char * data, int len);
+
+// Hashes |length| bytes starting at |key|.
+inline uint32 Hash(const char* key, size_t length) {
+  return SuperFastHash(key, static_cast<int>(length));
+}
+
+// Hashes the bytes of |key|. Empty keys hash to 0 (which is also what
+// SuperFastHash returns for a zero length).
+inline uint32 Hash(const std::string& key) {
+  return key.empty() ? 0
+                     : SuperFastHash(key.data(), static_cast<int>(key.size()));
+}
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_HASH_H__
diff --git a/net/disk_cache/mapped_file.cc b/net/disk_cache/mapped_file.cc
new file mode 100644
index 0000000..ee459da
--- /dev/null
+++ b/net/disk_cache/mapped_file.cc
@@ -0,0 +1,78 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+// Opens |name| and maps its first |size| bytes into memory. Returns the
+// address of the mapped view, or NULL on failure. Must be called only once.
+void* MappedFile::Init(const std::wstring name, size_t size) {
+  DCHECK(!init_);
+  if (init_ || !File::Init(name))
+    return NULL;
+
+  buffer_ = NULL;
+  init_ = true;  // The destructor cleans up partially-initialized state.
+  section_ = CreateFileMapping(handle(), NULL, PAGE_READWRITE, 0,
+                               static_cast<DWORD>(size), NULL);
+  if (!section_)
+    return NULL;
+
+  buffer_ = MapViewOfFile(section_, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, size);
+  DCHECK(buffer_);
+  view_size_ = size;
+
+  return buffer_;
+}
+
+MappedFile::~MappedFile() {
+  // Unmap the view before closing the section; the File destructor closes
+  // the underlying file handles.
+  if (!init_)
+    return;
+
+  if (buffer_) {
+    BOOL ret = UnmapViewOfFile(buffer_);
+    DCHECK(ret);
+  }
+
+  if (section_)
+    CloseHandle(section_);
+}
+
+// Reads |block| from disk. Blocks accessed through Load/Store live past the
+// mapped view, so the file offset is the block's offset plus the view size.
+bool MappedFile::Load(const FileBlock* block) {
+  size_t offset = block->offset() + view_size_;
+  return Read(block->buffer(), block->size(), offset);
+}
+
+// Writes |block| to disk, at the same past-the-view offset used by Load().
+bool MappedFile::Store(const FileBlock* block) {
+  size_t offset = block->offset() + view_size_;
+  return Write(block->buffer(), block->size(), offset);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mapped_file.h b/net/disk_cache/mapped_file.h
new file mode 100644
index 0000000..cb2da0c
--- /dev/null
+++ b/net/disk_cache/mapped_file.h
@@ -0,0 +1,77 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MAPPED_FILE_H__
+#define NET_DISK_CACHE_MAPPED_FILE_H__
+
+#include "base/ref_counted.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/file_block.h"
+
+namespace disk_cache {
+
+// This class implements a memory mapped file used to access block-files. The
+// idea is that the header and bitmap will be memory mapped all the time, and
+// the actual data for the blocks will be access asynchronously (most of the
+// time).
+class MappedFile : public File {
+ public:
+ MappedFile() : init_(false), File(true) {}
+
+ // Performs object initialization. name is the file to use, and size is the
+ // ammount of data to memory map from th efile. If size is 0, the whole file
+ // will be mapped in memory.
+ void* Init(const std::wstring name, size_t size);
+
+ void* buffer() const {
+ return buffer_;
+ }
+
+ // Loads or stores a given block from the backing file (synchronously).
+ bool Load(const FileBlock* block);
+ bool Store(const FileBlock* block);
+
+ protected:
+ virtual ~MappedFile();
+
+ private:
+ bool init_;
+ HANDLE section_;
+ void* buffer_; // Address of the memory mapped buffer.
+ size_t view_size_; // Size of the memory pointed by buffer_.
+
+ DISALLOW_EVIL_CONSTRUCTORS(MappedFile);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MAPPED_FILE_H__
diff --git a/net/disk_cache/mapped_file_unittest.cc b/net/disk_cache/mapped_file_unittest.cc
new file mode 100644
index 0000000..84d7fec
--- /dev/null
+++ b/net/disk_cache/mapped_file_unittest.cc
@@ -0,0 +1,139 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "base/file_util.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int g_cache_tests_max_id;
+volatile int g_cache_tests_received;
+volatile bool g_cache_tests_error;
+
+// Implementation of FileIOCallback for the tests.
+class FileCallbackTest: public disk_cache::FileIOCallback {
+ public:
+ explicit FileCallbackTest(int id) : id_(id), reuse_(0) {}
+ explicit FileCallbackTest(int id, bool reuse)
+ : id_(id), reuse_(reuse_ ? 0 : 1) {}
+ ~FileCallbackTest() {}
+
+ virtual void OnFileIOComplete(int bytes_copied);
+ private:
+ int id_;
+ int reuse_;
+};
+
// Sanity-checks each completion: the callback id must not exceed
// g_cache_tests_max_id, and a "reuse" callback must not fire too many times.
// Every completion increments the global received counter.
void FileCallbackTest::OnFileIOComplete(int bytes_copied) {
  if (id_ > g_cache_tests_max_id) {
    NOTREACHED();
    g_cache_tests_error = true;
  } else if (reuse_) {
    DCHECK(1 == reuse_);  // Fires on a second completion (debug builds).
    if (2 == reuse_)
      g_cache_tests_error = true;  // Release-build fallback for the DCHECK.
    reuse_++;
  }

  g_cache_tests_received++;
}
+
+// Wait up to 2 secs without callbacks, or until we receive expected callbacks.
+void WaitForCallbacks(int expected) {
+ if (!expected)
+ return;
+
+ int iterations = 0;
+ int last = 0;
+ while (iterations < 40) {
+ SleepEx(50, TRUE);
+ if (expected == g_cache_tests_received)
+ return;
+ if (last == g_cache_tests_received)
+ iterations++;
+ else
+ iterations = 0;
+ }
+}
+
+} // namespace
+
// Exercises the synchronous Read/Write path of MappedFile, using offsets past
// the 8 KB memory-mapped view.
TEST(DiskCacheTest, MappedFile_SyncIO) {
  std::wstring filename = GetCachePath();
  file_util::AppendToPath(&filename, L"a_test");
  scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
  ASSERT_TRUE(CreateCacheTestFile(filename.c_str()));
  ASSERT_TRUE(file->Init(filename, 8192));

  char buffer1[20];
  char buffer2[20];
  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
  strcpy_s(buffer1, "the data");  // NUL-terminate so EXPECT_STREQ is valid.
  EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 8192));
  EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 8192));
  EXPECT_STREQ(buffer1, buffer2);
}
+
// Exercises the asynchronous Read/Write path of MappedFile: issues I/O with a
// callback, then waits (alertable sleep) for the expected completions.
TEST(DiskCacheTest, MappedFile_AsyncIO) {
  std::wstring filename = GetCachePath();
  file_util::AppendToPath(&filename, L"a_test");
  scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
  ASSERT_TRUE(CreateCacheTestFile(filename.c_str()));
  ASSERT_TRUE(file->Init(filename, 8192));

  FileCallbackTest callback(1);
  // Reset the globals checked by FileCallbackTest::OnFileIOComplete.
  g_cache_tests_error = false;
  g_cache_tests_max_id = 0;
  g_cache_tests_received = 0;

  char buffer1[20];
  char buffer2[20];
  CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
  strcpy_s(buffer1, "the data");  // NUL-terminate so EXPECT_STREQ is valid.
  bool completed;
  // The write may complete synchronously; only pending ones add a callback.
  EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 1024 * 1024, &callback,
                          &completed));
  int expected = completed ? 0 : 1;

  g_cache_tests_max_id = 1;
  WaitForCallbacks(expected);

  EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 1024 * 1024, &callback,
                         &completed));
  if (!completed)
    expected++;

  WaitForCallbacks(expected);

  EXPECT_EQ(expected, g_cache_tests_received);
  EXPECT_FALSE(g_cache_tests_error);
  EXPECT_STREQ(buffer1, buffer2);
}
diff --git a/net/disk_cache/mem_backend_impl.cc b/net/disk_cache/mem_backend_impl.cc
new file mode 100644
index 0000000..10aadb8
--- /dev/null
+++ b/net/disk_cache/mem_backend_impl.cc
@@ -0,0 +1,276 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/mem_backend_impl.h"
+
+#include "net/disk_cache/mem_entry_impl.h"
+
namespace {

const int kDefaultCacheSize = 10 * 1024 * 1024;
const int kCleanUpMargin = 1024 * 1024;

// Returns the "low water" target for an eviction pass: one clean-up margin
// below |high_water|, never going negative.
int LowWaterAdjust(int high_water) {
  return (high_water < kCleanUpMargin) ? 0 : high_water - kCleanUpMargin;
}

}  // namespace
+
+namespace disk_cache {
+
// Factory for the memory-only cache backend. A max_bytes of 0 lets Init()
// pick a default size. Returns NULL (after logging) if initialization fails.
Backend* CreateInMemoryCacheBackend(int max_bytes) {
  MemBackendImpl* cache = new MemBackendImpl();
  cache->SetMaxSize(max_bytes);
  if (cache->Init())
    return cache;

  delete cache;
  LOG(ERROR) << "Unable to create cache";
  return NULL;
}
+
+// ------------------------------------------------------------------------
+
// Picks the cache size when SetMaxSize() was not given one, based on the
// amount of physical memory installed.
bool MemBackendImpl::Init() {
  // A non-zero max_size_ means SetMaxSize() already chose the size.
  if (max_size_)
    return true;

  MEMORYSTATUSEX memory_info;
  memory_info.dwLength = sizeof(memory_info);
  if (!GlobalMemoryStatusEx(&memory_info)) {
    // Cannot query physical memory; fall back to the fixed default (10 MB).
    max_size_ = kDefaultCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
  // reached on systems with more than 2.5 GB of RAM.
  memory_info.ullTotalPhys = memory_info.ullTotalPhys * 2 / 100;
  if (memory_info.ullTotalPhys >
      static_cast<unsigned int>(kDefaultCacheSize * 5))
    max_size_ = kDefaultCacheSize * 5;
  else
    max_size_ = static_cast<int>(memory_info.ullTotalPhys);

  return true;
}
+
MemBackendImpl::~MemBackendImpl() {
  // Doom() removes the entry from entries_, which invalidates iterators, so
  // always doom the first entry and re-fetch begin() rather than advancing.
  EntryMap::iterator it = entries_.begin();
  while (it != entries_.end()) {
    it->second->Doom();
    it = entries_.begin();
  }
  // Every entry must have returned its storage by now.
  DCHECK(!current_size_);
}
+
// Sets the maximum size for the total amount of data stored by this instance.
// Rejects negative values; 0 keeps the default size computed by Init().
bool MemBackendImpl::SetMaxSize(int max_bytes) {
  // max_size_ is an int32; make sure an int fits in it on this platform.
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  return true;
}
+
// Returns the number of entries currently stored (the size of the key map).
int32 MemBackendImpl::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
+
+bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it == entries_.end())
+ return false;
+
+ it->second->Open();
+
+ *entry = it->second;
+ return true;
+}
+
+bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end())
+ return false;
+
+ MemEntryImpl* cache_entry = new MemEntryImpl(this);
+ if (!cache_entry->CreateEntry(key)) {
+ delete entry;
+ return false;
+ }
+
+ rankings_.Insert(cache_entry);
+ entries_[key] = cache_entry;
+
+ *entry = cache_entry;
+ return true;
+}
+
+bool MemBackendImpl::DoomEntry(const std::string& key) {
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
+
+ entry->Doom();
+ entry->Close();
+ return true;
+}
+
// Permanently removes |entry| from the LRU list and the key map, then tells
// the entry to destroy itself (now, or when its last user closes it).
void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
  rankings_.Remove(entry);
  EntryMap::iterator it = entries_.find(entry->GetKey());
  if (it != entries_.end())
    entries_.erase(it);
  else
    NOTREACHED();  // Every live entry must be present in the map.

  entry->InternalDoom();
}
+
// Empties the cache by trimming it all the way down to a zero-byte target.
bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}
+
// Dooms every entry last used in [initial_time, end_time).
bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  // A null end_time means "from initial_time until now".
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* next = rankings_.GetNext(NULL);

  // rankings_ is ordered by last used, this will descend through the cache
  // and start dooming items before the end_time, and will stop once it reaches
  // an item used before the initial time.
  while (next) {
    MemEntryImpl* node = next;
    // Advance before dooming: Doom() unlinks |node| from the list.
    next = rankings_.GetNext(next);

    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time) {
      node->Doom();
    }
  }

  return true;
}
+
// We use OpenNextEntry to retrieve elements from the cache, until we get
// entries that are too old. The enumeration is restarted on every pass
// because dooming an entry invalidates the iterator.
bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
  for (;;) {
    Entry* entry;
    void* iter = NULL;
    if (!OpenNextEntry(&iter, &entry))
      return true;  // No entries left.

    if (initial_time > entry->GetLastUsed()) {
      // Entries come back most-recently-used first, so everything from here
      // on is older than initial_time.
      entry->Close();
      EndEnumeration(&iter);
      return true;
    }

    entry->Doom();
    entry->Close();
    EndEnumeration(&iter);  // Dooming the entry invalidates the iterator.
  }
}
+
+bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
+ MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
+ MemEntryImpl* node = rankings_.GetNext(current);
+ *next_entry = node;
+ *iter = node;
+
+ if (node)
+ node->Open();
+
+ return NULL != node;
+}
+
// Ends an enumeration. The memory backend keeps no per-iterator state, so
// clearing the cookie is all that is needed.
void MemBackendImpl::EndEnumeration(void** iter) {
  *iter = NULL;
}
+
+void MemBackendImpl::TrimCache(bool empty) {
+ MemEntryImpl* next = rankings_.GetPrev(NULL);
+
+ DCHECK(next);
+
+ int target_size = empty ? 0 : LowWaterAdjust(max_size_);
+ while (current_size_ > target_size && next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetPrev(next);
+ if (!node->InUse() || empty) {
+ node->Doom();
+ }
+ }
+
+ return;
+}
+
+void MemBackendImpl::AddStorageSize(int32 bytes) {
+ current_size_ += bytes;
+ DCHECK(current_size_ >= 0);
+
+ if (current_size_ > max_size_)
+ TrimCache(false);
+}
+
// Accounts for |bytes| of released data. (Note: "Substract" is a historical
// misspelling of "subtract"; the name is kept for existing callers.)
void MemBackendImpl::SubstractStorageSize(int32 bytes) {
  current_size_ -= bytes;
  DCHECK(current_size_ >= 0);
}
+
+void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (old_size >= new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+}
+
// Moves |node| to the head of the LRU list (most recently used position).
void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}
+
// Returns the maximum size for a single data stream: one eighth of the total
// cache size.
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mem_backend_impl.h b/net/disk_cache/mem_backend_impl.h
new file mode 100644
index 0000000..0804ad0
--- /dev/null
+++ b/net/disk_cache/mem_backend_impl.h
@@ -0,0 +1,104 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+#define NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+
+#include <hash_map>
+
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/mem_rankings.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
// This class implements the Backend interface. An object of this class handles
// the operations of the cache without writing to disk.
class MemBackendImpl : public Backend {
 public:
  MemBackendImpl() : max_size_(0), current_size_(0) {}
  ~MemBackendImpl();

  // Performs general initialization for this current instance of the cache,
  // picking a default max size if none was set.
  bool Init();

  // Backend interface.
  virtual int32 GetEntryCount() const;
  virtual bool OpenEntry(const std::string& key, Entry** entry);
  virtual bool CreateEntry(const std::string& key, Entry** entry);
  virtual bool DoomEntry(const std::string& key);
  virtual bool DoomAllEntries();
  virtual bool DoomEntriesBetween(const Time initial_time, const Time end_time);
  virtual bool DoomEntriesSince(const Time initial_time);
  virtual bool OpenNextEntry(void** iter, Entry** next_entry);
  virtual void EndEnumeration(void** iter);
  virtual void GetStats(
      std::vector<std::pair<std::string, std::string> >* stats) {}

  // Sets the maximum size for the total amount of data stored by this instance.
  bool SetMaxSize(int max_bytes);

  // Permanently deletes an entry.
  void InternalDoomEntry(MemEntryImpl* entry);

  // Updates the ranking information for an entry.
  void UpdateRank(MemEntryImpl* node);

  // A user data block is being created, extended or truncated.
  void ModifyStorageSize(int32 old_size, int32 new_size);

  // Returns the maximum size for a file to reside on the cache.
  int MaxFileSize() const;

 private:
  // Deletes entries from the cache until the current size is below the limit.
  // If empty is true, the whole cache will be trimmed, regardless of being in
  // use.
  void TrimCache(bool empty);

  // Handles the used storage count.
  void AddStorageSize(int32 bytes);
  void SubstractStorageSize(int32 bytes);

  typedef stdext::hash_map<std::string, MemEntryImpl*> EntryMap;

  EntryMap entries_;      // Maps each key to its live entry.
  MemRankings rankings_;  // Rankings to be able to trim the cache.
  int32 max_size_;        // Maximum data size for this instance.
  int32 current_size_;    // Total size of the data currently stored, in bytes.

  DISALLOW_EVIL_CONSTRUCTORS(MemBackendImpl);
};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
new file mode 100644
index 0000000..130b4da
--- /dev/null
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -0,0 +1,200 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/mem_entry_impl.h"
+
+#include "net/base/net_errors.h"
+#include "net/disk_cache/mem_backend_impl.h"
+
+namespace disk_cache {
+
+MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
+ doomed_ = false;
+ backend_ = backend;
+ ref_count_ = 0;
+ data_size_[0] = data_size_[1] = 0;
+}
+
MemEntryImpl::~MemEntryImpl() {
  // Return all the storage charged to this entry (both data streams plus the
  // key) back to the backend's accounting.
  backend_->ModifyStorageSize(data_size_[0], 0);
  backend_->ModifyStorageSize(data_size_[1], 0);
  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
}
+
// Performs the initialization of a new entry: records the key, stamps the
// times, takes the creator's reference and charges the key size to the cache.
bool MemEntryImpl::CreateEntry(const std::string& key) {
  key_ = key;
  last_modified_ = Time::Now();
  last_used_ = Time::Now();
  Open();  // The caller keeps a reference to the new entry.
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  return true;  // Creation cannot fail here.
}
+
+void MemEntryImpl::Close() {
+ ref_count_--;
+ DCHECK(ref_count_ >= 0);
+ if (!ref_count_ && doomed_)
+ delete this;
+}
+
+void MemEntryImpl::Open() {
+ ref_count_++;
+ DCHECK(ref_count_ >= 0);
+ DCHECK(!doomed_);
+}
+
// Returns true while at least one user holds a reference to this entry.
bool MemEntryImpl::InUse() {
  return ref_count_ > 0;
}
+
+void MemEntryImpl::Doom() {
+ if (doomed_)
+ return;
+ backend_->InternalDoomEntry(this);
+}
+
// Permanently destroys this entry: marks it doomed and, if no user holds a
// reference, deletes it immediately (otherwise the last Close() deletes it).
void MemEntryImpl::InternalDoom() {
  doomed_ = true;
  if (!ref_count_)
    delete this;
}
+
// Returns the key used to create this entry.
std::string MemEntryImpl::GetKey() const {
  return key_;
}

// Time of the last read or write through this entry (set by UpdateRank()).
Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

// Time of the last write through this entry (set by UpdateRank(true)).
Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}
+
+int32 MemEntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index > 1)
+ return 0;
+
+ return data_size_[index];
+}
+
// Copies up to buf_len bytes of stream |index| starting at |offset| into
// |buf|. Returns the number of bytes copied (clipped to the stored data), 0
// for reads at or past the end, or a net error code. The completion callback
// is unused: the memory-only cache always completes synchronously.
int MemEntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
                           net::CompletionCallback* completion_callback) {
  if (index < 0 || index > 1)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = GetDataSize(index);
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Clip the read to the stored data.
  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);  // Reading counts as a use, not a modification.

  memcpy(buf , &(data_[index])[offset], buf_len);
  return buf_len;
}
+
+int MemEntryImpl::WriteData(int index, int offset, const char* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate) {
+ if (index < 0 || index > 1)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+ // offset of buf_len could be negative numbers.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ return net::ERR_FAILED;
+ }
+
+ // Read the size at this point.
+ int entry_size = GetDataSize(index);
+
+ PrepareTarget(index, offset, buf_len);
+
+ if (entry_size < offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ } else if (truncate) {
+ if (entry_size > offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ }
+ }
+
+ UpdateRank(true);
+
+ if (!buf_len)
+ return 0;
+
+ memcpy(&(data_[index])[offset], buf, buf_len);
+ return buf_len;
+}
+
// Grows and cleans up the data buffer: ensures data_[index] can hold buf_len
// bytes at offset, and zero-fills any gap between the current end of the
// stored data and offset.
void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
  int entry_size = GetDataSize(index);

  if (entry_size >= offset + buf_len)
    return;  // Not growing the stored data.

  if (static_cast<int>(data_[index].size()) < offset + buf_len)
    data_[index].resize(offset + buf_len);

  if (offset <= entry_size)
    return;  // There is no "hole" on the stored data.

  // Cleanup the hole not written by the user. The point is to avoid returning
  // random stuff later on.
  memset(&(data_[index])[entry_size], 0, offset - entry_size);
}
+
+void MemEntryImpl::UpdateRank(bool modified) {
+ Time current = Time::Now();
+ last_used_ = current;
+
+ if (modified)
+ last_modified_ = current;
+
+ if (!doomed_)
+ backend_->UpdateRank(this);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
new file mode 100644
index 0000000..08f2983
--- /dev/null
+++ b/net/disk_cache/mem_entry_impl.h
@@ -0,0 +1,110 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_MEM_ENTRY_IMPL_H__
+#define NET_DISK_CACHE_MEM_ENTRY_IMPL_H__
+
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+class MemBackendImpl;
+
// This class implements the Entry interface for the memory-only cache. An
// object of this class represents a single entry on the cache.
class MemEntryImpl : public Entry {
 public:
  explicit MemEntryImpl(MemBackendImpl* backend);

  // Entry interface.
  virtual void Doom();
  virtual void Close();
  virtual std::string GetKey() const;
  virtual Time GetLastUsed() const;
  virtual Time GetLastModified() const;
  virtual int32 GetDataSize(int index) const;
  virtual int ReadData(int index, int offset, char* buf, int buf_len,
                       net::CompletionCallback* completion_callback);
  virtual int WriteData(int index, int offset, const char* buf, int buf_len,
                        net::CompletionCallback* completion_callback,
                        bool truncate);

  // Performs the initialization of a EntryImpl that will be added to the
  // cache.
  bool CreateEntry(const std::string& key);

  // Permanently destroys this entry.
  void InternalDoom();

  // Intrusive LRU-list links; used by MemRankings.
  MemEntryImpl* next() const {
    return next_;
  }

  MemEntryImpl* prev() const {
    return prev_;
  }

  void set_next(MemEntryImpl* next) {
    next_ = next;
  }

  void set_prev(MemEntryImpl* prev) {
    prev_ = prev;
  }

  // Reference counting: Open() adds a user; InUse() tells whether any remain.
  void Open();
  bool InUse();

 private:
  // The destructor is private: entries delete themselves once doomed and no
  // longer referenced.
  ~MemEntryImpl();

  // Grows and cleans up the data buffer.
  void PrepareTarget(int index, int offset, int buf_len);

  // Updates ranking information.
  void UpdateRank(bool modified);

  std::string key_;
  std::vector<char> data_[2];  // User data.
  int32 data_size_[2];         // Size of each data stream, in bytes.
  int ref_count_;              // Number of users currently holding this entry.

  MemEntryImpl* next_;  // Pointers for the LRU list.
  MemEntryImpl* prev_;
  Time last_modified_;  // LRU information.
  Time last_used_;
  MemBackendImpl* backend_;  // Back pointer to the cache.
  bool doomed_;  // True if this entry was removed from the cache.

  DISALLOW_EVIL_CONSTRUCTORS(MemEntryImpl);
};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_ENTRY_IMPL_H__
diff --git a/net/disk_cache/mem_rankings.cc b/net/disk_cache/mem_rankings.cc
new file mode 100644
index 0000000..13d1ccc
--- /dev/null
+++ b/net/disk_cache/mem_rankings.cc
@@ -0,0 +1,87 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/mem_rankings.h"
+
+#include "net/disk_cache/mem_entry_impl.h"
+
+namespace disk_cache {
+
+void MemRankings::Insert(MemEntryImpl* node) {
+ if (head_)
+ head_->set_prev(node);
+
+ if (!tail_)
+ tail_ = node;
+
+ node->set_prev(NULL);
+ node->set_next(head_);
+ head_ = node;
+}
+
+void MemRankings::Remove(MemEntryImpl* node) {
+ MemEntryImpl* prev = node->prev();
+ MemEntryImpl* next = node->next();
+
+ if (head_ == node)
+ head_ = next;
+
+ if (tail_ == node)
+ tail_ = prev;
+
+ if (prev)
+ prev->set_next(next);
+
+ if (next)
+ next->set_prev(prev);
+
+ node->set_next(NULL);
+ node->set_prev(NULL);
+}
+
// Moves |node| to the head of the list (most recently used position) by
// unlinking and re-inserting it.
void MemRankings::UpdateRank(MemEntryImpl* node) {
  Remove(node);
  Insert(node);
}
+
+MemEntryImpl* MemRankings::GetNext(MemEntryImpl* node) {
+ if (!node)
+ return head_;
+
+ return node->next();
+}
+
+MemEntryImpl* MemRankings::GetPrev(MemEntryImpl* node) {
+ if (!node)
+ return tail_;
+
+ return node->prev();
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mem_rankings.h b/net/disk_cache/mem_rankings.h
new file mode 100644
index 0000000..b262f5e
--- /dev/null
+++ b/net/disk_cache/mem_rankings.h
@@ -0,0 +1,69 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_MEM_RANKINGS_H__
+#define NET_DISK_CACHE_MEM_RANKINGS_H__
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class handles the ranking information for the memory-only cache.
+class MemRankings {
+ public:
+ MemRankings() : head_(NULL), tail_(NULL) {}
+ ~MemRankings() {}
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(MemEntryImpl* node);
+
+ // Removes a given entry from the LRU list.
+ void Remove(MemEntryImpl* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(MemEntryImpl* node);
+
+ // Iterates through the list.
+ MemEntryImpl* GetNext(MemEntryImpl* node);
+ MemEntryImpl* GetPrev(MemEntryImpl* node);
+
+ private:
+ MemEntryImpl* head_;
+ MemEntryImpl* tail_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MemRankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_RANKINGS_H__
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
new file mode 100644
index 0000000..81b5767
--- /dev/null
+++ b/net/disk_cache/rankings.cc
@@ -0,0 +1,697 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/rankings.h"
+
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+
+// This is used by crash_cache.exe to generate unit test files.
+extern disk_cache::RankCrashes g_rankings_crash = disk_cache::NO_CRASH;
+
+namespace {
+
+const wchar_t* kBlockName = L"\\data_";
+const int kHeadIndex = 0;
+const int kTailIndex = 1;
+const int kTransactionIndex = 2;
+const int kOperationIndex = 3;
+
+enum Operation {
+ INSERT = 1,
+ REMOVE
+};
+
+// This class provides a simple lock for the LRU list of rankings. Whenever an
+// entry is to be inserted or removed from the list, a transaction object should
+// be created to keep track of the operation. If the process crashes before
+// finishing the operation, the transaction record (stored as part of the user
+// data on the file header) can be used to finish the operation.
+class Transaction {
+ public:
+ // addr is the cache address of the node being inserted or removed. We want to
+ // avoid having the compiler doing optimizations on when to read or write
+ // from user_data because it is the basis of the crash detection. Maybe
+ // volatile is not enough for that, but it should be a good hint.
+ Transaction(volatile int32* user_data, disk_cache::Addr addr, Operation op);
+ ~Transaction();
+ private:
+ volatile int32* user_data_;
+ DISALLOW_EVIL_CONSTRUCTORS(Transaction);
+};
+
+Transaction::Transaction(volatile int32* user_data, disk_cache::Addr addr,
+ Operation op) : user_data_(user_data) {
+ DCHECK(!user_data_[kTransactionIndex]);
+ DCHECK(addr.is_initialized());
+ user_data_[kOperationIndex] = op;
+ user_data_[kTransactionIndex] = static_cast<int32>(addr.value());
+}
+
+Transaction::~Transaction() {
+ DCHECK(user_data_[kTransactionIndex]);
+ user_data_[kTransactionIndex] = 0;
+ user_data_[kOperationIndex] = 0;
+}
+
+// Code locations that can generate crashes.
+enum CrashLocation {
+ ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
+ ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
+};
+
+// Generates a crash on debug builds, according to the value of g_rankings_crash.
+// This is used by crash_cache.exe to generate unit-test files.
+void GenerateCrash(CrashLocation location) {
+#ifndef NDEBUG
+ if (disk_cache::NO_CRASH == g_rankings_crash)
+ return;
+ switch (location) {
+ case ON_INSERT_1:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_ONE_1:
+ case disk_cache::INSERT_LOAD_1:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ case ON_INSERT_2:
+ if (disk_cache::INSERT_EMPTY_1 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_INSERT_3:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_2:
+ case disk_cache::INSERT_ONE_2:
+ case disk_cache::INSERT_LOAD_2:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ case ON_INSERT_4:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_3:
+ case disk_cache::INSERT_ONE_3:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ case ON_REMOVE_1:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_1:
+ case disk_cache::REMOVE_HEAD_1:
+ case disk_cache::REMOVE_TAIL_1:
+ case disk_cache::REMOVE_LOAD_1:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ case ON_REMOVE_2:
+ if (disk_cache::REMOVE_ONE_2 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_REMOVE_3:
+ if (disk_cache::REMOVE_ONE_3 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_REMOVE_4:
+ if (disk_cache::REMOVE_HEAD_2 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_REMOVE_5:
+ if (disk_cache::REMOVE_TAIL_2 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_REMOVE_6:
+ if (disk_cache::REMOVE_TAIL_3 == g_rankings_crash)
+ TerminateProcess(GetCurrentProcess(), 0);
+ break;
+ case ON_REMOVE_7:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_4:
+ case disk_cache::REMOVE_LOAD_2:
+ case disk_cache::REMOVE_HEAD_3:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ case ON_REMOVE_8:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_HEAD_4:
+ case disk_cache::REMOVE_LOAD_3:
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+#endif
+}
+
+} // namespace
+
+namespace disk_cache {
+
+bool Rankings::Init(BackendImpl* backend) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ backend_ = backend;
+ MappedFile* file = backend_->File(Addr(RANKINGS, 0, 0, 0));
+
+ header_ = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ head_ = ReadHead();
+ tail_ = ReadTail();
+
+ if (header_->user[kTransactionIndex])
+ CompleteTransaction();
+
+ init_ = true;
+ return true;
+}
+
+void Rankings::Reset() {
+ init_ = false;
+ head_.set_value(0);
+ tail_.set_value(0);
+ header_ = NULL;
+}
+
+bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
+ if (!rankings->address().is_initialized())
+ return false;
+
+ if (!rankings->Load())
+ return false;
+
+ if (!SanityCheck(rankings, true)) {
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ if (!rankings->Data()->pointer) {
+ backend_->OnEvent(Stats::GET_RANKINGS);
+ return true;
+ }
+
+ backend_->OnEvent(Stats::OPEN_RANKINGS);
+
+ if (backend_->GetCurrentEntryId() != rankings->Data()->dirty) {
+ // We cannot trust this entry, but we cannot initiate a cleanup from this
+ // point (we may be in the middle of a cleanup already). Just get rid of
+ // the invalid pointer and continue; the entry will be deleted when detected
+ // from a regular open/create path.
+ rankings->Data()->pointer = NULL;
+ return true;
+ }
+
+ EntryImpl* cache_entry =
+ reinterpret_cast<EntryImpl*>(rankings->Data()->pointer);
+ rankings->SetData(cache_entry->rankings()->Data());
+ return true;
+}
+
+void Rankings::Insert(CacheRankingsBlock* node, bool modified) {
+ Trace("Insert 0x%x", node->address().value());
+ DCHECK(node->HasData());
+ Transaction lock(header_->user, node->address(), INSERT);
+ CacheRankingsBlock head(backend_->File(head_), head_);
+ if (head_.is_initialized()) {
+ if (!GetRanking(&head))
+ return;
+
+ if (head.Data()->prev != head_.value() && // Normal path.
+ head.Data()->prev != node->address().value()) { // FinishInsert().
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return;
+ }
+
+ head.Data()->prev = node->address().value();
+ head.Store();
+ GenerateCrash(ON_INSERT_1);
+ UpdateIterators(&head);
+ }
+
+ node->Data()->next = head_.value();
+ node->Data()->prev = node->address().value();
+ head_.set_value(node->address().value());
+
+ if (!tail_.is_initialized() || tail_.value() == node->address().value()) {
+ tail_.set_value(node->address().value());
+ node->Data()->next = tail_.value();
+ WriteTail();
+ GenerateCrash(ON_INSERT_2);
+ }
+
+ Time now = Time::Now();
+ node->Data()->last_used = now.ToInternalValue();
+ if (modified)
+ node->Data()->last_modified = now.ToInternalValue();
+ node->Store();
+ GenerateCrash(ON_INSERT_3);
+
+ // The last thing to do is move our head to point to a node already stored.
+ WriteHead();
+ GenerateCrash(ON_INSERT_4);
+}
+
+// If a, b and r are elements on the list, and we want to remove r, the possible
+// states for the objects if a crash happens are (where y(x, z) means for object
+// y, prev is x and next is z):
+// A. One element:
+// 1. r(r, r), head(r), tail(r) initial state
+// 2. r(r, r), head(0), tail(r) WriteHead()
+// 3. r(r, r), head(0), tail(0) WriteTail()
+// 4. r(0, 0), head(0), tail(0) next.Store()
+//
+// B. Remove a random element:
+// 1. a(x, r), r(a, b), b(r, y), head(x), tail(y) initial state
+// 2. a(x, r), r(a, b), b(a, y), head(x), tail(y) next.Store()
+// 3. a(x, b), r(a, b), b(a, y), head(x), tail(y) prev.Store()
+// 4. a(x, b), r(0, 0), b(a, y), head(x), tail(y) node.Store()
+//
+// C. Remove head:
+// 1. r(r, b), b(r, y), head(r), tail(y) initial state
+// 2. r(r, b), b(r, y), head(b), tail(y) WriteHead()
+// 3. r(r, b), b(b, y), head(b), tail(y) next.Store()
+// 4. r(0, 0), b(b, y), head(b), tail(y) prev.Store()
+//
+// D. Remove tail:
+// 1. a(x, r), r(a, r), head(x), tail(r) initial state
+// 2. a(x, r), r(a, r), head(x), tail(a) WriteTail()
+// 3. a(x, a), r(a, r), head(x), tail(a) prev.Store()
+// 4. a(x, a), r(0, 0), head(x), tail(a) next.Store()
+void Rankings::Remove(CacheRankingsBlock* node) {
+ Trace("Remove 0x%x (0x%x 0x%x)", node->address().value(), node->Data()->next,
+ node->Data()->prev);
+ DCHECK(node->HasData());
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
+ !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
+ LOG(WARNING) << "Invalid rankings info.";
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!GetRanking(&next) || !GetRanking(&prev))
+ return;
+
+ if (!CheckLinks(node, &prev, &next))
+ return;
+
+ Transaction lock(header_->user, node->address(), REMOVE);
+ prev.Data()->next = next.address().value();
+ next.Data()->prev = prev.address().value();
+ GenerateCrash(ON_REMOVE_1);
+
+ CacheAddr node_value = node->address().value();
+ if (node_value == head_.value() || node_value == tail_.value()) {
+ if (head_.value() == tail_.value()) {
+ head_.set_value(0);
+ tail_.set_value(0);
+
+ WriteHead();
+ GenerateCrash(ON_REMOVE_2);
+ WriteTail();
+ GenerateCrash(ON_REMOVE_3);
+ } else if (node_value == head_.value()) {
+ head_.set_value(next.address().value());
+ next.Data()->prev = next.address().value();
+
+ WriteHead();
+ GenerateCrash(ON_REMOVE_4);
+ } else if (node_value == tail_.value()) {
+ tail_.set_value(prev.address().value());
+ prev.Data()->next = prev.address().value();
+
+ WriteTail();
+ GenerateCrash(ON_REMOVE_5);
+
+ // Store the new tail to make sure we can undo the operation if we crash.
+ prev.Store();
+ GenerateCrash(ON_REMOVE_6);
+ }
+ }
+
+ // Nodes out of the list can be identified by invalid pointers.
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+
+ // The last thing to get to disk is the node itself, so before that there is
+ // enough info to recover.
+ next.Store();
+ GenerateCrash(ON_REMOVE_7);
+ prev.Store();
+ GenerateCrash(ON_REMOVE_8);
+ node->Store();
+ UpdateIterators(&next);
+ UpdateIterators(&prev);
+}
+
+// A crash in between Remove and Insert will lead to a dirty entry not on the
+// list. We want to avoid that case as much as we can (as while waiting for IO),
+// but the net effect is just an assert on debug when attempting to remove the
+// entry. Otherwise we'll need reentrant transactions, which is an overkill.
+void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified) {
+ Remove(node);
+ Insert(node, modified);
+}
+
+void Rankings::CompleteTransaction() {
+ Addr node_addr(static_cast<CacheAddr>(header_->user[kTransactionIndex]));
+ if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid rankings info.";
+ return;
+ }
+
+ Trace("CompleteTransaction 0x%x", node_addr.value());
+
+ CacheRankingsBlock node(backend_->File(node_addr), node_addr);
+ if (!node.Load())
+ return;
+
+ node.Data()->pointer = NULL;
+ node.Store();
+
+ // We want to leave the node inside the list. The entry must be marked as
+ // dirty, and will be removed later. Otherwise, we'll get assertions when
+ // attempting to remove the dirty entry.
+ if (INSERT == header_->user[kOperationIndex]) {
+ Trace("FinishInsert h:0x%x t:0x%x", head_.value(), tail_.value());
+ FinishInsert(&node);
+ } else if (REMOVE == header_->user[kOperationIndex]) {
+ Trace("RevertRemove h:0x%x t:0x%x", head_.value(), tail_.value());
+ RevertRemove(&node);
+ } else {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid operation to recover.";
+ }
+}
+
+void Rankings::FinishInsert(CacheRankingsBlock* node) {
+ header_->user[kTransactionIndex] = 0;
+ header_->user[kOperationIndex] = 0;
+ if (head_.value() != node->address().value()) {
+ if (tail_.value() == node->address().value()) {
+ // This part will be skipped by the logic of Insert.
+ node->Data()->next = tail_.value();
+ }
+
+ Insert(node, true);
+ }
+
+ // Tell the backend about this entry.
+ backend_->RecoveredEntry(node);
+}
+
+void Rankings::RevertRemove(CacheRankingsBlock* node) {
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
+ // The operation actually finished. Nothing to do.
+ header_->user[kTransactionIndex] = 0;
+ return;
+ }
+ if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(WARNING) << "Invalid rankings info.";
+ header_->user[kTransactionIndex] = 0;
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!next.Load() || !prev.Load())
+ return;
+
+ CacheAddr node_value = node->address().value();
+ DCHECK(prev.Data()->next == node_value ||
+ prev.Data()->next == prev_addr.value() ||
+ prev.Data()->next == next.address().value());
+ DCHECK(next.Data()->prev == node_value ||
+ next.Data()->prev == next_addr.value() ||
+ next.Data()->prev == prev.address().value());
+
+ if (node_value != prev_addr.value())
+ prev.Data()->next = node_value;
+ if (node_value != next_addr.value())
+ next.Data()->prev = node_value;
+
+ if (!head_.is_initialized() || !tail_.is_initialized()) {
+ head_.set_value(node_value);
+ tail_.set_value(node_value);
+ WriteHead();
+ WriteTail();
+ } else if (head_.value() == next.address().value()) {
+ head_.set_value(node_value);
+ prev.Data()->next = next.address().value();
+ WriteHead();
+ } else if (tail_.value() == prev.address().value()) {
+ tail_.set_value(node_value);
+ next.Data()->prev = prev.address().value();
+ WriteTail();
+ }
+
+ next.Store();
+ prev.Store();
+ header_->user[kTransactionIndex] = 0;
+ header_->user[kOperationIndex] = 0;
+}
+
+CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node) {
+ ScopedRankingsBlock next(this);
+ if (!node) {
+ if (!head_.is_initialized())
+ return NULL;
+ next.reset(new CacheRankingsBlock(backend_->File(head_), head_));
+ } else {
+ if (!tail_.is_initialized())
+ return NULL;
+ if (tail_.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->next);
+ next.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(next.get(), true);
+
+ if (!GetRanking(next.get()))
+ return NULL;
+
+ if (node && !CheckSingleLink(node, next.get()))
+ return NULL;
+
+ return next.release();
+}
+
+CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node) {
+ ScopedRankingsBlock prev(this);
+ if (!node) {
+ if (!tail_.is_initialized())
+ return NULL;
+ prev.reset(new CacheRankingsBlock(backend_->File(tail_), tail_));
+ } else {
+ if (!head_.is_initialized())
+ return NULL;
+ if (head_.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->prev);
+ prev.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(prev.get(), true);
+
+ if (!GetRanking(prev.get()))
+ return NULL;
+
+ if (node && !CheckSingleLink(prev.get(), node))
+ return NULL;
+
+ return prev.release();
+}
+
+void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
+ TrackRankingsBlock(node, false);
+}
+
+int Rankings::SelfCheck() {
+ if (!head_.is_initialized()) {
+ if (!tail_.is_initialized())
+ return 0;
+ return ERR_INVALID_TAIL;
+ }
+ if (!tail_.is_initialized())
+ return ERR_INVALID_HEAD;
+
+ if (tail_.is_separate_file())
+ return ERR_INVALID_TAIL;
+
+ if (head_.is_separate_file())
+ return ERR_INVALID_HEAD;
+
+ int num_items = 0;
+ Addr address(head_.value());
+ Addr prev(head_.value());
+ scoped_ptr<CacheRankingsBlock> node;
+ do {
+ node.reset(new CacheRankingsBlock(backend_->File(address), address));
+ node->Load();
+ if (node->Data()->prev != prev.value())
+ return ERR_INVALID_PREV;
+ if (!CheckEntry(node.get()))
+ return ERR_INVALID_ENTRY;
+
+ prev.set_value(address.value());
+ address.set_value(node->Data()->next);
+ if (!address.is_initialized() || address.is_separate_file())
+ return ERR_INVALID_NEXT;
+
+ num_items++;
+ } while (node->address().value() != address.value());
+ return num_items;
+}
+
+bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) {
+ const RankingsNode* data = node->Data();
+ if (!data->contents)
+ return false;
+
+ // It may have never been inserted.
+ if (from_list && (!data->last_used || !data->last_modified))
+ return false;
+
+ if ((!data->next && data->prev) || (data->next && !data->prev))
+ return false;
+
+ // Both pointers on zero is a node out of the list.
+ if (!data->next && !data->prev && from_list)
+ return false;
+
+ if ((node->address().value() == data->prev) && (head_.value() != data->prev))
+ return false;
+
+ if ((node->address().value() == data->next) && (tail_.value() != data->next))
+ return false;
+
+ return true;
+}
+
+Addr Rankings::ReadHead() {
+ CacheAddr head = static_cast<CacheAddr>(header_->user[kHeadIndex]);
+ return Addr(head);
+}
+
+Addr Rankings::ReadTail() {
+ CacheAddr tail = static_cast<CacheAddr>(header_->user[kTailIndex]);
+ return Addr(tail);
+}
+
+void Rankings::WriteHead() {
+ header_->user[kHeadIndex] = static_cast<int32>(head_.value());
+}
+
+void Rankings::WriteTail() {
+ header_->user[kTailIndex] = static_cast<int32>(tail_.value());
+}
+
+bool Rankings::CheckEntry(CacheRankingsBlock* rankings) {
+ if (!rankings->Data()->pointer)
+ return true;
+
+ // If this entry is not dirty, it is a serious problem.
+ return backend_->GetCurrentEntryId() != rankings->Data()->dirty;
+}
+
+bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next) {
+ if ((prev->Data()->next != node->address().value() &&
+ head_.value() != node->address().value()) ||
+ (next->Data()->prev != node->address().value() &&
+ tail_.value() != node->address().value())) {
+ LOG(ERROR) << "Inconsistent LRU.";
+
+ if (prev->Data()->next == next->address().value() &&
+ next->Data()->prev == prev->address().value()) {
+ // The list is actually ok, node is wrong.
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+ node->Store();
+ return false;
+ }
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ return true;
+}
+
+bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
+ CacheRankingsBlock* next) {
+ if (prev->Data()->next != next->address().value() ||
+ next->Data()->prev != prev->address().value()) {
+ LOG(ERROR) << "Inconsistent LRU.";
+
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ return true;
+}
+
+void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
+ bool start_tracking) {
+ if (!node)
+ return;
+
+ IteratorPair current(node->address().value(), node);
+
+ if (start_tracking)
+ iterators_.push_back(current);
+ else
+ iterators_.remove(current);
+}
+
+// We expect to have just a few iterators at any given time, maybe two or three,
+// but we could have more than one pointing at the same node. We walk the list
+// of cache iterators and update all that are pointing to the given node.
+void Rankings::UpdateIterators(CacheRankingsBlock* node) {
+ CacheAddr address = node->address().value();
+ for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+ ++it) {
+ if (it->first == address) {
+ CacheRankingsBlock* other = it->second;
+ other->Data()->next = node->Data()->next;
+ other->Data()->prev = node->Data()->prev;
+ }
+ }
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
new file mode 100644
index 0000000..898092d
--- /dev/null
+++ b/net/disk_cache/rankings.h
@@ -0,0 +1,178 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_RANKINGS_H__
+#define NET_DISK_CACHE_RANKINGS_H__
+
+#include <list>
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+#include "net/disk_cache/storage_block.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+
+// Type of crashes generated for the unit tests.
+enum RankCrashes {
+ NO_CRASH = 0,
+ INSERT_EMPTY_1,
+ INSERT_EMPTY_2,
+ INSERT_EMPTY_3,
+ INSERT_ONE_1,
+ INSERT_ONE_2,
+ INSERT_ONE_3,
+ INSERT_LOAD_1,
+ INSERT_LOAD_2,
+ REMOVE_ONE_1,
+ REMOVE_ONE_2,
+ REMOVE_ONE_3,
+ REMOVE_ONE_4,
+ REMOVE_HEAD_1,
+ REMOVE_HEAD_2,
+ REMOVE_HEAD_3,
+ REMOVE_HEAD_4,
+ REMOVE_TAIL_1,
+ REMOVE_TAIL_2,
+ REMOVE_TAIL_3,
+ REMOVE_LOAD_1,
+ REMOVE_LOAD_2,
+ REMOVE_LOAD_3,
+ MAX_CRASH
+};
+
+// This class handles the ranking information for the cache.
+class Rankings {
+ public:
+ // This class provides a specialized version of scoped_ptr, that calls
+ // Rankings whenever a CacheRankingsBlock is deleted, to keep track of cache
+ // iterators that may go stale.
+ class ScopedRankingsBlock : public scoped_ptr<CacheRankingsBlock> {
+ public:
+ explicit ScopedRankingsBlock(Rankings* rankings) : rankings_(rankings) {}
+ ScopedRankingsBlock(Rankings* rankings, CacheRankingsBlock* node)
+ : rankings_(rankings), scoped_ptr<CacheRankingsBlock>(node) {}
+
+ ~ScopedRankingsBlock() {
+ rankings_->FreeRankingsBlock(get());
+ }
+
+ // scoped_ptr::reset will delete the object.
+ void reset(CacheRankingsBlock* p = NULL) {
+ if (p != get())
+ rankings_->FreeRankingsBlock(get());
+ scoped_ptr::reset(p);
+ }
+
+ private:
+ Rankings* rankings_;
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedRankingsBlock);
+ };
+
+ Rankings()
+ : init_(false), head_(0), tail_(0) {}
+ ~Rankings() {}
+
+ bool Init(BackendImpl* backend);
+
+ // Restores original state, leaving the object ready for initialization.
+ void Reset();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(CacheRankingsBlock* node, bool modified);
+
+ // Removes a given entry from the LRU list.
+ void Remove(CacheRankingsBlock* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(CacheRankingsBlock* node, bool modified);
+
+ // Iterates through the list.
+ CacheRankingsBlock* GetNext(CacheRankingsBlock* node);
+ CacheRankingsBlock* GetPrev(CacheRankingsBlock* node);
+ void FreeRankingsBlock(CacheRankingsBlock* node);
+
+ // Performs a simple self-check of the list, and returns the number of items
+ // or an error code (negative value).
+ int SelfCheck();
+
+ // Returns false if the entry is clearly invalid. from_list is true if the
+ // node comes from the LRU list.
+ bool SanityCheck(CacheRankingsBlock* node, bool from_list);
+
+ private:
+ typedef std::pair<CacheAddr, CacheRankingsBlock*> IteratorPair;
+ typedef std::list<IteratorPair> IteratorList;
+
+ Addr ReadHead();
+ Addr ReadTail();
+ void WriteHead();
+ void WriteTail();
+
+ // Gets the rankings information for a given rankings node.
+ bool GetRanking(CacheRankingsBlock* rankings);
+
+ // Finishes a list modification after a crash.
+ void CompleteTransaction();
+ void FinishInsert(CacheRankingsBlock* rankings);
+ void RevertRemove(CacheRankingsBlock* rankings);
+
+ // Returns false if this entry will not be recognized as dirty (called during
+ // selfcheck).
+ bool CheckEntry(CacheRankingsBlock* rankings);
+
+ // Returns false if node is not properly linked.
+ bool CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next);
+
+ // Checks the links between two consecutive nodes.
+ bool CheckSingleLink(CacheRankingsBlock* prev, CacheRankingsBlock* next);
+
+ // Controls tracking of nodes used for enumerations.
+ void TrackRankingsBlock(CacheRankingsBlock* node, bool start_tracking);
+
+ // Updates the iterators whenever node is being changed.
+ void UpdateIterators(CacheRankingsBlock* node);
+
+ bool init_;
+ Addr head_;
+ Addr tail_;
+ BlockFileHeader* header_; // Header of the block-file used to store rankings.
+ BackendImpl* backend_;
+ IteratorList iterators_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(Rankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_RANKINGS_H__
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
new file mode 100644
index 0000000..5a5df7c
--- /dev/null
+++ b/net/disk_cache/stats.cc
@@ -0,0 +1,258 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/disk_cache/stats.h"
+
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "net/disk_cache/backend_impl.h"
+
+namespace {
+
+// Magic number stored at the start of the stats record on disk, used to
+// detect reads of unrelated data.
+const int32 kDiskSignature = 0xF01427E0;
+
+// On-disk layout of the stats record. |size| captures the struct size at the
+// time it was written, so layout growth can be detected (see LoadStats).
+struct OnDiskStats {
+  int32 signature;
+  int size;
+  int data_sizes[disk_cache::Stats::kDataSizesLength];
+  int64 counters[disk_cache::Stats::MAX_COUNTER];
+};
+
+// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
+// Zero and negative inputs are treated as the raw unsigned bit pattern.
+int LogBase2(int32 number) {
+  unsigned int value = static_cast<unsigned int>(number);
+  unsigned int result = 0;
+
+  // Binary-search the highest set bit, folding each shift amount into the
+  // result: after the five steps, result == floor(log2(value)).
+  if (value & 0xFFFF0000) {
+    value >>= 16;
+    result |= 16;
+  }
+  if (value & 0xFF00) {
+    value >>= 8;
+    result |= 8;
+  }
+  if (value & 0xF0) {
+    value >>= 4;
+    result |= 4;
+  }
+  if (value & 0xC) {
+    value >>= 2;
+    result |= 2;
+  }
+  if (value & 0x2) {
+    value >>= 1;
+    result |= 1;
+  }
+  return static_cast<int>(result);
+}
+
+// Human-readable labels for Stats::Counters, indexed by the enum value; the
+// COMPILE_ASSERT below keeps this table in sync with the enum.
+static const char* kCounterNames[] = {
+  "Open miss",
+  "Open hit",
+  "Create miss",
+  "Create hit",
+  "Create error",
+  "Trim entry",
+  "Doom entry",
+  "Doom cache",
+  "Invalid entry",
+  "Open entries",
+  "Max entries",
+  "Timer",
+  "Read data",
+  "Write data",
+  "Open rankings",
+  "Get rankings",
+  "Fatal error",
+};
+COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
+               update_the_names);
+
+} // namespace
+
+namespace disk_cache {
+
+// Reads the on-disk stats record stored at |address| into |stats|. Returns
+// false if the backing file cannot be found, the read fails, or the data does
+// not carry the expected signature.
+bool LoadStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
+  MappedFile* file = backend->File(address);
+  if (!file)
+    return false;
+
+  size_t offset = address.start_block() * address.BlockSize() +
+                  kBlockHeaderSize;
+  if (!file->Read(stats, sizeof(*stats), offset))
+    return false;
+
+  if (stats->signature != kDiskSignature)
+    return false;
+
+  // We don't want to discard the whole cache every time we have one extra
+  // counter; just reset them to zero.
+  if (stats->size != sizeof(*stats))
+    memset(stats, 0, sizeof(*stats));
+
+  return true;
+}
+
+// Writes |stats| to its on-disk location at |address|. Returns false if the
+// backing file cannot be found or the write fails.
+bool StoreStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
+  MappedFile* file = backend->File(address);
+  if (!file)
+    return false;
+
+  size_t offset = address.start_block() * address.BlockSize() +
+                  kBlockHeaderSize;
+  return file->Write(stats, sizeof(*stats), offset);
+}
+
+// Allocates disk space for a brand-new stats record, zero-initializes |stats|
+// in memory and persists it. On success, |address| holds the new location.
+bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
+  if (!backend->CreateBlock(BLOCK_256, 2, address))
+    return false;
+
+  // If we have more than 512 bytes of counters, change kDiskSignature so we
+  // don't overwrite something else (LoadStats must fail).
+  COMPILE_ASSERT(sizeof(*stats) <= 256 * 2, use_more_blocks);
+  memset(stats, 0, sizeof(*stats));
+  stats->signature = kDiskSignature;
+  stats->size = sizeof(*stats);
+
+  return StoreStats(backend, *address, stats);
+}
+
+// Loads the stats from |*storage_addr| if initialized, or creates a fresh
+// record (updating |*storage_addr| with its location). On success the on-disk
+// values are copied into the in-memory histogram and counters.
+bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
+  backend_ = backend;
+
+  OnDiskStats stats;
+  Addr address(*storage_addr);
+  if (address.is_initialized()) {
+    if (!LoadStats(backend, address, &stats))
+      return false;
+  } else {
+    if (!CreateStats(backend, &address, &stats))
+      return false;
+    *storage_addr = address.value();
+  }
+
+  storage_addr_ = address.value();
+
+  memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
+  memcpy(counters_, stats.counters, sizeof(counters_));
+
+  return true;
+}
+
+// Persists the in-memory stats back to disk. A null backend_ means Init()
+// never succeeded, so there is nothing to save.
+Stats::~Stats() {
+  if (!backend_)
+    return;
+
+  OnDiskStats stats;
+  stats.signature = kDiskSignature;
+  stats.size = sizeof(stats);
+  memcpy(stats.data_sizes, data_sizes_, sizeof(data_sizes_));
+  memcpy(stats.counters, counters_, sizeof(counters_));
+
+  Addr address(storage_addr_);
+  // Best effort: a failed store here is silently ignored.
+  StoreStats(backend_, address, &stats);
+}
+
+// The array will be filled this way:
+// index size
+// 0 [0, 1024)
+// 1 [1024, 2048)
+// 2 [2048, 4096)
+// 3 [4K, 6K)
+// ...
+// 10 [18K, 20K)
+// 11 [20K, 24K)
+// 12 [24k, 28K)
+// ...
+// 15 [36k, 40K)
+// 16 [40k, 64K)
+// 17 [64K, 128K)
+// 18 [128K, 256K)
+// ...
+// 23 [4M, 8M)
+// 24 [8M, 16M)
+// 25 [16M, 32M)
+// 26 [32M, 64M)
+// 27 [64M, ...)
+// Maps a data size to its histogram bucket, per the table above: linear 2K
+// buckets up to 20K, linear 4K buckets up to 40K, then log2 buckets with the
+// last bucket absorbing everything past 64MB.
+int Stats::GetStatsBucket(int32 size) {
+  if (size < 1024)
+    return 0;
+
+  // 10 slots more, until 20K.
+  if (size < 20 * 1024)
+    return size / 2048 + 1;
+
+  // 5 slots more, from 20K to 40K.
+  if (size < 40 * 1024)
+    return (size - 20 * 1024) / 4096 + 11;
+
+  // From this point on, use a logarithmic scale. LogBase2(40K) + 1 == 16, so
+  // the scales join up with no gap.
+  int result = LogBase2(size) + 1;
+
+  COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
+  if (result >= kDataSizesLength)
+    result = kDataSizesLength - 1;
+
+  return result;
+}
+
+// Updates the data-size histogram when an entry's stored size changes: the
+// bucket for the old size is decremented and the one for the new size is
+// incremented (see GetStatsBucket for the bucket layout). A size of zero
+// means "no data" and is not counted on either side.
+void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
+  int new_index = GetStatsBucket(new_size);
+  int old_index = GetStatsBucket(old_size);
+
+  if (new_size)
+    data_sizes_[new_index]++;
+
+  if (old_size)
+    data_sizes_[old_index]--;
+}
+
+// Increments the counter for |an_event|.
+void Stats::OnEvent(Counters an_event) {
+  // Valid counters live in [MIN_COUNTER, MAX_COUNTER). The check must be a
+  // conjunction: an "||" here is satisfied by any value and catches nothing.
+  DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
+  counters_[an_event]++;
+}
+
+// Overwrites the value of |counter| with |value|.
+void Stats::SetCounter(Counters counter, int64 value) {
+  // Valid counters live in [MIN_COUNTER, MAX_COUNTER). The check must be a
+  // conjunction: an "||" here is satisfied by any value and catches nothing.
+  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+  counters_[counter] = value;
+}
+
+// Returns the current value of |counter|.
+int64 Stats::GetCounter(Counters counter) const {
+  // Valid counters live in [MIN_COUNTER, MAX_COUNTER). The check must be a
+  // conjunction: an "||" here is satisfied by any value and catches nothing.
+  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+  return counters_[counter];
+}
+
+// Appends every histogram bucket and every counter to |items| as
+// (name, hex value) string pairs.
+void Stats::GetItems(StatsItems* items) {
+  std::pair<std::string, std::string> item;
+  for (int i = 0; i < kDataSizesLength; i++) {
+    item.first = StringPrintf("Size%02d", i);
+    item.second = StringPrintf("0x%08x", data_sizes_[i]);
+    items->push_back(item);
+  }
+
+  // Start at MIN_COUNTER itself: MIN_COUNTER aliases OPEN_MISS, a real
+  // counter with an entry in kCounterNames, so skipping it would silently
+  // drop the "Open miss" statistic.
+  for (int i = MIN_COUNTER; i < MAX_COUNTER; i++) {
+    item.first = kCounterNames[i];
+    // NOTE(review): "%I64x" is an MSVC-specific 64-bit format; fine while
+    // this file is Windows-only.
+    item.second = StringPrintf("0x%I64x", counters_[i]);
+    items->push_back(item);
+  }
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/stats.h b/net/disk_cache/stats.h
new file mode 100644
index 0000000..f862116
--- /dev/null
+++ b/net/disk_cache/stats.h
@@ -0,0 +1,98 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_STATS_H__
+#define NET_DISK_CACHE_STATS_H__
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+
+typedef std::vector<std::pair<std::string, std::string> > StatsItems;
+
+// This class stores cache-specific usage information, for tuning purposes.
+class Stats {
+ public:
+  static const int kDataSizesLength = 28;
+  enum Counters {
+    MIN_COUNTER = 0,
+    OPEN_MISS = MIN_COUNTER,
+    OPEN_HIT,
+    CREATE_MISS,
+    CREATE_HIT,
+    CREATE_ERROR,
+    TRIM_ENTRY,
+    DOOM_ENTRY,
+    DOOM_CACHE,
+    INVALID_ENTRY,
+    OPEN_ENTRIES,  // Average number of open entries.
+    MAX_ENTRIES,  // Maximum number of open entries.
+    TIMER,
+    READ_DATA,
+    WRITE_DATA,
+    OPEN_RANKINGS,  // An entry has to be read just to modify rankings.
+    GET_RANKINGS,  // We got the ranking info without reading the whole entry.
+    FATAL_ERROR,
+    MAX_COUNTER
+  };
+
+  Stats() : backend_(NULL) {}
+  // Saves the stats back to disk (see stats.cc).
+  ~Stats();
+
+  // Loads (or creates) the on-disk stats record; |*storage_addr| is updated
+  // with the record's address when a new one is created.
+  bool Init(BackendImpl* backend, uint32* storage_addr);
+
+  // Tracks changes to the storage space used by an entry.
+  void ModifyStorageStats(int32 old_size, int32 new_size);
+
+  // Tracks general events.
+  void OnEvent(Counters an_event);
+  void SetCounter(Counters counter, int64 value);
+  int64 GetCounter(Counters counter) const;
+
+  // Appends all stats as (name, hex value) string pairs to |items|.
+  void GetItems(StatsItems* items);
+
+ private:
+  // Maps a data size to its histogram bucket.
+  int GetStatsBucket(int32 size);
+
+  BackendImpl* backend_;
+  uint32 storage_addr_;  // Address of the on-disk stats record.
+  int data_sizes_[kDataSizesLength];  // Histogram of entry data sizes.
+  int64 counters_[MAX_COUNTER];
+
+  DISALLOW_EVIL_CONSTRUCTORS(Stats);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STATS_H__
diff --git a/net/disk_cache/storage_block-inl.h b/net/disk_cache/storage_block-inl.h
new file mode 100644
index 0000000..35ba95b
--- /dev/null
+++ b/net/disk_cache/storage_block-inl.h
@@ -0,0 +1,152 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_DISK_CACHE_CACHE_INTERNAL_INL_H__
+#define NET_DISK_CACHE_CACHE_INTERNAL_INL_H__
+
+#include "net/disk_cache/storage_block.h"
+
+#include "net/disk_cache/trace.h"
+
+namespace disk_cache {
+
+// Builds a block on |file| at |address|. No data is read here; the buffer is
+// allocated lazily (see Data()/Load()). Addresses spanning more than one file
+// block are flagged as "extended" so size() and AllocateData() account for it.
+template<typename T> StorageBlock<T>::StorageBlock(MappedFile* file,
+                                                   Addr address)
+    : file_(file), address_(address), data_(NULL), modified_(false),
+      own_data_(false), extended_(false) {
+  if (address.num_blocks() > 1)
+    extended_ = true;
+  DCHECK(!address.is_initialized() || sizeof(*data_) == address.BlockSize());
+}
+
+// Flushes any pending modification (see set_modified) and releases the data
+// buffer if this object owns it.
+template<typename T> StorageBlock<T>::~StorageBlock() {
+  if (modified_)
+    Store();
+  if (own_data_) {
+    if (!extended_) {
+      delete data_;
+    } else {
+      // Extended buffers are created by AllocateData() as a raw char array
+      // with a placement-new'd T at the start, so they must be destroyed and
+      // released the same way; a plain `delete data_` would pair a char[]
+      // allocation with a scalar T deallocation, which is undefined behavior.
+      data_->~T();
+      delete[] reinterpret_cast<char*>(data_);
+    }
+  }
+}
+
+// FileBlock interface: raw pointer to the in-memory copy of the data (may be
+// NULL if no buffer has been attached or allocated yet).
+template<typename T> void* StorageBlock<T>::buffer() const {
+  return data_;
+}
+
+// FileBlock interface: number of bytes this block occupies. A regular block
+// covers a single T; an extended block covers one T-sized slot per file block.
+template<typename T> size_t StorageBlock<T>::size() const {
+  return extended_ ? address_.num_blocks() * sizeof(*data_) : sizeof(*data_);
+}
+
+// FileBlock interface: byte offset of this block within the block-file.
+// NOTE(review): unlike Load/StoreStats in stats.cc, no kBlockHeaderSize is
+// added here — presumably MappedFile accounts for the header; verify there.
+template<typename T> DWORD StorageBlock<T>::offset() const {
+  return address_.start_block() * address_.BlockSize();
+}
+
+// Allows the override of dummy values passed on the constructor. Fails (and
+// asserts) if the object was already bound to a file or address.
+template<typename T> bool StorageBlock<T>::LazyInit(MappedFile* file,
+                                                    Addr address) {
+  if (file_ || address_.is_initialized()) {
+    NOTREACHED();
+    return false;
+  }
+  file_ = file;
+  address_.set_value(address.value());
+  if (address.num_blocks() > 1)
+    extended_ = true;
+
+  DCHECK(sizeof(*data_) == address.BlockSize());
+  return true;
+}
+
+// Shares the memory provided by another instance: any owned buffer is freed
+// and data_ now aliases |other| (own_data_ is cleared so we won't free it).
+// Must not be called while there is an unsaved modification.
+template<typename T> void StorageBlock<T>::SetData(T* other) {
+  DCHECK(!modified_);
+  if (own_data_) {
+    delete data_;
+    own_data_ = false;
+  }
+  data_ = other;
+}
+
+// Marks the in-memory data as dirty so the destructor will Store() it.
+template<typename T> void StorageBlock<T>::set_modified() {
+  DCHECK(data_);
+  modified_ = true;
+}
+
+// Returns the internal storage, allocating a (default-constructed, not
+// loaded-from-disk) buffer on first use.
+template<typename T> T* StorageBlock<T>::Data() {
+  if (!data_)
+    AllocateData();
+  return data_;
+}
+
+// Returns true if a data buffer (owned or shared) is attached to this object.
+template<typename T> bool StorageBlock<T>::HasData() const {
+  return data_ != NULL;
+}
+
+// Returns (a copy of) the cache address this block is bound to.
+template<typename T> const Addr StorageBlock<T>::address() const {
+  return address_;
+}
+
+// Reads this block from disk, allocating the buffer if needed. A successful
+// load clears the modified flag (disk and memory now agree). Returns false —
+// with a log and trace record — if there is no file or the read fails.
+template<typename T> bool StorageBlock<T>::Load() {
+  if (file_) {
+    if (!data_)
+      AllocateData();
+
+    if (file_->Load(this)) {
+      modified_ = false;
+      return true;
+    }
+  }
+  LOG(WARNING) << "Failed data load.";
+  Trace("Failed data load.");
+  return false;
+}
+
+// Writes this block to disk. A successful store clears the modified flag.
+// Returns false — with a log and trace record — if there is no file or the
+// write fails.
+template<typename T> bool StorageBlock<T>::Store() {
+  if (file_) {
+    if (file_->Store(this)) {
+      modified_ = false;
+      return true;
+    }
+  }
+  LOG(ERROR) << "Failed data store.";
+  Trace("Failed data store.");
+  return false;
+}
+
+// Allocates the data buffer and takes ownership of it. Regular blocks get a
+// plain T; extended blocks get a raw char array sized for all their file
+// blocks, with a single T constructed at its start via placement new — so the
+// buffer must be released in a matching way, not with a plain delete.
+template<typename T> void StorageBlock<T>::AllocateData() {
+  DCHECK(!data_);
+  if (!extended_) {
+    data_ = new T;
+  } else {
+    void* buffer = new char[address_.num_blocks() * sizeof(*data_)];
+    data_ = new(buffer) T;
+  }
+  own_data_ = true;
+}
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_CACHE_INTERNAL_INL_H__
diff --git a/net/disk_cache/storage_block.h b/net/disk_cache/storage_block.h
new file mode 100644
index 0000000..7c100fd
--- /dev/null
+++ b/net/disk_cache/storage_block.h
@@ -0,0 +1,107 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_STORAGE_BLOCK_H__
+#define NET_DISK_CACHE_STORAGE_BLOCK_H__
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace disk_cache {
+
+class EntryImpl;
+
+// This class encapsulates common behavior of a single "block" of data that is
+// stored on a block-file. It implements the FileBlock interface, so it can be
+// serialized directly to the backing file.
+// This object provides a memory buffer for the related data, and it can be used
+// to actually share that memory with another instance of the class.
+//
+// The following example shows how to share storage with another object:
+// StorageBlock<TypeA> a(file, address);
+// StorageBlock<TypeB> b(file, address);
+// a.Load();
+// DoSomething(a.Data());
+// b.SetData(a.Data());
+// ModifySomething(b.Data());
+// // Data modified on the previous call will be saved by b's destructor.
+// b.set_modified();
+template<typename T>
+class StorageBlock : public FileBlock {
+ public:
+  StorageBlock(MappedFile* file, Addr address);
+  virtual ~StorageBlock();
+
+  // FileBlock interface.
+  virtual void* buffer() const;
+  virtual size_t size() const;
+  virtual DWORD offset() const;
+
+  // Allows the override of dummy values passed on the constructor.
+  bool LazyInit(MappedFile* file, Addr address);
+
+  // Sets the internal storage to share the memory provided by other instance.
+  void SetData(T* other);
+
+  // Sets the object to lazily save the in-memory data on destruction.
+  void set_modified();
+
+  // Gets a pointer to the internal storage (allocates storage if needed).
+  T* Data();
+
+  // Returns true if there is data associated with this object.
+  bool HasData() const;
+
+  const Addr address() const;
+
+  // Loads and stores the data.
+  bool Load();
+  bool Store();
+
+ private:
+  // Allocates (and takes ownership of) the data buffer.
+  void AllocateData();
+
+  T* data_;
+  MappedFile* file_;
+  Addr address_;
+  bool modified_;
+  bool own_data_; // Is data_ owned by this object or shared with someone else.
+  bool extended_; // Used to store an entry of more than one block.
+
+  DISALLOW_EVIL_CONSTRUCTORS(StorageBlock);
+};
+
+// Concrete block types used by the cache for entries and rankings nodes.
+typedef StorageBlock<EntryStore> CacheEntryBlock;
+typedef StorageBlock<RankingsNode> CacheRankingsBlock;
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STORAGE_BLOCK_H__
diff --git a/net/disk_cache/storage_block_unittest.cc b/net/disk_cache/storage_block_unittest.cc
new file mode 100644
index 0000000..9f6e8c1
--- /dev/null
+++ b/net/disk_cache/storage_block_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "base/file_util.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Verifies that Store() persists the buffer and that a later Load() restores
+// the stored values even after the in-memory copy has been scribbled over.
+TEST(DiskCacheTest, StorageBlock_LoadStore) {
+  std::wstring filename = GetCachePath();
+  file_util::AppendToPath(&filename, L"a_test");
+  scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+  ASSERT_TRUE(CreateCacheTestFile(filename.c_str()));
+  ASSERT_TRUE(file->Init(filename, 8192));
+
+  disk_cache::CacheEntryBlock entry1(file, disk_cache::Addr(0xa0010001));
+  memset(entry1.Data(), 0, sizeof(disk_cache::EntryStore));
+  entry1.Data()->hash = 0xaa5555aa;
+  entry1.Data()->rankings_node = 0xa0010002;
+
+  EXPECT_TRUE(entry1.Store());
+  // Overwrite the in-memory copy; Load() must bring back the stored values.
+  entry1.Data()->hash = 0x88118811;
+  entry1.Data()->rankings_node = 0xa0040009;
+
+  EXPECT_TRUE(entry1.Load());
+  EXPECT_EQ(0xaa5555aa, entry1.Data()->hash);
+  EXPECT_EQ(0xa0010002, entry1.Data()->rankings_node);
+}
+
+// Verifies that SetData() makes two blocks share the same in-memory buffer.
+TEST(DiskCacheTest, StorageBlock_SetData) {
+  std::wstring filename = GetCachePath();
+  file_util::AppendToPath(&filename, L"a_test");
+  scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+  ASSERT_TRUE(CreateCacheTestFile(filename.c_str()));
+  ASSERT_TRUE(file->Init(filename, 8192));
+
+  disk_cache::CacheEntryBlock entry1(file, disk_cache::Addr(0xa0010001));
+  entry1.Data()->hash = 0xaa5555aa;
+
+  disk_cache::CacheEntryBlock entry2(file, disk_cache::Addr(0xa0010002));
+  EXPECT_TRUE(entry2.Load());
+  EXPECT_TRUE(entry2.Data() != NULL);
+  EXPECT_EQ(0, entry2.Data()->hash);
+
+  // After SetData() both objects alias entry1's buffer.
+  EXPECT_TRUE(entry2.Data() != entry1.Data());
+  entry2.SetData(entry1.Data());
+  EXPECT_EQ(0xaa5555aa, entry2.Data()->hash);
+  EXPECT_TRUE(entry2.Data() == entry1.Data());
+}
+
+// Verifies that set_modified() causes the destructor to flush the data, so a
+// fresh block at the same address sees the modified value.
+TEST(DiskCacheTest, StorageBlock_SetModified) {
+  std::wstring filename = GetCachePath();
+  file_util::AppendToPath(&filename, L"a_test");
+  scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+  ASSERT_TRUE(CreateCacheTestFile(filename.c_str()));
+  ASSERT_TRUE(file->Init(filename, 8192));
+
+  disk_cache::CacheEntryBlock* entry1 =
+      new disk_cache::CacheEntryBlock(file, disk_cache::Addr(0xa0010003));
+  EXPECT_TRUE(entry1->Load());
+  EXPECT_EQ(0, entry1->Data()->hash);
+  entry1->Data()->hash = 0x45687912;
+  entry1->set_modified();
+  delete entry1;  // The destructor stores the dirty block.
+
+  disk_cache::CacheEntryBlock entry2(file, disk_cache::Addr(0xa0010003));
+  EXPECT_TRUE(entry2.Load());
+  EXPECT_EQ(0x45687912, entry2.Data()->hash);
+}
diff --git a/net/disk_cache/stress_cache.cc b/net/disk_cache/stress_cache.cc
new file mode 100644
index 0000000..74f257a
--- /dev/null
+++ b/net/disk_cache/stress_cache.cc
@@ -0,0 +1,221 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is a simple application that stress-tests the crash recovery of the disk
+// cache. The main application starts a copy of itself on a loop, checking the
+// exit code of the child process. When the child dies in an unexpected way,
+// the main application quits.
+
+// The child application has two threads: one to exercise the cache in an
+// infinite loop, and another one to asynchronously kill the process.
+
+#include <windows.h>
+#include <string>
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/string_util.h"
+#include "base/thread.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+
+const int kError = -1;
+const int kExpectedCrash = 1000000;
+
+// Starts a new copy of this executable, passing |iteration| on the command
+// line, waits for it to exit and returns its exit code (kError on failure to
+// launch or to read the exit code).
+int RunSlave(int iteration) {
+  std::wstring exe;
+  PathService::Get(base::FILE_EXE, &exe);
+
+  std::wstring command = StringPrintf(L"%s %d", exe.c_str(), iteration);
+
+  STARTUPINFO startup_info = {0};
+  startup_info.cb = sizeof(startup_info);
+  PROCESS_INFORMATION process_info;
+
+  // I really don't care about this call modifying the string.
+  if (!::CreateProcess(exe.c_str(), const_cast<wchar_t*>(command.c_str()), NULL,
+                       NULL, FALSE, 0, NULL, NULL, &startup_info,
+                       &process_info)) {
+    printf("Unable to run test\n");
+    return kError;
+  }
+
+  DWORD reason = ::WaitForSingleObject(process_info.hProcess, INFINITE);
+
+  // On Windows sizeof(int) == sizeof(DWORD), so the cast below is safe.
+  int code;
+  bool ok = ::GetExitCodeProcess(process_info.hProcess,
+                                 reinterpret_cast<PDWORD>(&code)) ? true :
+                                                                    false;
+
+  ::CloseHandle(process_info.hProcess);
+  ::CloseHandle(process_info.hThread);
+
+  if (!ok) {
+    printf("Unable to get return code\n");
+    return kError;
+  }
+
+  return code;
+}
+
+// Main loop for the master process: keep respawning the slave until it exits
+// with anything other than the expected self-inflicted crash code.
+int MasterCode() {
+  for (int iteration = 0; iteration < 100000; iteration++) {
+    int exit_code = RunSlave(iteration);
+    if (exit_code != kExpectedCrash)
+      return exit_code;
+  }
+
+  printf("More than enough...\n");
+
+  return 0;
+}
+
+// -----------------------------------------------------------------------
+
+// This thread will loop forever, adding and removing entries from the cache.
+// iteration is the current crash cycle, so the entries on the cache are marked
+// to know which instance of the application wrote them.
+void StressTheCache(int iteration) {
+  int cache_size = 0x800000;  // 8MB
+  std::wstring path = GetCachePath();
+  path.append(L"_stress");
+  disk_cache::Backend* cache = disk_cache::CreateCacheBackend(path, false,
+                                                              cache_size);
+  if (NULL == cache) {
+    printf("Unable to initialize cache.\n");
+    return;
+  }
+  printf("Iteration %d, initial entries: %d\n", iteration,
+         cache->GetEntryCount());
+
+  int seed = static_cast<int>(Time::Now().ToInternalValue());
+  srand(seed);
+
+  // A fixed pool of keys and a small set of simultaneously open entries.
+  const int kNumKeys = 5000;
+  const int kNumEntries = 30;
+  std::string keys[kNumKeys];
+  disk_cache::Entry* entries[kNumEntries] = {0};
+
+  for (int i = 0; i < kNumKeys; i++) {
+    keys[i] = GenerateKey(true);
+  }
+
+  const int kDataLen = 4000;
+  char data[kDataLen];
+  memset(data, 'k', kDataLen);
+
+  // Infinite loop: open-or-create a random entry, stamp it with the iteration
+  // number, and occasionally doom a random key. The CrashTask timer ends the
+  // process from another thread.
+  for (int i = 0;; i++) {
+    int slot = rand() % kNumEntries;
+    int key = rand() % kNumKeys;
+
+    if (entries[slot])
+      entries[slot]->Close();
+
+    if (!cache->OpenEntry(keys[key], &entries[slot]))
+      CHECK(cache->CreateEntry(keys[key], &entries[slot]));
+
+    // Overwrites the start of the 'k'-filled buffer with the iteration stamp.
+    sprintf_s(data, "%d %d", iteration, i);
+    CHECK(kDataLen == entries[slot]->WriteData(0, 0, data, kDataLen, NULL,
+                                               false));
+
+    if (rand() % 100 > 80) {
+      key = rand() % kNumKeys;
+      cache->DoomEntry(keys[key]);
+    }
+
+    if (!(i % 100))
+      printf("Entries: %d    \r", i);
+  }
+}
+
+// We want to prevent the timer thread from killing the process while we are
+// waiting for the debugger to attach.
+bool g_crashing = false;
+
+// Recurrent timer task that, on most runs (~98%), terminates the process with
+// the kExpectedCrash exit code so the master knows the death was intentional.
+class CrashTask : public Task {
+ public:
+  CrashTask() {}
+  ~CrashTask() {}
+
+  virtual void Run() {
+    // Don't kill the process while it is waiting for a debugger to attach
+    // after an assertion failure (see CrashHandler).
+    if (g_crashing)
+      return;
+
+    if (rand() % 100 > 1) {
+      printf("sweet death...\n");
+      TerminateProcess(GetCurrentProcess(), kExpectedCrash);
+    }
+  }
+};
+
+// Spawns the background thread whose recurrent 10-second timer eventually
+// kills this process (see CrashTask). We leak everything here :)
+bool StartCrashThread() {
+  Thread* thread = new Thread("party_crasher");
+  if (!thread->Start())
+    return false;
+
+  // Create a recurrent timer of 10 secs.
+  int timer_delay = 10000;
+  CrashTask* task = new CrashTask();
+  thread->message_loop()->timer_manager()->StartTimer(timer_delay, task, true);
+
+  return true;
+}
+
+// Log-assert handler: flags the crash (so CrashTask stops the kill timer) and
+// breaks into the debugger instead of letting the process die silently.
+void CrashHandler(const std::string& str) {
+  g_crashing = true;
+  __debugbreak();
+}
+
+// -----------------------------------------------------------------------
+
+// With no arguments we are the master process; with an iteration number on
+// the command line we are a slave that stresses the cache until the crash
+// thread kills us (exit code kExpectedCrash).
+int main(int argc, const char* argv[]) {
+  if (argc < 2)
+    return MasterCode();
+
+  logging::SetLogAssertHandler(CrashHandler);
+
+  // Some time for the memory manager to flush stuff.
+  Sleep(3000);
+  MessageLoop message_loop;
+
+  char* end;
+  long int iteration = strtol(argv[1], &end, 0);
+
+  if (!StartCrashThread()) {
+    printf("failed to start thread\n");
+    return kError;
+  }
+
+  // The command line is built by RunSlave() from an int, so the value always
+  // fits; cast explicitly instead of relying on an implicit long->int
+  // narrowing conversion.
+  StressTheCache(static_cast<int>(iteration));
+  return 0;
+}
diff --git a/net/disk_cache/trace.cc b/net/disk_cache/trace.cc
new file mode 100644
index 0000000..5f97b7d
--- /dev/null
+++ b/net/disk_cache/trace.cc
@@ -0,0 +1,146 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+
+#include "net/disk_cache/trace.h"
+
+#include "base/logging.h"
+
+// Change this value to 1 to enable tracing on a release build. By default,
+// tracing is enabled only on debug builds.
+#define ENABLE_TRACING 0
+
+#if _DEBUG
+#undef ENABLE_TRACING
+#define ENABLE_TRACING 1
+#endif
+
+namespace {
+
+// Size in bytes of a single trace record, including the terminating NUL.
+const int kEntrySize = 48;
+const int kNumberOfEntries = 5000;  // 48 bytes * 5000 records = 240 KB.
+
+// Circular buffer of fixed-size trace records. |current| is the next slot to
+// be written; |num_traces| counts every trace ever recorded (never reset).
+struct TraceBuffer {
+  int num_traces;
+  int current;
+  char buffer[kNumberOfEntries][kEntrySize];
+};
+
+// Global singleton, created by InitTrace() and destroyed by DestroyTrace().
+TraceBuffer* s_trace_buffer = NULL;
+
+// Sends |msg| to the debugger output.
+void DebugOutput(char* msg) {
+  OutputDebugStringA(msg);
+}
+
+}  // namespace
+
+namespace disk_cache {
+
+#if ENABLE_TRACING
+
+// Allocates and zero-fills the global trace ring buffer. Returns false (and
+// asserts on debug builds) if the buffer was already created.
+bool InitTrace(void) {
+  DCHECK(!s_trace_buffer);
+  if (s_trace_buffer)
+    return false;
+
+  // Build the zeroed buffer first, then publish it through the global.
+  TraceBuffer* buffer = new TraceBuffer;
+  memset(buffer, 0, sizeof(*buffer));
+  s_trace_buffer = buffer;
+  return true;
+}
+
+// Releases the global trace buffer created by InitTrace().
+void DestroyTrace(void) {
+  DCHECK(s_trace_buffer);
+  TraceBuffer* doomed = s_trace_buffer;
+  s_trace_buffer = NULL;
+  delete doomed;
+}
+
+// Formats the message into the next ring-buffer slot. Messages longer than
+// kEntrySize - 1 characters are truncated rather than treated as an error.
+void Trace(const char* format, ...) {
+  DCHECK(s_trace_buffer);
+  va_list ap;
+  va_start(ap, format);
+
+  // vsprintf_s would invoke the CRT invalid-parameter handler (terminating
+  // the process) whenever the formatted output does not fit in the 48-byte
+  // slot; vsnprintf_s with _TRUNCATE truncates and NUL-terminates instead.
+  vsnprintf_s(s_trace_buffer->buffer[s_trace_buffer->current], kEntrySize,
+              _TRUNCATE, format, ap);
+  s_trace_buffer->num_traces++;
+  s_trace_buffer->current++;
+  if (s_trace_buffer->current == kNumberOfEntries)
+    s_trace_buffer->current = 0;
+
+  va_end(ap);
+}
+
+// Writes the last |num_traces| recorded traces to the debugger output, oldest
+// first. An out-of-range request (negative, or larger than the buffer) dumps
+// the whole buffer.
+void DumpTrace(int num_traces) {
+  DCHECK(s_trace_buffer);
+  DebugOutput("Last traces:\n");
+
+  if (num_traces > kNumberOfEntries || num_traces < 0)
+    num_traces = kNumberOfEntries;
+
+  // Nothing to dump until at least one trace has been recorded.
+  if (s_trace_buffer->num_traces) {
+    // Room for one record plus '\n' and the terminating NUL.
+    char line[kEntrySize + 2];
+
+    // Start |num_traces| slots behind the write cursor, wrapping around the
+    // circular buffer if needed.
+    int current = s_trace_buffer->current - num_traces;
+    if (current < 0)
+      current += kNumberOfEntries;
+
+    for (int i = 0; i < num_traces; i++) {
+      memcpy(line, s_trace_buffer->buffer[current], kEntrySize);
+      line[kEntrySize] = '\0';
+      size_t length = strlen(line);
+      // Slots that were never written are all-zero; skip the empty strings.
+      if (length) {
+        line[length] = '\n';
+        line[length + 1] = '\0';
+        DebugOutput(line);
+      }
+
+      current++;
+      if (current == kNumberOfEntries)
+        current = 0;
+    }
+  }
+
+  DebugOutput("End of Traces\n");
+}
+
+#else // ENABLE_TRACING
+
+// Tracing compiled out: these stubs keep the call sites valid while doing
+// nothing. Note that DumpTrace() has no stub and is only available with
+// tracing enabled.
+bool InitTrace(void) {
+  return true;
+}
+
+void DestroyTrace(void) {
+}
+
+void Trace(const char* format, ...) {
+}
+
+#endif // ENABLE_TRACING
+
+} // namespace disk_cache
diff --git a/net/disk_cache/trace.h b/net/disk_cache/trace.h
new file mode 100644
index 0000000..25a05ae
--- /dev/null
+++ b/net/disk_cache/trace.h
@@ -0,0 +1,67 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file provides support for basic in-memory tracing of short events. We
+// keep a static circular buffer where we store the last traced events, so we
+// can review the cache recent behavior should we need it.
+
+#ifndef NET_DISK_CACHE_TRACE_H__
+#define NET_DISK_CACHE_TRACE_H__
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+// Create and destroy the tracing buffer.
+bool InitTrace(void);
+void DestroyTrace(void);
+
+// Scoped owner of the trace buffer: the constructor allocates it via
+// InitTrace() and the destructor releases it via DestroyTrace(). Keep an
+// instance alive for as long as tracing may be used.
+class TraceObject {
+ public:
+  TraceObject() {
+    InitTrace();
+  }
+  ~TraceObject() {
+    DestroyTrace();
+  }
+
+ private:
+  DISALLOW_EVIL_CONSTRUCTORS(TraceObject);
+};
+
+// Traces to the internal buffer.
+void Trace(const char* format, ...);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_TRACE_H__