Diffstat (limited to 'net')
-rw-r--r--  net/build/dump_cache.vcproj         | 159
-rw-r--r--  net/disk_cache/backend_impl.cc      | 121
-rw-r--r--  net/disk_cache/backend_impl.h       |  14
-rw-r--r--  net/disk_cache/entry_impl.cc        |   6
-rw-r--r--  net/disk_cache/entry_impl.h         |   4
-rw-r--r--  net/dump_cache.scons                |  35
-rw-r--r--  net/net.scons                       |   1
-rw-r--r--  net/net.sln                         |  13
-rw-r--r--  net/tools/dump_cache/dump_cache.cc  | 160
-rw-r--r--  net/tools/dump_cache/dump_files.cc  | 300
-rw-r--r--  net/tools/dump_cache/upgrade.cc     | 791
11 files changed, 1550 insertions, 54 deletions
diff --git a/net/build/dump_cache.vcproj b/net/build/dump_cache.vcproj
new file mode 100644
index 0000000..18f7864
--- /dev/null
+++ b/net/build/dump_cache.vcproj
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="dump_cache"
+ ProjectGUID="{4A14E455-2B7C-4C0F-BCC2-35A9666C186F}"
+ RootNamespace="dump_cache"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\build\debug.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ SubSystem="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\build\release.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ SubSystem="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath="..\tools\dump_cache\dump_cache.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\tools\dump_cache\dump_files.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\tools\dump_cache\upgrade.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 75af4e4..7aa1e78 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -432,51 +432,7 @@ bool BackendImpl::DoomEntriesSince(const Time initial_time) {
}
bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
- if (disabled_)
- return false;
-
- Rankings::ScopedRankingsBlock rankings(&rankings_,
- reinterpret_cast<CacheRankingsBlock*>(*iter));
- Rankings::ScopedRankingsBlock next(&rankings_,
- rankings_.GetNext(rankings.get()));
- *next_entry = NULL;
- *iter = NULL;
- if (!next.get())
- return false;
-
- scoped_refptr<EntryImpl> entry;
- if (next->Data()->pointer) {
- entry = reinterpret_cast<EntryImpl*>(next->Data()->pointer);
- } else {
- bool dirty;
- EntryImpl* temp = NULL;
- if (NewEntry(Addr(next->Data()->contents), &temp, &dirty))
- return false;
- entry.swap(&temp);
-
- if (dirty) {
- // We cannot trust this entry. Call MatchEntry to go through the regular
- // path and take the appropriate action.
- std::string key = entry->GetKey();
- uint32 hash = entry->GetHash();
- entry = NULL; // Release the entry.
- temp = MatchEntry(key, hash, false);
- if (temp)
- temp->Release();
-
- return false;
- }
-
- entry.swap(&temp);
- temp = EntryImpl::Update(temp); // Update returns an adref'd entry.
- entry.swap(&temp);
- if (!entry.get())
- return false;
- }
-
- entry.swap(reinterpret_cast<EntryImpl**>(next_entry));
- *iter = next.release();
- return true;
+ return OpenFollowingEntry(true, iter, next_entry);
}
void BackendImpl::EndEnumeration(void** iter) {
@@ -547,15 +503,15 @@ bool BackendImpl::CreateExternalFile(Addr* address) {
int file_number = data_->header.last_file + 1;
Addr file_address(0);
bool success = false;
- for (int i = 0; (i < 0x0fffffff) && !success; i++, file_number++) {
+ for (int i = 0; i < 0x0fffffff; i++, file_number++) {
if (!file_address.SetFileNumber(file_number)) {
file_number = 1;
continue;
}
std::wstring name = GetFileName(file_address);
int flags = base::PLATFORM_FILE_READ |
- base::PLATFORM_FILE_WRITE |
- base::PLATFORM_FILE_CREATE |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_CREATE |
base::PLATFORM_FILE_EXCLUSIVE_WRITE;
scoped_refptr<disk_cache::File> file(new disk_cache::File(
base::CreatePlatformFile(name.c_str(), flags, NULL)));
@@ -563,6 +519,7 @@ bool BackendImpl::CreateExternalFile(Addr* address) {
continue;
success = true;
+ break;
}
DCHECK(success);
@@ -584,7 +541,8 @@ void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
}
void BackendImpl::UpdateRank(CacheRankingsBlock* node, bool modified) {
- rankings_.UpdateRank(node, modified);
+ if (!read_only_)
+ rankings_.UpdateRank(node, modified);
}
void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
@@ -708,6 +666,10 @@ void BackendImpl::SetUnitTestMode() {
unit_test_ = true;
}
+void BackendImpl::SetUpgradeMode() {
+ read_only_ = true;
+}
+
void BackendImpl::ClearRefCountForTest() {
num_refs_ = 0;
}
@@ -732,6 +694,10 @@ int BackendImpl::SelfCheck() {
return CheckAllEntries();
}
+bool BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry) {
+ return OpenFollowingEntry(false, iter, prev_entry);
+}
+
// ------------------------------------------------------------------------
// We just created a new file so we're going to write the header and set the
@@ -755,8 +721,8 @@ bool BackendImpl::InitBackingStore(bool* file_created) {
file_util::AppendToPath(&index_name, kIndexName);
int flags = base::PLATFORM_FILE_READ |
- base::PLATFORM_FILE_WRITE |
- base::PLATFORM_FILE_OPEN_ALWAYS |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_OPEN_ALWAYS |
base::PLATFORM_FILE_EXCLUSIVE_WRITE;
scoped_refptr<disk_cache::File> file(new disk_cache::File(
base::CreatePlatformFile(index_name.c_str(), flags, file_created)));
@@ -866,7 +832,7 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
}
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
- bool find_parent) {
+ bool find_parent) {
Addr address(data_->table[hash & mask_]);
EntryImpl* cache_entry = NULL;
EntryImpl* parent_entry = NULL;
@@ -944,6 +910,57 @@ EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
return find_parent ? parent_entry : cache_entry;
}
+// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
+bool BackendImpl::OpenFollowingEntry(bool forward, void** iter,
+ Entry** next_entry) {
+ if (disabled_)
+ return false;
+
+ Rankings::ScopedRankingsBlock rankings(&rankings_,
+ reinterpret_cast<CacheRankingsBlock*>(*iter));
+ CacheRankingsBlock* next_block = forward ? rankings_.GetNext(rankings.get()) :
+ rankings_.GetPrev(rankings.get());
+ Rankings::ScopedRankingsBlock next(&rankings_, next_block);
+ *next_entry = NULL;
+ *iter = NULL;
+ if (!next.get())
+ return false;
+
+ scoped_refptr<EntryImpl> entry;
+ if (next->Data()->pointer) {
+ entry = reinterpret_cast<EntryImpl*>(next->Data()->pointer);
+ } else {
+ bool dirty;
+ EntryImpl* temp = NULL;
+ if (NewEntry(Addr(next->Data()->contents), &temp, &dirty))
+ return false;
+ entry.swap(&temp);
+
+ if (dirty) {
+ // We cannot trust this entry. Call MatchEntry to go through the regular
+ // path and take the appropriate action.
+ std::string key = entry->GetKey();
+ uint32 hash = entry->GetHash();
+ entry = NULL; // Release the entry.
+ temp = MatchEntry(key, hash, false);
+ if (temp)
+ temp->Release();
+
+ return false;
+ }
+
+ entry.swap(&temp);
+ temp = EntryImpl::Update(temp); // Update returns an adref'd entry.
+ entry.swap(&temp);
+ if (!entry.get())
+ return false;
+ }
+
+ entry.swap(reinterpret_cast<EntryImpl**>(next_entry));
+ *iter = next.release();
+ return true;
+}
+
void BackendImpl::DestroyInvalidEntry(Addr address, EntryImpl* entry) {
LOG(WARNING) << "Destroying invalid entry.";
Trace("Destroying invalid entry 0x%p", entry);
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index 40b6f51..5c88682 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -23,12 +23,12 @@ class BackendImpl : public Backend {
public:
explicit BackendImpl(const std::wstring& path)
: path_(path), block_files_(path), mask_(0), max_size_(0),
- init_(false), restarted_(false), unit_test_(false),
+ init_(false), restarted_(false), unit_test_(false), read_only_(false),
ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)) {}
// mask can be used to limit the usable size of the hash table, for testing.
BackendImpl(const std::wstring& path, uint32 mask)
: path_(path), block_files_(path), mask_(mask), max_size_(0),
- init_(false), restarted_(false), unit_test_(false),
+ init_(false), restarted_(false), unit_test_(false), read_only_(false),
ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)) {}
~BackendImpl();
@@ -109,6 +109,9 @@ class BackendImpl : public Backend {
// Sets internal parameters to enable unit testing mode.
void SetUnitTestMode();
+ // Sets internal parameters to enable upgrade mode (for internal tools).
+ void SetUpgradeMode();
+
// Clears the counter of references to test handling of corruptions.
void ClearRefCountForTest();
@@ -116,6 +119,9 @@ class BackendImpl : public Backend {
// or an error code (negative value).
int SelfCheck();
+  // Same behavior as OpenNextEntry but walks the list from back to front.
+ bool OpenPrevEntry(void** iter, Entry** prev_entry);
+
private:
// Creates a new backing file for the cache index.
bool CreateBackingStore(disk_cache::File* file);
@@ -135,6 +141,9 @@ class BackendImpl : public Backend {
EntryImpl* MatchEntry(const std::string& key, uint32 hash,
bool find_parent);
+ // Opens the next or previous entry on a cache iteration.
+ bool OpenFollowingEntry(bool forward, void** iter, Entry** next_entry);
+
void DestroyInvalidEntry(Addr address, EntryImpl* entry);
// Deletes entries from the cache until the current size is below the limit.
@@ -175,6 +184,7 @@ class BackendImpl : public Backend {
bool init_; // controls the initialization of the system.
bool restarted_;
bool unit_test_;
+ bool read_only_; // Prevents updates of the rankings data (used by tools).
bool disabled_;
Stats stats_; // Usage statistcs.
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 2821540..2e7b8c46 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -499,6 +499,12 @@ void EntryImpl::DecrementIoCount() {
backend_->DecrementIoCount();
}
+void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
+ node_.Data()->last_used = last_used.ToInternalValue();
+ node_.Data()->last_modified = last_modified.ToInternalValue();
+ node_.set_modified();
+}
+
bool EntryImpl::CreateDataBlock(int index, int size) {
Addr address(entry_.Data()->data_addr[index]);
DCHECK(0 == index || 1 == index);
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index 64fa75b..b504b0bd 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -84,6 +84,10 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
void IncrementIoCount();
void DecrementIoCount();
+ // Set the access times for this entry. This method provides support for
+ // the upgrade tool.
+ void SetTimes(base::Time last_used, base::Time last_modified);
+
private:
~EntryImpl();
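A minimal sketch (not part of this patch; CopyTimes is an illustrative name) of how SetTimes() lets a copying tool preserve an entry's original access times, mirroring what the upgrade code does after transferring an entry:

  // Illustrative only: carry the last-used/last-modified times from a source
  // entry over to a destination entry created during a copy.
  void CopyTimes(disk_cache::Entry* src, disk_cache::EntryImpl* dst) {
    dst->SetTimes(src->GetLastUsed(), src->GetLastModified());
  }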
diff --git a/net/dump_cache.scons b/net/dump_cache.scons
new file mode 100644
index 0000000..37099e5
--- /dev/null
+++ b/net/dump_cache.scons
@@ -0,0 +1,35 @@
+# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+__doc__ = """
+Configuration for the dump_cache{,.exe} executable.
+"""
+
+Import('env')
+
+env = env.Clone()
+
+env.ApplySConscript([
+ '$BASE_DIR/using_base.scons',
+ '$ICU38_DIR/using_icu38.scons',
+ '$NET_DIR/using_net.scons',
+])
+
+if env['PLATFORM'] == 'win32':
+ env.Prepend(
+ CCFLAGS = [
+ '/WX',
+ ],
+ )
+
+input_files = [
+ 'tools/dump_cache/dump_cache.cc',
+ 'tools/dump_cache/dump_files.cc',
+ 'tools/dump_cache/upgrade.cc',
+]
+
+
+if env['PLATFORM'] == 'win32':
+
+ env.ChromeTestProgram('dump_cache', input_files)
diff --git a/net/net.scons b/net/net.scons
index e5e74da..cc22d36 100644
--- a/net/net.scons
+++ b/net/net.scons
@@ -16,6 +16,7 @@ env.Append(
sconscript_files = [
'crash_cache.scons',
+ 'dump_cache.scons',
'net_lib.scons',
'net_perftests.scons',
'net_unittests.scons',
diff --git a/net/net.sln b/net/net.sln
index b6eba10..fb52fd4 100644
--- a/net/net.sln
+++ b/net/net.sln
@@ -153,6 +153,13 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sdch", "..\sdch\sdch.vcproj
Release.AspNetCompiler.Debug = "False"
EndProjectSection
EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "dump_cache", "build\dump_cache.vcproj", "{4A14E455-2B7C-4C0F-BCC2-35A9666C186F}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {8C27D792-2648-4F5E-9ED0-374276327308} = {8C27D792-2648-4F5E-9ED0-374276327308}
+ {326E9795-E760-410A-B69A-3F79DB3F5243} = {326E9795-E760-410A-B69A-3F79DB3F5243}
+ EndProjectSection
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
@@ -175,6 +182,10 @@ Global
{326E9795-E760-410A-B69A-3F79DB3F5243}.Debug|Win32.Build.0 = Debug|Win32
{326E9795-E760-410A-B69A-3F79DB3F5243}.Release|Win32.ActiveCfg = Release|Win32
{326E9795-E760-410A-B69A-3F79DB3F5243}.Release|Win32.Build.0 = Release|Win32
+ {4A14E455-2B7C-4C0F-BCC2-35A9666C186F}.Debug|Win32.ActiveCfg = Debug|Win32
+ {4A14E455-2B7C-4C0F-BCC2-35A9666C186F}.Debug|Win32.Build.0 = Debug|Win32
+ {4A14E455-2B7C-4C0F-BCC2-35A9666C186F}.Release|Win32.ActiveCfg = Release|Win32
+ {4A14E455-2B7C-4C0F-BCC2-35A9666C186F}.Release|Win32.Build.0 = Release|Win32
{7100F41F-868D-4E99-80A2-AF8E6574749D}.Debug|Win32.ActiveCfg = Debug|Win32
{7100F41F-868D-4E99-80A2-AF8E6574749D}.Debug|Win32.Build.0 = Debug|Win32
{7100F41F-868D-4E99-80A2-AF8E6574749D}.Release|Win32.ActiveCfg = Release|Win32
@@ -231,10 +242,12 @@ Global
{0E5474AC-5996-4B13-87C0-4AE931EE0815} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{1832A374-8A74-4F9E-B536-69A699B3E165} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{2A70CBF0-847E-4E3A-B926-542A656DC7FE} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
+ {4A14E455-2B7C-4C0F-BCC2-35A9666C186F} = {E7D78B1F-F7D3-47CB-BF51-3957C646B406}
{7100F41F-868D-4E99-80A2-AF8E6574749D} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{8423AF0D-4B88-4EBF-94E1-E4D00D00E21C} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{8C27D792-2648-4F5E-9ED0-374276327308} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{A0D94973-D355-47A5-A1E2-3456F321F010} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
+ {B0EE0599-2913-46A0-A847-A3EC813658D3} = {E7D78B1F-F7D3-47CB-BF51-3957C646B406}
{BFE8E2A7-3B3B-43B0-A994-3058B852DB8B} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
{E13045CD-7E1F-4A41-9B18-8D288B2E7B41} = {E7D78B1F-F7D3-47CB-BF51-3957C646B406}
{EF5E94AB-B646-4E5B-A058-52EF07B8351C} = {A04F65DF-D422-4E8F-B918-29EBA839363E}
diff --git a/net/tools/dump_cache/dump_cache.cc b/net/tools/dump_cache/dump_cache.cc
new file mode 100644
index 0000000..2aa3b56
--- /dev/null
+++ b/net/tools/dump_cache/dump_cache.cc
@@ -0,0 +1,160 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This command-line program dumps the contents of a set of cache files, either
+// to stdout or to another set of cache files.
+
+#include <stdio.h>
+#include <string>
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+#include "base/process_util.h"
+#include "base/scoped_handle.h"
+#include "base/string_util.h"
+
+#include "net/disk_cache/disk_format.h"
+
+enum Errors {
+ GENERIC = -1,
+ ALL_GOOD = 0,
+ INVALID_ARGUMENT = 1,
+ FILE_ACCESS_ERROR,
+ UNKNOWN_VERSION,
+ TOOL_NOT_FOUND,
+};
+
+int GetMajorVersion(const std::wstring input_path);
+int DumpContents(const std::wstring input_path);
+int DumpHeaders(const std::wstring input_path);
+int RunSlave(const std::wstring input_path, const std::wstring pipe_number);
+int Upgrade(const std::wstring output_path, HANDLE pipe);
+HANDLE CreateServer(std::wstring* pipe_number);
+
+const char kUpgradeHelp[] =
+ "\nIn order to use the upgrade function, a version of this tool that\n"
+ "understands the file format of the files to upgrade is needed. For\n"
+ "instance, to upgrade files saved with file format 3.4 to version 5.2,\n"
+ "a version of this program that was compiled with version 3.4 has to be\n"
+ "located beside this executable, and named dump_cache_3.exe, and this\n"
+ "executable should be compiled with version 5.2 being the current one.";
+
+// Folders to read and write cache files.
+const wchar_t kInputPath[] = L"input";
+const wchar_t kOutputPath[] = L"output";
+
+// Dumps the file headers to stdout.
+const wchar_t kDumpHeaders[] = L"dump-headers";
+
+// Dumps all entries to stdout.
+const wchar_t kDumpContents[] = L"dump-contents";
+
+// Upgrade an old version to the current one.
+const wchar_t kUpgrade[] = L"upgrade";
+
+// Internal use:
+const wchar_t kSlave[] = L"slave";
+const wchar_t kPipe[] = L"pipe";
+
+int Help() {
+ printf("warning: input files are modified by this tool\n");
+ printf("dump_cache --input=path1 [--output=path2]\n");
+ printf("--dump-headers: display file headers\n");
+ printf("--dump-contents: display all entries\n");
+ printf("--upgrade: copy contents to the output path\n");
+ return INVALID_ARGUMENT;
+}
+
+// Starts a new process, to generate the files.
+int LaunchSlave(const CommandLine &command_line, const std::wstring pipe_number,
+ int version) {
+ std::wstring new_command_line = command_line.command_line_string();
+ const std::wstring old_exe(L"dump_cache.exe");
+ size_t to_remove = new_command_line.find(old_exe);
+ new_command_line.erase(to_remove, old_exe.size());
+
+ std::wstring new_program = StringPrintf(L"%ls%d.exe", L"dump_cache_",
+ version);
+ new_command_line.insert(to_remove, new_program);
+ if (command_line.HasSwitch(kUpgrade))
+ CommandLine::AppendSwitch(&new_command_line, kSlave);
+
+ CommandLine::AppendSwitchWithValue(&new_command_line, kPipe, pipe_number);
+ if (!base::LaunchApp(new_command_line, false, false, NULL)) {
+ printf("Unable to launch the needed version of this tool: %ls\n",
+ new_program.c_str());
+ printf(kUpgradeHelp);
+ return TOOL_NOT_FOUND;
+ }
+ return ALL_GOOD;
+}
+
+// -----------------------------------------------------------------------
+
+int main(int argc, const char* argv[]) {
+ // Setup an AtExitManager so Singleton objects will be destroyed.
+ base::AtExitManager at_exit_manager;
+
+ CommandLine command_line;
+ std::wstring input_path = command_line.GetSwitchValue(kInputPath);
+ if (input_path.empty())
+ return Help();
+
+ bool upgrade = false;
+ bool slave_required = false;
+ std::wstring output_path;
+ if (command_line.HasSwitch(kUpgrade)) {
+ output_path = command_line.GetSwitchValue(kOutputPath);
+ if (output_path.empty())
+ return Help();
+ slave_required = true;
+ upgrade = true;
+ }
+
+ int version = GetMajorVersion(input_path);
+ if (!version)
+ return FILE_ACCESS_ERROR;
+
+ if (version != disk_cache::kCurrentVersion >> 16) {
+ if (command_line.HasSwitch(kSlave)) {
+ printf("Unknown version\n");
+ return UNKNOWN_VERSION;
+ }
+ slave_required = true;
+ }
+
+ std::wstring pipe_number = command_line.GetSwitchValue(kPipe);
+ if (command_line.HasSwitch(kSlave) && slave_required)
+ return RunSlave(input_path, pipe_number);
+
+ ScopedHandle server;
+ if (slave_required) {
+ server.Set(CreateServer(&pipe_number));
+ if (!server.IsValid()) {
+ printf("Unable to create the server pipe\n");
+ return -1;
+ }
+
+ int ret = LaunchSlave(command_line, pipe_number, version);
+ if (ret)
+ return ret;
+ }
+
+ if (upgrade)
+ return Upgrade(output_path, server);
+
+ if (slave_required) {
+    // Wait until the slave starts dumping data before we quit. Lazy "fix" for
+    // a console quirk.
+    Sleep(500);
+    return ALL_GOOD;
+ }
+
+ if (command_line.HasSwitch(kDumpContents))
+ return DumpContents(input_path);
+ if (command_line.HasSwitch(kDumpHeaders))
+ return DumpHeaders(input_path);
+ return Help();
+}
+
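Example invocations assembled from the switches defined above (the cache paths are placeholders):

  dump_cache --input=c:\old_cache --dump-headers
  dump_cache --input=c:\old_cache --dump-contents
  dump_cache --input=c:\old_cache --output=c:\new_cache --upgrade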
diff --git a/net/tools/dump_cache/dump_files.cc b/net/tools/dump_cache/dump_files.cc
new file mode 100644
index 0000000..67bdad2
--- /dev/null
+++ b/net/tools/dump_cache/dump_files.cc
@@ -0,0 +1,300 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Performs basic inspection of the disk cache files with minimal disruption
+// to the actual files (they still may change if an error is detected on the
+// files).
+
+#include <stdio.h>
+#include <string>
+
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "net/base/file_stream.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/mapped_file.h"
+#include "net/disk_cache/storage_block.h"
+
+namespace {
+
+const wchar_t kIndexName[] = L"index";
+const wchar_t kDataPrefix[] = L"data_";
+
+// Reads the |header_size| bytes from the beginning of file |name|.
+bool ReadHeader(const std::wstring name, char* header, int header_size) {
+ net::FileStream file;
+ file.Open(name, base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ);
+ if (!file.IsOpen()) {
+ printf("Unable to open file %ls\n", name.c_str());
+ return false;
+ }
+
+ int read = file.Read(header, header_size, NULL);
+ if (read != header_size) {
+ printf("Unable to read file %ls\n", name.c_str());
+ return false;
+ }
+ return true;
+}
+
+int GetMajorVersionFromFile(const std::wstring name) {
+ disk_cache::IndexHeader header;
+ if (!ReadHeader(name, reinterpret_cast<char*>(&header), sizeof(header)))
+ return 0;
+
+ return header.version >> 16;
+}
+
+// Dumps the contents of the Index-file header.
+void DumpIndexHeader(const std::wstring name) {
+ disk_cache::IndexHeader header;
+ if (!ReadHeader(name, reinterpret_cast<char*>(&header), sizeof(header)))
+ return;
+
+ printf("Index file:\n");
+ printf("magic: %x\n", header.magic);
+ printf("version: %d.%d\n", header.version >> 16, header.version & 0xffff);
+ printf("entries: %d\n", header.num_entries);
+ printf("total bytes: %d\n", header.num_bytes);
+ printf("last file number: %d\n", header.last_file);
+ printf("current id: %d\n", header.this_id);
+ printf("table length: %d\n", header.table_len);
+ printf("-------------------------\n\n");
+}
+
+// Dumps the contents of a block-file header.
+void DumpBlockHeader(const std::wstring name) {
+ disk_cache::BlockFileHeader header;
+ if (!ReadHeader(name, reinterpret_cast<char*>(&header), sizeof(header)))
+ return;
+
+ std::wstring file_name = file_util::GetFilenameFromPath(name);
+
+ printf("Block file: %ls\n", file_name.c_str());
+ printf("magic: %x\n", header.magic);
+ printf("version: %d.%d\n", header.version >> 16, header.version & 0xffff);
+ printf("file id: %d\n", header.this_file);
+ printf("next file id: %d\n", header.next_file);
+ printf("entry size: %d\n", header.entry_size);
+ printf("current entries: %d\n", header.num_entries);
+ printf("max entries: %d\n", header.max_entries);
+ printf("updating: %d\n", header.updating);
+ printf("empty sz 1: %d\n", header.empty[0]);
+ printf("empty sz 2: %d\n", header.empty[1]);
+ printf("empty sz 3: %d\n", header.empty[2]);
+ printf("empty sz 4: %d\n", header.empty[3]);
+ printf("user 0: 0x%x\n", header.user[0]);
+ printf("user 1: 0x%x\n", header.user[1]);
+ printf("user 2: 0x%x\n", header.user[2]);
+ printf("user 3: 0x%x\n", header.user[3]);
+ printf("-------------------------\n\n");
+}
+
+// Simple class that interacts with the set of cache files.
+class CacheDumper {
+ public:
+ explicit CacheDumper(const std::wstring path)
+ : path_(path), block_files_(path), index_(NULL) {}
+
+ bool Init();
+
+  // Reads an entry from disk. Returns false when all entries have already
+  // been returned.
+ bool GetEntry(disk_cache::EntryStore* entry);
+
+ // Loads a specific block from the block files.
+ bool LoadEntry(disk_cache::CacheAddr addr, disk_cache::EntryStore* entry);
+ bool LoadRankings(disk_cache::CacheAddr addr,
+ disk_cache::RankingsNode* rankings);
+
+ private:
+ std::wstring path_;
+ disk_cache::BlockFiles block_files_;
+ scoped_refptr<disk_cache::MappedFile> index_file_;
+ disk_cache::Index* index_;
+ int current_hash_;
+ disk_cache::CacheAddr next_addr_;
+ DISALLOW_COPY_AND_ASSIGN(CacheDumper);
+};
+
+bool CacheDumper::Init() {
+ if (!block_files_.Init(false)) {
+ printf("Unable to init block files\n");
+ return false;
+ }
+
+ std::wstring index_name(path_);
+ file_util::AppendToPath(&index_name, kIndexName);
+ index_file_ = new disk_cache::MappedFile;
+ index_ =
+ reinterpret_cast<disk_cache::Index*>(index_file_->Init(index_name, 0));
+ if (!index_) {
+ printf("Unable to map index\n");
+ return false;
+ }
+
+ current_hash_ = 0;
+ next_addr_ = 0;
+ return true;
+}
+
+bool CacheDumper::GetEntry(disk_cache::EntryStore* entry) {
+ if (next_addr_) {
+ if (LoadEntry(next_addr_, entry)) {
+ next_addr_ = entry->next;
+ if (!next_addr_)
+ current_hash_++;
+ return true;
+ } else {
+ printf("Unable to load entry at address 0x%x\n", next_addr_);
+ next_addr_ = 0;
+ current_hash_++;
+ }
+ }
+
+ for (int i = current_hash_; i < index_->header.table_len; i++) {
+ // Yes, we'll crash if the table is shorter than expected, but only after
+ // dumping every entry that we can find.
+ if (index_->table[i]) {
+ current_hash_ = i;
+ if (LoadEntry(index_->table[i], entry)) {
+ next_addr_ = entry->next;
+ if (!next_addr_)
+ current_hash_++;
+ return true;
+ } else {
+ printf("Unable to load entry at address 0x%x\n", index_->table[i]);
+ }
+ }
+ }
+ return false;
+}
+
+bool CacheDumper::LoadEntry(disk_cache::CacheAddr addr,
+ disk_cache::EntryStore* entry) {
+ disk_cache::Addr address(addr);
+ disk_cache::MappedFile* file = block_files_.GetFile(address);
+ if (!file)
+ return false;
+
+ disk_cache::CacheEntryBlock entry_block(file, address);
+ if (!entry_block.Load())
+ return false;
+
+ memcpy(entry, entry_block.Data(), sizeof(*entry));
+ printf("Entry at 0x%x\n", addr);
+ return true;
+}
+
+bool CacheDumper::LoadRankings(disk_cache::CacheAddr addr,
+ disk_cache::RankingsNode* rankings) {
+ disk_cache::Addr address(addr);
+ disk_cache::MappedFile* file = block_files_.GetFile(address);
+ if (!file)
+ return false;
+
+ disk_cache::CacheRankingsBlock rank_block(file, address);
+ if (!rank_block.Load())
+ return false;
+
+ memcpy(rankings, rank_block.Data(), sizeof(*rankings));
+ printf("Rankings at 0x%x\n", addr);
+ return true;
+}
+
+void DumpEntry(const disk_cache::EntryStore& entry) {
+ std::string key;
+ if (!entry.long_key) {
+ key = entry.key;
+ if (key.size() > 50)
+ key.resize(50);
+ }
+
+ printf("hash: 0x%x\n", entry.hash);
+ printf("next entry: 0x%x\n", entry.next);
+ printf("rankings: 0x%x\n", entry.rankings_node);
+ printf("key length: %d\n", entry.key_len);
+ printf("key: \"%s\"\n", key.c_str());
+ printf("key addr: 0x%x\n", entry.long_key);
+ printf("data size 0: %d\n", entry.data_size[0]);
+ printf("data size 1: %d\n", entry.data_size[1]);
+ printf("data addr 0: 0x%x\n", entry.data_addr[0]);
+ printf("data addr 1: 0x%x\n", entry.data_addr[1]);
+ printf("----------\n\n");
+}
+
+void DumpRankings(const disk_cache::RankingsNode& rankings) {
+ printf("next: 0x%x\n", rankings.next);
+ printf("prev: 0x%x\n", rankings.prev);
+ printf("entry: 0x%x\n", rankings.contents);
+ printf("dirty: %d\n", rankings.dirty);
+ printf("pointer: 0x%x\n", rankings.pointer);
+ printf("----------\n\n");
+}
+
+} // namespace.
+
+// -----------------------------------------------------------------------
+
+int GetMajorVersion(const std::wstring input_path) {
+ std::wstring index_name(input_path);
+ file_util::AppendToPath(&index_name, kIndexName);
+
+ int version = GetMajorVersionFromFile(index_name);
+ if (!version)
+ return 0;
+
+ std::wstring data_name(input_path);
+ file_util::AppendToPath(&data_name, L"data_0");
+ if (version != GetMajorVersionFromFile(data_name))
+ return 0;
+
+ data_name = input_path;
+ file_util::AppendToPath(&data_name, L"data_1");
+ if (version != GetMajorVersionFromFile(data_name))
+ return 0;
+
+ return version;
+}
+
+// Dumps the headers of all files.
+int DumpHeaders(const std::wstring input_path) {
+ std::wstring index_name(input_path);
+ file_util::AppendToPath(&index_name, kIndexName);
+ DumpIndexHeader(index_name);
+
+ std::wstring pattern(kDataPrefix);
+ pattern.append(L"*");
+ file_util::FileEnumerator iter(input_path, false,
+ file_util::FileEnumerator::FILES, pattern);
+ for (std::wstring file = iter.Next(); !file.empty(); file = iter.Next()) {
+ DumpBlockHeader(file);
+ }
+
+ return 0;
+}
+
+// Dumps all entries from the cache.
+int DumpContents(const std::wstring input_path) {
+ DumpHeaders(input_path);
+
+ // We need a message loop, although we really don't run any task.
+ MessageLoop loop(MessageLoop::TYPE_IO);
+ CacheDumper dumper(input_path);
+ if (!dumper.Init())
+ return -1;
+
+ disk_cache::EntryStore entry;
+ while (dumper.GetEntry(&entry)) {
+ DumpEntry(entry);
+ disk_cache::RankingsNode rankings;
+ if (dumper.LoadRankings(entry.rankings_node, &rankings))
+ DumpRankings(rankings);
+ }
+
+ printf("Done.\n");
+
+ return 0;
+}
diff --git a/net/tools/dump_cache/upgrade.cc b/net/tools/dump_cache/upgrade.cc
new file mode 100644
index 0000000..1495b95
--- /dev/null
+++ b/net/tools/dump_cache/upgrade.cc
@@ -0,0 +1,791 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+
+namespace {
+
+const wchar_t kPipePrefix[] = L"\\\\.\\pipe\\dump_cache_";
+const int kChannelSize = 64 * 1024;
+const int kNumStreams = 2;
+
+// Simple macro to print out formatted debug messages. It is similar to a DLOG
+// except that it doesn't include a header.
+#ifdef NDEBUG
+#define DEBUGMSG(...) {}
+#else
+#define DEBUGMSG(...) { printf(__VA_ARGS__); }
+#endif
+
+HANDLE OpenServer(const std::wstring pipe_number) {
+ std::wstring pipe_name(kPipePrefix);
+ pipe_name.append(pipe_number);
+ return CreateFile(pipe_name.c_str(), GENERIC_READ | GENERIC_WRITE, 0, NULL,
+ OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
+}
+
+// This is the basic message to use between the two processes. It is intended
+// to transmit a single action (like "get the key name for entry xx"), with up
+// to 5 32-bit arguments and 4 64-bit arguments. After this structure, the rest
+// of the message carries |buffer_bytes| bytes of actual data.
+struct Message {
+ int32 command;
+ int32 result;
+ int32 buffer_bytes;
+ int32 arg1;
+ int32 arg2;
+ int32 arg3;
+ int32 arg4;
+ int32 arg5;
+ int64 long_arg1;
+ int64 long_arg2;
+ int64 long_arg3;
+ int64 long_arg4;
+ Message() {
+ memset(this, 0, sizeof(*this));
+ }
+ Message& operator= (const Message& other) {
+ memcpy(this, &other, sizeof(*this));
+ return *this;
+ }
+};
+
+const int kBufferSize = kChannelSize - sizeof(Message);
+struct IoBuffer {
+ Message msg;
+ char buffer[kBufferSize];
+};
+COMPILE_ASSERT(sizeof(IoBuffer) == kChannelSize, invalid_io_buffer);
+
+
+// The list of commands.
+// Currently, there is support for working ONLY with one entry at a time.
+enum {
+ // Get the entry from list |arg1| that follows |long_arg1|.
+ // The result is placed on |long_arg1| (closes the previous one).
+ GET_NEXT_ENTRY = 1,
+ // Get the entry from list |arg1| that precedes |long_arg1|.
+ // The result is placed on |long_arg1| (closes the previous one).
+ GET_PREV_ENTRY,
+ // Closes the entry |long_arg1|.
+ CLOSE_ENTRY,
+ // Get the key of the entry |long_arg1|.
+ GET_KEY,
+ // Get last used (long_arg2) and last modified (long_arg3) times for the
+ // entry at |long_arg1|.
+ GET_USE_TIMES,
+  // Returns on |arg2| the data size in bytes of the stream |arg1| of entry at
+ // |long_arg1|.
+ GET_DATA_SIZE,
+ // Returns |arg2| bytes of the stream |arg1| for the entry at |long_arg1|,
+ // starting at offset |arg3|.
+ READ_DATA,
+ // End processing requests.
+ QUIT
+};
+
+// The list of return codes.
+enum {
+ RESULT_OK = 0,
+ RESULT_UNKNOWN_COMMAND,
+ RESULT_INVALID_PARAMETER,
+ RESULT_NAME_OVERFLOW
+};
+
+// -----------------------------------------------------------------------
+
+class BaseSM : public MessageLoopForIO::IOHandler {
+ public:
+ BaseSM(disk_cache::BackendImpl* cache, HANDLE channel);
+ virtual ~BaseSM();
+
+ protected:
+ bool SendMsg(const Message& msg);
+ bool ReceiveMsg();
+ bool ConnectChannel();
+ bool IsPending();
+
+ MessageLoopForIO::IOContext in_context_;
+ MessageLoopForIO::IOContext out_context_;
+ disk_cache::BackendImpl* cache_;
+ disk_cache::EntryImpl* entry_;
+ HANDLE channel_;
+ int state_;
+ int pending_count_;
+ scoped_array<char> in_buffer_;
+ scoped_array<char> out_buffer_;
+ IoBuffer* input_;
+ IoBuffer* output_;
+ DISALLOW_COPY_AND_ASSIGN(BaseSM);
+};
+
+BaseSM::BaseSM(disk_cache::BackendImpl* cache, HANDLE channel)
+ : cache_(cache), entry_(NULL), channel_(channel), state_(0),
+ pending_count_(0) {
+ in_buffer_.reset(new char[kChannelSize]);
+ out_buffer_.reset(new char[kChannelSize]);
+ input_ = reinterpret_cast<IoBuffer*>(in_buffer_.get());
+ output_ = reinterpret_cast<IoBuffer*>(out_buffer_.get());
+
+ memset(&in_context_, 0, sizeof(in_context_));
+ memset(&out_context_, 0, sizeof(out_context_));
+ in_context_.handler = this;
+ out_context_.handler = this;
+ MessageLoopForIO::current()->RegisterIOHandler(channel_, this);
+}
+
+BaseSM::~BaseSM() {
+ if (entry_)
+ entry_->Close();
+}
+
+bool BaseSM::SendMsg(const Message& msg) {
+ // Only one command will be in-flight at a time. Let's start the Read IO here
+ // when we know that it will be pending.
+ if (!ReceiveMsg())
+ return false;
+
+ output_->msg = msg;
+ DWORD written;
+ if (!WriteFile(channel_, output_, sizeof(msg) + msg.buffer_bytes, &written,
+ &out_context_.overlapped)) {
+ if (ERROR_IO_PENDING != GetLastError())
+ return false;
+ }
+ pending_count_++;
+ return true;
+}
+
+bool BaseSM::ReceiveMsg() {
+ DWORD read;
+ if (!ReadFile(channel_, input_, kChannelSize, &read,
+ &in_context_.overlapped)) {
+ if (ERROR_IO_PENDING != GetLastError())
+ return false;
+ }
+ pending_count_++;
+ return true;
+}
+
+bool BaseSM::ConnectChannel() {
+ if (!ConnectNamedPipe(channel_, &in_context_.overlapped)) {
+ DWORD error = GetLastError();
+ if (ERROR_PIPE_CONNECTED == error)
+ return true;
+ // By returning true in case of a generic error, we allow the operation to
+ // fail while sending the first message.
+ if (ERROR_IO_PENDING != error)
+ return true;
+ }
+ pending_count_++;
+ return false;
+}
+
+bool BaseSM::IsPending() {
+ return pending_count_ != 0;
+}
+
+// -----------------------------------------------------------------------
+
+class MasterSM : public BaseSM {
+ public:
+ MasterSM(disk_cache::BackendImpl* cache, HANDLE channel)
+ : BaseSM(cache, channel) {}
+ virtual ~MasterSM() {}
+
+ bool DoInit();
+ virtual void OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered, DWORD error);
+
+ private:
+ enum {
+ MASTER_INITIAL = 0,
+ MASTER_CONNECT,
+ MASTER_GET_ENTRY,
+ MASTER_GET_NEXT_ENTRY,
+ MASTER_GET_KEY,
+ MASTER_GET_USE_TIMES,
+ MASTER_GET_DATA_SIZE,
+ MASTER_READ_DATA,
+ MASTER_END
+ };
+
+ void SendGetPrevEntry();
+ void DoGetEntry();
+ void DoGetKey(int bytes_read);
+ void DoGetUseTimes();
+ void SendGetDataSize();
+ void DoGetDataSize();
+ void CloseEntry();
+ void SendReadData();
+ void DoReadData(int bytes_read);
+ void SendQuit();
+ void DoEnd();
+ void Fail();
+
+ base::Time last_used_;
+ base::Time last_modified_;
+ int64 remote_entry_;
+ int stream_;
+ int bytes_remaining_;
+ int offset_;
+ int copied_entries_;
+};
+
+void MasterSM::OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered, DWORD error) {
+ pending_count_--;
+ if (context == &out_context_) {
+ if (!error)
+ return;
+ return Fail();
+ }
+
+ int bytes_read = static_cast<int>(bytes_transfered);
+ if (bytes_read < sizeof(Message) && state_ != MASTER_END &&
+ state_ != MASTER_CONNECT) {
+ printf("Communication breakdown\n");
+ return Fail();
+ }
+
+ switch (state_) {
+ case MASTER_CONNECT:
+ SendGetPrevEntry();
+ break;
+ case MASTER_GET_ENTRY:
+ DoGetEntry();
+ break;
+ case MASTER_GET_KEY:
+ DoGetKey(bytes_read);
+ break;
+ case MASTER_GET_USE_TIMES:
+ DoGetUseTimes();
+ break;
+ case MASTER_GET_DATA_SIZE:
+ DoGetDataSize();
+ break;
+ case MASTER_READ_DATA:
+ DoReadData(bytes_read);
+ break;
+ case MASTER_END:
+ if (!IsPending())
+ DoEnd();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+bool MasterSM::DoInit() {
+ DEBUGMSG("Master DoInit\n");
+ DCHECK(state_ == MASTER_INITIAL);
+
+ copied_entries_ = 0;
+ remote_entry_ = 0;
+
+ if (ConnectChannel()) {
+ SendGetPrevEntry();
+ // If we don't have pending operations we couldn't connect.
+ return IsPending();
+ }
+
+ state_ = MASTER_CONNECT;
+ return true;
+}
+
+void MasterSM::SendGetPrevEntry() {
+ DEBUGMSG("Master SendGetPrevEntry\n");
+ state_ = MASTER_GET_ENTRY;
+ Message msg;
+ msg.command = GET_PREV_ENTRY;
+ msg.long_arg1 = remote_entry_;
+ SendMsg(msg);
+}
+
+void MasterSM::DoGetEntry() {
+ DEBUGMSG("Master DoGetEntry\n");
+ DCHECK(state_ == MASTER_GET_ENTRY);
+ DCHECK(input_->msg.command == GET_PREV_ENTRY);
+ if (input_->msg.result != RESULT_OK)
+ return Fail();
+
+ if (!input_->msg.long_arg1) {
+ printf("Done: %d entries copied over.\n", copied_entries_);
+ return SendQuit();
+ }
+ remote_entry_ = input_->msg.long_arg1;
+ state_ = MASTER_GET_KEY;
+ Message msg;
+ msg.command = GET_KEY;
+ msg.long_arg1 = remote_entry_;
+ SendMsg(msg);
+}
+
+void MasterSM::DoGetKey(int bytes_read) {
+ DEBUGMSG("Master DoGetKey\n");
+ DCHECK(state_ == MASTER_GET_KEY);
+ DCHECK(input_->msg.command == GET_KEY);
+ if (input_->msg.result == RESULT_NAME_OVERFLOW)
+ // The key is too long. Just move on.
+ return SendGetPrevEntry();
+
+ if (input_->msg.result != RESULT_OK)
+ return Fail();
+
+ std::string key(input_->buffer);
+ DCHECK(key.size() == input_->msg.buffer_bytes - 1);
+ if (!cache_->CreateEntry(key, reinterpret_cast<disk_cache::Entry**>(&entry_)))
+ return Fail();
+
+ if (key.size() < 60) {
+ DEBUGMSG("Entry \"%s\" created\n", key.c_str());
+ } else {
+ DEBUGMSG("Entry (long name) created\n", key.c_str());
+ }
+ state_ = MASTER_GET_USE_TIMES;
+ Message msg;
+ msg.command = GET_USE_TIMES;
+ msg.long_arg1 = remote_entry_;
+ SendMsg(msg);
+}
+
+void MasterSM::DoGetUseTimes() {
+ DEBUGMSG("Master DoGetUseTimes\n");
+ DCHECK(state_ == MASTER_GET_USE_TIMES);
+ DCHECK(input_->msg.command == GET_USE_TIMES);
+ if (input_->msg.result != RESULT_OK)
+ return Fail();
+
+ last_used_ = base::Time::FromInternalValue(input_->msg.long_arg2);
+ last_modified_ = base::Time::FromInternalValue(input_->msg.long_arg3);
+ stream_ = 0;
+ SendGetDataSize();
+}
+
+void MasterSM::SendGetDataSize() {
+ DEBUGMSG("Master SendGetDataSize (%d)\n", stream_);
+ state_ = MASTER_GET_DATA_SIZE;
+ Message msg;
+ msg.command = GET_DATA_SIZE;
+ msg.arg1 = stream_;
+ msg.long_arg1 = remote_entry_;
+ SendMsg(msg);
+}
+
+void MasterSM::DoGetDataSize() {
+ DEBUGMSG("Master DoGetDataSize: %d\n", input_->msg.arg2);
+ DCHECK(state_ == MASTER_GET_DATA_SIZE);
+ DCHECK(input_->msg.command == GET_DATA_SIZE);
+ if (input_->msg.result == RESULT_INVALID_PARAMETER)
+ // No more streams, move to the next entry.
+ return CloseEntry();
+
+ if (input_->msg.result != RESULT_OK)
+ return Fail();
+
+ bytes_remaining_ = input_->msg.arg2;
+ offset_ = 0;
+ SendReadData();
+}
+
+void MasterSM::CloseEntry() {
+ DEBUGMSG("Master CloseEntry\n");
+ printf("%c\r", copied_entries_ % 2 ? 'x' : '+');
+ entry_->SetTimes(last_used_, last_modified_);
+ entry_->Close();
+ entry_ = NULL;
+ copied_entries_++;
+ SendGetPrevEntry();
+}
+
+void MasterSM::SendReadData() {
+ int read_size = std::min(bytes_remaining_, kBufferSize);
+ DEBUGMSG("Master SendReadData (%d): %d bytes at %d\n", stream_, read_size,
+ offset_);
+ if (bytes_remaining_ <= 0) {
+ stream_++;
+ if (stream_ >= kNumStreams)
+ return CloseEntry();
+ return SendGetDataSize();
+ }
+
+ state_ = MASTER_READ_DATA;
+ Message msg;
+ msg.command = READ_DATA;
+ msg.arg1 = stream_;
+ msg.arg2 = read_size;
+ msg.arg3 = offset_;
+ msg.long_arg1 = remote_entry_;
+ SendMsg(msg);
+}
+
+void MasterSM::DoReadData(int bytes_read) {
+ DEBUGMSG("Master DoReadData: %d bytes\n", input_->msg.buffer_bytes);
+ DCHECK(state_ == MASTER_READ_DATA);
+ DCHECK(input_->msg.command == READ_DATA);
+ if (input_->msg.result != RESULT_OK)
+ return Fail();
+
+ int read_size = input_->msg.buffer_bytes;
+ if (read_size != entry_->WriteData(stream_, offset_, input_->buffer,
+ read_size, NULL, false))
+ return Fail();
+
+ offset_ += read_size;
+ bytes_remaining_ -= read_size;
+ // Read some more.
+ SendReadData();
+}
+
+void MasterSM::SendQuit() {
+ DEBUGMSG("Master SendQuit\n");
+ state_ = MASTER_END;
+ Message msg;
+ msg.command = QUIT;
+ SendMsg(msg);
+ if (!IsPending())
+ DoEnd();
+}
+
+void MasterSM::DoEnd() {
+ DEBUGMSG("Master DoEnd\n");
+ MessageLoop::current()->PostTask(FROM_HERE, new MessageLoop::QuitTask());
+}
+
+void MasterSM::Fail() {
+ DEBUGMSG("Master Fail\n");
+ printf("Unexpected failure\n");
+ SendQuit();
+}
+
+// -----------------------------------------------------------------------
+
+class SlaveSM : public BaseSM {
+ public:
+ SlaveSM(disk_cache::BackendImpl* cache, HANDLE channel)
+ : BaseSM(cache, channel), iterator_(NULL) {}
+ virtual ~SlaveSM();
+
+ bool DoInit();
+ virtual void OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered, DWORD error);
+
+ private:
+ enum {
+ SLAVE_INITIAL = 0,
+ SLAVE_WAITING,
+ SLAVE_END
+ };
+
+ void DoGetNextEntry();
+ void DoGetPrevEntry();
+ int32 GetEntryFromList();
+ void DoCloseEntry();
+ void DoGetKey();
+ void DoGetUseTimes();
+ void DoGetDataSize();
+ void DoReadData();
+ void DoEnd();
+ void Fail();
+
+ void* iterator_;
+};
+
+SlaveSM::~SlaveSM() {
+ if (iterator_)
+ cache_->EndEnumeration(&iterator_);
+}
+
+void SlaveSM::OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered, DWORD error) {
+ pending_count_--;
+ if (state_ == SLAVE_END) {
+ if (IsPending())
+ return;
+ return DoEnd();
+ }
+
+ if (context == &out_context_) {
+ if (!error)
+ return;
+ return Fail();
+ }
+
+ int bytes_read = static_cast<int>(bytes_transfered);
+ if (bytes_read < sizeof(Message)) {
+ printf("Communication breakdown\n");
+ return Fail();
+ }
+ DCHECK(state_ == SLAVE_WAITING);
+
+ switch (input_->msg.command) {
+ case GET_NEXT_ENTRY:
+ DoGetNextEntry();
+ break;
+ case GET_PREV_ENTRY:
+ DoGetPrevEntry();
+ break;
+ case CLOSE_ENTRY:
+ DoCloseEntry();
+ break;
+ case GET_KEY:
+ DoGetKey();
+ break;
+ case GET_USE_TIMES:
+ DoGetUseTimes();
+ break;
+ case GET_DATA_SIZE:
+ DoGetDataSize();
+ break;
+ case READ_DATA:
+ DoReadData();
+ break;
+ case QUIT:
+ DoEnd();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+bool SlaveSM::DoInit() {
+ DEBUGMSG("\t\t\tSlave DoInit\n");
+ DCHECK(state_ == SLAVE_INITIAL);
+ state_ = SLAVE_WAITING;
+ return ReceiveMsg();
+}
+
+void SlaveSM::DoGetNextEntry() {
+ DEBUGMSG("\t\t\tSlave DoGetNextEntry\n");
+ Message msg;
+ msg.command = GET_NEXT_ENTRY;
+
+ if (input_->msg.arg1) {
+ // We only support one list.
+ msg.result = RESULT_UNKNOWN_COMMAND;
+ } else {
+ msg.result = GetEntryFromList();
+ msg.long_arg1 = reinterpret_cast<int64>(entry_);
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoGetPrevEntry() {
+ DEBUGMSG("\t\t\tSlave DoGetPrevEntry\n");
+ Message msg;
+ msg.command = GET_PREV_ENTRY;
+
+ if (input_->msg.arg1) {
+ // We only support one list.
+ msg.result = RESULT_UNKNOWN_COMMAND;
+ } else {
+ msg.result = GetEntryFromList();
+ msg.long_arg1 = reinterpret_cast<int64>(entry_);
+ }
+ SendMsg(msg);
+}
+
+// Move to the next or previous entry on the list.
+int32 SlaveSM::GetEntryFromList() {
+ DEBUGMSG("\t\t\tSlave GetEntryFromList\n");
+ if (input_->msg.long_arg1 != reinterpret_cast<int64>(entry_))
+ return RESULT_INVALID_PARAMETER;
+
+ // We know that the current iteration is valid.
+ if (entry_)
+ entry_->Close();
+
+ bool ret;
+ if (input_->msg.command == GET_NEXT_ENTRY) {
+ ret = cache_->OpenNextEntry(&iterator_,
+ reinterpret_cast<disk_cache::Entry**>(&entry_));
+ } else {
+ DCHECK(input_->msg.command == GET_PREV_ENTRY);
+ ret = cache_->OpenPrevEntry(&iterator_,
+ reinterpret_cast<disk_cache::Entry**>(&entry_));
+ }
+
+ if (!ret)
+ entry_ = NULL;
+
+ if (!entry_)
+ DEBUGMSG("\t\t\tSlave end of list\n");
+
+ return RESULT_OK;
+}
+
+void SlaveSM::DoCloseEntry() {
+ DEBUGMSG("\t\t\tSlave DoCloseEntry\n");
+ Message msg;
+  msg.command = CLOSE_ENTRY;
+
+ if (!entry_ || input_->msg.long_arg1 != reinterpret_cast<int64>(entry_)) {
+ msg.result = RESULT_INVALID_PARAMETER;
+ } else {
+ entry_->Close();
+ entry_ = NULL;
+ cache_->EndEnumeration(&iterator_);
+ msg.result = RESULT_OK;
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoGetKey() {
+ DEBUGMSG("\t\t\tSlave DoGetKey\n");
+ Message msg;
+ msg.command = GET_KEY;
+
+ if (!entry_ || input_->msg.long_arg1 != reinterpret_cast<int64>(entry_)) {
+ msg.result = RESULT_INVALID_PARAMETER;
+ } else {
+ std::string key = entry_->GetKey();
+ msg.buffer_bytes = std::min(key.size() + 1,
+ static_cast<size_t>(kBufferSize));
+ memcpy(output_->buffer, key.c_str(), msg.buffer_bytes);
+ if (msg.buffer_bytes != key.size() + 1) {
+ // We don't support moving this entry. Just tell the master.
+ msg.result = RESULT_NAME_OVERFLOW;
+ } else {
+ msg.result = RESULT_OK;
+ }
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoGetUseTimes() {
+ DEBUGMSG("\t\t\tSlave DoGetUseTimes\n");
+ Message msg;
+ msg.command = GET_USE_TIMES;
+
+ if (!entry_ || input_->msg.long_arg1 != reinterpret_cast<int64>(entry_)) {
+ msg.result = RESULT_INVALID_PARAMETER;
+ } else {
+ msg.long_arg2 = entry_->GetLastUsed().ToInternalValue();
+ msg.long_arg3 = entry_->GetLastModified().ToInternalValue();
+ msg.result = RESULT_OK;
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoGetDataSize() {
+ DEBUGMSG("\t\t\tSlave DoGetDataSize\n");
+ Message msg;
+ msg.command = GET_DATA_SIZE;
+
+ int stream = input_->msg.arg1;
+ if (!entry_ || input_->msg.long_arg1 != reinterpret_cast<int64>(entry_) ||
+ stream < 0 || stream >= kNumStreams) {
+ msg.result = RESULT_INVALID_PARAMETER;
+ } else {
+ msg.arg1 = stream;
+ msg.arg2 = entry_->GetDataSize(stream);
+ msg.result = RESULT_OK;
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoReadData() {
+ DEBUGMSG("\t\t\tSlave DoReadData\n");
+ Message msg;
+ msg.command = READ_DATA;
+
+ int stream = input_->msg.arg1;
+ int size = input_->msg.arg2;
+ if (!entry_ || input_->msg.long_arg1 != reinterpret_cast<int64>(entry_) ||
+ stream < 0 || stream > 1 || size > kBufferSize) {
+ msg.result = RESULT_INVALID_PARAMETER;
+ } else {
+ int ret = entry_->ReadData(stream, input_->msg.arg3, output_->buffer, size,
+ NULL);
+
+ msg.buffer_bytes = (ret < 0) ? 0 : ret;
+ msg.result = RESULT_OK;
+ }
+ SendMsg(msg);
+}
+
+void SlaveSM::DoEnd() {
+ DEBUGMSG("\t\t\tSlave DoEnd\n");
+ MessageLoop::current()->PostTask(FROM_HERE, new MessageLoop::QuitTask());
+}
+
+void SlaveSM::Fail() {
+ DEBUGMSG("\t\t\tSlave Fail\n");
+ printf("Unexpected failure\n");
+ state_ = SLAVE_END;
+ if (IsPending()) {
+ CancelIo(channel_);
+ } else {
+ DoEnd();
+ }
+}
+
+} // namespace.
+
+// -----------------------------------------------------------------------
+
+HANDLE CreateServer(std::wstring* pipe_number) {
+ std::wstring pipe_name(kPipePrefix);
+ srand(static_cast<int>(base::Time::Now().ToInternalValue()));
+ *pipe_number = IntToWString(rand());
+ pipe_name.append(*pipe_number);
+
+ DWORD mode = PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE |
+ FILE_FLAG_OVERLAPPED;
+
+ return CreateNamedPipe(pipe_name.c_str(), mode, 0, 1, kChannelSize,
+ kChannelSize, 0, NULL);
+}
+
+// This is the controller process for an upgrade operation.
+int Upgrade(const std::wstring output_path, HANDLE pipe) {
+ MessageLoop loop(MessageLoop::TYPE_IO);
+ disk_cache::BackendImpl cache(output_path);
+ if (!cache.Init()) {
+ printf("Unable to initialize new files\n");
+ return -1;
+ }
+
+ MasterSM master(&cache, pipe);
+ if (!master.DoInit()) {
+ printf("Unable to talk with the helper\n");
+ return -1;
+ }
+
+ loop.Run();
+ return 0;
+}
+
+// This process will only execute commands from the controller.
+int RunSlave(const std::wstring input_path, const std::wstring pipe_number) {
+ MessageLoop loop(MessageLoop::TYPE_IO);
+
+ ScopedHandle pipe(OpenServer(pipe_number));
+ if (!pipe.IsValid()) {
+ printf("Unable to open the server pipe\n");
+ return -1;
+ }
+
+ disk_cache::BackendImpl cache(input_path);
+ if (!cache.Init()) {
+ printf("Unable to open cache files\n");
+ return -1;
+ }
+ cache.SetUpgradeMode();
+
+ SlaveSM slave(&cache, pipe);
+ if (!slave.DoInit()) {
+ printf("Unable to talk with the main process\n");
+ return -1;
+ }
+
+ loop.Run();
+ return 0;
+}