author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-07-15 21:03:43 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-07-15 21:03:43 +0000
commit     2235b22b88260fde392b753b5d7bb7904e5efbc6 (patch)
tree       f60319a8097731b4d49bc875a185d948942c1b47
parent     099c6c2f4118361105354eec5b36c720a3c20a00 (diff)
Disk cache: Code cleanup after the last cl.
BUG=26730
TEST=none
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@52527 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  net/disk_cache/backend_impl.cc          | 418
-rw-r--r--  net/disk_cache/backend_impl.h           |  40
-rw-r--r--  net/disk_cache/backend_unittest.cc      |   2
-rw-r--r--  net/disk_cache/disk_cache_test_util.cc  |   3
-rw-r--r--  net/disk_cache/entry_impl.cc            | 214
-rw-r--r--  net/disk_cache/entry_impl.h             |   3
6 files changed, 316 insertions(+), 364 deletions(-)
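At a high level, this CL folds the old bool-returning Backend methods (Init, DoomEntry, DoomAllEntries, DoomEntriesBetween, DoomEntriesSince) into the Sync* methods used by the background IO queue, so every synchronous entry point now reports a net error code, and it regroups the *Impl workers next to them. A minimal sketch of the recurring conversion, with a made-up DoOneStep() standing in for the real initialization steps shown in the diff below:

int BackendImpl::SyncInit() {
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;                       // was: return false;
  if (!DoOneStep())                               // hypothetical step; each failing
    return net::ERR_FAILED;                       // check now maps to a net error
  return disabled_ ? net::ERR_FAILED : net::OK;   // was: return !disabled_;
}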
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 94e0b08..92a4ae3 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -335,21 +335,14 @@ int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
}
int BackendImpl::SyncInit() {
- if (Init())
- return net::OK;
-
- return net::ERR_FAILED;
-}
-
-bool BackendImpl::Init() {
DCHECK(!init_);
if (init_)
- return false;
+ return net::ERR_FAILED;
bool create_files = false;
if (!InitBackingStore(&create_files)) {
ReportError(ERR_STORAGE_ERROR);
- return false;
+ return net::ERR_FAILED;
}
num_refs_ = num_pending_io_ = max_refs_ = 0;
@@ -367,20 +360,20 @@ bool BackendImpl::Init() {
if (data_->header.experiment != 0 && cache_type_ != net::DISK_CACHE) {
// No experiment for other caches.
- return false;
+ return net::ERR_FAILED;
}
if (!(user_flags_ & disk_cache::kNoRandom)) {
// The unit test controls directly what to test.
if (!InitExperiment(&data_->header.experiment))
- return false;
+ return net::ERR_FAILED;
new_eviction_ = (cache_type_ == net::DISK_CACHE);
}
if (!CheckIndex()) {
ReportError(ERR_INIT_FAILED);
- return false;
+ return net::ERR_FAILED;
}
// We don't care if the value overflows. The only thing we care about is that
@@ -399,12 +392,12 @@ bool BackendImpl::Init() {
}
if (!block_files_.Init(create_files))
- return false;
+ return net::ERR_FAILED;
// stats_ and rankings_ may end up calling back to us so we better be enabled.
disabled_ = false;
if (!stats_.Init(this, &data_->header.stats))
- return false;
+ return net::ERR_FAILED;
disabled_ = !rankings_.Init(this, new_eviction_);
eviction_.Init(this);
@@ -413,7 +406,7 @@ bool BackendImpl::Init() {
if (cache_type() == net::DISK_CACHE)
SetFieldTrialInfo(GetSizeGroup());
- return !disabled_;
+ return disabled_ ? net::ERR_FAILED : net::OK;
}
int BackendImpl::Init(CompletionCallback* callback) {
@@ -434,20 +427,6 @@ BackendImpl::~BackendImpl() {
}
}
-void BackendImpl::CleanupCache() {
- Trace("Backend Cleanup");
- if (init_) {
- if (data_)
- data_->header.crash = 0;
-
- timer_.Stop();
- File::WaitForPendingIO(&num_pending_io_);
- DCHECK(!num_refs_);
- }
- factory_.RevokeAll();
- done_.Signal();
-}
-
// ------------------------------------------------------------------------
int32 BackendImpl::GetEntryCount() const {
@@ -465,40 +444,6 @@ int32 BackendImpl::GetEntryCount() const {
return not_deleted;
}
-EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
- if (disabled_)
- return NULL;
-
- TimeTicks start = TimeTicks::Now();
- uint32 hash = Hash(key);
-
- EntryImpl* cache_entry = MatchEntry(key, hash, false);
- if (!cache_entry) {
- stats_.OnEvent(Stats::OPEN_MISS);
- return NULL;
- }
-
- if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
- // The entry was already evicted.
- cache_entry->Release();
- stats_.OnEvent(Stats::OPEN_MISS);
- return NULL;
- }
-
- eviction_.OnOpenEntry(cache_entry);
- entry_count_++;
-
- CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
- stats_.OnEvent(Stats::OPEN_HIT);
- return cache_entry;
-}
-
-int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
- DCHECK(entry);
- *entry = OpenEntryImpl(key);
- return (*entry) ? net::OK : net::ERR_FAILED;
-}
-
int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
CompletionCallback* callback) {
DCHECK(callback);
@@ -506,90 +451,6 @@ int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
return net::ERR_IO_PENDING;
}
-EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
- if (disabled_ || key.empty())
- return NULL;
-
- TimeTicks start = TimeTicks::Now();
- uint32 hash = Hash(key);
-
- scoped_refptr<EntryImpl> parent;
- Addr entry_address(data_->table[hash & mask_]);
- if (entry_address.is_initialized()) {
- // We have an entry already. It could be the one we are looking for, or just
- // a hash conflict.
- EntryImpl* old_entry = MatchEntry(key, hash, false);
- if (old_entry)
- return ResurrectEntry(old_entry);
-
- EntryImpl* parent_entry = MatchEntry(key, hash, true);
- if (!parent_entry) {
- NOTREACHED();
- return NULL;
- }
- parent.swap(&parent_entry);
- }
-
- int num_blocks;
- size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
- if (key.size() < key1_len ||
- key.size() > static_cast<size_t>(kMaxInternalKeyLength))
- num_blocks = 1;
- else
- num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);
-
- if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- Addr node_address(0);
- if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
- block_files_.DeleteBlock(entry_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, entry_address));
- IncreaseNumRefs();
-
- if (!cache_entry->CreateEntry(node_address, key, hash)) {
- block_files_.DeleteBlock(entry_address, false);
- block_files_.DeleteBlock(node_address, false);
- LOG(ERROR) << "Create entry failed " << key.c_str();
- stats_.OnEvent(Stats::CREATE_ERROR);
- return NULL;
- }
-
- // We are not failing the operation; let's add this to the map.
- open_entries_[entry_address.value()] = cache_entry;
-
- if (parent.get())
- parent->SetNextAddress(entry_address);
-
- block_files_.GetFile(entry_address)->Store(cache_entry->entry());
- block_files_.GetFile(node_address)->Store(cache_entry->rankings());
-
- IncreaseNumEntries();
- eviction_.OnCreateEntry(cache_entry);
- entry_count_++;
- if (!parent.get())
- data_->table[hash & mask_] = entry_address.value();
-
- CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
- stats_.OnEvent(Stats::CREATE_HIT);
- Trace("create entry hit ");
- return cache_entry.release();
-}
-
-int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
- DCHECK(entry);
- *entry = CreateEntryImpl(key);
- return (*entry) ? net::OK : net::ERR_FAILED;
-}
-
int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
CompletionCallback* callback) {
DCHECK(callback);
@@ -598,23 +459,16 @@ int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
}
int BackendImpl::SyncDoomEntry(const std::string& key) {
- if (DoomEntry(key))
- return net::OK;
-
- return net::ERR_FAILED;
-}
-
-bool BackendImpl::DoomEntry(const std::string& key) {
if (disabled_)
- return false;
+ return net::ERR_FAILED;
EntryImpl* entry = OpenEntryImpl(key);
if (!entry)
- return false;
+ return net::ERR_FAILED;
entry->DoomImpl();
entry->Release();
- return true;
+ return net::OK;
}
int BackendImpl::DoomEntry(const std::string& key,
@@ -625,24 +479,17 @@ int BackendImpl::DoomEntry(const std::string& key,
}
int BackendImpl::SyncDoomAllEntries() {
- if (DoomAllEntries())
- return net::OK;
-
- return net::ERR_FAILED;
-}
-
-bool BackendImpl::DoomAllEntries() {
if (!num_refs_) {
PrepareForRestart();
DeleteCache(path_, false);
- return Init();
+ return SyncInit();
} else {
if (disabled_)
- return false;
+ return net::ERR_FAILED;
eviction_.TrimCache(true);
stats_.OnEvent(Stats::DOOM_CACHE);
- return true;
+ return net::OK;
}
}
@@ -654,27 +501,19 @@ int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
const base::Time end_time) {
- if (DoomEntriesBetween(initial_time, end_time))
- return net::OK;
-
- return net::ERR_FAILED;
-}
-
-bool BackendImpl::DoomEntriesBetween(const Time initial_time,
- const Time end_time) {
if (end_time.is_null())
- return DoomEntriesSince(initial_time);
+ return SyncDoomEntriesSince(initial_time);
DCHECK(end_time >= initial_time);
if (disabled_)
- return false;
+ return net::ERR_FAILED;
EntryImpl* node;
void* iter = NULL;
EntryImpl* next = OpenNextEntryImpl(&iter);
if (!next)
- return true;
+ return net::OK;
while (next) {
node = next;
@@ -693,7 +532,7 @@ bool BackendImpl::DoomEntriesBetween(const Time initial_time,
node->Release();
}
- return true;
+ return net::OK;
}
int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
@@ -704,29 +543,22 @@ int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
return net::ERR_IO_PENDING;
}
-int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
- if (DoomEntriesSince(initial_time))
- return net::OK;
-
- return net::ERR_FAILED;
-}
-
// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
-bool BackendImpl::DoomEntriesSince(const Time initial_time) {
+int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
if (disabled_)
- return false;
+ return net::ERR_FAILED;
for (;;) {
void* iter = NULL;
EntryImpl* entry = OpenNextEntryImpl(&iter);
if (!entry)
- return true;
+ return net::OK;
if (initial_time > entry->GetLastUsed()) {
entry->Release();
SyncEndEnumeration(iter);
- return true;
+ return net::OK;
}
entry->DoomImpl();
@@ -742,15 +574,6 @@ int BackendImpl::DoomEntriesSince(const base::Time initial_time,
return net::ERR_IO_PENDING;
}
-int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
- *next_entry = OpenNextEntryImpl(iter);
- return (*next_entry) ? net::OK : net::ERR_FAILED;
-}
-
-EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
- return OpenFollowingEntry(true, iter);
-}
-
int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
CompletionCallback* callback) {
DCHECK(callback);
@@ -758,11 +581,6 @@ int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
return net::ERR_IO_PENDING;
}
-void BackendImpl::SyncEndEnumeration(void* iter) {
- scoped_ptr<Rankings::Iterator> iterator(
- reinterpret_cast<Rankings::Iterator*>(iter));
-}
-
void BackendImpl::EndEnumeration(void** iter) {
background_queue_.EndEnumeration(*iter);
*iter = NULL;
@@ -795,6 +613,170 @@ void BackendImpl::GetStats(StatsItems* stats) {
// ------------------------------------------------------------------------
+void BackendImpl::CleanupCache() {
+ Trace("Backend Cleanup");
+ if (init_) {
+ if (data_)
+ data_->header.crash = 0;
+
+ timer_.Stop();
+ File::WaitForPendingIO(&num_pending_io_);
+ DCHECK(!num_refs_);
+ }
+ factory_.RevokeAll();
+ done_.Signal();
+}
+
+// ------------------------------------------------------------------------
+
+int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
+ CompletionCallback* callback) {
+ DCHECK(callback);
+ background_queue_.OpenPrevEntry(iter, prev_entry, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
+ DCHECK(entry);
+ *entry = OpenEntryImpl(key);
+ return (*entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
+ DCHECK(entry);
+ *entry = CreateEntryImpl(key);
+ return (*entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
+ *next_entry = OpenNextEntryImpl(iter);
+ return (*next_entry) ? net::OK : net::ERR_FAILED;
+}
+
+int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
+ *prev_entry = OpenPrevEntryImpl(iter);
+ return (*prev_entry) ? net::OK : net::ERR_FAILED;
+}
+
+void BackendImpl::SyncEndEnumeration(void* iter) {
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(iter));
+}
+
+EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
+ if (disabled_)
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = Hash(key);
+
+ EntryImpl* cache_entry = MatchEntry(key, hash, false);
+ if (!cache_entry) {
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return NULL;
+ }
+
+ if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
+ // The entry was already evicted.
+ cache_entry->Release();
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return NULL;
+ }
+
+ eviction_.OnOpenEntry(cache_entry);
+ entry_count_++;
+
+ CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
+ stats_.OnEvent(Stats::OPEN_HIT);
+ return cache_entry;
+}
+
+EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
+ if (disabled_ || key.empty())
+ return NULL;
+
+ TimeTicks start = TimeTicks::Now();
+ uint32 hash = Hash(key);
+
+ scoped_refptr<EntryImpl> parent;
+ Addr entry_address(data_->table[hash & mask_]);
+ if (entry_address.is_initialized()) {
+ // We have an entry already. It could be the one we are looking for, or just
+ // a hash conflict.
+ EntryImpl* old_entry = MatchEntry(key, hash, false);
+ if (old_entry)
+ return ResurrectEntry(old_entry);
+
+ EntryImpl* parent_entry = MatchEntry(key, hash, true);
+ if (!parent_entry) {
+ NOTREACHED();
+ return NULL;
+ }
+ parent.swap(&parent_entry);
+ }
+
+ int num_blocks;
+ size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
+ if (key.size() < key1_len ||
+ key.size() > static_cast<size_t>(kMaxInternalKeyLength))
+ num_blocks = 1;
+ else
+ num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);
+
+ if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ Addr node_address(0);
+ if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
+ block_files_.DeleteBlock(entry_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, entry_address));
+ IncreaseNumRefs();
+
+ if (!cache_entry->CreateEntry(node_address, key, hash)) {
+ block_files_.DeleteBlock(entry_address, false);
+ block_files_.DeleteBlock(node_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return NULL;
+ }
+
+ // We are not failing the operation; let's add this to the map.
+ open_entries_[entry_address.value()] = cache_entry;
+
+ if (parent.get())
+ parent->SetNextAddress(entry_address);
+
+ block_files_.GetFile(entry_address)->Store(cache_entry->entry());
+ block_files_.GetFile(node_address)->Store(cache_entry->rankings());
+
+ IncreaseNumEntries();
+ eviction_.OnCreateEntry(cache_entry);
+ entry_count_++;
+ if (!parent.get())
+ data_->table[hash & mask_] = entry_address.value();
+
+ CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
+ stats_.OnEvent(Stats::CREATE_HIT);
+ Trace("create entry hit ");
+ return cache_entry.release();
+}
+
+EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
+ return OpenFollowingEntry(true, iter);
+}
+
+EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
+ return OpenFollowingEntry(false, iter);
+}
+
bool BackendImpl::SetMaxSize(int max_bytes) {
COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
if (max_bytes < 0)
@@ -1082,7 +1064,7 @@ void BackendImpl::CriticalError(int error) {
void BackendImpl::ReportError(int error) {
// We transmit positive numbers, instead of direct error codes.
- DCHECK(error <= 0);
+ DCHECK_LE(error, 0);
CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}
@@ -1194,22 +1176,6 @@ int BackendImpl::SelfCheck() {
return CheckAllEntries();
}
-int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
- *prev_entry = OpenPrevEntryImpl(iter);
- return (*prev_entry) ? net::OK : net::ERR_FAILED;
-}
-
-int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
- CompletionCallback* callback) {
- DCHECK(callback);
- background_queue_.OpenPrevEntry(iter, prev_entry, callback);
- return net::ERR_IO_PENDING;
-}
-
-EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
- return OpenFollowingEntry(false, iter);
-}
-
// ------------------------------------------------------------------------
// We just created a new file so we're going to write the header and set the
@@ -1519,7 +1485,7 @@ EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
if (entries[i].get()) {
access_times[i] = entries[i]->GetLastUsed();
if (newest < 0) {
- DCHECK(oldest < 0);
+ DCHECK_LT(oldest, 0);
newest = oldest = i;
continue;
}
@@ -1648,7 +1614,7 @@ void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) {
DestroyInvalidEntry(entry);
entry->Release();
}
- DoomEntry(key);
+ SyncDoomEntry(key);
if (!next_entry)
return;
@@ -1669,7 +1635,7 @@ void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) {
void BackendImpl::AddStorageSize(int32 bytes) {
data_->header.num_bytes += bytes;
- DCHECK(data_->header.num_bytes >= 0);
+ DCHECK_GE(data_->header.num_bytes, 0);
if (data_->header.num_bytes > max_size_)
eviction_.TrimCache(false);
@@ -1677,7 +1643,7 @@ void BackendImpl::AddStorageSize(int32 bytes) {
void BackendImpl::SubstractStorageSize(int32 bytes) {
data_->header.num_bytes -= bytes;
- DCHECK(data_->header.num_bytes >= 0);
+ DCHECK_GE(data_->header.num_bytes, 0);
}
void BackendImpl::IncreaseNumRefs() {
@@ -1697,7 +1663,7 @@ void BackendImpl::DecreaseNumRefs() {
void BackendImpl::IncreaseNumEntries() {
data_->header.num_entries++;
- DCHECK(data_->header.num_entries > 0);
+ DCHECK_GT(data_->header.num_entries, 0);
}
void BackendImpl::DecreaseNumEntries() {
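After this reshuffle, backend_impl.cc reads in three layers: the public asynchronous methods post work to background_queue_ and return net::ERR_IO_PENDING, the Sync* methods run on the cache thread and translate results into net::OK or net::ERR_FAILED, and the *Impl methods do the actual work on EntryImpl pointers. A sketch of that shape using a hypothetical "Touch" operation (these names do not exist in the tree; they only mirror the pattern the real Open/Create/Doom methods above follow):

int BackendImpl::TouchEntry(const std::string& key, CompletionCallback* callback) {
  DCHECK(callback);
  background_queue_.TouchEntry(key, callback);   // hypothetical queue operation
  return net::ERR_IO_PENDING;                    // the async layer never blocks
}

int BackendImpl::SyncTouchEntry(const std::string& key) {
  EntryImpl* entry = OpenEntryImpl(key);         // real worker defined above
  if (!entry)
    return net::ERR_FAILED;
  entry->Release();
  return net::OK;
}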
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index e15e429..98fc16d 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -62,12 +62,7 @@ class BackendImpl : public Backend {
Backend** backend, CompletionCallback* callback);
// Performs general initialization for this current instance of the cache.
- bool Init(); // Deprecated.
int Init(CompletionCallback* callback);
- int SyncInit();
-
- // Performs final cleanup on destruction.
- void CleanupCache();
// Backend interface.
virtual int32 GetEntryCount() const;
@@ -87,6 +82,14 @@ class BackendImpl : public Backend {
virtual void EndEnumeration(void** iter);
virtual void GetStats(StatsItems* stats);
+ // Performs the actual initialization and final cleanup on destruction.
+ int SyncInit();
+ void CleanupCache();
+
+ // Same behavior as OpenNextEntry but walks the list from back to front.
+ int OpenPrevEntry(void** iter, Entry** prev_entry,
+ CompletionCallback* callback);
+
// Synchronous implementation of the asynchronous interface.
int SyncOpenEntry(const std::string& key, Entry** entry);
int SyncCreateEntry(const std::string& key, Entry** entry);
@@ -96,8 +99,15 @@ class BackendImpl : public Backend {
const base::Time end_time);
int SyncDoomEntriesSince(const base::Time initial_time);
int SyncOpenNextEntry(void** iter, Entry** next_entry);
+ int SyncOpenPrevEntry(void** iter, Entry** prev_entry);
void SyncEndEnumeration(void* iter);
+ // Open or create an entry for the given |key| or |iter|.
+ EntryImpl* OpenEntryImpl(const std::string& key);
+ EntryImpl* CreateEntryImpl(const std::string& key);
+ EntryImpl* OpenNextEntryImpl(void** iter);
+ EntryImpl* OpenPrevEntryImpl(void** iter);
+
// Sets the maximum size for the total amount of data stored by this instance.
bool SetMaxSize(int max_bytes);
@@ -227,26 +237,6 @@ class BackendImpl : public Backend {
// or an error code (negative value).
int SelfCheck();
- // Same behavior as OpenNextEntry but walks the list from back to front.
- int OpenPrevEntry(void** iter, Entry** prev_entry,
- CompletionCallback* callback);
- int SyncOpenPrevEntry(void** iter, Entry** prev_entry);
-
- // Old Backend interface.
- bool OpenEntry(const std::string& key, Entry** entry);
- bool CreateEntry(const std::string& key, Entry** entry);
- bool DoomEntry(const std::string& key);
- bool DoomAllEntries();
- bool DoomEntriesBetween(const base::Time initial_time,
- const base::Time end_time);
- bool DoomEntriesSince(const base::Time initial_time);
-
- // Open or create an entry for the given |key| or |iter|.
- EntryImpl* OpenEntryImpl(const std::string& key);
- EntryImpl* CreateEntryImpl(const std::string& key);
- EntryImpl* OpenNextEntryImpl(void** iter);
- EntryImpl* OpenPrevEntryImpl(void** iter);
-
private:
typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
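With the old bool-returning declarations gone from backend_impl.h, callers have to go through the Sync* group and compare against net::OK. A short sketch of the updated call pattern, assuming |cache| is a BackendImpl used on the cache thread:

// before: if (!cache->Init()) return;     // removed bool API
if (cache->SyncInit() != net::OK)
  return;
void* iter = NULL;
Entry* entry = NULL;
if (cache->SyncOpenNextEntry(&iter, &entry) == net::OK) {
  entry->Close();                    // callers still own returned entries
  cache->SyncEndEnumeration(iter);   // release the iterator when stopping early
}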
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
index 58d8da9..7bb27d2 100644
--- a/net/disk_cache/backend_unittest.cc
+++ b/net/disk_cache/backend_unittest.cc
@@ -1557,7 +1557,7 @@ TEST_F(DiskCacheTest, Backend_UsageStats) {
path, base::MessageLoopProxy::CreateForCurrentThread()));
ASSERT_TRUE(NULL != cache.get());
cache->SetUnitTestMode();
- ASSERT_TRUE(cache->Init());
+ ASSERT_EQ(net::OK, cache->SyncInit());
// Wait for a callback that never comes... about 2 secs :). The message loop
// has to run to allow invocation of the usage timer.
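The test-side change is mechanical but worth spelling out: assertions now name the expected error code instead of a truthy bool. A gtest-style sketch, with the constructor arguments taken from the surrounding context of this hunk:

scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
    path, base::MessageLoopProxy::CreateForCurrentThread()));
ASSERT_TRUE(NULL != cache.get());
cache->SetUnitTestMode();
ASSERT_EQ(net::OK, cache->SyncInit());   // was: ASSERT_TRUE(cache->Init());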
diff --git a/net/disk_cache/disk_cache_test_util.cc b/net/disk_cache/disk_cache_test_util.cc
index e480ef2..46e33db 100644
--- a/net/disk_cache/disk_cache_test_util.cc
+++ b/net/disk_cache/disk_cache_test_util.cc
@@ -8,6 +8,7 @@
#include "base/file_util.h"
#include "base/message_loop_proxy.h"
#include "base/path_service.h"
+#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/file.h"
@@ -99,7 +100,7 @@ bool CheckCacheIntegrity(const FilePath& path, bool new_eviction) {
if (new_eviction)
cache->SetNewEviction();
cache->SetFlags(disk_cache::kNoRandom);
- if (!cache->Init())
+ if (cache->SyncInit() != net::OK)
return false;
return cache->SelfCheck() >= 0;
}
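Test utilities that still want a boolean answer can wrap the new return value, which is exactly what CheckCacheIntegrity does above. A sketch of such an adapter, assuming only net::OK counts as success:

bool InitCacheForTest(disk_cache::BackendImpl* cache, bool new_eviction) {
  if (new_eviction)
    cache->SetNewEviction();
  cache->SetFlags(disk_cache::kNoRandom);   // keep the experiment deterministic
  return cache->SyncInit() == net::OK;
}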
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 549cea2..c0e7270 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -67,8 +67,8 @@ void SyncCallback::Discard() {
// Clears buffer before offset and after valid_len, knowing that the size of
// buffer is kMaxBlockSize.
void ClearInvalidData(char* buffer, int offset, int valid_len) {
- DCHECK(offset >= 0);
- DCHECK(valid_len >= 0);
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(valid_len, 0);
DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len);
if (offset)
memset(buffer, 0, offset);
@@ -130,14 +130,6 @@ EntryImpl::~EntryImpl() {
backend_->CacheEntryDestroyed(entry_.address());
}
-void EntryImpl::DoomImpl() {
- if (doomed_)
- return;
-
- SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
- backend_->InternalDoomEntry(this);
-}
-
void EntryImpl::Doom() {
backend_->background_queue()->DoomEntryImpl(this);
}
@@ -191,6 +183,110 @@ int32 EntryImpl::GetDataSize(int index) const {
return entry->Data()->data_size[index];
}
+int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* callback) {
+ if (!callback)
+ return ReadDataImpl(index, offset, buf, buf_len, callback);
+
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ CompletionCallback* callback, bool truncate) {
+ if (!callback)
+ return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
+
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
+ truncate, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* callback) {
+ if (!callback)
+ return ReadSparseDataImpl(offset, buf, buf_len, callback);
+
+ backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* callback) {
+ if (!callback)
+ return WriteSparseDataImpl(offset, buf, buf_len, callback);
+
+ backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ return sparse_->GetAvailableRange(offset, len, start);
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback) {
+ backend_->background_queue()->GetAvailableRange(this, offset, len, start,
+ callback);
+ return net::ERR_IO_PENDING;
+}
+
+bool EntryImpl::CouldBeSparse() const {
+ if (sparse_.get())
+ return true;
+
+ scoped_ptr<SparseControl> sparse;
+ sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
+ return sparse->CouldBeSparse();
+}
+
+void EntryImpl::CancelSparseIO() {
+ backend_->background_queue()->CancelSparseIO(this);
+}
+
+int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ backend_->background_queue()->ReadyForSparseIO(this, callback);
+ return net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+void EntryImpl::DoomImpl() {
+ if (doomed_)
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf,
int buf_len, CompletionCallback* callback) {
DCHECK(node_.Data()->dirty);
@@ -254,27 +350,6 @@ int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf,
return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}
-int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* callback) {
- if (!callback)
- return ReadDataImpl(index, offset, buf, buf_len, callback);
-
- DCHECK(node_.Data()->dirty);
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- int entry_size = entry_.Data()->data_size[index];
- if (offset >= entry_size || offset < 0 || !buf_len)
- return 0;
-
- if (buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
- callback);
- return net::ERR_IO_PENDING;
-}
-
int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf,
int buf_len, CompletionCallback* callback,
bool truncate) {
@@ -376,23 +451,6 @@ int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf,
return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}
-int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
- CompletionCallback* callback, bool truncate) {
- if (!callback)
- return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
-
- DCHECK(node_.Data()->dirty);
- if (index < 0 || index >= kNumStreams)
- return net::ERR_INVALID_ARGUMENT;
-
- if (offset < 0 || buf_len < 0)
- return net::ERR_INVALID_ARGUMENT;
-
- backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
- truncate, callback);
- return net::ERR_IO_PENDING;
-}
-
int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
CompletionCallback* callback) {
DCHECK(node_.Data()->dirty);
@@ -407,16 +465,6 @@ int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
return result;
}
-int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* callback) {
- if (!callback)
- return ReadSparseDataImpl(offset, buf, buf_len, callback);
-
- backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
- callback);
- return net::ERR_IO_PENDING;
-}
-
int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf,
int buf_len, CompletionCallback* callback) {
DCHECK(node_.Data()->dirty);
@@ -431,48 +479,6 @@ int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf,
return result;
}
-int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* callback) {
- if (!callback)
- return WriteSparseDataImpl(offset, buf, buf_len, callback);
-
- backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
- callback);
- return net::ERR_IO_PENDING;
-}
-
-int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
- return GetAvailableRange(offset, len, start);
-}
-
-int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- return sparse_->GetAvailableRange(offset, len, start);
-}
-
-int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
- CompletionCallback* callback) {
- backend_->background_queue()->GetAvailableRange(this, offset, len, start,
- callback);
- return net::ERR_IO_PENDING;
-}
-
-bool EntryImpl::CouldBeSparse() const {
- if (sparse_.get())
- return true;
-
- scoped_ptr<SparseControl> sparse;
- sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
- return sparse->CouldBeSparse();
-}
-
-void EntryImpl::CancelSparseIO() {
- backend_->background_queue()->CancelSparseIO(this);
-}
-
void EntryImpl::CancelSparseIOImpl() {
if (!sparse_.get())
return;
@@ -485,14 +491,6 @@ int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) {
return sparse_->ReadyToUse(callback);
}
-int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
- if (!sparse_.get())
- return net::OK;
-
- backend_->background_queue()->ReadyForSparseIO(this, callback);
- return net::ERR_IO_PENDING;
-}
-
// ------------------------------------------------------------------------
uint32 EntryImpl::GetHash() {
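The EntryImpl changes mirror the backend: each public Entry method either runs its *Impl counterpart directly when no callback is given, or validates its arguments and posts the operation to backend_->background_queue(), returning net::ERR_IO_PENDING. A sketch of that dispatch rule with a hypothetical operation (CancelableOp and QueueOp are illustrative names, not real methods):

int EntryImpl::CancelableOp(net::CompletionCallback* callback) {
  if (!callback)
    return CancelableOpImpl(callback);                     // synchronous path
  backend_->background_queue()->QueueOp(this, callback);   // hypothetical queue call
  return net::ERR_IO_PENDING;                              // result arrives via callback
}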
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index 1caece2..9d37fff3a 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -135,9 +135,6 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
~EntryImpl();
- // Old Entry interface.
- int GetAvailableRange(int64 offset, int len, int64* start);
-
// Initializes the storage for an internal or external data block.
bool CreateDataBlock(int index, int size);
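Finally, entry_impl.h drops the old synchronous GetAvailableRange(offset, len, start); per the entry_impl.cc hunks above, asynchronous callers use GetAvailableRange(..., callback) through the background queue, while cache-thread code calls GetAvailableRangeImpl, which initializes the sparse data before delegating to SparseControl. A minimal cache-thread sketch, assuming |entry| is an EntryImpl owned by the caller:

int64 start = 0;
int rv = entry->GetAvailableRangeImpl(offset, len, &start);
if (rv > 0) {
  // |rv| contiguous bytes of sparse data are available beginning at |start|.
}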