author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-23 01:19:55 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-23 01:19:55 +0000
commit     c67287e61638d3cf598cb68c29133558080f1270 (patch)
tree       40f3426ad01efbf096773d523ebec652d96d4316 /net/disk_cache
parent     a438488ef232fdb3117ada69d8458f1506643633 (diff)
download   chromium_src-c67287e61638d3cf598cb68c29133558080f1270.zip
           chromium_src-c67287e61638d3cf598cb68c29133558080f1270.tar.gz
           chromium_src-c67287e61638d3cf598cb68c29133558080f1270.tar.bz2
Disk cache: Fix style and extra net:: specification.
No real code change.
BUG=none
TEST=none
Review URL: https://chromiumcodereview.appspot.com/9812031
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@128364 0039d316-1c4b-4281-b951-d872f2087c98
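The cleanup hinges on class-scope typedefs added to disk_cache.h (typedef net::CompletionCallback CompletionCallback; and, for Entry, typedef net::IOBuffer IOBuffer;), so declarations inside the disk_cache classes no longer have to repeat the net:: qualifier. A minimal sketch of the pattern, using simplified stand-in types rather than the real Chromium headers:

// Sketch of the typedef pattern behind this change (stand-in types, not the
// actual Chromium code): one alias at class scope lets every member
// declaration drop the repeated net:: prefix.
#include <functional>
#include <string>

namespace net {
// Hypothetical stand-in for net::CompletionCallback, which at the time was a
// base::Callback<void(int)>.
typedef std::function<void(int)> CompletionCallback;
}  // namespace net

namespace disk_cache {

class Backend {
 public:
  // One alias at class scope...
  typedef net::CompletionCallback CompletionCallback;

  // ...so declarations below can be written without the net:: qualifier.
  // Before the change this would have read
  //   virtual int OpenEntry(..., const net::CompletionCallback& callback) = 0;
  virtual int OpenEntry(const std::string& key,
                        const CompletionCallback& callback) = 0;
  virtual ~Backend() {}
};

}  // namespace disk_cache

Callers outside the class are unaffected: disk_cache::Backend::CompletionCallback still names the same net::CompletionCallback type, which is why the commit message can say "No real code change."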
Diffstat (limited to 'net/disk_cache')
-rw-r--r--   net/disk_cache/backend_impl.cc   | 30
-rw-r--r--   net/disk_cache/backend_impl.h    | 32
-rw-r--r--   net/disk_cache/disk_cache.h      | 42
-rw-r--r--   net/disk_cache/entry_impl.cc     | 64
-rw-r--r--   net/disk_cache/entry_impl.h      | 53
-rw-r--r--   net/disk_cache/mem_entry_impl.cc | 37
-rw-r--r--   net/disk_cache/mem_entry_impl.h  | 39
-rw-r--r--   net/disk_cache/sparse_control.cc | 39
-rw-r--r--   net/disk_cache/sparse_control.h  | 12
9 files changed, 168 insertions, 180 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 7f36741..aa232c3 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -386,7 +386,7 @@ int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
 int max_bytes, net::CacheType type, uint32 flags, base::MessageLoopProxy* thread, net::NetLog* net_log, Backend** backend,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type, flags, thread,
@@ -395,7 +395,7 @@ int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
 return creator->Run(); }
-int BackendImpl::Init(const net::CompletionCallback& callback) {
+int BackendImpl::Init(const CompletionCallback& callback) {
 background_queue_.Init(callback); return net::ERR_IO_PENDING; }
@@ -436,7 +436,7 @@ int BackendImpl::SyncInit() {
 return net::ERR_FAILED; }
- if (!(user_flags_ & disk_cache::kNoRandom)) {
+ if (!(user_flags_ & kNoRandom)) {
 // The unit test controls directly what to test.
 new_eviction_ = (cache_type_ == net::DISK_CACHE); }
@@ -446,7 +446,7 @@ int BackendImpl::SyncInit() {
 return net::ERR_FAILED; }
- if (!(user_flags_ & disk_cache::kNoRandom) &&
+ if (!(user_flags_ & kNoRandom) &&
 cache_type_ == net::DISK_CACHE && !InitExperiment(&data_->header)) return net::ERR_FAILED;
@@ -521,7 +521,7 @@ void BackendImpl::CleanupCache() {
 // ------------------------------------------------------------------------
 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.OpenPrevEntry(iter, prev_entry, callback); return net::ERR_IO_PENDING;
@@ -998,7 +998,7 @@ void BackendImpl::OnEntryDestroyBegin(Addr address) {
 void BackendImpl::OnEntryDestroyEnd() { DecreaseNumRefs(); if (data_->header.num_bytes > max_size_ && !read_only_ &&
- (up_ticks_ > kTrimDelay || user_flags_ & disk_cache::kNoRandom))
+ (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
 eviction_.TrimCache(false); }
@@ -1258,13 +1258,13 @@ void BackendImpl::ClearRefCountForTest() {
 num_refs_ = 0; }
-int BackendImpl::FlushQueueForTest(const net::CompletionCallback& callback) {
+int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
 background_queue_.FlushQueue(callback); return net::ERR_IO_PENDING; }
 int BackendImpl::RunTaskForTest(const base::Closure& task,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 background_queue_.RunTask(task, callback); return net::ERR_IO_PENDING; }
@@ -1321,27 +1321,27 @@ int32 BackendImpl::GetEntryCount() const {
 }
 int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.OpenEntry(key, entry, callback); return net::ERR_IO_PENDING; }
 int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.CreateEntry(key, entry, callback); return net::ERR_IO_PENDING; }
 int BackendImpl::DoomEntry(const std::string& key,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.DoomEntry(key, callback); return net::ERR_IO_PENDING; }
-int BackendImpl::DoomAllEntries(const net::CompletionCallback& callback) {
+int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.DoomAllEntries(callback); return net::ERR_IO_PENDING;
@@ -1349,21 +1349,21 @@ int BackendImpl::DoomAllEntries(const net::CompletionCallback& callback) {
 int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
 const base::Time end_time,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.DoomEntriesBetween(initial_time, end_time, callback); return net::ERR_IO_PENDING; }
 int BackendImpl::DoomEntriesSince(const base::Time initial_time,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.DoomEntriesSince(initial_time, callback); return net::ERR_IO_PENDING; }
 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 DCHECK(!callback.is_null()); background_queue_.OpenNextEntry(iter, next_entry, callback); return net::ERR_IO_PENDING;
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index cc822d5..e2fb6ef 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -56,10 +56,10 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
 int max_bytes, net::CacheType type, uint32 flags, base::MessageLoopProxy* thread, net::NetLog* net_log, Backend** backend,
- const net::CompletionCallback& callback);
+ const CompletionCallback& callback);
 // Performs general initialization for this current instance of the cache.
- int Init(const net::CompletionCallback& callback);
+ int Init(const CompletionCallback& callback);
 // Performs the actual initialization and final cleanup on destruction.
 int SyncInit();
@@ -67,7 +67,7 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
 // Same behavior as OpenNextEntry but walks the list from back to front.
 int OpenPrevEntry(void** iter, Entry** prev_entry,
- const net::CompletionCallback& callback);
+ const CompletionCallback& callback);
 // Synchronous implementation of the asynchronous interface.
 int SyncOpenEntry(const std::string& key, Entry** entry);
@@ -237,12 +237,12 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
 void ClearRefCountForTest();
 // Sends a dummy operation through the operation queue, for unit tests.
- int FlushQueueForTest(const net::CompletionCallback& callback);
+ int FlushQueueForTest(const CompletionCallback& callback);
 // Runs the provided task on the cache thread. The task will be automatically
 // deleted after it runs.
 int RunTaskForTest(const base::Closure& task,
- const net::CompletionCallback& callback);
+ const CompletionCallback& callback);
 // Trims an entry (all if |empty| is true) from the list of deleted
 // entries. This method should be called directly on the cache thread.
@@ -259,21 +259,19 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
 // Backend implementation.
 virtual int32 GetEntryCount() const OVERRIDE;
 virtual int OpenEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) OVERRIDE;
+ const CompletionCallback& callback) OVERRIDE;
 virtual int CreateEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) OVERRIDE;
+ const CompletionCallback& callback) OVERRIDE;
 virtual int DoomEntry(const std::string& key,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int DoomAllEntries(const net::CompletionCallback& callback) OVERRIDE;
- virtual int DoomEntriesBetween(
- const base::Time initial_time,
- const base::Time end_time,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int DoomEntriesSince(
- const base::Time initial_time,
- const net::CompletionCallback& callback) OVERRIDE;
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(const base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
 virtual int OpenNextEntry(void** iter, Entry** next_entry,
- const net::CompletionCallback& callback) OVERRIDE;
+ const CompletionCallback& callback) OVERRIDE;
 virtual void EndEnumeration(void** iter) OVERRIDE;
 virtual void GetStats(StatsItems* stats) OVERRIDE;
 virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
index 4d928e6..f2fbcdd 100644
--- a/net/disk_cache/disk_cache.h
+++ b/net/disk_cache/disk_cache.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -58,6 +58,8 @@ NET_EXPORT int CreateCacheBackend(net::CacheType type, const FilePath& path,
 // The root interface for a disk cache instance.
 class NET_EXPORT Backend {
  public:
+  typedef net::CompletionCallback CompletionCallback;
+
 // If the backend is destroyed when there are operations in progress (any
 // callback that has not been invoked yet), this method cancels said
 // operations so the callbacks are not invoked, possibly leaving the work
@@ -76,7 +78,7 @@ class NET_EXPORT Backend {
 // will be invoked when the entry is available. The pointer to receive the
 // |entry| must remain valid until the operation completes.
 virtual int OpenEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Creates a new entry. Upon success, the out param holds a pointer to an
 // Entry object representing the newly created disk cache entry. When the
@@ -85,18 +87,18 @@ class NET_EXPORT Backend {
 // the |callback| will be invoked when the entry is available. The pointer to
 // receive the |entry| must remain valid until the operation completes.
 virtual int CreateEntry(const std::string& key, Entry** entry,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Marks the entry, specified by the given key, for deletion. The return value
 // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
 // will be invoked after the entry is doomed.
 virtual int DoomEntry(const std::string& key,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Marks all entries for deletion. The return value is a net error code. If
 // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
 // operation completes.
- virtual int DoomAllEntries(const net::CompletionCallback& callback) = 0;
+ virtual int DoomAllEntries(const CompletionCallback& callback) = 0;
 // Marks a range of entries for deletion. This supports unbounded deletes in
 // either direction by using null Time values for either argument. The return
@@ -104,13 +106,13 @@ class NET_EXPORT Backend {
 // |callback| will be invoked when the operation completes.
 virtual int DoomEntriesBetween(const base::Time initial_time,
 const base::Time end_time,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Marks all entries accessed since |initial_time| for deletion. The return
 // value is a net error code. If this method returns ERR_IO_PENDING, the
 // |callback| will be invoked when the operation completes.
 virtual int DoomEntriesSince(const base::Time initial_time,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Enumerates the cache. Initialize |iter| to NULL before calling this method
 // the first time. That will cause the enumeration to start at the head of
@@ -125,7 +127,7 @@ class NET_EXPORT Backend {
 // NOTE: This method does not modify the last_used field of the entry, and
 // therefore it does not impact the eviction ranking of the entry.
 virtual int OpenNextEntry(void** iter, Entry** next_entry,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Releases iter without returning the next entry. Whenever OpenNextEntry()
 // returns true, but the caller is not interested in continuing the
@@ -145,6 +147,9 @@ class NET_EXPORT Backend {
 // This interface represents an entry in the disk cache.
 class NET_EXPORT Entry {
  public:
+  typedef net::CompletionCallback CompletionCallback;
+  typedef net::IOBuffer IOBuffer;
+
 // Marks this cache entry for deletion.
 virtual void Doom() = 0;
@@ -176,8 +181,8 @@ class NET_EXPORT Entry {
 // been called; in other words, the caller may close this entry without
 // having to wait for all the callbacks, and still rely on the cleanup
 // performed from the callback code.
- virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) = 0;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
 // Copies cache data from the given buffer of length |buf_len|. If
 // completion_callback is null, then this call blocks until the write
@@ -192,8 +197,8 @@ class NET_EXPORT Entry {
 // performed from the callback code.
 // If truncate is true, this call will truncate the stored data at the end of
 // what we are writing here.
- virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback,
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
 bool truncate) = 0;
 // Sparse entries support:
@@ -240,8 +245,8 @@ class NET_EXPORT Entry {
 // Behaves like ReadData() except that this method is used to access sparse
 // entries.
- virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) = 0;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
 // Behaves like WriteData() except that this method is used to access sparse
 // entries. |truncate| is not part of this interface because a sparse entry
@@ -249,8 +254,8 @@ class NET_EXPORT Entry {
 // start again, or to reduce the total size of the stream data (which implies
 // that the content has changed), the whole entry should be doomed and
 // re-created.
- virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) = 0;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) = 0;
 // Returns information about the currently stored portion of a sparse entry.
 // |offset| and |len| describe a particular range that should be scanned to
@@ -262,7 +267,7 @@ class NET_EXPORT Entry {
 // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
 // operation completes, and |start| must remain valid until that point.
 virtual int GetAvailableRange(int64 offset, int len, int64* start,
- const net::CompletionCallback& callback) = 0;
+ const CompletionCallback& callback) = 0;
 // Returns true if this entry could be a sparse entry or false otherwise. This
 // is a quick test that may return true even if the entry is not really
@@ -292,8 +297,7 @@ class NET_EXPORT Entry {
 // Note that CancelSparseIO may have been called on another instance of this
 // object that refers to the same physical disk entry.
 // Note: This method is deprecated.
- virtual int ReadyForSparseIO(
- const net::CompletionCallback& completion_callback) = 0;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) = 0;
 protected:
 virtual ~Entry() {}
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index d0e5856..b7eb18c 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -106,7 +106,7 @@ class EntryImpl::UserBuffer {
 void Truncate(int offset);
 // Writes |len| bytes from |buf| at the given |offset|.
- void Write(int offset, net::IOBuffer* buf, int len);
+ void Write(int offset, IOBuffer* buf, int len);
 // Returns true if we can read |len| bytes from |offset|, given that the
 // actual file has |eof| bytes stored. Note that the number of bytes to read
@@ -115,7 +115,7 @@ class EntryImpl::UserBuffer {
 bool PreRead(int eof, int offset, int* len);
 // Read |len| bytes from |buf| at the given |offset|.
- int Read(int offset, net::IOBuffer* buf, int len);
+ int Read(int offset, IOBuffer* buf, int len);
 // Prepare this buffer for reuse.
 void Reset();
@@ -168,7 +168,7 @@ void EntryImpl::UserBuffer::Truncate(int offset) {
 buffer_.resize(offset); }
-void EntryImpl::UserBuffer::Write(int offset, net::IOBuffer* buf, int len) {
+void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
 DCHECK_GE(offset, 0); DCHECK_GE(len, 0); DCHECK_GE(offset + len, 0);
@@ -225,7 +225,7 @@ bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
 return (offset - offset_ < Size()); }
-int EntryImpl::UserBuffer::Read(int offset, net::IOBuffer* buf, int len) {
+int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
 DCHECK_GE(offset, 0); DCHECK_GT(len, 0); DCHECK(Size() || offset < offset_);
@@ -309,9 +309,8 @@ void EntryImpl::DoomImpl() {
 backend_->InternalDoomEntry(this); }
-int EntryImpl::ReadDataImpl(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_READ_DATA,
@@ -329,9 +328,9 @@ int EntryImpl::ReadDataImpl(
 return result; }
-int EntryImpl::WriteDataImpl(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate) {
+int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA,
@@ -350,8 +349,8 @@ int EntryImpl::WriteDataImpl(
 return result; }
-int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 DCHECK(node_.Data()->dirty || read_only_); int result = InitSparseData(); if (net::OK != result)
@@ -364,9 +363,8 @@ int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
 return result; }
-int EntryImpl::WriteSparseDataImpl(
- int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 DCHECK(node_.Data()->dirty || read_only_); int result = InitSparseData(); if (net::OK != result)
@@ -394,7 +392,7 @@ void EntryImpl::CancelSparseIOImpl() {
 sparse_->CancelIO(); }
-int EntryImpl::ReadyForSparseIOImpl(const net::CompletionCallback& callback) {
+int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
 DCHECK(sparse_.get()); return sparse_->ReadyToUse(callback); }
@@ -813,8 +811,8 @@ int32 EntryImpl::GetDataSize(int index) const {
 return entry->Data()->data_size[index]; }
-int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (callback.is_null()) return ReadDataImpl(index, offset, buf, buf_len, callback);
@@ -836,9 +834,8 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
 return net::ERR_IO_PENDING; }
-int EntryImpl::WriteData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate) {
+int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
 if (callback.is_null()) return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
@@ -857,8 +854,8 @@ int EntryImpl::WriteData(
 return net::ERR_IO_PENDING; }
-int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (callback.is_null()) return ReadSparseDataImpl(offset, buf, buf_len, callback);
@@ -869,8 +866,8 @@ int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
 return net::ERR_IO_PENDING; }
-int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (callback.is_null()) return WriteSparseDataImpl(offset, buf, buf_len, callback);
@@ -882,7 +879,7 @@ int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
 }
 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 if (!background_queue_) return net::ERR_UNEXPECTED;
@@ -904,7 +901,7 @@ void EntryImpl::CancelSparseIO() {
 background_queue_->CancelSparseIO(this); }
-int EntryImpl::ReadyForSparseIO(const net::CompletionCallback& callback) {
+int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
 if (!sparse_.get()) return net::OK;
@@ -975,9 +972,9 @@ EntryImpl::~EntryImpl() {
 // ------------------------------------------------------------------------
-int EntryImpl::InternalReadData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int EntryImpl::InternalReadData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 DCHECK(node_.Data()->dirty || read_only_);
 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
 if (index < 0 || index >= kNumStreams)
@@ -1059,9 +1056,10 @@ int EntryImpl::InternalReadData(
 return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING; }
-int EntryImpl::InternalWriteData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate) {
+int EntryImpl::InternalWriteData(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
 DCHECK(node_.Data()->dirty || read_only_);
 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
 if (index < 0 || index >= kNumStreams)
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index 8296200..466540f 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -40,17 +40,17 @@ class NET_EXPORT_PRIVATE EntryImpl
 // Background implementation of the Entry interface.
 void DoomImpl();
- int ReadDataImpl(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback);
- int WriteDataImpl(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate);
- int ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback);
- int WriteSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback);
+ int ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
+ int ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
+ int WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback);
 int GetAvailableRangeImpl(int64 offset, int len, int64* start);
 void CancelSparseIOImpl();
- int ReadyForSparseIOImpl(const net::CompletionCallback& callback);
+ int ReadyForSparseIOImpl(const CompletionCallback& callback);
 inline CacheEntryBlock* entry() { return &entry_;
@@ -151,25 +151,20 @@ class NET_EXPORT_PRIVATE EntryImpl
 virtual base::Time GetLastUsed() const OVERRIDE;
 virtual base::Time GetLastModified() const OVERRIDE;
 virtual int32 GetDataSize(int index) const OVERRIDE;
- virtual int ReadData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback,
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
 bool truncate) OVERRIDE;
- virtual int ReadSparseData(
- int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int WriteSparseData(
- int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int GetAvailableRange(
- int64 offset, int len, int64* start,
- const net::CompletionCallback& callback) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
 virtual bool CouldBeSparse() const OVERRIDE;
 virtual void CancelSparseIO() OVERRIDE;
- virtual int ReadyForSparseIO(
- const net::CompletionCallback& callback) OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
 private:
 enum {
@@ -181,10 +176,10 @@ class NET_EXPORT_PRIVATE EntryImpl
 // Do all the work for ReadDataImpl and WriteDataImpl. Implemented as
 // separate functions to make logging of results simpler.
- int InternalReadData(int index, int offset, net::IOBuffer* buf,
- int buf_len, const net::CompletionCallback& callback);
- int InternalWriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate);
+ int InternalReadData(int index, int offset, IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate);
 // Initializes the storage for an internal or external data block.
 bool CreateDataBlock(int index, int size);
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index de6329b..776759b 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -166,8 +166,8 @@ int32 MemEntryImpl::GetDataSize(int index) const {
 return data_size_[index]; }
-int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
- int buf_len, const net::CompletionCallback& callback) {
+int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_READ_DATA,
@@ -185,8 +185,8 @@ int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
 return result; }
-int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
- int buf_len, const net::CompletionCallback& callback, bool truncate) {
+int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_ENTRY_WRITE_DATA,
@@ -204,8 +204,8 @@ int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
 return result; }
-int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int MemEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_READ,
@@ -218,8 +218,8 @@ int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
 return result; }
-int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int MemEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent(net::NetLog::TYPE_SPARSE_WRITE, make_scoped_refptr(
@@ -232,7 +232,7 @@ int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
 }
 int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
- const net::CompletionCallback& callback) {
+ const CompletionCallback& callback) {
 if (net_log_.IsLoggingAllEvents()) { net_log_.BeginEvent( net::NetLog::TYPE_SPARSE_GET_RANGE,
@@ -254,7 +254,7 @@ bool MemEntryImpl::CouldBeSparse() const {
 return (children_.get() != NULL); }
-int MemEntryImpl::ReadyForSparseIO(const net::CompletionCallback& callback) {
+int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
 return net::OK; }
@@ -267,7 +267,7 @@ MemEntryImpl::~MemEntryImpl() {
 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL, NULL); }
-int MemEntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
+int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
 int buf_len) {
 DCHECK(type() == kParentEntry || index == kSparseData);
@@ -290,7 +290,7 @@ int MemEntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
 return buf_len; }
-int MemEntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
+int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
 int buf_len, bool truncate) {
 DCHECK(type() == kParentEntry || index == kSparseData);
@@ -332,7 +332,7 @@ int MemEntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
 return buf_len; }
-int MemEntryImpl::InternalReadSparseData(int64 offset, net::IOBuffer* buf,
+int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf,
 int buf_len) {
 DCHECK(type() == kParentEntry);
@@ -368,9 +368,8 @@ int MemEntryImpl::InternalReadSparseData(int64 offset, net::IOBuffer* buf,
 child->net_log().source(), io_buf->BytesRemaining()))); }
- int ret = child->ReadData(
- kSparseData, child_offset, io_buf, io_buf->BytesRemaining(),
- net::CompletionCallback());
+ int ret = child->ReadData(kSparseData, child_offset, io_buf,
+ io_buf->BytesRemaining(), CompletionCallback());
 if (net_log_.IsLoggingAllEvents()) { net_log_.EndEventWithNetErrorCode( net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret);
@@ -391,7 +390,7 @@ int MemEntryImpl::InternalReadSparseData(int64 offset, net::IOBuffer* buf,
 return io_buf->BytesConsumed(); }
-int MemEntryImpl::InternalWriteSparseData(int64 offset, net::IOBuffer* buf,
+int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf,
 int buf_len) {
 DCHECK(type() == kParentEntry);
@@ -433,7 +432,7 @@ int MemEntryImpl::InternalWriteSparseData(int64 offset, net::IOBuffer* buf,
 // TODO(hclam): if there is data in the entry and this write is not
 // continuous we may want to discard this write.
 int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len,
- net::CompletionCallback(), true);
+ CompletionCallback(), true);
 if (net_log_.IsLoggingAllEvents()) { net_log_.EndEventWithNetErrorCode( net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret);
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 7e188a5..c16a0ad 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -98,25 +98,20 @@ class MemEntryImpl : public Entry {
 virtual base::Time GetLastUsed() const OVERRIDE;
 virtual base::Time GetLastModified() const OVERRIDE;
 virtual int32 GetDataSize(int index) const OVERRIDE;
- virtual int ReadData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int WriteData(
- int index, int offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback, bool truncate) OVERRIDE;
- virtual int ReadSparseData(
- int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int WriteSparseData(
- int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) OVERRIDE;
- virtual int GetAvailableRange(
- int64 offset, int len, int64* start,
- const net::CompletionCallback& callback) OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
 virtual bool CouldBeSparse() const OVERRIDE;
 virtual void CancelSparseIO() OVERRIDE {}
- virtual int ReadyForSparseIO(
- const net::CompletionCallback& callback) OVERRIDE;
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
 private:
 typedef base::hash_map<int, MemEntryImpl*> EntryMap;
@@ -129,11 +124,11 @@ class MemEntryImpl : public Entry {
 // Do all the work for corresponding public functions. Implemented as
 // separate functions to make logging of results simpler.
- int InternalReadData(int index, int offset, net::IOBuffer* buf, int buf_len);
- int InternalWriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ int InternalReadData(int index, int offset, IOBuffer* buf, int buf_len);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
 bool truncate);
- int InternalReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len);
- int InternalWriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len);
+ int InternalReadSparseData(int64 offset, IOBuffer* buf, int buf_len);
+ int InternalWriteSparseData(int64 offset, IOBuffer* buf, int buf_len);
 // Old Entry interface.
 int GetAvailableRange(int64 offset, int len, int64* start);
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
index a45ba1f..166b7f7 100644
--- a/net/disk_cache/sparse_control.cc
+++ b/net/disk_cache/sparse_control.cc
@@ -230,9 +230,8 @@ bool SparseControl::CouldBeSparse() const {
 return (entry_->GetDataSize(kSparseIndex) != 0); }
-int SparseControl::StartIO(
- SparseOperation op, int64 offset, net::IOBuffer* buf, int buf_len,
- const net::CompletionCallback& callback) {
+int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, const CompletionCallback& callback) {
 DCHECK(init_);
 // We don't support simultaneous IO for sparse data.
 if (operation_ != kNoOperation)
@@ -291,7 +290,7 @@ int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
 range_found_ = false;
 int result = StartIO(
- kGetRangeOperation, offset, NULL, len, net::CompletionCallback());
+ kGetRangeOperation, offset, NULL, len, CompletionCallback());
 if (range_found_) { *start = offset_; return result; }
@@ -308,7 +307,7 @@ void SparseControl::CancelIO() {
 abort_ = true; }
-int SparseControl::ReadyToUse(const net::CompletionCallback& callback) {
+int SparseControl::ReadyToUse(const CompletionCallback& callback) {
 if (!abort_) return net::OK;
@@ -371,7 +370,7 @@ int SparseControl::CreateSparseEntry() {
 new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
 int rv = entry_->WriteData(
- kSparseIndex, 0, buf, sizeof(sparse_header_), net::CompletionCallback(),
+ kSparseIndex, 0, buf, sizeof(sparse_header_), CompletionCallback(),
 false);
 if (rv != sizeof(sparse_header_)) { DLOG(ERROR) << "Unable to save sparse_header_";
@@ -403,7 +402,7 @@ int SparseControl::OpenSparseEntry(int data_len) {
 // Read header.
 int rv = entry_->ReadData(
- kSparseIndex, 0, buf, sizeof(sparse_header_), net::CompletionCallback());
+ kSparseIndex, 0, buf, sizeof(sparse_header_), CompletionCallback());
 if (rv != static_cast<int>(sizeof(sparse_header_))) return net::ERR_CACHE_READ_FAILURE;
@@ -417,7 +416,7 @@ int SparseControl::OpenSparseEntry(int data_len) {
 // Read the actual bitmap.
 buf = new net::IOBuffer(map_len);
 rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf, map_len,
- net::CompletionCallback());
+ CompletionCallback());
 if (rv != map_len) return net::ERR_CACHE_READ_FAILURE;
@@ -459,8 +458,8 @@ bool SparseControl::OpenChild() {
 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
 // Read signature.
- int rv = child_->ReadData(
- kSparseIndex, 0, buf, sizeof(child_data_), net::CompletionCallback());
+ int rv = child_->ReadData(kSparseIndex, 0, buf, sizeof(child_data_),
+ CompletionCallback());
 if (rv != sizeof(child_data_)) return KillChildAndContinue(key, true);  // This is a fatal failure.
@@ -483,8 +482,8 @@ void SparseControl::CloseChild() {
 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
 // Save the allocation bitmap before closing the child entry.
- int rv = child_->WriteData(
- kSparseIndex, 0, buf, sizeof(child_data_), net::CompletionCallback(),
+ int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
+ CompletionCallback(),
 false);
 if (rv != sizeof(child_data_)) DLOG(ERROR) << "Failed to save child data";
@@ -554,9 +553,8 @@ void SparseControl::WriteSparseData() {
 reinterpret_cast<const char*>(children_map_.GetMap())));
 int len = children_map_.ArraySize() * 4;
- int rv = entry_->WriteData(
- kSparseIndex, sizeof(sparse_header_), buf, len, net::CompletionCallback(),
- false);
+ int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf, len,
+ CompletionCallback(), false);
 if (rv != len) { DLOG(ERROR) << "Unable to save sparse map"; }
@@ -659,9 +657,8 @@ void SparseControl::InitChildData() {
 scoped_refptr<net::WrappedIOBuffer> buf(
 new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
- int rv = child_->WriteData(
- kSparseIndex, 0, buf, sizeof(child_data_), net::CompletionCallback(),
- false);
+ int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
+ CompletionCallback(), false);
 if (rv != sizeof(child_data_)) DLOG(ERROR) << "Failed to save child data";
 SetChildBit(true);
@@ -702,7 +699,7 @@ bool SparseControl::DoChildIO() {
 // We have more work to do. Let's not trigger a callback to the caller.
 finished_ = false;
- net::CompletionCallback callback;
+ CompletionCallback callback;
 if (!user_callback_.is_null()) { callback = base::Bind(&SparseControl::OnChildIOCompleted, base::Unretained(this));
@@ -852,7 +849,7 @@ void SparseControl::OnChildIOCompleted(int result) {
 void SparseControl::DoUserCallback() {
 DCHECK(!user_callback_.is_null());
- net::CompletionCallback cb = user_callback_;
+ CompletionCallback cb = user_callback_;
 user_callback_.Reset();
 user_buf_ = NULL;
 pending_ = false;
@@ -866,7 +863,7 @@ void SparseControl::DoAbortCallbacks() {
 for (size_t i = 0; i < abort_callbacks_.size(); i++) {
 // Releasing all references to entry_ may result in the destruction of this
 // object so we should not be touching it after the last Release().
- net::CompletionCallback cb = abort_callbacks_[i];
+ CompletionCallback cb = abort_callbacks_[i];
 if (i == abort_callbacks_.size() - 1) abort_callbacks_.clear();
diff --git a/net/disk_cache/sparse_control.h b/net/disk_cache/sparse_control.h
index be6ca13..b0d9f37 100644
--- a/net/disk_cache/sparse_control.h
+++ b/net/disk_cache/sparse_control.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -32,6 +32,8 @@ class EntryImpl;
 // used directly for sparse operations (the entry passed in to the constructor).
 class SparseControl {
  public:
+  typedef net::CompletionCallback CompletionCallback;
+
 // The operation to perform.
 enum SparseOperation {
 kNoOperation,
@@ -59,7 +61,7 @@ class SparseControl {
 // WriteSparseData for details about the arguments. The return value is the
 // number of bytes read or written, or a net error code.
 int StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
- int buf_len, const net::CompletionCallback& callback);
+ int buf_len, const CompletionCallback& callback);
 // Implements Entry::GetAvailableRange().
 int GetAvailableRange(int64 offset, int len, int64* start);
@@ -70,7 +72,7 @@ class SparseControl {
 // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
 // busy. If the entry is busy, we'll invoke the callback when we are ready
 // again. See disk_cache::Entry::ReadyToUse() for more info.
- int ReadyToUse(const net::CompletionCallback& completion_callback);
+ int ReadyToUse(const CompletionCallback& completion_callback);
 // Deletes the children entries of |entry|.
 static void DeleteChildren(EntryImpl* entry);
@@ -159,8 +161,8 @@ class SparseControl {
 SparseData child_data_;  // Parent and allocation map of child_.
 Bitmap child_map_;  // The allocation map as a bitmap.
- net::CompletionCallback user_callback_;
- std::vector<net::CompletionCallback> abort_callbacks_;
+ CompletionCallback user_callback_;
+ std::vector<CompletionCallback> abort_callbacks_;
 int64 offset_;  // Current sparse offset.
 scoped_refptr<net::DrainableIOBuffer> user_buf_;
 int buf_len_;  // Bytes to read or write.