path: root/net/disk_cache
author    rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-08 23:07:39 +0000
committer rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-08 23:07:39 +0000
commit    06e62baf611d6fca8698d50765bca81989155845 (patch)
tree      5516ccda32e56e7dae301b8b30f5fd24a4c8b83c /net/disk_cache
parent    681d3b6b072e17a5de40be8794550f781bf28d87 (diff)
Disk cache: Add a method to cancel pending sparse operations.
The sparse IO methods require exclusive use of the cache entry and they complain when that requirement is violated. When the user cancels a request and reissues another one against the same entry, we may still be waiting for the previous operation to finish when the new IO request arrives, so we fail. This CL adds a way for the HTTP cache to cancel IO operations and get a notification when the disk cache is able to operate on that entry again.

BUG=23862
TEST=unittests

Review URL: http://codereview.chromium.org/256090

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@28475 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--  net/disk_cache/disk_cache.h       |  32
-rw-r--r--  net/disk_cache/entry_impl.cc      |  14
-rw-r--r--  net/disk_cache/entry_impl.h       |   2
-rw-r--r--  net/disk_cache/entry_unittest.cc  |  47
-rw-r--r--  net/disk_cache/mem_entry_impl.cc  |   5
-rw-r--r--  net/disk_cache/mem_entry_impl.h   |   2
-rw-r--r--  net/disk_cache/sparse_control.cc  |  42
-rw-r--r--  net/disk_cache/sparse_control.h   |  12
8 files changed, 155 insertions, 1 deletion
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
index f43cace..04c08d6 100644
--- a/net/disk_cache/disk_cache.h
+++ b/net/disk_cache/disk_cache.h
@@ -183,6 +183,18 @@ class Entry {
// The Backend implementation is free to evict any range from the cache at any
// moment, so in practice, the previously stated granularity of 1 KB is not
// as bad as it sounds.
+ //
+ // The sparse methods don't support multiple simultaneous IO operations to the
+ // same physical entry, so in practice a single object should be instantiated
+ // for a given key at any given time. Once an operation has been issued, the
+ // caller should wait until it completes before starting another one. This
+ // requirement includes the case when an entry is closed while some operation
+ // is in progress and another object is instantiated; any IO operation will
+ // fail while the previous operation is still in-flight. In order to deal with
+ // this requirement, the caller could either wait until the operation
+ // completes before closing the entry, or call CancelSparseIO() before closing
+ // the entry, and call ReadyForSparseIO() on the new entry and wait for the
+ // callback before issuing new operations.
// Behaves like ReadData() except that this method is used to access sparse
// entries.
@@ -207,6 +219,26 @@ class Entry {
// net error code whenever the request cannot be completed successfully.
virtual int GetAvailableRange(int64 offset, int len, int64* start) = 0;
+ // Cancels any pending sparse IO operation (if any). The completion callback
+ // of the operation in question will still be called when the operation
+ // finishes, but the operation will finish sooner when this method is used.
+ virtual void CancelSparseIO() = 0;
+
+ // Returns OK if this entry can be used immediately. If that is not the
+ // case, returns ERR_IO_PENDING and invokes the provided callback when this
+ // entry is ready to use. This method always returns OK for non-sparse
+ // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
+ // (by calling CancelSparseIO), but the cache is still busy with it. If there
+ // is a pending operation that has not been cancelled, this method will return
+ // OK although another IO operation cannot be issued at this time; in this
+ // case the caller should just wait for the regular callback to be invoked
+ // instead of using this method to provide another callback.
+ //
+ // Note that CancelSparseIO may have been called on another instance of this
+ // object that refers to the same physical disk entry.
+ virtual int ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) = 0;
+
protected:
virtual ~Entry() {}
};
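
[Editor's note] The comments above describe a caller-side protocol without showing one, so here is a minimal sketch of how a consumer might follow it. This is not part of the CL: ReopenSparseEntry and ready_callback are hypothetical names introduced for illustration, and only CancelSparseIO(), ReadyForSparseIO(), Close(), and the net error codes come from the interface documented above.

#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"

// Hypothetical helper: the caller is closing |old_entry| while a sparse
// operation may still be in flight, and wants to start issuing sparse IO
// through |new_entry|, which refers to the same physical cache entry.
int ReopenSparseEntry(disk_cache::Entry* old_entry,
                      disk_cache::Entry* new_entry,
                      net::CompletionCallback* ready_callback) {
  // Ask the cache to wind down the in-flight operation early. That
  // operation's own completion callback still runs, just sooner.
  old_entry->CancelSparseIO();
  old_entry->Close();

  // The physical entry may still be draining the cancelled operation.
  // net::OK means sparse IO can be issued right away; net::ERR_IO_PENDING
  // means |ready_callback| fires once the entry is usable again.
  return new_entry->ReadyForSparseIO(ready_callback);
}

The new CancelSparseIO unit test further down exercises the same pair of calls directly on a single entry object.
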
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 4854a49..7d877c5 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -384,6 +384,20 @@ int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
return sparse_->GetAvailableRange(offset, len, start);
}
+void EntryImpl::CancelSparseIO() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImpl::ReadyForSparseIO(net::CompletionCallback* completion_callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ return sparse_->ReadyToUse(completion_callback);
+}
+
// ------------------------------------------------------------------------
uint32 EntryImpl::GetHash() {
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index f5961f9..0d0bf09 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -48,6 +48,8 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback);
virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual void CancelSparseIO();
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
inline CacheEntryBlock* entry() {
return &entry_;
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index dbd4f82..34240e6 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -1343,3 +1343,50 @@ TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
// We re-created one of the corrupt children.
EXPECT_EQ(3, cache_->GetEntryCount());
}
+
+TEST_F(DiskCacheEntryTest, CancelSparseIO) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ SimpleCallbackTest cb1, cb2, cb3, cb4;
+ int64 offset = 0;
+ for (int ret = 0; ret != net::ERR_IO_PENDING; offset += kSize * 4)
+ ret = entry->WriteSparseData(offset, buf, kSize, &cb1);
+
+ // Cannot use the entry at this point.
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+
+ // We cancel the pending operation, and register multiple notifications.
+ entry->CancelSparseIO();
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb2));
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb3));
+ entry->CancelSparseIO(); // Should be a no op at this point.
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb4));
+
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->ReadSparseData(offset, buf, kSize, NULL));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->WriteSparseData(offset, buf, kSize, NULL));
+
+ // Now see if we receive all notifications.
+ EXPECT_EQ(kSize, cb1.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb2.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb3.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb4.GetResult(net::ERR_IO_PENDING));
+
+ EXPECT_EQ(kSize, entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+ entry->Close();
+}
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index 17eb65f..144e33f 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -319,6 +319,11 @@ int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
return 0;
}
+int MemEntryImpl::ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) {
+ return net::OK;
+}
+
// ------------------------------------------------------------------------
bool MemEntryImpl::CreateEntry(const std::string& key) {
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 2463985..3eb1467 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -68,6 +68,8 @@ class MemEntryImpl : public Entry {
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback);
virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual void CancelSparseIO() {}
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
// Performs the initialization of a EntryImpl that will be added to the
// cache.
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
index 7648f4e..3c1dd8c 100644
--- a/net/disk_cache/sparse_control.cc
+++ b/net/disk_cache/sparse_control.cc
@@ -192,6 +192,7 @@ int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
result_ = 0;
pending_ = false;
finished_ = false;
+ abort_ = false;
DoChildrenIO();
@@ -226,6 +227,24 @@ int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
return result < 0 ? result : 0; // Don't mask error codes to the caller.
}
+void SparseControl::CancelIO() {
+ if (operation_ == kNoOperation)
+ return;
+ abort_ = true;
+}
+
+int SparseControl::ReadyToUse(net::CompletionCallback* completion_callback) {
+ if (!abort_)
+ return net::OK;
+
+ // We'll grab another reference to keep this object alive because we just have
+ // one extra reference due to the pending IO operation itself, but we'll
+ // release that one before invoking user_callback_.
+ entry_->AddRef(); // Balanced in DoAbortCallbacks.
+ abort_callbacks_.push_back(completion_callback);
+ return net::ERR_IO_PENDING;
+}
+
// Static
void SparseControl::DeleteChildren(EntryImpl* entry) {
DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
@@ -601,7 +620,7 @@ bool SparseControl::DoChildIO() {
// progress. However, this entry can still be closed, and that would not
// be a good thing for us, so we increase the refcount until we're
// finished doing sparse stuff.
- entry_->AddRef();
+ entry_->AddRef(); // Balanced in DoUserCallback.
}
return false;
}
@@ -681,6 +700,14 @@ void SparseControl::OnChildIOCompleted(int result) {
DCHECK_NE(net::ERR_IO_PENDING, result);
DoChildIOCompleted(result);
+ if (abort_) {
+ // We'll return the current result of the operation, which may be less than
+ // the bytes to read or write, but the user cancelled the operation.
+ abort_ = false;
+ DoUserCallback();
+ return DoAbortCallbacks();
+ }
+
// We are running a callback from the message loop. It's time to restart what
// we were doing before.
DoChildrenIO();
@@ -697,4 +724,17 @@ void SparseControl::DoUserCallback() {
c->Run(result_);
}
+void SparseControl::DoAbortCallbacks() {
+ for (size_t i = 0; i < abort_callbacks_.size(); i++) {
+ // Releasing all references to entry_ may result in the destruction of this
+ // object so we should not be touching it after the last Release().
+ net::CompletionCallback* c = abort_callbacks_[i];
+ if (i == abort_callbacks_.size() - 1)
+ abort_callbacks_.clear();
+
+ entry_->Release(); // Don't touch object after this line.
+ c->Run(net::OK);
+ }
+}
+
} // namespace disk_cache
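
[Editor's note] The loop in DoAbortCallbacks() above encodes a lifetime rule worth spelling out: the last entry_->Release() can destroy the SparseControl itself, so the callback pointer is copied to a local and the vector is cleared before that Release(), and nothing on |this| is touched afterwards. The following standalone sketch restates the same idea with made-up Notifier/Listener types (not Chromium code), restructured so that all needed state lives in locals before any reference is dropped.

#include <cstddef>
#include <vector>

// Stand-in for a completion callback; only the calling order matters here.
struct Listener {
  virtual void Run(int result) = 0;
  virtual ~Listener() {}
};

class Notifier {
 public:
  // Mirrors SparseControl::ReadyToUse(): each registration takes one extra
  // reference so the owner cannot go away while a notification is pending.
  void AddListener(Listener* listener) {
    AddRef();  // Balanced in NotifyAll().
    listeners_.push_back(listener);
  }

  // Mirrors SparseControl::DoAbortCallbacks(): move everything we need into
  // locals first, because the last Release() may delete |this|.
  void NotifyAll() {
    std::vector<Listener*> pending;
    pending.swap(listeners_);  // No member access after the final Release().
    for (size_t i = 0; i < pending.size(); ++i) {
      Release();           // May destroy |this| on the last iteration.
      pending[i]->Run(0);  // Only locals are used from here on.
    }
  }

 private:
  // Reference counting is elided in this sketch; in the real code these are
  // AddRef()/Release() on the refcounted cache entry that owns the object.
  void AddRef() {}
  void Release() {}

  std::vector<Listener*> listeners_;
};
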
diff --git a/net/disk_cache/sparse_control.h b/net/disk_cache/sparse_control.h
index 534d1a9..24f5446 100644
--- a/net/disk_cache/sparse_control.h
+++ b/net/disk_cache/sparse_control.h
@@ -6,6 +6,7 @@
#define NET_DISK_CACHE_SPARSE_CONTROL_H_
#include <string>
+#include <vector>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
@@ -63,6 +64,14 @@ class SparseControl {
// Implements Entry::GetAvailableRange().
int GetAvailableRange(int64 offset, int len, int64* start);
+ // Cancels the current sparse operation (if any).
+ void CancelIO();
+
+ // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
+ // busy. If the entry is busy, we'll invoke the callback when we are ready
+ // again. See disk_cache::Entry::ReadyToUse() for more info.
+ int ReadyToUse(net::CompletionCallback* completion_callback);
+
// Deletes the children entries of |entry|.
static void DeleteChildren(EntryImpl* entry);
@@ -134,6 +143,7 @@ class SparseControl {
// Reports to the user that we are done.
void DoUserCallback();
+ void DoAbortCallbacks();
EntryImpl* entry_; // The sparse entry.
Entry* child_; // The current child entry.
@@ -142,6 +152,7 @@ class SparseControl {
bool finished_;
bool init_;
bool range_found_; // True if GetAvailableRange found something.
+ bool abort_; // True if we should abort the current operation ASAP.
SparseHeader sparse_header_; // Data about the children of entry_.
Bitmap children_map_; // The actual bitmap of children.
@@ -150,6 +161,7 @@ class SparseControl {
net::CompletionCallbackImpl<SparseControl> child_callback_;
net::CompletionCallback* user_callback_;
+ std::vector<net::CompletionCallback*> abort_callbacks_;
int64 offset_; // Current sparse offset.
scoped_refptr<net::ReusedIOBuffer> user_buf_;
int buf_len_; // Bytes to read or write.