summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorrvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2009-10-08 23:07:39 +0000
committerrvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2009-10-08 23:07:39 +0000
commit06e62baf611d6fca8698d50765bca81989155845 (patch)
tree5516ccda32e56e7dae301b8b30f5fd24a4c8b83c
parent681d3b6b072e17a5de40be8794550f781bf28d87 (diff)
downloadchromium_src-06e62baf611d6fca8698d50765bca81989155845.zip
chromium_src-06e62baf611d6fca8698d50765bca81989155845.tar.gz
chromium_src-06e62baf611d6fca8698d50765bca81989155845.tar.bz2
Disk cache: Add a method to cancel pending sparse operations.
The sparse IO methods require exclusive use of the cache entry and they complain when that requirement is violated. When the user cancels a request and reissues another one to the same entry, we may be waiting for the previous operation to finish when we receive a new IO request, so we fail. This CL adds a way for the HTTP cache to cancel IO operations and get a notification when the disk cache is able to operate on that entry again. BUG=23862 TEST=unittests Review URL: http://codereview.chromium.org/256090 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@28475 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--net/disk_cache/disk_cache.h32
-rw-r--r--net/disk_cache/entry_impl.cc14
-rw-r--r--net/disk_cache/entry_impl.h2
-rw-r--r--net/disk_cache/entry_unittest.cc47
-rw-r--r--net/disk_cache/mem_entry_impl.cc5
-rw-r--r--net/disk_cache/mem_entry_impl.h2
-rw-r--r--net/disk_cache/sparse_control.cc42
-rw-r--r--net/disk_cache/sparse_control.h12
-rw-r--r--net/http/http_cache.cc40
-rw-r--r--net/http/http_cache_unittest.cc98
10 files changed, 285 insertions, 9 deletions
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
index f43cace..04c08d6 100644
--- a/net/disk_cache/disk_cache.h
+++ b/net/disk_cache/disk_cache.h
@@ -183,6 +183,18 @@ class Entry {
// The Backend implementation is free to evict any range from the cache at any
// moment, so in practice, the previously stated granularity of 1 KB is not
// as bad as it sounds.
+ //
+ // The sparse methods don't support multiple simultaneous IO operations to the
+ // same physical entry, so in practice a single object should be instantiated
+ // for a given key at any given time. Once an operation has been issued, the
+ // caller should wait until it completes before starting another one. This
+ // requirement includes the case when an entry is closed while some operation
+ // is in progress and another object is instantiated; any IO operation will
+ // fail while the previous operation is still in-flight. In order to deal with
+ // this requirement, the caller could either wait until the operation
+ // completes before closing the entry, or call CancelSparseIO() before closing
+ // the entry, and call ReadyForSparseIO() on the new entry and wait for the
+ // callback before issuing new operations.
// Behaves like ReadData() except that this method is used to access sparse
// entries.
@@ -207,6 +219,26 @@ class Entry {
// net error code whenever the request cannot be completed successfully.
virtual int GetAvailableRange(int64 offset, int len, int64* start) = 0;
+ // Cancels any pending sparse IO operation (if any). The completion callback
+ // of the operation in question will still be called when the operation
+ // finishes, but the operation will finish sooner when this method is used.
+ virtual void CancelSparseIO() = 0;
+
+ // Returns OK if this entry can be used immediately. If that is not the
+ // case, returns ERR_IO_PENDING and invokes the provided callback when this
+ // entry is ready to use. This method always returns OK for non-sparse
+ // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
+ // (by calling CancelSparseIO), but the cache is still busy with it. If there
+ // is a pending operation that has not been cancelled, this method will return
+ // OK although another IO operation cannot be issued at this time; in this
+ // case the caller should just wait for the regular callback to be invoked
+ // instead of using this method to provide another callback.
+ //
+ // Note that CancelSparseIO may have been called on another instance of this
+ // object that refers to the same physical disk entry.
+ virtual int ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) = 0;
+
protected:
virtual ~Entry() {}
};
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 4854a49..7d877c5 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -384,6 +384,20 @@ int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
return sparse_->GetAvailableRange(offset, len, start);
}
+void EntryImpl::CancelSparseIO() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImpl::ReadyForSparseIO(net::CompletionCallback* completion_callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ return sparse_->ReadyToUse(completion_callback);
+}
+
// ------------------------------------------------------------------------
uint32 EntryImpl::GetHash() {
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index f5961f9..0d0bf09 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -48,6 +48,8 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback);
virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual void CancelSparseIO();
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
inline CacheEntryBlock* entry() {
return &entry_;
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index dbd4f82..34240e6 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -1343,3 +1343,50 @@ TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
// We re-created one of the corrupt children.
EXPECT_EQ(3, cache_->GetEntryCount());
}
+
+TEST_F(DiskCacheEntryTest, CancelSparseIO) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ SimpleCallbackTest cb1, cb2, cb3, cb4;
+ int64 offset = 0;
+ for (int ret = 0; ret != net::ERR_IO_PENDING; offset += kSize * 4)
+ ret = entry->WriteSparseData(offset, buf, kSize, &cb1);
+
+ // Cannot use the entry at this point.
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+
+ // We cancel the pending operation, and register multiple notifications.
+ entry->CancelSparseIO();
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb2));
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb3));
+ entry->CancelSparseIO(); // Should be a no op at this point.
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb4));
+
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->ReadSparseData(offset, buf, kSize, NULL));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->WriteSparseData(offset, buf, kSize, NULL));
+
+ // Now see if we receive all notifications.
+ EXPECT_EQ(kSize, cb1.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb2.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb3.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb4.GetResult(net::ERR_IO_PENDING));
+
+ EXPECT_EQ(kSize, entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+ entry->Close();
+}
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index 17eb65f..144e33f 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -319,6 +319,11 @@ int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
return 0;
}
+int MemEntryImpl::ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) {
+ return net::OK;
+}
+
// ------------------------------------------------------------------------
bool MemEntryImpl::CreateEntry(const std::string& key) {
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 2463985..3eb1467 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -68,6 +68,8 @@ class MemEntryImpl : public Entry {
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback);
virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual void CancelSparseIO() {}
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
// Performs the initialization of a EntryImpl that will be added to the
// cache.
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
index 7648f4e..3c1dd8c 100644
--- a/net/disk_cache/sparse_control.cc
+++ b/net/disk_cache/sparse_control.cc
@@ -192,6 +192,7 @@ int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
result_ = 0;
pending_ = false;
finished_ = false;
+ abort_ = false;
DoChildrenIO();
@@ -226,6 +227,24 @@ int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
return result < 0 ? result : 0; // Don't mask error codes to the caller.
}
+void SparseControl::CancelIO() {
+ if (operation_ == kNoOperation)
+ return;
+ abort_ = true;
+}
+
+int SparseControl::ReadyToUse(net::CompletionCallback* completion_callback) {
+ if (!abort_)
+ return net::OK;
+
+ // We'll grab another reference to keep this object alive because we just have
+ // one extra reference due to the pending IO operation itself, but we'll
+ // release that one before invoking user_callback_.
+ entry_->AddRef(); // Balanced in DoAbortCallbacks.
+ abort_callbacks_.push_back(completion_callback);
+ return net::ERR_IO_PENDING;
+}
+
// Static
void SparseControl::DeleteChildren(EntryImpl* entry) {
DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
@@ -601,7 +620,7 @@ bool SparseControl::DoChildIO() {
// progress. However, this entry can still be closed, and that would not
// be a good thing for us, so we increase the refcount until we're
// finished doing sparse stuff.
- entry_->AddRef();
+ entry_->AddRef(); // Balanced in DoUserCallback.
}
return false;
}
@@ -681,6 +700,14 @@ void SparseControl::OnChildIOCompleted(int result) {
DCHECK_NE(net::ERR_IO_PENDING, result);
DoChildIOCompleted(result);
+ if (abort_) {
+ // We'll return the current result of the operation, which may be less than
+ // the bytes to read or write, but the user cancelled the operation.
+ abort_ = false;
+ DoUserCallback();
+ return DoAbortCallbacks();
+ }
+
// We are running a callback from the message loop. It's time to restart what
// we were doing before.
DoChildrenIO();
@@ -697,4 +724,17 @@ void SparseControl::DoUserCallback() {
c->Run(result_);
}
+void SparseControl::DoAbortCallbacks() {
+ for (size_t i = 0; i < abort_callbacks_.size(); i++) {
+ // Releasing all references to entry_ may result in the destruction of this
+ // object so we should not be touching it after the last Release().
+ net::CompletionCallback* c = abort_callbacks_[i];
+ if (i == abort_callbacks_.size() - 1)
+ abort_callbacks_.clear();
+
+ entry_->Release(); // Don't touch object after this line.
+ c->Run(net::OK);
+ }
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/sparse_control.h b/net/disk_cache/sparse_control.h
index 534d1a9..24f5446 100644
--- a/net/disk_cache/sparse_control.h
+++ b/net/disk_cache/sparse_control.h
@@ -6,6 +6,7 @@
#define NET_DISK_CACHE_SPARSE_CONTROL_H_
#include <string>
+#include <vector>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
@@ -63,6 +64,14 @@ class SparseControl {
// Implements Entry::GetAvailableRange().
int GetAvailableRange(int64 offset, int len, int64* start);
+ // Cancels the current sparse operation (if any).
+ void CancelIO();
+
+ // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
+ // busy. If the entry is busy, we'll invoke the callback when we are ready
+ // again. See disk_cache::Entry::ReadyForSparseIO() for more info.
+ int ReadyToUse(net::CompletionCallback* completion_callback);
+
// Deletes the children entries of |entry|.
static void DeleteChildren(EntryImpl* entry);
@@ -134,6 +143,7 @@ class SparseControl {
// Reports to the user that we are done.
void DoUserCallback();
+ void DoAbortCallbacks();
EntryImpl* entry_; // The sparse entry.
Entry* child_; // The current child entry.
@@ -142,6 +152,7 @@ class SparseControl {
bool finished_;
bool init_;
bool range_found_; // True if GetAvailableRange found something.
+ bool abort_; // True if we should abort the current operation ASAP.
SparseHeader sparse_header_; // Data about the children of entry_.
Bitmap children_map_; // The actual bitmap of children.
@@ -150,6 +161,7 @@ class SparseControl {
net::CompletionCallbackImpl<SparseControl> child_callback_;
net::CompletionCallback* user_callback_;
+ std::vector<net::CompletionCallback*> abort_callbacks_;
int64 offset_; // Current sparse offset.
scoped_refptr<net::ReusedIOBuffer> user_buf_;
int buf_len_; // Bytes to read or write.
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 7fb9703..7985992 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -161,7 +161,10 @@ class HttpCache::Transaction : public HttpTransaction {
network_read_callback_(this, &Transaction::OnNetworkReadCompleted)),
ALLOW_THIS_IN_INITIALIZER_LIST(
cache_read_callback_(new CancelableCompletionCallback<Transaction>(
- this, &Transaction::OnCacheReadCompleted))) {
+ this, &Transaction::OnCacheReadCompleted))),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ entry_ready_callback_(new CancelableCompletionCallback<Transaction>(
+ this, &Transaction::OnCacheEntryReady))) {
}
// Clean up the transaction.
@@ -251,6 +254,10 @@ class HttpCache::Transaction : public HttpTransaction {
// a network error code.
int BeginPartialCacheValidation();
+ // Validates the entry headers against the requested range and continues with
+ // the validation of the rest of the entry. Returns a network error code.
+ int ValidateEntryHeadersAndContinue(bool byte_range_requested);
+
// Performs the cache validation for the next chunk of data stored by the
// cache. If this chunk is not currently stored, starts the network request
// to fetch it. Returns a network error code.
@@ -345,6 +352,9 @@ class HttpCache::Transaction : public HttpTransaction {
// Called to signal completion of the cache's ReadData method:
void OnCacheReadCompleted(int result);
+ // Called to signal completion of the cache entry's ReadyForSparseIO method:
+ void OnCacheEntryReady(int result);
+
scoped_refptr<LoadLog> load_log_;
const HttpRequestInfo* request_;
scoped_ptr<HttpRequestInfo> custom_request_;
@@ -373,14 +383,21 @@ class HttpCache::Transaction : public HttpTransaction {
CompletionCallbackImpl<Transaction> network_read_callback_;
scoped_refptr<CancelableCompletionCallback<Transaction> >
cache_read_callback_;
+ scoped_refptr<CancelableCompletionCallback<Transaction> >
+ entry_ready_callback_;
};
HttpCache::Transaction::~Transaction() {
if (cache_) {
if (entry_) {
bool cancel_request = reading_ && enable_range_support_;
- if (cancel_request && !partial_.get())
- cancel_request &= (response_.headers->response_code() == 200);
+ if (cancel_request) {
+ if (partial_.get()) {
+ entry_->disk_entry->CancelSparseIO();
+ } else {
+ cancel_request &= (response_.headers->response_code() == 200);
+ }
+ }
cache_->DoneWithEntry(entry_, this, cancel_request);
} else {
@@ -928,7 +945,10 @@ int HttpCache::Transaction::BeginPartialCacheValidation() {
return BeginCacheValidation();
bool byte_range_requested = partial_.get() != NULL;
- if (!byte_range_requested) {
+ if (byte_range_requested) {
+ if (OK != entry_->disk_entry->ReadyForSparseIO(entry_ready_callback_))
+ return ERR_IO_PENDING;
+ } else {
// The request is not for a range, but we have stored just ranges.
partial_.reset(new PartialData());
if (!custom_request_.get()) {
@@ -937,6 +957,13 @@ int HttpCache::Transaction::BeginPartialCacheValidation() {
}
}
+ return ValidateEntryHeadersAndContinue(byte_range_requested);
+}
+
+int HttpCache::Transaction::ValidateEntryHeadersAndContinue(
+ bool byte_range_requested) {
+ DCHECK(mode_ == READ_WRITE);
+
if (!partial_->UpdateFromStoredHeaders(response_.headers, entry_->disk_entry,
truncated_)) {
// The stored data cannot be used. Get rid of it and restart this request.
@@ -1552,6 +1579,11 @@ void HttpCache::Transaction::OnCacheReadCompleted(int result) {
DoCacheReadCompleted(result);
}
+void HttpCache::Transaction::OnCacheEntryReady(int result) {
+ DCHECK_EQ(OK, result);
+ ValidateEntryHeadersAndContinue(true);
+}
+
//-----------------------------------------------------------------------------
HttpCache::HttpCache(HostResolver* host_resolver,
diff --git a/net/http/http_cache_unittest.cc b/net/http/http_cache_unittest.cc
index ea98c67..67d6de8 100644
--- a/net/http/http_cache_unittest.cc
+++ b/net/http/http_cache_unittest.cc
@@ -32,11 +32,13 @@ class MockDiskEntry : public disk_cache::Entry,
public base::RefCounted<MockDiskEntry> {
public:
MockDiskEntry()
- : test_mode_(0), doomed_(false), sparse_(false), fail_requests_(false) {
+ : test_mode_(0), doomed_(false), sparse_(false), fail_requests_(false),
+ busy_(false), delayed_(false) {
}
explicit MockDiskEntry(const std::string& key)
- : key_(key), doomed_(false), sparse_(false), fail_requests_(false) {
+ : key_(key), doomed_(false), sparse_(false), fail_requests_(false),
+ busy_(false), delayed_(false) {
//
// 'key' is prefixed with an identifier if it corresponds to a cached POST.
// Skip past that to locate the actual URL.
@@ -131,7 +133,7 @@ class MockDiskEntry : public disk_cache::Entry,
virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback) {
- if (!sparse_)
+ if (!sparse_ || busy_)
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (offset < 0)
return net::ERR_FAILED;
@@ -152,11 +154,15 @@ class MockDiskEntry : public disk_cache::Entry,
return num;
CallbackLater(completion_callback, num);
+ busy_ = true;
+ delayed_ = false;
return net::ERR_IO_PENDING;
}
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback) {
+ if (busy_)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (!sparse_) {
if (data_[1].size())
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
@@ -181,7 +187,7 @@ class MockDiskEntry : public disk_cache::Entry,
}
virtual int GetAvailableRange(int64 offset, int len, int64* start) {
- if (!sparse_)
+ if (!sparse_ || busy_)
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (offset < 0)
return net::ERR_FAILED;
@@ -212,6 +218,23 @@ class MockDiskEntry : public disk_cache::Entry,
return count;
}
+ virtual void CancelSparseIO() { cancel_ = true; }
+
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback) {
+ if (!cancel_)
+ return net::OK;
+
+ cancel_ = false;
+ DCHECK(completion_callback);
+ if (test_mode_ & TEST_MODE_SYNC_CACHE_READ)
+ return net::OK;
+
+ // The pending operation is already in the message loop (and hopefully
+ // already in the second pass). Just notify the caller that it finished.
+ CallbackLater(completion_callback, 0);
+ return net::ERR_IO_PENDING;
+ }
+
// Fail most subsequent requests.
void set_fail_requests() { fail_requests_ = true; }
@@ -224,6 +247,21 @@ class MockDiskEntry : public disk_cache::Entry,
&MockDiskEntry::RunCallback, callback, result));
}
void RunCallback(net::CompletionCallback* callback, int result) {
+ if (busy_) {
+ // This is kind of hacky, but controlling the behavior of just this entry
+ // from a test is sort of complicated. What we really want to do is
+ // delay the delivery of a sparse IO operation a little more so that the
+ // request start operation (async) will finish without seeing the end of
+ // this operation (already posted to the message loop)... and without
+ // just delaying for n ms (which may cause trouble with slow bots). So
+ // we re-post this operation (all async sparse IO operations will take two
+ // trips through the message loop instead of one).
+ if (!delayed_) {
+ delayed_ = true;
+ return CallbackLater(callback, result);
+ }
+ }
+ busy_ = false;
callback->Run(result);
}
@@ -233,8 +271,14 @@ class MockDiskEntry : public disk_cache::Entry,
bool doomed_;
bool sparse_;
bool fail_requests_;
+ bool busy_;
+ bool delayed_;
+ static bool cancel_;
};
+// Static.
+bool MockDiskEntry::cancel_ = false;
+
class MockDiskCache : public disk_cache::Backend {
public:
MockDiskCache()
@@ -2344,6 +2388,52 @@ TEST(HttpCache, RangeGET_Cancel) {
RemoveMockTransaction(&kRangeGET_TransactionOK);
}
+// Tests that we don't delete a sparse entry when we start a new request after
+// cancelling the previous one.
+TEST(HttpCache, RangeGET_Cancel2) {
+ MockHttpCache cache;
+ cache.http_cache()->set_enable_range_support(true);
+ AddMockTransaction(&kRangeGET_TransactionOK);
+
+ RunTransactionTest(cache.http_cache(), kRangeGET_TransactionOK);
+ MockHttpRequest request(kRangeGET_TransactionOK);
+
+ Context* c = new Context();
+ int rv = cache.http_cache()->CreateTransaction(&c->trans);
+ EXPECT_EQ(net::OK, rv);
+
+ rv = c->trans->Start(&request, &c->callback, NULL);
+ if (rv == net::ERR_IO_PENDING)
+ rv = c->callback.WaitForResult();
+
+ EXPECT_EQ(2, cache.network_layer()->transaction_count());
+ EXPECT_EQ(1, cache.disk_cache()->open_count());
+ EXPECT_EQ(1, cache.disk_cache()->create_count());
+
+ // Make sure that we revalidate the entry and read from the cache (a single
+ // read will return while waiting for the network).
+ scoped_refptr<net::IOBufferWithSize> buf = new net::IOBufferWithSize(5);
+ rv = c->trans->Read(buf, buf->size(), &c->callback);
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+ rv = c->callback.WaitForResult();
+ rv = c->trans->Read(buf, buf->size(), &c->callback);
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+
+ // Destroy the transaction before completing the read.
+ delete c;
+
+ // We have the read and the delete (OnProcessPendingQueue) waiting on the
+ // message loop. This means that a new transaction will just reuse the same
+ // active entry (no open or create).
+
+ RunTransactionTest(cache.http_cache(), kRangeGET_TransactionOK);
+
+ EXPECT_EQ(3, cache.network_layer()->transaction_count());
+ EXPECT_EQ(1, cache.disk_cache()->open_count());
+ EXPECT_EQ(1, cache.disk_cache()->create_count());
+ RemoveMockTransaction(&kRangeGET_TransactionOK);
+}
+
#ifdef NDEBUG
// This test hits a NOTREACHED so it is a release mode only test.
TEST(HttpCache, RangeGET_OK_LoadOnlyFromCache) {