author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-08 23:07:39 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-08 23:07:39 +0000
commit     06e62baf611d6fca8698d50765bca81989155845 (patch)
tree       5516ccda32e56e7dae301b8b30f5fd24a4c8b83c /net/http
parent     681d3b6b072e17a5de40be8794550f781bf28d87 (diff)
Disk cache: Add a method to cancel pending sparse operations.
The sparse IO methods require exclusive use of the cache entry, and they
complain when that requirement is violated. When the user cancels a request
and reissues another one to the same entry, we may still be waiting for the
previous operation to finish when we receive the new IO request, so we fail.

This CL adds a way for the HTTP cache to cancel IO operations and get a
notification when the disk cache is able to operate on that entry again.

BUG=23862
TEST=unittests

Review URL: http://codereview.chromium.org/256090

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@28475 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/http')
-rw-r--r--  net/http/http_cache.cc          |  40
-rw-r--r--  net/http/http_cache_unittest.cc |  98
2 files changed, 130 insertions(+), 8 deletions(-)
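
Before the diff itself, a minimal, self-contained sketch of the cancel-then-wait pattern this change wires into HttpCache::Transaction: cancel the pending sparse operation when the transaction goes away, then call ReadyForSparseIO() before the next byte-range validation and defer if the entry is still busy. The SparseEntry class, the callback alias, and the helper names below are simplified stand-ins for illustration only, not the real disk_cache::Entry or net::CompletionCallback API.

// Minimal sketch (not Chromium code) of the cancel-then-wait pattern.
#include <functional>
#include <iostream>
#include <utility>

using CompletionCallback = std::function<void(int result)>;
constexpr int OK = 0;
constexpr int ERR_IO_PENDING = -1;

class SparseEntry {
 public:
  // Marks any queued sparse operation as cancelled; the entry becomes usable
  // again once the in-flight operation drains.
  void CancelSparseIO() { cancelled_ = true; }

  // Returns OK if sparse IO can start right away, or ERR_IO_PENDING and runs
  // |callback| later, once the previously cancelled operation has finished.
  int ReadyForSparseIO(CompletionCallback callback) {
    if (!cancelled_ || !busy_)
      return OK;
    pending_ready_ = std::move(callback);
    return ERR_IO_PENDING;
  }

  void StartSparseOperation() { busy_ = true; }

  // Stands in for the disk cache finishing the old operation.
  void FinishInFlightOperation() {
    busy_ = false;
    cancelled_ = false;
    if (pending_ready_) {
      CompletionCallback cb = std::move(pending_ready_);
      pending_ready_ = nullptr;
      cb(OK);
    }
  }

 private:
  bool busy_ = false;
  bool cancelled_ = false;
  CompletionCallback pending_ready_;
};

// Mirrors the shape of HttpCache::Transaction::BeginPartialCacheValidation():
// for a byte-range request, wait for the entry before touching sparse data.
int BeginPartialCacheValidation(SparseEntry* entry, CompletionCallback on_ready) {
  if (entry->ReadyForSparseIO(std::move(on_ready)) != OK)
    return ERR_IO_PENDING;  // Validation resumes from the callback.
  return OK;                // Entry is free; validate headers immediately.
}

int main() {
  SparseEntry entry;
  entry.StartSparseOperation();  // A previous request left sparse IO pending.
  entry.CancelSparseIO();        // That request was cancelled by the user.

  int rv = BeginPartialCacheValidation(
      &entry, [](int result) { std::cout << "entry ready: " << result << "\n"; });
  std::cout << "start returned " << rv << " (ERR_IO_PENDING)\n";

  entry.FinishInFlightOperation();  // Old operation drains; callback fires.
  return 0;
}

In the actual patch below, the deferral is driven by entry_ready_callback_ and the deferred validation resumes in OnCacheEntryReady(), which calls ValidateEntryHeadersAndContinue().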
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 7fb9703..7985992 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -161,7 +161,10 @@ class HttpCache::Transaction : public HttpTransaction {
network_read_callback_(this, &Transaction::OnNetworkReadCompleted)),
ALLOW_THIS_IN_INITIALIZER_LIST(
cache_read_callback_(new CancelableCompletionCallback<Transaction>(
- this, &Transaction::OnCacheReadCompleted))) {
+ this, &Transaction::OnCacheReadCompleted))),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ entry_ready_callback_(new CancelableCompletionCallback<Transaction>(
+ this, &Transaction::OnCacheEntryReady))) {
}
// Clean up the transaction.
@@ -251,6 +254,10 @@ class HttpCache::Transaction : public HttpTransaction {
// a network error code.
int BeginPartialCacheValidation();
+ // Validates the entry headers against the requested range and continues with
+ // the validation of the rest of the entry. Returns a network error code.
+ int ValidateEntryHeadersAndContinue(bool byte_range_requested);
+
// Performs the cache validation for the next chunk of data stored by the
// cache. If this chunk is not currently stored, starts the network request
// to fetch it. Returns a network error code.
@@ -345,6 +352,9 @@ class HttpCache::Transaction : public HttpTransaction {
// Called to signal completion of the cache's ReadData method:
void OnCacheReadCompleted(int result);
+ // Called to signal completion of the cache entry's ReadyForSparseIO method:
+ void OnCacheEntryReady(int result);
+
scoped_refptr<LoadLog> load_log_;
const HttpRequestInfo* request_;
scoped_ptr<HttpRequestInfo> custom_request_;
@@ -373,14 +383,21 @@ class HttpCache::Transaction : public HttpTransaction {
CompletionCallbackImpl<Transaction> network_read_callback_;
scoped_refptr<CancelableCompletionCallback<Transaction> >
cache_read_callback_;
+ scoped_refptr<CancelableCompletionCallback<Transaction> >
+ entry_ready_callback_;
};
HttpCache::Transaction::~Transaction() {
if (cache_) {
if (entry_) {
bool cancel_request = reading_ && enable_range_support_;
- if (cancel_request && !partial_.get())
- cancel_request &= (response_.headers->response_code() == 200);
+ if (cancel_request) {
+ if (partial_.get()) {
+ entry_->disk_entry->CancelSparseIO();
+ } else {
+ cancel_request &= (response_.headers->response_code() == 200);
+ }
+ }
cache_->DoneWithEntry(entry_, this, cancel_request);
} else {
@@ -928,7 +945,10 @@ int HttpCache::Transaction::BeginPartialCacheValidation() {
return BeginCacheValidation();
bool byte_range_requested = partial_.get() != NULL;
- if (!byte_range_requested) {
+ if (byte_range_requested) {
+ if (OK != entry_->disk_entry->ReadyForSparseIO(entry_ready_callback_))
+ return ERR_IO_PENDING;
+ } else {
// The request is not for a range, but we have stored just ranges.
partial_.reset(new PartialData());
if (!custom_request_.get()) {
@@ -937,6 +957,13 @@ int HttpCache::Transaction::BeginPartialCacheValidation() {
}
}
+ return ValidateEntryHeadersAndContinue(byte_range_requested);
+}
+
+int HttpCache::Transaction::ValidateEntryHeadersAndContinue(
+ bool byte_range_requested) {
+ DCHECK(mode_ == READ_WRITE);
+
if (!partial_->UpdateFromStoredHeaders(response_.headers, entry_->disk_entry,
truncated_)) {
// The stored data cannot be used. Get rid of it and restart this request.
@@ -1552,6 +1579,11 @@ void HttpCache::Transaction::OnCacheReadCompleted(int result) {
DoCacheReadCompleted(result);
}
+void HttpCache::Transaction::OnCacheEntryReady(int result) {
+ DCHECK_EQ(OK, result);
+ ValidateEntryHeadersAndContinue(true);
+}
+
//-----------------------------------------------------------------------------
HttpCache::HttpCache(HostResolver* host_resolver,
diff --git a/net/http/http_cache_unittest.cc b/net/http/http_cache_unittest.cc
index ea98c67..67d6de8 100644
--- a/net/http/http_cache_unittest.cc
+++ b/net/http/http_cache_unittest.cc
@@ -32,11 +32,13 @@ class MockDiskEntry : public disk_cache::Entry,
public base::RefCounted<MockDiskEntry> {
public:
MockDiskEntry()
- : test_mode_(0), doomed_(false), sparse_(false), fail_requests_(false) {
+ : test_mode_(0), doomed_(false), sparse_(false), fail_requests_(false),
+ busy_(false), delayed_(false) {
}
explicit MockDiskEntry(const std::string& key)
- : key_(key), doomed_(false), sparse_(false), fail_requests_(false) {
+ : key_(key), doomed_(false), sparse_(false), fail_requests_(false),
+ busy_(false), delayed_(false) {
//
// 'key' is prefixed with an identifier if it corresponds to a cached POST.
// Skip past that to locate the actual URL.
@@ -131,7 +133,7 @@ class MockDiskEntry : public disk_cache::Entry,
virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback) {
- if (!sparse_)
+ if (!sparse_ || busy_)
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (offset < 0)
return net::ERR_FAILED;
@@ -152,11 +154,15 @@ class MockDiskEntry : public disk_cache::Entry,
return num;
CallbackLater(completion_callback, num);
+ busy_ = true;
+ delayed_ = false;
return net::ERR_IO_PENDING;
}
virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
net::CompletionCallback* completion_callback) {
+ if (busy_)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (!sparse_) {
if (data_[1].size())
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
@@ -181,7 +187,7 @@ class MockDiskEntry : public disk_cache::Entry,
}
virtual int GetAvailableRange(int64 offset, int len, int64* start) {
- if (!sparse_)
+ if (!sparse_ || busy_)
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
if (offset < 0)
return net::ERR_FAILED;
@@ -212,6 +218,23 @@ class MockDiskEntry : public disk_cache::Entry,
return count;
}
+ virtual void CancelSparseIO() { cancel_ = true; }
+
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback) {
+ if (!cancel_)
+ return net::OK;
+
+ cancel_ = false;
+ DCHECK(completion_callback);
+ if (test_mode_ & TEST_MODE_SYNC_CACHE_READ)
+ return net::OK;
+
+ // The pending operation is already in the message loop (and hopefully
+ // already in the second pass). Just notify the caller that it finished.
+ CallbackLater(completion_callback, 0);
+ return net::ERR_IO_PENDING;
+ }
+
// Fail most subsequent requests.
void set_fail_requests() { fail_requests_ = true; }
@@ -224,6 +247,21 @@ class MockDiskEntry : public disk_cache::Entry,
&MockDiskEntry::RunCallback, callback, result));
}
void RunCallback(net::CompletionCallback* callback, int result) {
+ if (busy_) {
+ // This is kind of hacky, but controlling the behavior of just this entry
+ // from a test is sort of complicated. What we really want to do is
+ // delay the delivery of a sparse IO operation a little more so that the
+ // request start operation (async) will finish without seeing the end of
+ // this operation (already posted to the message loop)... and without
+ // just delaying for n ms (which may cause trouble with slow bots). So
+ // we re-post this operation (all async sparse IO operations will take two
+ // trips through the message loop instead of one).
+ if (!delayed_) {
+ delayed_ = true;
+ return CallbackLater(callback, result);
+ }
+ }
+ busy_ = false;
callback->Run(result);
}
@@ -233,8 +271,14 @@ class MockDiskEntry : public disk_cache::Entry,
bool doomed_;
bool sparse_;
bool fail_requests_;
+ bool busy_;
+ bool delayed_;
+ static bool cancel_;
};
+// Static.
+bool MockDiskEntry::cancel_ = false;
+
class MockDiskCache : public disk_cache::Backend {
public:
MockDiskCache()
@@ -2344,6 +2388,52 @@ TEST(HttpCache, RangeGET_Cancel) {
RemoveMockTransaction(&kRangeGET_TransactionOK);
}
+// Tests that we don't delete a sparse entry when we start a new request after
+// cancelling the previous one.
+TEST(HttpCache, RangeGET_Cancel2) {
+ MockHttpCache cache;
+ cache.http_cache()->set_enable_range_support(true);
+ AddMockTransaction(&kRangeGET_TransactionOK);
+
+ RunTransactionTest(cache.http_cache(), kRangeGET_TransactionOK);
+ MockHttpRequest request(kRangeGET_TransactionOK);
+
+ Context* c = new Context();
+ int rv = cache.http_cache()->CreateTransaction(&c->trans);
+ EXPECT_EQ(net::OK, rv);
+
+ rv = c->trans->Start(&request, &c->callback, NULL);
+ if (rv == net::ERR_IO_PENDING)
+ rv = c->callback.WaitForResult();
+
+ EXPECT_EQ(2, cache.network_layer()->transaction_count());
+ EXPECT_EQ(1, cache.disk_cache()->open_count());
+ EXPECT_EQ(1, cache.disk_cache()->create_count());
+
+ // Make sure that we revalidate the entry and read from the cache (a single
+ // read will return while waiting for the network).
+ scoped_refptr<net::IOBufferWithSize> buf = new net::IOBufferWithSize(5);
+ rv = c->trans->Read(buf, buf->size(), &c->callback);
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+ rv = c->callback.WaitForResult();
+ rv = c->trans->Read(buf, buf->size(), &c->callback);
+ EXPECT_EQ(net::ERR_IO_PENDING, rv);
+
+ // Destroy the transaction before completing the read.
+ delete c;
+
+ // We have the read and the delete (OnProcessPendingQueue) waiting on the
+ // message loop. This means that a new transaction will just reuse the same
+ // active entry (no open or create).
+
+ RunTransactionTest(cache.http_cache(), kRangeGET_TransactionOK);
+
+ EXPECT_EQ(3, cache.network_layer()->transaction_count());
+ EXPECT_EQ(1, cache.disk_cache()->open_count());
+ EXPECT_EQ(1, cache.disk_cache()->create_count());
+ RemoveMockTransaction(&kRangeGET_TransactionOK);
+}
+
#ifdef NDEBUG
// This test hits a NOTREACHED so it is a release mode only test.
TEST(HttpCache, RangeGET_OK_LoadOnlyFromCache) {