-rw-r--r--  net/disk_cache/backend_unittest.cc          |  14
-rw-r--r--  net/disk_cache/entry_unittest.cc            | 333
-rw-r--r--  net/disk_cache/simple/simple_entry_impl.cc  | 282
-rw-r--r--  net/disk_cache/simple/simple_entry_impl.h   |  11
-rw-r--r--  net/http/http_cache.cc                      |   4
5 files changed, 542 insertions(+), 102 deletions(-)
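
The change below makes Create and Write on a simple-cache entry "optimistic": the caller gets a success result immediately, the real disk work is queued on the entry, and a later failure flips the entry into a sticky failure state that lasts until Close(). A minimal sketch of that pattern, assuming a single-threaded model and hypothetical names (the real implementation posts the work to a worker pool and replies on the IO thread):

    #include <functional>
    #include <queue>

    class OptimisticEntry {
     public:
      enum State { STATE_READY, STATE_IO_PENDING, STATE_FAILURE };

      // Queues |op| and reports success right away when the entry is idle;
      // a later failure of |op| is only observable through state_.
      bool WriteOptimistic(std::function<bool()> op) {
        if (state_ == STATE_FAILURE)
          return false;  // Sticky failure: all further operations fail.
        pending_.push(op);
        bool optimistic = (state_ == STATE_READY && pending_.size() == 1);
        RunNextOperationIfNeeded();
        return optimistic;
      }

      void RunNextOperationIfNeeded() {
        // Operations are strictly serialized per entry. In this sketch they
        // run synchronously; the real code defers them to a worker thread.
        while (!pending_.empty() && state_ != STATE_IO_PENDING) {
          std::function<bool()> op = pending_.front();
          pending_.pop();
          if (!op())
            state_ = STATE_FAILURE;  // Everything after this must fail.
        }
      }

     private:
      State state_ = STATE_READY;
      std::queue<std::function<bool()>> pending_;
    };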
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
index 70e24ac..3908f409 100644
--- a/net/disk_cache/backend_unittest.cc
+++ b/net/disk_cache/backend_unittest.cc
@@ -2900,6 +2900,13 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
   entry->Close();
   entry = NULL;
 
+  // To make sure the file creation completed, we need to call Open again so
+  // that we block until the files have actually been created.
+  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+  ASSERT_TRUE(entry != NULL);
+  entry->Close();
+  entry = NULL;
+
   // Delete one of the files in the entry.
   base::FilePath to_delete_file = cache_path_.AppendASCII(
       disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
@@ -2931,6 +2938,13 @@ TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
   entry->Close();
   entry = NULL;
 
+  // To make sure the file creation completed, we need to call Open again so
+  // that we block until the files have actually been created.
+  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+  ASSERT_NE(null, entry);
+  entry->Close();
+  entry = NULL;
+
   // Write an invalid header on stream 1.
   base::FilePath entry_file1_path = cache_path_.AppendASCII(
       disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 1));
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index ae5c120..94a227a 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -6,10 +6,10 @@
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/file_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/timer.h"
 #include "base/string_util.h"
 #include "base/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/timer.h"
 #include "net/base/completion_callback.h"
 #include "net/base/io_buffer.h"
 #include "net/base/net_errors.h"
@@ -20,6 +20,7 @@
 #include "net/disk_cache/entry_impl.h"
 #include "net/disk_cache/mem_entry_impl.h"
 #include "net/disk_cache/simple/simple_entry_format.h"
+#include "net/disk_cache/simple/simple_entry_impl.h"
 #include "net/disk_cache/simple/simple_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -2425,6 +2426,334 @@ TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
   DisableIntegrityCheck();
 }
 
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
+  // Test sequence:
+  // Create, Write, Read, Write, Read, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  MessageLoopHelper helper;
+  CallbackTest callback1(&helper, false);
+  CallbackTest callback2(&helper, false);
+  CallbackTest callback3(&helper, false);
+  CallbackTest callback4(&helper, false);
+  CallbackTest callback5(&helper, false);
+
+  int expected = 0;
+  const int kSize1 = 10;
+  const int kSize2 = 20;
+  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
+  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
+  scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
+  CacheTestFillBuffer(buffer1->data(), kSize1, false);
+  CacheTestFillBuffer(buffer2->data(), kSize2, false);
+
+  disk_cache::Entry* entry = NULL;
+  // Create is optimistic, must return OK.
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry,
+                                base::Bind(&CallbackTest::Run,
+                                           base::Unretained(&callback1))));
+  EXPECT_NE(null, entry);
+
+  // This write may or may not be optimistic (it depends on whether the
+  // previous optimistic create has already finished by the time we call the
+  // write here).
+  int ret = entry->WriteData(
+      0, 0, buffer1, kSize1,
+      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)), false);
+  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
+  if (net::ERR_IO_PENDING == ret)
+    expected++;
+
+  // This Read must not be optimistic, since we don't support that yet.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadData(
+      0, 0, buffer1_read, kSize1,
+      base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
+  expected++;
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
+
+  // At this point, after waiting, the pending operations queue on the entry
+  // should be empty, so the next Write operation must run as optimistic.
+  EXPECT_EQ(kSize2,
+            entry->WriteData(
+                0, 0, buffer2, kSize2,
+                base::Bind(&CallbackTest::Run,
+                           base::Unretained(&callback4)), false));
+
+  // Let's do another read so that we block until both the write and the read
+  // operations finish and we can then test for HasOneRef() below.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadData(
+      0, 0, buffer2_read, kSize2,
+      base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
+  expected++;
+
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
+
+  // Check that we are not leaking.
+  EXPECT_NE(entry, null);
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+  entry->Close();
+  entry = NULL;
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
+  // Test sequence:
+  // Create, Open, Close, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  MessageLoopHelper helper;
+  CallbackTest callback1(&helper, false);
+  CallbackTest callback2(&helper, false);
+
+  disk_cache::Entry* entry = NULL;
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry,
+                                base::Bind(&CallbackTest::Run,
+                                           base::Unretained(&callback1))));
+  EXPECT_NE(null, entry);
+
+  disk_cache::Entry* entry2 = NULL;
+  EXPECT_EQ(net::ERR_IO_PENDING,
+            cache_->OpenEntry(key, &entry2,
+                              base::Bind(&CallbackTest::Run,
+                                         base::Unretained(&callback2))));
+  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(1));
+
+  EXPECT_NE(null, entry2);
+  EXPECT_EQ(entry, entry2);
+
+  // We have to call Close() twice, since we called Create and Open above.
+  entry->Close();
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+  entry->Close();
+  entry = NULL;
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
+  // Test sequence:
+  // Create, Close, Open, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  disk_cache::Entry* entry = NULL;
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+  EXPECT_NE(null, entry);
+  entry->Close();
+
+  net::TestCompletionCallback cb;
+  disk_cache::Entry* entry2 = NULL;
+  EXPECT_EQ(net::ERR_IO_PENDING,
+            cache_->OpenEntry(key, &entry2, cb.callback()));
+  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+
+  EXPECT_NE(null, entry2);
+  EXPECT_EQ(entry, entry2);
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
+  entry2->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
+  // Test sequence:
+  // Create, Close, Write, Open, Open, Close, Write, Read, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  net::TestCompletionCallback cb;
+  const int kSize1 = 10;
+  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+  CacheTestFillBuffer(buffer1->data(), kSize1, false);
+  disk_cache::Entry* entry = NULL;
+
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+  EXPECT_NE(null, entry);
+  entry->Close();
+
+  // Let's do a Write so that we block until both the Close and the Write
+  // operations finish. The Write must fail, since we are writing to a closed
+  // entry.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->WriteData(
+      0, 0, buffer1, kSize1, cb.callback(), false));
+  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Finish running the pending tasks so that we fully complete the close
+  // operation and destroy the entry object.
+  MessageLoop::current()->RunUntilIdle();
+
+  // At this point the |entry| must have been destroyed, and it must have
+  // called RemoveSelfFromBackend().
+  disk_cache::Entry* entry2 = NULL;
+  EXPECT_EQ(net::ERR_IO_PENDING,
+            cache_->OpenEntry(key, &entry2, cb.callback()));
+  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+  EXPECT_NE(null, entry2);
+
+  disk_cache::Entry* entry3 = NULL;
+  EXPECT_EQ(net::ERR_IO_PENDING,
+            cache_->OpenEntry(key, &entry3, cb.callback()));
+  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+  EXPECT_NE(null, entry3);
+  EXPECT_EQ(entry2, entry3);
+  entry3->Close();
+
+  // The previous Close doesn't actually close the entry, since we opened it
+  // twice, so the next Write operation must succeed, and it must be able to
+  // run optimistically, since there is no operation running on this entry.
+  EXPECT_EQ(kSize1, entry2->WriteData(
+      0, 0, buffer1, kSize1, net::CompletionCallback(), false));
+
+  // Let's do another read so that we block until both the write and the read
+  // operations finish and we can then test for HasOneRef() below.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry2->ReadData(
+      0, 0, buffer1, kSize1, cb.callback()));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
+  entry2->Close();
+}
+
+// This test is flaky because of the race of Create followed by a Doom.
+// See test SimpleCacheCreateDoomRace.
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheOptimistic5) {
+  // Test sequence:
+  // Create, Doom, Write, Read, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  net::TestCompletionCallback cb;
+  const int kSize1 = 10;
+  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+  CacheTestFillBuffer(buffer1->data(), kSize1, false);
+  disk_cache::Entry* entry = NULL;
+
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+  EXPECT_NE(null, entry);
+  entry->Doom();
+
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->WriteData(
+      0, 0, buffer1, kSize1, cb.callback(), false));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadData(
+      0, 0, buffer1, kSize1, cb.callback()));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+  entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
+  // Test sequence:
+  // Create, Write, Doom, Doom, Read, Doom, Close.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  net::TestCompletionCallback cb;
+  const int kSize1 = 10;
+  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
+  CacheTestFillBuffer(buffer1->data(), kSize1, false);
+  disk_cache::Entry* entry = NULL;
+
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+  EXPECT_NE(null, entry);
+
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->WriteData(
+      0, 0, buffer1, kSize1, cb.callback(), false));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+  entry->Doom();
+  entry->Doom();
+
+  // This Read must not be optimistic, since we don't support that yet.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadData(
+      0, 0, buffer1_read, kSize1, cb.callback()));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
+
+  entry->Doom();
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+  entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCreateDoomRace) {
+  // Test sequence:
+  // Create, Doom, Write, Close, Check files are not on disk anymore.
+  SetSimpleCacheMode();
+  InitCache();
+  disk_cache::Entry* null = NULL;
+  const char key[] = "the first key";
+
+  net::TestCompletionCallback cb;
+  const int kSize1 = 10;
+  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
+  CacheTestFillBuffer(buffer1->data(), kSize1, false);
+  disk_cache::Entry* entry = NULL;
+
+  ASSERT_EQ(net::OK,
+            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+  EXPECT_NE(null, entry);
+
+  cache_->DoomEntry(key, cb.callback());
+  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Let's do a Write so that we block until all operations are done, so that
+  // we can check HasOneRef() below. This call can't be optimistic, and that
+  // is exactly what we are checking here.
+  EXPECT_EQ(net::ERR_IO_PENDING, entry->WriteData(
+      0, 0, buffer1, kSize1, cb.callback(), false));
+  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
+
+  // Check that we are not leaking.
+  EXPECT_TRUE(
+      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
+  entry->Close();
+
+  // Finish running the pending tasks so that we fully complete the close
+  // operation and destroy the entry object.
+  MessageLoop::current()->RunUntilIdle();
+
+  for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
+    base::FilePath entry_file_path = cache_path_.AppendASCII(
+        disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, i));
+    base::PlatformFileInfo info;
+    EXPECT_FALSE(file_util::GetFileInfo(entry_file_path, &info));
+  }
+}
+
 // Tests that old entries are evicted while new entries remain in the index.
 // This test relies on non-mandatory properties of the simple Cache Backend:
 // LRU eviction, specific values of high-watermark and low-watermark etc.
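
The next file implements the entry-side state machine. One piece worth calling out is the fast-failover path added to OpenEntry(): the in-memory index is consulted first, and a miss fails immediately instead of queueing disk IO. A rough sketch of just that check, using hypothetical stand-in types (the real code calls backend_->index()->Has(key_) and records a UMA histogram):

    #include <set>
    #include <string>

    // Stand-in result codes matching net's conventions (-1 pending, -2 failed).
    enum FakeResult { FAKE_OK = 0, FAKE_ERR_IO_PENDING = -1, FAKE_ERR_FAILED = -2 };

    struct FakeIndex {
      std::set<std::string> keys;
      bool Has(const std::string& key) const { return keys.count(key) > 0; }
    };

    // Mirrors the shape of the new OpenEntry() fast path: fail fast on an
    // index miss so the HTTP layer can fall over to the network at once.
    FakeResult OpenEntryFastPath(const FakeIndex& index, const std::string& key) {
      if (!index.Has(key))
        return FAKE_ERR_FAILED;    // Miss: do not even queue a disk operation.
      return FAKE_ERR_IO_PENDING;  // Hit: the real open proceeds asynchronously.
    }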
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index 2296ecd..cf9066a 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -79,16 +79,35 @@ SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
                  arrays_should_be_same_size2);
   COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                  arrays_should_be_same_size3);
-
   MakeUninitialized();
 }
 
 int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                                const CompletionCallback& callback) {
   DCHECK(backend_);
+  // This enumeration is used in histograms, add entries only at the end.
+  enum OpenEntryIndexEnum {
+    INDEX_NOEXIST = 0,
+    INDEX_MISS = 1,
+    INDEX_HIT = 2,
+    INDEX_MAX = 3,
+  };
+  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
+  if (backend_) {
+    if (backend_->index()->Has(key_))
+      open_entry_index_enum = INDEX_HIT;
+    else
+      open_entry_index_enum = INDEX_MISS;
+  }
+  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
+                            open_entry_index_enum, INDEX_MAX);
+
+  // If the entry is not known to the index, initiate fast failover to the
+  // network.
+  if (open_entry_index_enum == INDEX_MISS)
+    return net::ERR_FAILED;
+
   pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
-                                      this, out_entry, callback));
+                                      this, callback, out_entry));
   RunNextOperationIfNeeded();
   return net::ERR_IO_PENDING;
 }
@@ -96,15 +115,42 @@ int SimpleEntryImpl::OpenEntry(Entry** out_entry,
 int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                  const CompletionCallback& callback) {
   DCHECK(backend_);
-  pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
-                                      this, out_entry, callback));
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_UNINITIALIZED &&
+      pending_operations_.size() == 0) {
+    ReturnEntryToCaller(out_entry);
+    // We can do an optimistic Create.
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        CompletionCallback(),
+                                        static_cast<Entry**>(NULL)));
+    ret_value = net::OK;
+  } else {
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        callback,
+                                        out_entry));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  // We insert the entry in the index before creating the entry files in the
+  // SimpleSynchronousEntry, because this way the worst-case scenario is
+  // having the entry in the index without the created files yet, so we never
+  // leak files. CreationOperationComplete will remove the entry from the
+  // index if the creation fails.
+  if (backend_)
+    backend_->index()->Insert(key_);
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
   RunNextOperationIfNeeded();
-  return net::ERR_IO_PENDING;
+  return ret_value;
 }
 
 int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
   MarkAsDoomed();
-
   scoped_ptr<int> result(new int());
   Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                             entry_hash_, result.get());
@@ -152,6 +198,7 @@ Time SimpleEntryImpl::GetLastModified() const {
 
 int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK_LE(0, data_size_[stream_index]);
   return data_size_[stream_index];
 }
 
@@ -165,7 +212,7 @@ int SimpleEntryImpl::ReadData(int stream_index,
     return net::ERR_INVALID_ARGUMENT;
   if (offset >= data_size_[stream_index] || offset < 0 || !buf_len)
     return 0;
-  buf_len = std::min(buf_len, data_size_[stream_index] - offset);
+
   // TODO(felipeg): Optimization: Add support for truly parallel read
   // operations.
   pending_operations_.push(
@@ -191,19 +238,41 @@ int SimpleEntryImpl::WriteData(int stream_index,
       buf_len < 0) {
     return net::ERR_INVALID_ARGUMENT;
   }
-  pending_operations_.push(
-      base::Bind(&SimpleEntryImpl::WriteDataInternal,
-                 this,
-                 stream_index,
-                 offset,
-                 make_scoped_refptr(buf),
-                 buf_len,
-                 callback,
-                 truncate));
+
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_READY && pending_operations_.size() == 0) {
+    // We can only do an optimistic Write if there are no pending operations,
+    // so that we are sure the next call to RunNextOperationIfNeeded will
+    // actually run the Write operation that sets the stream size. It also
+    // protects us from previous possibly-conflicting writes that could be
+    // stacked in |pending_operations_|. We could optimize this for the case
+    // where only read operations are enqueued.
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
+                   offset, make_scoped_refptr(buf), buf_len,
+                   CompletionCallback(), truncate));
+    ret_value = buf_len;
+  } else {
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
+                   offset, make_scoped_refptr(buf), buf_len, callback,
+                   truncate));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  if (truncate) {
+    data_size_[stream_index] = offset + buf_len;
+  } else {
+    data_size_[stream_index] = std::max(offset + buf_len,
+                                        data_size_[stream_index]);
+  }
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
   RunNextOperationIfNeeded();
-  // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
-  // returning net::OK here.
-  return net::ERR_IO_PENDING;
+  return ret_value;
 }
 
 int SimpleEntryImpl::ReadSparseData(int64 offset,
@@ -258,7 +327,7 @@ int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
 
 SimpleEntryImpl::~SimpleEntryImpl() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_UNINITIALIZED, state_);
+  DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
   DCHECK(!synchronous_entry_);
   RemoveSelfFromBackend();
 }
@@ -268,9 +337,11 @@ void SimpleEntryImpl::MakeUninitialized() {
   std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
   std::memset(crc32s_, 0, sizeof(crc32s_));
   std::memset(have_written_, 0, sizeof(have_written_));
+  std::memset(data_size_, 0, sizeof(data_size_));
 }
 
 void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
+  DCHECK(out_entry);
   ++open_count_;
   AddRef();  // Balanced in Close()
   *out_entry = this;
@@ -292,10 +363,8 @@ void SimpleEntryImpl::MarkAsDoomed() {
 
 void SimpleEntryImpl::RunNextOperationIfNeeded() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-
   UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                               pending_operations_.size(), 0, 100, 20);
-
   if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
     base::Closure operation = pending_operations_.front();
     pending_operations_.pop();
@@ -304,43 +373,23 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
   }
 }
 
-void SimpleEntryImpl::OpenEntryInternal(Entry** out_entry,
-                                        const CompletionCallback& callback) {
+void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
+                                        Entry** out_entry) {
   ScopedOperationRunner operation_runner(this);
-
   if (state_ == STATE_READY) {
     ReturnEntryToCaller(out_entry);
     MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                 net::OK));
     return;
-  }
-  DCHECK_EQ(STATE_UNINITIALIZED, state_);
-
-  // This enumeration is used in histograms, add entries only at end.
-  enum OpenEntryIndexEnum {
-    INDEX_NOEXIST = 0,
-    INDEX_MISS = 1,
-    INDEX_HIT = 2,
-    INDEX_MAX = 3,
-  };
-  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
-  if (backend_) {
-    if (backend_->index()->Has(key_))
-      open_entry_index_enum = INDEX_HIT;
-    else
-      open_entry_index_enum = INDEX_MISS;
-  }
-  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
-                            open_entry_index_enum, INDEX_MAX);
-
-  // If entry is not known to the index, initiate fast failover to the network.
-  if (open_entry_index_enum == INDEX_MISS) {
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
+  } else if (state_ == STATE_FAILURE) {
+    if (!callback.is_null()) {
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
     return;
   }
+
+  DCHECK_EQ(STATE_UNINITIALIZED, state_);
   state_ = STATE_IO_PENDING;
-
   const base::TimeTicks start_time = base::TimeTicks::Now();
   typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
   scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
@@ -354,15 +403,15 @@ void SimpleEntryImpl::OpenEntryInternal(Entry** out_entry,
   WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
 }
 
-void SimpleEntryImpl::CreateEntryInternal(Entry** out_entry,
-                                          const CompletionCallback& callback) {
+void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
+                                          Entry** out_entry) {
   ScopedOperationRunner operation_runner(this);
-
-  if (state_ == STATE_READY) {
+  if (state_ != STATE_UNINITIALIZED) {
+    // There is already an active normal entry.
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
+    if (!callback.is_null()) {
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
     return;
   }
   DCHECK_EQ(STATE_UNINITIALIZED, state_);
@@ -373,13 +422,6 @@ void SimpleEntryImpl::CreateEntryInternal(Entry** out_entry,
   for (int i = 0; i < kSimpleEntryFileCount; ++i)
     have_written_[i] = true;
 
-  // We insert the entry in the index before creating the entry files in the
-  // SimpleSynchronousEntry, because this way the worst scenario is when we
-  // have the entry in the index but we don't have the created files yet, this
-  // way we never leak files. CreationOperationComplete will remove the entry
-  // from the index if the creation fails.
-  if (backend_)
-    backend_->index()->Insert(key_);
 
   const base::TimeTicks start_time = base::TimeTicks::Now();
   typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
   scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
@@ -395,32 +437,38 @@ void SimpleEntryImpl::CreateEntryInternal(Entry** out_entry,
 
 void SimpleEntryImpl::CloseInternal() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_READY, state_);
-  DCHECK(synchronous_entry_);
-
-  state_ = STATE_IO_PENDING;
-
   typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
-
   scoped_ptr<std::vector<CRCRecord> > crc32s_to_write(new std::vector<CRCRecord>());
-  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
-    if (have_written_[i]) {
-      if (data_size_[i] == crc32s_end_offset_[i]) {
-        int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
-        crc32s_to_write->push_back(CRCRecord(i, true, crc));
-      } else {
-        crc32s_to_write->push_back(CRCRecord(i, false, 0));
+
+  if (state_ == STATE_READY) {
+    DCHECK(synchronous_entry_);
+    state_ = STATE_IO_PENDING;
+    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+      if (have_written_[i]) {
+        if (data_size_[i] == crc32s_end_offset_[i]) {
+          int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
+          crc32s_to_write->push_back(CRCRecord(i, true, crc));
+        } else {
+          crc32s_to_write->push_back(CRCRecord(i, false, 0));
+        }
       }
     }
+  } else {
+    DCHECK_EQ(STATE_FAILURE, state_);
+  }
+
+  if (synchronous_entry_) {
+    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
+                              base::Unretained(synchronous_entry_),
+                              base::Passed(&crc32s_to_write));
+    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
+    synchronous_entry_ = NULL;
+    WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
+  } else {
+    synchronous_entry_ = NULL;
+    CloseOperationComplete();
   }
-  Closure task = base::Bind(&SimpleSynchronousEntry::Close,
-                            base::Unretained(synchronous_entry_),
-                            base::Passed(&crc32s_to_write));
-  Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
-  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
-  synchronous_entry_ = NULL;
 }
 
@@ -429,7 +477,26 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                        int buf_len,
                                        const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null()) {
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
+    return;
+  }
   DCHECK_EQ(STATE_READY, state_);
+
+  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
+  if (offset < 0 || buf_len <= 0) {
+    // If there is nothing to read, we bail out before setting state_ to
+    // STATE_IO_PENDING.
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, 0));
+    return;
+  }
+
   state_ = STATE_IO_PENDING;
   if (backend_)
     backend_->index()->UseIfExists(key_);
@@ -453,6 +520,17 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
     const CompletionCallback& callback,
     bool truncate) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null()) {
+      // We need to post a task so that we don't go into a loop when we call
+      // the callback directly.
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
+    // |this| may be destroyed after we return here.
+    return;
+  }
   DCHECK_EQ(STATE_READY, state_);
   state_ = STATE_IO_PENDING;
   if (backend_)
@@ -495,25 +573,35 @@ void SimpleEntryImpl::CreationOperationComplete(
   DCHECK_EQ(state_, STATE_IO_PENDING);
   DCHECK(in_sync_entry);
   DCHECK(in_result);
-
   ScopedOperationRunner operation_runner(this);
-
   UMA_HISTOGRAM_BOOLEAN(
       "SimpleCache.EntryCreationResult", *in_result == net::OK);
   if (*in_result != net::OK) {
     if (*in_result!= net::ERR_FILE_EXISTS)
       MarkAsDoomed();
+
+    if (!completion_callback.is_null()) {
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          completion_callback, net::ERR_FAILED));
+    }
     MakeUninitialized();
-    completion_callback.Run(net::ERR_FAILED);
+    state_ = STATE_FAILURE;
     return;
   }
+
+  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
+  // the optimistic Create case.
+  if (out_entry)
+    ReturnEntryToCaller(out_entry);
+
   state_ = STATE_READY;
   synchronous_entry_ = *in_sync_entry;
   SetSynchronousData();
-  ReturnEntryToCaller(out_entry);
   UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                       (base::TimeTicks::Now() - start_time));
-  completion_callback.Run(net::OK);
+
+  if (!completion_callback.is_null()) {
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, net::OK));
+  }
 }
 
 void SimpleEntryImpl::EntryOperationComplete(
@@ -524,15 +612,19 @@ void SimpleEntryImpl::EntryOperationComplete(
   DCHECK(synchronous_entry_);
   DCHECK_EQ(STATE_IO_PENDING, state_);
   DCHECK(result);
-
   state_ = STATE_READY;
-
   if (*result < 0) {
     MarkAsDoomed();
+    state_ = STATE_FAILURE;
     crc32s_end_offset_[stream_index] = 0;
+  } else {
+    SetSynchronousData();
+  }
+
+  if (!completion_callback.is_null()) {
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, *result));
   }
-  SetSynchronousData();
-  completion_callback.Run(*result);
   RunNextOperationIfNeeded();
 }
 
@@ -598,14 +690,14 @@ void SimpleEntryImpl::ChecksumOperationComplete(
 void SimpleEntryImpl::CloseOperationComplete() {
   DCHECK(!synchronous_entry_);
   DCHECK_EQ(0, open_count_);
-  DCHECK_EQ(STATE_IO_PENDING, state_);
-
+  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_);
   MakeUninitialized();
   RunNextOperationIfNeeded();
 }
 
 void SimpleEntryImpl::SetSynchronousData() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(synchronous_entry_);
   DCHECK_EQ(STATE_READY, state_);
   // TODO(felipeg): These copies to avoid data races are not optimal. While
   // adding an IO thread index (for fast misses etc...), we can store this data
diff --git a/net/disk_cache/simple/simple_entry_impl.h b/net/disk_cache/simple/simple_entry_impl.h
index 87eecca..79ec6ae 100644
--- a/net/disk_cache/simple/simple_entry_impl.h
+++ b/net/disk_cache/simple/simple_entry_impl.h
@@ -104,6 +104,10 @@ class SimpleEntryImpl : public Entry, public base::RefCounted<SimpleEntryImpl>,
     // IO is currently in flight, operations must wait for completion before
     // launching.
     STATE_IO_PENDING,
+
+    // A failure occurred in the current or previous operation. All operations
+    // after that must fail until we receive a Close().
+    STATE_FAILURE,
   };
 
   virtual ~SimpleEntryImpl();
@@ -130,11 +134,10 @@ class SimpleEntryImpl : public Entry, public base::RefCounted<SimpleEntryImpl>,
   // the last reference.
   void RunNextOperationIfNeeded();
 
-  void OpenEntryInternal(Entry** entry,
-                         const CompletionCallback& callback);
+  void OpenEntryInternal(const CompletionCallback& callback, Entry** out_entry);
 
-  void CreateEntryInternal(Entry** entry,
-                           const CompletionCallback& callback);
+  void CreateEntryInternal(const CompletionCallback& callback,
+                           Entry** out_entry);
 
   void CloseInternal();
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 27c591d..421300e 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -722,7 +722,9 @@ int HttpCache::OpenEntry(const std::string& key, ActiveEntry** entry,
 int HttpCache::CreateEntry(const std::string& key, ActiveEntry** entry,
                            Transaction* trans) {
-  DCHECK(!FindActiveEntry(key));
+  if (FindActiveEntry(key)) {
+    return ERR_CACHE_RACE;
+  }
 
   WorkItem* item = new WorkItem(WI_CREATE_ENTRY, trans, entry);
   PendingOp* pending_op = GetPendingOp(key);
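
The http_cache.cc hunk above replaces a DCHECK with a real error return: with optimistic creates in play, two transactions can race to create the same key, so the slower one now sees ERR_CACHE_RACE instead of tripping an assertion in a debug build. A hedged sketch of how a caller might treat that code, using stand-in types rather than the real HttpCache API (in Chromium the transaction restarts itself when it sees ERR_CACHE_RACE):

    #include <string>

    // Illustrative stand-ins; the real declarations live under net/.
    enum { OK = 0, ERR_CACHE_RACE = -406 };  // -406 per net_error_list.h.
    struct CacheLike {
      int CreateEntry(const std::string& key);  // May return ERR_CACHE_RACE.
    };

    // Retry on ERR_CACHE_RACE: the entry that beat us may be gone on the next
    // attempt, in which case the create succeeds.
    int CreateEntryWithRetry(CacheLike* cache, const std::string& key) {
      static const int kMaxAttempts = 3;  // Arbitrary illustrative bound.
      int rv = ERR_CACHE_RACE;
      for (int i = 0; i < kMaxAttempts && rv == ERR_CACHE_RACE; ++i)
        rv = cache->CreateEntry(key);
      return rv;
    }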