summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorgavinp@chromium.org <gavinp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-05-10 22:39:39 +0000
committergavinp@chromium.org <gavinp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-05-10 22:39:39 +0000
commit70740b7a042b011ee45764c777799f762c4d5076 (patch)
treeb80bde5d25c3da9fdcbcca9bc6b7a92b53510bd7
parentb7a059d8e08d6c02f718552793de33cbabb6e77a (diff)
downloadchromium_src-70740b7a042b011ee45764c777799f762c4d5076.zip
chromium_src-70740b7a042b011ee45764c777799f762c4d5076.tar.gz
chromium_src-70740b7a042b011ee45764c777799f762c4d5076.tar.bz2
Deflake SimpleCache reads when multiple operations in flight.
When multiple Reads/Writes are in flight, they can race each other for updates to the size of the entry. By moving the updates to coincide with the beginning of the blocking operation, we remove flakes. Unit tests still require that reads can fast fail; but not while operations are pending (they use the single operation interface), so we still pass unit tests with the tighter (and actually correct) behaviour that we only fast fail when no other operations are pending. R=pasko@chromium.org,felipeg,rdsmith,pliard BUG=239223 Review URL: https://chromiumcodereview.appspot.com/15085016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@199559 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--net/disk_cache/disk_cache_test_util.cc4
-rw-r--r--net/disk_cache/disk_cache_test_util.h11
-rw-r--r--net/disk_cache/entry_unittest.cc100
-rw-r--r--net/disk_cache/simple/simple_entry_impl.cc38
4 files changed, 130 insertions, 23 deletions
diff --git a/net/disk_cache/disk_cache_test_util.cc b/net/disk_cache/disk_cache_test_util.cc
index 8307954..ff33ab2 100644
--- a/net/disk_cache/disk_cache_test_util.cc
+++ b/net/disk_cache/disk_cache_test_util.cc
@@ -132,7 +132,9 @@ CallbackTest::~CallbackTest() {
// On the actual callback, increase the number of tests received and check for
// errors (an unexpected test received)
-void CallbackTest::Run(int params) {
+void CallbackTest::Run(int result) {
+ last_result_ = result;
+
if (reuse_) {
DCHECK_EQ(1, reuse_);
if (2 == reuse_)
diff --git a/net/disk_cache/disk_cache_test_util.h b/net/disk_cache/disk_cache_test_util.h
index f6d1f46..2c9a573 100644
--- a/net/disk_cache/disk_cache_test_util.h
+++ b/net/disk_cache/disk_cache_test_util.h
@@ -85,17 +85,20 @@ class MessageLoopHelper {
class CallbackTest {
public:
// Creates a new CallbackTest object. When the callback is called, it will
- // update |helper| with the result of the call. If |reuse| is false and a
- // callback is called more than once, or if |reuse| is true and a callback
- // is called more than twice, an error will be reported to |helper|.
+ // update |helper|. If |reuse| is false and a callback is called more than
+ // once, or if |reuse| is true and a callback is called more than twice, an
+ // error will be reported to |helper|.
CallbackTest(MessageLoopHelper* helper, bool reuse);
~CallbackTest();
- void Run(int params);
+ void Run(int result);
+
+ int last_result() const { return last_result_; }
private:
MessageLoopHelper* helper_;
int reuse_;
+ int last_result_;
DISALLOW_COPY_AND_ASSIGN(CallbackTest);
};
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 37be34d..bac7e32 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -2792,4 +2792,104 @@ TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
}
}
+// Tests that if a read and a following in-flight truncate are both in progress
+// simultaneously that they both can occur successfully. See
+// http://crbug.com/239223
+TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
+
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ EXPECT_EQ(kBufferSize,
+ WriteData(entry, 0, 0, write_buffer, kBufferSize, false));
+ entry->Close();
+ entry = NULL;
+
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+
+ MessageLoopHelper helper;
+ int expected = 0;
+
+ // Make a short read.
+ const int kReadBufferSize = 512;
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
+ CallbackTest read_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0, 0, read_buffer, kReadBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&read_callback))));
+ ++expected;
+
+ // Truncate the entry to the length of that read.
+ scoped_refptr<net::IOBuffer>
+ truncate_buffer(new net::IOBuffer(kReadBufferSize));
+ CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
+ CallbackTest truncate_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, truncate_buffer, kReadBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&truncate_callback)),
+ true));
+ ++expected;
+
+ // Wait for both the read and truncation to finish, and confirm that both
+ // succeeded.
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(kReadBufferSize, read_callback.last_result());
+ EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
+ EXPECT_EQ(0,
+ memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
+ entry->Close();
+}
+
+// Tests that if a write and a read dependent on it are both in flight
+// simultaneously that they both can complete successfully without erroneous
+// early returns. See http://crbug.com/239223
+TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
+ SetSimpleCacheMode();
+ InitCache();
+
+ const char key[] = "the first key";
+ disk_cache::Entry* entry = NULL;
+ ASSERT_EQ(net::OK,
+ cache_->CreateEntry(key, &entry, net::CompletionCallback()));
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
+ CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
+
+ MessageLoopHelper helper;
+ int expected = 0;
+
+ CallbackTest write_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->WriteData(0, 0, write_buffer, kBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&write_callback)),
+ true));
+ ++expected;
+
+ scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
+ CallbackTest read_callback(&helper, false);
+ EXPECT_EQ(net::ERR_IO_PENDING,
+ entry->ReadData(0, 0, read_buffer, kBufferSize,
+ base::Bind(&CallbackTest::Run,
+ base::Unretained(&read_callback))));
+ ++expected;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(kBufferSize, write_callback.last_result());
+ EXPECT_EQ(kBufferSize, read_callback.last_result());
+ EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
+ entry->Close();
+}
+
#endif // defined(OS_POSIX)
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index 1a6873d..086d836 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -141,10 +141,6 @@ int SimpleEntryImpl::CreateEntry(Entry** out_entry,
if (backend_)
backend_->index()->Insert(key_);
- // Since we don't know the correct values for |last_used_| and
- // |last_modified_| yet, we make this approximation.
- last_used_ = last_modified_ = base::Time::Now();
-
RunNextOperationIfNeeded();
return ret_value;
}
@@ -210,8 +206,10 @@ int SimpleEntryImpl::ReadData(int stream_index,
DCHECK(io_thread_checker_.CalledOnValidThread());
if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (offset >= data_size_[stream_index] || offset < 0 || !buf_len)
+ if (pending_operations_.empty() && (offset >= data_size_[stream_index] ||
+ offset < 0 || !buf_len)) {
return 0;
+ }
// TODO(felipeg): Optimization: Add support for truly parallel read
// operations.
@@ -262,17 +260,6 @@ int SimpleEntryImpl::WriteData(int stream_index,
ret_value = net::ERR_IO_PENDING;
}
- if (truncate) {
- data_size_[stream_index] = offset + buf_len;
- } else {
- data_size_[stream_index] = std::max(offset + buf_len,
- data_size_[stream_index]);
- }
-
- // Since we don't know the correct values for |last_used_| and
- // |last_modified_| yet, we make this approximation.
- last_used_ = last_modified_ = base::Time::Now();
-
RunNextOperationIfNeeded();
return ret_value;
}
@@ -420,6 +407,10 @@ void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
state_ = STATE_IO_PENDING;
+ // Since we don't know the correct values for |last_used_| and
+ // |last_modified_| yet, we make this approximation.
+ last_used_ = last_modified_ = base::Time::Now();
+
// If creation succeeds, we should mark all streams to be saved on close.
for (int i = 0; i < kSimpleEntryFileCount; ++i)
have_written_[i] = true;
@@ -489,8 +480,7 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
return;
}
DCHECK_EQ(STATE_READY, state_);
- buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
- if (offset < 0 || buf_len <= 0) {
+ if (offset >= data_size_[stream_index] || offset < 0 || !buf_len) {
// If there is nothing to read, we bail out before setting state_ to
// STATE_IO_PENDING.
if (!callback.is_null())
@@ -498,6 +488,7 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
callback, 0));
return;
}
+ buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
state_ = STATE_IO_PENDING;
if (backend_)
@@ -553,6 +544,17 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
crc32s_end_offset_[stream_index] = offset + buf_len;
}
+ if (truncate) {
+ data_size_[stream_index] = offset + buf_len;
+ } else {
+ data_size_[stream_index] = std::max(offset + buf_len,
+ data_size_[stream_index]);
+ }
+
+ // Since we don't know the correct values for |last_used_| and
+ // |last_modified_| yet, we make this approximation.
+ last_used_ = last_modified_ = base::Time::Now();
+
have_written_[stream_index] = true;
scoped_ptr<int> result(new int());