author    agayev@chromium.org <agayev@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-11-16 19:50:41 +0000
committer agayev@chromium.org <agayev@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-11-16 19:50:41 +0000
commit    ba37f68c46aecffe431f64f0806d667bf97681aa (patch)
tree      4315a7372f10582b8fa545d22a600115aa16e090 /net/disk_cache
parent    693ef80e4bb022a52ebbdc7e4ef60e11abbfc2ae (diff)
Another approach to adding the layer for managing segment objects.
BUG=157187
TEST=net_unittests --gtest_filter="FlashCacheTest.*" --gtest_repeat=10 --shuffle
Review URL: https://chromiumcodereview.appspot.com/11361113
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@168265 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--  net/disk_cache/flash/flash_cache_test_base.cc            7
-rw-r--r--  net/disk_cache/flash/flash_cache_test_base.h             2
-rw-r--r--  net/disk_cache/flash/format.h                            7
-rw-r--r--  net/disk_cache/flash/log_structured_store.cc           175
-rw-r--r--  net/disk_cache/flash/log_structured_store.h            102
-rw-r--r--  net/disk_cache/flash/log_structured_store_unittest.cc  114
-rw-r--r--  net/disk_cache/flash/segment.cc                         41
-rw-r--r--  net/disk_cache/flash/segment.h                          35
-rw-r--r--  net/disk_cache/flash/segment_unittest.cc                55
9 files changed, 495 insertions, 43 deletions
diff --git a/net/disk_cache/flash/flash_cache_test_base.cc b/net/disk_cache/flash/flash_cache_test_base.cc
index bbe6c6f..ff0de12 100644
--- a/net/disk_cache/flash/flash_cache_test_base.cc
+++ b/net/disk_cache/flash/flash_cache_test_base.cc
@@ -8,6 +8,7 @@
#include "base/scoped_temp_dir.h"
#include "base/time.h"
#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_structured_store.h"
#include "net/disk_cache/flash/storage.h"
namespace {
@@ -32,8 +33,14 @@ void FlashCacheTest::SetUp() {
int32 storage_size = num_segments_in_storage_ * disk_cache::kFlashSegmentSize;
storage_.reset(new disk_cache::Storage(path, storage_size));
ASSERT_TRUE(storage_->Init());
+
+ log_structured_store_.reset(
+ new disk_cache::LogStructuredStore(storage_.get()));
+ ASSERT_TRUE(log_structured_store_->Init());
}
void FlashCacheTest::TearDown() {
+ ASSERT_TRUE(log_structured_store_->Close());
+ log_structured_store_.reset();
storage_.reset();
}
diff --git a/net/disk_cache/flash/flash_cache_test_base.h b/net/disk_cache/flash/flash_cache_test_base.h
index 1c8bae1..34cbc43 100644
--- a/net/disk_cache/flash/flash_cache_test_base.h
+++ b/net/disk_cache/flash/flash_cache_test_base.h
@@ -13,6 +13,7 @@
namespace disk_cache {
+class LogStructuredStore;
class Storage;
} // namespace disk_cache
@@ -25,6 +26,7 @@ class FlashCacheTest : public testing::Test {
virtual void SetUp() OVERRIDE;
virtual void TearDown() OVERRIDE;
+ scoped_ptr<disk_cache::LogStructuredStore> log_structured_store_;
scoped_ptr<disk_cache::Storage> storage_;
ScopedTempDir temp_dir_;
int32 num_segments_in_storage_;
diff --git a/net/disk_cache/flash/format.h b/net/disk_cache/flash/format.h
index 12a5a48..d32344d 100644
--- a/net/disk_cache/flash/format.h
+++ b/net/disk_cache/flash/format.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef NET_DISK_CACHE_FLASH_FORMAT_H
-#define NET_DISK_CACHE_FLASH_FORMAT_H
+#ifndef NET_DISK_CACHE_FLASH_FORMAT_H_
+#define NET_DISK_CACHE_FLASH_FORMAT_H_
namespace disk_cache {
@@ -20,7 +20,8 @@ const size_t kFlashMaxEntryCount = kFlashSegmentSize / kFlashSmallEntrySize - 1;
// containing a counter specifying the number of saved offsets followed by the
// offsets.
const int32 kFlashSummarySize = (1 + kFlashMaxEntryCount) * sizeof(int32);
+const int32 kFlashSegmentFreeSpace = kFlashSegmentSize - kFlashSummarySize;
} // namespace disk_cache
-#endif // NET_DISK_CACHE_FLASH_FORMAT_H
+#endif // NET_DISK_CACHE_FLASH_FORMAT_H_
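
The new kFlashSegmentFreeSpace constant is the per-segment space left after reserving the summary described in the comment above. A small arithmetic sketch with illustrative values only; the real kFlashSegmentSize and kFlashSmallEntrySize are defined in the unchanged part of format.h and may differ:

  // Illustrative values only -- the actual constants live in the unchanged
  // part of format.h.
  const int32 kFlashSegmentSize = 4 * 1024 * 1024;     // assume 4 MiB segments
  const int32 kFlashSmallEntrySize = 4 * 1024;         // assume 4 KiB small entries
  const size_t kFlashMaxEntryCount =
      kFlashSegmentSize / kFlashSmallEntrySize - 1;    // = 1023
  const int32 kFlashSummarySize =
      (1 + kFlashMaxEntryCount) * sizeof(int32);       // = 1024 * 4 = 4096 bytes
  const int32 kFlashSegmentFreeSpace =
      kFlashSegmentSize - kFlashSummarySize;           // = segment minus summary
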
diff --git a/net/disk_cache/flash/log_structured_store.cc b/net/disk_cache/flash/log_structured_store.cc
new file mode 100644
index 0000000..f372de0
--- /dev/null
+++ b/net/disk_cache/flash/log_structured_store.cc
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_structured_store.h"
+#include "net/disk_cache/flash/segment.h"
+#include "net/disk_cache/flash/storage.h"
+
+namespace disk_cache {
+
+LogStructuredStore::LogStructuredStore(Storage* storage)
+ : storage_(storage),
+ num_segments_(storage->size() / kFlashSegmentSize),
+ open_segments_(num_segments_),
+ write_index_(0),
+ current_entry_id_(-1),
+ current_entry_num_bytes_left_to_write_(0),
+ init_(false),
+ closed_(false) {
+ DCHECK(storage->size() % kFlashSegmentSize == 0);
+}
+
+LogStructuredStore::~LogStructuredStore() {
+ DCHECK(!init_ || closed_);
+ STLDeleteElements(&open_segments_);
+}
+
+bool LogStructuredStore::Init() {
+ DCHECK(!init_);
+ // TODO(agayev): Once we start persisting segment metadata to disk, we will
+ // start from where we left off during the last shutdown.
+ scoped_ptr<Segment> segment(new Segment(write_index_, false, storage_));
+ if (!segment->Init())
+ return false;
+
+ segment->AddUser();
+ open_segments_[write_index_] = segment.release();
+ init_ = true;
+ return true;
+}
+
+bool LogStructuredStore::Close() {
+ DCHECK(init_ && !closed_);
+ open_segments_[write_index_]->ReleaseUser();
+ if (!open_segments_[write_index_]->Close())
+ return false;
+ closed_ = true;
+ return true;
+ // TODO(agayev): persist metadata to disk.
+}
+
+bool LogStructuredStore::CreateEntry(int32 size, int32* id) {
+ DCHECK(init_ && !closed_);
+ DCHECK(current_entry_id_ == -1 && size <= disk_cache::kFlashSegmentFreeSpace);
+
+ // TODO(agayev): Prevent large entries from leaving the segments almost empty.
+ if (!open_segments_[write_index_]->CanHold(size)) {
+ if (!open_segments_[write_index_]->Close())
+ return false;
+
+ open_segments_[write_index_]->ReleaseUser();
+ if (open_segments_[write_index_]->HasNoUsers()) {
+ delete open_segments_[write_index_];
+ open_segments_[write_index_] = NULL;
+ }
+
+ write_index_ = GetNextSegmentIndex();
+ scoped_ptr<Segment> segment(new Segment(write_index_, false, storage_));
+ if (!segment->Init())
+ return false;
+
+ segment->AddUser();
+ open_segments_[write_index_] = segment.release();
+ }
+
+ *id = open_segments_[write_index_]->write_offset();
+ open_segments_[write_index_]->StoreOffset(*id);
+ current_entry_id_ = *id;
+ current_entry_num_bytes_left_to_write_ = size;
+ open_entries_.insert(current_entry_id_);
+ return true;
+}
+
+bool LogStructuredStore::WriteData(const void* buffer, int32 size) {
+ DCHECK(init_ && !closed_);
+ DCHECK(current_entry_id_ != -1 &&
+ size <= current_entry_num_bytes_left_to_write_);
+ if (open_segments_[write_index_]->WriteData(buffer, size)) {
+ current_entry_num_bytes_left_to_write_ -= size;
+ return true;
+ }
+ return false;
+}
+
+bool LogStructuredStore::OpenEntry(int32 id) {
+ DCHECK(init_ && !closed_);
+ if (open_entries_.find(id) != open_entries_.end())
+ return false;
+
+ // Segment is already open.
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ if (open_segments_[index]) {
+ if (!open_segments_[index]->HaveOffset(id))
+ return false;
+ open_segments_[index]->AddUser();
+ open_entries_.insert(id);
+ return true;
+ }
+
+ // Segment is not open.
+ scoped_ptr<Segment> segment(new Segment(index, true, storage_));
+ if (!segment->Init() || !segment->HaveOffset(id))
+ return false;
+
+ segment->AddUser();
+ open_segments_[index] = segment.release();
+ open_entries_.insert(id);
+ return true;
+}
+
+bool LogStructuredStore::ReadData(int32 id, void* buffer, int32 size,
+ int32 offset) const {
+ DCHECK(init_ && !closed_);
+ DCHECK(open_entries_.find(id) != open_entries_.end());
+
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ DCHECK(open_segments_[index] && open_segments_[index]->HaveOffset(id));
+ return open_segments_[index]->ReadData(buffer, size, id + offset);
+}
+
+void LogStructuredStore::CloseEntry(int32 id) {
+ DCHECK(init_ && !closed_);
+ std::set<int32>::iterator entry_iter = open_entries_.find(id);
+ DCHECK(entry_iter != open_entries_.end());
+
+ if (current_entry_id_ != -1) {
+ DCHECK(id == current_entry_id_ && !current_entry_num_bytes_left_to_write_);
+ open_entries_.erase(entry_iter);
+ current_entry_id_ = -1;
+ return;
+ }
+
+ int32 index = id / disk_cache::kFlashSegmentSize;
+ DCHECK(open_segments_[index]);
+ open_entries_.erase(entry_iter);
+
+ open_segments_[index]->ReleaseUser();
+ if (open_segments_[index]->HasNoUsers()) {
+ delete open_segments_[index];
+ open_segments_[index] = NULL;
+ }
+}
+
+int32 LogStructuredStore::GetNextSegmentIndex() {
+ DCHECK(init_ && !closed_);
+ int32 next_index = (write_index_ + 1) % num_segments_;
+
+ while (InUse(next_index)) {
+ next_index = (next_index + 1) % num_segments_;
+ DCHECK_NE(next_index, write_index_);
+ }
+ return next_index;
+}
+
+bool LogStructuredStore::InUse(int32 index) const {
+ DCHECK(init_ && !closed_);
+ DCHECK(index >= 0 && index < num_segments_);
+ return open_segments_[index] != NULL;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/flash/log_structured_store.h b/net/disk_cache/flash/log_structured_store.h
new file mode 100644
index 0000000..ea65d34
--- /dev/null
+++ b/net/disk_cache/flash/log_structured_store.h
@@ -0,0 +1,102 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_FLASH_LOG_STRUCTURED_STORE_H_
+#define NET_DISK_CACHE_FLASH_LOG_STRUCTURED_STORE_H_
+
+#include <set>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+
+class Segment;
+class Storage;
+
+// This class implements a general-purpose store for storing and retrieving
+// entries consisting of arbitrary binary data. The store has log semantics,
+// i.e. it's not possible to overwrite data in place. In order to update an
+// entry, a new version must be written. Only one entry can be written to at
+// any given time, while concurrent reading of multiple entries is supported.
+class NET_EXPORT_PRIVATE LogStructuredStore {
+ public:
+ explicit LogStructuredStore(Storage* storage);
+ ~LogStructuredStore();
+
+ // Performs initialization. Must be the first function called and further
+ // calls should be made only if it is successful.
+ bool Init();
+
+ // Closes the store. Should be the last function called before destruction.
+ bool Close();
+
+ // Creates an entry of |size| bytes. The id of the created entry is stored in
+ // |entry_id|.
+ bool CreateEntry(int32 size, int32* entry_id);
+
+ // TODO(agayev): Add DeleteEntry.
+
+ // Appends data to the end of the last created entry.
+ bool WriteData(const void* buffer, int32 size);
+
+ // Opens an entry with id |entry_id|.
+ bool OpenEntry(int32 entry_id);
+
+ // Reads |size| bytes from the entry identified by |entry_id| into |buffer|,
+ // starting at |offset| relative to the beginning of the entry's content.
+ bool ReadData(int32 entry_id, void* buffer, int32 size, int32 offset) const;
+
+ // Closes an entry that was either opened with OpenEntry or created with
+ // CreateEntry.
+ void CloseEntry(int32 id);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest,
+ LogStructuredStoreReadFromClosedSegment);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest,
+ LogStructuredStoreSegmentSelectionIsFifo);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest,
+ LogStructuredStoreInUseSegmentIsSkipped);
+ FRIEND_TEST_ALL_PREFIXES(FlashCacheTest,
+ LogStructuredStoreReadFromCurrentAfterClose);
+
+ int32 GetNextSegmentIndex();
+ bool InUse(int32 segment_index) const;
+
+ Storage* storage_;
+
+ int32 num_segments_;
+
+ // Currently open segments, either for reading or writing. There can only be
+ // one segment open for writing, and multiple open for reading.
+ std::vector<Segment*> open_segments_;
+
+ // The index of the segment currently being written to; it is an index into
+ // the |open_segments_| vector.
+ int32 write_index_;
+
+ // Ids of entries currently open, either CreateEntry'ed or OpenEntry'ed.
+ std::set<int32> open_entries_;
+
+ // Id of the entry currently being written to, or -1 if no entry is being
+ // written to at the moment.
+ int32 current_entry_id_;
+
+ // Number of bytes left to be written to the entry identified by
+ // |current_entry_id_|. Its value is meaningful only when |current_entry_id_|
+ // is not -1.
+ int32 current_entry_num_bytes_left_to_write_;
+
+ bool init_; // Init was called.
+ bool closed_; // Close was called.
+
+ DISALLOW_COPY_AND_ASSIGN(LogStructuredStore);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FLASH_LOG_STRUCTURED_STORE_H_
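
The class comment above summarizes the API; the sketch below shows how the store is typically driven, mirroring the unit tests that follow. It is illustrative only and not part of this patch: WriteAndReadBack is a hypothetical helper, and |storage| is assumed to be an already initialized disk_cache::Storage.

  #include <string>
  #include <vector>

  #include "net/disk_cache/flash/log_structured_store.h"
  #include "net/disk_cache/flash/storage.h"

  bool WriteAndReadBack(disk_cache::Storage* storage) {
    disk_cache::LogStructuredStore store(storage);
    if (!store.Init())
      return false;

    // Create an entry, append its data, then close it.
    const int32 kSize = 100;
    const std::string payload(kSize, 'x');
    int32 id;
    bool ok = store.CreateEntry(kSize, &id) &&
              store.WriteData(payload.data(), kSize);
    if (ok)
      store.CloseEntry(id);

    // Reopen the entry by id and read it back from offset 0.
    std::vector<char> read_back(kSize);
    ok = ok && store.OpenEntry(id) &&
         store.ReadData(id, &read_back[0], kSize, 0);
    if (ok)
      store.CloseEntry(id);

    return store.Close() && ok;
  }
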
diff --git a/net/disk_cache/flash/log_structured_store_unittest.cc b/net/disk_cache/flash/log_structured_store_unittest.cc
new file mode 100644
index 0000000..b41d264
--- /dev/null
+++ b/net/disk_cache/flash/log_structured_store_unittest.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/flash/flash_cache_test_base.h"
+#include "net/disk_cache/flash/format.h"
+#include "net/disk_cache/flash/log_structured_store.h"
+#include "net/disk_cache/flash/segment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+TEST_F(FlashCacheTest, LogStructuredStoreCreateEntry) {
+ const int32 kSize = 100;
+ const std::string buf(kSize, 0);
+
+ int32 id;
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize, &id));
+ EXPECT_TRUE(log_structured_store_->WriteData(buf.data(), kSize/2));
+ EXPECT_TRUE(log_structured_store_->WriteData(buf.data(), kSize/2));
+ log_structured_store_->CloseEntry(id);
+}
+
+// Also tests reading from the current segment.
+TEST_F(FlashCacheTest, LogStructuredStoreOpenEntry) {
+ const int32 kSize = 100;
+ const std::vector<char> expected(kSize, 'b');
+
+ int32 id;
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize, &id));
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize));
+ log_structured_store_->CloseEntry(id);
+
+ EXPECT_TRUE(log_structured_store_->OpenEntry(id));
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_structured_store_->ReadData(id, &actual[0], kSize, 0));
+ log_structured_store_->CloseEntry(id);
+
+ EXPECT_EQ(expected, actual);
+}
+
+// Also tests that writing advances segments.
+TEST_F(FlashCacheTest, LogStructuredStoreReadFromClosedSegment) {
+ const int32 kSize = disk_cache::kFlashSegmentFreeSpace;
+ const std::vector<char> expected(kSize, 'a');
+
+ // First two entries go to segment 0.
+ int32 id1;
+ EXPECT_EQ(0, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize/2, &id1));
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize/2));
+ log_structured_store_->CloseEntry(id1);
+
+ int32 id2;
+ EXPECT_EQ(0, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize/2, &id2));
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize/2));
+ log_structured_store_->CloseEntry(id2);
+
+ // This entry goes to segment 1.
+ int32 id3;
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize, &id3));
+ EXPECT_EQ(1, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize));
+ log_structured_store_->CloseEntry(id3);
+
+ // We read from segment 0.
+ EXPECT_TRUE(log_structured_store_->OpenEntry(id1));
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_structured_store_->ReadData(id1, &actual[0], kSize, id1));
+ log_structured_store_->CloseEntry(id1);
+
+ EXPECT_EQ(expected, actual);
+}
+
+TEST_F(FlashCacheTest, LogStructuredStoreReadFromCurrentAfterClose) {
+ const int32 kSize = disk_cache::kFlashSegmentFreeSpace;
+ const std::vector<char> expected(kSize, 'a');
+
+ int32 id1;
+ EXPECT_EQ(0, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize/2, &id1));
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize/2));
+ log_structured_store_->CloseEntry(id1);
+
+ // Create a reference to the entry above.
+ EXPECT_TRUE(log_structured_store_->OpenEntry(id1));
+
+ // This entry fills the first segment.
+ int32 id2;
+ EXPECT_EQ(0, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize/2, &id2));
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize/2));
+ log_structured_store_->CloseEntry(id2);
+
+ // Creating this entry forces closing of the first segment.
+ int32 id3;
+ EXPECT_TRUE(log_structured_store_->CreateEntry(kSize, &id3));
+ EXPECT_EQ(1, log_structured_store_->write_index_);
+ EXPECT_TRUE(log_structured_store_->WriteData(&expected[0], kSize));
+ log_structured_store_->CloseEntry(id3);
+
+ // Now attempt to read from the closed segment.
+ std::vector<char> actual(kSize, 0);
+ EXPECT_TRUE(log_structured_store_->ReadData(id1, &actual[0], kSize, id1));
+ log_structured_store_->CloseEntry(id1);
+
+ EXPECT_EQ(expected, actual);
+}
+
+// TODO(agayev): Add a test that confirms that an in-use segment is not selected
+// as the next write segment.
+
+} // namespace disk_cache
diff --git a/net/disk_cache/flash/segment.cc b/net/disk_cache/flash/segment.cc
index 191ccd7..3457497 100644
--- a/net/disk_cache/flash/segment.cc
+++ b/net/disk_cache/flash/segment.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "base/logging.h"
#include "net/disk_cache/flash/format.h"
#include "net/disk_cache/flash/segment.h"
@@ -10,7 +12,9 @@
namespace disk_cache {
Segment::Segment(int32 index, bool read_only, Storage* storage)
- : read_only_(read_only),
+ : index_(index),
+ num_users_(0),
+ read_only_(read_only),
init_(false),
storage_(storage),
offset_(index * kFlashSegmentSize),
@@ -22,11 +26,32 @@ Segment::Segment(int32 index, bool read_only, Storage* storage)
Segment::~Segment() {
DCHECK(!init_ || read_only_);
+ if (num_users_ != 0)
+ LOG(WARNING) << "Segment destroyed with " << num_users_ << " users remaining.";
+}
+
+bool Segment::HaveOffset(int32 offset) const {
+ DCHECK(init_);
+ return std::binary_search(offsets_.begin(), offsets_.end(), offset);
+}
+
+void Segment::AddUser() {
+ DCHECK(init_);
+ ++num_users_;
+}
+
+void Segment::ReleaseUser() {
+ DCHECK(init_);
+ --num_users_;
+}
+
+bool Segment::HasNoUsers() const {
+ DCHECK(init_);
+ return num_users_ == 0;
}
bool Segment::Init() {
- if (init_)
- return false;
+ DCHECK(!init_);
if (offset_ < 0 || offset_ + kFlashSegmentSize > storage_->size())
return false;
@@ -49,24 +74,23 @@ bool Segment::Init() {
return true;
}
-bool Segment::WriteData(const void* buffer, int32 size, int32* offset) {
+bool Segment::WriteData(const void* buffer, int32 size) {
DCHECK(init_ && !read_only_);
- DCHECK(CanHold(size));
-
+ DCHECK(write_offset_ + size <= summary_offset_);
if (!storage_->Write(buffer, size, write_offset_))
return false;
- if (offset)
- *offset = write_offset_;
write_offset_ += size;
return true;
}
void Segment::StoreOffset(int32 offset) {
+ DCHECK(init_ && !read_only_);
DCHECK(offsets_.size() < kFlashMaxEntryCount);
offsets_.push_back(offset);
}
bool Segment::ReadData(void* buffer, int32 size, int32 offset) const {
+ DCHECK(init_);
DCHECK(offset >= offset_ && offset + size <= offset_ + kFlashSegmentSize);
return storage_->Read(buffer, size, offset);
}
@@ -90,6 +114,7 @@ bool Segment::Close() {
}
bool Segment::CanHold(int32 size) const {
+ DCHECK(init_);
return offsets_.size() < kFlashMaxEntryCount &&
write_offset_ + size <= summary_offset_;
}
diff --git a/net/disk_cache/flash/segment.h b/net/disk_cache/flash/segment.h
index f4a4762..97551e2 100644
--- a/net/disk_cache/flash/segment.h
+++ b/net/disk_cache/flash/segment.h
@@ -9,7 +9,6 @@
#include "base/basictypes.h"
#include "base/gtest_prod_util.h"
-#include "base/memory/ref_counted.h"
#include "net/base/net_export.h"
namespace disk_cache {
@@ -41,8 +40,8 @@ class Storage;
// mutating functions cannot be called on the object after that.
//
// Segment can only be used as a log, i.e. all writes are laid out sequentially
-// on a segment. As a result, WriteData() function does not take an offset, but
-// can return an offset of where the write took place.
+// on a segment. As a result, the WriteData() function does not take an offset.
+// The current write offset can be obtained by calling write_offset().
//
// Once the entries are written to the Segment and Close() called on it and the
// object destroyed, we should later be able to instantiate a read-only Segment
@@ -50,14 +49,14 @@ class Storage;
// achieve this, a tiny region of Segment is used for its metadata and Segment
// provides two calls for interacting with metadata: StoreOffset() and
// GetOffsets(). The former can be used to store an offset that was returned by
-// WriteData() and the latter can be used to retrieve all the offsets that were
-// stored in the Segment. Before attempting to write an entry, the client
+// write_offset() and the latter can be used to retrieve all the offsets that
+// were stored in the Segment. Before attempting to write an entry, the client
// should call CanHold() to make sure that there is enough space in the segment.
//
// ReadData can be called over the range that was previously written with
// WriteData. Reading from area that was not written will fail.
-class NET_EXPORT_PRIVATE Segment : public base::RefCounted<Segment> {
+class NET_EXPORT_PRIVATE Segment {
public:
// |index| is the index of this segment on |storage|. If the storage size is
// X and the segment size is Y, where X >> Y and X % Y == 0, then the valid
@@ -65,20 +64,29 @@ class NET_EXPORT_PRIVATE Segment : public base::RefCounted<Segment> {
// |index| is given value Z, then it covers bytes on storage starting at the
// offset Z*Y and ending at the offset Z*Y+Y-1.
Segment(int32 index, bool read_only, Storage* storage);
+ ~Segment();
+
+ int32 index() const { return index_; }
+ int32 write_offset() const { return write_offset_; }
+ bool HaveOffset(int32 offset) const;
std::vector<int32> GetOffsets() const { return offsets_; }
+ // Manage the number of users of this segment.
+ void AddUser();
+ void ReleaseUser();
+ bool HasNoUsers() const;
+
// Performs segment initialization. Must be the first function called on the
// segment and further calls should be made only if it is successful.
bool Init();
// Writes |size| bytes of data from |buffer| to segment, returns false if
- // fails and true if succeeds and sets the |offset|, if it is not NULL. Can
- // block for a long time.
- bool WriteData(const void* buffer, int32 size, int32* offset);
+ // fails and true if succeeds. Can block for a long time.
+ bool WriteData(const void* buffer, int32 size);
- // Reads |size| bytes of data living at |offset| into |buffer|, returns true
- // on success and false on failure.
+ // Reads |size| bytes of data living at |offset| into |buffer|; returns true on
+ // success and false on failure.
bool ReadData(void* buffer, int32 size, int32 offset) const;
// Stores the offset in the metadata.
@@ -92,9 +100,8 @@ class NET_EXPORT_PRIVATE Segment : public base::RefCounted<Segment> {
bool CanHold(int32 size) const;
private:
- friend class base::RefCounted<Segment>;
- ~Segment();
-
+ int32 index_;
+ int32 num_users_;
bool read_only_; // Indicates whether the segment can be written to.
bool init_; // Indicates whether segment was initialized.
Storage* storage_; // Storage on which the segment resides.
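
As the revised segment.h comment explains, WriteData() no longer reports where a write landed; the caller captures write_offset() first and records it with StoreOffset(). A sketch of that write/read cycle, illustrative only and not part of this patch: RoundTrip is a hypothetical helper, and |storage| is assumed to be an initialized disk_cache::Storage.

  #include "base/logging.h"
  #include "net/disk_cache/flash/segment.h"
  #include "net/disk_cache/flash/storage.h"

  bool RoundTrip(disk_cache::Storage* storage) {
    const char kData[] = "hello";
    const int32 kSize = sizeof(kData);

    disk_cache::Segment writer(/* index */ 0, /* read_only */ false, storage);
    if (!writer.Init())
      return false;
    DCHECK(writer.CanHold(kSize));             // Check space before writing.
    int32 offset = writer.write_offset();      // Where this entry will land.
    bool ok = writer.WriteData(kData, kSize);
    writer.StoreOffset(offset);                // Record it in the segment summary.
    ok = writer.Close() && ok;                 // Makes the stored offsets durable.

    // Reopen the same segment read-only and read the entry back.
    disk_cache::Segment reader(/* index */ 0, /* read_only */ true, storage);
    char buffer[kSize];
    ok = ok && reader.Init() && reader.HaveOffset(offset) &&
         reader.ReadData(buffer, kSize, offset);
    return reader.Close() && ok;
  }
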
diff --git a/net/disk_cache/flash/segment_unittest.cc b/net/disk_cache/flash/segment_unittest.cc
index b33d000..c3c9cd3 100644
--- a/net/disk_cache/flash/segment_unittest.cc
+++ b/net/disk_cache/flash/segment_unittest.cc
@@ -36,40 +36,59 @@ const int32 kSegmentFreeSpace = disk_cache::kFlashSegmentSize -
} // namespace
-TEST_F(FlashCacheTest, CreateDestroy) {
+TEST_F(FlashCacheTest, SegmentUserTracking) {
+ scoped_ptr<disk_cache::Segment> segment(
+ new disk_cache::Segment(0, false, storage_.get()));
+ EXPECT_TRUE(segment->Init());
+
+ EXPECT_TRUE(segment->HasNoUsers());
+ segment->AddUser();
+ segment->AddUser();
+ EXPECT_FALSE(segment->HasNoUsers());
+
+ segment->ReleaseUser();
+ EXPECT_FALSE(segment->HasNoUsers());
+ segment->ReleaseUser();
+ EXPECT_TRUE(segment->HasNoUsers());
+
+ EXPECT_TRUE(segment->Close());
+}
+
+TEST_F(FlashCacheTest, SegmentCreateDestroy) {
int32 index = 0;
- scoped_refptr<disk_cache::Segment> segment(
+ scoped_ptr<disk_cache::Segment> segment(
new disk_cache::Segment(index, false, storage_.get()));
EXPECT_TRUE(segment->Init());
EXPECT_TRUE(segment->Close());
index = num_segments_in_storage_ - 1;
- segment = new disk_cache::Segment(index, false, storage_.get());
+ segment.reset(new disk_cache::Segment(index, false, storage_.get()));
EXPECT_TRUE(segment->Init());
EXPECT_TRUE(segment->Close());
int32 invalid_index = num_segments_in_storage_;
- segment = new disk_cache::Segment(invalid_index, false, storage_.get());
+ segment.reset(new disk_cache::Segment(invalid_index, false, storage_.get()));
EXPECT_FALSE(segment->Init());
invalid_index = -1;
- segment = new disk_cache::Segment(invalid_index, false, storage_.get());
+ segment.reset(new disk_cache::Segment(invalid_index, false, storage_.get()));
EXPECT_FALSE(segment->Init());
}
-TEST_F(FlashCacheTest, WriteDataReadData) {
+TEST_F(FlashCacheTest, SegmentWriteDataReadData) {
int32 index = rand() % num_segments_in_storage_;
- scoped_refptr<disk_cache::Segment> segment(
+ scoped_ptr<disk_cache::Segment> segment(
new disk_cache::Segment(index, false, storage_.get()));
EXPECT_TRUE(segment->Init());
SmallEntry entry1;
EXPECT_TRUE(segment->CanHold(entry1.size));
- int32 offset;
- EXPECT_TRUE(segment->WriteData(entry1.data, entry1.size, &offset));
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry1.data, entry1.size));
+ segment->StoreOffset(offset);
EXPECT_TRUE(segment->Close());
- segment = new disk_cache::Segment(index, true, storage_.get());
+ segment.reset(new disk_cache::Segment(index, true, storage_.get()));
EXPECT_TRUE(segment->Init());
SmallEntry entry2;
EXPECT_TRUE(segment->ReadData(entry2.data, entry2.size, offset));
@@ -77,17 +96,17 @@ TEST_F(FlashCacheTest, WriteDataReadData) {
EXPECT_TRUE(segment->Close());
}
-TEST_F(FlashCacheTest, FillWithSmallEntries) {
+TEST_F(FlashCacheTest, SegmentFillWithSmallEntries) {
int32 index = rand() % num_segments_in_storage_;
- scoped_refptr<disk_cache::Segment> segment(
+ scoped_ptr<disk_cache::Segment> segment(
new disk_cache::Segment(index, false, storage_.get()));
EXPECT_TRUE(segment->Init());
SmallEntry entry;
int32 num_bytes_written = 0;
- int32 offset;
while (segment->CanHold(entry.size)) {
- EXPECT_TRUE(segment->WriteData(entry.data, entry.size, &offset));
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry.data, entry.size));
segment->StoreOffset(offset);
num_bytes_written += entry.size;
}
@@ -97,17 +116,17 @@ TEST_F(FlashCacheTest, FillWithSmallEntries) {
EXPECT_TRUE(segment->Close());
}
-TEST_F(FlashCacheTest, FillWithLargeEntries) {
+TEST_F(FlashCacheTest, SegmentFillWithLargeEntries) {
int32 index = rand() % num_segments_in_storage_;
- scoped_refptr<disk_cache::Segment> segment(
+ scoped_ptr<disk_cache::Segment> segment(
new disk_cache::Segment(index, false, storage_.get()));
EXPECT_TRUE(segment->Init());
scoped_ptr<LargeEntry> entry(new LargeEntry);
int32 num_bytes_written = 0;
- int32 offset;
while (segment->CanHold(entry->size)) {
- EXPECT_TRUE(segment->WriteData(entry->data, entry->size, &offset));
+ int32 offset = segment->write_offset();
+ EXPECT_TRUE(segment->WriteData(entry->data, entry->size));
segment->StoreOffset(offset);
num_bytes_written += entry->size;
}