diff options
author | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-09-14 18:35:21 +0000 |
---|---|---|
committer | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-09-14 18:35:21 +0000 |
commit | fdbaecb0456642a5148b8f5caa7b6bbc27a7ccff (patch) | |
tree | bcb93a97882aa7e9f014ad02dd20c8142b24b2c7 /net | |
parent | d3a8cfcbb8703ab3c8d8b58bc4fa8e7d7c011a1f (diff) | |
download | chromium_src-fdbaecb0456642a5148b8f5caa7b6bbc27a7ccff.zip chromium_src-fdbaecb0456642a5148b8f5caa7b6bbc27a7ccff.tar.gz chromium_src-fdbaecb0456642a5148b8f5caa7b6bbc27a7ccff.tar.bz2 |
Http Cache: Add code for simulating an infinite HTTP cache.
This is not completely wired yet.
BUG=147383
TEST=net_unittests
Review URL: https://chromiumcodereview.appspot.com/10909136
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@156846 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r-- | net/http/http_cache.cc | 10 | ||||
-rw-r--r-- | net/http/http_cache.h | 7 | ||||
-rw-r--r-- | net/http/http_cache_transaction.cc | 28 | ||||
-rw-r--r-- | net/http/http_cache_transaction.h | 5 | ||||
-rw-r--r-- | net/http/infinite_cache.cc | 1017 | ||||
-rw-r--r-- | net/http/infinite_cache.h | 120 | ||||
-rw-r--r-- | net/http/infinite_cache_unittest.cc | 234 | ||||
-rw-r--r-- | net/net.gyp | 3 |
8 files changed, 1419 insertions, 5 deletions
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc index 0436bc4..32ae60a 100644 --- a/net/http/http_cache.cc +++ b/net/http/http_cache.cc @@ -352,7 +352,7 @@ void HttpCache::WriteMetadata(const GURL& url, CreateBackend(NULL, net::CompletionCallback()); } - HttpCache::Transaction* trans = new HttpCache::Transaction(this, NULL); + HttpCache::Transaction* trans = new HttpCache::Transaction(this, NULL, NULL); MetadataWriter* writer = new MetadataWriter(trans); // The writer will self destruct when done. @@ -390,7 +390,8 @@ void HttpCache::OnExternalCacheHit(const GURL& url, void HttpCache::InitializeInfiniteCache(const FilePath& path) { if (base::FieldTrialList::FindFullName("InfiniteCache") != "Yes") return; - // TODO(rvargas): initialize the infinite cache + // To be enabled after everything is fully wired. + // infinite_cache_.Init(path); } int HttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans, @@ -401,7 +402,10 @@ int HttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans, CreateBackend(NULL, net::CompletionCallback()); } - trans->reset(new HttpCache::Transaction(this, delegate)); + InfiniteCacheTransaction* infinite_cache_transaction = + infinite_cache_.CreateInfiniteCacheTransaction(); + trans->reset(new HttpCache::Transaction(this, delegate, + infinite_cache_transaction)); return OK; } diff --git a/net/http/http_cache.h b/net/http/http_cache.h index 35f2e47..6e82876 100644 --- a/net/http/http_cache.h +++ b/net/http/http_cache.h @@ -32,6 +32,7 @@ #include "net/base/net_export.h" #include "net/http/http_network_session.h" #include "net/http/http_transaction_factory.h" +#include "net/http/infinite_cache.h" class GURL; @@ -183,6 +184,9 @@ class NET_EXPORT HttpCache : public HttpTransactionFactory, // Initializes the Infinite Cache, if selected by the field trial. void InitializeInfiniteCache(const FilePath& path); + // Returns a pointer to the Infinite Cache. 
+ InfiniteCache* infinite_cache() { return &infinite_cache_; } + // HttpTransactionFactory implementation: virtual int CreateTransaction(scoped_ptr<HttpTransaction>* trans, HttpTransactionDelegate* delegate) OVERRIDE; @@ -208,6 +212,7 @@ class NET_EXPORT HttpCache : public HttpTransactionFactory, class Transaction; class WorkItem; friend class Transaction; + friend class InfiniteCache; struct PendingOp; // Info for an entry under construction. typedef std::list<Transaction*> TransactionList; @@ -386,6 +391,8 @@ class NET_EXPORT HttpCache : public HttpTransactionFactory, scoped_ptr<PlaybackCacheMap> playback_cache_map_; + InfiniteCache infinite_cache_; + DISALLOW_COPY_AND_ASSIGN(HttpCache); }; diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc index 6dd2c91..700500e 100644 --- a/net/http/http_cache_transaction.cc +++ b/net/http/http_cache_transaction.cc @@ -114,13 +114,15 @@ static bool HeaderMatches(const HttpRequestHeaders& headers, HttpCache::Transaction::Transaction( HttpCache* cache, - HttpTransactionDelegate* transaction_delegate) + HttpTransactionDelegate* transaction_delegate, + InfiniteCacheTransaction* infinite_cache_transaction) : next_state_(STATE_NONE), request_(NULL), cache_(cache->AsWeakPtr()), entry_(NULL), new_entry_(NULL), network_trans_(NULL), + infinite_cache_transaction_(infinite_cache_transaction), new_response_(NULL), mode_(NONE), target_state_(STATE_NONE), @@ -211,6 +213,9 @@ bool HttpCache::Transaction::AddTruncatedFlag() { if (done_reading_) return true; + if (infinite_cache_transaction_.get()) + infinite_cache_transaction_->OnTruncatedResponse(); + truncated_ = true; target_state_ = STATE_NONE; next_state_ = STATE_CACHE_WRITE_TRUNCATED_RESPONSE; @@ -246,6 +251,18 @@ int HttpCache::Transaction::Start(const HttpRequestInfo* request, return ERR_UNEXPECTED; SetRequest(net_log, request); + if (infinite_cache_transaction_.get()) { + if ((effective_load_flags_ & LOAD_BYPASS_CACHE) || + (effective_load_flags_ & 
LOAD_ONLY_FROM_CACHE) || + (effective_load_flags_ & LOAD_DISABLE_CACHE) || + (effective_load_flags_ & LOAD_VALIDATE_CACHE) || + (effective_load_flags_ & LOAD_PREFERRING_CACHE) || + partial_.get()) { + infinite_cache_transaction_.reset(); + } else { + infinite_cache_transaction_->OnRequestStart(request); + } + } // We have to wait until the backend is initialized so we start the SM. next_state_ = STATE_GET_BACKEND; @@ -784,6 +801,9 @@ int HttpCache::Transaction::DoSendRequestComplete(int result) { int HttpCache::Transaction::DoSuccessfulSendRequest() { DCHECK(!new_response_); const HttpResponseInfo* new_response = network_trans_->GetResponseInfo(); + if (infinite_cache_transaction_.get()) + infinite_cache_transaction_->OnResponseReceived(new_response); + if (new_response->headers->response_code() == 401 || new_response->headers->response_code() == 407) { auth_response_ = *new_response; @@ -860,6 +880,9 @@ int HttpCache::Transaction::DoNetworkReadComplete(int result) { if (result > 0) bytes_read_from_network_ += result; + if (infinite_cache_transaction_.get()) + infinite_cache_transaction_->OnDataRead(read_buf_->data(), result); + // If there is an error or we aren't saving the data, we are done; just wait // until the destructor runs to see if we can keep the data. 
if (mode_ == NONE || result < 0) @@ -1395,6 +1418,9 @@ int HttpCache::Transaction::DoCacheReadData() { DCHECK(entry_); next_state_ = STATE_CACHE_READ_DATA_COMPLETE; + if (infinite_cache_transaction_.get()) + infinite_cache_transaction_->OnServedFromCache(); + if (net_log_.IsLoggingAllEvents()) net_log_.BeginEvent(NetLog::TYPE_HTTP_CACHE_READ_DATA); ReportCacheActionStart(); diff --git a/net/http/http_cache_transaction.h b/net/http/http_cache_transaction.h index 7eb835e..923bbce 100644 --- a/net/http/http_cache_transaction.h +++ b/net/http/http_cache_transaction.h @@ -57,7 +57,9 @@ class HttpCache::Transaction : public HttpTransaction { UPDATE = READ_META | WRITE, // READ_WRITE & ~READ_DATA }; - Transaction(HttpCache* cache, HttpTransactionDelegate* transaction_delegate); + Transaction(HttpCache* cache, + HttpTransactionDelegate* transaction_delegate, + InfiniteCacheTransaction* infinite_cache_transaction); virtual ~Transaction(); Mode mode() const { return mode_; } @@ -372,6 +374,7 @@ class HttpCache::Transaction : public HttpTransaction { HttpCache::ActiveEntry* entry_; HttpCache::ActiveEntry* new_entry_; scoped_ptr<HttpTransaction> network_trans_; + scoped_ptr<InfiniteCacheTransaction> infinite_cache_transaction_; CompletionCallback callback_; // Consumer's callback. HttpResponseInfo response_; HttpResponseInfo auth_response_; diff --git a/net/http/infinite_cache.cc b/net/http/infinite_cache.cc new file mode 100644 index 0000000..4f3bec8 --- /dev/null +++ b/net/http/infinite_cache.cc @@ -0,0 +1,1017 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "net/http/infinite_cache.h" + +#include <algorithm> + +#include "base/compiler_specific.h" +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/file_path.h" +#include "base/file_util.h" +#include "base/hash.h" +#include "base/hash_tables.h" +#include "base/location.h" +#include "base/memory/ref_counted.h" +#include "base/metrics/histogram.h" +#include "base/pickle.h" +#include "base/platform_file.h" +#include "base/rand_util.h" +#include "base/sha1.h" +#include "base/time.h" +#include "base/threading/sequenced_worker_pool.h" +#include "net/base/net_errors.h" +#include "net/http/http_cache_transaction.h" +#include "net/http/http_request_info.h" +#include "net/http/http_response_headers.h" +#include "net/http/http_response_info.h" +#include "net/http/http_util.h" +#include "third_party/zlib/zlib.h" + +using base::PlatformFile; +using base::Time; +using base::TimeDelta; + +namespace { + +// Flags to use with a particular resource. +enum Flags { + NO_CACHE = 1 << 0, + NO_STORE = 1 << 1, + EXPIRED = 1 << 2, + TRUNCATED = 1 << 3, + RESUMABLE = 1 << 4, + REVALIDATEABLE = 1 << 5, + DOOM_METHOD = 1 << 6, + CACHED = 1 << 7 +}; + +const int kKeySizeBytes = 20; +COMPILE_ASSERT(base::kSHA1Length == kKeySizeBytes, invalid_key_length); +struct Key { + char value[kKeySizeBytes]; +}; + +// The actual data that we store for every resource. +struct Details { + int32 expiration; + int32 last_access; + uint16 flags; + uint8 use_count; + uint8 update_count; + uint32 vary_hash; + int32 headers_size; + int32 response_size; + uint32 headers_hash; + uint32 response_hash; +}; +const size_t kRecordSize = sizeof(Key) + sizeof(Details); + +// Some constants related to the database file. +uint32 kMagicSignature = 0x1f00cace; +uint32 kCurrentVersion = 0x10001; + +// Basic limits for the experiment. +int kMaxNumEntries = 200 * 1000; +int kMaxTrackingSize = 40 * 1024 * 1024; + +// Settings that control how we generate histograms. 
+int kTimerMinutes = 5; +int kReportSizeStep = 100 * 1024 * 1024; + +// Buffer to read and write the file. +const size_t kBufferSize = 1024 * 1024; +const size_t kMaxRecordsToRead = kBufferSize / kRecordSize; +COMPILE_ASSERT(kRecordSize * kMaxRecordsToRead < kBufferSize, wrong_buffer); + +// Functor for operator <. +struct Key_less { + bool operator()(const Key& left, const Key& right) const { + // left < right. + return (memcmp(left.value, right.value, kKeySizeBytes) < 0); + } +}; + +// Functor for operator ==. +struct Key_eq { + bool operator()(const Key& left, const Key& right) const { + return (memcmp(left.value, right.value, kKeySizeBytes) == 0); + } +}; + +// Simple adaptor for the sha1 interface. +void CryptoHash(std::string source, Key* destination) { + base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(source.data()), + source.size(), + reinterpret_cast<unsigned char*>(destination->value)); +} + +// Simple adaptor for base::ReadPlatformFile. +bool ReadPlatformFile(PlatformFile file, size_t offset, + void* buffer, size_t buffer_len) { + DCHECK_LE(offset, static_cast<size_t>(kuint32max)); + int bytes = base::ReadPlatformFile(file, static_cast<int64>(offset), + reinterpret_cast<char*>(buffer), + static_cast<int>(buffer_len)); + return (bytes == static_cast<int>(buffer_len)); +} + +// Simple adaptor for base::WritePlatformFile. +bool WritePlatformFile(PlatformFile file, size_t offset, + const void* buffer, size_t buffer_len) { + DCHECK_LE(offset, static_cast<size_t>(kuint32max)); + int bytes = base::WritePlatformFile(file, static_cast<int64>(offset), + reinterpret_cast<const char*>(buffer), + static_cast<int>(buffer_len)); + return (bytes == static_cast<int>(buffer_len)); +} + +// 1 second resolution, +- 68 years from the baseline. 
+int32 TimeToInt(Time time) { + int64 seconds = (time - Time::UnixEpoch()).InSeconds(); + if (seconds > kint32max) + seconds = kint32max; + if (seconds < kint32min) + seconds = kint32min; + return static_cast<int32>(seconds); +} + +Time IntToTime(int32 time) { + return Time::UnixEpoch() + TimeDelta::FromSeconds(time); +} + +int32 GetExpiration(const net::HttpResponseInfo* response) { + TimeDelta freshness = + response->headers->GetFreshnessLifetime(response->response_time); + + // Avoid overflow when adding to current time. + if (freshness.InDays() > 365 * 10) + freshness = TimeDelta::FromDays(365 * 10); + return TimeToInt(response->response_time + freshness); +} + +uint32 GetCacheability(const net::HttpResponseInfo* response) { + uint32 cacheability = 0; + const net::HttpResponseHeaders* headers = response->headers; + if (headers->HasHeaderValue("cache-control", "no-cache") || + headers->HasHeaderValue("pragma", "no-cache") || + headers->HasHeaderValue("vary", "*")) { + cacheability |= NO_CACHE; + } + + if (headers->HasHeaderValue("cache-control", "no-store")) + cacheability |= NO_STORE; + + TimeDelta max_age; + if (headers->GetMaxAgeValue(&max_age) && max_age.InSeconds() <= 0) + cacheability |= NO_CACHE; + + return cacheability; +} + +uint32 GetRevalidationFlags(const net::HttpResponseInfo* response) { + uint32 revalidation = 0; + std::string etag; + response->headers->EnumerateHeader(NULL, "etag", &etag); + + std::string last_modified; + response->headers->EnumerateHeader(NULL, "last-modified", &last_modified); + + if (!etag.empty() || !last_modified.empty()) + revalidation = REVALIDATEABLE; + + if (response->headers->HasStrongValidators()) + revalidation = RESUMABLE; + + return revalidation; +} + + +uint32 GetVaryHash(const net::HttpResponseInfo* response) { + if (!response->vary_data.is_valid()) + return 0; + + uint32 hash = adler32(0, Z_NULL, 0); + Pickle pickle; + response->vary_data.Persist(&pickle); + return adler32(hash, reinterpret_cast<const 
Bytef*>(pickle.data()), + pickle.size()); +} + +// Adaptor for PostTaskAndReply. +void OnComplete(const net::CompletionCallback& callback, int* result) { + callback.Run(*result); +} + +} // namespace + +namespace BASE_HASH_NAMESPACE { +#if defined(COMPILER_MSVC) +inline size_t hash_value(const Key& key) { + return base::Hash(key.value, kKeySizeBytes); +} +#elif defined(COMPILER_GCC) +template <> +struct hash<Key> { + size_t operator()(const Key& key) const { + return base::Hash(key.value, kKeySizeBytes); + } +}; +#endif + +} // BASE_HASH_NAMESPACE + +namespace net { + +struct InfiniteCacheTransaction::ResourceData { + ResourceData() { + memset(this, 0, sizeof(*this)); + } + + Key key; + Details details; +}; + +InfiniteCacheTransaction::InfiniteCacheTransaction(InfiniteCache* cache) + : cache_(cache->AsWeakPtr()), must_doom_entry_(false) { +} + +InfiniteCacheTransaction::~InfiniteCacheTransaction() { + Finish(); +} + +void InfiniteCacheTransaction::OnRequestStart(const HttpRequestInfo* request) { + if (!cache_) + return; + + std::string method = request->method; + if (method == "POST" || method == "DELETE" || method == "PUT") { + must_doom_entry_ = true; + } else if (method != "GET") { + cache_.reset(); + return; + } + + resource_data_.reset(new ResourceData); + CryptoHash(cache_->GenerateKey(request), &resource_data_->key); +} + +void InfiniteCacheTransaction::OnResponseReceived( + const HttpResponseInfo* response) { + if (!cache_) + return; + + Details& details = resource_data_->details; + + details.expiration = GetExpiration(response); + details.last_access = TimeToInt(response->request_time); + details.flags = GetCacheability(response); + details.vary_hash = GetVaryHash(response); + details.response_hash = adler32(0, Z_NULL, 0); // Init the hash. 
+ + if (!details.flags && + TimeToInt(response->response_time) == details.expiration) { + details.flags = EXPIRED; + } + details.flags |= GetRevalidationFlags(response); + + if (must_doom_entry_) + details.flags |= DOOM_METHOD; + + Pickle pickle; + response->Persist(&pickle, true, false); // Skip transient headers. + details.headers_size = pickle.size(); + details.headers_hash = adler32(0, Z_NULL, 0); + details.headers_hash = adler32(details.headers_hash, + reinterpret_cast<const Bytef*>(pickle.data()), + pickle.size()); +} + +void InfiniteCacheTransaction::OnDataRead(const char* data, int data_len) { + if (!cache_) + return; + + if (!data_len) + return Finish(); + + resource_data_->details.response_size += data_len; + + resource_data_->details.response_hash = + adler32(resource_data_->details.response_hash, + reinterpret_cast<const Bytef*>(data), data_len); +} + +void InfiniteCacheTransaction::OnTruncatedResponse() { + if (!cache_) + return; + + resource_data_->details.flags |= TRUNCATED; +} + +void InfiniteCacheTransaction::OnServedFromCache() { + if (!cache_) + return; + + resource_data_->details.flags |= CACHED; +} + +void InfiniteCacheTransaction::Finish() { + if (!cache_ || !resource_data_.get()) + return; + + if (!resource_data_->details.headers_size) + return; + + cache_->ProcessResource(resource_data_.Pass()); + cache_.reset(); +} + +// ---------------------------------------------------------------------------- + +// This is the object that performs the bulk of the work. +// InfiniteCacheTransaction posts the transaction data to the InfiniteCache, and +// the InfiniteCache basically just forward requests to the Worker for actual +// processing. +// The Worker lives on a worker thread (basically a dedicated worker pool with +// only one thread), and flushes data to disk once every five minutes, when it +// is notified by the InfiniteCache. 
+// In general, there are no callbacks on completion of tasks, and the Worker can +// be as behind as it has to when processing requests. +class InfiniteCache::Worker : public base::RefCountedThreadSafe<Worker> { + public: + Worker() : init_(false), flushed_(false) {} + + // Construction and destruction helpers. + void Init(const FilePath& path); + void Cleanup(); + + // Deletes all tracked data. + void DeleteData(int* result); + + // Deletes requests between |initial_time| and |end_time|. + void DeleteDataBetween(base::Time initial_time, + base::Time end_time, + int* result); + + // Performs the actual processing of a new transaction. Takes ownership of + // the transaction |data|. + void Process(scoped_ptr<InfiniteCacheTransaction::ResourceData> data); + + // Test helpers. + void Query(int* result); + void Flush(int* result); + + // Timer notification. + void OnTimer(); + + private: + friend base::RefCountedThreadSafe<Worker>; +#if defined(COMPILER_MSVC) + typedef BASE_HASH_NAMESPACE::hash_map< + Key, Details, BASE_HASH_NAMESPACE::hash_compare<Key, Key_less> > KeyMap; +#elif defined(COMPILER_GCC) + typedef BASE_HASH_NAMESPACE::hash_map< + Key, Details, BASE_HASH_NAMESPACE::hash<Key>, Key_eq> KeyMap; +#endif + + // Header for the data file. The file starts with the header, followed by + // all the records, and a data hash at the end (just of the records, not the + // header). Note that the header has a dedicated hash. + struct Header { + uint32 magic; + uint32 version; + int32 num_entries; + int32 generation; + uint64 creation_time; + uint64 update_time; + int64 total_size; + int64 size_last_report; + int32 use_minutes; + int32 num_hits; + int32 num_bad_hits; + int32 num_requests; + int32 disabled; + uint32 header_hash; + }; + + ~Worker() {} + + // Methods to load and store data on disk. 
+ void LoadData(); + void StoreData(); + void InitializeData(); + bool ReadData(PlatformFile file); + bool WriteData(PlatformFile file); + bool ReadAndVerifyHeader(PlatformFile file); + + // Book-keeping methods. + void Add(const Details& details); + void Remove(const Details& details); + void UpdateSize(int old_size, int new_size); + + // Bulk of report generation methods. + void RecordHit(const Details& old, Details* details); + void RecordUpdate(const Details& old, Details* details); + void GenerateHistograms(); + + // Cache logic methods. + bool CanReuse(const Details& old, const Details& current); + bool DataChanged(const Details& old, const Details& current); + bool HeadersChanged(const Details& old, const Details& current); + + KeyMap map_; + bool init_; + bool flushed_; + scoped_ptr<Header> header_; + FilePath path_; + + DISALLOW_COPY_AND_ASSIGN(Worker); +}; + +void InfiniteCache::Worker::Init(const FilePath& path) { + path_ = path; + LoadData(); +} + +void InfiniteCache::Worker::Cleanup() { + if (init_) + StoreData(); + + map_.clear(); +} + +void InfiniteCache::Worker::DeleteData(int* result) { + if (!init_) + return; + + map_.clear(); + InitializeData(); + file_util::Delete(path_, false); + *result = OK; + UMA_HISTOGRAM_BOOLEAN("InfiniteCache.DeleteAll", true); +} + +void InfiniteCache::Worker::DeleteDataBetween(base::Time initial_time, + base::Time end_time, + int* result) { + if (!init_) + return; + + for (KeyMap::iterator i = map_.begin(); i != map_.end();) { + Time last_access = IntToTime(i->second.last_access); + if (last_access >= initial_time && last_access <= end_time) { + KeyMap::iterator next = i; + ++next; + Remove(i->second); + map_.erase(i); + i = next; + continue; + } + ++i; + } + + file_util::Delete(path_, false); + StoreData(); + *result = OK; + UMA_HISTOGRAM_BOOLEAN("InfiniteCache.DeleteAll", true); +} + +void InfiniteCache::Worker::Process( + scoped_ptr<InfiniteCacheTransaction::ResourceData> data) { + if (!init_) + return; + + if 
(data->details.response_size > kMaxTrackingSize) + return; + + if (header_->num_entries == kMaxNumEntries) + return; + + header_->num_requests++; + KeyMap::iterator i = map_.find(data->key); + if (i != map_.end()) { + if (data->details.flags & DOOM_METHOD) { + Remove(i->second); + map_.erase(i); + return; + } + data->details.use_count = i->second.use_count; + data->details.update_count = i->second.update_count; + if (data->details.flags & CACHED) { + RecordHit(i->second, &data->details); + } else { + bool reused = CanReuse(i->second, data->details); + bool data_changed = DataChanged(i->second, data->details); + bool headers_changed = HeadersChanged(i->second, data->details); + + if (reused && data_changed) + header_->num_bad_hits++; + + if (reused) + RecordHit(i->second, &data->details); + + if (headers_changed) + UMA_HISTOGRAM_BOOLEAN("InfiniteCache.HeadersChange", true); + + if (data_changed) + RecordUpdate(i->second, &data->details); + } + + if (data->details.flags & NO_STORE) { + Remove(i->second); + map_.erase(i); + return; + } + + map_[data->key] = data->details; + return; + } + + if (data->details.flags & NO_STORE) + return; + + if (data->details.flags & DOOM_METHOD) + return; + + map_[data->key] = data->details; + Add(data->details); +} + +void InfiniteCache::Worker::LoadData() { + if (path_.empty()) + return InitializeData();; + + PlatformFile file = base::CreatePlatformFile( + path_, base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ, NULL, NULL); + if (file == base::kInvalidPlatformFileValue) + return InitializeData(); + if (!ReadData(file)) + InitializeData(); + base::ClosePlatformFile(file); + if (header_->disabled) + map_.clear(); +} + +void InfiniteCache::Worker::StoreData() { + if (!init_ || flushed_ || path_.empty()) + return; + + header_->update_time = Time::Now().ToInternalValue(); + header_->generation++; + header_->header_hash = base::Hash( + reinterpret_cast<char*>(header_.get()), offsetof(Header, header_hash)); + + FilePath temp_file = 
path_.ReplaceExtension(FILE_PATH_LITERAL("tmp")); + PlatformFile file = base::CreatePlatformFile( + temp_file, base::PLATFORM_FILE_CREATE_ALWAYS | base::PLATFORM_FILE_WRITE, + NULL, NULL); + if (file == base::kInvalidPlatformFileValue) + return; + bool success = WriteData(file); + base::ClosePlatformFile(file); + if (success) { + if (!file_util::ReplaceFile(temp_file, path_)) + file_util::Delete(temp_file, false); + } else { + LOG(ERROR) << "Failed to write experiment data"; + } +} + +void InfiniteCache::Worker::InitializeData() { + header_.reset(new Header); + memset(header_.get(), 0, sizeof(Header)); + header_->magic = kMagicSignature; + header_->version = kCurrentVersion; + header_->creation_time = Time::Now().ToInternalValue(); + + UMA_HISTOGRAM_BOOLEAN("InfiniteCache.Initialize", true); + init_ = true; +} + +bool InfiniteCache::Worker::ReadData(PlatformFile file) { + if (!ReadAndVerifyHeader(file)) + return false; + + scoped_array<char> buffer(new char[kBufferSize]); + size_t offset = sizeof(Header); + uint32 hash = adler32(0, Z_NULL, 0); + + for (int remaining_records = header_->num_entries; remaining_records;) { + int num_records = std::min(header_->num_entries, + static_cast<int>(kMaxRecordsToRead)); + size_t num_bytes = num_records * kRecordSize; + remaining_records -= num_records; + if (!remaining_records) + num_bytes += sizeof(uint32); // Trailing hash. 
+ DCHECK_LE(num_bytes, kBufferSize); + + if (!ReadPlatformFile(file, offset, buffer.get(), num_bytes)) + return false; + + hash = adler32(hash, reinterpret_cast<const Bytef*>(buffer.get()), + num_records * kRecordSize); + if (!remaining_records && + hash != *reinterpret_cast<uint32*>(buffer.get() + + num_records * kRecordSize)) { + return false; + } + + for (int i = 0; i < num_records; i++) { + char* record = buffer.get() + i * kRecordSize; + Key key = *reinterpret_cast<Key*>(record); + Details details = *reinterpret_cast<Details*>(record + sizeof(key)); + map_[key] = details; + } + offset += num_bytes; + } + if (header_->num_entries != static_cast<int>(map_.size())) { + NOTREACHED(); + return false; + } + + init_ = true; + return true; +} + +bool InfiniteCache::Worker::WriteData(PlatformFile file) { + if (!base::TruncatePlatformFile(file, 0)) + return false; + + if (!WritePlatformFile(file, 0, header_.get(), sizeof(Header))) + return false; + + scoped_array<char> buffer(new char[kBufferSize]); + size_t offset = sizeof(Header); + uint32 hash = adler32(0, Z_NULL, 0); + + DCHECK_EQ(header_->num_entries, static_cast<int32>(map_.size())); + KeyMap::iterator iterator = map_.begin(); + for (int remaining_records = header_->num_entries; remaining_records;) { + int num_records = std::min(header_->num_entries, + static_cast<int>(kMaxRecordsToRead)); + size_t num_bytes = num_records * kRecordSize; + remaining_records -= num_records; + + for (int i = 0; i < num_records; i++) { + if (iterator == map_.end()) { + NOTREACHED(); + return false; + } + char* record = buffer.get() + i * kRecordSize; + *reinterpret_cast<Key*>(record) = iterator->first; + *reinterpret_cast<Details*>(record + sizeof(Key)) = iterator->second; + ++iterator; + } + + hash = adler32(hash, reinterpret_cast<const Bytef*>(buffer.get()), + num_bytes); + + if (!remaining_records) { + num_bytes += sizeof(uint32); // Trailing hash. 
+ *reinterpret_cast<uint32*>(buffer.get() + + num_records * kRecordSize) = hash; + } + + DCHECK_LE(num_bytes, kBufferSize); + if (!WritePlatformFile(file, offset, buffer.get(), num_bytes)) + return false; + + offset += num_bytes; + } + base::FlushPlatformFile(file); // Ignore return value. + return true; +} + +bool InfiniteCache::Worker::ReadAndVerifyHeader(PlatformFile file) { + base::PlatformFileInfo info; + if (!base::GetPlatformFileInfo(file, &info)) + return false; + + if (info.size < static_cast<int>(sizeof(Header))) + return false; + + header_.reset(new Header); + if (!ReadPlatformFile(file, 0, header_.get(), sizeof(Header))) + return false; + + if (header_->magic != kMagicSignature) + return false; + + if (header_->version != kCurrentVersion) + return false; + + if (header_->num_entries > kMaxNumEntries) + return false; + + size_t expected_size = kRecordSize * header_->num_entries + + sizeof(Header) + sizeof(uint32); // Trailing hash. + + if (info.size < static_cast<int>(expected_size)) + return false; + + uint32 hash = base::Hash(reinterpret_cast<char*>(header_.get()), + offsetof(Header, header_hash)); + if (hash != header_->header_hash) + return false; + + return true; +} + +void InfiniteCache::Worker::Query(int* result) { + *result = static_cast<int>(map_.size()); +} + +void InfiniteCache::Worker::Flush(int* result) { + flushed_ = false; + StoreData(); + flushed_ = true; + *result = OK; +} + +void InfiniteCache::Worker::OnTimer() { + header_->use_minutes += kTimerMinutes; + GenerateHistograms(); + StoreData(); +} + +void InfiniteCache::Worker::Add(const Details& details) { + UpdateSize(0, details.headers_size); + UpdateSize(0, details.response_size); + header_->num_entries = static_cast<int>(map_.size()); + if (header_->num_entries == kMaxNumEntries) { + int use_hours = header_->use_minutes / 60; + int age_hours = (Time::Now() - + Time::FromInternalValue(header_->creation_time)).InHours(); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.MaxUseTime", 
use_hours); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.MaxAge", age_hours); + + int entry_size = static_cast<int>(header_->total_size / kMaxNumEntries); + UMA_HISTOGRAM_COUNTS("InfiniteCache.FinalAvgEntrySize", entry_size); + header_->disabled = 1; + map_.clear(); + } +} + +void InfiniteCache::Worker::Remove(const Details& details) { + UpdateSize(details.headers_size, 0); + UpdateSize(details.response_size, 0); + header_->num_entries--; +} + +void InfiniteCache::Worker::UpdateSize(int old_size, int new_size) { + header_->total_size += new_size - old_size; + DCHECK_GE(header_->total_size, 0); +} + +void InfiniteCache::Worker::RecordHit(const Details& old, Details* details) { + header_->num_hits++; + int access_delta = (IntToTime(details->last_access) - + IntToTime(old.last_access)).InMinutes(); + if (old.use_count) + UMA_HISTOGRAM_COUNTS("InfiniteCache.ReuseAge", access_delta); + else + UMA_HISTOGRAM_COUNTS("InfiniteCache.FirstReuseAge", access_delta); + + details->use_count = old.use_count; + if (details->use_count < kuint8max) + details->use_count++; + UMA_HISTOGRAM_CUSTOM_COUNTS("InfiniteCache.UseCount", details->use_count, 0, + kuint8max, 25); +} + +void InfiniteCache::Worker::RecordUpdate(const Details& old, Details* details) { + int access_delta = (IntToTime(details->last_access) - + IntToTime(old.last_access)).InMinutes(); + if (old.update_count) + UMA_HISTOGRAM_COUNTS("InfiniteCache.UpdateAge", access_delta); + else + UMA_HISTOGRAM_COUNTS("InfiniteCache.FirstUpdateAge", access_delta); + + details->update_count = old.update_count; + if (details->update_count < kuint8max) + details->update_count++; + + UMA_HISTOGRAM_CUSTOM_COUNTS("InfiniteCache.UpdateCount", + details->update_count, 0, kuint8max, 25); + details->use_count = 0; +} + +void InfiniteCache::Worker::GenerateHistograms() { + bool new_size_step = (header_->total_size / kReportSizeStep != + header_->size_last_report / kReportSizeStep); + header_->size_last_report = header_->total_size; + if 
(!new_size_step && (header_->use_minutes % 60 != 0)) + return; + + if (header_->disabled) + return; + + int hit_ratio = header_->num_hits * 100; + if (header_->num_requests) + hit_ratio /= header_->num_requests; + else + hit_ratio = 0; + + // We'll be generating pairs of histograms that can be used to get the hit + // ratio for any bucket of the paired histogram. + bool report_second_stat = base::RandInt(0, 99) < hit_ratio; + + if (header_->use_minutes % 60 == 0) { + int use_hours = header_->use_minutes / 60; + int age_hours = (Time::Now() - + Time::FromInternalValue(header_->creation_time)).InHours(); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.UseTime", use_hours); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.Age", age_hours); + if (report_second_stat) { + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.HitRatioByUseTime", use_hours); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.HitRatioByAge", age_hours); + } + } + + if (new_size_step) { + int size_bucket = static_cast<int>(header_->total_size / kReportSizeStep); + UMA_HISTOGRAM_ENUMERATION("InfiniteCache.Size", std::min(size_bucket, 50), + 51); + UMA_HISTOGRAM_ENUMERATION("InfiniteCache.SizeCoarse", size_bucket / 5, 51); + UMA_HISTOGRAM_COUNTS("InfiniteCache.Entries", header_->num_entries); + UMA_HISTOGRAM_COUNTS_10000("InfiniteCache.BadHits", header_->num_bad_hits); + if (report_second_stat) { + UMA_HISTOGRAM_ENUMERATION("InfiniteCache.HitRatioBySize", + std::min(size_bucket, 50), 51); + UMA_HISTOGRAM_ENUMERATION("InfiniteCache.HitRatioBySizeCoarse", + size_bucket / 5, 51); + UMA_HISTOGRAM_COUNTS("InfiniteCache.HitRatioByEntries", + header_->num_entries); + } + header_->num_hits = 0; + header_->num_bad_hits = 0; + header_->num_requests = 0; + } +} + +bool InfiniteCache::Worker::CanReuse(const Details& old, + const Details& current) { + enum ReuseStatus { + REUSE_OK = 0, + REUSE_NO_CACHE, + REUSE_ALWAYS_EXPIRED, + REUSE_EXPIRED, + REUSE_TRUNCATED, + REUSE_VARY, + REUSE_DUMMY_VALUE, + // Not an individual value; it's 
added to another reason. + REUSE_REVALIDATEABLE = 7 + }; + int reason = REUSE_OK; + + if (old.flags & NO_CACHE) + reason = REUSE_NO_CACHE; + + if (old.flags & EXPIRED) + reason = REUSE_ALWAYS_EXPIRED; + + if (old.flags & TRUNCATED) + reason = REUSE_TRUNCATED; + + Time expiration = IntToTime(old.expiration); + if (expiration < Time::Now()) + reason = REUSE_EXPIRED; + + if (old.vary_hash != current.vary_hash) + reason = REUSE_VARY; + + bool have_to_drop = (old.flags & TRUNCATED) && !(old.flags & RESUMABLE); + if (reason && (old.flags & REVALIDATEABLE) && !have_to_drop) + reason += REUSE_REVALIDATEABLE; + + UMA_HISTOGRAM_ENUMERATION("InfiniteCache.ReuseFailure", reason, 15); + return !reason; +} + +bool InfiniteCache::Worker::DataChanged(const Details& old, + const Details& current) { + bool changed = false; + if (old.response_size != current.response_size) { + changed = true; + UpdateSize(old.response_size, current.response_size); + } + + if (old.response_hash != current.response_hash) + changed = true; + + return changed; +} + +bool InfiniteCache::Worker::HeadersChanged(const Details& old, + const Details& current) { + bool changed = false; + if (old.headers_size != current.headers_size) { + changed = true; + UpdateSize(old.headers_size, current.headers_size); + } + + if (old.headers_hash != current.headers_hash) + changed = true; + + return changed; +} + +// ---------------------------------------------------------------------------- + +InfiniteCache::InfiniteCache() { +} + +InfiniteCache::~InfiniteCache() { + if (!worker_) + return; + + task_runner_->PostTask(FROM_HERE, + base::Bind(&InfiniteCache::Worker::Cleanup, worker_)); + worker_ = NULL; +} + +void InfiniteCache::Init(const FilePath& path) { + worker_pool_ = new base::SequencedWorkerPool(1, "Infinite cache thread"); + task_runner_ = worker_pool_->GetSequencedTaskRunnerWithShutdownBehavior( + worker_pool_->GetSequenceToken(), + base::SequencedWorkerPool::CONTINUE_ON_SHUTDOWN); + + worker_ = new Worker(); + 
task_runner_->PostTask(FROM_HERE, + base::Bind(&InfiniteCache::Worker::Init, worker_, + path)); + + timer_.Start(FROM_HERE, TimeDelta::FromMinutes(kTimerMinutes), this, + &InfiniteCache::OnTimer); +} + +InfiniteCacheTransaction* InfiniteCache::CreateInfiniteCacheTransaction() { + if (!worker_) + return NULL; + return new InfiniteCacheTransaction(this); +} + +int InfiniteCache::DeleteData(const CompletionCallback& callback) { + if (!worker_) + return OK; + int* result = new int; + task_runner_->PostTaskAndReply( + FROM_HERE, base::Bind(&InfiniteCache::Worker::DeleteData, worker_, + result), + base::Bind(&OnComplete, callback, base::Owned(result))); + return ERR_IO_PENDING; +} + +int InfiniteCache::DeleteDataBetween(base::Time initial_time, + base::Time end_time, + const CompletionCallback& callback) { + if (!worker_) + return OK; + int* result = new int; + task_runner_->PostTaskAndReply( + FROM_HERE, base::Bind(&InfiniteCache::Worker::DeleteDataBetween, worker_, + initial_time, end_time, result), + base::Bind(&OnComplete, callback, base::Owned(result))); + return ERR_IO_PENDING; +} + +std::string InfiniteCache::GenerateKey(const HttpRequestInfo* request) { + // Don't add any upload data identifier. 
+ return HttpUtil::SpecForRequest(request->url); +} + +void InfiniteCache::ProcessResource( + scoped_ptr<InfiniteCacheTransaction::ResourceData> data) { + if (!worker_) + return; + task_runner_->PostTask(FROM_HERE, + base::Bind(&InfiniteCache::Worker::Process, worker_, + base::Passed(&data))); +} + +void InfiniteCache::OnTimer() { + task_runner_->PostTask(FROM_HERE, + base::Bind(&InfiniteCache::Worker::OnTimer, worker_)); +} + +int InfiniteCache::QueryItemsForTest(const CompletionCallback& callback) { + DCHECK(worker_); + int* result = new int; + task_runner_->PostTaskAndReply( + FROM_HERE, base::Bind(&InfiniteCache::Worker::Query, worker_, result), + base::Bind(&OnComplete, callback, base::Owned(result))); + return net::ERR_IO_PENDING; +} + +int InfiniteCache::FlushDataForTest(const CompletionCallback& callback) { + DCHECK(worker_); + int* result = new int; + task_runner_->PostTaskAndReply( + FROM_HERE, base::Bind(&InfiniteCache::Worker::Flush, worker_, result), + base::Bind(&OnComplete, callback, base::Owned(result))); + return net::ERR_IO_PENDING; +} + +} // namespace net diff --git a/net/http/infinite_cache.h b/net/http/infinite_cache.h new file mode 100644 index 0000000..ee86078 --- /dev/null +++ b/net/http/infinite_cache.h @@ -0,0 +1,120 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This is the interface of a simulated infinite cache. The purpose of this +// code is to evaluate the performance of an HTTP cache that is not constrained +// to evict resources. 
+ +#ifndef NET_HTTP_INFINITE_CACHE_H_ +#define NET_HTTP_INFINITE_CACHE_H_ + +#include <string> + +#include "base/basictypes.h" +#include "base/memory/scoped_ptr.h" +#include "base/memory/weak_ptr.h" +#include "base/timer.h" +#include "net/base/completion_callback.h" +#include "net/base/net_export.h" + +class FilePath; + +namespace base { +class SequencedTaskRunner; +class SequencedWorkerPool; +} + +namespace net { + +class HttpCache; +class HttpResponseInfo; +class InfiniteCache; +struct HttpRequestInfo; + +// An InfiniteCacheTransaction is paired with an HttpCache::Transaction to track +// every request that goes through the HttpCache. This object is notified when +// relevant steps are reached while processing the request. +class NET_EXPORT_PRIVATE InfiniteCacheTransaction { + public: + explicit InfiniteCacheTransaction(InfiniteCache* cache); + ~InfiniteCacheTransaction(); + + // Called when a new HttpTransaction is started. + void OnRequestStart(const HttpRequestInfo* request); + + // Called when the response headers are available. + void OnResponseReceived(const HttpResponseInfo* response); + + // Called when the response data is received from the network. + void OnDataRead(const char* data, int data_len); + + // Called when the resource is marked as truncated by the HttpCache. + void OnTruncatedResponse(); + + // Called when the resource is served from the cache, so OnDataRead will not + // be called for this request. + void OnServedFromCache(); + + private: + friend class InfiniteCache; + struct ResourceData; + void Finish(); + + base::WeakPtr<InfiniteCache> cache_; + scoped_ptr<ResourceData> resource_data_; + bool must_doom_entry_; + DISALLOW_COPY_AND_ASSIGN(InfiniteCacheTransaction); +}; + +// An InfiniteCache is paired with an HttpCache instance to simulate an infinite +// backend storage. 
+class NET_EXPORT_PRIVATE InfiniteCache + : public base::SupportsWeakPtr<InfiniteCache> { + public: + InfiniteCache(); + ~InfiniteCache(); + + // Initializes this object to start tracking requests. |path| is the location + // of the file to use to store data; it can be empty, in which case the data + // will not be persisted to disk. + void Init(const FilePath& path); + + InfiniteCacheTransaction* CreateInfiniteCacheTransaction(); + + // Removes all data for this experiment. Returns a net error code. + int DeleteData(const CompletionCallback& callback); + + // Removes requests between |initial_time| and |end_time|. + int DeleteDataBetween(base::Time initial_time, + base::Time end_time, + const CompletionCallback& callback); + + // Returns the number of elements currently tracked. + int QueryItemsForTest(const CompletionCallback& callback); + + // Flush the data to disk, preventing flushing when the destructor runs. Any + // data added to the cache after calling this method will not be written to + // disk, unless this method is called again to do so. + int FlushDataForTest(const CompletionCallback& callback); + + private: + class Worker; + friend class base::RepeatingTimer<InfiniteCache>; + friend class InfiniteCacheTransaction; + + std::string GenerateKey(const HttpRequestInfo* request); + void ProcessResource(scoped_ptr<InfiniteCacheTransaction::ResourceData> data); + void OnTimer(); + + scoped_refptr<Worker> worker_; + scoped_refptr<base::SequencedWorkerPool> worker_pool_; + scoped_refptr<base::SequencedTaskRunner> task_runner_; + base::RepeatingTimer<InfiniteCache> timer_; + + DISALLOW_COPY_AND_ASSIGN(InfiniteCache); +}; + +} // namespace net + +#endif // NET_HTTP_INFINITE_CACHE_H_ diff --git a/net/http/infinite_cache_unittest.cc b/net/http/infinite_cache_unittest.cc new file mode 100644 index 0000000..78ef018 --- /dev/null +++ b/net/http/infinite_cache_unittest.cc @@ -0,0 +1,234 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "net/http/infinite_cache.h" + +#include "base/file_util.h" +#include "base/scoped_temp_dir.h" +#include "base/threading/platform_thread.h" +#include "base/time.h" +#include "net/base/net_errors.h" +#include "net/base/test_completion_callback.h" +#include "net/http/http_transaction_unittest.h" +#include "testing/gtest/include/gtest/gtest.h" + +using base::Time; +using base::TimeDelta; +using net::InfiniteCache; +using net::InfiniteCacheTransaction; + +namespace { + +void StartRequest(const MockTransaction& http_transaction, + InfiniteCacheTransaction* transaction) { + scoped_refptr<net::HttpResponseHeaders> headers( + new net::HttpResponseHeaders(http_transaction.response_headers)); + net::HttpResponseInfo response; + response.headers = headers; + response.request_time = http_transaction.request_time.is_null() ? + Time::Now() : http_transaction.request_time; + response.response_time = Time::Now(); + + MockHttpRequest request(http_transaction); + transaction->OnRequestStart(&request); + transaction->OnResponseReceived(&response); +} + +void ProcessRequest(const MockTransaction& http_transaction, + InfiniteCache* cache) { + scoped_ptr<InfiniteCacheTransaction> transaction + (cache->CreateInfiniteCacheTransaction()); + + StartRequest(http_transaction, transaction.get()); + transaction->OnDataRead(http_transaction.data, strlen(http_transaction.data)); +} + +void ProcessRequestWithTime(const MockTransaction& http_transaction, + InfiniteCache* cache, + Time time) { + scoped_ptr<InfiniteCacheTransaction> transaction + (cache->CreateInfiniteCacheTransaction()); + + MockTransaction timed_transaction = http_transaction; + timed_transaction.request_time = time; + StartRequest(timed_transaction, transaction.get()); + transaction->OnDataRead(http_transaction.data, strlen(http_transaction.data)); +} + +} // namespace + +TEST(InfiniteCache, Basics) { + 
InfiniteCache cache; + cache.Init(FilePath()); + + scoped_ptr<InfiniteCacheTransaction> transaction + (cache.CreateInfiniteCacheTransaction()); + + // Don't even Start() this transaction. + transaction.reset(cache.CreateInfiniteCacheTransaction()); + + net::TestCompletionCallback cb; + EXPECT_EQ(0, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + MockHttpRequest request(kTypicalGET_Transaction); + transaction->OnRequestStart(&request); + + // Don't have a response yet. + transaction.reset(cache.CreateInfiniteCacheTransaction()); + EXPECT_EQ(0, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + net::HttpResponseInfo response; + scoped_refptr<net::HttpResponseHeaders> headers( + new net::HttpResponseHeaders(kTypicalGET_Transaction.response_headers)); + response.headers = headers; + + transaction->OnRequestStart(&request); + transaction->OnResponseReceived(&response); + transaction.reset(cache.CreateInfiniteCacheTransaction()); + EXPECT_EQ(1, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + // Hit the same URL again. + transaction->OnRequestStart(&request); + transaction->OnResponseReceived(&response); + transaction.reset(cache.CreateInfiniteCacheTransaction()); + EXPECT_EQ(1, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + // Now a different URL. 
+ MockHttpRequest request2(kSimpleGET_Transaction); + transaction->OnRequestStart(&request2); + transaction->OnResponseReceived(&response); + transaction.reset(); + EXPECT_EQ(2, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); +} + +TEST(InfiniteCache, Save_Restore) { + ScopedTempDir dir; + ASSERT_TRUE(dir.CreateUniqueTempDir()); + FilePath path = dir.path().Append(FILE_PATH_LITERAL("infinite")); + + scoped_ptr<InfiniteCache> cache(new InfiniteCache); + cache->Init(path); + net::TestCompletionCallback cb; + + ProcessRequest(kTypicalGET_Transaction, cache.get()); + ProcessRequest(kSimpleGET_Transaction, cache.get()); + ProcessRequest(kETagGET_Transaction, cache.get()); + ProcessRequest(kSimplePOST_Transaction, cache.get()); + + EXPECT_EQ(3, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); + + cache.reset(new InfiniteCache); + cache->Init(path); + EXPECT_EQ(3, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + + ProcessRequest(kTypicalGET_Transaction, cache.get()); + EXPECT_EQ(3, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); +} + +TEST(InfiniteCache, DoomMethod) { + InfiniteCache cache; + cache.Init(FilePath()); + + ProcessRequest(kTypicalGET_Transaction, &cache); + ProcessRequest(kSimpleGET_Transaction, &cache); + ProcessRequest(kETagGET_Transaction, &cache); + net::TestCompletionCallback cb; + EXPECT_EQ(3, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + MockTransaction request(kTypicalGET_Transaction); + request.method = "PUT"; + ProcessRequest(request, &cache); + EXPECT_EQ(2, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + request.method = "POST"; + request.url = kSimpleGET_Transaction.url; + ProcessRequest(request, &cache); + EXPECT_EQ(1, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); + + request.method = "DELETE"; + request.url = 
kETagGET_Transaction.url; + ProcessRequest(request, &cache); + EXPECT_EQ(0, cb.GetResult(cache.QueryItemsForTest(cb.callback()))); +} + +TEST(InfiniteCache, Delete) { + ScopedTempDir dir; + ASSERT_TRUE(dir.CreateUniqueTempDir()); + FilePath path = dir.path().Append(FILE_PATH_LITERAL("infinite")); + + scoped_ptr<InfiniteCache> cache(new InfiniteCache); + cache->Init(path); + net::TestCompletionCallback cb; + + ProcessRequest(kTypicalGET_Transaction, cache.get()); + ProcessRequest(kSimpleGET_Transaction, cache.get()); + EXPECT_EQ(2, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); + EXPECT_TRUE(file_util::PathExists(path)); + + cache.reset(new InfiniteCache); + cache->Init(path); + EXPECT_EQ(2, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_EQ(net::OK, cb.GetResult(cache->DeleteData(cb.callback()))); + EXPECT_EQ(0, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_FALSE(file_util::PathExists(path)); + + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); +} + +TEST(InfiniteCache, DeleteBetween) { + ScopedTempDir dir; + ASSERT_TRUE(dir.CreateUniqueTempDir()); + FilePath path = dir.path().Append(FILE_PATH_LITERAL("infinite")); + + scoped_ptr<InfiniteCache> cache(new InfiniteCache); + cache->Init(path); + net::TestCompletionCallback cb; + + Time::Exploded baseline = {}; + baseline.year = 2012; + baseline.month = 1; + baseline.day_of_month = 1; + Time base_time = Time::FromUTCExploded(baseline); + + ProcessRequestWithTime(kTypicalGET_Transaction, cache.get(), base_time); + + Time start = base_time + TimeDelta::FromSeconds(2); + ProcessRequestWithTime(kSimpleGET_Transaction, cache.get(), start); + Time end = start + TimeDelta::FromSeconds(2); + + ProcessRequestWithTime(kETagGET_Transaction, cache.get(), + end + TimeDelta::FromSeconds(2)); + + EXPECT_EQ(3, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + EXPECT_EQ(net::OK, + 
cb.GetResult(cache->DeleteDataBetween(start, end, + cb.callback()))); + EXPECT_EQ(2, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + + // Make sure the data is deleted from disk. + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); + cache.reset(new InfiniteCache); + cache->Init(path); + + EXPECT_EQ(2, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + ProcessRequest(kETagGET_Transaction, cache.get()); + EXPECT_EQ(2, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + + EXPECT_EQ(net::OK, + cb.GetResult(cache->DeleteDataBetween(start, Time::Now(), + cb.callback()))); + EXPECT_EQ(1, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + + // Make sure the data is deleted from disk. + EXPECT_EQ(net::OK, cb.GetResult(cache->FlushDataForTest(cb.callback()))); + cache.reset(new InfiniteCache); + cache->Init(path); + + EXPECT_EQ(1, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); + ProcessRequest(kTypicalGET_Transaction, cache.get()); + EXPECT_EQ(1, cb.GetResult(cache->QueryItemsForTest(cb.callback()))); +} diff --git a/net/net.gyp b/net/net.gyp index c12d62f..ee97bba 100644 --- a/net/net.gyp +++ b/net/net.gyp @@ -543,6 +543,8 @@ 'http/http_vary_data.cc', 'http/http_vary_data.h', 'http/http_version.h', + 'http/infinite_cache.cc', + 'http/infinite_cache.h', 'http/md4.cc', 'http/md4.h', 'http/partial_data.cc', @@ -1319,6 +1321,7 @@ 'http/http_transaction_unittest.h', 'http/http_util_unittest.cc', 'http/http_vary_data_unittest.cc', + 'http/infinite_cache_unittest.cc', 'http/mock_allow_url_security_manager.cc', 'http/mock_allow_url_security_manager.h', 'http/mock_gssapi_library_posix.cc', |