author     erg@chromium.org <erg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-11 07:25:40 +0000
committer  erg@chromium.org <erg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-11 07:25:40 +0000
commit     f48b943fa405abdbff3278bd6d29bde6d1ef103b (patch)
tree       f3ad7676f0e484e5c783ef080cfdfea5cab32f0e
parent     e0392155775eb3dc066d51e78a320a10627a74ad (diff)
More reordering of the methods in headers in net/.

BUG=68682
TEST=compiles

Review URL: http://codereview.chromium.org/6186005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@71017 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  net/base/address_list.cc               |   40
-rw-r--r--  net/base/capturing_net_log.cc           |   30
-rw-r--r--  net/base/capturing_net_log.h            |   14
-rw-r--r--  net/base/cert_verifier.h                |    6
-rw-r--r--  net/base/cookie_monster.cc              | 1761
-rw-r--r--  net/base/cookie_monster.h               |  109
-rw-r--r--  net/base/directory_lister.cc            |  134
-rw-r--r--  net/base/directory_lister.h             |    4
-rw-r--r--  net/base/dnssec_chain_verifier.cc       |  326
-rw-r--r--  net/http/http_auth_handler_factory.cc   |   80
-rw-r--r--  net/http/http_auth_handler_factory.h    |   20
-rw-r--r--  net/http/http_cache.cc                  |   46
-rw-r--r--  net/http/http_cache.h                   |   22
-rw-r--r--  net/http/http_response_headers.cc       |   11
-rw-r--r--  net/http/http_response_headers.h        |   11
-rw-r--r--  net/socket/client_socket_pool_base.h    |   16
16 files changed, 1320 insertions(+), 1310 deletions(-)
diff --git a/net/base/address_list.cc b/net/base/address_list.cc
index ea1ee1e..14ee0d6 100644
--- a/net/base/address_list.cc
+++ b/net/base/address_list.cc
@@ -101,18 +101,6 @@ struct AddressList::Data : public base::RefCountedThreadSafe<Data> {
AddressList::AddressList() {
}
-AddressList::AddressList(const AddressList& addresslist)
- : data_(addresslist.data_) {
-}
-
-AddressList::~AddressList() {
-}
-
-AddressList& AddressList::operator=(const AddressList& addresslist) {
- data_ = addresslist.data_;
- return *this;
-}
-
AddressList::AddressList(const IPAddressNumber& address, int port,
bool canonicalize_name) {
struct addrinfo* ai = new addrinfo;
@@ -160,6 +148,18 @@ AddressList::AddressList(const IPAddressNumber& address, int port,
SetPort(port);
}
+AddressList::AddressList(const AddressList& addresslist)
+ : data_(addresslist.data_) {
+}
+
+AddressList::~AddressList() {
+}
+
+AddressList& AddressList::operator=(const AddressList& addresslist) {
+ data_ = addresslist.data_;
+ return *this;
+}
+
void AddressList::Adopt(struct addrinfo* head) {
data_ = new Data(head, true /*is_system_created*/);
}
@@ -195,14 +195,6 @@ int AddressList::GetPort() const {
return GetPortFromAddrinfo(data_->head);
}
-bool AddressList::GetCanonicalName(std::string* canonical_name) const {
- DCHECK(canonical_name);
- if (!data_ || !data_->head->ai_canonname)
- return false;
- canonical_name->assign(data_->head->ai_canonname);
- return true;
-}
-
void AddressList::SetFrom(const AddressList& src, int port) {
if (src.GetPort() == port) {
// We can reference the data from |src| directly.
@@ -214,6 +206,14 @@ void AddressList::SetFrom(const AddressList& src, int port) {
}
}
+bool AddressList::GetCanonicalName(std::string* canonical_name) const {
+ DCHECK(canonical_name);
+ if (!data_ || !data_->head->ai_canonname)
+ return false;
+ canonical_name->assign(data_->head->ai_canonname);
+ return true;
+}
+
void AddressList::Reset() {
data_ = NULL;
}
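
A minimal usage sketch of the GetCanonicalName() method moved above; the helper name and the already-populated |addr_list| are illustrative, not part of this change:

// Illustrative caller; |addr_list| is an AddressList populated elsewhere
// (e.g. via Adopt()) with canonicalization requested at resolution time.
void LogCanonicalName(const net::AddressList& addr_list) {
  std::string canonical_name;
  if (addr_list.GetCanonicalName(&canonical_name)) {
    // |canonical_name| now holds ai_canonname from the head addrinfo.
    VLOG(1) << "canonical name: " << canonical_name;
  }
}
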
diff --git a/net/base/capturing_net_log.cc b/net/base/capturing_net_log.cc
index c6d34240..568d4c3 100644
--- a/net/base/capturing_net_log.cc
+++ b/net/base/capturing_net_log.cc
@@ -25,6 +25,21 @@ CapturingNetLog::CapturingNetLog(size_t max_num_entries)
CapturingNetLog::~CapturingNetLog() {}
+void CapturingNetLog::GetEntries(EntryList* entry_list) const {
+ AutoLock lock(lock_);
+ *entry_list = entries_;
+}
+
+void CapturingNetLog::Clear() {
+ AutoLock lock(lock_);
+ entries_.clear();
+}
+
+void CapturingNetLog::SetLogLevel(NetLog::LogLevel log_level) {
+ AutoLock lock(lock_);
+ log_level_ = log_level;
+}
+
void CapturingNetLog::AddEntry(EventType type,
const base::TimeTicks& time,
const Source& source,
@@ -45,21 +60,6 @@ NetLog::LogLevel CapturingNetLog::GetLogLevel() const {
return log_level_;
}
-void CapturingNetLog::GetEntries(EntryList* entry_list) const {
- AutoLock lock(lock_);
- *entry_list = entries_;
-}
-
-void CapturingNetLog::Clear() {
- AutoLock lock(lock_);
- entries_.clear();
-}
-
-void CapturingNetLog::SetLogLevel(NetLog::LogLevel log_level) {
- AutoLock lock(lock_);
- log_level_ = log_level;
-}
-
CapturingBoundNetLog::CapturingBoundNetLog(const NetLog::Source& source,
CapturingNetLog* net_log)
: source_(source), capturing_net_log_(net_log) {
diff --git a/net/base/capturing_net_log.h b/net/base/capturing_net_log.h
index 193d641..0678d9a 100644
--- a/net/base/capturing_net_log.h
+++ b/net/base/capturing_net_log.h
@@ -47,6 +47,13 @@ class CapturingNetLog : public NetLog {
explicit CapturingNetLog(size_t max_num_entries);
virtual ~CapturingNetLog();
+ // Returns the list of all entries in the log.
+ void GetEntries(EntryList* entry_list) const;
+
+ void Clear();
+
+ void SetLogLevel(NetLog::LogLevel log_level);
+
// NetLog implementation:
virtual void AddEntry(EventType type,
const base::TimeTicks& time,
@@ -56,13 +63,6 @@ class CapturingNetLog : public NetLog {
virtual uint32 NextID();
virtual LogLevel GetLogLevel() const;
- // Returns the list of all entries in the log.
- void GetEntries(EntryList* entry_list) const;
-
- void Clear();
-
- void SetLogLevel(NetLog::LogLevel log_level);
-
private:
// Needs to be "mutable" so can use it in GetEntries().
mutable Lock lock_;
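
A short usage sketch of the accessors hoisted above the NetLog overrides; LOG_ALL is assumed to be a NetLog::LogLevel enumerator, and the captured entries are assumed to be emitted by whatever code runs in between:

void InspectCapturedEntries() {
  net::CapturingNetLog log(10 /* max_num_entries */);
  log.SetLogLevel(net::NetLog::LOG_ALL);  // assumed enumerator

  // ... run code that writes to this NetLog ...

  net::CapturingNetLog::EntryList entries;
  log.GetEntries(&entries);  // copies the captured entries under |lock_|
  log.Clear();               // discards everything captured so far
}
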
diff --git a/net/base/cert_verifier.h b/net/base/cert_verifier.h
index f0df67a..c0fc9fc 100644
--- a/net/base/cert_verifier.h
+++ b/net/base/cert_verifier.h
@@ -28,14 +28,14 @@ struct CachedCertVerifyResult {
CachedCertVerifyResult();
~CachedCertVerifyResult();
+ // Returns true if |current_time| is greater than or equal to |expiry|.
+ bool HasExpired(base::Time current_time) const;
+
int error; // The return value of CertVerifier::Verify.
CertVerifyResult result; // The output of CertVerifier::Verify.
// The time at which the certificate verification result expires.
base::Time expiry;
-
- // Returns true if |current_time| is greater than or equal to |expiry|.
- bool HasExpired(base::Time current_time) const;
};
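
The relocated comment fully specifies HasExpired(); a plausible body consistent with it (the real definition lives in the .cc file, which this change does not touch) would be:

bool CachedCertVerifyResult::HasExpired(base::Time current_time) const {
  return current_time >= expiry;
}
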
// CertVerifier represents a service for verifying certificates.
diff --git a/net/base/cookie_monster.cc b/net/base/cookie_monster.cc
index 85f061d..cc96d44 100644
--- a/net/base/cookie_monster.cc
+++ b/net/base/cookie_monster.cc
@@ -98,178 +98,26 @@ const int kVlogGarbageCollection = 5;
const int kVlogSetCookies = 7;
const int kVlogGetCookies = 9;
-} // namespace
-
-// static
-bool CookieMonster::enable_file_scheme_ = false;
-
-// static
-void CookieMonster::EnableFileScheme() {
- enable_file_scheme_ = true;
-}
-
-CookieMonster::CookieMonster(PersistentCookieStore* store, Delegate* delegate)
- : initialized_(false),
- expiry_and_key_scheme_(expiry_and_key_default_),
- store_(store),
- last_access_threshold_(
- TimeDelta::FromSeconds(kDefaultAccessUpdateThresholdSeconds)),
- delegate_(delegate),
- last_statistic_record_time_(Time::Now()) {
- InitializeHistograms();
- SetDefaultCookieableSchemes();
-}
-
-CookieMonster::~CookieMonster() {
- DeleteAll(false);
-}
-
-// Initialize all histogram counter variables used in this class.
-//
-// Normal histogram usage involves using the macros defined in
-// histogram.h, which automatically takes care of declaring these
-// variables (as statics), initializing them, and accumulating into
-// them, all from a single entry point. Unfortunately, that solution
-// doesn't work for the CookieMonster, as it's vulnerable to races between
-// separate threads executing the same functions and hence initializing the
-// same static variables. There isn't a race danger in the histogram
-// accumulation calls; they are written to be resilient to simultaneous
-// calls from multiple threads.
-//
-// The solution taken here is to have per-CookieMonster instance
-// variables that are constructed during CookieMonster construction.
-// Note that these variables refer to the same underlying histogram,
-// so we still race (but safely) with other CookieMonster instances
-// for accumulation.
-//
-// To do this we've expanded out the individual histogram macros calls,
-// with declarations of the variables in the class decl, initialization here
-// (done from the class constructor) and direct calls to the accumulation
-// methods where needed. The specific histogram macro calls on which the
-// initialization is based are included in comments below.
-void CookieMonster::InitializeHistograms() {
- // From UMA_HISTOGRAM_CUSTOM_COUNTS
- histogram_expiration_duration_minutes_ = base::Histogram::FactoryGet(
- "Cookie.ExpirationDurationMinutes",
- 1, kMinutesInTenYears, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_between_access_interval_minutes_ = base::Histogram::FactoryGet(
- "Cookie.BetweenAccessIntervalMinutes",
- 1, kMinutesInTenYears, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_evicted_last_access_minutes_ = base::Histogram::FactoryGet(
- "Cookie.EvictedLastAccessMinutes",
- 1, kMinutesInTenYears, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_count_ = base::Histogram::FactoryGet(
- "Cookie.Count", 1, 4000, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_domain_count_ = base::Histogram::FactoryGet(
- "Cookie.DomainCount", 1, 4000, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_etldp1_count_ = base::Histogram::FactoryGet(
- "Cookie.Etldp1Count", 1, 4000, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
- histogram_domain_per_etldp1_count_ = base::Histogram::FactoryGet(
- "Cookie.DomainPerEtldp1Count", 1, 4000, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
-
- // From UMA_HISTOGRAM_COUNTS_10000 & UMA_HISTOGRAM_CUSTOM_COUNTS
- histogram_number_duplicate_db_cookies_ = base::Histogram::FactoryGet(
- "Net.NumDuplicateCookiesInDb", 1, 10000, 50,
- base::Histogram::kUmaTargetedHistogramFlag);
-
- // From UMA_HISTOGRAM_ENUMERATION
- histogram_cookie_deletion_cause_ = base::LinearHistogram::FactoryGet(
- "Cookie.DeletionCause", 1,
- DELETE_COOKIE_LAST_ENTRY - 1, DELETE_COOKIE_LAST_ENTRY,
- base::Histogram::kUmaTargetedHistogramFlag);
-
- // From UMA_HISTOGRAM_{CUSTOM_,}TIMES
- histogram_time_get_ = base::Histogram::FactoryTimeGet("Cookie.TimeGet",
- base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(1),
- 50, base::Histogram::kUmaTargetedHistogramFlag);
- histogram_time_load_ = base::Histogram::FactoryTimeGet("Cookie.TimeLoad",
- base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(1),
- 50, base::Histogram::kUmaTargetedHistogramFlag);
-}
-
-void CookieMonster::InitStore() {
- DCHECK(store_) << "Store must exist to initialize";
-
- TimeTicks beginning_time(TimeTicks::Now());
-
- // Initialize the store and sync in any saved persistent cookies. We don't
- // care if it's expired, insert it so it can be garbage collected, removed,
- // and sync'd.
- std::vector<CanonicalCookie*> cookies;
- // Reserve space for the maximum amount of cookies a database should have.
- // This prevents multiple vector growth / copies as we append cookies.
- cookies.reserve(kMaxCookies);
- store_->Load(&cookies);
-
- // Avoid ever letting cookies with duplicate creation times into the store;
- // that way we don't have to worry about what sections of code are safe
- // to call while it's in that state.
- std::set<int64> creation_times;
-
- // Presumably later than any access time in the store.
- Time earliest_access_time;
-
- for (std::vector<CanonicalCookie*>::const_iterator it = cookies.begin();
- it != cookies.end(); ++it) {
- int64 cookie_creation_time = (*it)->CreationDate().ToInternalValue();
-
- if (creation_times.insert(cookie_creation_time).second) {
- InternalInsertCookie(GetKey((*it)->Domain()), *it, false);
- const Time cookie_access_time((*it)->LastAccessDate());
- if (earliest_access_time.is_null() ||
- cookie_access_time < earliest_access_time)
- earliest_access_time = cookie_access_time;
- } else {
- LOG(ERROR) << base::StringPrintf("Found cookies with duplicate creation "
- "times in backing store: "
- "{name='%s', domain='%s', path='%s'}",
- (*it)->Name().c_str(),
- (*it)->Domain().c_str(),
- (*it)->Path().c_str());
- // We've been given ownership of the cookie and are throwing it
- // away; reclaim the space.
- delete (*it);
- }
- }
- earliest_access_time_= earliest_access_time;
-
- // After importing cookies from the PersistentCookieStore, verify that
- // none of our other constraints are violated.
- //
- // In particular, the backing store might have given us duplicate cookies.
- EnsureCookiesMapIsValid();
-
- histogram_time_load_->AddTime(TimeTicks::Now() - beginning_time);
+// Mozilla sorts on the path length (longest first), and then it
+// sorts by creation time (oldest first).
+// The RFC says the sort order for the domain attribute is undefined.
+bool CookieSorter(CookieMonster::CanonicalCookie* cc1,
+ CookieMonster::CanonicalCookie* cc2) {
+ if (cc1->Path().length() == cc2->Path().length())
+ return cc1->CreationDate() < cc2->CreationDate();
+ return cc1->Path().length() > cc2->Path().length();
}
-void CookieMonster::EnsureCookiesMapIsValid() {
- lock_.AssertAcquired();
-
- int num_duplicates_trimmed = 0;
-
- // Iterate through all of the cookies, grouped by host.
- CookieMap::iterator prev_range_end = cookies_.begin();
- while (prev_range_end != cookies_.end()) {
- CookieMap::iterator cur_range_begin = prev_range_end;
- const std::string key = cur_range_begin->first; // Keep a copy.
- CookieMap::iterator cur_range_end = cookies_.upper_bound(key);
- prev_range_end = cur_range_end;
-
- // Ensure no equivalent cookies for this host.
- num_duplicates_trimmed +=
- TrimDuplicateCookiesForKey(key, cur_range_begin, cur_range_end);
- }
+bool LRUCookieSorter(const CookieMonster::CookieMap::iterator& it1,
+ const CookieMonster::CookieMap::iterator& it2) {
+ // Cookies accessed less recently should be deleted first.
+ if (it1->second->LastAccessDate() != it2->second->LastAccessDate())
+ return it1->second->LastAccessDate() < it2->second->LastAccessDate();
- // Record how many duplicates were found in the database.
- // See InitializeHistograms() for details.
- histogram_cookie_deletion_cause_->Add(num_duplicates_trimmed);
+ // In rare cases we might have two cookies with identical last access times.
+ // To preserve the stability of the sort, in these cases prefer to delete
+ // older cookies over newer ones. CreationDate() is guaranteed to be unique.
+ return it1->second->CreationDate() < it2->second->CreationDate();
}
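
A brief sketch of how the CookieSorter comparator above is used (this change feeds it to std::sort() further down); SortTwoCookies and the two pointers are hypothetical stand-ins for existing CanonicalCookie objects:

void SortTwoCookies(CookieMonster::CanonicalCookie* cookie_a,
                    CookieMonster::CanonicalCookie* cookie_b) {
  std::vector<CookieMonster::CanonicalCookie*> cookies;
  cookies.push_back(cookie_a);  // e.g. path "/accounts/login"
  cookies.push_back(cookie_b);  // e.g. path "/"
  std::sort(cookies.begin(), cookies.end(), CookieSorter);
  // With those paths, |cookie_a| now precedes |cookie_b|: longer paths sort
  // first, and equal path lengths fall back to the older CreationDate().
}
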
// Our strategy to find duplicates is:
@@ -277,7 +125,7 @@ void CookieMonster::EnsureCookiesMapIsValid() {
// {list of cookies with this signature, sorted by creation time}.
// (2) For each list with more than 1 entry, keep the cookie having the
// most recent creation time, and delete the others.
-namespace {
+//
// Two cookies are considered equivalent if they have the same domain,
// name, and path.
struct CookieSignature {
@@ -309,252 +157,12 @@ struct CookieSignature {
std::string domain;
std::string path;
};
-}
-
-int CookieMonster::TrimDuplicateCookiesForKey(
- const std::string& key,
- CookieMap::iterator begin,
- CookieMap::iterator end) {
- lock_.AssertAcquired();
-
- // Set of cookies ordered by creation time.
- typedef std::set<CookieMap::iterator, OrderByCreationTimeDesc> CookieSet;
-
- // Helper map we populate to find the duplicates.
- typedef std::map<CookieSignature, CookieSet> EquivalenceMap;
- EquivalenceMap equivalent_cookies;
-
- // The number of duplicate cookies that have been found.
- int num_duplicates = 0;
-
- // Iterate through all of the cookies in our range, and insert them into
- // the equivalence map.
- for (CookieMap::iterator it = begin; it != end; ++it) {
- DCHECK_EQ(key, it->first);
- CanonicalCookie* cookie = it->second;
-
- CookieSignature signature(cookie->Name(), cookie->Domain(),
- cookie->Path());
- CookieSet& set = equivalent_cookies[signature];
-
- // We found a duplicate!
- if (!set.empty())
- num_duplicates++;
-
- // We save the iterator into |cookies_| rather than the actual cookie
- // pointer, since we may need to delete it later.
- bool insert_success = set.insert(it).second;
- DCHECK(insert_success) <<
- "Duplicate creation times found in duplicate cookie name scan.";
- }
-
- // If there were no duplicates, we are done!
- if (num_duplicates == 0)
- return 0;
-
- // Make sure we find everything below that we did above.
- int num_duplicates_found = 0;
-
- // Otherwise, delete all the duplicate cookies, both from our in-memory store
- // and from the backing store.
- for (EquivalenceMap::iterator it = equivalent_cookies.begin();
- it != equivalent_cookies.end();
- ++it) {
- const CookieSignature& signature = it->first;
- CookieSet& dupes = it->second;
-
- if (dupes.size() <= 1)
- continue; // This cookiename/path has no duplicates.
- num_duplicates_found += dupes.size() - 1;
-
- // Since |dupes| is sorted by creation time (descending), the first cookie
- // is the most recent one, so we will keep it. The rest are duplicates.
- dupes.erase(dupes.begin());
-
- LOG(ERROR) << base::StringPrintf(
- "Found %d duplicate cookies for host='%s', "
- "with {name='%s', domain='%s', path='%s'}",
- static_cast<int>(dupes.size()),
- key.c_str(),
- signature.name.c_str(),
- signature.domain.c_str(),
- signature.path.c_str());
-
- // Remove all the cookies identified by |dupes|. It is valid to delete our
- // list of iterators one at a time, since |cookies_| is a multimap (they
- // don't invalidate existing iterators following deletion).
- for (CookieSet::iterator dupes_it = dupes.begin();
- dupes_it != dupes.end();
- ++dupes_it) {
- InternalDeleteCookie(*dupes_it, true /*sync_to_store*/,
- DELETE_COOKIE_DUPLICATE_IN_BACKING_STORE);
- }
- }
- DCHECK_EQ(num_duplicates, num_duplicates_found);
-
- return num_duplicates;
-}
-
-void CookieMonster::SetDefaultCookieableSchemes() {
- // Note: file must be the last scheme.
- static const char* kDefaultCookieableSchemes[] = { "http", "https", "file" };
- int num_schemes = enable_file_scheme_ ? 3 : 2;
- SetCookieableSchemes(kDefaultCookieableSchemes, num_schemes);
-}
-
-void CookieMonster::SetExpiryAndKeyScheme(ExpiryAndKeyScheme key_scheme) {
- DCHECK(!initialized_);
- expiry_and_key_scheme_ = key_scheme;
-}
-
-void CookieMonster::SetClearPersistentStoreOnExit(bool clear_local_store) {
- if(store_)
- store_->SetClearLocalStateOnExit(clear_local_store);
-}
-
-void CookieMonster::FlushStore(Task* completion_task) {
- AutoLock autolock(lock_);
- if (initialized_ && store_)
- store_->Flush(completion_task);
- else if (completion_task)
- MessageLoop::current()->PostTask(FROM_HERE, completion_task);
-}
-
-// The system resolution is not high enough, so we can have multiple
-// set cookies that result in the same system time. When this happens, we
-// increment by one Time unit. Let's hope computers don't get too fast.
-Time CookieMonster::CurrentTime() {
- return std::max(Time::Now(),
- Time::FromInternalValue(last_time_seen_.ToInternalValue() + 1));
-}
-
-// Parse a cookie expiration time. We try to be lenient, but we need to
-// assume some order to distinguish the fields. The basic rules:
-// - The month name must be present and prefix the first 3 letters of the
-// full month name (jan for January, jun for June).
-// - If the year is <= 2 digits, it must occur after the day of month.
-// - The time must be of the format hh:mm:ss.
-// An average cookie expiration will look something like this:
-// Sat, 15-Apr-17 21:01:22 GMT
-Time CookieMonster::ParseCookieTime(const std::string& time_string) {
- static const char* kMonths[] = { "jan", "feb", "mar", "apr", "may", "jun",
- "jul", "aug", "sep", "oct", "nov", "dec" };
- static const int kMonthsLen = arraysize(kMonths);
- // We want to be pretty liberal, and support most non-ascii and non-digit
- // characters as a delimiter. We can't treat : as a delimiter, because it
- // is the delimiter for hh:mm:ss, and we want to keep this field together.
- // We make sure to include - and +, since they could prefix numbers.
- // If the cookie attribute came in in quotes (ex expires="XXX"), the quotes
- // will be preserved, and we will get them here. So we make sure to include
- // quote characters, and also \ for anything that was internally escaped.
- static const char* kDelimiters = "\t !\"#$%&'()*+,-./;<=>?@[\\]^_`{|}~";
-
- Time::Exploded exploded = {0};
-
- StringTokenizer tokenizer(time_string, kDelimiters);
-
- bool found_day_of_month = false;
- bool found_month = false;
- bool found_time = false;
- bool found_year = false;
-
- while (tokenizer.GetNext()) {
- const std::string token = tokenizer.token();
- DCHECK(!token.empty());
- bool numerical = IsAsciiDigit(token[0]);
-
- // String field
- if (!numerical) {
- if (!found_month) {
- for (int i = 0; i < kMonthsLen; ++i) {
- // Match prefix, so we could match January, etc
- if (base::strncasecmp(token.c_str(), kMonths[i], 3) == 0) {
- exploded.month = i + 1;
- found_month = true;
- break;
- }
- }
- } else {
- // If we've gotten here, it means we've already found and parsed our
- // month, and we have another string, which we would expect to be
- // the time zone name. According to the RFC and my experiments with
- // how sites format their expirations, we don't have much of a reason
- // to support timezones. We don't want to ever barf on user input,
- // but this DCHECK should pass for well-formed data.
- // DCHECK(token == "GMT");
- }
- // Numeric field w/ a colon
- } else if (token.find(':') != std::string::npos) {
- if (!found_time &&
-#ifdef COMPILER_MSVC
- sscanf_s(
-#else
- sscanf(
-#endif
- token.c_str(), "%2u:%2u:%2u", &exploded.hour,
- &exploded.minute, &exploded.second) == 3) {
- found_time = true;
- } else {
- // We should only ever encounter one time-like thing. If we're here,
- // it means we've found a second, which shouldn't happen. We keep
- // the first. This check should be ok for well-formed input:
- // NOTREACHED();
- }
- // Numeric field
- } else {
- // Overflow with atoi() is unspecified, so we enforce a max length.
- if (!found_day_of_month && token.length() <= 2) {
- exploded.day_of_month = atoi(token.c_str());
- found_day_of_month = true;
- } else if (!found_year && token.length() <= 5) {
- exploded.year = atoi(token.c_str());
- found_year = true;
- } else {
- // If we're here, it means we've either found an extra numeric field,
- // or a numeric field which was too long. For well-formed input, the
- // following check would be reasonable:
- // NOTREACHED();
- }
- }
- }
-
- if (!found_day_of_month || !found_month || !found_time || !found_year) {
- // We didn't find all of the fields we need. For well-formed input, the
- // following check would be reasonable:
- // NOTREACHED() << "Cookie parse expiration failed: " << time_string;
- return Time();
- }
-
- // Normalize the year to expand abbreviated years to the full year.
- if (exploded.year >= 69 && exploded.year <= 99)
- exploded.year += 1900;
- if (exploded.year >= 0 && exploded.year <= 68)
- exploded.year += 2000;
-
- // If our values are within their correct ranges, we got our time.
- if (exploded.day_of_month >= 1 && exploded.day_of_month <= 31 &&
- exploded.month >= 1 && exploded.month <= 12 &&
- exploded.year >= 1601 && exploded.year <= 30827 &&
- exploded.hour <= 23 && exploded.minute <= 59 && exploded.second <= 59) {
- return Time::FromUTCExploded(exploded);
- }
-
- // One of our values was out of expected range. For well-formed input,
- // the following check would be reasonable:
- // NOTREACHED() << "Cookie exploded expiration failed: " << time_string;
-
- return Time();
-}
-
-bool CookieMonster::DomainIsHostOnly(const std::string& domain_string) {
- return (domain_string.empty() || domain_string[0] != '.');
-}
// Returns the effective TLD+1 for a given host. This only makes sense for http
// and https schemes. For other schemes, the host will be returned unchanged
// (minus any leading period).
-static std::string GetEffectiveDomain(const std::string& scheme,
- const std::string& host) {
+std::string GetEffectiveDomain(const std::string& scheme,
+ const std::string& host) {
if (scheme == "http" || scheme == "https")
return RegistryControlledDomainService::GetDomainAndRegistry(host);
@@ -563,51 +171,14 @@ static std::string GetEffectiveDomain(const std::string& scheme,
return host;
}
-// A wrapper around RegistryControlledDomainService::GetDomainAndRegistry
-// to make clear we're creating a key for our local map. Here and
-// in FindCookiesForHostAndDomain() are the only two places where
-// we need to conditionalize based on key type.
-//
-// Note that this key algorithm explicitly ignores the scheme. This is
-// because when we're entering cookies into the map from the backing store,
-// we in general won't have the scheme at that point.
-// In practical terms, this means that file cookies will be stored
-// in the map either by an empty string or by UNC name (and will be
-// limited by kMaxCookiesPerHost), and extension cookies will be stored
-// based on the single extension id, as the extension id won't have the
-// form of a DNS host and hence GetKey() will return it unchanged.
-//
-// Arguably the right thing to do here is to make the key
-// algorithm dependent on the scheme, and make sure that the scheme is
- // available everywhere the key must be obtained (specifically at backing
-// store load time). This would require either changing the backing store
-// database schema to include the scheme (far more trouble than it's worth), or
-// separating out file cookies into their own CookieMonster instance and
-// thus restricting each scheme to a single cookie monster (which might
-// be worth it, but is still too much trouble to solve what is currently a
-// non-problem).
-std::string CookieMonster::GetKey(const std::string& domain) const {
- if (expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN)
- return domain;
-
- std::string effective_domain(
- RegistryControlledDomainService::GetDomainAndRegistry(domain));
- if (effective_domain.empty())
- effective_domain = domain;
-
- if (!effective_domain.empty() && effective_domain[0] == '.')
- return effective_domain.substr(1);
- return effective_domain;
-}
-
// Determine the actual cookie domain based on the domain string passed
// (if any) and the URL from which the cookie came.
// On success returns true, and sets cookie_domain to either a
// -host cookie domain (ex: "google.com")
// -domain cookie domain (ex: ".google.com")
-static bool GetCookieDomainWithString(const GURL& url,
- const std::string& domain_string,
- std::string* result) {
+bool GetCookieDomainWithString(const GURL& url,
+ const std::string& domain_string,
+ std::string* result) {
const std::string url_host(url.host());
// If no domain was specified in the domain string, default to a host cookie.
@@ -659,17 +230,17 @@ static bool GetCookieDomainWithString(const GURL& url,
}
// Determine the cookie domain to use for setting the specified cookie.
-static bool GetCookieDomain(const GURL& url,
- const CookieMonster::ParsedCookie& pc,
- std::string* result) {
+bool GetCookieDomain(const GURL& url,
+ const CookieMonster::ParsedCookie& pc,
+ std::string* result) {
std::string domain_string;
if (pc.HasDomain())
domain_string = pc.Domain();
return GetCookieDomainWithString(url, domain_string, result);
}
-static std::string CanonPathWithString(const GURL& url,
- const std::string& path_string) {
+std::string CanonPathWithString(const GURL& url,
+ const std::string& path_string) {
// The RFC says the path should be a prefix of the current URL path.
// However, Mozilla allows you to set any path for compatibility with
// broken websites. We unfortunately will mimic this behavior. We try
@@ -698,16 +269,16 @@ static std::string CanonPathWithString(const GURL& url,
return url_path.substr(0, idx);
}
-static std::string CanonPath(const GURL& url,
- const CookieMonster::ParsedCookie& pc) {
+std::string CanonPath(const GURL& url,
+ const CookieMonster::ParsedCookie& pc) {
std::string path_string;
if (pc.HasPath())
path_string = pc.Path();
return CanonPathWithString(url, path_string);
}
-static Time CanonExpirationInternal(const CookieMonster::ParsedCookie& pc,
- const Time& current) {
+Time CanonExpirationInternal(const CookieMonster::ParsedCookie& pc,
+ const Time& current) {
// First, try the Max-Age attribute.
uint64 max_age = 0;
if (pc.HasMaxAge() &&
@@ -728,9 +299,9 @@ static Time CanonExpirationInternal(const CookieMonster::ParsedCookie& pc,
return Time();
}
-static Time CanonExpiration(const CookieMonster::ParsedCookie& pc,
- const Time& current,
- const CookieOptions& options) {
+Time CanonExpiration(const CookieMonster::ParsedCookie& pc,
+ const Time& current,
+ const CookieOptions& options) {
Time expiration_time = CanonExpirationInternal(pc, current);
if (options.force_session()) {
@@ -743,106 +314,195 @@ static Time CanonExpiration(const CookieMonster::ParsedCookie& pc,
return expiration_time;
}
-bool CookieMonster::HasCookieableScheme(const GURL& url) {
- lock_.AssertAcquired();
+// Helper for GarbageCollection. If |cookie_its->size() > num_max|, remove the
+// |num_max - num_purge| most recently accessed cookies from cookie_its.
+// (In other words, leave the entries that are candidates for
+// eviction in cookie_its.) The cookies returned will be in order sorted by
+// access time, least recently accessed first. The access time of the least
+// recently accessed entry not returned will be placed in
+// |*lra_removed| if that pointer is set. FindLeastRecentlyAccessed
+// returns false if no manipulation is done (because the list size is less
+// than num_max), true otherwise.
+bool FindLeastRecentlyAccessed(
+ size_t num_max,
+ size_t num_purge,
+ Time* lra_removed,
+ std::vector<CookieMonster::CookieMap::iterator>* cookie_its) {
+ DCHECK_LE(num_purge, num_max);
+ if (cookie_its->size() > num_max) {
+ VLOG(kVlogGarbageCollection)
+ << "FindLeastRecentlyAccessed() Deep Garbage Collect.";
+ num_purge += cookie_its->size() - num_max;
+ DCHECK_GT(cookie_its->size(), num_purge);
- // Make sure the request is on a cookie-able url scheme.
- for (size_t i = 0; i < cookieable_schemes_.size(); ++i) {
- // We matched a scheme.
- if (url.SchemeIs(cookieable_schemes_[i].c_str())) {
- // We've matched a supported scheme.
- return true;
- }
+ // Add 1 so that we can get the last time left in the store.
+ std::partial_sort(cookie_its->begin(), cookie_its->begin() + num_purge + 1,
+ cookie_its->end(), LRUCookieSorter);
+ *lra_removed =
+ (*(cookie_its->begin() + num_purge))->second->LastAccessDate();
+ cookie_its->erase(cookie_its->begin() + num_purge, cookie_its->end());
+ return true;
}
-
- // The scheme didn't match any in our whitelist.
- VLOG(kVlogPerCookieMonster) << "WARNING: Unsupported cookie scheme: "
- << url.scheme();
return false;
}
-void CookieMonster::SetCookieableSchemes(
- const char* schemes[], size_t num_schemes) {
- AutoLock autolock(lock_);
+} // namespace
- // Cookieable Schemes must be set before first use of function.
- DCHECK(!initialized_);
+// static
+bool CookieMonster::enable_file_scheme_ = false;
- cookieable_schemes_.clear();
- cookieable_schemes_.insert(cookieable_schemes_.end(),
- schemes, schemes + num_schemes);
+CookieMonster::CookieMonster(PersistentCookieStore* store, Delegate* delegate)
+ : initialized_(false),
+ expiry_and_key_scheme_(expiry_and_key_default_),
+ store_(store),
+ last_access_threshold_(
+ TimeDelta::FromSeconds(kDefaultAccessUpdateThresholdSeconds)),
+ delegate_(delegate),
+ last_statistic_record_time_(Time::Now()) {
+ InitializeHistograms();
+ SetDefaultCookieableSchemes();
}
-bool CookieMonster::SetCookieWithCreationTimeAndOptions(
- const GURL& url,
- const std::string& cookie_line,
- const Time& creation_time_or_null,
- const CookieOptions& options) {
- lock_.AssertAcquired();
+CookieMonster::CookieMonster(PersistentCookieStore* store,
+ Delegate* delegate,
+ int last_access_threshold_milliseconds)
+ : initialized_(false),
+ expiry_and_key_scheme_(expiry_and_key_default_),
+ store_(store),
+ last_access_threshold_(base::TimeDelta::FromMilliseconds(
+ last_access_threshold_milliseconds)),
+ delegate_(delegate),
+ last_statistic_record_time_(base::Time::Now()) {
+ InitializeHistograms();
+ SetDefaultCookieableSchemes();
+}
- VLOG(kVlogSetCookies) << "SetCookie() line: " << cookie_line;
+// Parse a cookie expiration time. We try to be lenient, but we need to
+// assume some order to distinguish the fields. The basic rules:
+// - The month name must be present and prefix the first 3 letters of the
+// full month name (jan for January, jun for June).
+// - If the year is <= 2 digits, it must occur after the day of month.
+// - The time must be of the format hh:mm:ss.
+// An average cookie expiration will look something like this:
+// Sat, 15-Apr-17 21:01:22 GMT
+Time CookieMonster::ParseCookieTime(const std::string& time_string) {
+ static const char* kMonths[] = { "jan", "feb", "mar", "apr", "may", "jun",
+ "jul", "aug", "sep", "oct", "nov", "dec" };
+ static const int kMonthsLen = arraysize(kMonths);
+ // We want to be pretty liberal, and support most non-ascii and non-digit
+ // characters as a delimiter. We can't treat : as a delimiter, because it
+ // is the delimiter for hh:mm:ss, and we want to keep this field together.
+ // We make sure to include - and +, since they could prefix numbers.
+ // If the cookie attribute came in in quotes (ex expires="XXX"), the quotes
+ // will be preserved, and we will get them here. So we make sure to include
+ // quote characters, and also \ for anything that was internally escaped.
+ static const char* kDelimiters = "\t !\"#$%&'()*+,-./;<=>?@[\\]^_`{|}~";
- Time creation_time = creation_time_or_null;
- if (creation_time.is_null()) {
- creation_time = CurrentTime();
- last_time_seen_ = creation_time;
- }
+ Time::Exploded exploded = {0};
- // Parse the cookie.
- ParsedCookie pc(cookie_line);
+ StringTokenizer tokenizer(time_string, kDelimiters);
- if (!pc.IsValid()) {
- VLOG(kVlogSetCookies) << "WARNING: Couldn't parse cookie";
- return false;
- }
+ bool found_day_of_month = false;
+ bool found_month = false;
+ bool found_time = false;
+ bool found_year = false;
- if (options.exclude_httponly() && pc.IsHttpOnly()) {
- VLOG(kVlogSetCookies) << "SetCookie() not setting httponly cookie";
- return false;
- }
+ while (tokenizer.GetNext()) {
+ const std::string token = tokenizer.token();
+ DCHECK(!token.empty());
+ bool numerical = IsAsciiDigit(token[0]);
- std::string cookie_domain;
- if (!GetCookieDomain(url, pc, &cookie_domain)) {
- return false;
+ // String field
+ if (!numerical) {
+ if (!found_month) {
+ for (int i = 0; i < kMonthsLen; ++i) {
+ // Match prefix, so we could match January, etc
+ if (base::strncasecmp(token.c_str(), kMonths[i], 3) == 0) {
+ exploded.month = i + 1;
+ found_month = true;
+ break;
+ }
+ }
+ } else {
+ // If we've gotten here, it means we've already found and parsed our
+ // month, and we have another string, which we would expect to be
+ // the time zone name. According to the RFC and my experiments with
+ // how sites format their expirations, we don't have much of a reason
+ // to support timezones. We don't want to ever barf on user input,
+ // but this DCHECK should pass for well-formed data.
+ // DCHECK(token == "GMT");
+ }
+ // Numeric field w/ a colon
+ } else if (token.find(':') != std::string::npos) {
+ if (!found_time &&
+#ifdef COMPILER_MSVC
+ sscanf_s(
+#else
+ sscanf(
+#endif
+ token.c_str(), "%2u:%2u:%2u", &exploded.hour,
+ &exploded.minute, &exploded.second) == 3) {
+ found_time = true;
+ } else {
+ // We should only ever encounter one time-like thing. If we're here,
+ // it means we've found a second, which shouldn't happen. We keep
+ // the first. This check should be ok for well-formed input:
+ // NOTREACHED();
+ }
+ // Numeric field
+ } else {
+ // Overflow with atoi() is unspecified, so we enforce a max length.
+ if (!found_day_of_month && token.length() <= 2) {
+ exploded.day_of_month = atoi(token.c_str());
+ found_day_of_month = true;
+ } else if (!found_year && token.length() <= 5) {
+ exploded.year = atoi(token.c_str());
+ found_year = true;
+ } else {
+ // If we're here, it means we've either found an extra numeric field,
+ // or a numeric field which was too long. For well-formed input, the
+ // following check would be reasonable:
+ // NOTREACHED();
+ }
+ }
}
- std::string cookie_path = CanonPath(url, pc);
-
- scoped_ptr<CanonicalCookie> cc;
- Time cookie_expires = CanonExpiration(pc, creation_time, options);
+ if (!found_day_of_month || !found_month || !found_time || !found_year) {
+ // We didn't find all of the fields we need. For well-formed input, the
+ // following check would be reasonable:
+ // NOTREACHED() << "Cookie parse expiration failed: " << time_string;
+ return Time();
+ }
- cc.reset(new CanonicalCookie(pc.Name(), pc.Value(), cookie_domain,
- cookie_path,
- pc.IsSecure(), pc.IsHttpOnly(),
- creation_time, creation_time,
- !cookie_expires.is_null(), cookie_expires));
+ // Normalize the year to expand abbreviated years to the full year.
+ if (exploded.year >= 69 && exploded.year <= 99)
+ exploded.year += 1900;
+ if (exploded.year >= 0 && exploded.year <= 68)
+ exploded.year += 2000;
- if (!cc.get()) {
- VLOG(kVlogSetCookies) << "WARNING: Failed to allocate CanonicalCookie";
- return false;
+ // If our values are within their correct ranges, we got our time.
+ if (exploded.day_of_month >= 1 && exploded.day_of_month <= 31 &&
+ exploded.month >= 1 && exploded.month <= 12 &&
+ exploded.year >= 1601 && exploded.year <= 30827 &&
+ exploded.hour <= 23 && exploded.minute <= 59 && exploded.second <= 59) {
+ return Time::FromUTCExploded(exploded);
}
- return SetCanonicalCookie(&cc, creation_time, options);
-}
-bool CookieMonster::SetCookieWithCreationTime(const GURL& url,
- const std::string& cookie_line,
- const base::Time& creation_time) {
- AutoLock autolock(lock_);
+ // One of our values was out of expected range. For well-formed input,
+ // the following check would be reasonable:
+ // NOTREACHED() << "Cookie exploded expiration failed: " << time_string;
- if (!HasCookieableScheme(url)) {
- return false;
- }
+ return Time();
+}
- InitIfNecessary();
- return SetCookieWithCreationTimeAndOptions(url, cookie_line, creation_time,
- CookieOptions());
+bool CookieMonster::DomainIsHostOnly(const std::string& domain_string) {
+ return (domain_string.empty() || domain_string[0] != '.');
}
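
Given the parsing rules documented with ParseCookieTime() above, a usage sketch; the wrapper function is hypothetical, and the year expansion follows the normalization inside the parser (two-digit years 0-68 map to 2000-2068):

void SketchParseCookieTime() {
  // "Sat, 15-Apr-17 21:01:22 GMT" parses to 2017-04-15 21:01:22 UTC.
  base::Time expiry =
      CookieMonster::ParseCookieTime("Sat, 15-Apr-17 21:01:22 GMT");
  DCHECK(!expiry.is_null());

  // A string missing the month, day of month, year, or hh:mm:ss time yields
  // a null Time(), which callers treat as "no parsable expiration".
  DCHECK(CookieMonster::ParseCookieTime("not a date").is_null());
}
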
bool CookieMonster::SetCookieWithDetails(
const GURL& url, const std::string& name, const std::string& value,
const std::string& domain, const std::string& path,
const base::Time& expiration_time, bool secure, bool http_only) {
-
AutoLock autolock(lock_);
if (!HasCookieableScheme(url))
@@ -867,285 +527,63 @@ bool CookieMonster::SetCookieWithDetails(
return SetCanonicalCookie(&cc, creation_time, options);
}
-bool CookieMonster::SetCanonicalCookie(scoped_ptr<CanonicalCookie>* cc,
- const Time& creation_time,
- const CookieOptions& options) {
- const std::string key(GetKey((*cc)->Domain()));
- if (DeleteAnyEquivalentCookie(key, **cc, options.exclude_httponly())) {
- VLOG(kVlogSetCookies) << "SetCookie() not clobbering httponly cookie";
- return false;
- }
-
- VLOG(kVlogSetCookies) << "SetCookie() key: " << key << " cc: "
- << (*cc)->DebugString();
-
- // Realize that we might be setting an expired cookie, and the only point
- // was to delete the cookie which we've already done.
- if (!(*cc)->IsExpired(creation_time)) {
- // See InitializeHistograms() for details.
- histogram_expiration_duration_minutes_->Add(
- ((*cc)->ExpiryDate() - creation_time).InMinutes());
- InternalInsertCookie(key, cc->release(), true);
- }
-
- // We assume that hopefully setting a cookie will be less common than
- // querying a cookie. Since setting a cookie can put us over our limits,
- // make sure that we garbage collect... We can also make the assumption that
- // if a cookie was set, in the common case it will be used soon after,
- // and we will purge the expired cookies in GetCookies().
- GarbageCollect(creation_time, key);
-
- return true;
-}
-
-void CookieMonster::InternalInsertCookie(const std::string& key,
- CanonicalCookie* cc,
- bool sync_to_store) {
- lock_.AssertAcquired();
-
- if (cc->IsPersistent() && store_ && sync_to_store)
- store_->AddCookie(*cc);
- cookies_.insert(CookieMap::value_type(key, cc));
- if (delegate_.get())
- delegate_->OnCookieChanged(*cc, false);
-}
-
-void CookieMonster::InternalUpdateCookieAccessTime(CanonicalCookie* cc,
- const Time& current) {
- lock_.AssertAcquired();
-
- // Based off the Mozilla code. When a cookie has been accessed recently,
- // don't bother updating its access time again. This reduces the number of
- // updates we do during pageload, which in turn reduces the chance our storage
- // backend will hit its batch thresholds and be forced to update.
- if ((current - cc->LastAccessDate()) < last_access_threshold_)
- return;
-
- // See InitializeHistograms() for details.
- histogram_between_access_interval_minutes_->Add(
- (current - cc->LastAccessDate()).InMinutes());
-
- cc->SetLastAccessDate(current);
- if (cc->IsPersistent() && store_)
- store_->UpdateCookieAccessTime(*cc);
-}
-
-void CookieMonster::InternalDeleteCookie(CookieMap::iterator it,
- bool sync_to_store,
- DeletionCause deletion_cause) {
- lock_.AssertAcquired();
-
- // See InitializeHistograms() for details.
- if (deletion_cause != DELETE_COOKIE_DONT_RECORD)
- histogram_cookie_deletion_cause_->Add(deletion_cause);
-
- CanonicalCookie* cc = it->second;
- VLOG(kVlogSetCookies) << "InternalDeleteCookie() cc: " << cc->DebugString();
-
- if (cc->IsPersistent() && store_ && sync_to_store)
- store_->DeleteCookie(*cc);
- if (delegate_.get())
- delegate_->OnCookieChanged(*cc, true);
- cookies_.erase(it);
- delete cc;
-}
-
-bool CookieMonster::DeleteAnyEquivalentCookie(const std::string& key,
- const CanonicalCookie& ecc,
- bool skip_httponly) {
- lock_.AssertAcquired();
-
- bool found_equivalent_cookie = false;
- bool skipped_httponly = false;
- for (CookieMapItPair its = cookies_.equal_range(key);
- its.first != its.second; ) {
- CookieMap::iterator curit = its.first;
- CanonicalCookie* cc = curit->second;
- ++its.first;
- if (ecc.IsEquivalent(*cc)) {
- // We should never have more than one equivalent cookie, since they should
- // overwrite each other.
- CHECK(!found_equivalent_cookie) <<
- "Duplicate equivalent cookies found, cookie store is corrupted.";
- if (skip_httponly && cc->IsHttpOnly()) {
- skipped_httponly = true;
- } else {
- InternalDeleteCookie(curit, true, DELETE_COOKIE_OVERWRITE);
- }
- found_equivalent_cookie = true;
- }
- }
- return skipped_httponly;
-}
-
-static bool LRUCookieSorter(const CookieMonster::CookieMap::iterator& it1,
- const CookieMonster::CookieMap::iterator& it2) {
- // Cookies accessed less recently should be deleted first.
- if (it1->second->LastAccessDate() != it2->second->LastAccessDate())
- return it1->second->LastAccessDate() < it2->second->LastAccessDate();
+CookieList CookieMonster::GetAllCookies() {
+ AutoLock autolock(lock_);
+ InitIfNecessary();
- // In rare cases we might have two cookies with identical last access times.
- // To preserve the stability of the sort, in these cases prefer to delete
- // older cookies over newer ones. CreationDate() is guaranteed to be unique.
- return it1->second->CreationDate() < it2->second->CreationDate();
-}
+ // This function is being called to scrape the cookie list for management UI
+ // or similar. We shouldn't show expired cookies in this list since it will
+ // just be confusing to users, and this function is called rarely enough (and
+ // is already slow enough) that it's OK to take the time to garbage collect
+ // the expired cookies now.
+ //
+ // Note that this does not prune cookies to be below our limits (if we've
+ // exceeded them) the way that calling GarbageCollect() would.
+ GarbageCollectExpired(Time::Now(),
+ CookieMapItPair(cookies_.begin(), cookies_.end()),
+ NULL);
-// Helper for GarbageCollection. If |cookie_its->size() > num_max|, remove the
-// |num_max - num_purge| most recently accessed cookies from cookie_its.
-// (In other words, leave the entries that are candidates for
-// eviction in cookie_its.) The cookies returned will be in order sorted by
-// access time, least recently accessed first. The access time of the least
-// recently accessed entry not returned will be placed in
-// |*lra_removed| if that pointer is set. FindLeastRecentlyAccessed
-// returns false if no manipulation is done (because the list size is less
-// than num_max), true otherwise.
-static bool FindLeastRecentlyAccessed(
- size_t num_max,
- size_t num_purge,
- Time* lra_removed,
- std::vector<CookieMonster::CookieMap::iterator>* cookie_its) {
- DCHECK_LE(num_purge, num_max);
- if (cookie_its->size() > num_max) {
- VLOG(kVlogGarbageCollection)
- << "FindLeastRecentlyAccessed() Deep Garbage Collect.";
- num_purge += cookie_its->size() - num_max;
- DCHECK_GT(cookie_its->size(), num_purge);
+ // Copy the CanonicalCookie pointers from the map so that we can use the same
+ // sorter as elsewhere, then copy the result out.
+ std::vector<CanonicalCookie*> cookie_ptrs;
+ cookie_ptrs.reserve(cookies_.size());
+ for (CookieMap::iterator it = cookies_.begin(); it != cookies_.end(); ++it)
+ cookie_ptrs.push_back(it->second);
+ std::sort(cookie_ptrs.begin(), cookie_ptrs.end(), CookieSorter);
- // Add 1 so that we can get the last time left in the store.
- std::partial_sort(cookie_its->begin(), cookie_its->begin() + num_purge + 1,
- cookie_its->end(), LRUCookieSorter);
- *lra_removed =
- (*(cookie_its->begin() + num_purge))->second->LastAccessDate();
- cookie_its->erase(cookie_its->begin() + num_purge, cookie_its->end());
- return true;
- }
- return false;
-}
+ CookieList cookie_list;
+ cookie_list.reserve(cookie_ptrs.size());
+ for (std::vector<CanonicalCookie*>::const_iterator it = cookie_ptrs.begin();
+ it != cookie_ptrs.end(); ++it)
+ cookie_list.push_back(**it);
-int CookieMonster::GarbageCollectDeleteList(
- const Time& current,
- const Time& keep_accessed_after,
- DeletionCause cause,
- std::vector<CookieMap::iterator>& cookie_its) {
- int num_deleted = 0;
- for (std::vector<CookieMap::iterator>::iterator it = cookie_its.begin();
- it != cookie_its.end(); it++) {
- if (keep_accessed_after.is_null() ||
- (*it)->second->LastAccessDate() < keep_accessed_after) {
- histogram_evicted_last_access_minutes_->Add(
- (current - (*it)->second->LastAccessDate()).InMinutes());
- InternalDeleteCookie((*it), true, cause);
- num_deleted++;
- }
- }
- return num_deleted;
+ return cookie_list;
}
-// Domain expiry behavior is unchanged by key/expiry scheme (the
-// meaning of the key is different, but that's not visible to this
-// routine). Global garbage collection is dependent on key/expiry
-// scheme in that recently touched cookies are not saved if
-// expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN.
-int CookieMonster::GarbageCollect(const Time& current,
- const std::string& key) {
- lock_.AssertAcquired();
-
- int num_deleted = 0;
-
- // Collect garbage for this key.
- if (cookies_.count(key) > kDomainMaxCookies) {
- VLOG(kVlogGarbageCollection) << "GarbageCollect() key: " << key;
-
- std::vector<CookieMap::iterator> cookie_its;
- num_deleted += GarbageCollectExpired(
- current, cookies_.equal_range(key), &cookie_its);
- base::Time oldest_removed;
- if (FindLeastRecentlyAccessed(kDomainMaxCookies, kDomainPurgeCookies,
- &oldest_removed, &cookie_its)) {
- // Delete in two passes so we can figure out what we're nuking
- // that would be kept at the global level.
- int num_subject_to_global_purge =
- GarbageCollectDeleteList(
- current,
- Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays),
- DELETE_COOKIE_EVICTED_DOMAIN_PRE_SAFE,
- cookie_its);
- num_deleted += num_subject_to_global_purge;
- // Correct because FindLeastRecentlyAccessed returns a sorted list.
- cookie_its.erase(cookie_its.begin(),
- cookie_its.begin() + num_subject_to_global_purge);
- num_deleted +=
- GarbageCollectDeleteList(
- current,
- Time(),
- DELETE_COOKIE_EVICTED_DOMAIN_POST_SAFE,
- cookie_its);
- }
- }
+CookieList CookieMonster::GetAllCookiesForURLWithOptions(
+ const GURL& url,
+ const CookieOptions& options) {
+ AutoLock autolock(lock_);
+ InitIfNecessary();
- // Collect garbage for everything. With firefox style we want to
- // preserve cookies touched in kSafeFromGlobalPurgeDays, otherwise
- // not.
- if (cookies_.size() > kMaxCookies &&
- (expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN ||
- earliest_access_time_ <
- Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays))) {
- VLOG(kVlogGarbageCollection) << "GarbageCollect() everything";
- std::vector<CookieMap::iterator> cookie_its;
- base::Time oldest_left;
- num_deleted += GarbageCollectExpired(
- current, CookieMapItPair(cookies_.begin(), cookies_.end()),
- &cookie_its);
- if (FindLeastRecentlyAccessed(kMaxCookies, kPurgeCookies,
- &oldest_left, &cookie_its)) {
- Time oldest_safe_cookie(
- expiry_and_key_scheme_ == EKS_KEEP_RECENT_AND_PURGE_ETLDP1 ?
- (Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays)) :
- Time()); // Null time == ignore access time.
- int num_evicted = GarbageCollectDeleteList(
- current,
- oldest_safe_cookie,
- DELETE_COOKIE_EVICTED_GLOBAL,
- cookie_its);
+ std::vector<CanonicalCookie*> cookie_ptrs;
+ FindCookiesForHostAndDomain(url, options, false, &cookie_ptrs);
+ std::sort(cookie_ptrs.begin(), cookie_ptrs.end(), CookieSorter);
- // If no cookies were preserved by the time limit, the global last
- // access is set to the value returned from FindLeastRecentlyAccessed.
- // If the time limit preserved some cookies, we use the last access of
- // the oldest preserved cookie.
- if (num_evicted == static_cast<int>(cookie_its.size())) {
- earliest_access_time_ = oldest_left;
- } else {
- earliest_access_time_ =
- (*(cookie_its.begin() + num_evicted))->second->LastAccessDate();
- }
- num_deleted += num_evicted;
- }
- }
+ CookieList cookies;
+ for (std::vector<CanonicalCookie*>::const_iterator it = cookie_ptrs.begin();
+ it != cookie_ptrs.end(); it++)
+ cookies.push_back(**it);
- return num_deleted;
+ return cookies;
}
-int CookieMonster::GarbageCollectExpired(
- const Time& current,
- const CookieMapItPair& itpair,
- std::vector<CookieMap::iterator>* cookie_its) {
- lock_.AssertAcquired();
-
- int num_deleted = 0;
- for (CookieMap::iterator it = itpair.first, end = itpair.second; it != end;) {
- CookieMap::iterator curit = it;
- ++it;
-
- if (curit->second->IsExpired(current)) {
- InternalDeleteCookie(curit, true, DELETE_COOKIE_EXPIRED);
- ++num_deleted;
- } else if (cookie_its) {
- cookie_its->push_back(curit);
- }
- }
+CookieList CookieMonster::GetAllCookiesForURL(const GURL& url) {
+ CookieOptions options;
+ options.set_include_httponly();
- return num_deleted;
+ return GetAllCookiesForURLWithOptions(url, options);
}
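
A usage sketch of the GetAllCookiesForURL() overload added above; DumpCookiesForURL and |monster| are illustrative names, not part of this change:

void DumpCookiesForURL(CookieMonster* monster, const GURL& url) {
  CookieList cookies = monster->GetAllCookiesForURL(url);
  for (CookieList::const_iterator it = cookies.begin();
       it != cookies.end(); ++it) {
    VLOG(1) << "name=" << it->Name() << " domain=" << it->Domain()
            << " path=" << it->Path();
  }
}
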
int CookieMonster::DeleteAll(bool sync_to_store) {
@@ -1239,14 +677,39 @@ bool CookieMonster::DeleteCanonicalCookie(const CanonicalCookie& cookie) {
return false;
}
-// Mozilla sorts on the path length (longest first), and then it
-// sorts by creation time (oldest first).
-// The RFC says the sort order for the domain attribute is undefined.
-static bool CookieSorter(CookieMonster::CanonicalCookie* cc1,
- CookieMonster::CanonicalCookie* cc2) {
- if (cc1->Path().length() == cc2->Path().length())
- return cc1->CreationDate() < cc2->CreationDate();
- return cc1->Path().length() > cc2->Path().length();
+void CookieMonster::SetCookieableSchemes(
+ const char* schemes[], size_t num_schemes) {
+ AutoLock autolock(lock_);
+
+ // Cookieable Schemes must be set before first use of function.
+ DCHECK(!initialized_);
+
+ cookieable_schemes_.clear();
+ cookieable_schemes_.insert(cookieable_schemes_.end(),
+ schemes, schemes + num_schemes);
+}
+
+void CookieMonster::SetExpiryAndKeyScheme(ExpiryAndKeyScheme key_scheme) {
+ DCHECK(!initialized_);
+ expiry_and_key_scheme_ = key_scheme;
+}
+
+void CookieMonster::SetClearPersistentStoreOnExit(bool clear_local_store) {
+ if(store_)
+ store_->SetClearLocalStateOnExit(clear_local_store);
+}
+
+// static
+void CookieMonster::EnableFileScheme() {
+ enable_file_scheme_ = true;
+}
+
+void CookieMonster::FlushStore(Task* completion_task) {
+ AutoLock autolock(lock_);
+ if (initialized_ && store_)
+ store_->Flush(completion_task);
+ else if (completion_task)
+ MessageLoop::current()->PostTask(FROM_HERE, completion_task);
}
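
A minimal configuration sketch for the setters gathered above; per the DCHECK(!initialized_) checks they must run before the monster is first used. The scoped_refptr ownership, the factory name, and the scheme list are assumptions for illustration:

scoped_refptr<CookieMonster> CreateConfiguredMonster() {
  static const char* kTestSchemes[] = { "http", "https" };
  scoped_refptr<CookieMonster> monster(
      new CookieMonster(NULL /* no persistent store */, NULL /* delegate */));
  monster->SetCookieableSchemes(kTestSchemes, arraysize(kTestSchemes));
  monster->SetExpiryAndKeyScheme(
      CookieMonster::EKS_KEEP_RECENT_AND_PURGE_ETLDP1);
  monster->SetClearPersistentStoreOnExit(false);  // no-op with a NULL store
  return monster;
}
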
bool CookieMonster::SetCookieWithOptions(const GURL& url,
@@ -1336,64 +799,194 @@ CookieMonster* CookieMonster::GetCookieMonster() {
return this;
}
-CookieList CookieMonster::GetAllCookies() {
+CookieMonster::~CookieMonster() {
+ DeleteAll(false);
+}
+
+bool CookieMonster::SetCookieWithCreationTime(const GURL& url,
+ const std::string& cookie_line,
+ const base::Time& creation_time) {
AutoLock autolock(lock_);
+
+ if (!HasCookieableScheme(url)) {
+ return false;
+ }
+
InitIfNecessary();
+ return SetCookieWithCreationTimeAndOptions(url, cookie_line, creation_time,
+ CookieOptions());
+}
- // This function is being called to scrape the cookie list for management UI
- // or similar. We shouldn't show expired cookies in this list since it will
- // just be confusing to users, and this function is called rarely enough (and
- // is already slow enough) that it's OK to take the time to garbage collect
- // the expired cookies now.
- //
- // Note that this does not prune cookies to be below our limits (if we've
- // exceeded them) the way that calling GarbageCollect() would.
- GarbageCollectExpired(Time::Now(),
- CookieMapItPair(cookies_.begin(), cookies_.end()),
- NULL);
+void CookieMonster::InitStore() {
+ DCHECK(store_) << "Store must exist to initialize";
- // Copy the CanonicalCookie pointers from the map so that we can use the same
- // sorter as elsewhere, then copy the result out.
- std::vector<CanonicalCookie*> cookie_ptrs;
- cookie_ptrs.reserve(cookies_.size());
- for (CookieMap::iterator it = cookies_.begin(); it != cookies_.end(); ++it)
- cookie_ptrs.push_back(it->second);
- std::sort(cookie_ptrs.begin(), cookie_ptrs.end(), CookieSorter);
+ TimeTicks beginning_time(TimeTicks::Now());
- CookieList cookie_list;
- cookie_list.reserve(cookie_ptrs.size());
- for (std::vector<CanonicalCookie*>::const_iterator it = cookie_ptrs.begin();
- it != cookie_ptrs.end(); ++it)
- cookie_list.push_back(**it);
+ // Initialize the store and sync in any saved persistent cookies. We don't
+ // care if it's expired, insert it so it can be garbage collected, removed,
+ // and sync'd.
+ std::vector<CanonicalCookie*> cookies;
+ // Reserve space for the maximum amount of cookies a database should have.
+ // This prevents multiple vector growth / copies as we append cookies.
+ cookies.reserve(kMaxCookies);
+ store_->Load(&cookies);
- return cookie_list;
+ // Avoid ever letting cookies with duplicate creation times into the store;
+ // that way we don't have to worry about what sections of code are safe
+ // to call while it's in that state.
+ std::set<int64> creation_times;
+
+ // Presumably later than any access time in the store.
+ Time earliest_access_time;
+
+ for (std::vector<CanonicalCookie*>::const_iterator it = cookies.begin();
+ it != cookies.end(); ++it) {
+ int64 cookie_creation_time = (*it)->CreationDate().ToInternalValue();
+
+ if (creation_times.insert(cookie_creation_time).second) {
+ InternalInsertCookie(GetKey((*it)->Domain()), *it, false);
+ const Time cookie_access_time((*it)->LastAccessDate());
+ if (earliest_access_time.is_null() ||
+ cookie_access_time < earliest_access_time)
+ earliest_access_time = cookie_access_time;
+ } else {
+ LOG(ERROR) << base::StringPrintf("Found cookies with duplicate creation "
+ "times in backing store: "
+ "{name='%s', domain='%s', path='%s'}",
+ (*it)->Name().c_str(),
+ (*it)->Domain().c_str(),
+ (*it)->Path().c_str());
+ // We've been given ownership of the cookie and are throwing it
+ // away; reclaim the space.
+ delete (*it);
+ }
+ }
+ earliest_access_time_= earliest_access_time;
+
+ // After importing cookies from the PersistentCookieStore, verify that
+ // none of our other constraints are violated.
+ //
+ // In particular, the backing store might have given us duplicate cookies.
+ EnsureCookiesMapIsValid();
+
+ histogram_time_load_->AddTime(TimeTicks::Now() - beginning_time);
}
-CookieList CookieMonster::GetAllCookiesForURLWithOptions(
- const GURL& url,
- const CookieOptions& options) {
- AutoLock autolock(lock_);
- InitIfNecessary();
+void CookieMonster::EnsureCookiesMapIsValid() {
+ lock_.AssertAcquired();
- std::vector<CanonicalCookie*> cookie_ptrs;
- FindCookiesForHostAndDomain(url, options, false, &cookie_ptrs);
- std::sort(cookie_ptrs.begin(), cookie_ptrs.end(), CookieSorter);
+ int num_duplicates_trimmed = 0;
- CookieList cookies;
- for (std::vector<CanonicalCookie*>::const_iterator it = cookie_ptrs.begin();
- it != cookie_ptrs.end(); it++)
- cookies.push_back(**it);
+  // Iterate through all of the cookies, grouped by host.
+ CookieMap::iterator prev_range_end = cookies_.begin();
+ while (prev_range_end != cookies_.end()) {
+ CookieMap::iterator cur_range_begin = prev_range_end;
+ const std::string key = cur_range_begin->first; // Keep a copy.
+ CookieMap::iterator cur_range_end = cookies_.upper_bound(key);
+ prev_range_end = cur_range_end;
- return cookies;
+ // Ensure no equivalent cookies for this host.
+ num_duplicates_trimmed +=
+ TrimDuplicateCookiesForKey(key, cur_range_begin, cur_range_end);
+ }
+
+ // Record how many duplicates were found in the database.
+ // See InitializeHistograms() for details.
+  histogram_number_duplicate_db_cookies_->Add(num_duplicates_trimmed);
}
-CookieList CookieMonster::GetAllCookiesForURL(const GURL& url) {
- CookieOptions options;
- options.set_include_httponly();
+int CookieMonster::TrimDuplicateCookiesForKey(
+ const std::string& key,
+ CookieMap::iterator begin,
+ CookieMap::iterator end) {
+ lock_.AssertAcquired();
- return GetAllCookiesForURLWithOptions(url, options);
+  // Set of cookies ordered by creation time, most recent first.
+ typedef std::set<CookieMap::iterator, OrderByCreationTimeDesc> CookieSet;
+
+ // Helper map we populate to find the duplicates.
+ typedef std::map<CookieSignature, CookieSet> EquivalenceMap;
+ EquivalenceMap equivalent_cookies;
+
+ // The number of duplicate cookies that have been found.
+ int num_duplicates = 0;
+
+ // Iterate through all of the cookies in our range, and insert them into
+ // the equivalence map.
+ for (CookieMap::iterator it = begin; it != end; ++it) {
+ DCHECK_EQ(key, it->first);
+ CanonicalCookie* cookie = it->second;
+
+ CookieSignature signature(cookie->Name(), cookie->Domain(),
+ cookie->Path());
+ CookieSet& set = equivalent_cookies[signature];
+
+ // We found a duplicate!
+ if (!set.empty())
+ num_duplicates++;
+
+ // We save the iterator into |cookies_| rather than the actual cookie
+ // pointer, since we may need to delete it later.
+ bool insert_success = set.insert(it).second;
+ DCHECK(insert_success) <<
+ "Duplicate creation times found in duplicate cookie name scan.";
+ }
+
+ // If there were no duplicates, we are done!
+ if (num_duplicates == 0)
+ return 0;
+
+  // Make sure the pass below finds exactly the duplicates counted above.
+ int num_duplicates_found = 0;
+
+ // Otherwise, delete all the duplicate cookies, both from our in-memory store
+ // and from the backing store.
+ for (EquivalenceMap::iterator it = equivalent_cookies.begin();
+ it != equivalent_cookies.end();
+ ++it) {
+ const CookieSignature& signature = it->first;
+ CookieSet& dupes = it->second;
+
+ if (dupes.size() <= 1)
+ continue; // This cookiename/path has no duplicates.
+ num_duplicates_found += dupes.size() - 1;
+
+    // Since |dupes| is sorted by creation time (descending), the first cookie
+ // is the most recent one, so we will keep it. The rest are duplicates.
+ dupes.erase(dupes.begin());
+
+ LOG(ERROR) << base::StringPrintf(
+ "Found %d duplicate cookies for host='%s', "
+ "with {name='%s', domain='%s', path='%s'}",
+ static_cast<int>(dupes.size()),
+ key.c_str(),
+ signature.name.c_str(),
+ signature.domain.c_str(),
+ signature.path.c_str());
+
+    // Remove all the cookies identified by |dupes|.  It is valid to delete
+    // our list of iterators one at a time, since |cookies_| is a multimap
+    // (erasing an element does not invalidate iterators to the others).
+ for (CookieSet::iterator dupes_it = dupes.begin();
+ dupes_it != dupes.end();
+ ++dupes_it) {
+ InternalDeleteCookie(*dupes_it, true /*sync_to_store*/,
+ DELETE_COOKIE_DUPLICATE_IN_BACKING_STORE);
+ }
+ }
+ DCHECK_EQ(num_duplicates, num_duplicates_found);
+
+ return num_duplicates;
}
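The trimming pass above relies on OrderByCreationTimeDesc, which is defined elsewhere in this file and not shown in this hunk. A minimal sketch of a comparator providing the ordering the code depends on (iterators sorted so the most recently created cookie comes first, which is what makes dupes.begin() the one that is kept):

struct OrderByCreationTimeDesc {
  bool operator()(const CookieMonster::CookieMap::iterator& it1,
                  const CookieMonster::CookieMap::iterator& it2) const {
    // Newest creation time sorts first; ties are not expected here because
    // InitStore() refuses cookies with duplicate creation times.
    return it1->second->CreationDate() > it2->second->CreationDate();
  }
};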
+void CookieMonster::SetDefaultCookieableSchemes() {
+  // Note: file must be the last scheme, since it is only included when
+  // enable_file_scheme_ is set (see |num_schemes| below).
+ static const char* kDefaultCookieableSchemes[] = { "http", "https", "file" };
+ int num_schemes = enable_file_scheme_ ? 3 : 2;
+ SetCookieableSchemes(kDefaultCookieableSchemes, num_schemes);
+}
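Because |num_schemes| only ever includes the trailing "file" entry when enable_file_scheme_ is set, file cookies are off by default. Per the header comment later in this patch, EnableFileScheme() is test-only and must be called before the first CookieMonster is constructed. A hedged sketch of that usage, assuming the two-argument (store, delegate) constructor:

// Test-only sketch: enable "file" cookies before any CookieMonster exists,
// so SetDefaultCookieableSchemes() picks up all three schemes.
CookieMonster::EnableFileScheme();
scoped_refptr<CookieMonster> cm(new CookieMonster(NULL, NULL));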
+
+
void CookieMonster::FindCookiesForHostAndDomain(
const GURL& url,
const CookieOptions& options,
@@ -1494,6 +1087,349 @@ void CookieMonster::FindCookiesForKey(
}
}
+bool CookieMonster::DeleteAnyEquivalentCookie(const std::string& key,
+ const CanonicalCookie& ecc,
+ bool skip_httponly) {
+ lock_.AssertAcquired();
+
+ bool found_equivalent_cookie = false;
+ bool skipped_httponly = false;
+ for (CookieMapItPair its = cookies_.equal_range(key);
+ its.first != its.second; ) {
+ CookieMap::iterator curit = its.first;
+ CanonicalCookie* cc = curit->second;
+ ++its.first;
+
+ if (ecc.IsEquivalent(*cc)) {
+ // We should never have more than one equivalent cookie, since they should
+ // overwrite each other.
+ CHECK(!found_equivalent_cookie) <<
+ "Duplicate equivalent cookies found, cookie store is corrupted.";
+ if (skip_httponly && cc->IsHttpOnly()) {
+ skipped_httponly = true;
+ } else {
+ InternalDeleteCookie(curit, true, DELETE_COOKIE_OVERWRITE);
+ }
+ found_equivalent_cookie = true;
+ }
+ }
+ return skipped_httponly;
+}
+
+void CookieMonster::InternalInsertCookie(const std::string& key,
+ CanonicalCookie* cc,
+ bool sync_to_store) {
+ lock_.AssertAcquired();
+
+ if (cc->IsPersistent() && store_ && sync_to_store)
+ store_->AddCookie(*cc);
+ cookies_.insert(CookieMap::value_type(key, cc));
+ if (delegate_.get())
+ delegate_->OnCookieChanged(*cc, false);
+}
+
+bool CookieMonster::SetCookieWithCreationTimeAndOptions(
+ const GURL& url,
+ const std::string& cookie_line,
+ const Time& creation_time_or_null,
+ const CookieOptions& options) {
+ lock_.AssertAcquired();
+
+ VLOG(kVlogSetCookies) << "SetCookie() line: " << cookie_line;
+
+ Time creation_time = creation_time_or_null;
+ if (creation_time.is_null()) {
+ creation_time = CurrentTime();
+ last_time_seen_ = creation_time;
+ }
+
+ // Parse the cookie.
+ ParsedCookie pc(cookie_line);
+
+ if (!pc.IsValid()) {
+ VLOG(kVlogSetCookies) << "WARNING: Couldn't parse cookie";
+ return false;
+ }
+
+ if (options.exclude_httponly() && pc.IsHttpOnly()) {
+ VLOG(kVlogSetCookies) << "SetCookie() not setting httponly cookie";
+ return false;
+ }
+
+ std::string cookie_domain;
+ if (!GetCookieDomain(url, pc, &cookie_domain)) {
+ return false;
+ }
+
+ std::string cookie_path = CanonPath(url, pc);
+
+ scoped_ptr<CanonicalCookie> cc;
+ Time cookie_expires = CanonExpiration(pc, creation_time, options);
+
+ cc.reset(new CanonicalCookie(pc.Name(), pc.Value(), cookie_domain,
+ cookie_path,
+ pc.IsSecure(), pc.IsHttpOnly(),
+ creation_time, creation_time,
+ !cookie_expires.is_null(), cookie_expires));
+
+ if (!cc.get()) {
+ VLOG(kVlogSetCookies) << "WARNING: Failed to allocate CanonicalCookie";
+ return false;
+ }
+ return SetCanonicalCookie(&cc, creation_time, options);
+}
+
+bool CookieMonster::SetCanonicalCookie(scoped_ptr<CanonicalCookie>* cc,
+ const Time& creation_time,
+ const CookieOptions& options) {
+ const std::string key(GetKey((*cc)->Domain()));
+ if (DeleteAnyEquivalentCookie(key, **cc, options.exclude_httponly())) {
+ VLOG(kVlogSetCookies) << "SetCookie() not clobbering httponly cookie";
+ return false;
+ }
+
+ VLOG(kVlogSetCookies) << "SetCookie() key: " << key << " cc: "
+ << (*cc)->DebugString();
+
+  // Note that we might be setting an already-expired cookie; in that case the
+  // only point of the call was to delete the equivalent cookie, which has
+  // already been done above.
+ if (!(*cc)->IsExpired(creation_time)) {
+ // See InitializeHistograms() for details.
+ histogram_expiration_duration_minutes_->Add(
+ ((*cc)->ExpiryDate() - creation_time).InMinutes());
+ InternalInsertCookie(key, cc->release(), true);
+ }
+
+  // We assume that setting a cookie is less common than querying one. Since
+  // setting a cookie can put us over our limits, garbage collect here. We can
+  // also assume that a freshly set cookie will, in the common case, be used
+  // soon after, so expired cookies get purged in GetCookies().
+ GarbageCollect(creation_time, key);
+
+ return true;
+}
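As the comment above notes, setting a cookie that is already expired only deletes the equivalent cookie and stores nothing. A small sketch of how a caller can rely on that through the public SetCookieWithOptions() API (URL and cookie name are hypothetical):

// Overwrites (deletes) any existing "SID" cookie for this key; because the
// line is already expired, nothing is inserted afterwards.
CookieOptions options;
cm->SetCookieWithOptions(GURL("http://www.example.com/"),
                         "SID=; expires=Thu, 01 Jan 1970 00:00:00 GMT",
                         options);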
+
+void CookieMonster::InternalUpdateCookieAccessTime(CanonicalCookie* cc,
+ const Time& current) {
+ lock_.AssertAcquired();
+
+ // Based off the Mozilla code. When a cookie has been accessed recently,
+ // don't bother updating its access time again. This reduces the number of
+ // updates we do during pageload, which in turn reduces the chance our storage
+ // backend will hit its batch thresholds and be forced to update.
+ if ((current - cc->LastAccessDate()) < last_access_threshold_)
+ return;
+
+ // See InitializeHistograms() for details.
+ histogram_between_access_interval_minutes_->Add(
+ (current - cc->LastAccessDate()).InMinutes());
+
+ cc->SetLastAccessDate(current);
+ if (cc->IsPersistent() && store_)
+ store_->UpdateCookieAccessTime(*cc);
+}
+
+void CookieMonster::InternalDeleteCookie(CookieMap::iterator it,
+ bool sync_to_store,
+ DeletionCause deletion_cause) {
+ lock_.AssertAcquired();
+
+ // See InitializeHistograms() for details.
+ if (deletion_cause != DELETE_COOKIE_DONT_RECORD)
+ histogram_cookie_deletion_cause_->Add(deletion_cause);
+
+ CanonicalCookie* cc = it->second;
+ VLOG(kVlogSetCookies) << "InternalDeleteCookie() cc: " << cc->DebugString();
+
+ if (cc->IsPersistent() && store_ && sync_to_store)
+ store_->DeleteCookie(*cc);
+ if (delegate_.get())
+ delegate_->OnCookieChanged(*cc, true);
+ cookies_.erase(it);
+ delete cc;
+}
+
+// Domain expiry behavior is unchanged by key/expiry scheme (the
+// meaning of the key is different, but that's not visible to this
+// routine). Global garbage collection is dependent on key/expiry
+// scheme in that recently touched cookies are not saved if
+// expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN.
+int CookieMonster::GarbageCollect(const Time& current,
+ const std::string& key) {
+ lock_.AssertAcquired();
+
+ int num_deleted = 0;
+
+ // Collect garbage for this key.
+ if (cookies_.count(key) > kDomainMaxCookies) {
+ VLOG(kVlogGarbageCollection) << "GarbageCollect() key: " << key;
+
+ std::vector<CookieMap::iterator> cookie_its;
+ num_deleted += GarbageCollectExpired(
+ current, cookies_.equal_range(key), &cookie_its);
+ base::Time oldest_removed;
+ if (FindLeastRecentlyAccessed(kDomainMaxCookies, kDomainPurgeCookies,
+ &oldest_removed, &cookie_its)) {
+ // Delete in two passes so we can figure out what we're nuking
+ // that would be kept at the global level.
+ int num_subject_to_global_purge =
+ GarbageCollectDeleteList(
+ current,
+ Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays),
+ DELETE_COOKIE_EVICTED_DOMAIN_PRE_SAFE,
+ cookie_its);
+ num_deleted += num_subject_to_global_purge;
+ // Correct because FindLeastRecentlyAccessed returns a sorted list.
+ cookie_its.erase(cookie_its.begin(),
+ cookie_its.begin() + num_subject_to_global_purge);
+ num_deleted +=
+ GarbageCollectDeleteList(
+ current,
+ Time(),
+ DELETE_COOKIE_EVICTED_DOMAIN_POST_SAFE,
+ cookie_its);
+ }
+ }
+
+  // Collect garbage for everything.  With the Firefox-style scheme we want to
+  // preserve cookies touched within kSafeFromGlobalPurgeDays; with the other
+  // scheme we do not.
+ if (cookies_.size() > kMaxCookies &&
+ (expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN ||
+ earliest_access_time_ <
+ Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays))) {
+ VLOG(kVlogGarbageCollection) << "GarbageCollect() everything";
+ std::vector<CookieMap::iterator> cookie_its;
+ base::Time oldest_left;
+ num_deleted += GarbageCollectExpired(
+ current, CookieMapItPair(cookies_.begin(), cookies_.end()),
+ &cookie_its);
+ if (FindLeastRecentlyAccessed(kMaxCookies, kPurgeCookies,
+ &oldest_left, &cookie_its)) {
+ Time oldest_safe_cookie(
+ expiry_and_key_scheme_ == EKS_KEEP_RECENT_AND_PURGE_ETLDP1 ?
+ (Time::Now() - TimeDelta::FromDays(kSafeFromGlobalPurgeDays)) :
+ Time()); // Null time == ignore access time.
+ int num_evicted = GarbageCollectDeleteList(
+ current,
+ oldest_safe_cookie,
+ DELETE_COOKIE_EVICTED_GLOBAL,
+ cookie_its);
+
+ // If no cookies were preserved by the time limit, the global last
+ // access is set to the value returned from FindLeastRecentlyAccessed.
+ // If the time limit preserved some cookies, we use the last access of
+ // the oldest preserved cookie.
+ if (num_evicted == static_cast<int>(cookie_its.size())) {
+ earliest_access_time_ = oldest_left;
+ } else {
+ earliest_access_time_ =
+ (*(cookie_its.begin() + num_evicted))->second->LastAccessDate();
+ }
+ num_deleted += num_evicted;
+ }
+ }
+
+ return num_deleted;
+}
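The header (see the k{Domain,}MaxCookies comment later in this patch) says each pass purges down to MaxCookies - PurgeCookies. A worked illustration with hypothetical values, since the actual constants are defined elsewhere in this file:

// Suppose kDomainMaxCookies == 180 and kDomainPurgeCookies == 30 (hypothetical
// numbers). Once a key holds more than 180 cookies, the domain-level pass
// runs: expired cookies go first, then FindLeastRecentlyAccessed() picks
// enough of the least recently accessed survivors to bring the key back down
// to 150. The global pass works the same way against kMaxCookies and
// kPurgeCookies, except that recently touched cookies may be spared,
// depending on expiry_and_key_scheme_.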
+
+int CookieMonster::GarbageCollectExpired(
+ const Time& current,
+ const CookieMapItPair& itpair,
+ std::vector<CookieMap::iterator>* cookie_its) {
+ lock_.AssertAcquired();
+
+ int num_deleted = 0;
+ for (CookieMap::iterator it = itpair.first, end = itpair.second; it != end;) {
+ CookieMap::iterator curit = it;
+ ++it;
+
+ if (curit->second->IsExpired(current)) {
+ InternalDeleteCookie(curit, true, DELETE_COOKIE_EXPIRED);
+ ++num_deleted;
+ } else if (cookie_its) {
+ cookie_its->push_back(curit);
+ }
+ }
+
+ return num_deleted;
+}
+
+int CookieMonster::GarbageCollectDeleteList(
+ const Time& current,
+ const Time& keep_accessed_after,
+ DeletionCause cause,
+ std::vector<CookieMap::iterator>& cookie_its) {
+ int num_deleted = 0;
+ for (std::vector<CookieMap::iterator>::iterator it = cookie_its.begin();
+ it != cookie_its.end(); it++) {
+ if (keep_accessed_after.is_null() ||
+ (*it)->second->LastAccessDate() < keep_accessed_after) {
+ histogram_evicted_last_access_minutes_->Add(
+ (current - (*it)->second->LastAccessDate()).InMinutes());
+ InternalDeleteCookie((*it), true, cause);
+ num_deleted++;
+ }
+ }
+ return num_deleted;
+}
+
+// A wrapper around RegistryControlledDomainService::GetDomainAndRegistry
+// to make clear we're creating a key for our local map. Here and
+// in FindCookiesForHostAndDomain() are the only two places where
+// we need to conditionalize based on key type.
+//
+// Note that this key algorithm explicitly ignores the scheme. This is
+// because when we're entering cookies into the map from the backing store,
+// we in general won't have the scheme at that point.
+// In practical terms, this means that file cookies will be stored
+// in the map either by an empty string or by UNC name (and will be
+// limited by kDomainMaxCookies), and extension cookies will be stored
+// based on the single extension id, as the extension id won't have the
+// form of a DNS host and hence GetKey() will return it unchanged.
+//
+// Arguably the right thing to do here is to make the key
+// algorithm dependent on the scheme, and make sure that the scheme is
+// available everywhere the key must be obtained (specifically at backing
+// store load time). This would require either changing the backing store
+// database schema to include the scheme (far more trouble than it's worth), or
+// separating out file cookies into their own CookieMonster instance and
+// thus restricting each scheme to a single cookie monster (which might
+// be worth it, but is still too much trouble to solve what is currently a
+// non-problem).
+std::string CookieMonster::GetKey(const std::string& domain) const {
+ if (expiry_and_key_scheme_ == EKS_DISCARD_RECENT_AND_PURGE_DOMAIN)
+ return domain;
+
+ std::string effective_domain(
+ RegistryControlledDomainService::GetDomainAndRegistry(domain));
+ if (effective_domain.empty())
+ effective_domain = domain;
+
+ if (!effective_domain.empty() && effective_domain[0] == '.')
+ return effective_domain.substr(1);
+ return effective_domain;
+}
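A few illustrative mappings for the default EKS_KEEP_RECENT_AND_PURGE_ETLDP1 scheme (hypothetical inputs; the registry-controlled-domain lookup is the one named in the code above):

// GetKey("www.google.com")   -> "google.com"       (collapsed to eTLD+1)
// GetKey(".google.com")      -> "google.com"       (leading '.' stripped)
// GetKey("news.bbc.co.uk")   -> "bbc.co.uk"        (registry is "co.uk")
// GetKey("aaabbbcccdddeee")  -> "aaabbbcccdddeee"  (extension id: not a DNS
//                                                   host, returned unchanged)
// Under EKS_DISCARD_RECENT_AND_PURGE_DOMAIN the domain is returned verbatim.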
+
+bool CookieMonster::HasCookieableScheme(const GURL& url) {
+ lock_.AssertAcquired();
+
+ // Make sure the request is on a cookie-able url scheme.
+ for (size_t i = 0; i < cookieable_schemes_.size(); ++i) {
+    if (url.SchemeIs(cookieable_schemes_[i].c_str())) {
+ // We've matched a supported scheme.
+ return true;
+ }
+ }
+
+ // The scheme didn't match any in our whitelist.
+ VLOG(kVlogPerCookieMonster) << "WARNING: Unsupported cookie scheme: "
+ << url.scheme();
+ return false;
+}
+
// Test to see if stats should be recorded, and record them if so.
// The goal here is to get sampling for the average browser-hour of
// activity. We won't take samples when the web isn't being surfed,
@@ -1549,6 +1485,85 @@ void CookieMonster::RecordPeriodicStats(const base::Time& current_time) {
last_statistic_record_time_ = current_time;
}
+// Initialize all histogram counter variables used in this class.
+//
+// Normal histogram usage involves using the macros defined in
+// histogram.h, which automatically takes care of declaring these
+// variables (as statics), initializing them, and accumulating into
+// them, all from a single entry point. Unfortunately, that solution
+// doesn't work for the CookieMonster, as it's vulnerable to races between
+// separate threads executing the same functions and hence initializing the
+// same static variables. There isn't a race danger in the histogram
+// accumulation calls; they are written to be resilient to simultaneous
+// calls from multiple threads.
+//
+// The solution taken here is to have per-CookieMonster instance
+// variables that are constructed during CookieMonster construction.
+// Note that these variables refer to the same underlying histogram,
+// so we still race (but safely) with other CookieMonster instances
+// for accumulation.
+//
+// To do this we've expanded out the individual histogram macro calls,
+// with declarations of the variables in the class decl, initialization here
+// (done from the class constructor) and direct calls to the accumulation
+// methods where needed. The specific histogram macro calls on which the
+// initialization is based are included in comments below.
+void CookieMonster::InitializeHistograms() {
+ // From UMA_HISTOGRAM_CUSTOM_COUNTS
+ histogram_expiration_duration_minutes_ = base::Histogram::FactoryGet(
+ "Cookie.ExpirationDurationMinutes",
+ 1, kMinutesInTenYears, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_between_access_interval_minutes_ = base::Histogram::FactoryGet(
+ "Cookie.BetweenAccessIntervalMinutes",
+ 1, kMinutesInTenYears, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_evicted_last_access_minutes_ = base::Histogram::FactoryGet(
+ "Cookie.EvictedLastAccessMinutes",
+ 1, kMinutesInTenYears, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_count_ = base::Histogram::FactoryGet(
+ "Cookie.Count", 1, 4000, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_domain_count_ = base::Histogram::FactoryGet(
+ "Cookie.DomainCount", 1, 4000, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_etldp1_count_ = base::Histogram::FactoryGet(
+ "Cookie.Etldp1Count", 1, 4000, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_domain_per_etldp1_count_ = base::Histogram::FactoryGet(
+ "Cookie.DomainPerEtldp1Count", 1, 4000, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+
+ // From UMA_HISTOGRAM_COUNTS_10000 & UMA_HISTOGRAM_CUSTOM_COUNTS
+ histogram_number_duplicate_db_cookies_ = base::Histogram::FactoryGet(
+ "Net.NumDuplicateCookiesInDb", 1, 10000, 50,
+ base::Histogram::kUmaTargetedHistogramFlag);
+
+ // From UMA_HISTOGRAM_ENUMERATION
+ histogram_cookie_deletion_cause_ = base::LinearHistogram::FactoryGet(
+ "Cookie.DeletionCause", 1,
+ DELETE_COOKIE_LAST_ENTRY - 1, DELETE_COOKIE_LAST_ENTRY,
+ base::Histogram::kUmaTargetedHistogramFlag);
+
+ // From UMA_HISTOGRAM_{CUSTOM_,}TIMES
+ histogram_time_get_ = base::Histogram::FactoryTimeGet("Cookie.TimeGet",
+ base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(1),
+ 50, base::Histogram::kUmaTargetedHistogramFlag);
+ histogram_time_load_ = base::Histogram::FactoryTimeGet("Cookie.TimeLoad",
+ base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(1),
+ 50, base::Histogram::kUmaTargetedHistogramFlag);
+}
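For comparison, this is roughly what the single-call macro form looks like at an accumulation site; its expansion keeps a function-local static histogram pointer, which is what makes it unsafe to initialize from multiple CookieMonster threads as described above (sketch only; the sample variable is hypothetical):

// UMA_HISTOGRAM_CUSTOM_COUNTS("Cookie.ExpirationDurationMinutes",
//                             expiration_minutes,  // sample
//                             1, kMinutesInTenYears, 50);
//
// Here the equivalent accumulation goes through the per-instance member:
//   histogram_expiration_duration_minutes_->Add(expiration_minutes);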
+
+
+// The system resolution is not high enough, so we can have multiple
+// set cookies that result in the same system time. When this happens, we
+// increment by one Time unit. Let's hope computers don't get too fast.
+Time CookieMonster::CurrentTime() {
+ return std::max(Time::Now(),
+ Time::FromInternalValue(last_time_seen_.ToInternalValue() + 1));
+}
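A short illustration of the increment trick, assuming two cookies are set within the same clock tick (last_time_seen_ is updated by SetCookieWithCreationTimeAndOptions() above):

// Time::Now() == T for both calls:
//   first  SetCookie: CurrentTime() returns T;  last_time_seen_ = T
//   second SetCookie: CurrentTime() returns T + 1 internal time unit, because
//                     max(Now(), last_time_seen_ + 1) > Now()
// The two cookies therefore never collide on CreationDate(), which InitStore()
// and the duplicate-trimming pass depend on.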
+
CookieMonster::ParsedCookie::ParsedCookie(const std::string& cookie_line)
: is_valid_(false),
path_index_(0),
@@ -1608,6 +1623,21 @@ const char CookieMonster::ParsedCookie::kWhitespace[] = " \t";
const char CookieMonster::ParsedCookie::kValueSeparator[] = ";";
const char CookieMonster::ParsedCookie::kTokenSeparator[] = ";=";
+// Create a cookie-line for the cookie. For debugging only!
+// If we want to use this for something more than debugging, we
+// should rewrite it better...
+std::string CookieMonster::ParsedCookie::DebugString() const {
+ std::string out;
+ for (PairList::const_iterator it = pairs_.begin();
+ it != pairs_.end(); ++it) {
+ out.append(it->first);
+ out.append("=");
+ out.append(it->second);
+ out.append("; ");
+ }
+ return out;
+}
+
std::string::const_iterator CookieMonster::ParsedCookie::FindFirstTerminator(
const std::string& s) {
std::string::const_iterator end = s.end();
@@ -1812,21 +1842,6 @@ void CookieMonster::ParsedCookie::SetupAttributes() {
}
}
-// Create a cookie-line for the cookie. For debugging only!
-// If we want to use this for something more than debugging, we
-// should rewrite it better...
-std::string CookieMonster::ParsedCookie::DebugString() const {
- std::string out;
- for (PairList::const_iterator it = pairs_.begin();
- it != pairs_.end(); ++it) {
- out.append(it->first);
- out.append("=");
- out.append(it->second);
- out.append("; ");
- }
- return out;
-}
-
CookieMonster::CanonicalCookie::CanonicalCookie() {
}
@@ -1879,20 +1894,6 @@ CookieMonster::CanonicalCookie::CanonicalCookie(const GURL& url,
domain_ = cookie_domain;
}
-CookieMonster::CookieMonster(PersistentCookieStore* store,
- Delegate* delegate,
- int last_access_threshold_milliseconds)
- : initialized_(false),
- expiry_and_key_scheme_(expiry_and_key_default_),
- store_(store),
- last_access_threshold_(base::TimeDelta::FromMilliseconds(
- last_access_threshold_milliseconds)),
- delegate_(delegate),
- last_statistic_record_time_(base::Time::Now()) {
- InitializeHistograms();
- SetDefaultCookieableSchemes();
-}
-
CookieMonster::CanonicalCookie::~CanonicalCookie() {
}
diff --git a/net/base/cookie_monster.h b/net/base/cookie_monster.h
index 4e31488..6a17d38 100644
--- a/net/base/cookie_monster.h
+++ b/net/base/cookie_monster.h
@@ -130,25 +130,6 @@ class CookieMonster : public CookieStore {
// i.e. it doesn't begin with a leading '.' character.
static bool DomainIsHostOnly(const std::string& domain_string);
- // CookieStore implementation.
-
- // Sets the cookies specified by |cookie_list| returned from |url|
- // with options |options| in effect.
- virtual bool SetCookieWithOptions(const GURL& url,
- const std::string& cookie_line,
- const CookieOptions& options);
-
- // Gets all cookies that apply to |url| given |options|.
- // The returned cookies are ordered by longest path, then earliest
- // creation date.
- virtual std::string GetCookiesWithOptions(const GURL& url,
- const CookieOptions& options);
-
- // Deletes all cookies with that might apply to |url| that has |cookie_name|.
- virtual void DeleteCookie(const GURL& url, const std::string& cookie_name);
-
- virtual CookieMonster* GetCookieMonster();
-
// Sets a cookie given explicit user-provided cookie attributes. The cookie
// name, value, domain, etc. are each provided as separate strings. This
// function expects each attribute to be well-formed. It will check for
@@ -222,7 +203,6 @@ class CookieMonster : public CookieStore {
// to enable support, but it should only be used for testing. Bug 1157243.
// Must be called before creating a CookieMonster instance.
static void EnableFileScheme();
- static bool enable_file_scheme_;
// Flush the backing store (if any) to disk and post the given task when done.
// WARNING: THE CALLBACK WILL RUN ON A RANDOM THREAD. IT MUST BE THREAD SAFE.
@@ -231,9 +211,26 @@ class CookieMonster : public CookieStore {
// to the thread you actually want to be notified on.
void FlushStore(Task* completion_task);
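Since the completion task runs on an arbitrary thread, a caller that needs thread affinity has to re-post it, as the comment says. A minimal sketch using the old Task/MessageLoop style visible elsewhere in this change (BounceToOriginLoop is a hypothetical helper, not part of this patch):

class BounceToOriginLoop : public Task {
 public:
  BounceToOriginLoop(MessageLoop* origin_loop, Task* wrapped)
      : origin_loop_(origin_loop), wrapped_(wrapped) {}
  virtual void Run() {
    // Runs on whatever thread the store picked; hand off to the origin loop.
    origin_loop_->PostTask(FROM_HERE, wrapped_);
  }
 private:
  MessageLoop* origin_loop_;
  Task* wrapped_;  // Ownership passes to the origin loop's task queue.
};

// Usage sketch:
//   cookie_monster->FlushStore(
//       new BounceToOriginLoop(MessageLoop::current(), done_task));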
- private:
- ~CookieMonster();
+ // CookieStore implementation.
+  // Sets the cookies specified by |cookie_line|, received from |url|,
+  // with |options| in effect.
+ virtual bool SetCookieWithOptions(const GURL& url,
+ const std::string& cookie_line,
+ const CookieOptions& options);
+
+ // Gets all cookies that apply to |url| given |options|.
+ // The returned cookies are ordered by longest path, then earliest
+ // creation date.
+ virtual std::string GetCookiesWithOptions(const GURL& url,
+ const CookieOptions& options);
+
+  // Deletes all cookies that might apply to |url| and that have the name
+  // |cookie_name|.
+ virtual void DeleteCookie(const GURL& url, const std::string& cookie_name);
+
+ virtual CookieMonster* GetCookieMonster();
+
+ private:
// Testing support.
// For SetCookieWithCreationTime.
FRIEND_TEST_ALL_PREFIXES(CookieMonsterTest,
@@ -253,6 +250,28 @@ class CookieMonster : public CookieStore {
FRIEND_TEST_ALL_PREFIXES(CookieMonsterTest, GetKey);
FRIEND_TEST_ALL_PREFIXES(CookieMonsterTest, TestGetKey);
+ enum DeletionCause {
+ DELETE_COOKIE_EXPLICIT,
+ DELETE_COOKIE_OVERWRITE,
+ DELETE_COOKIE_EXPIRED,
+ DELETE_COOKIE_EVICTED,
+ DELETE_COOKIE_DUPLICATE_IN_BACKING_STORE,
+ DELETE_COOKIE_DONT_RECORD, // e.g. For final cleanup after flush to store.
+ DELETE_COOKIE_EVICTED_DOMAIN,
+ DELETE_COOKIE_EVICTED_GLOBAL,
+
+ // Cookies evicted during domain level garbage collection that
+ // were accessed longer ago than kSafeFromGlobalPurgeDays
+ DELETE_COOKIE_EVICTED_DOMAIN_PRE_SAFE,
+
+ // Cookies evicted during domain level garbage collection that
+    // were accessed more recently than kSafeFromGlobalPurgeDays
+ // (and thus would have been preserved by global garbage collection).
+ DELETE_COOKIE_EVICTED_DOMAIN_POST_SAFE,
+
+ DELETE_COOKIE_LAST_ENTRY
+ };
+
// Cookie garbage collection thresholds. Based off of the Mozilla defaults.
// When the number of cookies gets to k{Domain,}MaxCookies
// purge down to k{Domain,}MaxCookies - k{Domain,}PurgeCookies.
@@ -285,6 +304,11 @@ class CookieMonster : public CookieStore {
static const ExpiryAndKeyScheme expiry_and_key_default_ =
EKS_KEEP_RECENT_AND_PURGE_ETLDP1;
+ // Record statistics every kRecordStatisticsIntervalSeconds of uptime.
+ static const int kRecordStatisticsIntervalSeconds = 10 * 60;
+
+ ~CookieMonster();
+
bool SetCookieWithCreationTime(const GURL& url,
const std::string& cookie_line,
const base::Time& creation_time);
@@ -363,28 +387,6 @@ class CookieMonster : public CookieStore {
void InternalUpdateCookieAccessTime(CanonicalCookie* cc,
const base::Time& current_time);
- enum DeletionCause {
- DELETE_COOKIE_EXPLICIT,
- DELETE_COOKIE_OVERWRITE,
- DELETE_COOKIE_EXPIRED,
- DELETE_COOKIE_EVICTED,
- DELETE_COOKIE_DUPLICATE_IN_BACKING_STORE,
- DELETE_COOKIE_DONT_RECORD, // e.g. For final cleanup after flush to store.
- DELETE_COOKIE_EVICTED_DOMAIN,
- DELETE_COOKIE_EVICTED_GLOBAL,
-
- // Cookies evicted during domain level garbage collection that
- // were accessed longer ago than kSafeFromGlobalPurgeDays
- DELETE_COOKIE_EVICTED_DOMAIN_PRE_SAFE,
-
- // Cookies evicted during domain level garbage collection that
- // were accessed more rencelyt than kSafeFromGlobalPurgeDays
- // (and thus would have been preserved by global garbage collection).
- DELETE_COOKIE_EVICTED_DOMAIN_POST_SAFE,
-
- DELETE_COOKIE_LAST_ENTRY
- };
-
// |deletion_cause| argument is for collecting statistics.
void InternalDeleteCookie(CookieMap::iterator it, bool sync_to_store,
DeletionCause deletion_cause);
@@ -422,13 +424,19 @@ class CookieMonster : public CookieStore {
bool HasCookieableScheme(const GURL& url);
// Statistics support
- // Record statistics every kRecordStatisticsIntervalSeconds of uptime.
- static const int kRecordStatisticsIntervalSeconds = 10 * 60;
// This function should be called repeatedly, and will record
// statistics if a sufficient time period has passed.
void RecordPeriodicStats(const base::Time& current_time);
+  // Initialize the histogram variables declared below; should only be called
+  // from the constructor.
+ void InitializeHistograms();
+
+ // The resolution of our time isn't enough, so we do something
+ // ugly and increment when we've seen the same time twice.
+ base::Time CurrentTime();
+
// Histogram variables; see CookieMonster::InitializeHistograms() in
// cookie_monster.cc for details.
scoped_refptr<base::Histogram> histogram_expiration_duration_minutes_;
@@ -443,10 +451,6 @@ class CookieMonster : public CookieStore {
scoped_refptr<base::Histogram> histogram_time_get_;
scoped_refptr<base::Histogram> histogram_time_load_;
- // Initialize the above variables; should only be called from
- // the constructor.
- void InitializeHistograms();
-
CookieMap cookies_;
// Indicates whether the cookie store has been initialized. This happens
@@ -459,9 +463,6 @@ class CookieMonster : public CookieStore {
scoped_refptr<PersistentCookieStore> store_;
- // The resolution of our time isn't enough, so we do something
- // ugly and increment when we've seen the same time twice.
- base::Time CurrentTime();
base::Time last_time_seen_;
// Minimum delay after updating a cookie's LastAccessDate before we will
@@ -488,6 +489,8 @@ class CookieMonster : public CookieStore {
base::Time last_statistic_record_time_;
+ static bool enable_file_scheme_;
+
DISALLOW_COPY_AND_ASSIGN(CookieMonster);
};
diff --git a/net/base/directory_lister.cc b/net/base/directory_lister.cc
index a0f6317..5f17511 100644
--- a/net/base/directory_lister.cc
+++ b/net/base/directory_lister.cc
@@ -39,64 +39,6 @@ public:
int error;
};
-// Comparator for sorting lister results. This uses the locale aware filename
-// comparison function on the filenames for sorting in the user's locale.
-// Static.
-bool DirectoryLister::CompareAlphaDirsFirst(const DirectoryListerData& a,
- const DirectoryListerData& b) {
- // Parent directory before all else.
- if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(a.info)))
- return true;
- if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(b.info)))
- return false;
-
- // Directories before regular files.
- bool a_is_directory = file_util::FileEnumerator::IsDirectory(a.info);
- bool b_is_directory = file_util::FileEnumerator::IsDirectory(b.info);
- if (a_is_directory != b_is_directory)
- return a_is_directory;
-
- return file_util::LocaleAwareCompareFilenames(
- file_util::FileEnumerator::GetFilename(a.info),
- file_util::FileEnumerator::GetFilename(b.info));
-}
-
-// Static.
-bool DirectoryLister::CompareDate(const DirectoryListerData& a,
- const DirectoryListerData& b) {
- // Parent directory before all else.
- if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(a.info)))
- return true;
- if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(b.info)))
- return false;
-
- // Directories before regular files.
- bool a_is_directory = file_util::FileEnumerator::IsDirectory(a.info);
- bool b_is_directory = file_util::FileEnumerator::IsDirectory(b.info);
- if (a_is_directory != b_is_directory)
- return a_is_directory;
-#if defined(OS_POSIX)
- return a.info.stat.st_mtime > b.info.stat.st_mtime;
-#elif defined(OS_WIN)
- if (a.info.ftLastWriteTime.dwHighDateTime ==
- b.info.ftLastWriteTime.dwHighDateTime) {
- return a.info.ftLastWriteTime.dwLowDateTime >
- b.info.ftLastWriteTime.dwLowDateTime;
- } else {
- return a.info.ftLastWriteTime.dwHighDateTime >
- b.info.ftLastWriteTime.dwHighDateTime;
- }
-#endif
-}
-
-// Comparator for sorting find result by paths. This uses the locale-aware
-// comparison function on the filenames for sorting in the user's locale.
-// Static.
-bool DirectoryLister::CompareFullPath(const DirectoryListerData& a,
- const DirectoryListerData& b) {
- return file_util::LocaleAwareCompareFilenames(a.path, b.path);
-}
-
DirectoryLister::DirectoryLister(const FilePath& dir,
DirectoryListerDelegate* delegate)
: dir_(dir),
@@ -121,15 +63,6 @@ DirectoryLister::DirectoryLister(const FilePath& dir,
DCHECK(!dir.value().empty());
}
-DirectoryLister::~DirectoryLister() {
- if (thread_) {
- // This is a bug and we should stop joining this thread.
- // http://crbug.com/65331
- base::ThreadRestrictions::ScopedAllowIO allow_io;
- base::PlatformThread::Join(thread_);
- }
-}
-
bool DirectoryLister::Start() {
// spawn a thread to enumerate the specified directory
@@ -216,6 +149,73 @@ void DirectoryLister::ThreadMain() {
message_loop_->PostTask(FROM_HERE, e);
}
+DirectoryLister::~DirectoryLister() {
+ if (thread_) {
+ // This is a bug and we should stop joining this thread.
+ // http://crbug.com/65331
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ base::PlatformThread::Join(thread_);
+ }
+}
+
+// Comparator for sorting lister results. This uses the locale aware filename
+// comparison function on the filenames for sorting in the user's locale.
+// Static.
+bool DirectoryLister::CompareAlphaDirsFirst(const DirectoryListerData& a,
+ const DirectoryListerData& b) {
+ // Parent directory before all else.
+ if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(a.info)))
+ return true;
+ if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(b.info)))
+ return false;
+
+ // Directories before regular files.
+ bool a_is_directory = file_util::FileEnumerator::IsDirectory(a.info);
+ bool b_is_directory = file_util::FileEnumerator::IsDirectory(b.info);
+ if (a_is_directory != b_is_directory)
+ return a_is_directory;
+
+ return file_util::LocaleAwareCompareFilenames(
+ file_util::FileEnumerator::GetFilename(a.info),
+ file_util::FileEnumerator::GetFilename(b.info));
+}
+
+// Static.
+bool DirectoryLister::CompareDate(const DirectoryListerData& a,
+ const DirectoryListerData& b) {
+ // Parent directory before all else.
+ if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(a.info)))
+ return true;
+ if (file_util::IsDotDot(file_util::FileEnumerator::GetFilename(b.info)))
+ return false;
+
+ // Directories before regular files.
+ bool a_is_directory = file_util::FileEnumerator::IsDirectory(a.info);
+ bool b_is_directory = file_util::FileEnumerator::IsDirectory(b.info);
+ if (a_is_directory != b_is_directory)
+ return a_is_directory;
+#if defined(OS_POSIX)
+ return a.info.stat.st_mtime > b.info.stat.st_mtime;
+#elif defined(OS_WIN)
+ if (a.info.ftLastWriteTime.dwHighDateTime ==
+ b.info.ftLastWriteTime.dwHighDateTime) {
+ return a.info.ftLastWriteTime.dwLowDateTime >
+ b.info.ftLastWriteTime.dwLowDateTime;
+ } else {
+ return a.info.ftLastWriteTime.dwHighDateTime >
+ b.info.ftLastWriteTime.dwHighDateTime;
+ }
+#endif
+}
+
+// Comparator for sorting find result by paths. This uses the locale-aware
+// comparison function on the filenames for sorting in the user's locale.
+// Static.
+bool DirectoryLister::CompareFullPath(const DirectoryListerData& a,
+ const DirectoryListerData& b) {
+ return file_util::LocaleAwareCompareFilenames(a.path, b.path);
+}
+
void DirectoryLister::OnReceivedData(const DirectoryListerData* data,
int count) {
// Since the delegate can clear itself during the OnListFile callback, we
diff --git a/net/base/directory_lister.h b/net/base/directory_lister.h
index 368c783..26ac0d7 100644
--- a/net/base/directory_lister.h
+++ b/net/base/directory_lister.h
@@ -88,6 +88,8 @@ class DirectoryLister : public base::RefCountedThreadSafe<DirectoryLister>,
friend class base::RefCountedThreadSafe<DirectoryLister>;
friend class DirectoryDataEvent;
+ ~DirectoryLister();
+
// Comparison methods for sorting, chosen based on |sort_|.
static bool CompareAlphaDirsFirst(const DirectoryListerData& a,
const DirectoryListerData& b);
@@ -96,8 +98,6 @@ class DirectoryLister : public base::RefCountedThreadSafe<DirectoryLister>,
static bool CompareFullPath(const DirectoryListerData& a,
const DirectoryListerData& b);
- ~DirectoryLister();
-
void OnReceivedData(const DirectoryListerData* data, int count);
void OnDone(int error);
diff --git a/net/base/dnssec_chain_verifier.cc b/net/base/dnssec_chain_verifier.cc
index 2dbacbc..e6d31ee 100644
--- a/net/base/dnssec_chain_verifier.cc
+++ b/net/base/dnssec_chain_verifier.cc
@@ -109,9 +109,11 @@ As l.google.com contains only a single DNSKEY, it is included without a signatur
A CERT record is presented for www.l.google.com. The verification is complete.
*/
+namespace {
+
// This is the 2048-bit DNS root key: http://www.iana.org/dnssec
// 19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5
-static const unsigned char kRootKey[] = {
+const unsigned char kRootKey[] = {
0x01, 0x01, 0x03, 0x08, 0x03, 0x01, 0x00, 0x01, 0xa8, 0x00, 0x20, 0xa9, 0x55,
0x66, 0xba, 0x42, 0xe8, 0x86, 0xbb, 0x80, 0x4c, 0xda, 0x84, 0xe4, 0x7e, 0xf5,
0x6d, 0xbd, 0x7a, 0xec, 0x61, 0x26, 0x15, 0x55, 0x2c, 0xec, 0x90, 0x6d, 0x21,
@@ -136,7 +138,32 @@ static const unsigned char kRootKey[] = {
};
// kRootKeyID is the key id for kRootKey
-static const uint16 kRootKeyID = 19036;
+const uint16 kRootKeyID = 19036;
+
+// CountLabels returns the number of DNS labels in |a|, which must be in DNS,
+// length-prefixed form.
+unsigned CountLabels(base::StringPiece a) {
+ for (unsigned c = 0;; c++) {
+ if (!a.size())
+ return c;
+ uint8 label_len = a.data()[0];
+ a.remove_prefix(1);
+ DCHECK_GE(a.size(), label_len);
+ a.remove_prefix(label_len);
+ }
+}
+
+// RemoveLeadingLabel removes the first label from |a|, which must be in DNS,
+// length-prefixed form.
+void RemoveLeadingLabel(base::StringPiece* a) {
+ if (!a->size())
+ return;
+ uint8 label_len = a->data()[0];
+ a->remove_prefix(1);
+ a->remove_prefix(label_len);
+}
+
+} // namespace
namespace net {
@@ -217,6 +244,142 @@ const std::vector<base::StringPiece>& DNSSECChainVerifier::rrdatas() const {
return rrdatas_;
}
+// static
+std::map<std::string, std::string>
+DNSSECChainVerifier::ParseTLSTXTRecord(base::StringPiece rrdata) {
+ std::map<std::string, std::string> ret;
+
+ if (rrdata.empty())
+ return ret;
+
+ std::string txt;
+ txt.reserve(rrdata.size());
+
+ // TXT records are a series of 8-bit length prefixed substrings that we
+ // concatenate into |txt|
+ while (!rrdata.empty()) {
+ unsigned len = rrdata[0];
+ if (len == 0 || len + 1 > rrdata.size())
+ return ret;
+ txt.append(rrdata.data() + 1, len);
+ rrdata.remove_prefix(len + 1);
+ }
+
+ // We append a space to |txt| to make the parsing code, below, cleaner.
+ txt.append(" ");
+
+ // RECORD = KV (' '+ KV)*
+ // KV = KEY '=' VALUE
+ // KEY = [a-zA-Z0-9]+
+ // VALUE = [^ \0]*
+
+ enum State {
+ STATE_KEY,
+ STATE_VALUE,
+ STATE_SPACE,
+ };
+
+ State state = STATE_KEY;
+
+ std::map<std::string, std::string> m;
+
+ unsigned start = 0;
+ std::string key;
+
+ for (unsigned i = 0; i < txt.size(); i++) {
+ char c = txt[i];
+ if (c == 0)
+ return ret; // NUL values are never allowed.
+
+ switch (state) {
+ case STATE_KEY:
+ if (c == '=') {
+ if (i == start)
+ return ret; // zero length keys are not allowed.
+ key = txt.substr(start, i - start);
+ start = i + 1;
+ state = STATE_VALUE;
+ continue;
+ }
+ if (!IsAsciiAlpha(c) && !IsAsciiDigit(c))
+ return ret; // invalid key value
+ break;
+ case STATE_VALUE:
+ if (c == ' ') {
+ if (m.find(key) == m.end())
+ m.insert(make_pair(key, txt.substr(start, i - start)));
+ state = STATE_SPACE;
+ continue;
+ }
+ break;
+ case STATE_SPACE:
+ if (c != ' ') {
+ start = i;
+ i--;
+ state = STATE_KEY;
+ continue;
+ }
+ break;
+ default:
+ NOTREACHED();
+ return ret;
+ }
+ }
+
+ if (state != STATE_SPACE)
+ return ret;
+
+ ret.swap(m);
+ return ret;
+}
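A usage sketch for the grammar documented above. The record contents are hypothetical; the point is the length-prefixed chunking and the key=value split:

// One 15-byte chunk, "v=tls1 port=443", written as separate string literals so
// the \x0f length byte does not swallow the following 'v'.
std::string rrdata("\x0f" "v=tls1 port=443", 16);
std::map<std::string, std::string> kv =
    net::DNSSECChainVerifier::ParseTLSTXTRecord(rrdata);
// kv["v"] == "tls1" and kv["port"] == "443". Malformed input (a zero-length
// chunk, an embedded NUL, or an empty key) makes the function return an empty
// map instead.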
+
+// MatchingLabels returns the number of labels which |a| and |b| share,
+// counting right-to-left from the root. |a| and |b| must be DNS,
+// length-prefixed names. All names match at the root label, so this always
+// returns a value >= 1.
+
+// static
+unsigned DNSSECChainVerifier::MatchingLabels(base::StringPiece a,
+ base::StringPiece b) {
+ unsigned c = 0;
+ unsigned a_labels = CountLabels(a);
+ unsigned b_labels = CountLabels(b);
+
+ while (a_labels > b_labels) {
+ RemoveLeadingLabel(&a);
+ a_labels--;
+ }
+ while (b_labels > a_labels) {
+ RemoveLeadingLabel(&b);
+ b_labels--;
+ }
+
+ for (;;) {
+ if (!a.size()) {
+ if (!b.size())
+ return c;
+ return 0;
+ }
+ if (!b.size())
+ return 0;
+ uint8 a_length = a.data()[0];
+ a.remove_prefix(1);
+ uint8 b_length = b.data()[0];
+ b.remove_prefix(1);
+ DCHECK_GE(a.size(), a_length);
+ DCHECK_GE(b.size(), b_length);
+
+ if (a_length == b_length && memcmp(a.data(), b.data(), a_length) == 0) {
+ c++;
+ } else {
+ c = 0;
+ }
+
+ a.remove_prefix(a_length);
+ b.remove_prefix(b_length);
+ }
+}
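A worked example, assuming the names carry their terminating zero-length root label (which is what makes the "always returns a value >= 1" guarantee above hold):

//   "www.example.com"  -> "\x03" "www"  "\x07" "example" "\x03" "com" "\x00"
//   "mail.example.com" -> "\x04" "mail" "\x07" "example" "\x03" "com" "\x00"
// Scanning left to right after trimming to equal label counts: "www" vs
// "mail" differ (the counter resets to 0), then "example", "com" and the root
// label all match, so MatchingLabels() returns 3.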
+
// U8 reads, and removes, a single byte from |chain_|
bool DNSSECChainVerifier::U8(uint8* v) {
if (chain_.size() < 1)
@@ -468,19 +631,6 @@ DNSSECChainVerifier::Error DNSSECChainVerifier::EnterZone(
return OK;
}
-// CountLabels returns the number of DNS labels in |a|, which must be in DNS,
-// length-prefixed form.
-static unsigned CountLabels(base::StringPiece a) {
- for (unsigned c = 0;; c++) {
- if (!a.size())
- return c;
- uint8 label_len = a.data()[0];
- a.remove_prefix(1);
- DCHECK_GE(a.size(), label_len);
- a.remove_prefix(label_len);
- }
-}
-
// LeaveZone transitions out of the current zone, either by following DS
// records to validate the entry key of the next zone, or because the final
// resource records are given.
@@ -660,150 +810,4 @@ DNSSECChainVerifier::Error DNSSECChainVerifier::ReadCNAME(
return OK;
}
-// static
-std::map<std::string, std::string>
-DNSSECChainVerifier::ParseTLSTXTRecord(base::StringPiece rrdata) {
- std::map<std::string, std::string> ret;
-
- if (rrdata.empty())
- return ret;
-
- std::string txt;
- txt.reserve(rrdata.size());
-
- // TXT records are a series of 8-bit length prefixed substrings that we
- // concatenate into |txt|
- while (!rrdata.empty()) {
- unsigned len = rrdata[0];
- if (len == 0 || len + 1 > rrdata.size())
- return ret;
- txt.append(rrdata.data() + 1, len);
- rrdata.remove_prefix(len + 1);
- }
-
- // We append a space to |txt| to make the parsing code, below, cleaner.
- txt.append(" ");
-
- // RECORD = KV (' '+ KV)*
- // KV = KEY '=' VALUE
- // KEY = [a-zA-Z0-9]+
- // VALUE = [^ \0]*
-
- enum State {
- STATE_KEY,
- STATE_VALUE,
- STATE_SPACE,
- };
-
- State state = STATE_KEY;
-
- std::map<std::string, std::string> m;
-
- unsigned start = 0;
- std::string key;
-
- for (unsigned i = 0; i < txt.size(); i++) {
- char c = txt[i];
- if (c == 0)
- return ret; // NUL values are never allowed.
-
- switch (state) {
- case STATE_KEY:
- if (c == '=') {
- if (i == start)
- return ret; // zero length keys are not allowed.
- key = txt.substr(start, i - start);
- start = i + 1;
- state = STATE_VALUE;
- continue;
- }
- if (!IsAsciiAlpha(c) && !IsAsciiDigit(c))
- return ret; // invalid key value
- break;
- case STATE_VALUE:
- if (c == ' ') {
- if (m.find(key) == m.end())
- m.insert(make_pair(key, txt.substr(start, i - start)));
- state = STATE_SPACE;
- continue;
- }
- break;
- case STATE_SPACE:
- if (c != ' ') {
- start = i;
- i--;
- state = STATE_KEY;
- continue;
- }
- break;
- default:
- NOTREACHED();
- return ret;
- }
- }
-
- if (state != STATE_SPACE)
- return ret;
-
- ret.swap(m);
- return ret;
-}
-
-// RemoveLeadingLabel removes the first label from |a|, which must be in DNS,
-// length-prefixed form.
-static void RemoveLeadingLabel(base::StringPiece* a) {
- if (!a->size())
- return;
- uint8 label_len = a->data()[0];
- a->remove_prefix(1);
- a->remove_prefix(label_len);
-}
-
-// MatchingLabels returns the number of labels which |a| and |b| share,
-// counting right-to-left from the root. |a| and |b| must be DNS,
-// length-prefixed names. All names match at the root label, so this always
-// returns a value >= 1.
-
-// static
-unsigned DNSSECChainVerifier::MatchingLabels(base::StringPiece a,
- base::StringPiece b) {
- unsigned c = 0;
- unsigned a_labels = CountLabels(a);
- unsigned b_labels = CountLabels(b);
-
- while (a_labels > b_labels) {
- RemoveLeadingLabel(&a);
- a_labels--;
- }
- while (b_labels > a_labels) {
- RemoveLeadingLabel(&b);
- b_labels--;
- }
-
- for (;;) {
- if (!a.size()) {
- if (!b.size())
- return c;
- return 0;
- }
- if (!b.size())
- return 0;
- uint8 a_length = a.data()[0];
- a.remove_prefix(1);
- uint8 b_length = b.data()[0];
- b.remove_prefix(1);
- DCHECK_GE(a.size(), a_length);
- DCHECK_GE(b.size(), b_length);
-
- if (a_length == b_length && memcmp(a.data(), b.data(), a_length) == 0) {
- c++;
- } else {
- c = 0;
- }
-
- a.remove_prefix(a_length);
- b.remove_prefix(b_length);
- }
-}
-
} // namespace net
diff --git a/net/http/http_auth_handler_factory.cc b/net/http/http_auth_handler_factory.cc
index 7d34cff..8cf7d3e 100644
--- a/net/http/http_auth_handler_factory.cc
+++ b/net/http/http_auth_handler_factory.cc
@@ -79,6 +79,46 @@ bool IsSupportedScheme(const std::vector<std::string>& supported_schemes,
} // namespace
+HttpAuthHandlerRegistryFactory::HttpAuthHandlerRegistryFactory() {
+}
+
+HttpAuthHandlerRegistryFactory::~HttpAuthHandlerRegistryFactory() {
+ STLDeleteContainerPairSecondPointers(factory_map_.begin(),
+ factory_map_.end());
+}
+
+void HttpAuthHandlerRegistryFactory::SetURLSecurityManager(
+ const std::string& scheme,
+ URLSecurityManager* security_manager) {
+ HttpAuthHandlerFactory* factory = GetSchemeFactory(scheme);
+ if (factory)
+ factory->set_url_security_manager(security_manager);
+}
+
+void HttpAuthHandlerRegistryFactory::RegisterSchemeFactory(
+ const std::string& scheme,
+ HttpAuthHandlerFactory* factory) {
+ std::string lower_scheme = StringToLowerASCII(scheme);
+ FactoryMap::iterator it = factory_map_.find(lower_scheme);
+ if (it != factory_map_.end()) {
+ delete it->second;
+ }
+  if (factory)
+    factory_map_[lower_scheme] = factory;
+  else if (it != factory_map_.end())
+    factory_map_.erase(it);
+}
+
+HttpAuthHandlerFactory* HttpAuthHandlerRegistryFactory::GetSchemeFactory(
+ const std::string& scheme) const {
+ std::string lower_scheme = StringToLowerASCII(scheme);
+ FactoryMap::const_iterator it = factory_map_.find(lower_scheme);
+ if (it == factory_map_.end()) {
+ return NULL; // |scheme| is not registered.
+ }
+ return it->second;
+}
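A small usage sketch of the registration semantics above (MyBasicFactory and MyOtherFactory are hypothetical HttpAuthHandlerFactory subclasses): registering replaces and deletes any previous factory for the scheme, and registering NULL unregisters it.

registry->RegisterSchemeFactory("basic", new MyBasicFactory);   // now owned
registry->RegisterSchemeFactory("basic", new MyOtherFactory);   // old deleted
registry->RegisterSchemeFactory("basic", NULL);                 // unregistered
// GetSchemeFactory("basic") now returns NULL.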
+
// static
HttpAuthHandlerRegistryFactory* HttpAuthHandlerRegistryFactory::Create(
const std::vector<std::string>& supported_schemes,
@@ -124,36 +164,6 @@ HttpAuthHandlerRegistryFactory* HttpAuthHandlerRegistryFactory::Create(
return registry_factory;
}
-HttpAuthHandlerRegistryFactory::HttpAuthHandlerRegistryFactory() {
-}
-
-HttpAuthHandlerRegistryFactory::~HttpAuthHandlerRegistryFactory() {
- STLDeleteContainerPairSecondPointers(factory_map_.begin(),
- factory_map_.end());
-}
-
-void HttpAuthHandlerRegistryFactory::SetURLSecurityManager(
- const std::string& scheme,
- URLSecurityManager* security_manager) {
- HttpAuthHandlerFactory* factory = GetSchemeFactory(scheme);
- if (factory)
- factory->set_url_security_manager(security_manager);
-}
-
-void HttpAuthHandlerRegistryFactory::RegisterSchemeFactory(
- const std::string& scheme,
- HttpAuthHandlerFactory* factory) {
- std::string lower_scheme = StringToLowerASCII(scheme);
- FactoryMap::iterator it = factory_map_.find(lower_scheme);
- if (it != factory_map_.end()) {
- delete it->second;
- }
- if (factory)
- factory_map_[lower_scheme] = factory;
- else
- factory_map_.erase(it);
-}
-
int HttpAuthHandlerRegistryFactory::CreateAuthHandler(
HttpAuth::ChallengeTokenizer* challenge,
HttpAuth::Target target,
@@ -178,14 +188,4 @@ int HttpAuthHandlerRegistryFactory::CreateAuthHandler(
digest_nonce_count, net_log, handler);
}
-HttpAuthHandlerFactory* HttpAuthHandlerRegistryFactory::GetSchemeFactory(
- const std::string& scheme) const {
- std::string lower_scheme = StringToLowerASCII(scheme);
- FactoryMap::const_iterator it = factory_map_.find(lower_scheme);
- if (it == factory_map_.end()) {
- return NULL; // |scheme| is not registered.
- }
- return it->second;
-}
-
} // namespace net
diff --git a/net/http/http_auth_handler_factory.h b/net/http/http_auth_handler_factory.h
index 2879aed..a56d5e1 100644
--- a/net/http/http_auth_handler_factory.h
+++ b/net/http/http_auth_handler_factory.h
@@ -152,16 +152,6 @@ class HttpAuthHandlerRegistryFactory : public HttpAuthHandlerFactory {
// registry factory is destroyed.
HttpAuthHandlerFactory* GetSchemeFactory(const std::string& scheme) const;
- // Creates an auth handler by dispatching out to the registered factories
- // based on the first token in |challenge|.
- virtual int CreateAuthHandler(HttpAuth::ChallengeTokenizer* challenge,
- HttpAuth::Target target,
- const GURL& origin,
- CreateReason reason,
- int digest_nonce_count,
- const BoundNetLog& net_log,
- scoped_ptr<HttpAuthHandler>* handler);
-
// Creates an HttpAuthHandlerRegistryFactory.
//
// |supported_schemes| is a list of authentication schemes. Valid values
@@ -189,6 +179,16 @@ class HttpAuthHandlerRegistryFactory : public HttpAuthHandlerFactory {
bool negotiate_disable_cname_lookup,
bool negotiate_enable_port);
+ // Creates an auth handler by dispatching out to the registered factories
+ // based on the first token in |challenge|.
+ virtual int CreateAuthHandler(HttpAuth::ChallengeTokenizer* challenge,
+ HttpAuth::Target target,
+ const GURL& origin,
+ CreateReason reason,
+ int digest_nonce_count,
+ const BoundNetLog& net_log,
+ scoped_ptr<HttpAuthHandler>* handler);
+
private:
typedef std::map<std::string, HttpAuthHandlerFactory*> FactoryMap;
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 5710491..3ef5a7b7 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -383,29 +383,6 @@ disk_cache::Backend* HttpCache::GetCurrentBackend() {
return disk_cache_.get();
}
-int HttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
- // Do lazy initialization of disk cache if needed.
- if (!disk_cache_.get())
- CreateBackend(NULL, NULL); // We don't care about the result.
-
- trans->reset(new HttpCache::Transaction(this));
- return OK;
-}
-
-HttpCache* HttpCache::GetCache() {
- return this;
-}
-
-HttpNetworkSession* HttpCache::GetSession() {
- net::HttpNetworkLayer* network =
- static_cast<net::HttpNetworkLayer*>(network_layer_.get());
- return network->GetSession();
-}
-
-void HttpCache::Suspend(bool suspend) {
- network_layer_->Suspend(suspend);
-}
-
// static
bool HttpCache::ParseResponseInfo(const char* data, int len,
HttpResponseInfo* response_info,
@@ -442,6 +419,29 @@ void HttpCache::CloseCurrentConnections() {
}
}
+int HttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
+ // Do lazy initialization of disk cache if needed.
+ if (!disk_cache_.get())
+ CreateBackend(NULL, NULL); // We don't care about the result.
+
+ trans->reset(new HttpCache::Transaction(this));
+ return OK;
+}
+
+HttpCache* HttpCache::GetCache() {
+ return this;
+}
+
+HttpNetworkSession* HttpCache::GetSession() {
+ net::HttpNetworkLayer* network =
+ static_cast<net::HttpNetworkLayer*>(network_layer_.get());
+ return network->GetSession();
+}
+
+void HttpCache::Suspend(bool suspend) {
+ network_layer_->Suspend(suspend);
+}
+
//-----------------------------------------------------------------------------
int HttpCache::CreateBackend(disk_cache::Backend** backend,
diff --git a/net/http/http_cache.h b/net/http/http_cache.h
index 0641ca4..3438ba7 100644
--- a/net/http/http_cache.h
+++ b/net/http/http_cache.h
@@ -60,8 +60,6 @@ class HttpCache : public HttpTransactionFactory,
public base::SupportsWeakPtr<HttpCache>,
public base::NonThreadSafe {
public:
- ~HttpCache();
-
// The cache mode of operation.
enum Mode {
// Normal mode just behaves like a standard web cache.
@@ -145,6 +143,8 @@ class HttpCache : public HttpTransactionFactory,
NetLog* net_log,
BackendFactory* backend_factory);
+ ~HttpCache();
+
HttpTransactionFactory* network_layer() { return network_layer_.get(); }
// Retrieves the cache backend for this HttpCache instance. If the backend
@@ -157,12 +157,6 @@ class HttpCache : public HttpTransactionFactory,
// Returns the current backend (can be NULL).
disk_cache::Backend* GetCurrentBackend();
- // HttpTransactionFactory implementation:
- virtual int CreateTransaction(scoped_ptr<HttpTransaction>* trans);
- virtual HttpCache* GetCache();
- virtual HttpNetworkSession* GetSession();
- virtual void Suspend(bool suspend);
-
// Given a header data blob, convert it to a response info object.
static bool ParseResponseInfo(const char* data, int len,
HttpResponseInfo* response_info,
@@ -184,6 +178,12 @@ class HttpCache : public HttpTransactionFactory,
// immediately, but they will not be reusable. This is for debugging.
void CloseCurrentConnections();
+ // HttpTransactionFactory implementation:
+ virtual int CreateTransaction(scoped_ptr<HttpTransaction>* trans);
+ virtual HttpCache* GetCache();
+ virtual HttpNetworkSession* GetSession();
+ virtual void Suspend(bool suspend);
+
protected:
// Disk cache entry data indices.
enum {
@@ -211,15 +211,15 @@ class HttpCache : public HttpTransactionFactory,
typedef std::list<WorkItem*> WorkItemList;
struct ActiveEntry {
+ explicit ActiveEntry(disk_cache::Entry* entry);
+ ~ActiveEntry();
+
disk_cache::Entry* disk_entry;
Transaction* writer;
TransactionList readers;
TransactionList pending_queue;
bool will_process_pending_queue;
bool doomed;
-
- explicit ActiveEntry(disk_cache::Entry* entry);
- ~ActiveEntry();
};
typedef base::hash_map<std::string, ActiveEntry*> ActiveEntriesMap;
diff --git a/net/http/http_response_headers.cc b/net/http/http_response_headers.cc
index c2d098c..85df8d5 100644
--- a/net/http/http_response_headers.cc
+++ b/net/http/http_response_headers.cc
@@ -87,6 +87,17 @@ bool ShouldUpdateHeader(const std::string::const_iterator& name_begin,
} // namespace
+struct HttpResponseHeaders::ParsedHeader {
+ // A header "continuation" contains only a subsequent value for the
+ // preceding header. (Header values are comma separated.)
+ bool is_continuation() const { return name_begin == name_end; }
+
+ std::string::const_iterator name_begin;
+ std::string::const_iterator name_end;
+ std::string::const_iterator value_begin;
+ std::string::const_iterator value_end;
+};
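An illustration of the continuation convention described in the comment, assuming the comma-splitting behaviour it refers to (header name and values are hypothetical):

// "Cache-Control: no-cache, no-store" would presumably parse as:
//   ParsedHeader #1: name = "Cache-Control", value = "no-cache"
//   ParsedHeader #2: name_begin == name_end (a continuation), value = "no-store"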
+
//-----------------------------------------------------------------------------
HttpResponseHeaders::HttpResponseHeaders(const std::string& raw_input)
diff --git a/net/http/http_response_headers.h b/net/http/http_response_headers.h
index 2b556b3..df92c23 100644
--- a/net/http/http_response_headers.h
+++ b/net/http/http_response_headers.h
@@ -254,16 +254,7 @@ class HttpResponseHeaders
typedef base::hash_set<std::string> HeaderSet;
// The members of this structure point into raw_headers_.
- struct ParsedHeader {
- std::string::const_iterator name_begin;
- std::string::const_iterator name_end;
- std::string::const_iterator value_begin;
- std::string::const_iterator value_end;
-
- // A header "continuation" contains only a subsequent value for the
- // preceding header. (Header values are comma separated.)
- bool is_continuation() const { return name_begin == name_end; }
- };
+ struct ParsedHeader;
typedef std::vector<ParsedHeader> HeaderList;
HttpResponseHeaders();
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index 8e6eb13..a1fd9ea 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -158,14 +158,14 @@ class ClientSocketPoolBaseHelper
: public ConnectJob::Delegate,
public NetworkChangeNotifier::Observer {
public:
+ typedef uint32 Flags;
+
// Used to specify specific behavior for the ClientSocketPool.
enum Flag {
NORMAL = 0, // Normal behavior.
NO_IDLE_SOCKETS = 0x1, // Do not return an idle socket. Create a new one.
};
- typedef uint32 Flags;
-
class Request {
public:
Request(ClientSocketHandle* handle,
@@ -261,12 +261,6 @@ class ClientSocketPoolBaseHelper
return ClientSocketPool::kMaxConnectRetryIntervalMs;
}
- // ConnectJob::Delegate methods:
- virtual void OnConnectJobComplete(int result, ConnectJob* job);
-
- // NetworkChangeNotifier::Observer methods:
- virtual void OnIPAddressChanged();
-
int NumConnectJobsInGroup(const std::string& group_name) const {
return group_map_.find(group_name)->second->jobs().size();
}
@@ -292,6 +286,12 @@ class ClientSocketPoolBaseHelper
static void set_connect_backup_jobs_enabled(bool enabled);
void EnableConnectBackupJobs();
+ // ConnectJob::Delegate methods:
+ virtual void OnConnectJobComplete(int result, ConnectJob* job);
+
+ // NetworkChangeNotifier::Observer methods:
+ virtual void OnIPAddressChanged();
+
private:
friend class base::RefCounted<ClientSocketPoolBaseHelper>;