author    | erg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-01-25 17:29:39 +0000
committer | erg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-01-25 17:29:39 +0000
commit    | b7d08200bdfac32163cadd007403791949a590f2 (patch)
tree      | c279d1a45b2629bd893e08faf9c186e83d5136f5 /base
parent    | 5f59be200e9fb98af1424b56f113dd9461dfc0e4 (diff)
Properly order the cc files based off the h files in base/.
BUG=68682
TEST=compiles
Review URL: http://codereview.chromium.org/6385003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72505 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'base')
-rw-r--r-- | base/metrics/field_trial.cc |   4
-rw-r--r-- | base/metrics/histogram.cc   | 572
-rw-r--r-- | base/process_posix.cc       |  32
-rw-r--r-- | base/process_util.cc        |   8
-rw-r--r-- | base/process_util_linux.cc  | 168
-rw-r--r-- | base/weak_ptr.cc            |   4
6 files changed, 389 insertions, 399 deletions
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 5db88c1..fceccde 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -114,6 +114,8 @@ void FieldTrial::EnableBenchmarking() {
   enable_benchmarking_ = true;
 }
 
+FieldTrial::~FieldTrial() {}
+
 // static
 Time FieldTrial::GetBuildTime() {
   Time integral_build_time;
@@ -124,8 +126,6 @@ Time FieldTrial::GetBuildTime() {
   return integral_build_time;
 }
 
-FieldTrial::~FieldTrial() {}
-
 //------------------------------------------------------------------------------
 // FieldTrialList methods and members.
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 1526cd8..a308932 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -53,48 +53,6 @@ scoped_refptr<Histogram> Histogram::FactoryTimeGet(const std::string& name,
                              bucket_count, flags);
 }
 
-Histogram::Histogram(const std::string& name, Sample minimum,
-                     Sample maximum, size_t bucket_count)
-  : histogram_name_(name),
-    declared_min_(minimum),
-    declared_max_(maximum),
-    bucket_count_(bucket_count),
-    flags_(kNoFlags),
-    ranges_(bucket_count + 1, 0),
-    range_checksum_(0),
-    sample_() {
-  Initialize();
-}
-
-Histogram::Histogram(const std::string& name, TimeDelta minimum,
-                     TimeDelta maximum, size_t bucket_count)
-  : histogram_name_(name),
-    declared_min_(static_cast<int> (minimum.InMilliseconds())),
-    declared_max_(static_cast<int> (maximum.InMilliseconds())),
-    bucket_count_(bucket_count),
-    flags_(kNoFlags),
-    ranges_(bucket_count + 1, 0),
-    range_checksum_(0),
-    sample_() {
-  Initialize();
-}
-
-Histogram::~Histogram() {
-  if (StatisticsRecorder::dump_on_exit()) {
-    std::string output;
-    WriteAscii(true, "\n", &output);
-    LOG(INFO) << output;
-  }
-
-  // Just to make sure most derived class did this properly...
-  DCHECK(ValidateBucketRanges());
-  DCHECK(HasValidRangeChecksum());
-}
-
-bool Histogram::PrintEmptyBucket(size_t index) const {
-  return true;
-}
-
 void Histogram::Add(int value) {
   if (value > kSampleType_MAX - 1)
     value = kSampleType_MAX - 1;
@@ -190,31 +148,223 @@ void Histogram::WriteAscii(bool graph_it, const std::string& newline,
   DCHECK_EQ(sample_count, past);
 }
 
-bool Histogram::ValidateBucketRanges() const {
-  // Standard assertions that all bucket ranges should satisfy.
-  DCHECK_EQ(bucket_count_ + 1, ranges_.size());
-  DCHECK_EQ(0, ranges_[0]);
-  DCHECK_EQ(declared_min(), ranges_[1]);
-  DCHECK_EQ(declared_max(), ranges_[bucket_count_ - 1]);
-  DCHECK_EQ(kSampleType_MAX, ranges_[bucket_count_]);
+// static
+std::string Histogram::SerializeHistogramInfo(const Histogram& histogram,
+                                              const SampleSet& snapshot) {
+  DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type());
+
+  Pickle pickle;
+  pickle.WriteString(histogram.histogram_name());
+  pickle.WriteInt(histogram.declared_min());
+  pickle.WriteInt(histogram.declared_max());
+  pickle.WriteSize(histogram.bucket_count());
+  pickle.WriteInt(histogram.range_checksum());
+  pickle.WriteInt(histogram.histogram_type());
+  pickle.WriteInt(histogram.flags());
+
+  snapshot.Serialize(&pickle);
+  return std::string(static_cast<const char*>(pickle.data()), pickle.size());
+}
+
+// static
+bool Histogram::DeserializeHistogramInfo(const std::string& histogram_info) {
+  if (histogram_info.empty()) {
+    return false;
+  }
+
+  Pickle pickle(histogram_info.data(),
+                static_cast<int>(histogram_info.size()));
+  std::string histogram_name;
+  int declared_min;
+  int declared_max;
+  size_t bucket_count;
+  int range_checksum;
+  int histogram_type;
+  int pickle_flags;
+  SampleSet sample;
+
+  void* iter = NULL;
+  if (!pickle.ReadString(&iter, &histogram_name) ||
+      !pickle.ReadInt(&iter, &declared_min) ||
+      !pickle.ReadInt(&iter, &declared_max) ||
+      !pickle.ReadSize(&iter, &bucket_count) ||
+      !pickle.ReadInt(&iter, &range_checksum) ||
+      !pickle.ReadInt(&iter, &histogram_type) ||
+      !pickle.ReadInt(&iter, &pickle_flags) ||
+      !sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
+    LOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
+    return false;
+  }
+  DCHECK(pickle_flags & kIPCSerializationSourceFlag);
+  // Since these fields may have come from an untrusted renderer, do additional
+  // checks above and beyond those in Histogram::Initialize()
+  if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min ||
+      INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) {
+    LOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
+    return false;
+  }
+
+  Flags flags = static_cast<Flags>(pickle_flags & ~kIPCSerializationSourceFlag);
+
+  DCHECK_NE(NOT_VALID_IN_RENDERER, histogram_type);
+
+  scoped_refptr<Histogram> render_histogram(NULL);
+
+  if (histogram_type == HISTOGRAM) {
+    render_histogram = Histogram::FactoryGet(
+        histogram_name, declared_min, declared_max, bucket_count, flags);
+  } else if (histogram_type == LINEAR_HISTOGRAM) {
+    render_histogram = LinearHistogram::FactoryGet(
+        histogram_name, declared_min, declared_max, bucket_count, flags);
+  } else if (histogram_type == BOOLEAN_HISTOGRAM) {
+    render_histogram = BooleanHistogram::FactoryGet(histogram_name, flags);
+  } else {
+    LOG(ERROR) << "Error Deserializing Histogram Unknown histogram_type: "
+               << histogram_type;
+    return false;
+  }
+
+  DCHECK_EQ(render_histogram->declared_min(), declared_min);
+  DCHECK_EQ(render_histogram->declared_max(), declared_max);
+  DCHECK_EQ(render_histogram->bucket_count(), bucket_count);
+  DCHECK_EQ(render_histogram->range_checksum(), range_checksum);
+  DCHECK_EQ(render_histogram->histogram_type(), histogram_type);
+
+  if (render_histogram->flags() & kIPCSerializationSourceFlag) {
+    DVLOG(1) << "Single process mode, histogram observed and not copied: "
+             << histogram_name;
+  } else {
+    DCHECK_EQ(flags & render_histogram->flags(), flags);
+    render_histogram->AddSampleSet(sample);
+  }
+
   return true;
 }
 
-void Histogram::Initialize() {
-  sample_.Resize(*this);
-  if (declared_min_ < 1)
-    declared_min_ = 1;
-  if (declared_max_ > kSampleType_MAX - 1)
-    declared_max_ = kSampleType_MAX - 1;
-  DCHECK_LE(declared_min_, declared_max_);
-  DCHECK_GT(bucket_count_, 1u);
-  size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
-  DCHECK_LE(bucket_count_, maximal_bucket_count);
-  DCHECK_EQ(0, ranges_[0]);
-  ranges_[bucket_count_] = kSampleType_MAX;
-  InitializeBucketRange();
+//------------------------------------------------------------------------------
+// Methods for the validating a sample and a related histogram.
+//------------------------------------------------------------------------------
+
+Histogram::Inconsistencies Histogram::FindCorruption(
+    const SampleSet& snapshot) const {
+  int inconsistencies = NO_INCONSISTENCIES;
+  Sample previous_range = -1;  // Bottom range is always 0.
+  Sample checksum = 0;
+  int64 count = 0;
+  for (size_t index = 0; index < bucket_count(); ++index) {
+    count += snapshot.counts(index);
+    int new_range = ranges(index);
+    checksum += new_range;
+    if (previous_range >= new_range)
+      inconsistencies |= BUCKET_ORDER_ERROR;
+    previous_range = new_range;
+  }
+
+  if (checksum != range_checksum_)
+    inconsistencies |= RANGE_CHECKSUM_ERROR;
+
+  int64 delta64 = snapshot.redundant_count() - count;
+  if (delta64 != 0) {
+    int delta = static_cast<int>(delta64);
+    if (delta != delta64)
+      delta = INT_MAX;  // Flag all giant errors as INT_MAX.
+    // Since snapshots of histograms are taken asynchronously relative to
+    // sampling (and snapped from different threads), it is pretty likely that
+    // we'll catch a redundant count that doesn't match the sample count.  We
+    // allow for a certain amount of slop before flagging this as an
+    // inconsistency.  Even with an inconsistency, we'll snapshot it again (for
+    // UMA in about a half hour, so we'll eventually get the data, if it was
+    // not the result of a corruption.  If histograms show that 1 is "too tight"
+    // then we may try to use 2 or 3 for this slop value.
+    const int kCommonRaceBasedCountMismatch = 1;
+    if (delta > 0) {
+      UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
+      if (delta > kCommonRaceBasedCountMismatch)
+        inconsistencies |= COUNT_HIGH_ERROR;
+    } else {
+      DCHECK_GT(0, delta);
+      UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
+      if (-delta > kCommonRaceBasedCountMismatch)
+        inconsistencies |= COUNT_LOW_ERROR;
+    }
+  }
+  return static_cast<Inconsistencies>(inconsistencies);
+}
+
+Histogram::ClassType Histogram::histogram_type() const {
+  return HISTOGRAM;
+}
+
+Histogram::Sample Histogram::ranges(size_t i) const {
+  return ranges_[i];
+}
+
+size_t Histogram::bucket_count() const {
+  return bucket_count_;
+}
+
+// Do a safe atomic snapshot of sample data.
+// This implementation assumes we are on a safe single thread.
+void Histogram::SnapshotSample(SampleSet* sample) const {
+  // Note locking not done in this version!!!
+  *sample = sample_;
+}
+
+bool Histogram::HasConstructorArguments(Sample minimum,
+                                        Sample maximum,
+                                        size_t bucket_count) {
+  return ((minimum == declared_min_) && (maximum == declared_max_) &&
+          (bucket_count == bucket_count_));
+}
+
+bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
+                                                 TimeDelta maximum,
+                                                 size_t bucket_count) {
+  return ((minimum.InMilliseconds() == declared_min_) &&
+          (maximum.InMilliseconds() == declared_max_) &&
+          (bucket_count == bucket_count_));
+}
+
+Histogram::Histogram(const std::string& name, Sample minimum,
+                     Sample maximum, size_t bucket_count)
+  : histogram_name_(name),
+    declared_min_(minimum),
+    declared_max_(maximum),
+    bucket_count_(bucket_count),
+    flags_(kNoFlags),
+    ranges_(bucket_count + 1, 0),
+    range_checksum_(0),
+    sample_() {
+  Initialize();
+}
+
+Histogram::Histogram(const std::string& name, TimeDelta minimum,
+                     TimeDelta maximum, size_t bucket_count)
+  : histogram_name_(name),
+    declared_min_(static_cast<int> (minimum.InMilliseconds())),
+    declared_max_(static_cast<int> (maximum.InMilliseconds())),
+    bucket_count_(bucket_count),
+    flags_(kNoFlags),
+    ranges_(bucket_count + 1, 0),
+    range_checksum_(0),
+    sample_() {
+  Initialize();
+}
+
+Histogram::~Histogram() {
+  if (StatisticsRecorder::dump_on_exit()) {
+    std::string output;
+    WriteAscii(true, "\n", &output);
+    LOG(INFO) << output;
+  }
+
+  // Just to make sure most derived class did this properly...
   DCHECK(ValidateBucketRanges());
-  StatisticsRecorder::Register(this);
+  DCHECK(HasValidRangeChecksum());
+}
+
+bool Histogram::PrintEmptyBucket(size_t index) const {
+  return true;
 }
 
 // Calculate what range of values are held in each bucket.
@@ -295,60 +445,64 @@ void Histogram::ResetRangeChecksum() {
   range_checksum_ = CalculateRangeChecksum();
 }
 
-bool Histogram::HasValidRangeChecksum() const {
-  return CalculateRangeChecksum() == range_checksum_;
-}
-
-Histogram::Sample Histogram::CalculateRangeChecksum() const {
-  DCHECK_EQ(ranges_.size(), bucket_count() + 1);
-  Sample checksum = 0;
-  for (size_t index = 0; index < bucket_count(); ++index) {
-    checksum += ranges(index);
-  }
-  return checksum;
+const std::string Histogram::GetAsciiBucketRange(size_t i) const {
+  std::string result;
+  if (kHexRangePrintingFlag & flags_)
+    StringAppendF(&result, "%#x", ranges(i));
+  else
+    StringAppendF(&result, "%d", ranges(i));
+  return result;
 }
 
-//------------------------------------------------------------------------------
-// The following two methods can be overridden to provide a thread safe
-// version of this class.  The cost of locking is low... but an error in each
-// of these methods has minimal impact.  For now, I'll leave this unlocked,
-// and I don't believe I can loose more than a count or two.
-// The vectors are NOT reallocated, so there is no risk of them moving around.
-
 // Update histogram data with new sample.
 void Histogram::Accumulate(Sample value, Count count, size_t index) {
   // Note locking not done in this version!!!
   sample_.Accumulate(value, count, index);
 }
 
-// Do a safe atomic snapshot of sample data.
-// This implementation assumes we are on a safe single thread.
-void Histogram::SnapshotSample(SampleSet* sample) const {
-  // Note locking not done in this version!!!
-  *sample = sample_;
+void Histogram::SetBucketRange(size_t i, Sample value) {
+  DCHECK_GT(bucket_count_, i);
+  ranges_[i] = value;
 }
 
-bool Histogram::HasConstructorArguments(Sample minimum,
-                                        Sample maximum,
-                                        size_t bucket_count) {
-  return ((minimum == declared_min_) && (maximum == declared_max_) &&
-          (bucket_count == bucket_count_));
+bool Histogram::ValidateBucketRanges() const {
+  // Standard assertions that all bucket ranges should satisfy.
+  DCHECK_EQ(bucket_count_ + 1, ranges_.size());
+  DCHECK_EQ(0, ranges_[0]);
+  DCHECK_EQ(declared_min(), ranges_[1]);
+  DCHECK_EQ(declared_max(), ranges_[bucket_count_ - 1]);
+  DCHECK_EQ(kSampleType_MAX, ranges_[bucket_count_]);
+  return true;
 }
 
-bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
-                                                 TimeDelta maximum,
-                                                 size_t bucket_count) {
-  return ((minimum.InMilliseconds() == declared_min_) &&
-          (maximum.InMilliseconds() == declared_max_) &&
-          (bucket_count == bucket_count_));
+void Histogram::Initialize() {
+  sample_.Resize(*this);
+  if (declared_min_ < 1)
+    declared_min_ = 1;
+  if (declared_max_ > kSampleType_MAX - 1)
+    declared_max_ = kSampleType_MAX - 1;
+  DCHECK_LE(declared_min_, declared_max_);
+  DCHECK_GT(bucket_count_, 1u);
+  size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
+  DCHECK_LE(bucket_count_, maximal_bucket_count);
+  DCHECK_EQ(0, ranges_[0]);
+  ranges_[bucket_count_] = kSampleType_MAX;
+  InitializeBucketRange();
+  DCHECK(ValidateBucketRanges());
+  StatisticsRecorder::Register(this);
 }
 
-//------------------------------------------------------------------------------
-// Accessor methods
+bool Histogram::HasValidRangeChecksum() const {
+  return CalculateRangeChecksum() == range_checksum_;
+}
 
-void Histogram::SetBucketRange(size_t i, Sample value) {
-  DCHECK_GT(bucket_count_, i);
-  ranges_[i] = value;
+Histogram::Sample Histogram::CalculateRangeChecksum() const {
+  DCHECK_EQ(ranges_.size(), bucket_count() + 1);
+  Sample checksum = 0;
+  for (size_t index = 0; index < bucket_count(); ++index) {
+    checksum += ranges(index);
+  }
+  return checksum;
 }
 
 //------------------------------------------------------------------------------
@@ -400,15 +554,6 @@ void Histogram::WriteAsciiBucketContext(const int64 past,
   }
 }
 
-const std::string Histogram::GetAsciiBucketRange(size_t i) const {
-  std::string result;
-  if (kHexRangePrintingFlag & flags_)
-    StringAppendF(&result, "%#x", ranges(i));
-  else
-    StringAppendF(&result, "%d", ranges(i));
-  return result;
-}
-
 void Histogram::WriteAsciiBucketValue(Count current, double scaled_sum,
                                       std::string* output) const {
   StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
@@ -428,161 +573,6 @@ void Histogram::WriteAsciiBucketGraph(double current_size, double max_size,
   output->append(" ");
 }
 
-// static
-std::string Histogram::SerializeHistogramInfo(const Histogram& histogram,
-                                              const SampleSet& snapshot) {
-  DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type());
-
-  Pickle pickle;
-  pickle.WriteString(histogram.histogram_name());
-  pickle.WriteInt(histogram.declared_min());
-  pickle.WriteInt(histogram.declared_max());
-  pickle.WriteSize(histogram.bucket_count());
-  pickle.WriteInt(histogram.range_checksum());
-  pickle.WriteInt(histogram.histogram_type());
-  pickle.WriteInt(histogram.flags());
-
-  snapshot.Serialize(&pickle);
-  return std::string(static_cast<const char*>(pickle.data()), pickle.size());
-}
-
-// static
-bool Histogram::DeserializeHistogramInfo(const std::string& histogram_info) {
-  if (histogram_info.empty()) {
-    return false;
-  }
-
-  Pickle pickle(histogram_info.data(),
-                static_cast<int>(histogram_info.size()));
-  std::string histogram_name;
-  int declared_min;
-  int declared_max;
-  size_t bucket_count;
-  int range_checksum;
-  int histogram_type;
-  int pickle_flags;
-  SampleSet sample;
-
-  void* iter = NULL;
-  if (!pickle.ReadString(&iter, &histogram_name) ||
-      !pickle.ReadInt(&iter, &declared_min) ||
-      !pickle.ReadInt(&iter, &declared_max) ||
-      !pickle.ReadSize(&iter, &bucket_count) ||
-      !pickle.ReadInt(&iter, &range_checksum) ||
-      !pickle.ReadInt(&iter, &histogram_type) ||
-      !pickle.ReadInt(&iter, &pickle_flags) ||
-      !sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
-    LOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
-    return false;
-  }
-  DCHECK(pickle_flags & kIPCSerializationSourceFlag);
-  // Since these fields may have come from an untrusted renderer, do additional
-  // checks above and beyond those in Histogram::Initialize()
-  if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min ||
-      INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) {
-    LOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
-    return false;
-  }
-
-  Flags flags = static_cast<Flags>(pickle_flags & ~kIPCSerializationSourceFlag);
-
-  DCHECK_NE(NOT_VALID_IN_RENDERER, histogram_type);
-
-  scoped_refptr<Histogram> render_histogram(NULL);
-
-  if (histogram_type == HISTOGRAM) {
-    render_histogram = Histogram::FactoryGet(
-        histogram_name, declared_min, declared_max, bucket_count, flags);
-  } else if (histogram_type == LINEAR_HISTOGRAM) {
-    render_histogram = LinearHistogram::FactoryGet(
-        histogram_name, declared_min, declared_max, bucket_count, flags);
-  } else if (histogram_type == BOOLEAN_HISTOGRAM) {
-    render_histogram = BooleanHistogram::FactoryGet(histogram_name, flags);
-  } else {
-    LOG(ERROR) << "Error Deserializing Histogram Unknown histogram_type: "
-               << histogram_type;
-    return false;
-  }
-
-  DCHECK_EQ(render_histogram->declared_min(), declared_min);
-  DCHECK_EQ(render_histogram->declared_max(), declared_max);
-  DCHECK_EQ(render_histogram->bucket_count(), bucket_count);
-  DCHECK_EQ(render_histogram->range_checksum(), range_checksum);
-  DCHECK_EQ(render_histogram->histogram_type(), histogram_type);
-
-  if (render_histogram->flags() & kIPCSerializationSourceFlag) {
-    DVLOG(1) << "Single process mode, histogram observed and not copied: "
-             << histogram_name;
-  } else {
-    DCHECK_EQ(flags & render_histogram->flags(), flags);
-    render_histogram->AddSampleSet(sample);
-  }
-
-  return true;
-}
-
-//------------------------------------------------------------------------------
-// Methods for the validating a sample and a related histogram.
-//------------------------------------------------------------------------------
-
-Histogram::Inconsistencies Histogram::FindCorruption(
-    const SampleSet& snapshot) const {
-  int inconsistencies = NO_INCONSISTENCIES;
-  Sample previous_range = -1;  // Bottom range is always 0.
-  Sample checksum = 0;
-  int64 count = 0;
-  for (size_t index = 0; index < bucket_count(); ++index) {
-    count += snapshot.counts(index);
-    int new_range = ranges(index);
-    checksum += new_range;
-    if (previous_range >= new_range)
-      inconsistencies |= BUCKET_ORDER_ERROR;
-    previous_range = new_range;
-  }
-
-  if (checksum != range_checksum_)
-    inconsistencies |= RANGE_CHECKSUM_ERROR;
-
-  int64 delta64 = snapshot.redundant_count() - count;
-  if (delta64 != 0) {
-    int delta = static_cast<int>(delta64);
-    if (delta != delta64)
-      delta = INT_MAX;  // Flag all giant errors as INT_MAX.
-    // Since snapshots of histograms are taken asynchronously relative to
-    // sampling (and snapped from different threads), it is pretty likely that
-    // we'll catch a redundant count that doesn't match the sample count.  We
-    // allow for a certain amount of slop before flagging this as an
-    // inconsistency.  Even with an inconsistency, we'll snapshot it again (for
-    // UMA in about a half hour, so we'll eventually get the data, if it was
-    // not the result of a corruption.  If histograms show that 1 is "too tight"
-    // then we may try to use 2 or 3 for this slop value.
-    const int kCommonRaceBasedCountMismatch = 1;
-    if (delta > 0) {
-      UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
-      if (delta > kCommonRaceBasedCountMismatch)
-        inconsistencies |= COUNT_HIGH_ERROR;
-    } else {
-      DCHECK_GT(0, delta);
-      UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
-      if (-delta > kCommonRaceBasedCountMismatch)
-        inconsistencies |= COUNT_LOW_ERROR;
-    }
-  }
-  return static_cast<Inconsistencies>(inconsistencies);
-}
-
-Histogram::ClassType Histogram::histogram_type() const {
-  return HISTOGRAM;
-}
-
-Histogram::Sample Histogram::ranges(size_t i) const {
-  return ranges_[i];
-}
-
-size_t Histogram::bucket_count() const {
-  return bucket_count_;
-}
-
 //------------------------------------------------------------------------------
 // Methods for the Histogram::SampleSet class
 //------------------------------------------------------------------------------
@@ -700,6 +690,9 @@ bool Histogram::SampleSet::Deserialize(void** iter, const Pickle& pickle) {
 // buckets.
 //------------------------------------------------------------------------------
 
+LinearHistogram::~LinearHistogram() {
+}
+
 scoped_refptr<Histogram> LinearHistogram::FactoryGet(const std::string& name,
                                                      Sample minimum,
                                                      Sample maximum,
@@ -733,7 +726,15 @@ scoped_refptr<Histogram> LinearHistogram::FactoryTimeGet(
                                bucket_count, flags);
 }
 
-LinearHistogram::~LinearHistogram() {
+Histogram::ClassType LinearHistogram::histogram_type() const {
+  return LINEAR_HISTOGRAM;
+}
+
+void LinearHistogram::SetRangeDescriptions(
+    const DescriptionPair descriptions[]) {
+  for (int i =0; descriptions[i].description; ++i) {
+    bucket_description_[descriptions[i].sample] = descriptions[i].description;
+  }
 }
 
 LinearHistogram::LinearHistogram(const std::string& name,
@@ -757,30 +758,6 @@ LinearHistogram::LinearHistogram(const std::string& name,
   DCHECK(ValidateBucketRanges());
 }
 
-Histogram::ClassType LinearHistogram::histogram_type() const {
-  return LINEAR_HISTOGRAM;
-}
-
-void LinearHistogram::SetRangeDescriptions(
-    const DescriptionPair descriptions[]) {
-  for (int i =0; descriptions[i].description; ++i) {
-    bucket_description_[descriptions[i].sample] = descriptions[i].description;
-  }
-}
-
-const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
-  int range = ranges(i);
-  BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
-  if (it == bucket_description_.end())
-    return Histogram::GetAsciiBucketRange(i);
-  return it->second;
-}
-
-bool LinearHistogram::PrintEmptyBucket(size_t index) const {
-  return bucket_description_.find(ranges(index)) == bucket_description_.end();
-}
-
-
 void LinearHistogram::InitializeBucketRange() {
   DCHECK_GT(declared_min(), 0);  // 0 is the underflow bucket here.
   double min = declared_min();
@@ -802,6 +779,19 @@ double LinearHistogram::GetBucketSize(Count current, size_t i) const {
   return current/denominator;
 }
 
+const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
+  int range = ranges(i);
+  BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
+  if (it == bucket_description_.end())
+    return Histogram::GetAsciiBucketRange(i);
+  return it->second;
+}
+
+bool LinearHistogram::PrintEmptyBucket(size_t index) const {
+  return bucket_description_.find(ranges(index)) == bucket_description_.end();
+}
+
+
 //------------------------------------------------------------------------------
 // This section provides implementation for BooleanHistogram.
 //------------------------------------------------------------------------------
diff --git a/base/process_posix.cc b/base/process_posix.cc
index ee70e5a..6e65ebf 100644
--- a/base/process_posix.cc
+++ b/base/process_posix.cc
@@ -13,6 +13,22 @@
 
 namespace base {
 
+// static
+Process Process::Current() {
+  return Process(GetCurrentProcessHandle());
+}
+
+ProcessId Process::pid() const {
+  if (process_ == 0)
+    return 0;
+
+  return GetProcId(process_);
+}
+
+bool Process::is_current() const {
+  return process_ == GetCurrentProcessHandle();
+}
+
 void Process::Close() {
   process_ = 0;
   // if the process wasn't terminated (so we waited) or the state
@@ -43,22 +59,6 @@ bool Process::SetProcessBackgrounded(bool value) {
 }
 #endif
 
-ProcessId Process::pid() const {
-  if (process_ == 0)
-    return 0;
-
-  return GetProcId(process_);
-}
-
-bool Process::is_current() const {
-  return process_ == GetCurrentProcessHandle();
-}
-
-// static
-Process Process::Current() {
-  return Process(GetCurrentProcessHandle());
-}
-
 int Process::GetPriority() const {
   DCHECK(process_);
   return getpriority(PRIO_PROCESS, process_);
diff --git a/base/process_util.cc b/base/process_util.cc
index 7b2935d..462dcbf 100644
--- a/base/process_util.cc
+++ b/base/process_util.cc
@@ -44,10 +44,6 @@ const ProcessEntry* ProcessIterator::NextProcessEntry() {
   return NULL;
 }
 
-bool ProcessIterator::IncludeEntry() {
-  return !filter_ || filter_->Includes(entry_);
-}
-
 ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
   ProcessEntries found;
   while (const ProcessEntry* process_entry = NextProcessEntry()) {
@@ -56,6 +52,10 @@ ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
   return found;
 }
 
+bool ProcessIterator::IncludeEntry() {
+  return !filter_ || filter_->Includes(entry_);
+}
+
 NamedProcessIterator::NamedProcessIterator(
     const FilePath::StringType& executable_name, const ProcessFilter* filter)
     : ProcessIterator(filter),
diff --git a/base/process_util_linux.cc b/base/process_util_linux.cc
index dcaeeb4..b6c6a2c 100644
--- a/base/process_util_linux.cc
+++ b/base/process_util_linux.cc
@@ -68,6 +68,39 @@ bool GetProcCmdline(pid_t pid, std::vector<std::string>* proc_cmd_line_args) {
   return true;
 }
 
+// Get the total CPU of a single process.  Return value is number of jiffies
+// on success or -1 on error.
+int GetProcessCPU(pid_t pid) {
+  // Synchronously reading files in /proc is safe.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // Use /proc/<pid>/task to find all threads and parse their /stat file.
+  FilePath path = FilePath(StringPrintf("/proc/%d/task/", pid));
+
+  DIR* dir = opendir(path.value().c_str());
+  if (!dir) {
+    PLOG(ERROR) << "opendir(" << path.value() << ")";
+    return -1;
+  }
+
+  int total_cpu = 0;
+  while (struct dirent* ent = readdir(dir)) {
+    if (ent->d_name[0] == '.')
+      continue;
+
+    FilePath stat_path = path.AppendASCII(ent->d_name).AppendASCII("stat");
+    std::string stat;
+    if (file_util::ReadFileToString(stat_path, &stat)) {
+      int cpu = base::ParseProcStatCPU(stat);
+      if (cpu > 0)
+        total_cpu += cpu;
+    }
+  }
+  closedir(dir);
+
+  return total_cpu;
+}
+
 }  // namespace
 
 namespace base {
@@ -226,14 +259,6 @@ bool NamedProcessIterator::IncludeEntry() {
 }
 
 
-ProcessMetrics::ProcessMetrics(ProcessHandle process)
-    : process_(process),
-      last_time_(0),
-      last_system_time_(0),
-      last_cpu_(0) {
-  processor_count_ = base::SysInfo::NumberOfProcessors();
-}
-
 // static
 ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
   return new ProcessMetrics(process);
@@ -399,6 +424,49 @@ bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
   return true;
 }
 
+double ProcessMetrics::GetCPUUsage() {
+  // This queries the /proc-specific scaling factor which is
+  // conceptually the system hertz.  To dump this value on another
+  // system, try
+  //   od -t dL /proc/self/auxv
+  // and look for the number after 17 in the output; mine is
+  //   0000040          17         100           3   134512692
+  // which means the answer is 100.
+  // It may be the case that this value is always 100.
+  static const int kHertz = sysconf(_SC_CLK_TCK);
+
+  struct timeval now;
+  int retval = gettimeofday(&now, NULL);
+  if (retval)
+    return 0;
+  int64 time = TimeValToMicroseconds(now);
+
+  if (last_time_ == 0) {
+    // First call, just set the last values.
+    last_time_ = time;
+    last_cpu_ = GetProcessCPU(process_);
+    return 0;
+  }
+
+  int64 time_delta = time - last_time_;
+  DCHECK_NE(time_delta, 0);
+  if (time_delta == 0)
+    return 0;
+
+  int cpu = GetProcessCPU(process_);
+
+  // We have the number of jiffies in the time period.  Convert to percentage.
+  // Note this means we will go *over* 100 in the case where multiple threads
+  // are together adding to more than one CPU's worth.
+  int percentage = 100 * (cpu - last_cpu_) /
+      (kHertz * TimeDelta::FromMicroseconds(time_delta).InSecondsF());
+
+  last_time_ = time;
+  last_cpu_ = cpu;
+
+  return percentage;
+}
+
 // To have /proc/self/io file you must enable CONFIG_TASK_IO_ACCOUNTING
 // in your kernel configuration.
 bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
@@ -446,6 +514,14 @@ bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
   return true;
 }
 
+ProcessMetrics::ProcessMetrics(ProcessHandle process)
+    : process_(process),
+      last_time_(0),
+      last_system_time_(0),
+      last_cpu_(0) {
+  processor_count_ = base::SysInfo::NumberOfProcessors();
+}
+
 // Exposed for testing.
 
 int ParseProcStatCPU(const std::string& input) {
@@ -469,82 +545,6 @@ int ParseProcStatCPU(const std::string& input) {
   return fields11 + fields12;
 }
 
-// Get the total CPU of a single process.  Return value is number of jiffies
-// on success or -1 on error.
-static int GetProcessCPU(pid_t pid) {
-  // Synchronously reading files in /proc is safe.
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-
-  // Use /proc/<pid>/task to find all threads and parse their /stat file.
-  FilePath path = FilePath(StringPrintf("/proc/%d/task/", pid));
-
-  DIR* dir = opendir(path.value().c_str());
-  if (!dir) {
-    PLOG(ERROR) << "opendir(" << path.value() << ")";
-    return -1;
-  }
-
-  int total_cpu = 0;
-  while (struct dirent* ent = readdir(dir)) {
-    if (ent->d_name[0] == '.')
-      continue;
-
-    FilePath stat_path = path.AppendASCII(ent->d_name).AppendASCII("stat");
-    std::string stat;
-    if (file_util::ReadFileToString(stat_path, &stat)) {
-      int cpu = ParseProcStatCPU(stat);
-      if (cpu > 0)
-        total_cpu += cpu;
-    }
-  }
-  closedir(dir);
-
-  return total_cpu;
-}
-
-double ProcessMetrics::GetCPUUsage() {
-  // This queries the /proc-specific scaling factor which is
-  // conceptually the system hertz.  To dump this value on another
-  // system, try
-  //   od -t dL /proc/self/auxv
-  // and look for the number after 17 in the output; mine is
-  //   0000040          17         100           3   134512692
-  // which means the answer is 100.
-  // It may be the case that this value is always 100.
-  static const int kHertz = sysconf(_SC_CLK_TCK);
-
-  struct timeval now;
-  int retval = gettimeofday(&now, NULL);
-  if (retval)
-    return 0;
-  int64 time = TimeValToMicroseconds(now);
-
-  if (last_time_ == 0) {
-    // First call, just set the last values.
-    last_time_ = time;
-    last_cpu_ = GetProcessCPU(process_);
-    return 0;
-  }
-
-  int64 time_delta = time - last_time_;
-  DCHECK_NE(time_delta, 0);
-  if (time_delta == 0)
-    return 0;
-
-  int cpu = GetProcessCPU(process_);
-
-  // We have the number of jiffies in the time period.  Convert to percentage.
-  // Note this means we will go *over* 100 in the case where multiple threads
-  // are together adding to more than one CPU's worth.
-  int percentage = 100 * (cpu - last_cpu_) /
-      (kHertz * TimeDelta::FromMicroseconds(time_delta).InSecondsF());
-
-  last_time_ = time;
-  last_cpu_ = cpu;
-
-  return percentage;
-}
-
 namespace {
 
 // The format of /proc/meminfo is:
diff --git a/base/weak_ptr.cc b/base/weak_ptr.cc
index 6473b4a..86c89c1 100644
--- a/base/weak_ptr.cc
+++ b/base/weak_ptr.cc
@@ -61,10 +61,10 @@ void WeakReferenceOwner::Invalidate() {
 WeakPtrBase::WeakPtrBase() {
 }
 
-WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
+WeakPtrBase::~WeakPtrBase() {
 }
 
-WeakPtrBase::~WeakPtrBase() {
+WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
 }
 
 }  // namespace internal