author    | ruuda <ruuda@google.com>             | 2015-11-03 11:53:40 -0800
committer | Commit bot <commit-bot@chromium.org> | 2015-11-03 19:54:53 +0000
commit    | d5ad756ad44daf91bb2f82899bf758553a104789
tree      | 5341cf0dae4f456e8aa55653e277ecfb969bcdc3
parent    | ba153f6097e563551587ffd9399994f8642f8f1c
[Tracing] Add support for type IDs to heap profiler
This adds a |type_id| field to |AllocationContext|, and it makes
|HeapDumpWriter| aggregate by type ID in addition to backtrace.
This is part of the heap profiler in chrome://tracing. The UI will break
temporarily because it does not handle type IDs yet.
BUG=524631
Review URL: https://codereview.chromium.org/1409163008
Cr-Commit-Position: refs/heads/master@{#357585}
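The gist of the change: allocations are recorded per full allocation context (backtrace plus type ID) and regrouped per backtrace and per type ID when the heap dump is written. Below is a minimal standalone sketch of that regrouping step, not code from the patch; the type aliases, names and byte counts are invented for illustration (std::string stands in for the real Backtrace, std::map for Chromium's hash_map).

// Sketch only: simplified stand-ins for the types in the patch.
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <utility>

using TypeId = uint16_t;                       // 0 means "type not known".
using Backtrace = std::string;                 // Stand-in for the frame array.
using Context = std::pair<Backtrace, TypeId>;  // AllocationContext stand-in.

int main() {
  // Bytes allocated, keyed by the full context (what |bytes_by_context_| holds).
  std::map<Context, std::size_t> bytes_by_context = {
      {{"MessageLoop::Run -> Task", 1}, 4096},
      {{"MessageLoop::Run -> Task", 2}, 512},
      {{"RenderFrame::Paint", 1}, 2048},
  };

  // Regroup in one pass: total size, size per backtrace, size per type ID,
  // mirroring the loop in HeapDumpWriter::WriteHeapDump.
  std::size_t total_size = 0;
  std::map<Backtrace, std::size_t> bytes_by_backtrace;
  std::map<TypeId, std::size_t> bytes_by_type;
  for (const auto& entry : bytes_by_context) {
    total_size += entry.second;
    bytes_by_backtrace[entry.first.first] += entry.second;
    bytes_by_type[entry.first.second] += entry.second;
  }

  // total_size == 6656; the "MessageLoop::Run -> Task" backtrace accounts for
  // 4608 bytes, and type ID 1 accounts for 6144 bytes.
  return 0;
}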
4 files changed, 90 insertions, 20 deletions
diff --git a/base/trace_event/memory_profiler_allocation_context.cc b/base/trace_event/memory_profiler_allocation_context.cc
index bafbf7f..6d02876 100644
--- a/base/trace_event/memory_profiler_allocation_context.cc
+++ b/base/trace_event/memory_profiler_allocation_context.cc
@@ -114,6 +114,10 @@ void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
   out->append("}");  // End the |stackFrames| dictionary.
 }
 
+bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
+  return (lhs.backtrace == rhs.backtrace) && (lhs.type_id == rhs.type_id);
+}
+
 AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() {
   auto tracker =
       static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
@@ -181,6 +185,8 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
     std::fill(dst, dst_end, nullptr);
   }
 
+  ctx.type_id = 0;
+
   return ctx;
 }
 
@@ -188,6 +194,7 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
 }  // namespace base
 
 namespace BASE_HASH_NAMESPACE {
 
+using base::trace_event::AllocationContext;
 using base::trace_event::Backtrace;
 
 size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
@@ -195,4 +202,14 @@ size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
                              sizeof(backtrace.frames));
 }
 
+size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
+  size_t ctx_hash = hash<Backtrace>()(ctx.backtrace);
+
+  // Multiply one side to break the commutativity of +. Multiplication with a
+  // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
+  // randomness is preserved. The type ID is assumed to be distributed randomly
+  // already so there is no need to hash it.
+  return (ctx_hash * 3) + static_cast<size_t>(ctx.type_id);
+}
+
 }  // BASE_HASH_NAMESPACE
diff --git a/base/trace_event/memory_profiler_allocation_context.h b/base/trace_event/memory_profiler_allocation_context.h
index 31c134d..7c110ef 100644
--- a/base/trace_event/memory_profiler_allocation_context.h
+++ b/base/trace_event/memory_profiler_allocation_context.h
@@ -142,10 +142,23 @@ class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
 // when heap profiling is enabled. To simplify memory management for
 // bookkeeping, this struct has a fixed size. All |const char*|s here
 // must have static lifetime.
+// TODO(ruuda): Make the default constructor private to avoid accidentally
+// constructing an instance and forgetting to initialize it. Only
+// |AllocationContextTracker| should be able to construct. (And tests.)
 struct BASE_EXPORT AllocationContext {
+  // A type ID is a number that is unique for every C++ type. A type ID is
+  // stored instead of the type name to avoid inflating the binary with type
+  // name strings. There is an out of band lookup table mapping IDs to the type
+  // names. A value of 0 means that the type is not known.
+  using TypeId = uint16_t;
+
   Backtrace backtrace;
+  TypeId type_id;
 };
 
+bool BASE_EXPORT operator==(const AllocationContext& lhs,
+                            const AllocationContext& rhs);
+
 // The allocation context tracker keeps track of thread-local context for heap
 // profiling. It includes a pseudo stack of trace events. On every allocation
 // the tracker provides a snapshot of its context in the form of an
@@ -207,6 +220,11 @@ struct hash<base::trace_event::Backtrace> {
   size_t operator()(const base::trace_event::Backtrace& backtrace) const;
 };
 
+template <>
+struct hash<base::trace_event::AllocationContext> {
+  size_t operator()(const base::trace_event::AllocationContext& context) const;
+};
+
 }  // BASE_HASH_NAMESPACE
 
 #endif  // BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_CONTEXT_H_
diff --git a/base/trace_event/memory_profiler_heap_dump_writer.cc b/base/trace_event/memory_profiler_heap_dump_writer.cc
index 37395ac..81b1285 100644
--- a/base/trace_event/memory_profiler_heap_dump_writer.cc
+++ b/base/trace_event/memory_profiler_heap_dump_writer.cc
@@ -4,8 +4,8 @@
 
 #include "base/trace_event/memory_profiler_heap_dump_writer.h"
 
+#include <algorithm>
 #include <iterator>
-#include <numeric>
 
 #include "base/format_macros.h"
 #include "base/strings/stringprintf.h"
@@ -15,6 +15,8 @@
 namespace base {
 namespace trace_event {
 
+using TypeId = AllocationContext::TypeId;
+
 namespace {
 
 template <typename T>
@@ -24,8 +26,12 @@ bool PairSizeGt(const std::pair<T, size_t>& lhs,
 }
 
 template <typename T>
-size_t PairSizeAdd(size_t acc, const std::pair<T, size_t>& rhs) {
-  return acc + rhs.second;
+std::vector<std::pair<T, size_t>> SortDescending(
+    const hash_map<T, size_t>& grouped) {
+  std::vector<std::pair<T, size_t>> sorted;
+  std::copy(grouped.begin(), grouped.end(), std::back_inserter(sorted));
+  std::sort(sorted.begin(), sorted.end(), PairSizeGt<T>);
+  return sorted;
 }
 
 }  // namespace
@@ -38,38 +44,50 @@ HeapDumpWriter::~HeapDumpWriter() {}
 
 void HeapDumpWriter::InsertAllocation(const AllocationContext& context,
                                       size_t size) {
-  bytes_by_backtrace_[context.backtrace] += size;
+  bytes_by_context_[context] += size;
 }
 
 scoped_refptr<TracedValue> HeapDumpWriter::WriteHeapDump() {
-  // Sort the backtraces by size in descending order.
-  std::vector<std::pair<Backtrace, size_t>> sorted_by_backtrace;
+  // Group by backtrace and by type ID, and compute the total heap size while
+  // iterating anyway.
+  size_t total_size = 0;
+  hash_map<Backtrace, size_t> bytes_by_backtrace;
+  hash_map<TypeId, size_t> bytes_by_type;
+
+  for (auto context_size : bytes_by_context_) {
+    total_size += context_size.second;
+    bytes_by_backtrace[context_size.first.backtrace] += context_size.second;
+    bytes_by_type[context_size.first.type_id] += context_size.second;
+  }
 
-  std::copy(bytes_by_backtrace_.begin(), bytes_by_backtrace_.end(),
-            std::back_inserter(sorted_by_backtrace));
-  std::sort(sorted_by_backtrace.begin(), sorted_by_backtrace.end(),
-            PairSizeGt<Backtrace>);
+  // Sort the backtraces and type IDs by size.
+  auto sorted_bytes_by_backtrace = SortDescending(bytes_by_backtrace);
+  auto sorted_bytes_by_type = SortDescending(bytes_by_type);
 
   traced_value_->BeginArray("entries");
 
   // The global size, no column specified.
   {
-    size_t total_size =
-        std::accumulate(sorted_by_backtrace.begin(), sorted_by_backtrace.end(),
-                        size_t(0), PairSizeAdd<Backtrace>);
     traced_value_->BeginDictionary();
     WriteSize(total_size);
     traced_value_->EndDictionary();
   }
 
-  // Size per backtrace.
-  for (auto it = sorted_by_backtrace.begin();
-       it != sorted_by_backtrace.end(); it++) {
+  // Entries with the size per backtrace.
+  for (const auto& entry : sorted_bytes_by_backtrace) {
     traced_value_->BeginDictionary();
     // Insert a forward reference to the backtrace that will be written to the
     // |stackFrames| dictionary later on.
-    WriteStackFrameIndex(stack_frame_deduplicator_->Insert(it->first));
-    WriteSize(it->second);
+    WriteStackFrameIndex(stack_frame_deduplicator_->Insert(entry.first));
+    WriteSize(entry.second);
+    traced_value_->EndDictionary();
+  }
+
+  // Entries with the size per type.
+  for (const auto& entry : sorted_bytes_by_type) {
+    traced_value_->BeginDictionary();
+    WriteTypeId(entry.first);
+    WriteSize(entry.second);
     traced_value_->EndDictionary();
   }
 
@@ -91,6 +109,19 @@ void HeapDumpWriter::WriteStackFrameIndex(int index) {
   }
 }
 
+void HeapDumpWriter::WriteTypeId(TypeId type_id) {
+  if (type_id == 0) {
+    // Type ID 0 represents "unknown type". Instead of writing it as "0" which
+    // could be mistaken for an actual type ID, an unknown type is represented
+    // by the empty string.
+    traced_value_->SetString("type", "");
+  } else {
+    // Format the type ID as a string.
+    SStringPrintf(&buffer_, "%i", type_id);
+    traced_value_->SetString("type", buffer_);
+  }
+}
+
 void HeapDumpWriter::WriteSize(size_t size) {
   // Format size as hexadecimal string into |buffer_|.
   SStringPrintf(&buffer_, "%" PRIx64, static_cast<uint64_t>(size));
diff --git a/base/trace_event/memory_profiler_heap_dump_writer.h b/base/trace_event/memory_profiler_heap_dump_writer.h
index f05e338f..10a64e5 100644
--- a/base/trace_event/memory_profiler_heap_dump_writer.h
+++ b/base/trace_event/memory_profiler_heap_dump_writer.h
@@ -44,6 +44,9 @@ class BASE_EXPORT HeapDumpWriter {
   // dictionary.
   void WriteStackFrameIndex(int index);
 
+  // Writes a "type" key with the stringified type ID.
+  void WriteTypeId(AllocationContext::TypeId type_id);
+
   // Writes a "size" key with value |size| as a hexidecimal string to the traced
   // value.
   void WriteSize(size_t size);
@@ -55,8 +58,9 @@ class BASE_EXPORT HeapDumpWriter {
   // this heap dump writer instance.
   StackFrameDeduplicator* const stack_frame_deduplicator_;
 
-  // A map of backtrace to the number of bytes allocated for that backtrace.
-  hash_map<Backtrace, size_t> bytes_by_backtrace_;
+  // A map of allocation context to the number of bytes allocated for that
+  // context.
+  hash_map<AllocationContext, size_t> bytes_by_context_;
 
   // Buffer for converting integers into strings, that is re-used throughout
   // the dump.
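A note on why the patch adds both an operator== and a hash<AllocationContext> specialization: a hash map keyed by a struct needs an equality operator (to resolve bucket collisions) and a hasher. The sketch below shows the same pattern with the standard library instead of Chromium's hash_map and BASE_HASH_NAMESPACE; the Context struct and its fields are simplified stand-ins, not the real types.

// Sketch only: standard-library analogue of the hash_map<AllocationContext,
// size_t> bookkeeping added by this patch.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

struct Context {
  std::size_t backtrace_hash;  // Stand-in for a hashed Backtrace.
  uint16_t type_id;            // 0 means "type not known".
};

bool operator==(const Context& lhs, const Context& rhs) {
  return lhs.backtrace_hash == rhs.backtrace_hash && lhs.type_id == rhs.type_id;
}

namespace std {
template <>
struct hash<Context> {
  size_t operator()(const Context& ctx) const {
    // Same combining trick as the patch: multiplying one side by a constant
    // that is coprime to 2^N keeps the mapping bijective and stops the two
    // components from being interchangeable under plain addition.
    return ctx.backtrace_hash * 3 + static_cast<size_t>(ctx.type_id);
  }
};
}  // namespace std

// Bytes allocated per context, the shape of HeapDumpWriter::bytes_by_context_.
std::unordered_map<Context, std::size_t> bytes_by_context;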