path: root/base/debug/trace_event_unittest.cc
author     wangxianzhu@chromium.org <wangxianzhu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-10-09 20:03:19 +0000
committer  wangxianzhu@chromium.org <wangxianzhu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-10-09 20:03:19 +0000
commit     cb13e0a5455c86507e493498bb232e98467333c1 (patch)
tree       b863305e5676cc7eaf86cad9681500835001120c /base/debug/trace_event_unittest.cc
parent     b76b0dba22cc820fe92b8e4bf0e5d53a5baa95c7 (diff)
Use chunks in trace event buffers
Separated from https://codereview.chromium.org/23799005/.

Chunks are fixed-size arrays of TraceEvents in trace event buffers. Each thread borrows a chunk from the buffer, adds thread-local events into the chunk without locking, and returns the chunk when it is full. TraceLog also gets a chunk from the buffer for events that are not suitable for a thread-local buffer. With chunks, a TraceEvent is never copied except when the trace buffer is cloned.

Changed |flush_count_| to |generation|, which indicates the version of the central TraceBuffer. If a thread failed to flush during the last flush, it may still hold a TraceBufferChunk from the original TraceBuffer; |generation| prevents that thread from returning the stale chunk to the current TraceBuffer.

This is preparation for the new design for X events, which exposes TraceEventHandle through TRACE_EVENT_API.

BUG=trace-viewer:142
TEST=Existing TraceEventTestFixture.* tests
TEST=TraceEventTestFixture.TraceBufferRingBuffer*

Review URL: https://codereview.chromium.org/25656003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@227783 0039d316-1c4b-4281-b951-d872f2087c98
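The chunk borrow/fill/return cycle and the |generation| check described above can be illustrated with a small standalone sketch. This is a minimal illustration only, not the real TraceBuffer/TraceBufferChunk implementation touched by this change: the names RingBuffer, Chunk, kChunkSize and NewGeneration, the member layout, and the synchronization details are invented for this sketch.

// Hypothetical sketch only -- not the Chromium TraceBuffer API.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

struct Event {
  const char* name = nullptr;
  uint64_t timestamp = 0;
};

constexpr size_t kChunkSize = 64;  // Fixed number of events per chunk.

struct Chunk {
  uint32_t seq = 0;      // Monotonically increasing sequence number.
  size_t next_free = 0;  // Next free slot; the owning thread writes lock-free.
  Event events[kChunkSize];

  bool IsFull() const { return next_free == kChunkSize; }
  Event* AddEvent() { return IsFull() ? nullptr : &events[next_free++]; }
};

class RingBuffer {
 public:
  explicit RingBuffer(size_t num_chunks) : chunks_(num_chunks) {}

  // Lends a chunk to the calling thread. Only this handoff takes the lock;
  // the thread then fills the chunk without any further synchronization.
  std::unique_ptr<Chunk> GetChunk(size_t* index, uint32_t* generation) {
    std::lock_guard<std::mutex> lock(lock_);
    *index = next_index_++ % chunks_.size();  // Ring: wrap around, never full.
    *generation = generation_;
    std::unique_ptr<Chunk> chunk = std::move(chunks_[*index]);
    if (!chunk)
      chunk.reset(new Chunk);
    chunk->seq = ++last_seq_;
    chunk->next_free = 0;
    return chunk;
  }

  // Takes a full chunk back. A stale generation means the central buffer was
  // replaced (e.g. by a flush this thread missed), so the chunk must not be
  // put back into the current buffer.
  void ReturnChunk(size_t index, uint32_t generation,
                   std::unique_ptr<Chunk> chunk) {
    std::lock_guard<std::mutex> lock(lock_);
    if (generation != generation_)
      return;  // Chunk belongs to an older buffer; drop it.
    chunks_[index] = std::move(chunk);
  }

  // Called when the central buffer is recreated; outstanding chunks from the
  // previous generation will then be rejected by ReturnChunk().
  void NewGeneration() {
    std::lock_guard<std::mutex> lock(lock_);
    ++generation_;
  }

 private:
  std::mutex lock_;
  std::vector<std::unique_ptr<Chunk>> chunks_;
  size_t next_index_ = 0;
  uint32_t last_seq_ = 0;
  uint32_t generation_ = 0;
};

int main() {
  RingBuffer buffer(4);
  size_t index = 0;
  uint32_t generation = 0;
  std::unique_ptr<Chunk> chunk = buffer.GetChunk(&index, &generation);
  while (Event* event = chunk->AddEvent())
    event->name = "my_event";  // Events are written in place, no copies.
  buffer.ReturnChunk(index, generation, std::move(chunk));
  return 0;
}

The property the sketch tries to show is the one the commit relies on: a thread only ever touches the chunk it currently owns, so no per-event locking or copying is needed, and the lock is taken only at chunk boundaries.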
Diffstat (limited to 'base/debug/trace_event_unittest.cc')
-rw-r--r--  base/debug/trace_event_unittest.cc  158
1 file changed, 136 insertions(+), 22 deletions(-)
diff --git a/base/debug/trace_event_unittest.cc b/base/debug/trace_event_unittest.cc
index 49e9e56..4e5e2651 100644
--- a/base/debug/trace_event_unittest.cc
+++ b/base/debug/trace_event_unittest.cc
@@ -1164,13 +1164,6 @@ TEST_F(TraceEventTestFixture, EventWatchNotification) {
EndTraceAndFlush();
EXPECT_EQ(event_watch_notification_, 1);
- // Basic one occurrence before Set.
- BeginTrace();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- TraceLog::GetInstance()->SetWatchEvent("cat", "event");
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 1);
-
// Auto-reset after end trace.
BeginTrace();
TraceLog::GetInstance()->SetWatchEvent("cat", "event");
@@ -1296,14 +1289,16 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
"arg2", TRACE_STR_COPY("argval"));
size_t num_events = tracer->GetEventsSize();
EXPECT_GT(num_events, 1u);
- const TraceEvent& event1 = tracer->GetEventAt(num_events - 2);
- const TraceEvent& event2 = tracer->GetEventAt(num_events - 1);
- EXPECT_STREQ("name1", event1.name());
- EXPECT_STREQ("name2", event2.name());
- EXPECT_TRUE(event1.parameter_copy_storage() != NULL);
- EXPECT_TRUE(event2.parameter_copy_storage() != NULL);
- EXPECT_GT(event1.parameter_copy_storage()->size(), 0u);
- EXPECT_GT(event2.parameter_copy_storage()->size(), 0u);
+ const TraceEvent* event1 = tracer->GetEventAt(num_events - 2);
+ const TraceEvent* event2 = tracer->GetEventAt(num_events - 1);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ EXPECT_TRUE(event1->parameter_copy_storage() != NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() != NULL);
+ EXPECT_GT(event1->parameter_copy_storage()->size(), 0u);
+ EXPECT_GT(event2->parameter_copy_storage()->size(), 0u);
EndTraceAndFlush();
}
@@ -1320,12 +1315,14 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
"arg2", TRACE_STR_COPY(str2));
size_t num_events = tracer->GetEventsSize();
EXPECT_GT(num_events, 1u);
- const TraceEvent& event1 = tracer->GetEventAt(num_events - 2);
- const TraceEvent& event2 = tracer->GetEventAt(num_events - 1);
- EXPECT_STREQ("name1", event1.name());
- EXPECT_STREQ("name2", event2.name());
- EXPECT_TRUE(event1.parameter_copy_storage() == NULL);
- EXPECT_TRUE(event2.parameter_copy_storage() == NULL);
+ const TraceEvent* event1 = tracer->GetEventAt(num_events - 2);
+ const TraceEvent* event2 = tracer->GetEventAt(num_events - 1);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ EXPECT_TRUE(event1->parameter_copy_storage() == NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() == NULL);
EndTraceAndFlush();
}
}
@@ -2010,7 +2007,124 @@ TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
EXPECT_EQ("a snake", collected_events_[0]);
}
-// TODO(dsinclair): Continuous Tracing unit test.
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferGetReturnChunk) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORD_CONTINUOUSLY);
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ uint32 last_seq = 0;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+
+ ScopedVector<TraceBufferChunk> chunks;
+ chunks.resize(num_chunks);
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ EXPECT_EQ((i + 1) * TraceBufferChunk::kTraceBufferChunkSize,
+ buffer->Size());
+ last_seq = chunks[i]->seq();
+ }
+
+ // Ring buffer is never full.
+ EXPECT_FALSE(buffer->IsFull());
+
+ // Return all chunks in original order.
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ // Should recycle the chunks in the returned order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ last_seq = chunks[i]->seq();
+ }
+
+ // Return all chunks in reverse order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ buffer->ReturnChunk(
+ num_chunks - i - 1,
+ scoped_ptr<TraceBufferChunk>(chunks[num_chunks - i - 1]));
+ }
+
+ // Should recycle the chunks in the returned order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(num_chunks - i - 1, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ last_seq = chunks[i]->seq();
+ }
+
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+ chunks.weak_clear();
+
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferHalfIteration) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORD_CONTINUOUSLY);
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+ EXPECT_FALSE(buffer->NextChunk());
+
+ size_t half_chunks = num_chunks / 2;
+ ScopedVector<TraceBufferChunk> chunks;
+ chunks.resize(half_chunks);
+
+ for (size_t i = 0; i < half_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ }
+ for (size_t i = 0; i < half_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ // The buffer owns the chunks again; keep the raw pointers only to check
+ // the iteration order, then weak_clear() so they are not double-deleted.
+ for (size_t i = 0; i < half_chunks; ++i)
+ EXPECT_EQ(chunks[i], buffer->NextChunk());
+ EXPECT_FALSE(buffer->NextChunk());
+ chunks.weak_clear();
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferFullIteration) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORD_CONTINUOUSLY);
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+ EXPECT_FALSE(buffer->NextChunk());
+
+ ScopedVector<TraceBufferChunk> chunks;
+ chunks.resize(num_chunks);
+
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ }
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ // As above, weak_clear() only after the pointer comparisons so the
+ // indices stay valid and the buffer-owned chunks are not double-deleted.
+ for (size_t i = 0; i < num_chunks; ++i)
+ EXPECT_TRUE(chunks[i] == buffer->NextChunk());
+ EXPECT_FALSE(buffer->NextChunk());
+ chunks.weak_clear();
+ TraceLog::GetInstance()->SetDisabled();
+}
// Test the category filter.
TEST_F(TraceEventTestFixture, CategoryFilter) {