summaryrefslogtreecommitdiffstats
path: root/chrome/browser/visitedlink_perftest.cc
diff options
context:
space:
mode:
authorinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 23:55:29 +0000
committerinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 23:55:29 +0000
commit09911bf300f1a419907a9412154760efd0b7abc3 (patch)
treef131325fb4e2ad12c6d3504ab75b16dd92facfed /chrome/browser/visitedlink_perftest.cc
parent586acc5fe142f498261f52c66862fa417c3d52d2 (diff)
downloadchromium_src-09911bf300f1a419907a9412154760efd0b7abc3.zip
chromium_src-09911bf300f1a419907a9412154760efd0b7abc3.tar.gz
chromium_src-09911bf300f1a419907a9412154760efd0b7abc3.tar.bz2
Add chrome to the repository.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@15 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser/visitedlink_perftest.cc')
-rw-r--r--chrome/browser/visitedlink_perftest.cc215
1 file changed, 215 insertions, 0 deletions
diff --git a/chrome/browser/visitedlink_perftest.cc b/chrome/browser/visitedlink_perftest.cc
new file mode 100644
index 0000000..ef7629d
--- /dev/null
+++ b/chrome/browser/visitedlink_perftest.cc
@@ -0,0 +1,215 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
#include <algorithm>
#include <numeric>
#include <string>
#include <vector>

#include "base/file_util.h"
#include "base/perftimer.h"
#include "base/shared_memory.h"
#include "base/string_util.h"
#include "chrome/browser/visitedlink_master.h"
#include "chrome/test/test_file_util.h"
#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// how we generate URLs, note that the two strings should be the same length
+const int add_count = 10000;
+const int load_test_add_count = 250000;
+const char added_prefix[] = "http://www.google.com/stuff/something/foo?session=85025602345625&id=1345142319023&seq=";
+const char unadded_prefix[] = "http://www.google.org/stuff/something/foo?session=39586739476365&id=2347624314402&seq=";
+
+// Returns a URL with the given prefix and index
+GURL TestURL(const char* prefix, int i) {
+ return GURL(StringPrintf("%s%d", prefix, i));
+}
+
+// we have no slaves, so this broadcase is a NOP
+VisitedLinkMaster::PostNewTableEvent DummyBroadcastNewTableEvent;
+void DummyBroadcastNewTableEvent(SharedMemory *table) {
+}
+
+// Call at the beginning of the test to retrieve the database name and to
+// delete any old databases left by previous unit tests. The input buffer
+// should be MAX_PATH long.
+void InitDBName(wchar_t* db_name) {
+ ASSERT_TRUE(GetCurrentDirectory(MAX_PATH, db_name));
+ if (db_name[wcslen(db_name) - 1] != file_util::kPathSeparator)
+ wcsncat_s(db_name, MAX_PATH, &file_util::kPathSeparator, 1);
+ wcscat_s(db_name, MAX_PATH, L"TempVisitedLinks");
+}
+
+// this checks IsVisited for the URLs starting with the given prefix and
+// within the given range
+void CheckVisited(VisitedLinkMaster& master, const char* prefix,
+ int begin, int end) {
+ for (int i = begin; i < end; i++)
+ master.IsVisited(TestURL(prefix, i));
+}
+
+// Fills that master's table with URLs starting with the given prefix and
+// within the given range
+void FillTable(VisitedLinkMaster& master, const char* prefix,
+ int begin, int end) {
+ for (int i = begin; i < end; i++)
+ master.AddURL(TestURL(prefix, i));
+}
+
+class VisitedLink : public testing::Test {
+ protected:
+ wchar_t db_name_[MAX_PATH];
+ virtual void SetUp() {
+ InitDBName(db_name_);
+ DeleteFile(db_name_);
+ }
+ virtual void TearDown() {
+ DeleteFile(db_name_);
+ }
+};
+
+} // namespace
+
+// This test tests adding many things to a database, and how long it takes
+// to query the database with different numbers of things in it. The time
+// is the total time to do all the operations, and as such, it is only
+// useful for a regression test. If there is a regression, it might be
+// useful to make another set of tests to test these things in isolation.
+TEST_F(VisitedLink, TestAddAndQuery) {
+ // init
+ VisitedLinkMaster master(NULL, DummyBroadcastNewTableEvent, NULL, true,
+ db_name_, 0);
+ ASSERT_TRUE(master.Init());
+
+ PerfTimeLogger timer("Visited_link_add_and_query");
+
+ // first check without anything in the table
+ CheckVisited(master, added_prefix, 0, add_count);
+
+ // now fill half the table
+ const int half_size = add_count / 2;
+ FillTable(master, added_prefix, 0, half_size);
+
+ // check the table again, half of these URLs will be visited, the other half
+ // will not
+ CheckVisited(master, added_prefix, 0, add_count);
+
+ // fill the rest of the table
+ FillTable(master, added_prefix, half_size, add_count);
+
+ // check URLs, doing half visited, half unvisited
+ CheckVisited(master, added_prefix, 0, add_count);
+ CheckVisited(master, unadded_prefix, 0, add_count);
+}
+
+// Tests how long it takes to write and read a large database to and from disk.
+TEST_F(VisitedLink, TestLoad) {
+ // create a big DB
+ {
+ PerfTimeLogger table_initialization_timer("Table_initialization");
+
+ VisitedLinkMaster master(NULL, DummyBroadcastNewTableEvent, NULL, true,
+ db_name_, 0);
+
+ // time init with empty table
+ PerfTimeLogger initTimer("Empty_visited_link_init");
+ bool success = master.Init();
+ initTimer.Done();
+ ASSERT_TRUE(success);
+
+ // add a bunch of stuff
+ // TODO(maruel): This is very inefficient because the file gets rewritten
+ // many time and this is the actual bottleneck of this test. The file should
+ // only get written that the end of the FillTable call, not 4169(!) times.
+ FillTable(master, added_prefix, 0, load_test_add_count);
+
+ // time writing the file out out
+ PerfTimeLogger flushTimer("Visited_link_database_flush");
+ master.RewriteFile();
+ // TODO(maruel): Without calling FlushFileBuffers(master.file_); you don't
+ // know really how much time it took to write the file.
+ flushTimer.Done();
+
+ table_initialization_timer.Done();
+ }
+
+ // test loading the DB back, we do this several times since the flushing is
+ // not very reliable.
+ const int load_count = 5;
+ std::vector<double> cold_load_times;
+ std::vector<double> hot_load_times;
+ for (int i = 0; i < load_count; i++)
+ {
+ // make sure the file has to be re-loaded
+ file_util::EvictFileFromSystemCache(db_name_);
+
+ // cold load (no OS cache, hopefully)
+ {
+ PerfTimer cold_timer;
+
+ VisitedLinkMaster master(NULL, DummyBroadcastNewTableEvent, NULL, true,
+ db_name_, 0);
+ bool success = master.Init();
+ TimeDelta elapsed = cold_timer.Elapsed();
+ ASSERT_TRUE(success);
+
+ cold_load_times.push_back(elapsed.InMillisecondsF());
+ }
+
+ // hot load (with OS caching the file in memory)
+ {
+ PerfTimer hot_timer;
+
+ VisitedLinkMaster master(NULL, DummyBroadcastNewTableEvent, NULL, true,
+ db_name_, 0);
+ bool success = master.Init();
+ TimeDelta elapsed = hot_timer.Elapsed();
+ ASSERT_TRUE(success);
+
+ hot_load_times.push_back(elapsed.InMillisecondsF());
+ }
+ }
+
+ // We discard the max and return the average time.
+ cold_load_times.erase(std::max_element(cold_load_times.begin(),
+ cold_load_times.end()));
+ hot_load_times.erase(std::max_element(hot_load_times.begin(),
+ hot_load_times.end()));
+
+ double cold_sum = 0, hot_sum = 0;
+ for (int i = 0; i < static_cast<int>(cold_load_times.size()); i++) {
+ cold_sum += cold_load_times[i];
+ hot_sum += hot_load_times[i];
+ }
+ LogPerfResult("Visited_link_cold_load_time",
+ cold_sum / cold_load_times.size(), "ms");
+ LogPerfResult("Visited_link_hot_load_time",
+ hot_sum / hot_load_times.size(), "ms");
+}