path: root/ppapi
author    dmichael@chromium.org <dmichael@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-02-19 23:36:22 +0000
committer dmichael@chromium.org <dmichael@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-02-19 23:36:22 +0000
commit    76aa16e46dae43c3ebb9c9b2818e7d25e5b34806 (patch)
tree      29007161ec33f2d0aeccd9712c53c7f35bf22bab /ppapi
parent    ddf6f55c37ef48e717803e059b8e836fefd1d418 (diff)
PPAPI/NaCl: Make related tests run in 1 fixture
I've only converted the WebSockets tests in this CL. The idea is that the bulk of the test time is actually spent starting up the test:
- Starting the HTTP and/or WebSocket and/or SSL server
- Launching the renderer
- Launching the NaCl loader
- Downloading, validating, and launching the .nexe
Now this all happens once for all WebSocket sub-tests (times 4: in-process, out-of-process, NaCl Newlib, NaCl Glibc). Locally, the time goes from about 5 minutes to less than 20 seconds. The trick is that we can still enable/disable the individual tests from within ppapi_browsertest.cc.

BUG=
Review URL: https://chromiumcodereview.appspot.com/12193015

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@183319 0039d316-1c4b-4281-b951-d872f2087c98
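For context, a minimal standalone sketch (not part of this change) of how the new comma-delimited filter format is interpreted, mirroring the StripPrefix/StripTestCase/ParseTestFilter helpers added in test_case.cc below. It uses only the standard library, and the filter string and test names are hypothetical:

    // Hedged sketch: plain C++, no PPAPI dependencies; names below are made up.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    int main() {
      // A filter as passed in the testcase URL: comma-delimited, where a
      // FAILS_/FLAKY_/DISABLED_ prefix lists a test without running it.
      std::string filter = "WebSocket_Foo,DISABLED_WebSocket_Bar,WebSocket_Baz";
      std::map<std::string, bool> should_run;  // stripped test name -> run it?
      std::istringstream filter_stream(filter);
      std::string current_test;
      while (std::getline(filter_stream, current_test, ',')) {
        std::string name = current_test;
        // Strip a known prefix, if any (as StripPrefix does).
        const char* const prefixes[] = { "FAILS_", "FLAKY_", "DISABLED_" };
        bool prefixed = false;
        for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); ++i) {
          std::string prefix(prefixes[i]);
          if (name.compare(0, prefix.size(), prefix) == 0) {
            name = name.substr(prefix.size());
            prefixed = true;
            break;
          }
        }
        // Strip the leading "TestCase_" part (as StripTestCase does).
        size_t delim = name.find_first_of('_');
        if (delim != std::string::npos)
          name = name.substr(delim + 1);
        should_run[name] = !prefixed;
      }
      // Prints (in map order): Bar: skip, Baz: run, Foo: run.
      for (std::map<std::string, bool>::const_iterator it = should_run.begin();
           it != should_run.end(); ++it)
        std::cout << it->first << (it->second ? ": run" : ": skip") << std::endl;
      return 0;
    }

Prefixed tests are still recorded in the map so the harness can account for every name in the filter; RunTests then consumes the map via ShouldRunTest, and anything left over is reported as an unknown test name.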
Diffstat (limited to 'ppapi')
-rw-r--r--  ppapi/tests/test_broker.cc        13
-rw-r--r--  ppapi/tests/test_buffer.cc         8
-rw-r--r--  ppapi/tests/test_case.cc         100
-rw-r--r--  ppapi/tests/test_case.h           91
-rw-r--r--  ppapi/tests/test_image_data.cc    14
-rw-r--r--  ppapi/tests/test_input_event.cc   22
-rw-r--r--  ppapi/tests/test_scrollbar.cc      2
-rw-r--r--  ppapi/tests/testing_instance.cc   58
-rw-r--r--  ppapi/tests/testing_instance.h    24
9 files changed, 253 insertions, 79 deletions
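The diff to testing_instance.cc below also stamps each logged test result with its elapsed time, formatted to three decimal places. A rough self-contained illustration of that formatting (PP_TimeTicks is a double measured in seconds; the values here are placeholders, not real measurements):

    #include <iomanip>
    #include <iostream>
    #include <sstream>

    int main() {
      // Placeholders standing in for pp::Core::GetTimeTicks() readings taken
      // just before the test runs and inside LogTest after it finishes.
      double start_time = 12.5000;
      double end_time = 12.6437;
      std::ostringstream number_stream;
      number_stream << std::fixed << std::setprecision(3)
                    << (end_time - start_time);
      // LogTest appends something like: <span class="time">(0.144s)</span>
      std::cout << "(" << number_stream.str() << "s)" << std::endl;
      return 0;
    }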
diff --git a/ppapi/tests/test_broker.cc b/ppapi/tests/test_broker.cc
index d66e9dd..0537b95 100644
--- a/ppapi/tests/test_broker.cc
+++ b/ppapi/tests/test_broker.cc
@@ -217,13 +217,12 @@ void TestBroker::RunTests(const std::string& filter) {
// The following tests require special setup, so only run them if they're
// explicitly specified by the filter.
- if (filter.empty())
- return;
-
- RUN_TEST(ConnectPermissionDenied, filter);
- RUN_TEST(ConnectPermissionGranted, filter);
- RUN_TEST(IsAllowedPermissionDenied, filter);
- RUN_TEST(IsAllowedPermissionGranted, filter);
+ if (!ShouldRunAllTests(filter)) {
+ RUN_TEST(ConnectPermissionDenied, filter);
+ RUN_TEST(ConnectPermissionGranted, filter);
+ RUN_TEST(IsAllowedPermissionDenied, filter);
+ RUN_TEST(IsAllowedPermissionGranted, filter);
+ }
}
std::string TestBroker::TestCreate() {
diff --git a/ppapi/tests/test_buffer.cc b/ppapi/tests/test_buffer.cc
index e6d7063..d143043 100644
--- a/ppapi/tests/test_buffer.cc
+++ b/ppapi/tests/test_buffer.cc
@@ -20,10 +20,10 @@ bool TestBuffer::Init() {
}
void TestBuffer::RunTests(const std::string& filter) {
- instance_->LogTest("InvalidSize", TestInvalidSize());
- instance_->LogTest("InitToZero", TestInitToZero());
- instance_->LogTest("IsBuffer", TestIsBuffer());
- instance_->LogTest("BasicLifecyle", TestBasicLifeCycle());
+ RUN_TEST(InvalidSize, filter);
+ RUN_TEST(InitToZero, filter);
+ RUN_TEST(IsBuffer, filter);
+ RUN_TEST(BasicLifeCycle, filter);
}
std::string TestBuffer::TestInvalidSize() {
diff --git a/ppapi/tests/test_case.cc b/ppapi/tests/test_case.cc
index f6fb485..7ae564e 100644
--- a/ppapi/tests/test_case.cc
+++ b/ppapi/tests/test_case.cc
@@ -4,16 +4,84 @@
#include "ppapi/tests/test_case.h"
+#include <string.h>
+
#include <sstream>
+#include "ppapi/cpp/core.h"
+#include "ppapi/cpp/module.h"
#include "ppapi/tests/pp_thread.h"
#include "ppapi/tests/test_utils.h"
#include "ppapi/tests/testing_instance.h"
+namespace {
+
+std::string StripPrefix(const std::string& test_name) {
+ const char* const prefixes[] = {
+ "FAILS_", "FLAKY_", "DISABLED_" };
+ for (size_t i = 0; i < sizeof(prefixes)/sizeof(prefixes[0]); ++i)
+ if (test_name.find(prefixes[i]) == 0)
+ return test_name.substr(strlen(prefixes[i]));
+ return test_name;
+}
+
+// Strip the TestCase name off and return the remainder (i.e., everything after
+// '_'). If there is no '_', assume only the TestCase was provided, and return
+// an empty string.
+// For example:
+// StripTestCase("TestCase_TestName");
+// returns
+// "TestName"
+// while
+// StripTestCase("TestCase");
+// returns
+// ""
+std::string StripTestCase(const std::string& full_test_name) {
+ size_t delim = full_test_name.find_first_of('_');
+ if (delim != std::string::npos)
+ return full_test_name.substr(delim+1);
+ // In this case, our "filter" is the empty string; the full test name is the
+ // same as the TestCase name with which we were constructed.
+ // TODO(dmichael): It might be nice to be able to PP_DCHECK against the
+ // TestCase class name, but we'd have to plumb that name to TestCase somehow.
+ return std::string();
+}
+
+// Parse |test_filter|, which is a comma-delimited list of (possibly prefixed)
+// test names and insert the un-prefixed names into |remaining_tests|, with
+// the bool indicating whether the test should be run.
+void ParseTestFilter(const std::string& test_filter,
+ std::map<std::string, bool>* remaining_tests) {
+ // We can't use base/string_util.h::Tokenize in ppapi, so we have to do it
+ // ourselves.
+ std::istringstream filter_stream(test_filter);
+ std::string current_test;
+ while (std::getline(filter_stream, current_test, ',')) {
+ // |current_test| might include a prefix, like DISABLED_Foo_TestBar, so we
+ // strip it off if there is one.
+ std::string stripped_test_name(StripPrefix(current_test));
+ // Strip off the test case and use the test name as a key, because the test
+ // name ShouldRunTest wants to use to look up the test doesn't have the
+ // TestCase name.
+ std::string test_name_without_case(StripTestCase(stripped_test_name));
+
+ // If the test wasn't prefixed, it should be run.
+ bool should_run_test = (current_test == stripped_test_name);
+ PP_DCHECK(remaining_tests->count(test_name_without_case) == 0);
+ remaining_tests->insert(
+ std::make_pair(test_name_without_case, should_run_test));
+ }
+ // There may be a trailing comma; ignore empty strings.
+ remaining_tests->erase(std::string());
+}
+
+} // namespace
+
TestCase::TestCase(TestingInstance* instance)
: instance_(instance),
testing_interface_(NULL),
- callback_type_(PP_REQUIRED) {
+ callback_type_(PP_REQUIRED),
+ have_populated_remaining_tests_(false) {
// Get the testing_interface_ if it is available, so that we can do Resource
// and Var checks on shutdown (see CheckResourcesAndVars). If it is not
// available, testing_interface_ will be NULL. Some tests do not require it.
@@ -103,9 +171,35 @@ bool TestCase::EnsureRunningOverHTTP() {
return true;
}
-bool TestCase::MatchesFilter(const std::string& test_name,
+bool TestCase::ShouldRunAllTests(const std::string& filter) {
+ // If only the TestCase is listed, we're running all the tests in RunTests.
+ return (StripTestCase(filter) == std::string());
+}
+
+bool TestCase::ShouldRunTest(const std::string& test_name,
const std::string& filter) {
- return filter.empty() || (test_name == filter);
+ if (ShouldRunAllTests(filter))
+ return true;
+
+ // Lazily initialize our "remaining_tests_" map.
+ if (!have_populated_remaining_tests_) {
+ ParseTestFilter(filter, &remaining_tests_);
+ have_populated_remaining_tests_ = true;
+ }
+ std::map<std::string, bool>::iterator iter = remaining_tests_.find(test_name);
+ if (iter == remaining_tests_.end()) {
+ // The test name wasn't listed in the filter. Don't run it, but store it
+ // so TestingInstance::ExecuteTests can report an error later.
+ skipped_tests_.insert(test_name);
+ return false;
+ }
+ bool should_run_test = iter->second;
+ remaining_tests_.erase(iter);
+ return should_run_test;
+}
+
+PP_TimeTicks TestCase::NowInTimeTicks() {
+ return pp::Module::Get()->core()->GetTimeTicks();
}
std::string TestCase::CheckResourcesAndVars(std::string errors) {
diff --git a/ppapi/tests/test_case.h b/ppapi/tests/test_case.h
index c769996..47324b6 100644
--- a/ppapi/tests/test_case.h
+++ b/ppapi/tests/test_case.h
@@ -7,11 +7,13 @@
#include <cmath>
#include <limits>
+#include <map>
#include <set>
#include <string>
-#include "ppapi/c/pp_resource.h"
#include "ppapi/c/dev/ppb_testing_dev.h"
+#include "ppapi/c/pp_resource.h"
+#include "ppapi/c/pp_time.h"
#include "ppapi/cpp/dev/scrollbar_dev.h"
#include "ppapi/cpp/message_loop.h"
#include "ppapi/cpp/view.h"
@@ -43,10 +45,14 @@ class TestCase {
virtual bool Init();
// Override to implement the test case. It will be called after the plugin is
- // first displayed, passing a string. If the string is empty, the
- // should run all tests for this test case. Otherwise, it should run the test
- // whose name matches test_filter exactly (if there is one). This should
- // generally be implemented using the RUN_TEST* macros.
+ // first displayed, passing a string. If the string is empty, RunTests should
+ // run all tests for this test case. Otherwise, it must be a comma-delimited
+ // list of test names, possibly prefixed. E.g.:
+ // "Foo_GoodTest,DISABLED_Foo_BadTest,Foo_OtherGoodTest"
+ // All listed tests which are not prefixed will be run.
+ //
+ // This should generally be implemented in a TestCase subclass using the
+ // RUN_TEST* macros.
virtual void RunTests(const std::string& test_filter) = 0;
static std::string MakeFailureMessage(const char* file, int line,
@@ -82,6 +88,13 @@ class TestCase {
static void QuitMainMessageLoop(PP_Instance instance);
+ const std::map<std::string, bool>& remaining_tests() {
+ return remaining_tests_;
+ }
+ const std::set<std::string>& skipped_tests() {
+ return skipped_tests_;
+ }
+
protected:
#if !(defined __native_client__)
// Overridden by each test to supply a ScriptableObject corresponding to the
@@ -101,9 +114,15 @@ class TestCase {
// Makes sure the test is run over HTTP.
bool EnsureRunningOverHTTP();
+ // Returns true if |filter| only contains a TestCase name, which normally
+ // means "run all tests". Some TestCases require special setup for individual
+ // tests, and can use this function to decide whether to ignore those tests.
+ bool ShouldRunAllTests(const std::string& filter);
+
// Return true if the given test name matches the filter. This is true if
- // (a) filter is empty or (b) test_name and filter match exactly.
- bool MatchesFilter(const std::string& test_name, const std::string& filter);
+ // (a) filter is empty or (b) test_name matches a test name listed in filter
+ // exactly.
+ bool ShouldRunTest(const std::string& test_name, const std::string& filter);
// Check for leaked resources and vars at the end of the test. If any exist,
// return a string with some information about the error. Otherwise, return
@@ -113,6 +132,8 @@ class TestCase {
// CheckResourcesAndVars will do nothing and return the same string.
std::string CheckResourcesAndVars(std::string errors);
+ PP_TimeTicks NowInTimeTicks();
+
// Run the given test method on a background thread and return the result.
template <class T>
std::string RunOnThread(std::string(T::*test_to_run)()) {
@@ -212,6 +233,24 @@ class TestCase {
// Var ids that should be ignored when checking for leaks on shutdown.
std::set<int64_t> ignored_leaked_vars_;
+ // The tests that were found in test_filter but have not yet been run. The
+ // bool indicates whether the test should be run (i.e., it will be false if
+ // the test name was prefixed in the test_filter string).
+ //
+ // This is initialized lazily the first time that ShouldRunTest is called by
+ // RunTests. When RunTests is finished, this should be empty. Any remaining
+ // tests are tests that were listed in the test_filter but didn't match
+ // any calls to ShouldRunTest, meaning it was probably a typo. TestingInstance
+ // should log this and consider it a failure.
+ std::map<std::string, bool> remaining_tests_;
+ // Flag indicating whether we have populated remaining_tests_ yet.
+ bool have_populated_remaining_tests_;
+
+ // If ShouldRunTest is called but the given test name doesn't match anything
+ // in the test_filter, the test name will be added here. This allows
+ // TestingInstance to detect when not all tests were listed.
+ std::set<std::string> skipped_tests_;
+
#if !(defined __native_client__)
// Holds the test object, if any was retrieved from CreateTestObject.
pp::VarPrivate test_object_;
@@ -260,31 +299,42 @@ class TestCaseFactory {
// RunTest function. This assumes the function name is TestFoo where Foo is the
// test |name|.
#define RUN_TEST(name, test_filter) \
- if (MatchesFilter(#name, test_filter)) { \
+ if (ShouldRunTest(#name, test_filter)) { \
set_callback_type(PP_OPTIONAL); \
- instance_->LogTest(#name, CheckResourcesAndVars(Test##name())); \
+ PP_TimeTicks start_time(NowInTimeTicks()); \
+ instance_->LogTest(#name, \
+ CheckResourcesAndVars(Test##name()), \
+ start_time); \
}
// Like RUN_TEST above but forces functions taking callbacks to complete
// asynchronously on success or error.
#define RUN_TEST_FORCEASYNC(name, test_filter) \
- if (MatchesFilter(#name, test_filter)) { \
+ if (ShouldRunTest(#name, test_filter)) { \
set_callback_type(PP_REQUIRED); \
+ PP_TimeTicks start_time(NowInTimeTicks()); \
instance_->LogTest(#name"ForceAsync", \
- CheckResourcesAndVars(Test##name())); \
+ CheckResourcesAndVars(Test##name()), \
+ start_time); \
}
#define RUN_TEST_BLOCKING(test_case, name, test_filter) \
- if (MatchesFilter(#name, test_filter)) { \
+ if (ShouldRunTest(#name, test_filter)) { \
set_callback_type(PP_BLOCKING); \
- instance_->LogTest(#name"Blocking", \
- CheckResourcesAndVars(RunOnThread(&test_case::Test##name))); \
+ PP_TimeTicks start_time(NowInTimeTicks()); \
+ instance_->LogTest( \
+ #name"Blocking", \
+ CheckResourcesAndVars(RunOnThread(&test_case::Test##name)), \
+ start_time); \
}
#define RUN_TEST_BACKGROUND(test_case, name, test_filter) \
- if (MatchesFilter(#name, test_filter)) { \
- instance_->LogTest(#name"Background", \
- CheckResourcesAndVars(RunOnThread(&test_case::Test##name))); \
+ if (ShouldRunTest(#name, test_filter)) { \
+ PP_TimeTicks start_time(NowInTimeTicks()); \
+ instance_->LogTest( \
+ #name"Background", \
+ CheckResourcesAndVars(RunOnThread(&test_case::Test##name)), \
+ start_time); \
}
#define RUN_TEST_FORCEASYNC_AND_NOT(name, test_filter) \
@@ -303,7 +353,7 @@ class TestCaseFactory {
} while (false)
#define RUN_TEST_WITH_REFERENCE_CHECK(name, test_filter) \
- if (MatchesFilter(#name, test_filter)) { \
+ if (ShouldRunTest(#name, test_filter)) { \
set_callback_type(PP_OPTIONAL); \
uint32_t objects = testing_interface_->GetLiveObjectsForInstance( \
instance_->pp_instance()); \
@@ -313,7 +363,10 @@ class TestCaseFactory {
instance_->pp_instance()) != objects) \
error_message = MakeFailureMessage(__FILE__, __LINE__, \
"reference leak check"); \
- instance_->LogTest(#name, error_message); \
+ PP_TimeTicks start_time(NowInTimeTicks()); \
+ instance_->LogTest(#name, \
+ error_message, \
+ start_time); \
}
// TODO(dmichael): Add CheckResourcesAndVars above when Windows tests pass
// cleanly. crbug.com/173503
diff --git a/ppapi/tests/test_image_data.cc b/ppapi/tests/test_image_data.cc
index d5b78f0..856eaa3 100644
--- a/ppapi/tests/test_image_data.cc
+++ b/ppapi/tests/test_image_data.cc
@@ -19,13 +19,13 @@ bool TestImageData::Init() {
}
void TestImageData::RunTests(const std::string& filter) {
- instance_->LogTest("InvalidFormat", TestInvalidFormat());
- instance_->LogTest("GetNativeFormat", TestGetNativeFormat());
- instance_->LogTest("IsImageDataFormatSupported", TestFormatSupported());
- instance_->LogTest("InvalidSize", TestInvalidSize());
- instance_->LogTest("HugeSize", TestHugeSize());
- instance_->LogTest("InitToZero", TestInitToZero());
- instance_->LogTest("IsImageData", TestIsImageData());
+ RUN_TEST(InvalidFormat, filter);
+ RUN_TEST(GetNativeFormat, filter);
+ RUN_TEST(FormatSupported, filter);
+ RUN_TEST(InvalidSize, filter);
+ RUN_TEST(HugeSize, filter);
+ RUN_TEST(InitToZero, filter);
+ RUN_TEST(IsImageData, filter);
}
std::string TestImageData::TestInvalidFormat() {
diff --git a/ppapi/tests/test_input_event.cc b/ppapi/tests/test_input_event.cc
index c55912d..8aa8340 100644
--- a/ppapi/tests/test_input_event.cc
+++ b/ppapi/tests/test_input_event.cc
@@ -32,20 +32,16 @@ pp::Point GetCenter(const pp::Rect& rect) {
void TestInputEvent::RunTests(const std::string& filter) {
RUN_TEST(Events, filter);
-// Like RUN_TEST, but does an exact match with the filter (which means it does
-// not run the test if filter is empty).
-#define RUN_TEST_EXACT_MATCH(name, test_filter) \
- if (test_filter == #name) { \
- set_callback_type(PP_OPTIONAL); \
- instance_->LogTest(#name, CheckResourcesAndVars(Test##name())); \
+ // The AcceptTouchEvent_N tests should not be run when the filter is empty;
+ // they can only be run one at a time.
+ // TODO(dmichael): Figure out a way to make these run in the same test fixture
+ // instance.
+ if (!ShouldRunAllTests(filter)) {
+ RUN_TEST(AcceptTouchEvent_1, filter);
+ RUN_TEST(AcceptTouchEvent_2, filter);
+ RUN_TEST(AcceptTouchEvent_3, filter);
+ RUN_TEST(AcceptTouchEvent_4, filter);
}
-
- RUN_TEST_EXACT_MATCH(AcceptTouchEvent_1, filter);
- RUN_TEST_EXACT_MATCH(AcceptTouchEvent_2, filter);
- RUN_TEST_EXACT_MATCH(AcceptTouchEvent_3, filter);
- RUN_TEST_EXACT_MATCH(AcceptTouchEvent_4, filter);
-
-#undef RUN_TEST_EXACT_MATCH
}
TestInputEvent::TestInputEvent(TestingInstance* instance)
diff --git a/ppapi/tests/test_scrollbar.cc b/ppapi/tests/test_scrollbar.cc
index 83c8365..cde40a7 100644
--- a/ppapi/tests/test_scrollbar.cc
+++ b/ppapi/tests/test_scrollbar.cc
@@ -27,7 +27,7 @@ bool TestScrollbar::Init() {
}
void TestScrollbar::RunTests(const std::string& filter) {
- instance_->LogTest("HandleEvent", TestHandleEvent());
+ RUN_TEST(HandleEvent, filter);
}
std::string TestScrollbar::TestHandleEvent() {
diff --git a/ppapi/tests/testing_instance.cc b/ppapi/tests/testing_instance.cc
index 7874d20..465bbb1 100644
--- a/ppapi/tests/testing_instance.cc
+++ b/ppapi/tests/testing_instance.cc
@@ -6,9 +6,11 @@
#include <algorithm>
#include <cstring>
+#include <iomanip>
#include <sstream>
#include <vector>
+#include "ppapi/cpp/core.h"
#include "ppapi/cpp/module.h"
#include "ppapi/cpp/var.h"
#include "ppapi/cpp/view.h"
@@ -66,7 +68,7 @@ bool TestingInstance::Init(uint32_t argc,
if (argv[i][0] == '\0')
break;
current_case_ = CaseForTestName(argv[i]);
- test_filter_ = FilterForTestName(argv[i]);
+ test_filter_ = argv[i];
if (!current_case_)
errors_.append(std::string("Unknown test case ") + argv[i]);
else if (!current_case_->Init())
@@ -120,7 +122,15 @@ void TestingInstance::SetCookie(const std::string& name,
}
void TestingInstance::LogTest(const std::string& test_name,
- const std::string& error_message) {
+ const std::string& error_message,
+ PP_TimeTicks start_time) {
+ // Compute the time to run the test and save it in a string for logging:
+ PP_TimeTicks end_time(pp::Module::Get()->core()->GetTimeTicks());
+ std::ostringstream number_stream;
+ PP_TimeTicks elapsed_time(end_time - start_time);
+ number_stream << std::fixed << std::setprecision(3) << elapsed_time;
+ std::string time_string(number_stream.str());
+
// Tell the browser we're still working.
ReportProgress(kProgressSignal);
@@ -141,6 +151,10 @@ void TestingInstance::LogTest(const std::string& test_name,
errors_.append(", "); // Separator for different error messages.
errors_.append(test_name + " FAIL: " + error_message);
}
+ html.append(" <span class=\"time\">(");
+ html.append(time_string);
+ html.append("s)</span>");
+
html.append("</div>");
LogHTML(html);
}
@@ -172,11 +186,34 @@ void TestingInstance::ExecuteTests(int32_t unused) {
"restrictive: '" + test_filter_ + "'.");
LogError(errors_);
}
- else {
- // Automated PyAuto tests rely on finding the exact strings below.
- LogHTML(errors_.empty() ?
- "<span class=\"pass\">[SHUTDOWN]</span> All tests passed." :
- "<span class=\"fail\">[SHUTDOWN]</span> Some tests failed.");
+ if (current_case_->skipped_tests().size()) {
+ // TODO(dmichael): Convert all TestCases to run all tests in one fixture,
+ // and enable this check. Currently, a lot of our tests
+ // run 1 test per fixture, which is slow.
+ /*
+ errors_.append("Some tests were not listed and thus were not run. Make "
+ "sure all tests are passed in the test_case URL (even if "
+ "they are marked DISABLED_). Forgotten tests: ");
+ std::set<std::string>::const_iterator iter =
+ current_case_->skipped_tests().begin();
+ for (; iter != current_case_->skipped_tests().end(); ++iter) {
+ errors_.append(*iter);
+ errors_.append(" ");
+ }
+ LogError(errors_);
+ */
+ }
+ if (current_case_->remaining_tests().size()) {
+ errors_.append("Some listed tests were not found in the TestCase. Check "
+ "the test names that were passed to make sure they match "
+ "tests in the TestCase. Unknown tests: ");
+ std::map<std::string, bool>::const_iterator iter =
+ current_case_->remaining_tests().begin();
+ for (; iter != current_case_->remaining_tests().end(); ++iter) {
+ errors_.append(iter->first);
+ errors_.append(" ");
+ }
+ LogError(errors_);
}
}
@@ -199,13 +236,6 @@ TestCase* TestingInstance::CaseForTestName(const std::string& name) {
return NULL;
}
-std::string TestingInstance::FilterForTestName(const std::string& name) {
- size_t delim = name.find_first_of('_');
- if (delim != std::string::npos)
- return name.substr(delim+1);
- return "";
-}
-
void TestingInstance::SendTestCommand(const std::string& command) {
std::string msg("TESTING_MESSAGE:");
msg += command;
diff --git a/ppapi/tests/testing_instance.h b/ppapi/tests/testing_instance.h
index 2ce6c4b..a523137 100644
--- a/ppapi/tests/testing_instance.h
+++ b/ppapi/tests/testing_instance.h
@@ -62,12 +62,17 @@ pp::InstancePrivate {
// Outputs the information from one test run, using the format
// <test_name> [PASS|FAIL <error_message>]
+ //
+ // You should generally use one of the RUN_TEST* macros in test_case.h
+ // instead.
+ //
// If error_message is empty, we say the test passed and emit PASS. If
// error_message is nonempty, the test failed with that message as the error
// string.
//
// Intended usage:
- // LogTest("Foo", FooTest());
+ // PP_TimeTicks start_time(core.GetTimeTicks());
+ // LogTest("Foo", FooTest(), start_time);
//
// Where FooTest is defined as:
// std::string FooTest() {
@@ -75,7 +80,13 @@ pp::InstancePrivate {
// return "Something horrible happened";
// return "";
// }
- void LogTest(const std::string& test_name, const std::string& error_message);
+ //
+ // NOTE: It's important to get the start time in the previous line, rather
+ // than calling GetTimeTicks in the LogTest line. There's no guarantee
+ // that GetTimeTicks will be evaluated before FooTest().
+ void LogTest(const std::string& test_name,
+ const std::string& error_message,
+ PP_TimeTicks start_time);
// Appends an error message to the log.
void AppendError(const std::string& message);
@@ -116,15 +127,6 @@ pp::InstancePrivate {
// test. Ownership is passed to the caller. The given string is split by '_'.
// The test case name is the first part.
TestCase* CaseForTestName(const std::string& name);
- // Returns the filter (second part) of the given string. If there is no '_',
- // returns the empty string, which means 'run all tests for this test case'.
- // E.g.:
- // http://testserver/test_case.html?testcase=PostMessage
- // Otherwise, the part of the testcase after '_' is returned, and the test
- // whose name matches that string (if any) will be run:
- // http://testserver/test_case.html?testcase=PostMessage_SendingData
- // Runs 'PostMessage_SendingData.
- std::string FilterForTestName(const std::string& name);
// Sends a test command to the page using PostMessage.
void SendTestCommand(const std::string& command);