author     nednguyen <nednguyen@google.com>    2014-12-23 10:19:42 -0800
committer  Commit bot <commit-bot@chromium.org>    2014-12-23 18:20:42 +0000
commit     687a0081935b3e20449ba6c21603c49fd8c34da1
tree       0b7536224efd9aaaedf0e2d0fa0f8cab8ce61e56
parent     5abc69b1a406999169052b7d457de86fd67c6f8c
[Telemetry] Add test that makes sure no two benchmarks have the same name
BUG=441446

Review URL: https://codereview.chromium.org/818053003

Cr-Commit-Position: refs/heads/master@{#309562}
-rw-r--r--  tools/perf/benchmarks/benchmark_unittest.py | 24
1 file changed, 21 insertions(+), 3 deletions(-)
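For context, the check added in this patch reduces to grouping the discovered benchmark classes by their Name() and asserting that each group has exactly one member. Below is a minimal standalone sketch of that pattern; the FakeBenchmarkA/FakeBenchmarkB classes and the find_duplicate_names helper are illustrative stand-ins only (the real test discovers classes with telemetry's discover.DiscoverClasses over tools/perf/benchmarks), not part of this change.

from collections import defaultdict

# Illustrative stand-ins for discovered benchmark classes.
class FakeBenchmarkA(object):
    @classmethod
    def Name(cls):
        return 'page_cycler.typical_25'

class FakeBenchmarkB(object):
    @classmethod
    def Name(cls):
        return 'page_cycler.typical_25'  # Same name as FakeBenchmarkA: should be flagged.

def find_duplicate_names(benchmarks):
    """Returns a dict mapping each duplicated name to its benchmark classes."""
    names_to_benchmarks = defaultdict(list)
    for b in benchmarks:
        names_to_benchmarks[b.Name()].append(b)
    return {name: classes for name, classes in names_to_benchmarks.items()
            if len(classes) > 1}

if __name__ == '__main__':
    duplicates = find_duplicate_names([FakeBenchmarkA, FakeBenchmarkB])
    assert duplicates, 'Expected the duplicate name to be detected'
    print(duplicates)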
diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py
index 22a4db7..f5ffa5a 100644
--- a/tools/perf/benchmarks/benchmark_unittest.py
+++ b/tools/perf/benchmarks/benchmark_unittest.py
@@ -4,8 +4,10 @@
"""For all the benchmarks that set options, test that the options are valid."""
+import logging
import os
import unittest
+from collections import defaultdict
from telemetry import benchmark as benchmark_module
from telemetry.core import browser_options
@@ -18,6 +20,11 @@ def _GetPerfDir(*subdirs):
return os.path.join(perf_dir, *subdirs)
+def _GetAllPerfBenchmarks():
+ return discover.DiscoverClasses(
+ _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
+ index_by_class_name=True).values()
+
def _BenchmarkOptionsTestGenerator(benchmark):
def testBenchmarkOptions(self): # pylint: disable=W0613
"""Invalid options will raise benchmark.InvalidOptionsError."""
@@ -29,12 +36,22 @@ def _BenchmarkOptionsTestGenerator(benchmark):
return testBenchmarkOptions
+class TestNoBenchmarkNamesDuplication(unittest.TestCase):
+ def runTest(self):
+ all_benchmarks = _GetAllPerfBenchmarks()
+ names_to_benchmarks = defaultdict(list)
+ for b in all_benchmarks:
+ names_to_benchmarks[b.Name()].append(b)
+ for n in names_to_benchmarks:
+ self.assertEquals(1, len(names_to_benchmarks[n]),
+ 'Multiple benchmarks with the same name %s are '
+ 'found: %s' % (n, str(names_to_benchmarks[n])))
+
+
def _AddBenchmarkOptionsTests(suite):
# Using |index_by_class_name=True| allows returning multiple benchmarks
# from a module.
- all_benchmarks = discover.DiscoverClasses(
- _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
- index_by_class_name=True).values()
+ all_benchmarks = _GetAllPerfBenchmarks()
for benchmark in all_benchmarks:
if not benchmark.options:
# No need to test benchmarks that have not defined options.
@@ -44,6 +61,7 @@ def _AddBenchmarkOptionsTests(suite):
setattr(BenchmarkOptionsTest, benchmark.Name(),
_BenchmarkOptionsTestGenerator(benchmark))
suite.addTest(BenchmarkOptionsTest(benchmark.Name()))
+ suite.addTest(TestNoBenchmarkNamesDuplication())
def load_tests(_, _2, _3):
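The trailing context line above is the start of the file's load_tests() hook, the unittest protocol that lets a module hand back its own test suite when it is loaded. The hook's body is not shown in this hunk; the following is only a hedged sketch of how such a hook typically wires in a test like TestNoBenchmarkNamesDuplication (argument names and the placeholder test body are illustrative, not the file's actual implementation).

import unittest

class TestNoBenchmarkNamesDuplication(unittest.TestCase):
    def runTest(self):
        # Placeholder body; the real duplicate-name check is in the diff above.
        pass

def load_tests(loader, standard_tests, pattern):
    # unittest's load_tests protocol: when this module is loaded, return a
    # hand-built suite instead of the default per-class discovery result.
    suite = unittest.TestSuite()
    suite.addTest(TestNoBenchmarkNamesDuplication())
    return suite

if __name__ == '__main__':
    unittest.main()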