author     nednguyen <nednguyen@google.com>      2015-01-22 19:17:43 -0800
committer  Commit bot <commit-bot@chromium.org>  2015-01-23 03:19:08 +0000
commit     6761cdf59ad5d83ac687637bc022b4764a2fcafc (patch)
tree       bce858e915bb3d985c22dba8f2596b4f54fb5e98
parent     b3b0450905b9da1a3c6a997773d84d7f6727b3cd (diff)
[Telemetry] Make the default benchmark Name be module_name.ClassName

*NOTE: this patch may *NOT* be ready for submission yet, until all existing benchmarks that rely on the old naming scheme have explicitly defined their Name() methods to avoid a naming change.

BUG=446891
Review URL: https://codereview.chromium.org/857673002
Cr-Commit-Position: refs/heads/master@{#312755}
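In practice, a benchmark whose name was previously derived from its module (plus optional tag and page-set name) now defaults to '<module>.<ClassName>'; the GPU tests in this patch therefore pin their historical names by overriding Name(). The stand-alone sketch below is illustrative only, not Telemetry code: the ExampleBenchmark class and both helper functions are hypothetical, and they merely mirror the removed and added default implementations of Benchmark.Name().

def old_default_name(cls):
  # Mirrors the implementation removed from tools/telemetry/telemetry/benchmark.py:
  # last module component, plus optional 'tag' and page_set name.
  name = cls.__module__.split('.')[-1]
  if hasattr(cls, 'tag'):
    name += '.' + cls.tag
  if hasattr(cls, 'page_set'):
    name += '.' + cls.page_set.Name()
  return name

def new_default_name(cls):
  # Mirrors the implementation added by this patch: '<module>.<ClassName>'.
  return '%s.%s' % (cls.__module__.split('.')[-1], cls.__name__)

class ExampleBenchmark(object):
  """Hypothetical benchmark class used only to illustrate the two schemes."""

ExampleBenchmark.__module__ = 'benchmarks.example_module'  # pretend module path
print(old_default_name(ExampleBenchmark))  # -> 'example_module'
print(new_default_name(ExampleBenchmark))  # -> 'example_module.ExampleBenchmark'

A benchmark that must keep its old, shorter name overrides Name() with a classmethod returning that string, exactly as each GPU test below does.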
-rw-r--r--  content/test/gpu/gpu_tests/context_lost.py                   5
-rw-r--r--  content/test/gpu/gpu_tests/gpu_process.py                    4
-rw-r--r--  content/test/gpu/gpu_tests/gpu_rasterization.py              4
-rw-r--r--  content/test/gpu/gpu_tests/hardware_accelerated_feature.py   4
-rw-r--r--  content/test/gpu/gpu_tests/maps.py                           4
-rw-r--r--  content/test/gpu/gpu_tests/memory_test.py                    4
-rw-r--r--  content/test/gpu/gpu_tests/pixel.py                          4
-rw-r--r--  content/test/gpu/gpu_tests/screenshot_sync.py                4
-rw-r--r--  content/test/gpu/gpu_tests/trace_test.py                     4
-rw-r--r--  content/test/gpu/gpu_tests/webgl_conformance.py              4
-rw-r--r--  content/test/gpu/gpu_tests/webgl_robustness.py               4
-rw-r--r--  tools/perf/benchmarks/benchmark_unittest.py                  22
-rw-r--r--  tools/telemetry/telemetry/benchmark.py                       7
13 files changed, 46 insertions, 28 deletions
diff --git a/content/test/gpu/gpu_tests/context_lost.py b/content/test/gpu/gpu_tests/context_lost.py
index db5ced4..830937c 100644
--- a/content/test/gpu/gpu_tests/context_lost.py
+++ b/content/test/gpu/gpu_tests/context_lost.py
@@ -321,6 +321,11 @@ class ContextLost(benchmark_module.Benchmark):
   enabled = True
   test = _ContextLostValidator
+
+  @classmethod
+  def Name(cls):
+    return 'context_lost'
+
   def CreateExpectations(self):
     return context_lost_expectations.ContextLostExpectations()
diff --git a/content/test/gpu/gpu_tests/gpu_process.py b/content/test/gpu/gpu_tests/gpu_process.py
index d9e3ce5..91d43c1 100644
--- a/content/test/gpu/gpu_tests/gpu_process.py
+++ b/content/test/gpu/gpu_tests/gpu_process.py
@@ -37,6 +37,10 @@ class GpuProcess(benchmark.Benchmark):
"""Tests that accelerated content triggers the creation of a GPU process"""
test = _GpuProcessValidator
+ @classmethod
+ def Name(cls):
+ return 'gpu_process'
+
def CreateExpectations(self):
return expectations.GpuProcessExpectations()
diff --git a/content/test/gpu/gpu_tests/gpu_rasterization.py b/content/test/gpu/gpu_tests/gpu_rasterization.py
index 39146cf..404ed6e 100644
--- a/content/test/gpu/gpu_tests/gpu_rasterization.py
+++ b/content/test/gpu/gpu_tests/gpu_rasterization.py
@@ -67,6 +67,10 @@ class GpuRasterization(cloud_storage_test_base.TestBase):
"""Tests that GPU rasterization produces valid content"""
test = _GpuRasterizationValidator
+ @classmethod
+ def Name(cls):
+ return 'gpu_rasterization'
+
def CreatePageSet(self, options):
page_set = page_sets.GpuRasterizationTestsPageSet()
for page in page_set.pages:
diff --git a/content/test/gpu/gpu_tests/hardware_accelerated_feature.py b/content/test/gpu/gpu_tests/hardware_accelerated_feature.py
index 9b140b6..af81c0d 100644
--- a/content/test/gpu/gpu_tests/hardware_accelerated_feature.py
+++ b/content/test/gpu/gpu_tests/hardware_accelerated_feature.py
@@ -49,6 +49,10 @@ class HardwareAcceleratedFeature(benchmark.Benchmark):
"""Tests GPU acceleration is reported as active for various features"""
test = _HardwareAcceleratedFeatureValidator
+ @classmethod
+ def Name(cls):
+ return 'hardware_accelerated_feature'
+
def CreateExpectations(self):
return expectations.HardwareAcceleratedFeatureExpectations()
diff --git a/content/test/gpu/gpu_tests/maps.py b/content/test/gpu/gpu_tests/maps.py
index 079d69e..430657d 100644
--- a/content/test/gpu/gpu_tests/maps.py
+++ b/content/test/gpu/gpu_tests/maps.py
@@ -90,6 +90,10 @@ class Maps(cloud_storage_test_base.TestBase):
"""Google Maps pixel tests."""
test = _MapsValidator
+ @classmethod
+ def Name(cls):
+ return 'maps'
+
def CreateExpectations(self):
return maps_expectations.MapsExpectations()
diff --git a/content/test/gpu/gpu_tests/memory_test.py b/content/test/gpu/gpu_tests/memory_test.py
index 5ff6a43..576f13f 100644
--- a/content/test/gpu/gpu_tests/memory_test.py
+++ b/content/test/gpu/gpu_tests/memory_test.py
@@ -103,6 +103,10 @@ class MemoryTest(benchmark.Benchmark):
"""Tests GPU memory limits"""
test = _MemoryValidator
+ @classmethod
+ def Name(cls):
+ return 'memory_test'
+
def CreateExpectations(self):
return memory_test_expectations.MemoryTestExpectations()
diff --git a/content/test/gpu/gpu_tests/pixel.py b/content/test/gpu/gpu_tests/pixel.py
index 6613dae..6db39cb 100644
--- a/content/test/gpu/gpu_tests/pixel.py
+++ b/content/test/gpu/gpu_tests/pixel.py
@@ -150,6 +150,10 @@ class Pixel(cloud_storage_test_base.TestBase):
   test = _PixelValidator
   @classmethod
+  def Name(cls):
+    return 'pixel'
+
+  @classmethod
   def AddBenchmarkCommandLineArgs(cls, group):
     super(Pixel, cls).AddBenchmarkCommandLineArgs(group)
     group.add_option('--reference-dir',
diff --git a/content/test/gpu/gpu_tests/screenshot_sync.py b/content/test/gpu/gpu_tests/screenshot_sync.py
index 7f4a5e2..f482c62 100644
--- a/content/test/gpu/gpu_tests/screenshot_sync.py
+++ b/content/test/gpu/gpu_tests/screenshot_sync.py
@@ -48,6 +48,10 @@ class ScreenshotSyncProcess(benchmark.Benchmark):
   they were requested"""
   test = _ScreenshotSyncValidator
+  @classmethod
+  def Name(cls):
+    return 'screenshot_sync'
+
   def CreateExpectations(self):
     return expectations.ScreenshotSyncExpectations()
diff --git a/content/test/gpu/gpu_tests/trace_test.py b/content/test/gpu/gpu_tests/trace_test.py
index 128448d..fb4de0f 100644
--- a/content/test/gpu/gpu_tests/trace_test.py
+++ b/content/test/gpu/gpu_tests/trace_test.py
@@ -59,6 +59,10 @@ class TraceTest(benchmark.Benchmark):
"""Tests GPU traces"""
test = _TraceValidator
+ @classmethod
+ def Name(cls):
+ return 'trace_test'
+
def CreateExpectations(self):
return trace_test_expectations.TraceTestExpectations()
diff --git a/content/test/gpu/gpu_tests/webgl_conformance.py b/content/test/gpu/gpu_tests/webgl_conformance.py
index 50d19f3..1de2fbb 100644
--- a/content/test/gpu/gpu_tests/webgl_conformance.py
+++ b/content/test/gpu/gpu_tests/webgl_conformance.py
@@ -103,6 +103,10 @@ class WebglConformance(benchmark_module.Benchmark):
     super(WebglConformance, self).__init__(max_failures=10)
   @classmethod
+  def Name(cls):
+    return 'webgl_conformance'
+
+  @classmethod
   def AddBenchmarkCommandLineArgs(cls, group):
     group.add_option('--webgl-conformance-version',
         help='Version of the WebGL conformance tests to run.',
diff --git a/content/test/gpu/gpu_tests/webgl_robustness.py b/content/test/gpu/gpu_tests/webgl_robustness.py
index d1230a3..0612760 100644
--- a/content/test/gpu/gpu_tests/webgl_robustness.py
+++ b/content/test/gpu/gpu_tests/webgl_robustness.py
@@ -61,6 +61,10 @@ class WebglRobustnessPage(page.Page):
 class WebglRobustness(benchmark.Benchmark):
   test = WebglConformanceValidator
+  @classmethod
+  def Name(cls):
+    return 'webgl_robustness'
+
   def CreatePageSet(self, options):
     ps = page_set.PageSet(
       file_path=conformance_path,
diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py
index fb4eae8..1b6cc10 100644
--- a/tools/perf/benchmarks/benchmark_unittest.py
+++ b/tools/perf/benchmarks/benchmark_unittest.py
@@ -47,27 +47,6 @@ class TestNoBenchmarkNamesDuplication(unittest.TestCase):
                       'Multiple benchmarks with the same name %s are '
                       'found: %s' % (n, str(names_to_benchmarks[n])))
-# Test all perf benchmarks explicitly define the Name() method and the name
-# values are the same as the default one.
-# TODO(nednguyen): remove this test after all perf benchmarks define Name() and
-# checked in.
-class TestPerfBenchmarkNames(unittest.TestCase):
-  def runTest(self):
-    all_benchmarks = _GetAllPerfBenchmarks()
-
-    def BenchmarkName(cls):  # Copy from Benchmark.Name()'s implementation
-      name = cls.__module__.split('.')[-1]
-      if hasattr(cls, 'tag'):
-        name += '.' + cls.tag
-      if hasattr(cls, 'page_set'):
-        name += '.' + cls.page_set.Name()
-      return name
-
-    for b in all_benchmarks:
-      self.assertNotEquals(b.Name, benchmark_module.Benchmark.Name)
-      self.assertEquals(b.Name(), BenchmarkName(b))
-
-
 def _AddBenchmarkOptionsTests(suite):
   # Using |index_by_class_name=True| allows returning multiple benchmarks
   # from a module.
@@ -82,7 +61,6 @@ def _AddBenchmarkOptionsTests(suite):
             _BenchmarkOptionsTestGenerator(benchmark))
     suite.addTest(BenchmarkOptionsTest(benchmark.Name()))
   suite.addTest(TestNoBenchmarkNamesDuplication())
-  suite.addTest(TestPerfBenchmarkNames())
 def load_tests(_, _2, _3):
diff --git a/tools/telemetry/telemetry/benchmark.py b/tools/telemetry/telemetry/benchmark.py
index f32a1fd..3489353 100644
--- a/tools/telemetry/telemetry/benchmark.py
+++ b/tools/telemetry/telemetry/benchmark.py
@@ -77,12 +77,7 @@ class Benchmark(command_line.Command):
   @classmethod
   def Name(cls):
-    name = cls.__module__.split('.')[-1]
-    if hasattr(cls, 'tag'):
-      name += '.' + cls.tag
-    if hasattr(cls, 'page_set'):
-      name += '.' + cls.page_set.Name()
-    return name
+    return '%s.%s' % (cls.__module__.split('.')[-1], cls.__name__)
   @classmethod
   def AddCommandLineArgs(cls, parser):