diff options
author | tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-03-14 18:59:24 +0000 |
---|---|---|
committer | tonyg@chromium.org <tonyg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-03-14 18:59:24 +0000 |
commit | c409a5f26835cb7f5bffe0d323df71c376e445c4 (patch) | |
tree | 58fccc98ceb44548305fb430b4dee347326fbca9 /tools | |
parent | 76368388d792686163b82e77c6b0788e4e0e7969 (diff) | |
download | chromium_src-c409a5f26835cb7f5bffe0d323df71c376e445c4.zip chromium_src-c409a5f26835cb7f5bffe0d323df71c376e445c4.tar.gz chromium_src-c409a5f26835cb7f5bffe0d323df71c376e445c4.tar.bz2 |
[Telemetry] Add an --output-trace-tag parameter.
When the perf bots run a test multiple times in one step (like for the reference
builds), they need some tag on the traces to differentiate the results.
Currently we assume that when --browser=exact is used it is a reference
build and we append '_ref' to the traces. This hack worked, but the page cyclers
do multiple runs with different profiles and different trace tags. So we need a
way to pass in the trace tag to use along with each profile type.
This patch adds a --output-trace-tag parameter to accomplish that. One advantage
is that we can now use it for _ref and clean up the hack once the build bots
pass it.
BUG=None
TEST=tools/perf/run_multipage_benchmarks --output-trace-tag=_foo --browser=system sunspider tools/perf/page_sets/sunspider.json
Review URL: https://codereview.chromium.org/12434008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@188153 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'tools')
-rwxr-xr-x | tools/telemetry/telemetry/page/page_benchmark_runner.py | 16 |
1 files changed, 13 insertions, 3 deletions
diff --git a/tools/telemetry/telemetry/page/page_benchmark_runner.py b/tools/telemetry/telemetry/page/page_benchmark_runner.py index f9450b6..5ba4591 100755 --- a/tools/telemetry/telemetry/page/page_benchmark_runner.py +++ b/tools/telemetry/telemetry/page/page_benchmark_runner.py @@ -47,6 +47,9 @@ def Main(benchmark_dir): parser.add_option('-o', '--output', dest='output_file', help='Redirects output to a file. Defaults to stdout.') + parser.add_option('--output-trace-tag', + dest='output_trace_tag', + help='Append a tag to the key of each result trace.') benchmark = None if benchmark_name is not None: @@ -95,9 +98,16 @@ Use --browser=list to figure out which are available.\n""" with page_runner.PageRunner(ps) as runner: runner.Run(options, possible_browser, benchmark, results) - # When using an exact executable, assume it is a reference build for the - # purpose of outputting the perf results. - results.PrintSummary(options.browser_executable and '_ref' or '') + + output_trace_tag = '' + if options.output_trace_tag: + output_trace_tag = options.output_trace_tag + elif options.browser_executable: + # When using an exact executable, assume it is a reference build for the + # purpose of outputting the perf results. + # TODO(tonyg): Remove this branch once the perfbots use --output-trace-tag. + output_trace_tag = '_ref' + results.PrintSummary(output_trace_tag) if len(results.page_failures): logging.warning('Failed pages: %s', '\n'.join( |