path: root/tools/accessibility/rebase_dump_accessibility_tree_test.py
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Rebase DumpAccessibilityTree Tests.

This script is intended to be run when you make a change that could affect the
expected results of tests in:

    content/test/data/accessibility

It assumes that you've already uploaded a change and the try jobs have finished.
It collects all of the results from try jobs on all platforms and updates the
expectation files locally. From there you can run 'git diff' to make sure all
of the changes look reasonable, then upload the change for code review.
"""

import os
import re
import sys
import urllib

# Load BeautifulSoup. It's checked into two places in the Chromium tree;
# this script uses the copy bundled with trace-viewer.
sys.path.append(
    'third_party/trace-viewer/third_party/tvcm/third_party/beautifulsoup')
from BeautifulSoup import BeautifulSoup

# The location of the DumpAccessibilityTree html test files and expectations.
TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility')

# A global that keeps track of files we've already updated, so we don't
# bother to update the same file twice.
completed_files = set()

def GitClIssue():
  '''Retrieve the current issue number as a string.'''
  result = os.popen('git cl issue').read()
  # Returns string like: 'Issue number: 12345 (https://...)'
  return result.split()[2]

def ParseFailure(name, url):
  '''Parse one failed try job, given the trybot's name and the url of its
  build log, and rewrite the affected expectation files locally.'''

  # Figure out the platform.
  if name.find('android') >= 0:
    platform_suffix = '-expected-android.txt'
  elif name.find('mac') >= 0:
    platform_suffix = '-expected-mac.txt'
  elif name.find('win') >= 0:
    platform_suffix = '-expected-win.txt'
  else:
    return

  # Read the content_browsertests log file.
  data = None
  lines = None
  urls = []
  for url_suffix in [
      '/steps/content_browsertests%20(with%20patch)/logs/stdio/text',
      '/steps/content_browsertests/logs/stdio/text']:
    urls.append(url + url_suffix)
  for url in urls:
    response = urllib.urlopen(url)
    if response.getcode() == 200:
      data = response.read()
      lines = data.splitlines()
      break

  if not data:
    return

  # Parse the log file for failing tests and overwrite the expected
  # result file locally with the actual results from the log.
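  #
  # The loop below assumes log excerpts roughly of this shape (the markers
  # are the ones matched in the code; the test and file names here are
  # purely illustrative):
  #
  #   [ RUN      ] DumpAccessibilityTreeTest.SomeTest
  #   Testing: content/test/data/accessibility/some-test.html
  #   Actual
  #   <separator line, skipped via start = i + 2>
  #   ...actual tree dump lines...
  #   [  FAILED  ] DumpAccessibilityTreeTest.SomeTest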
  test_name = None
  start = None
  filename = None
  for i, line in enumerate(lines):
    if line[:12] == '[ RUN      ]':
      test_name = line[13:]
    if test_name and line[:8] == 'Testing:':
      filename = re.search('content.test.*accessibility.(.*)', line).group(1)
    if test_name and line == 'Actual':
      start = i + 2
    if start and test_name and filename and line[:12] == '[  FAILED  ]':
      # Get the path to the html file.
      dst_fullpath = os.path.join(TEST_DATA_PATH, filename)
      # Strip off .html and replace it with the platform expected suffix.
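      # For example (file name purely illustrative), 'some-test.html' from a
      # mac trybot becomes 'some-test-expected-mac.txt'.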
      dst_fullpath = dst_fullpath[:-5] + platform_suffix
      if dst_fullpath in completed_files:
        continue

      actual = [line for line in lines[start : i - 1] if line]
      with open(dst_fullpath, 'w') as fp:
        fp.write('\n'.join(actual))
      print dst_fullpath
      completed_files.add(dst_fullpath)
      start = None
      test_name = None
      filename = None

def ParseTrybots(data):
  '''Parse the code review page to find links to try bots.'''
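  # The review page is expected to contain anchor elements roughly like
  #   <a class="build-result build-status-color-failure" href="...">bot name</a>
  # (illustrative; only the class attribute, href and text are relied on).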
  soup = BeautifulSoup(data)
  failures = soup.findAll(
      'a',
      { "class" : "build-result build-status-color-failure" })
  print 'Found %d trybots that failed' % len(failures)
  for f in failures:
    name = f.text.replace(' ', '')
    url = f['href']
    ParseFailure(name, url)

def Run():
  '''Main. Get the issue number and parse the code review page.'''
  if len(sys.argv) == 2:
    issue = sys.argv[1]
  else:
    issue = GitClIssue()

  url = 'https://codereview.chromium.org/%s' % issue
  print 'Fetching issue from %s' % url
  response = urllib.urlopen(url)
  if response.getcode() != 200:
    print 'Error code %d accessing url: %s' % (response.getcode(), url)
    return 1
  data = response.read()
  ParseTrybots(data)

if __name__ == '__main__':
  sys.exit(Run())