#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Snapshot Build Bisect Tool

This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
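
Example usage (revision numbers below are illustrative):
  tools/bisect-builds.py -a linux64 -g 125000 -b 135000 -- --no-first-run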
"""

# The root URL for storage.
BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'

# The root URL for official builds.
OFFICIAL_BASE_URL = 'http://chrome4linux.mtv.corp.google.com/archives'

# Changelogs URL.
CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
                'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d%%3A%d'

# Official Changelogs URL.
OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\
                         'changelog?old_version=%s&new_version=%s'

# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'

# WebKit Changelogs URL.
WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \
                       'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000'

DONE_MESSAGE = 'You are probably looking for a change made after ' \
               '%s (known good), but no later than %s (first known bad).'

###############################################################################

import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile


class PathContext(object):
  """A PathContext is used to carry the information used to construct URLs and
  paths when dealing with the storage server and archives."""
  def __init__(self, platform, good_revision, bad_revision, is_official):
    super(PathContext, self).__init__()
    # Store off the input parameters.
    self.platform = platform  # What's passed in to the '-a/--archive' option.
    self.good_revision = good_revision
    self.bad_revision = bad_revision
    self.is_official = is_official

    # The name of the ZIP file in a revision directory on the server.
    self.archive_name = None

    # Set some internal members:
    #   _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
    #   _archive_extract_dir = Uncompressed directory in the archive_name file.
    #   _binary_name = The name of the executable to run.
    if self.platform == 'linux' or self.platform == 'linux64':
      self._binary_name = 'chrome'
    elif self.platform == 'mac':
      self.archive_name = 'chrome-mac.zip'
      self._archive_extract_dir = 'chrome-mac'
    elif self.platform == 'win':
      self.archive_name = 'chrome-win32.zip'
      self._archive_extract_dir = 'chrome-win32'
      self._binary_name = 'chrome.exe'
    else:
      raise Exception('Invalid platform: %s' % self.platform)

    if is_official:
      if self.platform == 'linux':
        self._listing_platform_dir = 'lucid32bit/'
        self.archive_name = 'chrome-lucid32bit.zip'
        self._archive_extract_dir = 'chrome-lucid32bit'
      elif self.platform == 'linux64':
        self._listing_platform_dir = 'lucid64bit/'
        self.archive_name = 'chrome-lucid64bit.zip'
        self._archive_extract_dir = 'chrome-lucid64bit'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'mac/'
        self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
      elif self.platform == 'win':
        self._listing_platform_dir = 'win/'
    else:
      if self.platform == 'linux' or self.platform == 'linux64':
        self.archive_name = 'chrome-linux.zip'
        self._archive_extract_dir = 'chrome-linux'
        if self.platform == 'linux':
          self._listing_platform_dir = 'Linux/'
        elif self.platform == 'linux64':
          self._listing_platform_dir = 'Linux_x64/'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'Mac/'
        self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
      elif self.platform == 'win':
        self._listing_platform_dir = 'Win/'

  def GetListingURL(self, marker=None):
    """Returns the URL for a directory listing, with an optional marker."""
    marker_param = ''
    if marker:
      marker_param = '&marker=' + str(marker)
    return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \
        marker_param

  def GetDownloadURL(self, revision):
    """Gets the download URL for a build archive of a specific revision."""
    if self.is_official:
      return "%s/%s/%s%s" % (
          OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
          self.archive_name)
    else:
      return "%s/%s%s/%s" % (
          BASE_URL, self._listing_platform_dir, revision, self.archive_name)

  def GetLastChangeURL(self):
    """Returns a URL to the LAST_CHANGE file."""
    return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'

  def GetLaunchPath(self):
    """Returns a relative path (presumably from the archive extraction location)
    that is used to run the executable."""
    return os.path.join(self._archive_extract_dir, self._binary_name)

  def ParseDirectoryIndex(self):
    """Parses the Google Storage directory listing into a list of revision
    numbers. The range starts with self.good_revision and goes until
    self.bad_revision."""

    def _FetchAndParse(url):
      """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
      next-marker is not None, then the listing is a partial listing and another
      fetch should be performed with next-marker being the marker= GET
      parameter."""
      handle = urllib.urlopen(url)
      document = ElementTree.parse(handle)

      # All nodes in the tree are namespaced. Get the root's tag name to extract
      # the namespace. Etree does namespaces as |{namespace}tag|.
      root_tag = document.getroot().tag
      end_ns_pos = root_tag.find('}')
      if end_ns_pos == -1:
        raise Exception("Could not locate end namespace for directory index")
      namespace = root_tag[:end_ns_pos + 1]

      # Find the prefix (_listing_platform_dir) and whether or not the list is
      # truncated.
      prefix_len = len(document.find(namespace + 'Prefix').text)
      next_marker = None
      is_truncated = document.find(namespace + 'IsTruncated')
      if is_truncated is not None and is_truncated.text.lower() == 'true':
        next_marker = document.find(namespace + 'NextMarker').text

      # Get a list of all the revisions.
      all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
                                      namespace + 'Prefix')
      # The <Prefix> nodes have content of the form of
      # |_listing_platform_dir/revision/|. Strip off the platform dir and the
      # trailing slash to just have a number.
      revisions = []
      for prefix in all_prefixes:
        revnum = prefix.text[prefix_len:-1]
        try:
          revnum = int(revnum)
          revisions.append(revnum)
        except ValueError:
          pass
      return (revisions, next_marker)
      
    # Fetch the first list of revisions.
    (revisions, next_marker) = _FetchAndParse(self.GetListingURL())

    # If the result list was truncated, refetch with the next marker. Do this
    # until an entire directory listing is done.
    while next_marker:
      next_url = self.GetListingURL(next_marker)
      (new_revisions, next_marker) = _FetchAndParse(next_url)
      revisions.extend(new_revisions)
    return revisions

  def GetRevList(self):
    """Gets the list of revision numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = self.good_revision
    maxrev = self.bad_revision
    revlist = map(int, self.ParseDirectoryIndex())
    revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)]
    revlist.sort()
    return revlist

  def GetOfficialBuildsList(self):
    """Gets the list of official build numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = self.good_revision
    maxrev = self.bad_revision
    handle = urllib.urlopen(OFFICIAL_BASE_URL)
    dirindex = handle.read()
    handle.close()
    build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
    final_list = []
    start_index = 0
    end_index = 0

    parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
    for build_number in sorted(parsed_build_numbers):
      path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
             self._listing_platform_dir + self.archive_name
      try:
        connection = urllib.urlopen(path)
        connection.close()
        final_list.append(str(build_number))
        # Compute the slice bounds from the builds that actually have an
        # archive, so that builds skipped below cannot shift the indices.
        if str(build_number) == minrev:
          start_index = len(final_list)  # The slice excludes the good build.
        if str(build_number) == maxrev:
          end_index = len(final_list)  # The slice includes the bad build.
      except IOError:
        # urllib.urlopen signals HTTP failures with IOError (urllib has no
        # HTTPError attribute); treat a failure as a missing archive.
        pass
    return final_list[start_index:end_index]

def UnzipFilenameToDir(filename, dir):
  """Unzip |filename| to directory |dir|."""
  cwd = os.getcwd()
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  try:
    if not os.path.isdir(dir):
      os.mkdir(dir)
    os.chdir(dir)
    # Extract files.
    for info in zf.infolist():
      name = info.filename
      if name.endswith('/'):  # dir
        if not os.path.isdir(name):
          os.makedirs(name)
      else:  # file
        directory = os.path.dirname(name)
        # |directory| is '' for files at the archive root; also avoid
        # clobbering the |dir| parameter.
        if directory and not os.path.isdir(directory):
          os.makedirs(directory)
        out = open(name, 'wb')
        out.write(zf.read(name))
        out.close()
      # Set permissions. Permission info in external_attr is shifted 16 bits.
      os.chmod(name, info.external_attr >> 16L)
    os.chdir(cwd)
  except Exception, e:
    print >>sys.stderr, e
    sys.exit(1)


def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
  """Downloads and unzips revision |rev|.
  @param context A PathContext instance.
  @param rev The Chromium revision number/tag to download.
  @param filename The destination for the downloaded file.
  @param quit_event A threading.Event which will be set by the master thread to
                    indicate that the download should be aborted.
  @param progress_event A threading.Event which will be set by the master thread
                    to indicate that the progress of the download should be
                    displayed.
  """
  def ReportHook(blocknum, blocksize, totalsize):
    if quit_event and quit_event.isSet():
      raise RuntimeError("Aborting download of revision %s" % str(rev))
    if progress_event and progress_event.isSet():
      size = blocknum * blocksize
      if totalsize == -1:  # Total size not known.
        progress = "Received %d bytes" % size
      else:
        size = min(totalsize, size)
        progress = "Received %d of %d bytes, %.2f%%" % (
            size, totalsize, 100.0 * size / totalsize)
      # Send a \r to let all progress messages use just one line of output.
      sys.stdout.write("\r" + progress)
      sys.stdout.flush()

  download_url = context.GetDownloadURL(rev)
  try:
    urllib.urlretrieve(download_url, filename, ReportHook)
    if progress_event and progress_event.isSet():
      print
  except RuntimeError, e:
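    # Raised by ReportHook when quit_event is set; the partially-downloaded
    # file is simply abandoned.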
    pass


def RunRevision(context, revision, zipfile, profile, num_runs, args):
  """Given a zipped revision, unzip it and run the test."""
  print "Trying revision %s..." % str(revision)

  # Create a temp directory and unzip the revision into it.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  UnzipFilenameToDir(zipfile, tempdir)
  os.chdir(tempdir)

  # Run the build as many times as specified.
  testargs = [context.GetLaunchPath(), '--user-data-dir=%s' % profile] + args
  # The sandbox must be run as root on Official Chrome, so bypass it.
  if context.is_official and (context.platform == 'linux' or
      context.platform == 'linux64'):
    testargs.append('--no-sandbox')

  for i in range(0, num_runs):
    subproc = subprocess.Popen(testargs,
                               bufsize=-1,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    (stdout, stderr) = subproc.communicate()

  os.chdir(cwd)
  try:
    shutil.rmtree(tempdir, True)
  except Exception, e:
    pass

  return (subproc.returncode, stdout, stderr)


def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
  """Ask the user whether build |rev| is good or bad."""
  # Loop until we get a response that we can parse.
  while True:
    response = raw_input('Revision %s is [(g)ood/(b)ad/(u)nknown/(q)uit]: ' %
                         str(rev))
    if response in ('g', 'b', 'u'):
      return response
    if response == 'q':
      raise SystemExit()
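

# The |evaluate| parameter of Bisect() below need not be interactive. A minimal
# sketch of an automated callback, assuming a nonzero exit status marks a bad
# build (hypothetical; not part of this script's interface):
#
#   def ExitStatusEvaluate(rev, official_builds, status, stdout, stderr):
#     return 'b' if status != 0 else 'g'
#
#   Bisect('linux64', False, good_rev=125000, bad_rev=135000,
#          evaluate=ExitStatusEvaluate)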


class DownloadJob(object):
  """DownloadJob represents a task to download a given Chromium revision."""
  def __init__(self, context, name, rev, zipfile):
    super(DownloadJob, self).__init__()
    # Store off the input parameters.
    self.context = context
    self.name = name
    self.rev = rev
    self.zipfile = zipfile
    self.quit_event = threading.Event()
    self.progress_event = threading.Event()

  def Start(self):
    """Starts the download."""
    fetchargs = (self.context,
                 self.rev,
                 self.zipfile,
                 self.quit_event,
                 self.progress_event)
    self.thread = threading.Thread(target=FetchRevision,
                                   name=self.name,
                                   args=fetchargs)
    self.thread.start()

  def Stop(self):
    """Stops the download which must have been started previously."""
    self.quit_event.set()
    self.thread.join()
    os.unlink(self.zipfile)

  def WaitFor(self):
    """Prints a message and waits for the download to complete. The download
    must have been started previously."""
    print "Downloading revision %s..." % str(self.rev)
    self.progress_event.set()  # Display progress of download.
    self.thread.join()


def Bisect(platform,
           official_builds,
           good_rev=0,
           bad_rev=0,
           num_runs=1,
           try_args=(),
           profile=None,
           evaluate=AskIsGoodBuild):
  """Given known good and known bad revisions, run a binary search on all
  archived revisions to determine the last known good revision.

  @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
  @param official_builds Specify build type (Chromium or Official build).
  @param good_rev Number/tag of the last known good revision.
  @param bad_rev Number/tag of the first known bad revision.
  @param num_runs Number of times to run each build for asking good/bad.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param evaluate A function which returns 'g' if the argument build is good,
                  'b' if it's bad or 'u' if unknown.

  Threading is used to fetch Chromium revisions in the background, speeding up
  the user's experience. For example, suppose the bounds of the search are
  good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
  whether revision 50 is good or bad, the next revision to check will be either
  25 or 75. So, while revision 50 is being checked, the script will download
  revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
  known:

    - If rev 50 is good, the download of rev 25 is cancelled, and the next test
      is run on rev 75.

    - If rev 50 is bad, the download of rev 75 is cancelled, and the next test
      is run on rev 25.
  """

  if not profile:
    profile = 'profile'

  context = PathContext(platform, good_rev, bad_rev, official_builds)
  cwd = os.getcwd()

  print "Downloading list of known revisions..."
  _GetDownloadPath = lambda rev: os.path.join(cwd,
      '%s-%s' % (str(rev), context.archive_name))
  if official_builds:
    revlist = context.GetOfficialBuildsList()
  else:
    revlist = context.GetRevList()

  # Get a list of revisions to bisect across.
  if len(revlist) < 2:  # Don't have enough builds to bisect.
    msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
    raise RuntimeError(msg)

  # Figure out our bookends and first pivot point; fetch the pivot revision.
  good = 0
  bad = len(revlist) - 1
  pivot = bad / 2
  rev = revlist[pivot]
  zipfile = _GetDownloadPath(rev)
  initial_fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
  initial_fetch.Start()
  initial_fetch.WaitFor()

  # Binary search time!
  while zipfile and bad - good > 1:
    # Pre-fetch next two possible pivots
    #   - down_pivot is the next revision to check if the current revision turns
    #     out to be bad.
    #   - up_pivot is the next revision to check if the current revision turns
    #     out to be good.
    down_pivot = int((pivot - good) / 2) + good
    down_fetch = None
    if down_pivot != pivot and down_pivot != good:
      down_rev = revlist[down_pivot]
      down_fetch = DownloadJob(context, 'down_fetch', down_rev,
                               _GetDownloadPath(down_rev))
      down_fetch.Start()

    up_pivot = int((bad - pivot) / 2) + pivot
    up_fetch = None
    if up_pivot != pivot and up_pivot != bad:
      up_rev = revlist[up_pivot]
      up_fetch = DownloadJob(context, 'up_fetch', up_rev,
                             _GetDownloadPath(up_rev))
      up_fetch.Start()

    # Run test on the pivot revision.
    (status, stdout, stderr) = RunRevision(context,
                                           rev,
                                           zipfile,
                                           profile,
                                           num_runs,
                                           try_args)
    os.unlink(zipfile)
    zipfile = None

    # Call the evaluate function to see if the current revision is good or bad.
    # On that basis, kill one of the background downloads and complete the
    # other, as described in the comments above.
    try:
      answer = evaluate(rev, official_builds, status, stdout, stderr)
      if answer == 'g':
        good = pivot
        if down_fetch:
          down_fetch.Stop()  # Kill the download of the older revision.
        if up_fetch:
          up_fetch.WaitFor()
          pivot = up_pivot
          zipfile = up_fetch.zipfile
      elif answer == 'b':
        bad = pivot
        if up_fetch:
          up_fetch.Stop()  # Kill the download of the newer revision.
        if down_fetch:
          down_fetch.WaitFor()
          pivot = down_pivot
          zipfile = down_fetch.zipfile
      elif answer == 'u':
        # Nuke the revision from the revlist and choose a new pivot.
        revlist.pop(pivot)
        bad -= 1  # Assumes bad >= pivot.

        fetch = None
        if bad - good > 1:
          # Alternate between using down_pivot or up_pivot for the new pivot
          # point, without affecting the range. Do this instead of setting the
          # pivot to the midpoint of the new range because adjacent revisions
          # are likely affected by the same issue that caused the (u)nknown
          # response.
          if up_fetch and down_fetch:
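            # revlist.pop() above flips len(revlist)'s parity on each unknown
            # answer, so this alternates between the two pre-fetched pivots.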
            fetch = [up_fetch, down_fetch][len(revlist) % 2]
          elif up_fetch:
            fetch = up_fetch
          else:
            fetch = down_fetch
          fetch.WaitFor()
          if fetch == up_fetch:
            pivot = up_pivot - 1  # Subtracts 1 because revlist was resized.
          else:
            pivot = down_pivot
          zipfile = fetch.zipfile

        if down_fetch and fetch != down_fetch:
          down_fetch.Stop()
        if up_fetch and fetch != up_fetch:
          up_fetch.Stop()
      else:
        assert False, "Unexpected return value from evaluate(): " + answer
    except SystemExit:
      print "Cleaning up..."
      for f in [_GetDownloadPath(revlist[down_pivot]),
                _GetDownloadPath(revlist[up_pivot])]:
        try:
          os.unlink(f)
        except OSError:
          pass
      sys.exit(0)

    rev = revlist[pivot]

  return (revlist[good], revlist[bad])


def GetWebKitRevisionForChromiumRevision(rev):
  """Returns the webkit revision that was in chromium's DEPS file at
  chromium revision |rev|."""
  # . doesn't match newlines without re.DOTALL, so this is safe.
  webkit_re = re.compile(r'webkit_revision.:\D*(\d+)')
  url = urllib.urlopen(DEPS_FILE % rev)
  m = webkit_re.search(url.read())
  url.close()
  if m:
    return int(m.group(1))
  else:
    raise Exception('Could not get webkit revision for cr rev %d' % rev)


def main():
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on the snapshot builds.\n'
           '\n'
           'Tip: add "-- --no-first-run" to bypass the first run prompts.')
  parser = optparse.OptionParser(usage=usage)
  # Strangely, the default help output doesn't include the choice list.
  choices = ['mac', 'win', 'linux', 'linux64']
            # linux-chromiumos lacks a continuous archive http://crbug.com/78158
  parser.add_option('-a', '--archive',
                    choices = choices,
                    help = 'The buildbot archive to bisect [%s].' %
                           '|'.join(choices))
  parser.add_option('-o', action="store_true", dest='official_builds',
                    help = 'Bisect across official ' +
                    'Chrome builds (internal only) instead of ' +
                    'Chromium archives.')
  parser.add_option('-b', '--bad', type = 'str',
                    help = 'The bad revision to bisect to.')
  parser.add_option('-g', '--good', type = 'str',
                    help = 'The last known good revision to bisect from.')
  parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
                    help = 'Profile to use; this will not reset every run. ' +
                    'Defaults to a clean profile.', default = 'profile')
  parser.add_option('-t', '--times', type = 'int',
                    help = 'Number of times to run each build before asking ' +
                    'if it\'s good or bad. Temporary profiles are reused.',
                    default = 1)
  (opts, args) = parser.parse_args()

  if opts.archive is None:
    print 'Error: missing required parameter: --archive'
    print
    parser.print_help()
    return 1

  # Revision options are strings; compare them as versions so that, e.g.,
  # '9' sorts before '100' and dotted official build numbers compare sanely.
  if opts.bad and opts.good and (
      LooseVersion(opts.good) > LooseVersion(opts.bad)):
    print ('The good revision (%s) must precede the bad revision (%s).\n' %
           (opts.good, opts.bad))
    parser.print_help()
    return 1

  # Create a context just to query the server; the revision bounds are not
  # known yet, so pass 0 for both. Bisect() builds its own context later.
  context = PathContext(opts.archive, 0, 0, opts.official_builds)

  if opts.official_builds and opts.bad is None:
    print >>sys.stderr, 'Bisecting official builds requires a bad build number.'
    parser.print_help()
    return 1

  # Pick a starting point, try to get HEAD for this.
  if opts.bad:
    bad_rev = opts.bad
  else:
    bad_rev = 0
    try:
      # Location of the latest build revision number
      nh = urllib.urlopen(context.GetLastChangeURL())
      latest = int(nh.read())
      nh.close()
      bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest)
      if (bad_rev == ''):
        bad_rev = latest
      bad_rev = int(bad_rev)
    except Exception, e:
      print('Could not determine latest revision. This could be bad...')
      bad_rev = int(raw_input('Bad revision: '))

  # Find out when we were good.
  if opts.good:
    good_rev = opts.good
  else:
    good_rev = 0
    try:
      good_rev = int(raw_input('Last known good [0]: '))
    except Exception, e:
      pass

  if opts.times < 1:
    print('Number of times to run (%d) must be greater than or equal to 1.' %
          opts.times)
    parser.print_help()
    return 1

  (last_known_good_rev, first_known_bad_rev) = Bisect(
      opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args,
      opts.profile)

  # Get corresponding webkit revisions.
  try:
    last_known_good_webkit_rev = GetWebKitRevisionForChromiumRevision(
        last_known_good_rev)
    first_known_bad_webkit_rev = GetWebKitRevisionForChromiumRevision(
        first_known_bad_rev)
  except Exception, e:
    # Silently ignore the failure.
    last_known_good_webkit_rev, first_known_bad_webkit_rev = 0, 0

  # We're done. Let the user know the results in an official manner.
  print DONE_MESSAGE % (str(last_known_good_rev), str(first_known_bad_rev))
  if last_known_good_webkit_rev != first_known_bad_webkit_rev:
    print 'WEBKIT CHANGELOG URL:'
    print '  ' + WEBKIT_CHANGELOG_URL % (first_known_bad_webkit_rev,
                                         last_known_good_webkit_rev)
  print 'CHANGELOG URL:'
  if opts.official_builds:
    print OFFICIAL_CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)
  else:
    print '  ' + CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)

if __name__ == '__main__':
  sys.exit(main())