path: root/tools/sharding_supervisor/sharding_supervisor.py
blob: a95b5868f41879c121dae64e357ba0e8916f35c0
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Shards a given test suite and runs the shards in parallel.

ShardingSupervisor is called to process the command line options and creates
the specified number of worker threads. These threads then run each shard of
the test in a separate process and report on the results. When all the shards
have been completed, the supervisor reprints any lines indicating a test
failure for convenience. If only one shard is to be run, a single subprocess
is started for that shard and the output is identical to gtest's output.
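
Typical invocation (path/to/test stands for the test binary to shard):
  python sharding_supervisor.py --retry-failed path/to/test [gtest_args]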
"""


import cStringIO
import itertools
import optparse
import os
import Queue
import random
import re
import sys
import threading

from xml.dom import minidom

# Add tools/ to path
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_PATH, ".."))
try:
  import find_depot_tools
  # Fixes a bug in Windows where some shards die upon starting
  # TODO(charleslee): actually fix this bug
  import subprocess2 as subprocess
except ImportError:
  # Unable to find depot_tools, so just use standard subprocess
  import subprocess

SS_USAGE = "python %prog [options] path/to/test [gtest_args]"
SS_DEFAULT_NUM_CORES = 4
SS_DEFAULT_SHARDS_PER_CORE = 5  # num_shards = cores * SHARDS_PER_CORE
SS_DEFAULT_RUNS_PER_CORE = 1  # num_workers = cores * RUNS_PER_CORE
SS_DEFAULT_RETRY_PERCENT = 5  # --retry-failed ignored if more than 5% fail
SS_DEFAULT_TIMEOUT = 530  # Slightly less than buildbot's default 600 seconds


def DetectNumCores():
  """Detects the number of cores on the machine.

  Returns:
    The number of cores on the machine or SS_DEFAULT_NUM_CORES if it could not
    be found.
  """
  try:
    # Linux, Unix, MacOS
    if hasattr(os, "sysconf"):
      if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
        # Linux, Unix
        return int(os.sysconf("SC_NPROCESSORS_ONLN"))
      else:
        # OSX
        return int(os.popen2("sysctl -n hw.ncpu")[1].read())
    # Windows
    return int(os.environ["NUMBER_OF_PROCESSORS"])
  except (KeyError, ValueError):
    return SS_DEFAULT_NUM_CORES


def GetGTestOutput(args):
  """Extracts gtest_output from the args. Returns none if not present."""

  for arg in args:
    if '--gtest_output=' in arg:
      return arg.split('=', 1)[1]
  return None


def AppendToGTestOutput(gtest_args, value):
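  """Returns a copy of gtest_args with |value| appended to the existing
  --gtest_output value, or gtest_args unchanged if --gtest_output is absent.
  """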
  args = gtest_args[:]
  current_value = GetGTestOutput(args)
  if not current_value:
    return gtest_args

  current_arg = '--gtest_output=' + current_value
  args.remove(current_arg)
  args.append('--gtest_output=' + current_value + value)
  return args


def RemoveGTestOutput(gtest_args):
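  """Returns a copy of gtest_args with the --gtest_output argument removed,
  or gtest_args unchanged if --gtest_output is absent.
  """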
  args = gtest_args[:]
  current_value = GetGTestOutput(args)
  if not current_value:
    return gtest_args

  args.remove('--gtest_output=' + current_value)
  return args


def AppendToXML(final_xml, generic_path, shard):
  """Combine the shard xml file with the final xml file."""

  path = generic_path + str(shard)

  try:
    with open(path) as shard_xml_file:
      shard_xml = minidom.parse(shard_xml_file)
  except IOError:
    # If the shard crashed, gtest will not have generated an xml file.
    return final_xml

  if not final_xml:
    # Our final xml is empty; prepopulate it with the first shard's xml.
    return shard_xml

  shard_node = shard_xml.documentElement
  final_node = final_xml.documentElement

  testcases = shard_node.getElementsByTagName('testcase')
  final_testcases = final_node.getElementsByTagName('testcase')
  for testcase in testcases:
    name = testcase.getAttribute('name')
    classname = testcase.getAttribute('classname')
    failures = testcase.getElementsByTagName('failure')
    status = testcase.getAttribute('status')
    elapsed = testcase.getAttribute('time')

    # Don't bother updating the final xml if there is no data.
    if status == 'notrun':
      continue

    # Look in our final xml to see if it's there.
    # There has to be a better way...
    for final_testcase in final_testcases:
      final_name = final_testcase.getAttribute('name')
      final_classname = final_testcase.getAttribute('classname')
      if final_name == name and final_classname == classname:
        # We got the same entry.
        final_testcase.setAttribute('status', status)
        final_testcase.setAttribute('time', elapsed)
        for failure in failures:
          final_testcase.appendChild(failure)

  return final_xml


def RunShard(test, total_shards, index, gtest_args, stdout, stderr):
  """Runs a single test shard in a subprocess.

  Returns:
    The Popen object representing the subprocess handle.
  """
  args = [test]

  # If --gtest_output is present, append the shard index so each shard
  # writes to its own output file.
  test_args = AppendToGTestOutput(gtest_args, str(index))
  args.extend(test_args)
  env = os.environ.copy()
  env["GTEST_TOTAL_SHARDS"] = str(total_shards)
  env["GTEST_SHARD_INDEX"] = str(index)

  # Use a unique log file for each shard
  # Allows ui_tests to be run in parallel on the same machine
  env["CHROME_LOG_FILE"] = "chrome_log_%d" % index

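  # bufsize=0 keeps the pipe unbuffered so ShardRunner.run() can stream the
  # output one character at a time, even if a shard crashes mid-line.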
  return subprocess.Popen(
      args, stdout=stdout,
      stderr=stderr,
      env=env,
      bufsize=0,
      universal_newlines=True)


class ShardRunner(threading.Thread):
  """Worker thread that manages a single shard at a time.

  Attributes:
    supervisor: The ShardingSupervisor that this worker reports to.
    counter: Queue from which the next shard index to run is fetched.
    test_start: Regex that detects when a test runs.
    test_ok: Regex that detects a passing test.
    test_fail: Regex that detects a failing test.
    current_test: The name of the currently running test.
  """

  def __init__(self, supervisor, counter, test_start, test_ok, test_fail):
    """Inits ShardRunner and sets the current test to nothing."""
    threading.Thread.__init__(self)
    self.supervisor = supervisor
    self.counter = counter
    self.test_start = test_start
    self.test_ok = test_ok
    self.test_fail = test_fail
    self.current_test = ""

  def ReportFailure(self, description, index, test_name):
    """Assembles and reports a failure line to be printed later."""
    log_line = "%s (%i): %s\n" % (description, index, test_name)
    self.supervisor.LogTestFailure(log_line)

  def ProcessLine(self, index, line):
    """Checks a shard output line for test status, and reports a failure or
    incomplete test if needed.
    """
    results = self.test_start.search(line)
    if results:
      if self.current_test:
        self.ReportFailure("INCOMPLETE", index, self.current_test)
      self.current_test = results.group(1)
      self.supervisor.IncrementTestCount()
      return

    results = self.test_ok.search(line)
    if results:
      self.current_test = ""
      return

    results = self.test_fail.search(line)
    if results:
      self.ReportFailure("FAILED", index, results.group(1))
      self.current_test = ""

  def run(self):
    """Runs shards and outputs the results.

    Gets the next shard index from the supervisor, runs it in a subprocess,
    and collects the output. The output is read character by character in
    case the shard crashes without an ending newline. Each line is processed
    as it is finished.
    """
    while True:
      try:
        index = self.counter.get_nowait()
      except Queue.Empty:
        break
      chars = cStringIO.StringIO()
      shard_running = True
      shard = RunShard(
          self.supervisor.test, self.supervisor.total_shards, index,
          self.supervisor.gtest_args, subprocess.PIPE, subprocess.STDOUT)
      while shard_running:
        char = shard.stdout.read(1)
        if not char and shard.poll() is not None:
          shard_running = False
        chars.write(char)
        if char == "\n" or not shard_running:
          line = chars.getvalue()
          if not line and not shard_running:
            break
          self.ProcessLine(index, line)
          self.supervisor.LogOutputLine(index, line)
          chars.close()
          chars = cStringIO.StringIO()
      if self.current_test:
        self.ReportFailure("INCOMPLETE", index, self.current_test)
      self.supervisor.ShardIndexCompleted(index)
      if shard.returncode != 0:
        self.supervisor.LogShardFailure(index)


class ShardingSupervisor(object):
  """Supervisor object that handles the worker threads.

  Attributes:
    test: Name of the test to shard.
    num_shards_to_run: Number of shards this supervisor runs locally.
    num_runs: Total number of worker threads to create for running shards.
    color: Indicates which coloring mode to use in the output.
    original_order: True if shard output should be printed as it comes.
    prefix: True if each line should indicate the shard index.
    retry_percent: Integer specifying the max percent of tests to retry.
    gtest_args: The options to pass to gtest.
    failed_tests: List of statements from shard output indicating a failure.
    failed_shards: List of shards that contained failing tests.
    shards_completed: List of flags indicating which shards have finished.
    shard_output: Buffer that stores the output from each shard.
    test_counter: Stores the total number of tests run.
    total_slaves: Total number of slaves running this test.
    slave_index: Current slave to run tests for.

  If total_slaves is set, we run only a subset of the tests. This is meant to be
  used when we want to shard across machines as well as across cpus. In that
  case the number of shards to execute will be the same, but they will be
  smaller, as the total number of shards in the test suite will be multiplied
  by 'total_slaves'.
 
  For example, if you are on a quad core machine, the sharding supervisor by
  default will use 20 shards for the whole suite. However, if you set
  total_slaves to 2, it will split the suite into 40 shards and will only run
  shards [0-19] or shards [20-39], depending on whether slave_index is 0 or 1.
  """

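  # Sentinel pushed onto a shard's output queue by ShardIndexCompleted() to
  # tell WaitForShards() that the shard has finished.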
  SHARD_COMPLETED = object()

  def __init__(self, test, num_shards_to_run, num_runs, color, original_order,
               prefix, retry_percent, timeout, total_slaves, slave_index,
               gtest_args):
    """Inits ShardingSupervisor with given options and gtest arguments."""
    self.test = test
    # Number of shards to run locally.
    self.num_shards_to_run = num_shards_to_run
    # Total shards in the test suite running across all slaves.
    self.total_shards = num_shards_to_run * total_slaves
    self.slave_index = slave_index
    self.num_runs = num_runs
    self.color = color
    self.original_order = original_order
    self.prefix = prefix
    self.retry_percent = retry_percent
    self.timeout = timeout
    self.gtest_args = gtest_args
    self.failed_tests = []
    self.failed_shards = []
    self.shards_completed = [False] * self.num_shards_to_run
    self.shard_output = [Queue.Queue() for _ in range(self.num_shards_to_run)]
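    # Counts tests as they start; RetryFailedTests() compares this total with
    # the number of failures when deciding whether to honor --retry-failed.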
    self.test_counter = itertools.count()

  def ShardTest(self):
    """Runs the test and manages the worker threads.

    Runs the test and outputs a summary at the end. All the tests in the
    suite are run by creating (cores * runs_per_core) threads and
    (cores * shards_per_core) shards. When all the worker threads have
    finished, the lines saved in failed_tests are printed again. If enabled,
    any failed tests that do not have FLAKY or FAILS in their names are run
    again, serially, and the results are printed.

    Returns:
      1 if some unexpected (not FLAKY or FAILS) tests failed, 0 otherwise.
    """

    # Regular expressions for parsing GTest logs. Test names look like
    #   SomeTestCase.SomeTest
    #   SomeName/SomeTestCase.SomeTest/1
    # This regex also matches SomeName.SomeTest/1 and
    # SomeName/SomeTestCase.SomeTest, which should be harmless.
    test_name_regex = r"((\w+/)?\w+\.\w+(/\d+)?)"

    # Regex for filtering out ANSI escape codes when using color.
    ansi_regex = r"(?:\x1b\[.*?[a-zA-Z])?"

    test_start = re.compile(
        ansi_regex + r"\[\s+RUN\s+\] " + ansi_regex + test_name_regex)
    test_ok = re.compile(
        ansi_regex + r"\[\s+OK\s+\] " + ansi_regex + test_name_regex)
    test_fail = re.compile(
        ansi_regex + r"\[\s+FAILED\s+\] " + ansi_regex + test_name_regex)

    workers = []
    counter = Queue.Queue()
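    # Each slave runs a contiguous range of the global shard indices; worker
    # threads pull indices from this queue until it is empty.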
    start_point = self.num_shards_to_run * self.slave_index
    for i in range(start_point, start_point + self.num_shards_to_run):
      counter.put(i)

    for i in range(self.num_runs):
      worker = ShardRunner(
          self, counter, test_start, test_ok, test_fail)
      worker.start()
      workers.append(worker)
    if self.original_order:
      for worker in workers:
        worker.join()
    else:
      self.WaitForShards()

    # All the shards are done.  Merge all the XML files and generate the
    # main one.
    output_arg = GetGTestOutput(self.gtest_args)
    if output_arg:
      xml, xml_path = output_arg.split(':', 1)
      assert xml == 'xml'
      final_xml = None
      for i in range(start_point, start_point + self.num_shards_to_run):
        final_xml = AppendToXML(final_xml, xml_path, i)

      if final_xml:
        with open(xml_path, 'w') as final_file:
          final_xml.writexml(final_file)

    num_failed = len(self.failed_shards)
    if num_failed > 0:
      self.failed_shards.sort()
      self.WriteText(sys.stdout,
                     "\nFAILED SHARDS: %s\n" % str(self.failed_shards),
                     "\x1b[1;5;31m")
    else:
      self.WriteText(sys.stdout, "\nALL SHARDS PASSED!\n", "\x1b[1;5;32m")
    self.PrintSummary(self.failed_tests)
    if self.retry_percent < 0:
      return len(self.failed_shards) > 0

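    # Tests prefixed with FAILS_ or FLAKY_ are expected to be unreliable, so
    # they are not retried and do not affect the return code.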
    self.failed_tests = [x for x in self.failed_tests if x.find("FAILS_") < 0]
    self.failed_tests = [x for x in self.failed_tests if x.find("FLAKY_") < 0]
    if not self.failed_tests:
      return 0
    return self.RetryFailedTests()

  def LogTestFailure(self, line):
    """Saves a line in the lsit of failed tests to be printed at the end."""
    if line not in self.failed_tests:
      self.failed_tests.append(line)

  def LogShardFailure(self, index):
    """Records that a test in the given shard has failed."""
    self.failed_shards.append(index)

  def WaitForShards(self):
    """Prints the output from each shard in consecutive order, waiting for
    the current shard to finish before starting on the next shard.
    """
    try:
      for shard_index in range(self.num_shards_to_run):
        while True:
          try:
            line = self.shard_output[shard_index].get(True, self.timeout)
          except Queue.Empty:
            # The shard timed out; note the failure and move on.
            self.LogShardFailure(shard_index)
            # TODO(maruel): Print last test. It'd be simpler to have the
            # processing in the main thread.
            # TODO(maruel): Make sure the worker thread terminates.
            sys.stdout.write('TIMED OUT\n\n')
            self.LogTestFailure(
                'FAILURE: SHARD %d TIMED OUT; %d seconds\n' % (
                    shard_index, self.timeout))
            break
          if line is self.SHARD_COMPLETED:
            break
          sys.stdout.write(line)
    except:
      sys.stdout.flush()
      print 'CAUGHT EXCEPTION: dumping remaining data:'
      for shard_index in range(self.num_shards_to_run):
        while True:
          try:
            line = self.shard_output[shard_index].get(False)
          except Queue.Empty:
            # Shard timed out, notice failure and move on.
            self.LogShardFailure(shard_index)
            break
          if line is self.SHARD_COMPLETED:
            break
          sys.stdout.write(line)
      raise

  def LogOutputLine(self, index, line):
    """Either prints the shard output line immediately or saves it in the
    output buffer, depending on the settings. Also optionally adds a prefix.
    """
    # Map the global shard index to this slave's local output-buffer index.
    array_index = index - (self.num_shards_to_run * self.slave_index)
    if self.prefix:
      line = "%i>%s" % (index, line)
    if self.original_order:
      sys.stdout.write(line)
    else:
      self.shard_output[array_index].put(line)

  def IncrementTestCount(self):
    """Increments the number of tests run. This is relevant to the
    --retry-percent option.
    """
    self.test_counter.next()

  def ShardIndexCompleted(self, index):
    """Records that a shard has finished so the output from the next shard
    can now be printed.
    """
    # Map the global shard index to this slave's local output-buffer index.
    array_index = index - (self.num_shards_to_run * self.slave_index)
    self.shard_output[array_index].put(self.SHARD_COMPLETED)

  def RetryFailedTests(self):
    """Reruns any failed tests serially and prints another summary of the
    results if no more than retry_percent failed.
    """
    num_tests_run = self.test_counter.next()
    # retry_percent is a value in [0, 100], so scale it before comparing.
    if len(self.failed_tests) > self.retry_percent * num_tests_run / 100:
      sys.stdout.write("\nNOT RETRYING FAILED TESTS (too many failed)\n")
      return 1
    self.WriteText(sys.stdout, "\nRETRYING FAILED TESTS:\n", "\x1b[1;5;33m")
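    # Saved failure lines look like "FAILED (index): TestName"; pull out the
    # test name (tolerating an optional "N>" shard prefix) for --gtest_filter.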
    sharded_description = re.compile(r": (?:\d+>)?(.*)")
    gtest_filters = [sharded_description.search(line).group(1)
                     for line in self.failed_tests]
    failed_retries = []

    for test_filter in gtest_filters:
      args = [self.test, "--gtest_filter=" + test_filter]
      # Don't update the xml output files during retry.
      stripped_gtests_args = RemoveGTestOutput(self.gtest_args)
      args.extend(stripped_gtests_args)
      rerun = subprocess.Popen(args)
      rerun.wait()
      if rerun.returncode != 0:
        failed_retries.append(test_filter)

    self.WriteText(sys.stdout, "RETRY RESULTS:\n", "\x1b[1;5;33m")
    self.PrintSummary(failed_retries)
    return len(failed_retries) > 0

  def PrintSummary(self, failed_tests):
    """Prints a summary of the test results.

    If any shards had failing tests, the list is sorted and printed. Then all
    the lines that indicate a test failure are reproduced.
    """
    if failed_tests:
      self.WriteText(sys.stdout, "FAILED TESTS:\n", "\x1b[1;5;31m")
      for line in failed_tests:
        sys.stdout.write(line)
    else:
      self.WriteText(sys.stdout, "ALL TESTS PASSED!\n", "\x1b[1;5;32m")

  def WriteText(self, pipe, text, ansi):
    """Writes the text to the pipe with the ansi escape code, if colored
    output is set, for Unix systems.
    """
    if self.color:
      pipe.write(ansi)
    pipe.write(text)
    if self.color:
      pipe.write("\x1b[m")


def main():
  parser = optparse.OptionParser(usage=SS_USAGE)
  parser.add_option(
      "-n", "--shards_per_core", type="int", default=SS_DEFAULT_SHARDS_PER_CORE,
      help="number of shards to generate per CPU")
  parser.add_option(
      "-r", "--runs_per_core", type="int", default=SS_DEFAULT_RUNS_PER_CORE,
      help="number of shards to run in parallel per CPU")
  parser.add_option(
      "-c", "--color", action="store_true",
      default=sys.platform != "win32" and sys.stdout.isatty(),
      help="force color output, also used by gtest if --gtest_color is not"
      " specified")
  parser.add_option(
      "--no-color", action="store_false", dest="color",
      help="disable color output")
  parser.add_option(
      "-s", "--runshard", type="int", help="single shard index to run")
  parser.add_option(
      "--reorder", action="store_true",
      help="ensure that all output from an earlier shard is printed before"
      " output from a later shard")
  # TODO(charleslee): for backwards compatibility with master.cfg file
  parser.add_option(
      "--original-order", action="store_true",
      help="print shard output in its orginal jumbled order of execution"
      " (useful for debugging flaky tests)")
  parser.add_option(
      "--prefix", action="store_true",
      help="prefix each line of shard output with 'N>', where N is the shard"
      " index (forced True when --original-order is True)")
  parser.add_option(
      "--random-seed", action="store_true",
      help="shuffle the tests with a random seed value")
  parser.add_option(
      "--retry-failed", action="store_true",
      help="retry tests that did not pass serially")
  parser.add_option(
      "--retry-percent", type="int",
      default=SS_DEFAULT_RETRY_PERCENT,
      help="ignore --retry-failed if more than this percent fail [0, 100]"
      " (default = %i)" % SS_DEFAULT_RETRY_PERCENT)
  parser.add_option(
      "-t", "--timeout", type="int", default=SS_DEFAULT_TIMEOUT,
      help="timeout in seconds to wait for a shard (default=%default s)")
  parser.add_option(
      "--total-slaves", type="int", default=1,
      help="if running a subset, number of slaves sharing the test")
  parser.add_option(
      "--slave-index", type="int", default=0,
      help="if running a subset, index of the slave to run tests for")

  parser.disable_interspersed_args()
  (options, args) = parser.parse_args()

  if not args:
    parser.error("You must specify a path to test!")
  if not os.path.exists(args[0]):
    parser.error("%s does not exist!" % args[0])

  num_cores = DetectNumCores()

  if options.shards_per_core < 1:
    parser.error("You must have at least 1 shard per core!")
  num_shards_to_run = num_cores * options.shards_per_core

  if options.runs_per_core < 1:
    parser.error("You must have at least 1 run per core!")
  num_runs = num_cores * options.runs_per_core

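  # Default gtest's color setting to match the supervisor's (per the --color
  # help text, an explicit --gtest_color from the caller still takes effect).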
  gtest_args = ["--gtest_color=%s" % {
      True: "yes", False: "no"}[options.color]] + args[1:]

  if options.original_order:
    options.prefix = True

  # TODO(charleslee): for backwards compatibility with buildbot's log_parser
  if options.reorder:
    options.original_order = False
    options.prefix = True

  if options.random_seed:
    seed = random.randint(1, 99999)
    gtest_args.extend(["--gtest_shuffle", "--gtest_random_seed=%i" % seed])

  if options.retry_failed:
    if options.retry_percent < 0 or options.retry_percent > 100:
      parser.error("Retry percent must be an integer [0, 100]!")
  else:
    options.retry_percent = -1

  if options.runshard is not None:
    # Run a single shard and exit.
    if (options.runshard < 0 or options.runshard >= num_shards_to_run):
      parser.error("Invalid shard index for the given parameters!")
    shard = RunShard(
        args[0], num_shards_to_run, options.runshard, gtest_args, None, None)
    shard.communicate()
    return shard.poll()

  # shard and run the whole test
  ss = ShardingSupervisor(
      args[0], num_shards_to_run, num_runs, options.color,
      options.original_order, options.prefix, options.retry_percent,
      options.timeout, options.total_slaves, options.slave_index, gtest_args)
  return ss.ShardTest()


if __name__ == "__main__":
  sys.exit(main())