| field | value | date |
|---|---|---|
| author | sbc <sbc@chromium.org> | 2014-11-24 09:25:29 -0800 |
| committer | Commit bot <commit-bot@chromium.org> | 2014-11-24 17:25:51 +0000 |
| commit | 0cec9d79bb20fa7706eed91c3671ac507ba375e7 (patch) | |
| tree | 679ed6278fe47e8bc4df89759dc39428dd970f88 /third_party/pycoverage | |
| parent | 698913910433d3d3a47b36360f1c583ff9d92f05 (diff) | |
Add python coverage module to third_party
Also add a top-level Makefile to native_client_sdk which
exercises this.
Add an exception to checklicenses.py since upstream does
not include license information in its source files.
BUG=435348,434551
Review URL: https://codereview.chromium.org/727003004
Cr-Commit-Position: refs/heads/master@{#305439}
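
The native_client_sdk Makefile mentioned above is outside this diffstat, so the exact target is not shown here. As a rough, assumed sketch only, the vendored tree can be driven by pointing Python at the directory (the top-level `__main__.py` added below supports this); `run_tests.py` stands in for whatever script the Makefile actually runs:

```python
# Assumed illustration only -- not the actual native_client_sdk Makefile
# target. "run_tests.py" is a hypothetical script name.
import subprocess
import sys

PYCOV = "third_party/pycoverage"  # directory added by this CL

# Execute the script under the vendored coverage tracer...
subprocess.check_call([sys.executable, PYCOV, "run", "run_tests.py"])
# ...then print a line-coverage summary, including missing line numbers.
subprocess.check_call([sys.executable, PYCOV, "report", "-m"])
```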
Diffstat (limited to 'third_party/pycoverage')
50 files changed, 9503 insertions, 0 deletions
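
For orientation before the file-by-file diff: the vendored package is coverage.py 3.7.1 and exposes its standard object API (see `coverage/__init__.py` below). A minimal sketch of programmatic use, assuming `third_party/pycoverage` is on `sys.path` and with `my_module` as a placeholder for real code under test:

```python
# Minimal sketch of the object API vendored below; "my_module" is a
# placeholder, and the sys.path tweak assumes a Chromium-style checkout.
import sys
sys.path.insert(0, "third_party/pycoverage")

import coverage

cov = coverage.coverage()      # explicit object, avoids the legacy singleton
cov.start()                    # install the trace function
import my_module               # code imported/run from here on is measured
my_module.main()
cov.stop()
cov.save()                     # write the .coverage data file
cov.report(show_missing=True)  # print a summary with missing line numbers
```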
diff --git a/third_party/pycoverage/AUTHORS.txt b/third_party/pycoverage/AUTHORS.txt new file mode 100644 index 0000000..5ea7e04 --- /dev/null +++ b/third_party/pycoverage/AUTHORS.txt @@ -0,0 +1,43 @@ +Coverage.py was originally written by Gareth Rees, and since 2004 has been +extended and maintained by Ned Batchelder. + +Other contributions have been made by: + +Marc Abramowitz +Chris Adams +Geoff Bache +Julian Berman +Titus Brown +Brett Cannon +Pablo Carballo +Guillaume Chazarain +David Christian +Marcus Cobden +Matthew Desmarais +Danek Duvall +Ben Finney +Martin Fuzzey +Imri Goldberg +Bill Hart +Christian Heimes +Roger Hu +Devin Jeanpierre +Ross Lawley +Edward Loper +Sandra Martocchia +Patrick Mezard +Noel O'Boyle +Detlev Offenbach +JT Olds +George Paci +Catherine Proulx +Brandon Rhodes +Adi Roiban +Greg Rogers +George Song +David Stanek +Joseph Tate +Sigve Tjora +Mark van der Wal +Zooko Wilcox-O'Hearn +Christoph Zwerschke diff --git a/third_party/pycoverage/CHANGES.txt b/third_party/pycoverage/CHANGES.txt new file mode 100644 index 0000000..1e4b888 --- /dev/null +++ b/third_party/pycoverage/CHANGES.txt @@ -0,0 +1,1009 @@ +------------------------------ +Change history for Coverage.py +------------------------------ + +3.7.1 -- 13 December 2013 +------------------------- + +- Improved the speed of HTML report generation by about 20%. + +- Fixed the mechanism for finding OS-installed static files for the HTML report + so that it will actually find OS-installed static files. + + +3.7 --- 6 October 2013 +---------------------- + +- Added the ``--debug`` switch to ``coverage run``. It accepts a list of + options indicating the type of internal activity to log to stderr. + +- Improved the branch coverage facility, fixing `issue 92`_ and `issue 175`_. + +- Running code with ``coverage run -m`` now behaves more like Python does, + setting sys.path properly, which fixes `issue 207`_ and `issue 242`_. + +- Coverage can now run .pyc files directly, closing `issue 264`_. + +- Coverage properly supports .pyw files, fixing `issue 261`_. + +- Omitting files within a tree specified with the ``source`` option would + cause them to be incorrectly marked as unexecuted, as described in + `issue 218`_. This is now fixed. + +- When specifying paths to alias together during data combining, you can now + specify relative paths, fixing `issue 267`_. + +- Most file paths can now be specified with username expansion (``~/src``, or + ``~build/src``, for example), and with environment variable expansion + (``build/$BUILDNUM/src``). + +- Trying to create an XML report with no files to report on, would cause a + ZeroDivideError, but no longer does, fixing `issue 250`_. + +- When running a threaded program under the Python tracer, coverage no longer + issues a spurious warning about the trace function changing: "Trace function + changed, measurement is likely wrong: None." This fixes `issue 164`_. + +- Static files necessary for HTML reports are found in system-installed places, + to ease OS-level packaging of coverage.py. Closes `issue 259`_. + +- Source files with encoding declarations, but a blank first line, were not + decoded properly. Now they are. Thanks, Roger Hu. + +- The source kit now includes the ``__main__.py`` file in the root coverage + directory, fixing `issue 255`_. + +.. _issue 92: https://bitbucket.org/ned/coveragepy/issue/92/finally-clauses-arent-treated-properly-in +.. _issue 164: https://bitbucket.org/ned/coveragepy/issue/164/trace-function-changed-warning-when-using +.. 
_issue 175: https://bitbucket.org/ned/coveragepy/issue/175/branch-coverage-gets-confused-in-certain +.. _issue 207: https://bitbucket.org/ned/coveragepy/issue/207/run-m-cannot-find-module-or-package-in +.. _issue 242: https://bitbucket.org/ned/coveragepy/issue/242/running-a-two-level-package-doesnt-work +.. _issue 218: https://bitbucket.org/ned/coveragepy/issue/218/run-command-does-not-respect-the-omit-flag +.. _issue 250: https://bitbucket.org/ned/coveragepy/issue/250/uncaught-zerodivisionerror-when-generating +.. _issue 255: https://bitbucket.org/ned/coveragepy/issue/255/directory-level-__main__py-not-included-in +.. _issue 259: https://bitbucket.org/ned/coveragepy/issue/259/allow-use-of-system-installed-third-party +.. _issue 261: https://bitbucket.org/ned/coveragepy/issue/261/pyw-files-arent-reported-properly +.. _issue 264: https://bitbucket.org/ned/coveragepy/issue/264/coverage-wont-run-pyc-files +.. _issue 267: https://bitbucket.org/ned/coveragepy/issue/267/relative-path-aliases-dont-work + + +Version 3.6 --- 5 January 2013 +------------------------------ + +- Added a page to the docs about troublesome situations, closing `issue 226`_, + and added some info to the TODO file, closing `issue 227`_. + +.. _issue 226: https://bitbucket.org/ned/coveragepy/issue/226/make-readme-section-to-describe-when +.. _issue 227: https://bitbucket.org/ned/coveragepy/issue/227/update-todo + + +Version 3.6b3 --- 29 December 2012 +---------------------------------- + +- Beta 2 broke the nose plugin. It's fixed again, closing `issue 224`_. + +.. _issue 224: https://bitbucket.org/ned/coveragepy/issue/224/36b2-breaks-nosexcover + + +Version 3.6b2 --- 23 December 2012 +---------------------------------- + +- Coverage.py runs on Python 2.3 and 2.4 again. It was broken in 3.6b1. + +- The C extension is optionally compiled using a different more widely-used + technique, taking another stab at fixing `issue 80`_ once and for all. + +- Combining data files would create entries for phantom files if used with + ``source`` and path aliases. It no longer does. + +- ``debug sys`` now shows the configuration file path that was read. + +- If an oddly-behaved package claims that code came from an empty-string + filename, coverage.py no longer associates it with the directory name, + fixing `issue 221`_. + +.. _issue 80: https://bitbucket.org/ned/coveragepy/issue/80/is-there-a-duck-typing-way-to-know-we-cant +.. _issue 221: https://bitbucket.org/ned/coveragepy/issue/221/coveragepy-incompatible-with-pyratemp + + +Version 3.6b1 --- 28 November 2012 +---------------------------------- + +- Wildcards in ``include=`` and ``omit=`` arguments were not handled properly + in reporting functions, though they were when running. Now they are handled + uniformly, closing `issue 143`_ and `issue 163`_. **NOTE**: it is possible + that your configurations may now be incorrect. If you use ``include`` or + ``omit`` during reporting, whether on the command line, through the API, or + in a configuration file, please check carefully that you were not relying on + the old broken behavior. + +- The **report**, **html**, and **xml** commands now accept a ``--fail-under`` + switch that indicates in the exit status whether the coverage percentage was + less than a particular value. Closes `issue 139`_. + +- The reporting functions coverage.report(), coverage.html_report(), and + coverage.xml_report() now all return a float, the total percentage covered + measurement. 
+ +- The HTML report's title can now be set in the configuration file, with the + ``--title`` switch on the command line, or via the API. + +- Configuration files now support substitution of environment variables, using + syntax like ``${WORD}``. Closes `issue 97`_. + +- Embarrassingly, the `[xml] output=` setting in the .coveragerc file simply + didn't work. Now it does. + +- The XML report now consistently uses filenames for the filename attribute, + rather than sometimes using module names. Fixes `issue 67`_. + Thanks, Marcus Cobden. + +- Coverage percentage metrics are now computed slightly differently under + branch coverage. This means that completely unexecuted files will now + correctly have 0% coverage, fixing `issue 156`_. This also means that your + total coverage numbers will generally now be lower if you are measuring + branch coverage. + +- When installing, now in addition to creating a "coverage" command, two new + aliases are also installed. A "coverage2" or "coverage3" command will be + created, depending on whether you are installing in Python 2.x or 3.x. + A "coverage-X.Y" command will also be created corresponding to your specific + version of Python. Closes `issue 111`_. + +- The coverage.py installer no longer tries to bootstrap setuptools or + Distribute. You must have one of them installed first, as `issue 202`_ + recommended. + +- The coverage.py kit now includes docs (closing `issue 137`_) and tests. + +- On Windows, files are now reported in their correct case, fixing `issue 89`_ + and `issue 203`_. + +- If a file is missing during reporting, the path shown in the error message + is now correct, rather than an incorrect path in the current directory. + Fixes `issue 60`_. + +- Running an HTML report in Python 3 in the same directory as an old Python 2 + HTML report would fail with a UnicodeDecodeError. This issue (`issue 193`_) + is now fixed. + +- Fixed yet another error trying to parse non-Python files as Python, this + time an IndentationError, closing `issue 82`_ for the fourth time... + +- If `coverage xml` fails because there is no data to report, it used to + create a zero-length XML file. Now it doesn't, fixing `issue 210`_. + +- Jython files now work with the ``--source`` option, fixing `issue 100`_. + +- Running coverage under a debugger is unlikely to work, but it shouldn't fail + with "TypeError: 'NoneType' object is not iterable". Fixes `issue 201`_. + +- On some Linux distributions, when installed with the OS package manager, + coverage.py would report its own code as part of the results. Now it won't, + fixing `issue 214`_, though this will take some time to be repackaged by the + operating systems. + +- Docstrings for the legacy singleton methods are more helpful. Thanks Marius + Gedminas. Closes `issue 205`_. + +- The pydoc tool can now show docmentation for the class `coverage.coverage`. + Closes `issue 206`_. + +- Added a page to the docs about contributing to coverage.py, closing + `issue 171`_. + +- When coverage.py ended unsuccessfully, it may have reported odd errors like + ``'NoneType' object has no attribute 'isabs'``. It no longer does, + so kiss `issue 153`_ goodbye. + +.. _issue 60: https://bitbucket.org/ned/coveragepy/issue/60/incorrect-path-to-orphaned-pyc-files +.. _issue 67: https://bitbucket.org/ned/coveragepy/issue/67/xml-report-filenames-may-be-generated +.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report +.. 
_issue 89: https://bitbucket.org/ned/coveragepy/issue/89/on-windows-all-packages-are-reported-in +.. _issue 97: https://bitbucket.org/ned/coveragepy/issue/97/allow-environment-variables-to-be +.. _issue 100: https://bitbucket.org/ned/coveragepy/issue/100/source-directive-doesnt-work-for-packages +.. _issue 111: https://bitbucket.org/ned/coveragepy/issue/111/when-installing-coverage-with-pip-not +.. _issue 137: https://bitbucket.org/ned/coveragepy/issue/137/provide-docs-with-source-distribution +.. _issue 139: https://bitbucket.org/ned/coveragepy/issue/139/easy-check-for-a-certain-coverage-in-tests +.. _issue 143: https://bitbucket.org/ned/coveragepy/issue/143/omit-doesnt-seem-to-work-in-coverage +.. _issue 153: https://bitbucket.org/ned/coveragepy/issue/153/non-existent-filename-triggers +.. _issue 156: https://bitbucket.org/ned/coveragepy/issue/156/a-completely-unexecuted-file-shows-14 +.. _issue 163: https://bitbucket.org/ned/coveragepy/issue/163/problem-with-include-and-omit-filename +.. _issue 171: https://bitbucket.org/ned/coveragepy/issue/171/how-to-contribute-and-run-tests +.. _issue 193: https://bitbucket.org/ned/coveragepy/issue/193/unicodedecodeerror-on-htmlpy +.. _issue 201: https://bitbucket.org/ned/coveragepy/issue/201/coverage-using-django-14-with-pydb-on +.. _issue 202: https://bitbucket.org/ned/coveragepy/issue/202/get-rid-of-ez_setuppy-and +.. _issue 203: https://bitbucket.org/ned/coveragepy/issue/203/duplicate-filenames-reported-when-filename +.. _issue 205: https://bitbucket.org/ned/coveragepy/issue/205/make-pydoc-coverage-more-friendly +.. _issue 206: https://bitbucket.org/ned/coveragepy/issue/206/pydoc-coveragecoverage-fails-with-an-error +.. _issue 210: https://bitbucket.org/ned/coveragepy/issue/210/if-theres-no-coverage-data-coverage-xml +.. _issue 214: https://bitbucket.org/ned/coveragepy/issue/214/coveragepy-measures-itself-on-precise + + +Version 3.5.3 --- 29 September 2012 +----------------------------------- + +- Line numbers in the HTML report line up better with the source lines, fixing + `issue 197`_, thanks Marius Gedminas. + +- When specifying a directory as the source= option, the directory itself no + longer needs to have a ``__init__.py`` file, though its subdirectories do, to + be considered as source files. + +- Files encoded as UTF-8 with a BOM are now properly handled, fixing + `issue 179`_. Thanks, Pablo Carballo. + +- Fixed more cases of non-Python files being reported as Python source, and + then not being able to parse them as Python. Closes `issue 82`_ (again). + Thanks, Julian Berman. + +- Fixed memory leaks under Python 3, thanks, Brett Cannon. Closes `issue 147`_. + +- Optimized .pyo files may not have been handled correctly, `issue 195`_. + Thanks, Marius Gedminas. + +- Certain unusually named file paths could have been mangled during reporting, + `issue 194`_. Thanks, Marius Gedminas. + +- Try to do a better job of the impossible task of detecting when we can't + build the C extension, fixing `issue 183`_. + +- Testing is now done with `tox`_, thanks, Marc Abramowitz. + +.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report +.. _issue 147: https://bitbucket.org/ned/coveragepy/issue/147/massive-memory-usage-by-ctracer +.. _issue 179: https://bitbucket.org/ned/coveragepy/issue/179/htmlreporter-fails-when-source-file-is +.. _issue 183: https://bitbucket.org/ned/coveragepy/issue/183/install-fails-for-python-23 +.. 
_issue 194: https://bitbucket.org/ned/coveragepy/issue/194/filelocatorrelative_filename-could-mangle +.. _issue 195: https://bitbucket.org/ned/coveragepy/issue/195/pyo-file-handling-in-codeunit +.. _issue 197: https://bitbucket.org/ned/coveragepy/issue/197/line-numbers-in-html-report-do-not-align +.. _tox: http://tox.readthedocs.org/ + + + +Version 3.5.2 --- 4 May 2012 +---------------------------- + +No changes since 3.5.2.b1 + + +Version 3.5.2b1 --- 29 April 2012 +--------------------------------- + +- The HTML report has slightly tweaked controls: the buttons at the top of + the page are color-coded to the source lines they affect. + +- Custom CSS can be applied to the HTML report by specifying a CSS file as + the extra_css configuration value in the [html] section. + +- Source files with custom encodings declared in a comment at the top are now + properly handled during reporting on Python 2. Python 3 always handled them + properly. This fixes `issue 157`_. + +- Backup files left behind by editors are no longer collected by the source= + option, fixing `issue 168`_. + +- If a file doesn't parse properly as Python, we don't report it as an error + if the filename seems like maybe it wasn't meant to be Python. This is a + pragmatic fix for `issue 82`_. + +- The ``-m`` switch on ``coverage report``, which includes missing line numbers + in the summary report, can now be specifed as ``show_missing`` in the + config file. Closes `issue 173`_. + +- When running a module with ``coverage run -m <modulename>``, certain details + of the execution environment weren't the same as for + ``python -m <modulename>``. This had the unfortunate side-effect of making + ``coverage run -m unittest discover`` not work if you had tests in a + directory named "test". This fixes `issue 155`_ and `issue 142`_. + +- Now the exit status of your product code is properly used as the process + status when running ``python -m coverage run ...``. Thanks, JT Olds. + +- When installing into pypy, we no longer attempt (and fail) to compile + the C tracer function, closing `issue 166`_. + +.. _issue 82: https://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report +.. _issue 142: https://bitbucket.org/ned/coveragepy/issue/142/executing-python-file-syspath-is-replaced +.. _issue 155: https://bitbucket.org/ned/coveragepy/issue/155/cant-use-coverage-run-m-unittest-discover +.. _issue 157: https://bitbucket.org/ned/coveragepy/issue/157/chokes-on-source-files-with-non-utf-8 +.. _issue 166: https://bitbucket.org/ned/coveragepy/issue/166/dont-try-to-compile-c-extension-on-pypy +.. _issue 168: https://bitbucket.org/ned/coveragepy/issue/168/dont-be-alarmed-by-emacs-droppings +.. _issue 173: https://bitbucket.org/ned/coveragepy/issue/173/theres-no-way-to-specify-show-missing-in + + +Version 3.5.1 --- 23 September 2011 +----------------------------------- + +- The ``[paths]`` feature unfortunately didn't work in real world situations + where you wanted to, you know, report on the combined data. Now all paths + stored in the combined file are canonicalized properly. + + +Version 3.5.1b1 --- 28 August 2011 +---------------------------------- + +- When combining data files from parallel runs, you can now instruct coverage + about which directories are equivalent on different machines. A ``[paths]`` + section in the configuration file lists paths that are to be considered + equivalent. Finishes `issue 17`_. + +- for-else constructs are understood better, and don't cause erroneous partial + branch warnings. 
Fixes `issue 122`_. + +- Branch coverage for ``with`` statements is improved, fixing `issue 128`_. + +- The number of partial branches reported on the HTML summary page was + different than the number reported on the individual file pages. This is + now fixed. + +- An explicit include directive to measure files in the Python installation + wouldn't work because of the standard library exclusion. Now the include + directive takes precendence, and the files will be measured. Fixes + `issue 138`_. + +- The HTML report now handles Unicode characters in Python source files + properly. This fixes `issue 124`_ and `issue 144`_. Thanks, Devin + Jeanpierre. + +- In order to help the core developers measure the test coverage of the + standard library, Brandon Rhodes devised an aggressive hack to trick Python + into running some coverage code before anything else in the process. + See the coverage/fullcoverage directory if you are interested. + +.. _issue 17: http://bitbucket.org/ned/coveragepy/issue/17/support-combining-coverage-data-from +.. _issue 122: http://bitbucket.org/ned/coveragepy/issue/122/for-else-always-reports-missing-branch +.. _issue 124: http://bitbucket.org/ned/coveragepy/issue/124/no-arbitrary-unicode-in-html-reports-in +.. _issue 128: http://bitbucket.org/ned/coveragepy/issue/128/branch-coverage-of-with-statement-in-27 +.. _issue 138: http://bitbucket.org/ned/coveragepy/issue/138/include-should-take-precedence-over-is +.. _issue 144: http://bitbucket.org/ned/coveragepy/issue/144/failure-generating-html-output-for + + +Version 3.5 --- 29 June 2011 +---------------------------- + +- The HTML report hotkeys now behave slightly differently when the current + chunk isn't visible at all: a chunk on the screen will be selected, + instead of the old behavior of jumping to the literal next chunk. + The hotkeys now work in Google Chrome. Thanks, Guido van Rossum. + + +Version 3.5b1 --- 5 June 2011 +----------------------------- + +- The HTML report now has hotkeys. Try ``n``, ``s``, ``m``, ``x``, ``b``, + ``p``, and ``c`` on the overview page to change the column sorting. + On a file page, ``r``, ``m``, ``x``, and ``p`` toggle the run, missing, + excluded, and partial line markings. You can navigate the highlighted + sections of code by using the ``j`` and ``k`` keys for next and previous. + The ``1`` (one) key jumps to the first highlighted section in the file, + and ``0`` (zero) scrolls to the top of the file. + +- The ``--omit`` and ``--include`` switches now interpret their values more + usefully. If the value starts with a wildcard character, it is used as-is. + If it does not, it is interpreted relative to the current directory. + Closes `issue 121`_. + +- Partial branch warnings can now be pragma'd away. The configuration option + ``partial_branches`` is a list of regular expressions. Lines matching any of + those expressions will never be marked as a partial branch. In addition, + there's a built-in list of regular expressions marking statements which should + never be marked as partial. This list includes ``while True:``, ``while 1:``, + ``if 1:``, and ``if 0:``. + +- The ``coverage()`` constructor accepts single strings for the ``omit=`` and + ``include=`` arguments, adapting to a common error in programmatic use. + +- Modules can now be run directly using ``coverage run -m modulename``, to + mirror Python's ``-m`` flag. Closes `issue 95`_, thanks, Brandon Rhodes. 
+ +- ``coverage run`` didn't emulate Python accurately in one small detail: the + current directory inserted into ``sys.path`` was relative rather than + absolute. This is now fixed. + +- HTML reporting is now incremental: a record is kept of the data that + produced the HTML reports, and only files whose data has changed will + be generated. This should make most HTML reporting faster. + +- Pathological code execution could disable the trace function behind our + backs, leading to incorrect code measurement. Now if this happens, + coverage.py will issue a warning, at least alerting you to the problem. + Closes `issue 93`_. Thanks to Marius Gedminas for the idea. + +- The C-based trace function now behaves properly when saved and restored + with ``sys.gettrace()`` and ``sys.settrace()``. This fixes `issue 125`_ + and `issue 123`_. Thanks, Devin Jeanpierre. + +- Source files are now opened with Python 3.2's ``tokenize.open()`` where + possible, to get the best handling of Python source files with encodings. + Closes `issue 107`_, thanks, Brett Cannon. + +- Syntax errors in supposed Python files can now be ignored during reporting + with the ``-i`` switch just like other source errors. Closes `issue 115`_. + +- Installation from source now succeeds on machines without a C compiler, + closing `issue 80`_. + +- Coverage.py can now be run directly from a working tree by specifying + the directory name to python: ``python coverage_py_working_dir run ...``. + Thanks, Brett Cannon. + +- A little bit of Jython support: `coverage run` can now measure Jython + execution by adapting when $py.class files are traced. Thanks, Adi Roiban. + Jython still doesn't provide the Python libraries needed to make + coverage reporting work, unfortunately. + +- Internally, files are now closed explicitly, fixing `issue 104`_. Thanks, + Brett Cannon. + +.. _issue 80: https://bitbucket.org/ned/coveragepy/issue/80/is-there-a-duck-typing-way-to-know-we-cant +.. _issue 93: http://bitbucket.org/ned/coveragepy/issue/93/copying-a-mock-object-breaks-coverage +.. _issue 95: https://bitbucket.org/ned/coveragepy/issue/95/run-subcommand-should-take-a-module-name +.. _issue 104: https://bitbucket.org/ned/coveragepy/issue/104/explicitly-close-files +.. _issue 107: https://bitbucket.org/ned/coveragepy/issue/107/codeparser-not-opening-source-files-with +.. _issue 115: https://bitbucket.org/ned/coveragepy/issue/115/fail-gracefully-when-reporting-on-file +.. _issue 121: https://bitbucket.org/ned/coveragepy/issue/121/filename-patterns-are-applied-stupidly +.. _issue 123: https://bitbucket.org/ned/coveragepy/issue/123/pyeval_settrace-used-in-way-that-breaks +.. _issue 125: https://bitbucket.org/ned/coveragepy/issue/125/coverage-removes-decoratortoolss-tracing + + +Version 3.4 --- 19 September 2010 +--------------------------------- + +- The XML report is now sorted by package name, fixing `issue 88`_. + +- Programs that exited with ``sys.exit()`` with no argument weren't handled + properly, producing a coverage.py stack trace. That is now fixed. + +.. _issue 88: http://bitbucket.org/ned/coveragepy/issue/88/xml-report-lists-packages-in-random-order + + +Version 3.4b2 --- 6 September 2010 +---------------------------------- + +- Completely unexecuted files can now be included in coverage results, reported + as 0% covered. This only happens if the --source option is specified, since + coverage.py needs guidance about where to look for source files. 
+ +- The XML report output now properly includes a percentage for branch coverage, + fixing `issue 65`_ and `issue 81`_. + +- Coverage percentages are now displayed uniformly across reporting methods. + Previously, different reports could round percentages differently. Also, + percentages are only reported as 0% or 100% if they are truly 0 or 100, and + are rounded otherwise. Fixes `issue 41`_ and `issue 70`_. + +- The precision of reported coverage percentages can be set with the + ``[report] precision`` config file setting. Completes `issue 16`_. + +- Threads derived from ``threading.Thread`` with an overridden `run` method + would report no coverage for the `run` method. This is now fixed, closing + `issue 85`_. + +.. _issue 16: http://bitbucket.org/ned/coveragepy/issue/16/allow-configuration-of-accuracy-of-percentage-totals +.. _issue 41: http://bitbucket.org/ned/coveragepy/issue/41/report-says-100-when-it-isnt-quite-there +.. _issue 65: http://bitbucket.org/ned/coveragepy/issue/65/branch-option-not-reported-in-cobertura +.. _issue 70: http://bitbucket.org/ned/coveragepy/issue/70/text-report-and-html-report-disagree-on-coverage +.. _issue 81: http://bitbucket.org/ned/coveragepy/issue/81/xml-report-does-not-have-condition-coverage-attribute-for-lines-with-a +.. _issue 85: http://bitbucket.org/ned/coveragepy/issue/85/threadrun-isnt-measured + + +Version 3.4b1 --- 21 August 2010 +-------------------------------- + +- BACKWARD INCOMPATIBILITY: the ``--omit`` and ``--include`` switches now take + file patterns rather than file prefixes, closing `issue 34`_ and `issue 36`_. + +- BACKWARD INCOMPATIBILITY: the `omit_prefixes` argument is gone throughout + coverage.py, replaced with `omit`, a list of filename patterns suitable for + `fnmatch`. A parallel argument `include` controls what files are included. + +- The run command now has a ``--source`` switch, a list of directories or + module names. If provided, coverage.py will only measure execution in those + source files. + +- Various warnings are printed to stderr for problems encountered during data + measurement: if a ``--source`` module has no Python source to measure, or is + never encountered at all, or if no data is collected. + +- The reporting commands (report, annotate, html, and xml) now have an + ``--include`` switch to restrict reporting to modules matching those file + patterns, similar to the existing ``--omit`` switch. Thanks, Zooko. + +- The run command now supports ``--include`` and ``--omit`` to control what + modules it measures. This can speed execution and reduce the amount of data + during reporting. Thanks Zooko. + +- Since coverage.py 3.1, using the Python trace function has been slower than + it needs to be. A cache of tracing decisions was broken, but has now been + fixed. + +- Python 2.7 and 3.2 have introduced new opcodes that are now supported. + +- Python files with no statements, for example, empty ``__init__.py`` files, + are now reported as having zero statements instead of one. Fixes `issue 1`_. + +- Reports now have a column of missed line counts rather than executed line + counts, since developers should focus on reducing the missed lines to zero, + rather than increasing the executed lines to varying targets. Once + suggested, this seemed blindingly obvious. + +- Line numbers in HTML source pages are clickable, linking directly to that + line, which is highlighted on arrival. Added a link back to the index page + at the bottom of each HTML page. 
+ +- Programs that call ``os.fork`` will properly collect data from both the child + and parent processes. Use ``coverage run -p`` to get two data files that can + be combined with ``coverage combine``. Fixes `issue 56`_. + +- Coverage is now runnable as a module: ``python -m coverage``. Thanks, + Brett Cannon. + +- When measuring code running in a virtualenv, most of the system library was + being measured when it shouldn't have been. This is now fixed. + +- Doctest text files are no longer recorded in the coverage data, since they + can't be reported anyway. Fixes `issue 52`_ and `issue 61`_. + +- Jinja HTML templates compile into Python code using the HTML filename, + which confused coverage.py. Now these files are no longer traced, fixing + `issue 82`_. + +- Source files can have more than one dot in them (foo.test.py), and will be + treated properly while reporting. Fixes `issue 46`_. + +- Source files with DOS line endings are now properly tokenized for syntax + coloring on non-DOS machines. Fixes `issue 53`_. + +- Unusual code structure that confused exits from methods with exits from + classes is now properly analyzed. See `issue 62`_. + +- Asking for an HTML report with no files now shows a nice error message rather + than a cryptic failure ('int' object is unsubscriptable). Fixes `issue 59`_. + +.. _issue 1: http://bitbucket.org/ned/coveragepy/issue/1/empty-__init__py-files-are-reported-as-1-executable +.. _issue 34: http://bitbucket.org/ned/coveragepy/issue/34/enhanced-omit-globbing-handling +.. _issue 36: http://bitbucket.org/ned/coveragepy/issue/36/provide-regex-style-omit +.. _issue 46: http://bitbucket.org/ned/coveragepy/issue/46 +.. _issue 53: http://bitbucket.org/ned/coveragepy/issue/53 +.. _issue 52: http://bitbucket.org/ned/coveragepy/issue/52/doctesttestfile-confuses-source-detection +.. _issue 56: http://bitbucket.org/ned/coveragepy/issue/56 +.. _issue 61: http://bitbucket.org/ned/coveragepy/issue/61/annotate-i-doesnt-work +.. _issue 62: http://bitbucket.org/ned/coveragepy/issue/62 +.. _issue 59: http://bitbucket.org/ned/coveragepy/issue/59/html-report-fails-with-int-object-is +.. _issue 82: http://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report + + +Version 3.3.1 --- 6 March 2010 +------------------------------ + +- Using `parallel=True` in .coveragerc file prevented reporting, but now does + not, fixing `issue 49`_. + +- When running your code with "coverage run", if you call `sys.exit()`, + coverage.py will exit with that status code, fixing `issue 50`_. + +.. _issue 49: http://bitbucket.org/ned/coveragepy/issue/49 +.. _issue 50: http://bitbucket.org/ned/coveragepy/issue/50 + + +Version 3.3 --- 24 February 2010 +-------------------------------- + +- Settings are now read from a .coveragerc file. A specific file can be + specified on the command line with --rcfile=FILE. The name of the file can + be programmatically set with the `config_file` argument to the coverage() + constructor, or reading a config file can be disabled with + `config_file=False`. + +- Fixed a problem with nested loops having their branch possibilities + mischaracterized: `issue 39`_. + +- Added coverage.process_start to enable coverage measurement when Python + starts. + +- Parallel data file names now have a random number appended to them in + addition to the machine name and process id. + +- Parallel data files combined with "coverage combine" are deleted after + they're combined, to clean up unneeded files. Fixes `issue 40`_. 
+ +- Exceptions thrown from product code run with "coverage run" are now displayed + without internal coverage.py frames, so the output is the same as when the + code is run without coverage.py. + +- The `data_suffix` argument to the coverage constructor is now appended with + an added dot rather than simply appended, so that .coveragerc files will not + be confused for data files. + +- Python source files that don't end with a newline can now be executed, fixing + `issue 47`_. + +- Added an AUTHORS.txt file. + +.. _issue 39: http://bitbucket.org/ned/coveragepy/issue/39 +.. _issue 40: http://bitbucket.org/ned/coveragepy/issue/40 +.. _issue 47: http://bitbucket.org/ned/coveragepy/issue/47 + + +Version 3.2 --- 5 December 2009 +------------------------------- + +- Added a ``--version`` option on the command line. + + +Version 3.2b4 --- 1 December 2009 +--------------------------------- + +- Branch coverage improvements: + + - The XML report now includes branch information. + +- Click-to-sort HTML report columns are now persisted in a cookie. Viewing + a report will sort it first the way you last had a coverage report sorted. + Thanks, `Chris Adams`_. + +- On Python 3.x, setuptools has been replaced by `Distribute`_. + +.. _Distribute: http://packages.python.org/distribute/ + + +Version 3.2b3 --- 23 November 2009 +---------------------------------- + +- Fixed a memory leak in the C tracer that was introduced in 3.2b1. + +- Branch coverage improvements: + + - Branches to excluded code are ignored. + +- The table of contents in the HTML report is now sortable: click the headers + on any column. Thanks, `Chris Adams`_. + +.. _Chris Adams: http://improbable.org/chris/ + + +Version 3.2b2 --- 19 November 2009 +---------------------------------- + +- Branch coverage improvements: + + - Classes are no longer incorrectly marked as branches: `issue 32`_. + + - "except" clauses with types are no longer incorrectly marked as branches: + `issue 35`_. + +- Fixed some problems syntax coloring sources with line continuations and + source with tabs: `issue 30`_ and `issue 31`_. + +- The --omit option now works much better than before, fixing `issue 14`_ and + `issue 33`_. Thanks, Danek Duvall. + +.. _issue 14: http://bitbucket.org/ned/coveragepy/issue/14 +.. _issue 30: http://bitbucket.org/ned/coveragepy/issue/30 +.. _issue 31: http://bitbucket.org/ned/coveragepy/issue/31 +.. _issue 32: http://bitbucket.org/ned/coveragepy/issue/32 +.. _issue 33: http://bitbucket.org/ned/coveragepy/issue/33 +.. _issue 35: http://bitbucket.org/ned/coveragepy/issue/35 + + +Version 3.2b1 --- 10 November 2009 +---------------------------------- + +- Branch coverage! + +- XML reporting has file paths that let Cobertura find the source code. + +- The tracer code has changed, it's a few percent faster. + +- Some exceptions reported by the command line interface have been cleaned up + so that tracebacks inside coverage.py aren't shown. Fixes `issue 23`_. + +.. _issue 23: http://bitbucket.org/ned/coveragepy/issue/23 + + +Version 3.1 --- 4 October 2009 +------------------------------ + +- Source code can now be read from eggs. Thanks, Ross Lawley. Fixes + `issue 25`_. + +.. _issue 25: http://bitbucket.org/ned/coveragepy/issue/25 + + +Version 3.1b1 --- 27 September 2009 +----------------------------------- + +- Python 3.1 is now supported. + +- Coverage.py has a new command line syntax with sub-commands. This expands + the possibilities for adding features and options in the future. The old + syntax is still supported. 
Try "coverage help" to see the new commands. + Thanks to Ben Finney for early help. + +- Added an experimental "coverage xml" command for producing coverage reports + in a Cobertura-compatible XML format. Thanks, Bill Hart. + +- Added the --timid option to enable a simpler slower trace function that works + for DecoratorTools projects, including TurboGears. Fixed `issue 12`_ and + `issue 13`_. + +- HTML reports show modules from other directories. Fixed `issue 11`_. + +- HTML reports now display syntax-colored Python source. + +- Programs that change directory will still write .coverage files in the + directory where execution started. Fixed `issue 24`_. + +- Added a "coverage debug" command for getting diagnostic information about the + coverage.py installation. + +.. _issue 11: http://bitbucket.org/ned/coveragepy/issue/11 +.. _issue 12: http://bitbucket.org/ned/coveragepy/issue/12 +.. _issue 13: http://bitbucket.org/ned/coveragepy/issue/13 +.. _issue 24: http://bitbucket.org/ned/coveragepy/issue/24 + + +Version 3.0.1 --- 7 July 2009 +----------------------------- + +- Removed the recursion limit in the tracer function. Previously, code that + ran more than 500 frames deep would crash. Fixed `issue 9`_. + +- Fixed a bizarre problem involving pyexpat, whereby lines following XML parser + invocations could be overlooked. Fixed `issue 10`_. + +- On Python 2.3, coverage.py could mis-measure code with exceptions being + raised. This is now fixed. + +- The coverage.py code itself will now not be measured by coverage.py, and no + coverage modules will be mentioned in the nose --with-cover plug-in. Fixed + `issue 8`_. + +- When running source files, coverage.py now opens them in universal newline + mode just like Python does. This lets it run Windows files on Mac, for + example. + +.. _issue 9: http://bitbucket.org/ned/coveragepy/issue/9 +.. _issue 10: http://bitbucket.org/ned/coveragepy/issue/10 +.. _issue 8: http://bitbucket.org/ned/coveragepy/issue/8 + + +Version 3.0 --- 13 June 2009 +---------------------------- + +- Fixed the way the Python library was ignored. Too much code was being + excluded the old way. + +- Tabs are now properly converted in HTML reports. Previously indentation was + lost. Fixed `issue 6`_. + +- Nested modules now get a proper flat_rootname. Thanks, Christian Heimes. + +.. _issue 6: http://bitbucket.org/ned/coveragepy/issue/6 + + +Version 3.0b3 --- 16 May 2009 +----------------------------- + +- Added parameters to coverage.__init__ for options that had been set on the + coverage object itself. + +- Added clear_exclude() and get_exclude_list() methods for programmatic + manipulation of the exclude regexes. + +- Added coverage.load() to read previously-saved data from the data file. + +- Improved the finding of code files. For example, .pyc files that have been + installed after compiling are now located correctly. Thanks, Detlev + Offenbach. + +- When using the object API (that is, constructing a coverage() object), data + is no longer saved automatically on process exit. You can re-enable it with + the auto_data=True parameter on the coverage() constructor. The module-level + interface still uses automatic saving. + + +Version 3.0b --- 30 April 2009 +------------------------------ + +HTML reporting, and continued refactoring. + +- HTML reports and annotation of source files: use the new -b (browser) switch. + Thanks to George Song for code, inspiration and guidance. + +- Code in the Python standard library is not measured by default. 
If you need + to measure standard library code, use the -L command-line switch during + execution, or the cover_pylib=True argument to the coverage() constructor. + +- Source annotation into a directory (-a -d) behaves differently. The + annotated files are named with their hierarchy flattened so that same-named + files from different directories no longer collide. Also, only files in the + current tree are included. + +- coverage.annotate_file is no longer available. + +- Programs executed with -x now behave more as they should, for example, + __file__ has the correct value. + +- .coverage data files have a new pickle-based format designed for better + extensibility. + +- Removed the undocumented cache_file argument to coverage.usecache(). + + +Version 3.0b1 --- 7 March 2009 +------------------------------ + +Major overhaul. + +- Coverage is now a package rather than a module. Functionality has been split + into classes. + +- The trace function is implemented in C for speed. Coverage runs are now + much faster. Thanks to David Christian for productive micro-sprints and + other encouragement. + +- Executable lines are identified by reading the line number tables in the + compiled code, removing a great deal of complicated analysis code. + +- Precisely which lines are considered executable has changed in some cases. + Therefore, your coverage stats may also change slightly. + +- The singleton coverage object is only created if the module-level functions + are used. This maintains the old interface while allowing better + programmatic use of Coverage. + +- The minimum supported Python version is 2.3. + + +Version 2.85 --- 14 September 2008 +---------------------------------- + +- Add support for finding source files in eggs. Don't check for + morf's being instances of ModuleType, instead use duck typing so that + pseudo-modules can participate. Thanks, Imri Goldberg. + +- Use os.realpath as part of the fixing of file names so that symlinks won't + confuse things. Thanks, Patrick Mezard. + + +Version 2.80 --- 25 May 2008 +---------------------------- + +- Open files in rU mode to avoid line ending craziness. Thanks, Edward Loper. + + +Version 2.78 --- 30 September 2007 +---------------------------------- + +- Don't try to predict whether a file is Python source based on the extension. + Extension-less files are often Pythons scripts. Instead, simply parse the file + and catch the syntax errors. Hat tip to Ben Finney. + + +Version 2.77 --- 29 July 2007 +----------------------------- + +- Better packaging. + + +Version 2.76 --- 23 July 2007 +----------------------------- + +- Now Python 2.5 is *really* fully supported: the body of the new with + statement is counted as executable. + + +Version 2.75 --- 22 July 2007 +----------------------------- + +- Python 2.5 now fully supported. The method of dealing with multi-line + statements is now less sensitive to the exact line that Python reports during + execution. Pass statements are handled specially so that their disappearance + during execution won't throw off the measurement. + + +Version 2.7 --- 21 July 2007 +---------------------------- + +- "#pragma: nocover" is excluded by default. + +- Properly ignore docstrings and other constant expressions that appear in the + middle of a function, a problem reported by Tim Leslie. + +- coverage.erase() shouldn't clobber the exclude regex. Change how parallel + mode is invoked, and fix erase() so that it erases the cache when called + programmatically. 
+ +- In reports, ignore code executed from strings, since we can't do anything + useful with it anyway. + +- Better file handling on Linux, thanks Guillaume Chazarain. + +- Better shell support on Windows, thanks Noel O'Boyle. + +- Python 2.2 support maintained, thanks Catherine Proulx. + +- Minor changes to avoid lint warnings. + + +Version 2.6 --- 23 August 2006 +------------------------------ + +- Applied Joseph Tate's patch for function decorators. + +- Applied Sigve Tjora and Mark van der Wal's fixes for argument handling. + +- Applied Geoff Bache's parallel mode patch. + +- Refactorings to improve testability. Fixes to command-line logic for parallel + mode and collect. + + +Version 2.5 --- 4 December 2005 +------------------------------- + +- Call threading.settrace so that all threads are measured. Thanks Martin + Fuzzey. + +- Add a file argument to report so that reports can be captured to a different + destination. + +- coverage.py can now measure itself. + +- Adapted Greg Rogers' patch for using relative file names, and sorting and + omitting files to report on. + + +Version 2.2 --- 31 December 2004 +-------------------------------- + +- Allow for keyword arguments in the module global functions. Thanks, Allen. + + +Version 2.1 --- 14 December 2004 +-------------------------------- + +- Return 'analysis' to its original behavior and add 'analysis2'. Add a global + for 'annotate', and factor it, adding 'annotate_file'. + + +Version 2.0 --- 12 December 2004 +-------------------------------- + +Significant code changes. + +- Finding executable statements has been rewritten so that docstrings and + other quirks of Python execution aren't mistakenly identified as missing + lines. + +- Lines can be excluded from consideration, even entire suites of lines. + +- The file system cache of covered lines can be disabled programmatically. + +- Modernized the code. + + +Earlier History +--------------- + +2001-12-04 GDR Created. + +2001-12-06 GDR Added command-line interface and source code annotation. + +2001-12-09 GDR Moved design and interface to separate documents. + +2001-12-10 GDR Open cache file as binary on Windows. Allow simultaneous -e and +-x, or -a and -r. + +2001-12-12 GDR Added command-line help. Cache analysis so that it only needs to +be done once when you specify -a and -r. + +2001-12-13 GDR Improved speed while recording. Portable between Python 1.5.2 +and 2.1.1. + +2002-01-03 GDR Module-level functions work correctly. + +2002-01-07 GDR Update sys.path when running a file with the -x option, so that +it matches the value the program would get if it were run on its own. diff --git a/third_party/pycoverage/LICENSE b/third_party/pycoverage/LICENSE new file mode 100644 index 0000000..2212662 --- /dev/null +++ b/third_party/pycoverage/LICENSE @@ -0,0 +1,30 @@ +From __init__.py: +# COPYRIGHT AND LICENSE +# +# Copyright 2001 Gareth Rees. All rights reserved. +# Copyright 2004-2013 Ned Batchelder. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. diff --git a/third_party/pycoverage/MANIFEST.in b/third_party/pycoverage/MANIFEST.in new file mode 100644 index 0000000..7224674 --- /dev/null +++ b/third_party/pycoverage/MANIFEST.in @@ -0,0 +1,17 @@ +# MANIFEST.in file for coverage.py +recursive-include coverage/htmlfiles * +recursive-include coverage/fullcoverage * + +include coverage.egg-info/*.* +include setup.py +include __main__.py +include README.txt +include CHANGES.txt +include AUTHORS.txt +include requirements.txt +include igor.py +include tox.ini + +recursive-include tests * +recursive-include doc *.rst +global-exclude *.pyc diff --git a/third_party/pycoverage/OWNERS b/third_party/pycoverage/OWNERS new file mode 100644 index 0000000..334fe6f --- /dev/null +++ b/third_party/pycoverage/OWNERS @@ -0,0 +1,2 @@ +sbc@chromium.org +binji@chromium.org diff --git a/third_party/pycoverage/PKG-INFO b/third_party/pycoverage/PKG-INFO new file mode 100644 index 0000000..6079215 --- /dev/null +++ b/third_party/pycoverage/PKG-INFO @@ -0,0 +1,41 @@ +Metadata-Version: 1.0 +Name: coverage +Version: 3.7.1 +Summary: Code coverage measurement for Python +Home-page: http://nedbatchelder.com/code/coverage +Author: Ned Batchelder and others +Author-email: ned@nedbatchelder.com +License: BSD +Description: Coverage.py measures code coverage, typically during test execution. It uses + the code analysis tools and tracing hooks provided in the Python standard + library to determine which lines are executable, and which have been executed. + + Coverage.py runs on Pythons 2.3 through 3.3, and PyPy 1.9. + + Documentation is at `nedbatchelder.com <http://nedbatchelder.com/code/coverage>`_. Code repository and issue + tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_, with a + mirrored repo on `Github <https://github.com/nedbat/coveragepy>`_. + + New in 3.7: ``--debug``, and 12 bugs closed. + + New in 3.6: ``--fail-under``, and >20 bugs closed. + + New in 3.5: Branch coverage exclusions, keyboard shortcuts in HTML report. + + New in 3.4: Better control over source to measure, and unexecuted files + can be reported. + + New in 3.3: .coveragerc files. + + New in 3.2: Branch coverage! 
+Keywords: code coverage testing +Platform: UNKNOWN +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Development Status :: 5 - Production/Stable diff --git a/third_party/pycoverage/README.chromium b/third_party/pycoverage/README.chromium new file mode 100644 index 0000000..e12c9f5 --- /dev/null +++ b/third_party/pycoverage/README.chromium @@ -0,0 +1,14 @@ +Name: coverage +URL: https://pypi.python.org/pypi/coverage +Version: 3.7.1 +Security Critical: no +License: BSD +License File: coverage/__init__.py + +Description: +Code coverage measurement for Python. Initially imported for testing +python script coverage in src/native_client_sdk. + +Local Modifications: +Stripped 'doc' and 'test' directories to reduce the size of the +tree. diff --git a/third_party/pycoverage/README.txt b/third_party/pycoverage/README.txt new file mode 100644 index 0000000..2c6fe3d --- /dev/null +++ b/third_party/pycoverage/README.txt @@ -0,0 +1,9 @@ +Coverage.py: code coverage testing for Python + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +For more information, see http://nedbatchelder.com/code/coverage + +Code repo and issue tracking are at http://bitbucket.org/ned/coveragepy diff --git a/third_party/pycoverage/__main__.py b/third_party/pycoverage/__main__.py new file mode 100644 index 0000000..b11dafc --- /dev/null +++ b/third_party/pycoverage/__main__.py @@ -0,0 +1,15 @@ +"""Be able to execute coverage.py by pointing Python at a working tree.""" + +import runpy, os + +PKG = 'coverage' + +try: + run_globals = runpy.run_module(PKG, run_name='__main__', alter_sys=True) + executed = os.path.splitext(os.path.basename(run_globals['__file__']))[0] + if executed != '__main__': # For Python 2.5 compatibility + raise ImportError( + 'Incorrectly executed %s instead of __main__' % executed + ) +except ImportError: # For Python 2.6 compatibility + runpy.run_module('%s.__main__' % PKG, run_name='__main__', alter_sys=True) diff --git a/third_party/pycoverage/coverage/__init__.py b/third_party/pycoverage/coverage/__init__.py new file mode 100644 index 0000000..193b7a1 --- /dev/null +++ b/third_party/pycoverage/coverage/__init__.py @@ -0,0 +1,120 @@ +"""Code coverage measurement for Python. + +Ned Batchelder +http://nedbatchelder.com/code/coverage + +""" + +from coverage.version import __version__, __url__ + +from coverage.control import coverage, process_startup +from coverage.data import CoverageData +from coverage.cmdline import main, CoverageScript +from coverage.misc import CoverageException + +# Module-level functions. The original API to this module was based on +# functions defined directly in the module, with a singleton of the coverage() +# class. That design hampered programmability, so the current api uses +# explicitly-created coverage objects. But for backward compatibility, here we +# define the top-level functions to create the singleton when they are first +# called. + +# Singleton object for use with module-level functions. 
The singleton is +# created as needed when one of the module-level functions is called. +_the_coverage = None + +def _singleton_method(name): + """Return a function to the `name` method on a singleton `coverage` object. + + The singleton object is created the first time one of these functions is + called. + + """ + # Disable pylint msg W0612, because a bunch of variables look unused, but + # they're accessed via locals(). + # pylint: disable=W0612 + + def wrapper(*args, **kwargs): + """Singleton wrapper around a coverage method.""" + global _the_coverage + if not _the_coverage: + _the_coverage = coverage(auto_data=True) + return getattr(_the_coverage, name)(*args, **kwargs) + + import inspect + meth = getattr(coverage, name) + args, varargs, kw, defaults = inspect.getargspec(meth) + argspec = inspect.formatargspec(args[1:], varargs, kw, defaults) + docstring = meth.__doc__ + wrapper.__doc__ = ("""\ + A first-use-singleton wrapper around coverage.%(name)s. + + This wrapper is provided for backward compatibility with legacy code. + New code should use coverage.%(name)s directly. + + %(name)s%(argspec)s: + + %(docstring)s + """ % locals() + ) + + return wrapper + + +# Define the module-level functions. +use_cache = _singleton_method('use_cache') +start = _singleton_method('start') +stop = _singleton_method('stop') +erase = _singleton_method('erase') +exclude = _singleton_method('exclude') +analysis = _singleton_method('analysis') +analysis2 = _singleton_method('analysis2') +report = _singleton_method('report') +annotate = _singleton_method('annotate') + + +# On Windows, we encode and decode deep enough that something goes wrong and +# the encodings.utf_8 module is loaded and then unloaded, I don't know why. +# Adding a reference here prevents it from being unloaded. Yuk. +import encodings.utf_8 + +# Because of the "from coverage.control import fooey" lines at the top of the +# file, there's an entry for coverage.coverage in sys.modules, mapped to None. +# This makes some inspection tools (like pydoc) unable to find the class +# coverage.coverage. So remove that entry. +import sys +try: + del sys.modules['coverage.coverage'] +except KeyError: + pass + + +# COPYRIGHT AND LICENSE +# +# Copyright 2001 Gareth Rees. All rights reserved. +# Copyright 2004-2013 Ned Batchelder. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. diff --git a/third_party/pycoverage/coverage/__main__.py b/third_party/pycoverage/coverage/__main__.py new file mode 100644 index 0000000..55e0d25 --- /dev/null +++ b/third_party/pycoverage/coverage/__main__.py @@ -0,0 +1,4 @@ +"""Coverage.py's main entry point.""" +import sys +from coverage.cmdline import main +sys.exit(main()) diff --git a/third_party/pycoverage/coverage/annotate.py b/third_party/pycoverage/coverage/annotate.py new file mode 100644 index 0000000..5c39678 --- /dev/null +++ b/third_party/pycoverage/coverage/annotate.py @@ -0,0 +1,102 @@ +"""Source file annotation for Coverage.""" + +import os, re + +from coverage.backward import sorted # pylint: disable=W0622 +from coverage.report import Reporter + +class AnnotateReporter(Reporter): + """Generate annotated source files showing line coverage. + + This reporter creates annotated copies of the measured source files. Each + .py file is copied as a .py,cover file, with a left-hand margin annotating + each line:: + + > def h(x): + - if 0: #pragma: no cover + - pass + > if x == 1: + ! a = 1 + > else: + > a = 2 + + > h(2) + + Executed lines use '>', lines not executed use '!', lines excluded from + consideration use '-'. + + """ + + def __init__(self, coverage, config): + super(AnnotateReporter, self).__init__(coverage, config) + self.directory = None + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def report(self, morfs, directory=None): + """Run the report. + + See `coverage.report()` for arguments. + + """ + self.report_files(self.annotate_file, morfs, directory) + + def annotate_file(self, cu, analysis): + """Annotate a single file. + + `cu` is the CodeUnit for the file to annotate. + + """ + if not cu.relative: + return + + filename = cu.filename + source = cu.source_file() + if self.directory: + dest_file = os.path.join(self.directory, cu.flat_rootname()) + dest_file += ".py,cover" + else: + dest_file = filename + ",cover" + dest = open(dest_file, 'w') + + statements = sorted(analysis.statements) + missing = sorted(analysis.missing) + excluded = sorted(analysis.excluded) + + lineno = 0 + i = 0 + j = 0 + covered = True + while True: + line = source.readline() + if line == '': + break + lineno += 1 + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(' ') + elif self.else_re.match(line): + # Special logic for lines containing only 'else:'. + if i >= len(statements) and j >= len(missing): + dest.write('! ') + elif i >= len(statements) or j >= len(missing): + dest.write('> ') + elif statements[i] == missing[j]: + dest.write('! ') + else: + dest.write('> ') + elif lineno in excluded: + dest.write('- ') + elif covered: + dest.write('> ') + else: + dest.write('! 
') + dest.write(line) + source.close() + dest.close() diff --git a/third_party/pycoverage/coverage/backward.py b/third_party/pycoverage/coverage/backward.py new file mode 100644 index 0000000..7d26854 --- /dev/null +++ b/third_party/pycoverage/coverage/backward.py @@ -0,0 +1,184 @@ +"""Add things to old Pythons so I can pretend they are newer.""" + +# This file does lots of tricky stuff, so disable a bunch of lintisms. +# pylint: disable=F0401,W0611,W0622 +# F0401: Unable to import blah +# W0611: Unused import blah +# W0622: Redefining built-in blah + +import os, re, sys + +# Python 2.3 doesn't have `set` +try: + set = set # new in 2.4 +except NameError: + from sets import Set as set + +# Python 2.3 doesn't have `sorted`. +try: + sorted = sorted +except NameError: + def sorted(iterable): + """A 2.3-compatible implementation of `sorted`.""" + lst = list(iterable) + lst.sort() + return lst + +# Python 2.3 doesn't have `reversed`. +try: + reversed = reversed +except NameError: + def reversed(iterable): + """A 2.3-compatible implementation of `reversed`.""" + lst = list(iterable) + return lst[::-1] + +# rpartition is new in 2.5 +try: + "".rpartition +except AttributeError: + def rpartition(s, sep): + """Implement s.rpartition(sep) for old Pythons.""" + i = s.rfind(sep) + if i == -1: + return ('', '', s) + else: + return (s[:i], sep, s[i+len(sep):]) +else: + def rpartition(s, sep): + """A common interface for new Pythons.""" + return s.rpartition(sep) + +# Pythons 2 and 3 differ on where to get StringIO +try: + from cStringIO import StringIO + BytesIO = StringIO +except ImportError: + from io import StringIO, BytesIO + +# What's a string called? +try: + string_class = basestring +except NameError: + string_class = str + +# Where do pickles come from? +try: + import cPickle as pickle +except ImportError: + import pickle + +# range or xrange? +try: + range = xrange +except NameError: + range = range + +# A function to iterate listlessly over a dict's items. +try: + {}.iteritems +except AttributeError: + def iitems(d): + """Produce the items from dict `d`.""" + return d.items() +else: + def iitems(d): + """Produce the items from dict `d`.""" + return d.iteritems() + +# Exec is a statement in Py2, a function in Py3 +if sys.version_info >= (3, 0): + def exec_code_object(code, global_map): + """A wrapper around exec().""" + exec(code, global_map) +else: + # OK, this is pretty gross. In Py2, exec was a statement, but that will + # be a syntax error if we try to put it in a Py3 file, even if it is never + # executed. So hide it inside an evaluated string literal instead. + eval( + compile( + "def exec_code_object(code, global_map):\n" + " exec code in global_map\n", + "<exec_function>", "exec" + ) + ) + +# Reading Python source and interpreting the coding comment is a big deal. +if sys.version_info >= (3, 0): + # Python 3.2 provides `tokenize.open`, the best way to open source files. + import tokenize + try: + open_source = tokenize.open # pylint: disable=E1101 + except AttributeError: + from io import TextIOWrapper + detect_encoding = tokenize.detect_encoding # pylint: disable=E1101 + # Copied from the 3.2 stdlib: + def open_source(fname): + """Open a file in read only mode using the encoding detected by + detect_encoding(). 
+ """ + buffer = open(fname, 'rb') + encoding, _ = detect_encoding(buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True) + text.mode = 'r' + return text +else: + def open_source(fname): + """Open a source file the best way.""" + return open(fname, "rU") + + +# Python 3.x is picky about bytes and strings, so provide methods to +# get them right, and make them no-ops in 2.x +if sys.version_info >= (3, 0): + def to_bytes(s): + """Convert string `s` to bytes.""" + return s.encode('utf8') + + def to_string(b): + """Convert bytes `b` to a string.""" + return b.decode('utf8') + + def binary_bytes(byte_values): + """Produce a byte string with the ints from `byte_values`.""" + return bytes(byte_values) + + def byte_to_int(byte_value): + """Turn an element of a bytes object into an int.""" + return byte_value + + def bytes_to_ints(bytes_value): + """Turn a bytes object into a sequence of ints.""" + # In Py3, iterating bytes gives ints. + return bytes_value + +else: + def to_bytes(s): + """Convert string `s` to bytes (no-op in 2.x).""" + return s + + def to_string(b): + """Convert bytes `b` to a string (no-op in 2.x).""" + return b + + def binary_bytes(byte_values): + """Produce a byte string with the ints from `byte_values`.""" + return "".join([chr(b) for b in byte_values]) + + def byte_to_int(byte_value): + """Turn an element of a bytes object into an int.""" + return ord(byte_value) + + def bytes_to_ints(bytes_value): + """Turn a bytes object into a sequence of ints.""" + for byte in bytes_value: + yield ord(byte) + +# Md5 is available in different places. +try: + import hashlib + md5 = hashlib.md5 +except ImportError: + import md5 + md5 = md5.new diff --git a/third_party/pycoverage/coverage/bytecode.py b/third_party/pycoverage/coverage/bytecode.py new file mode 100644 index 0000000..8536063 --- /dev/null +++ b/third_party/pycoverage/coverage/bytecode.py @@ -0,0 +1,75 @@ +"""Bytecode manipulation for coverage.py""" + +import opcode, types + +from coverage.backward import byte_to_int + +class ByteCode(object): + """A single bytecode.""" + def __init__(self): + # The offset of this bytecode in the code object. + self.offset = -1 + + # The opcode, defined in the `opcode` module. + self.op = -1 + + # The argument, a small integer, whose meaning depends on the opcode. + self.arg = -1 + + # The offset in the code object of the next bytecode. + self.next_offset = -1 + + # The offset to jump to. + self.jump_to = -1 + + +class ByteCodes(object): + """Iterator over byte codes in `code`. + + Returns `ByteCode` objects. + + """ + # pylint: disable=R0924 + def __init__(self, code): + self.code = code + + def __getitem__(self, i): + return byte_to_int(self.code[i]) + + def __iter__(self): + offset = 0 + while offset < len(self.code): + bc = ByteCode() + bc.op = self[offset] + bc.offset = offset + + next_offset = offset+1 + if bc.op >= opcode.HAVE_ARGUMENT: + bc.arg = self[offset+1] + 256*self[offset+2] + next_offset += 2 + + label = -1 + if bc.op in opcode.hasjrel: + label = next_offset + bc.arg + elif bc.op in opcode.hasjabs: + label = bc.arg + bc.jump_to = label + + bc.next_offset = offset = next_offset + yield bc + + +class CodeObjects(object): + """Iterate over all the code objects in `code`.""" + def __init__(self, code): + self.stack = [code] + + def __iter__(self): + while self.stack: + # We're going to return the code object on the stack, but first + # push its children for later returning. 
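# Standalone sketch of the CodeObjects idea above: every function or class body
# compiled from a module appears as a nested code object in co_consts, so a
# simple stack walk finds them all.  Names here are illustrative only.
import types

def all_code_objects(code):
    stack = [code]
    while stack:
        current = stack.pop()
        for const in current.co_consts:
            if isinstance(const, types.CodeType):
                stack.append(const)
        yield current

module_code = compile("def f():\n    def g():\n        pass\n", "<src>", "exec")
print([c.co_name for c in all_code_objects(module_code)])
# -> ['<module>', 'f', 'g']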
+ code = self.stack.pop() + for c in code.co_consts: + if isinstance(c, types.CodeType): + self.stack.append(c) + yield code diff --git a/third_party/pycoverage/coverage/cmdline.py b/third_party/pycoverage/coverage/cmdline.py new file mode 100644 index 0000000..ea112a8 --- /dev/null +++ b/third_party/pycoverage/coverage/cmdline.py @@ -0,0 +1,744 @@ +"""Command-line support for Coverage.""" + +import optparse, os, sys, time, traceback + +from coverage.backward import sorted # pylint: disable=W0622 +from coverage.execfile import run_python_file, run_python_module +from coverage.misc import CoverageException, ExceptionDuringRun, NoSource +from coverage.debug import info_formatter + + +class Opts(object): + """A namespace class for individual options we'll build parsers from.""" + + append = optparse.make_option( + '-a', '--append', action='store_false', dest="erase_first", + help="Append coverage data to .coverage, otherwise it is started " + "clean with each run." + ) + branch = optparse.make_option( + '', '--branch', action='store_true', + help="Measure branch coverage in addition to statement coverage." + ) + debug = optparse.make_option( + '', '--debug', action='store', metavar="OPTS", + help="Debug options, separated by commas" + ) + directory = optparse.make_option( + '-d', '--directory', action='store', metavar="DIR", + help="Write the output files to DIR." + ) + fail_under = optparse.make_option( + '', '--fail-under', action='store', metavar="MIN", type="int", + help="Exit with a status of 2 if the total coverage is less than MIN." + ) + help = optparse.make_option( + '-h', '--help', action='store_true', + help="Get help on this command." + ) + ignore_errors = optparse.make_option( + '-i', '--ignore-errors', action='store_true', + help="Ignore errors while reading source files." + ) + include = optparse.make_option( + '', '--include', action='store', + metavar="PAT1,PAT2,...", + help="Include files only when their filename path matches one of " + "these patterns. Usually needs quoting on the command line." + ) + pylib = optparse.make_option( + '-L', '--pylib', action='store_true', + help="Measure coverage even inside the Python installed library, " + "which isn't done by default." + ) + show_missing = optparse.make_option( + '-m', '--show-missing', action='store_true', + help="Show line numbers of statements in each module that weren't " + "executed." + ) + old_omit = optparse.make_option( + '-o', '--omit', action='store', + metavar="PAT1,PAT2,...", + help="Omit files when their filename matches one of these patterns. " + "Usually needs quoting on the command line." + ) + omit = optparse.make_option( + '', '--omit', action='store', + metavar="PAT1,PAT2,...", + help="Omit files when their filename matches one of these patterns. " + "Usually needs quoting on the command line." + ) + output_xml = optparse.make_option( + '-o', '', action='store', dest="outfile", + metavar="OUTFILE", + help="Write the XML report to this file. Defaults to 'coverage.xml'" + ) + parallel_mode = optparse.make_option( + '-p', '--parallel-mode', action='store_true', + help="Append the machine name, process id and random number to the " + ".coverage data file name to simplify collecting data from " + "many processes." + ) + module = optparse.make_option( + '-m', '--module', action='store_true', + help="<pyfile> is an importable Python module, not a script path, " + "to be run as 'python -m' would run it." + ) + rcfile = optparse.make_option( + '', '--rcfile', action='store', + help="Specify configuration file. 
Defaults to '.coveragerc'" + ) + source = optparse.make_option( + '', '--source', action='store', metavar="SRC1,SRC2,...", + help="A list of packages or directories of code to be measured." + ) + timid = optparse.make_option( + '', '--timid', action='store_true', + help="Use a simpler but slower trace method. Try this if you get " + "seemingly impossible results!" + ) + title = optparse.make_option( + '', '--title', action='store', metavar="TITLE", + help="A text string to use as the title on the HTML." + ) + version = optparse.make_option( + '', '--version', action='store_true', + help="Display version information and exit." + ) + + +class CoverageOptionParser(optparse.OptionParser, object): + """Base OptionParser for coverage. + + Problems don't exit the program. + Defaults are initialized for all options. + + """ + + def __init__(self, *args, **kwargs): + super(CoverageOptionParser, self).__init__( + add_help_option=False, *args, **kwargs + ) + self.set_defaults( + actions=[], + branch=None, + debug=None, + directory=None, + fail_under=None, + help=None, + ignore_errors=None, + include=None, + omit=None, + parallel_mode=None, + module=None, + pylib=None, + rcfile=True, + show_missing=None, + source=None, + timid=None, + title=None, + erase_first=None, + version=None, + ) + + self.disable_interspersed_args() + self.help_fn = self.help_noop + + def help_noop(self, error=None, topic=None, parser=None): + """No-op help function.""" + pass + + class OptionParserError(Exception): + """Used to stop the optparse error handler ending the process.""" + pass + + def parse_args(self, args=None, options=None): + """Call optparse.parse_args, but return a triple: + + (ok, options, args) + + """ + try: + options, args = \ + super(CoverageOptionParser, self).parse_args(args, options) + except self.OptionParserError: + return False, None, None + return True, options, args + + def error(self, msg): + """Override optparse.error so sys.exit doesn't get called.""" + self.help_fn(msg) + raise self.OptionParserError + + +class ClassicOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py classic arguments.""" + + def __init__(self): + super(ClassicOptionParser, self).__init__() + + self.add_action('-a', '--annotate', 'annotate') + self.add_action('-b', '--html', 'html') + self.add_action('-c', '--combine', 'combine') + self.add_action('-e', '--erase', 'erase') + self.add_action('-r', '--report', 'report') + self.add_action('-x', '--execute', 'execute') + + self.add_options([ + Opts.directory, + Opts.help, + Opts.ignore_errors, + Opts.pylib, + Opts.show_missing, + Opts.old_omit, + Opts.parallel_mode, + Opts.timid, + Opts.version, + ]) + + def add_action(self, dash, dashdash, action_code): + """Add a specialized option that is the action to execute.""" + option = self.add_option(dash, dashdash, action='callback', + callback=self._append_action + ) + option.action_code = action_code + + def _append_action(self, option, opt_unused, value_unused, parser): + """Callback for an option that adds to the `actions` list.""" + parser.values.actions.append(option.action_code) + + +class CmdOptionParser(CoverageOptionParser): + """Parse one of the new-style commands for coverage.py.""" + + def __init__(self, action, options=None, defaults=None, usage=None, + cmd=None, description=None + ): + """Create an OptionParser for a coverage command. + + `action` is the slug to put into `options.actions`. + `options` is a list of Option's for the command. + `defaults` is a dict of default value for options. 
+ `usage` is the usage string to display in help. + `cmd` is the command name, if different than `action`. + `description` is the description of the command, for the help text. + + """ + if usage: + usage = "%prog " + usage + super(CmdOptionParser, self).__init__( + prog="coverage %s" % (cmd or action), + usage=usage, + description=description, + ) + self.set_defaults(actions=[action], **(defaults or {})) + if options: + self.add_options(options) + self.cmd = cmd or action + + def __eq__(self, other): + # A convenience equality, so that I can put strings in unit test + # results, and they will compare equal to objects. + return (other == "<CmdOptionParser:%s>" % self.cmd) + +GLOBAL_ARGS = [ + Opts.rcfile, + Opts.help, + ] + +CMDS = { + 'annotate': CmdOptionParser("annotate", + [ + Opts.directory, + Opts.ignore_errors, + Opts.omit, + Opts.include, + ] + GLOBAL_ARGS, + usage = "[options] [modules]", + description = "Make annotated copies of the given files, marking " + "statements that are executed with > and statements that are " + "missed with !." + ), + + 'combine': CmdOptionParser("combine", GLOBAL_ARGS, + usage = " ", + description = "Combine data from multiple coverage files collected " + "with 'run -p'. The combined results are written to a single " + "file representing the union of the data." + ), + + 'debug': CmdOptionParser("debug", GLOBAL_ARGS, + usage = "<topic>", + description = "Display information on the internals of coverage.py, " + "for diagnosing problems. " + "Topics are 'data' to show a summary of the collected data, " + "or 'sys' to show installation information." + ), + + 'erase': CmdOptionParser("erase", GLOBAL_ARGS, + usage = " ", + description = "Erase previously collected coverage data." + ), + + 'help': CmdOptionParser("help", GLOBAL_ARGS, + usage = "[command]", + description = "Describe how to use coverage.py" + ), + + 'html': CmdOptionParser("html", + [ + Opts.directory, + Opts.fail_under, + Opts.ignore_errors, + Opts.omit, + Opts.include, + Opts.title, + ] + GLOBAL_ARGS, + usage = "[options] [modules]", + description = "Create an HTML report of the coverage of the files. " + "Each file gets its own page, with the source decorated to show " + "executed, excluded, and missed lines." + ), + + 'report': CmdOptionParser("report", + [ + Opts.fail_under, + Opts.ignore_errors, + Opts.omit, + Opts.include, + Opts.show_missing, + ] + GLOBAL_ARGS, + usage = "[options] [modules]", + description = "Report coverage statistics on modules." + ), + + 'run': CmdOptionParser("execute", + [ + Opts.append, + Opts.branch, + Opts.debug, + Opts.pylib, + Opts.parallel_mode, + Opts.module, + Opts.timid, + Opts.source, + Opts.omit, + Opts.include, + ] + GLOBAL_ARGS, + defaults = {'erase_first': True}, + cmd = "run", + usage = "[options] <pyfile> [program options]", + description = "Run a Python program, measuring code execution." + ), + + 'xml': CmdOptionParser("xml", + [ + Opts.fail_under, + Opts.ignore_errors, + Opts.omit, + Opts.include, + Opts.output_xml, + ] + GLOBAL_ARGS, + cmd = "xml", + usage = "[options] [modules]", + description = "Generate an XML report of coverage results." + ), + } + + +OK, ERR, FAIL_UNDER = 0, 1, 2 + + +class CoverageScript(object): + """The command-line interface to Coverage.""" + + def __init__(self, _covpkg=None, _run_python_file=None, + _run_python_module=None, _help_fn=None): + # _covpkg is for dependency injection, so we can test this code. 
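# Hedged sketch of the dependency-injection hooks above: a test can drive the
# command line with fakes instead of the real coverage machinery.  The Fake*
# names and 'prog.py' are illustrative, not part of coverage.py.
from coverage.cmdline import CoverageScript

class FakeCoverage(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    def erase(self): pass
    def start(self): pass
    def stop(self): pass
    def save(self): pass

class FakeCovPkg(object):
    coverage = FakeCoverage

def fake_run_python_file(filename, args):
    print("would run %s with args %r" % (filename, args))

script = CoverageScript(_covpkg=FakeCovPkg(),
                        _run_python_file=fake_run_python_file)
status = script.command_line(['run', 'prog.py'])
print("exit status:", status)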
+ if _covpkg: + self.covpkg = _covpkg + else: + import coverage + self.covpkg = coverage + + # For dependency injection: + self.run_python_file = _run_python_file or run_python_file + self.run_python_module = _run_python_module or run_python_module + self.help_fn = _help_fn or self.help + self.classic = False + + self.coverage = None + + def command_line(self, argv): + """The bulk of the command line interface to Coverage. + + `argv` is the argument list to process. + + Returns 0 if all is well, 1 if something went wrong. + + """ + # Collect the command-line options. + if not argv: + self.help_fn(topic='minimum_help') + return OK + + # The command syntax we parse depends on the first argument. Classic + # syntax always starts with an option. + self.classic = argv[0].startswith('-') + if self.classic: + parser = ClassicOptionParser() + else: + parser = CMDS.get(argv[0]) + if not parser: + self.help_fn("Unknown command: '%s'" % argv[0]) + return ERR + argv = argv[1:] + + parser.help_fn = self.help_fn + ok, options, args = parser.parse_args(argv) + if not ok: + return ERR + + # Handle help and version. + if self.do_help(options, args, parser): + return OK + + # Check for conflicts and problems in the options. + if not self.args_ok(options, args): + return ERR + + # Listify the list options. + source = unshell_list(options.source) + omit = unshell_list(options.omit) + include = unshell_list(options.include) + debug = unshell_list(options.debug) + + # Do something. + self.coverage = self.covpkg.coverage( + data_suffix = options.parallel_mode, + cover_pylib = options.pylib, + timid = options.timid, + branch = options.branch, + config_file = options.rcfile, + source = source, + omit = omit, + include = include, + debug = debug, + ) + + if 'debug' in options.actions: + return self.do_debug(args) + + if 'erase' in options.actions or options.erase_first: + self.coverage.erase() + else: + self.coverage.load() + + if 'execute' in options.actions: + self.do_execute(options, args) + + if 'combine' in options.actions: + self.coverage.combine() + self.coverage.save() + + # Remaining actions are reporting, with some common options. + report_args = dict( + morfs = args, + ignore_errors = options.ignore_errors, + omit = omit, + include = include, + ) + + if 'report' in options.actions: + total = self.coverage.report( + show_missing=options.show_missing, **report_args) + if 'annotate' in options.actions: + self.coverage.annotate( + directory=options.directory, **report_args) + if 'html' in options.actions: + total = self.coverage.html_report( + directory=options.directory, title=options.title, + **report_args) + if 'xml' in options.actions: + outfile = options.outfile + total = self.coverage.xml_report(outfile=outfile, **report_args) + + if options.fail_under is not None: + if total >= options.fail_under: + return OK + else: + return FAIL_UNDER + else: + return OK + + def help(self, error=None, topic=None, parser=None): + """Display an error message, or the named topic.""" + assert error or topic or parser + if error: + print(error) + print("Use 'coverage help' for help.") + elif parser: + print(parser.format_help().strip()) + else: + help_msg = HELP_TOPICS.get(topic, '').strip() + if help_msg: + print(help_msg % self.covpkg.__dict__) + else: + print("Don't know topic %r" % topic) + + def do_help(self, options, args, parser): + """Deal with help requests. + + Return True if it handled the request, False if not. + + """ + # Handle help. 
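# Illustrative sketch of the OK/ERR/FAIL_UNDER convention defined above: a
# wrapper script can distinguish "coverage too low" (2) from a real error (1).
# Assumes a .coverage data file already exists from an earlier 'coverage run'.
from coverage.cmdline import CoverageScript, OK, ERR, FAIL_UNDER

status = CoverageScript().command_line(['report', '--fail-under', '80'])
if status == OK:
    print("coverage target met")
elif status == FAIL_UNDER:
    print("total coverage is below the --fail-under threshold")
elif status == ERR:
    print("coverage.py reported an error")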
+ if options.help: + if self.classic: + self.help_fn(topic='help') + else: + self.help_fn(parser=parser) + return True + + if "help" in options.actions: + if args: + for a in args: + parser = CMDS.get(a) + if parser: + self.help_fn(parser=parser) + else: + self.help_fn(topic=a) + else: + self.help_fn(topic='help') + return True + + # Handle version. + if options.version: + self.help_fn(topic='version') + return True + + return False + + def args_ok(self, options, args): + """Check for conflicts and problems in the options. + + Returns True if everything is ok, or False if not. + + """ + for i in ['erase', 'execute']: + for j in ['annotate', 'html', 'report', 'combine']: + if (i in options.actions) and (j in options.actions): + self.help_fn("You can't specify the '%s' and '%s' " + "options at the same time." % (i, j)) + return False + + if not options.actions: + self.help_fn( + "You must specify at least one of -e, -x, -c, -r, -a, or -b." + ) + return False + args_allowed = ( + 'execute' in options.actions or + 'annotate' in options.actions or + 'html' in options.actions or + 'debug' in options.actions or + 'report' in options.actions or + 'xml' in options.actions + ) + if not args_allowed and args: + self.help_fn("Unexpected arguments: %s" % " ".join(args)) + return False + + if 'execute' in options.actions and not args: + self.help_fn("Nothing to do.") + return False + + return True + + def do_execute(self, options, args): + """Implementation of 'coverage run'.""" + + # Set the first path element properly. + old_path0 = sys.path[0] + + # Run the script. + self.coverage.start() + code_ran = True + try: + try: + if options.module: + sys.path[0] = '' + self.run_python_module(args[0], args) + else: + filename = args[0] + sys.path[0] = os.path.abspath(os.path.dirname(filename)) + self.run_python_file(filename, args) + except NoSource: + code_ran = False + raise + finally: + self.coverage.stop() + if code_ran: + self.coverage.save() + + # Restore the old path + sys.path[0] = old_path0 + + def do_debug(self, args): + """Implementation of 'coverage debug'.""" + + if not args: + self.help_fn("What information would you like: data, sys?") + return ERR + for info in args: + if info == 'sys': + print("-- sys ----------------------------------------") + for line in info_formatter(self.coverage.sysinfo()): + print(" %s" % line) + elif info == 'data': + print("-- data ---------------------------------------") + self.coverage.load() + print("path: %s" % self.coverage.data.filename) + print("has_arcs: %r" % self.coverage.data.has_arcs()) + summary = self.coverage.data.summary(fullpath=True) + if summary: + filenames = sorted(summary.keys()) + print("\n%d files:" % len(filenames)) + for f in filenames: + print("%s: %d lines" % (f, summary[f])) + else: + print("No data collected") + else: + self.help_fn("Don't know what you mean by %r" % info) + return ERR + return OK + + +def unshell_list(s): + """Turn a command-line argument into a list.""" + if not s: + return None + if sys.platform == 'win32': + # When running coverage as coverage.exe, some of the behavior + # of the shell is emulated: wildcards are expanded into a list of + # filenames. So you have to single-quote patterns on the command + # line, but (not) helpfully, the single quotes are included in the + # argument, so we have to strip them off here. 
+ s = s.strip("'") + return s.split(',') + + +HELP_TOPICS = { +# ------------------------- +'classic': +r"""Coverage.py version %(__version__)s +Measure, collect, and report on code coverage in Python programs. + +Usage: + +coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] + Execute the module, passing the given command-line arguments, collecting + coverage data. With the -p option, include the machine name and process + id in the .coverage file name. With -L, measure coverage even inside the + Python installed library, which isn't done by default. With --timid, use a + simpler but slower trace method. + +coverage -e + Erase collected coverage data. + +coverage -c + Combine data from multiple coverage files (as created by -p option above) + and store it into a single file representing the union of the coverage. + +coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] + Report on the statement coverage for the given files. With the -m + option, show line numbers of the statements that weren't executed. + +coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...] + Create an HTML report of the coverage of the given files. Each file gets + its own page, with the file listing decorated to show executed, excluded, + and missed lines. + +coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...] + Make annotated copies of the given files, marking statements that + are executed with > and statements that are missed with !. + +-d DIR + Write output files for -b or -a to this directory. + +-i Ignore errors while reporting or annotating. + +-o DIR,... + Omit reporting or annotating files when their filename path starts with + a directory listed in the omit list. + e.g. coverage -i -r -o c:\python25,lib\enthought\traits + +Coverage data is saved in the file .coverage by default. Set the +COVERAGE_FILE environment variable to save it somewhere else. +""", +# ------------------------- +'help': """\ +Coverage.py, version %(__version__)s +Measure, collect, and report on code coverage in Python programs. + +usage: coverage <command> [options] [args] + +Commands: + annotate Annotate source files with execution information. + combine Combine a number of data files. + erase Erase previously collected coverage data. + help Get help on using coverage.py. + html Create an HTML report. + report Report coverage stats on modules. + run Run a Python program and measure code execution. + xml Create an XML report of coverage results. + +Use "coverage help <command>" for detailed help on any command. +Use "coverage help classic" for help on older command syntax. +For more information, see %(__url__)s +""", +# ------------------------- +'minimum_help': """\ +Code coverage for Python. Use 'coverage help' for help. +""", +# ------------------------- +'version': """\ +Coverage.py, version %(__version__)s. %(__url__)s +""", +} + + +def main(argv=None): + """The main entry point to Coverage. + + This is installed as the script entry point. + + """ + if argv is None: + argv = sys.argv[1:] + try: + start = time.clock() + status = CoverageScript().command_line(argv) + end = time.clock() + if 0: + print("time: %.3fs" % (end - start)) + except ExceptionDuringRun: + # An exception was caught while running the product code. The + # sys.exc_info() return tuple is packed into an ExceptionDuringRun + # exception. + _, err, _ = sys.exc_info() + traceback.print_exception(*err.args) + status = ERR + except CoverageException: + # A controlled error inside coverage.py: print the message to the user. 
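# Hedged sketch of how the main() function above is typically wired up: both
# "python -m coverage" (via coverage/__main__.py, added earlier in this diff)
# and a console-script shim reduce to the same call.
import sys
from coverage.cmdline import main

if __name__ == '__main__':
    # main() returns the status described above (OK/ERR/FAIL_UNDER or a
    # SystemExit code), which is handed straight back to the shell.
    sys.exit(main(sys.argv[1:]))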
+ _, err, _ = sys.exc_info() + print(err) + status = ERR + except SystemExit: + # The user called `sys.exit()`. Exit with their argument, if any. + _, err, _ = sys.exc_info() + if err.args: + status = err.args[0] + else: + status = None + return status diff --git a/third_party/pycoverage/coverage/codeunit.py b/third_party/pycoverage/coverage/codeunit.py new file mode 100644 index 0000000..ca1ae5c --- /dev/null +++ b/third_party/pycoverage/coverage/codeunit.py @@ -0,0 +1,145 @@ +"""Code unit (module) handling for Coverage.""" + +import glob, os + +from coverage.backward import open_source, string_class, StringIO +from coverage.misc import CoverageException + + +def code_unit_factory(morfs, file_locator): + """Construct a list of CodeUnits from polymorphic inputs. + + `morfs` is a module or a filename, or a list of same. + + `file_locator` is a FileLocator that can help resolve filenames. + + Returns a list of CodeUnit objects. + + """ + # Be sure we have a list. + if not isinstance(morfs, (list, tuple)): + morfs = [morfs] + + # On Windows, the shell doesn't expand wildcards. Do it here. + globbed = [] + for morf in morfs: + if isinstance(morf, string_class) and ('?' in morf or '*' in morf): + globbed.extend(glob.glob(morf)) + else: + globbed.append(morf) + morfs = globbed + + code_units = [CodeUnit(morf, file_locator) for morf in morfs] + + return code_units + + +class CodeUnit(object): + """Code unit: a filename or module. + + Instance attributes: + + `name` is a human-readable name for this code unit. + `filename` is the os path from which we can read the source. + `relative` is a boolean. + + """ + def __init__(self, morf, file_locator): + self.file_locator = file_locator + + if hasattr(morf, '__file__'): + f = morf.__file__ + else: + f = morf + # .pyc files should always refer to a .py instead. + if f.endswith('.pyc') or f.endswith('.pyo'): + f = f[:-1] + elif f.endswith('$py.class'): # Jython + f = f[:-9] + ".py" + self.filename = self.file_locator.canonical_filename(f) + + if hasattr(morf, '__name__'): + n = modname = morf.__name__ + self.relative = True + else: + n = os.path.splitext(morf)[0] + rel = self.file_locator.relative_filename(n) + if os.path.isabs(n): + self.relative = (rel != n) + else: + self.relative = True + n = rel + modname = None + self.name = n + self.modname = modname + + def __repr__(self): + return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename) + + # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all + # of them defined. + + def __lt__(self, other): + return self.name < other.name + def __le__(self, other): + return self.name <= other.name + def __eq__(self, other): + return self.name == other.name + def __ne__(self, other): + return self.name != other.name + def __gt__(self, other): + return self.name > other.name + def __ge__(self, other): + return self.name >= other.name + + def flat_rootname(self): + """A base for a flat filename to correspond to this code unit. + + Useful for writing files about the code where you want all the files in + the same directory, but need to differentiate same-named files from + different directories. 
+ + For example, the file a/b/c.py might return 'a_b_c' + + """ + if self.modname: + return self.modname.replace('.', '_') + else: + root = os.path.splitdrive(self.name)[1] + return root.replace('\\', '_').replace('/', '_').replace('.', '_') + + def source_file(self): + """Return an open file for reading the source of the code unit.""" + if os.path.exists(self.filename): + # A regular text file: open it. + return open_source(self.filename) + + # Maybe it's in a zip file? + source = self.file_locator.get_zip_data(self.filename) + if source is not None: + return StringIO(source) + + # Couldn't find source. + raise CoverageException( + "No source for code '%s'." % self.filename + ) + + def should_be_python(self): + """Does it seem like this file should contain Python? + + This is used to decide if a file reported as part of the exection of + a program was really likely to have contained Python in the first + place. + + """ + # Get the file extension. + _, ext = os.path.splitext(self.filename) + + # Anything named *.py* should be Python. + if ext.startswith('.py'): + return True + # A file with no extension should be Python. + if not ext: + return True + # Everything else is probably not Python. + return False diff --git a/third_party/pycoverage/coverage/collector.py b/third_party/pycoverage/coverage/collector.py new file mode 100644 index 0000000..8ba7d87 --- /dev/null +++ b/third_party/pycoverage/coverage/collector.py @@ -0,0 +1,353 @@ +"""Raw data collector for Coverage.""" + +import os, sys, threading + +try: + # Use the C extension code when we can, for speed. + from coverage.tracer import CTracer # pylint: disable=F0401,E0611 +except ImportError: + # Couldn't import the C extension, maybe it isn't built. + if os.getenv('COVERAGE_TEST_TRACER') == 'c': + # During testing, we use the COVERAGE_TEST_TRACER env var to indicate + # that we've fiddled with the environment to test this fallback code. + # If we thought we had a C tracer, but couldn't import it, then exit + # quickly and clearly instead of dribbling confusing errors. I'm using + # sys.exit here instead of an exception because an exception here + # causes all sorts of other noise in unittest. + sys.stderr.write( + "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n" + ) + sys.exit(1) + CTracer = None + + +class PyTracer(object): + """Python implementation of the raw data tracer.""" + + # Because of poor implementations of trace-function-manipulating tools, + # the Python trace function must be kept very simple. In particular, there + # must be only one function ever set as the trace function, both through + # sys.settrace, and as the return value from the trace function. Put + # another way, the trace function must always return itself. It cannot + # swap in other functions, or return None to avoid tracing a particular + # frame. + # + # The trace manipulator that introduced this restriction is DecoratorTools, + # which sets a trace function, and then later restores the pre-existing one + # by calling sys.settrace with a function it found in the current frame. + # + # Systems that use DecoratorTools (or similar trace manipulations) must use + # PyTracer to get accurate results. The command-line --timid argument is + # used to force the use of this tracer. 
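# Minimal standalone sketch of the constraint described above: a sys.settrace
# callback that counts executed lines and always returns itself, never another
# function and never None, so trace-manipulating tools keep working.
import sys

line_counts = {}

def simple_trace(frame, event, arg):
    if event == 'line':
        key = (frame.f_code.co_filename, frame.f_lineno)
        line_counts[key] = line_counts.get(key, 0) + 1
    # Always return the same function object, per the restriction above.
    return simple_trace

def demo():
    total = 0
    for i in range(3):
        total += i
    return total

sys.settrace(simple_trace)
demo()
sys.settrace(None)
print(len(line_counts), "distinct lines were executed")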
+ + def __init__(self): + self.data = None + self.should_trace = None + self.should_trace_cache = None + self.warn = None + self.cur_file_data = None + self.last_line = 0 + self.data_stack = [] + self.last_exc_back = None + self.last_exc_firstlineno = 0 + self.arcs = False + self.thread = None + self.stopped = False + + def _trace(self, frame, event, arg_unused): + """The trace function passed to sys.settrace.""" + + if self.stopped: + return + + if 0: + sys.stderr.write("trace event: %s %r @%d\n" % ( + event, frame.f_code.co_filename, frame.f_lineno + )) + + if self.last_exc_back: + if frame == self.last_exc_back: + # Someone forgot a return event. + if self.arcs and self.cur_file_data: + pair = (self.last_line, -self.last_exc_firstlineno) + self.cur_file_data[pair] = None + self.cur_file_data, self.last_line = self.data_stack.pop() + self.last_exc_back = None + + if event == 'call': + # Entering a new function context. Decide if we should trace + # in this file. + self.data_stack.append((self.cur_file_data, self.last_line)) + filename = frame.f_code.co_filename + if filename not in self.should_trace_cache: + tracename = self.should_trace(filename, frame) + self.should_trace_cache[filename] = tracename + else: + tracename = self.should_trace_cache[filename] + #print("called, stack is %d deep, tracename is %r" % ( + # len(self.data_stack), tracename)) + if tracename: + if tracename not in self.data: + self.data[tracename] = {} + self.cur_file_data = self.data[tracename] + else: + self.cur_file_data = None + # Set the last_line to -1 because the next arc will be entering a + # code block, indicated by (-1, n). + self.last_line = -1 + elif event == 'line': + # Record an executed line. + if self.cur_file_data is not None: + if self.arcs: + #print("lin", self.last_line, frame.f_lineno) + self.cur_file_data[(self.last_line, frame.f_lineno)] = None + else: + #print("lin", frame.f_lineno) + self.cur_file_data[frame.f_lineno] = None + self.last_line = frame.f_lineno + elif event == 'return': + if self.arcs and self.cur_file_data: + first = frame.f_code.co_firstlineno + self.cur_file_data[(self.last_line, -first)] = None + # Leaving this function, pop the filename stack. + self.cur_file_data, self.last_line = self.data_stack.pop() + #print("returned, stack is %d deep" % (len(self.data_stack))) + elif event == 'exception': + #print("exc", self.last_line, frame.f_lineno) + self.last_exc_back = frame.f_back + self.last_exc_firstlineno = frame.f_code.co_firstlineno + return self._trace + + def start(self): + """Start this Tracer. + + Return a Python function suitable for use with sys.settrace(). + + """ + self.thread = threading.currentThread() + sys.settrace(self._trace) + return self._trace + + def stop(self): + """Stop this Tracer.""" + self.stopped = True + if self.thread != threading.currentThread(): + # Called on a different thread than started us: we can't unhook + # ourseves, but we've set the flag that we should stop, so we won't + # do any more tracing. + return + + if hasattr(sys, "gettrace") and self.warn: + if sys.gettrace() != self._trace: + msg = "Trace function changed, measurement is likely wrong: %r" + self.warn(msg % (sys.gettrace(),)) + #print("Stopping tracer on %s" % threading.current_thread().ident) + sys.settrace(None) + + def get_stats(self): + """Return a dictionary of statistics, or None.""" + return None + + +class Collector(object): + """Collects trace data. + + Creates a Tracer object for each thread, since they track stack + information. 
Each Tracer points to the same shared data, contributing + traced data points. + + When the Collector is started, it creates a Tracer for the current thread, + and installs a function to create Tracers for each new thread started. + When the Collector is stopped, all active Tracers are stopped. + + Threads started while the Collector is stopped will never have Tracers + associated with them. + + """ + + # The stack of active Collectors. Collectors are added here when started, + # and popped when stopped. Collectors on the stack are paused when not + # the top, and resumed when they become the top again. + _collectors = [] + + def __init__(self, should_trace, timid, branch, warn): + """Create a collector. + + `should_trace` is a function, taking a filename, and returning a + canonicalized filename, or None depending on whether the file should + be traced or not. + + If `timid` is true, then a slower simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions make the faster more sophisticated trace function not + operate properly. + + If `branch` is true, then branches will be measured. This involves + collecting data on which statements followed each other (arcs). Use + `get_arc_data` to get the arc data. + + `warn` is a warning function, taking a single string message argument, + to be used if a warning needs to be issued. + + """ + self.should_trace = should_trace + self.warn = warn + self.branch = branch + self.reset() + + if timid: + # Being timid: use the simple Python trace function. + self._trace_class = PyTracer + else: + # Being fast: use the C Tracer if it is available, else the Python + # trace function. + self._trace_class = CTracer or PyTracer + + def __repr__(self): + return "<Collector at 0x%x>" % id(self) + + def tracer_name(self): + """Return the class name of the tracer we're using.""" + return self._trace_class.__name__ + + def reset(self): + """Clear collected data, and prepare to collect more.""" + # A dictionary mapping filenames to dicts with linenumber keys, + # or mapping filenames to dicts with linenumber pairs as keys. + self.data = {} + + # A cache of the results from should_trace, the decision about whether + # to trace execution in a file. A dict of filename to (filename or + # None). + self.should_trace_cache = {} + + # Our active Tracers. + self.tracers = [] + + def _start_tracer(self): + """Start a new Tracer object, and store it in self.tracers.""" + tracer = self._trace_class() + tracer.data = self.data + tracer.arcs = self.branch + tracer.should_trace = self.should_trace + tracer.should_trace_cache = self.should_trace_cache + tracer.warn = self.warn + fn = tracer.start() + self.tracers.append(tracer) + return fn + + # The trace function has to be set individually on each thread before + # execution begins. Ironically, the only support the threading module has + # for running code before the thread main is the tracing function. So we + # install this as a trace function, and the first time it's called, it does + # the real trace installation. + + def _installation_trace(self, frame_unused, event_unused, arg_unused): + """Called on new threads, installs the real tracer.""" + # Remove ourselves as the trace function + sys.settrace(None) + # Install the real tracer. + fn = self._start_tracer() + # Invoke the real trace function with the current event, to be sure + # not to lose an event. 
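# Illustrative sketch of the threading.settrace() bootstrap used above: the
# threading module only lets you run code at thread start through a trace
# function, so that hook is used once per new thread to install the real
# tracer.  The tracer below is a no-op stand-in, not coverage's own.
import sys
import threading

def _install_real_tracer(frame, event, arg):
    # First event in the new thread: drop this installer, switch to the real
    # per-thread tracer, and forward the current event so it is not lost.
    sys.settrace(None)
    def real_tracer(frame, event, arg):
        return real_tracer
    sys.settrace(real_tracer)
    return real_tracer(frame, event, arg)

threading.settrace(_install_real_tracer)
t = threading.Thread(target=lambda: sum(range(10)))
t.start()
t.join()
threading.settrace(None)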
+ if fn: + fn = fn(frame_unused, event_unused, arg_unused) + # Return the new trace function to continue tracing in this scope. + return fn + + def start(self): + """Start collecting trace information.""" + if self._collectors: + self._collectors[-1].pause() + self._collectors.append(self) + #print("Started: %r" % self._collectors, file=sys.stderr) + + # Check to see whether we had a fullcoverage tracer installed. + traces0 = [] + if hasattr(sys, "gettrace"): + fn0 = sys.gettrace() + if fn0: + tracer0 = getattr(fn0, '__self__', None) + if tracer0: + traces0 = getattr(tracer0, 'traces', []) + + # Install the tracer on this thread. + fn = self._start_tracer() + + for args in traces0: + (frame, event, arg), lineno = args + try: + fn(frame, event, arg, lineno=lineno) + except TypeError: + raise Exception( + "fullcoverage must be run with the C trace function." + ) + + # Install our installation tracer in threading, to jump start other + # threads. + threading.settrace(self._installation_trace) + + def stop(self): + """Stop collecting trace information.""" + #print >>sys.stderr, "Stopping: %r" % self._collectors + assert self._collectors + assert self._collectors[-1] is self + + self.pause() + self.tracers = [] + + # Remove this Collector from the stack, and resume the one underneath + # (if any). + self._collectors.pop() + if self._collectors: + self._collectors[-1].resume() + + def pause(self): + """Pause tracing, but be prepared to `resume`.""" + for tracer in self.tracers: + tracer.stop() + stats = tracer.get_stats() + if stats: + print("\nCoverage.py tracer stats:") + for k in sorted(stats.keys()): + print("%16s: %s" % (k, stats[k])) + threading.settrace(None) + + def resume(self): + """Resume tracing after a `pause`.""" + for tracer in self.tracers: + tracer.start() + threading.settrace(self._installation_trace) + + def get_line_data(self): + """Return the line data collected. + + Data is { filename: { lineno: None, ...}, ...} + + """ + if self.branch: + # If we were measuring branches, then we have to re-build the dict + # to show line data. + line_data = {} + for f, arcs in self.data.items(): + line_data[f] = ldf = {} + for l1, _ in list(arcs.keys()): + if l1: + ldf[l1] = None + return line_data + else: + return self.data + + def get_arc_data(self): + """Return the arc data collected. + + Data is { filename: { (l1, l2): None, ...}, ...} + + Note that no data is collected or returned if the Collector wasn't + created with `branch` true. 
+ + """ + if self.branch: + return self.data + else: + return {} diff --git a/third_party/pycoverage/coverage/config.py b/third_party/pycoverage/coverage/config.py new file mode 100644 index 0000000..87318ff --- /dev/null +++ b/third_party/pycoverage/coverage/config.py @@ -0,0 +1,213 @@ +"""Config file for coverage.py""" + +import os, re, sys +from coverage.backward import string_class, iitems + +# In py3, # ConfigParser was renamed to the more-standard configparser +try: + import configparser # pylint: disable=F0401 +except ImportError: + import ConfigParser as configparser + + +class HandyConfigParser(configparser.RawConfigParser): + """Our specialization of ConfigParser.""" + + def read(self, filename): + """Read a filename as UTF-8 configuration data.""" + kwargs = {} + if sys.version_info >= (3, 2): + kwargs['encoding'] = "utf-8" + return configparser.RawConfigParser.read(self, filename, **kwargs) + + def get(self, *args, **kwargs): + v = configparser.RawConfigParser.get(self, *args, **kwargs) + def dollar_replace(m): + """Called for each $replacement.""" + # Only one of the groups will have matched, just get its text. + word = [w for w in m.groups() if w is not None][0] + if word == "$": + return "$" + else: + return os.environ.get(word, '') + + dollar_pattern = r"""(?x) # Use extended regex syntax + \$(?: # A dollar sign, then + (?P<v1>\w+) | # a plain word, + {(?P<v2>\w+)} | # or a {-wrapped word, + (?P<char>[$]) # or a dollar sign. + ) + """ + v = re.sub(dollar_pattern, dollar_replace, v) + return v + + def getlist(self, section, option): + """Read a list of strings. + + The value of `section` and `option` is treated as a comma- and newline- + separated list of strings. Each value is stripped of whitespace. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + values = [] + for value_line in value_list.split('\n'): + for value in value_line.split(','): + value = value.strip() + if value: + values.append(value) + return values + + def getlinelist(self, section, option): + """Read a list of full-line strings. + + The value of `section` and `option` is treated as a newline-separated + list of strings. Each value is stripped of whitespace. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + return list(filter(None, value_list.split('\n'))) + + +# The default line exclusion regexes +DEFAULT_EXCLUDE = [ + '(?i)# *pragma[: ]*no *cover', + ] + +# The default partial branch regexes, to be modified by the user. +DEFAULT_PARTIAL = [ + '(?i)# *pragma[: ]*no *branch', + ] + +# The default partial branch regexes, based on Python semantics. +# These are any Python branching constructs that can't actually execute all +# their branches. +DEFAULT_PARTIAL_ALWAYS = [ + 'while (True|1|False|0):', + 'if (True|1|False|0):', + ] + + +class CoverageConfig(object): + """Coverage.py configuration. + + The attributes of this class are the various settings that control the + operation of coverage.py. + + """ + def __init__(self): + """Initialize the configuration attributes to their defaults.""" + # Metadata about the config. 
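# Standalone sketch of the $-expansion performed by HandyConfigParser.get()
# above: $WORD and ${WORD} pull values from the environment, and $$ escapes a
# literal dollar sign.  The regex is the one shown in the diff.
import os
import re

dollar_pattern = r"""(?x)
    \$(?:
        (?P<v1>\w+) |
        {(?P<v2>\w+)} |
        (?P<char>[$])
    )
"""

def expand(value):
    def repl(m):
        word = [w for w in m.groups() if w is not None][0]
        return "$" if word == "$" else os.environ.get(word, '')
    return re.sub(dollar_pattern, repl, value)

os.environ['BUILDNUM'] = '42'
print(expand("build/$BUILDNUM/src"))       # build/42/src
print(expand("cost is $$5, dir ${HOME}"))  # $$ becomes a literal $, HOME expands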
+ self.attempted_config_files = [] + self.config_files = [] + + # Defaults for [run] + self.branch = False + self.cover_pylib = False + self.data_file = ".coverage" + self.parallel = False + self.timid = False + self.source = None + self.debug = [] + + # Defaults for [report] + self.exclude_list = DEFAULT_EXCLUDE[:] + self.ignore_errors = False + self.include = None + self.omit = None + self.partial_list = DEFAULT_PARTIAL[:] + self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.precision = 0 + self.show_missing = False + + # Defaults for [html] + self.html_dir = "htmlcov" + self.extra_css = None + self.html_title = "Coverage report" + + # Defaults for [xml] + self.xml_output = "coverage.xml" + + # Defaults for [paths] + self.paths = {} + + def from_environment(self, env_var): + """Read configuration from the `env_var` environment variable.""" + # Timidity: for nose users, read an environment variable. This is a + # cheap hack, since the rest of the command line arguments aren't + # recognized, but it solves some users' problems. + env = os.environ.get(env_var, '') + if env: + self.timid = ('--timid' in env) + + MUST_BE_LIST = ["omit", "include", "debug"] + + def from_args(self, **kwargs): + """Read config values from `kwargs`.""" + for k, v in iitems(kwargs): + if v is not None: + if k in self.MUST_BE_LIST and isinstance(v, string_class): + v = [v] + setattr(self, k, v) + + def from_file(self, filename): + """Read configuration from a .rc file. + + `filename` is a file name to read. + + """ + self.attempted_config_files.append(filename) + + cp = HandyConfigParser() + files_read = cp.read(filename) + if files_read is not None: # return value changed in 2.4 + self.config_files.extend(files_read) + + for option_spec in self.CONFIG_FILE_OPTIONS: + self.set_attr_from_config_option(cp, *option_spec) + + # [paths] is special + if cp.has_section('paths'): + for option in cp.options('paths'): + self.paths[option] = cp.getlist('paths', option) + + CONFIG_FILE_OPTIONS = [ + # [run] + ('branch', 'run:branch', 'boolean'), + ('cover_pylib', 'run:cover_pylib', 'boolean'), + ('data_file', 'run:data_file'), + ('debug', 'run:debug', 'list'), + ('include', 'run:include', 'list'), + ('omit', 'run:omit', 'list'), + ('parallel', 'run:parallel', 'boolean'), + ('source', 'run:source', 'list'), + ('timid', 'run:timid', 'boolean'), + + # [report] + ('exclude_list', 'report:exclude_lines', 'linelist'), + ('ignore_errors', 'report:ignore_errors', 'boolean'), + ('include', 'report:include', 'list'), + ('omit', 'report:omit', 'list'), + ('partial_list', 'report:partial_branches', 'linelist'), + ('partial_always_list', 'report:partial_branches_always', 'linelist'), + ('precision', 'report:precision', 'int'), + ('show_missing', 'report:show_missing', 'boolean'), + + # [html] + ('html_dir', 'html:directory'), + ('extra_css', 'html:extra_css'), + ('html_title', 'html:title'), + + # [xml] + ('xml_output', 'xml:output'), + ] + + def set_attr_from_config_option(self, cp, attr, where, type_=''): + """Set an attribute on self if it exists in the ConfigParser.""" + section, option = where.split(":") + if cp.has_option(section, option): + method = getattr(cp, 'get'+type_) + setattr(self, attr, method(section, option)) diff --git a/third_party/pycoverage/coverage/control.py b/third_party/pycoverage/coverage/control.py new file mode 100644 index 0000000..f75a3dd --- /dev/null +++ b/third_party/pycoverage/coverage/control.py @@ -0,0 +1,779 @@ +"""Core control stuff for Coverage.""" + +import atexit, os, random, socket, sys + 
+from coverage.annotate import AnnotateReporter +from coverage.backward import string_class, iitems, sorted # pylint: disable=W0622 +from coverage.codeunit import code_unit_factory, CodeUnit +from coverage.collector import Collector +from coverage.config import CoverageConfig +from coverage.data import CoverageData +from coverage.debug import DebugControl +from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher +from coverage.files import PathAliases, find_python_files, prep_patterns +from coverage.html import HtmlReporter +from coverage.misc import CoverageException, bool_or_none, join_regex +from coverage.misc import file_be_gone +from coverage.results import Analysis, Numbers +from coverage.summary import SummaryReporter +from coverage.xmlreport import XmlReporter + +# Pypy has some unusual stuff in the "stdlib". Consider those locations +# when deciding where the stdlib is. +try: + import _structseq # pylint: disable=F0401 +except ImportError: + _structseq = None + + +class coverage(object): + """Programmatic access to coverage.py. + + To use:: + + from coverage import coverage + + cov = coverage() + cov.start() + #.. call your code .. + cov.stop() + cov.html_report(directory='covhtml') + + """ + def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, + auto_data=False, timid=None, branch=None, config_file=True, + source=None, omit=None, include=None, debug=None, + debug_file=None): + """ + `data_file` is the base name of the data file to use, defaulting to + ".coverage". `data_suffix` is appended (with a dot) to `data_file` to + create the final file name. If `data_suffix` is simply True, then a + suffix is created with the machine and process identity included. + + `cover_pylib` is a boolean determining whether Python code installed + with the Python interpreter is measured. This includes the Python + standard library and any packages installed with the interpreter. + + If `auto_data` is true, then any existing data file will be read when + coverage measurement starts, and data will be saved automatically when + measurement stops. + + If `timid` is true, then a slower and simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions breaks the faster trace function. + + If `branch` is true, then branch coverage will be measured in addition + to the usual statement coverage. + + `config_file` determines what config file to read. If it is a string, + it is the name of the config file to read. If it is True, then a + standard file is read (".coveragerc"). If it is False, then no file is + read. + + `source` is a list of file paths or package names. Only code located + in the trees indicated by the file paths or package names will be + measured. + + `include` and `omit` are lists of filename patterns. Files that match + `include` will be measured, files that match `omit` will not. Each + will also accept a single string argument. + + `debug` is a list of strings indicating what debugging information is + desired. `debug_file` is the file to write debug messages to, + defaulting to stderr. + + """ + from coverage import __version__ + + # A record of all the warnings that have been issued. 
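# Hedged usage sketch of the constructor arguments documented above; the
# package name and patterns are examples only.
from coverage import coverage

cov = coverage(
    branch=True,              # measure branch (arc) coverage too
    source=['mypkg'],         # only measure code under this package
    omit=['*/tests/*'],       # skip test files when reporting
    config_file=False,        # ignore any .coveragerc for this run
)
cov.start()
# ... import and exercise mypkg here ...
cov.stop()
cov.save()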
+ self._warnings = [] + + # Build our configuration from a number of sources: + # 1: defaults: + self.config = CoverageConfig() + + # 2: from the coveragerc file: + if config_file: + if config_file is True: + config_file = ".coveragerc" + try: + self.config.from_file(config_file) + except ValueError: + _, err, _ = sys.exc_info() + raise CoverageException( + "Couldn't read config file %s: %s" % (config_file, err) + ) + + # 3: from environment variables: + self.config.from_environment('COVERAGE_OPTIONS') + env_data_file = os.environ.get('COVERAGE_FILE') + if env_data_file: + self.config.data_file = env_data_file + + # 4: from constructor arguments: + self.config.from_args( + data_file=data_file, cover_pylib=cover_pylib, timid=timid, + branch=branch, parallel=bool_or_none(data_suffix), + source=source, omit=omit, include=include, debug=debug, + ) + + # Create and configure the debugging controller. + self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) + + self.auto_data = auto_data + + # _exclude_re is a dict mapping exclusion list names to compiled + # regexes. + self._exclude_re = {} + self._exclude_regex_stale() + + self.file_locator = FileLocator() + + # The source argument can be directories or package names. + self.source = [] + self.source_pkgs = [] + for src in self.config.source or []: + if os.path.exists(src): + self.source.append(self.file_locator.canonical_filename(src)) + else: + self.source_pkgs.append(src) + + self.omit = prep_patterns(self.config.omit) + self.include = prep_patterns(self.config.include) + + self.collector = Collector( + self._should_trace, timid=self.config.timid, + branch=self.config.branch, warn=self._warn + ) + + # Suffixes are a bit tricky. We want to use the data suffix only when + # collecting data, not when combining data. So we save it as + # `self.run_suffix` now, and promote it to `self.data_suffix` if we + # find that we are collecting data later. + if data_suffix or self.config.parallel: + if not isinstance(data_suffix, string_class): + # if data_suffix=True, use .machinename.pid.random + data_suffix = True + else: + data_suffix = None + self.data_suffix = None + self.run_suffix = data_suffix + + # Create the data file. We do this at construction time so that the + # data file will be written into the directory where the process + # started rather than wherever the process eventually chdir'd to. + self.data = CoverageData( + basename=self.config.data_file, + collector="coverage v%s" % __version__, + debug=self.debug, + ) + + # The dirs for files considered "installed with the interpreter". + self.pylib_dirs = [] + if not self.config.cover_pylib: + # Look at where some standard modules are located. That's the + # indication for "installed with the interpreter". In some + # environments (virtualenv, for example), these modules may be + # spread across a few locations. Look at all the candidate modules + # we've imported, and take all the different ones. + for m in (atexit, os, random, socket, _structseq): + if m is not None and hasattr(m, "__file__"): + m_dir = self._canonical_dir(m) + if m_dir not in self.pylib_dirs: + self.pylib_dirs.append(m_dir) + + # To avoid tracing the coverage code itself, we skip anything located + # where we are. + self.cover_dir = self._canonical_dir(__file__) + + # The matchers for _should_trace. + self.source_match = None + self.pylib_match = self.cover_match = None + self.include_match = self.omit_match = None + + # Set the reporting precision. 
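# Standalone sketch of the pylib_dirs heuristic above: the directories of a
# few always-present modules are taken as "installed with the interpreter",
# which copes with virtualenvs that spread the stdlib across locations.  This
# uses plain os.path instead of coverage's canonical-filename helper.
import atexit, os, random, socket

pylib_dirs = []
for m in (atexit, os, random, socket):
    if m is not None and hasattr(m, "__file__"):
        m_dir = os.path.dirname(os.path.abspath(m.__file__))
        if m_dir not in pylib_dirs:
            pylib_dirs.append(m_dir)
print(pylib_dirs)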
+ Numbers.set_precision(self.config.precision)
+
+ # Is it ok for no data to be collected?
+ self._warn_no_data = True
+ self._warn_unimported_source = True
+
+ # State machine variables:
+ # Have we started collecting and not stopped it?
+ self._started = False
+ # Have we measured some data and not harvested it?
+ self._measured = False
+
+ atexit.register(self._atexit)
+
+ def _canonical_dir(self, morf):
+ """Return the canonical directory of the module or file `morf`."""
+ return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
+
+ def _source_for_file(self, filename):
+ """Return the source file for `filename`."""
+ if not filename.endswith(".py"):
+ if filename[-4:-1] == ".py":
+ filename = filename[:-1]
+ elif filename.endswith("$py.class"): # jython
+ filename = filename[:-9] + ".py"
+ return filename
+
+ def _should_trace_with_reason(self, filename, frame):
+ """Decide whether to trace execution in `filename`, with a reason.
+
+ This function is called from the trace function. As each new file name
+ is encountered, this function determines whether it is traced or not.
+
+ Returns a pair of values: the first indicates whether the file should
+ be traced: it's a canonicalized filename if it should be traced, None
+ if it should not. The second value is a string, the reason for the
+ decision.
+
+ """
+ if not filename:
+ # Empty string is pretty useless
+ return None, "empty string isn't a filename"
+
+ if filename.startswith('<'):
+ # Lots of non-file execution is represented with artificial
+ # filenames like "<string>", "<doctest readme.txt[0]>", or
+ # "<exec_function>". Don't ever trace these executions, since we
+ # can't do anything with the data later anyway.
+ return None, "not a real filename"
+
+ self._check_for_packages()
+
+ # Compiled Python files have two filenames: frame.f_code.co_filename is
+ # the filename at the time the .pyc was compiled. The second name is
+ # __file__, which is where the .pyc was actually loaded from. Since
+ # .pyc files can be moved after compilation (for example, by being
+ # installed), we look for __file__ in the frame and prefer it to the
+ # co_filename value.
+ dunder_file = frame.f_globals.get('__file__')
+ if dunder_file:
+ filename = self._source_for_file(dunder_file)
+
+ # Jython reports the .class file to the tracer, use the source file.
+ if filename.endswith("$py.class"):
+ filename = filename[:-9] + ".py"
+
+ canonical = self.file_locator.canonical_filename(filename)
+
+ # If the user specified source or include, then that's authoritative
+ # about the outer bound of what to measure and we don't have to apply
+ # any canned exclusions. If they didn't, then we have to exclude the
+ # stdlib and coverage.py directories.
+ if self.source_match:
+ if not self.source_match.match(canonical):
+ return None, "falls outside the --source trees"
+ elif self.include_match:
+ if not self.include_match.match(canonical):
+ return None, "falls outside the --include trees"
+ else:
+ # If we aren't supposed to trace installed code, then check if this
+ # is near the Python standard library and skip it if so.
+ if self.pylib_match and self.pylib_match.match(canonical):
+ return None, "is in the stdlib"
+
+ # We exclude the coverage code itself, since a little of it will be
+ # measured otherwise.
+ if self.cover_match and self.cover_match.match(canonical):
+ return None, "is part of coverage.py"
+
+ # Check the file against the omit pattern.
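+ # For example, with omit=["*/tests/*"] a canonical path such as
+ # "/src/pkg/tests/test_x.py" matches the pattern and is skipped here
+ # (illustrative values, not taken from the vendored sources).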
+ if self.omit_match and self.omit_match.match(canonical): + return None, "is inside an --omit pattern" + + return canonical, "because we love you" + + def _should_trace(self, filename, frame): + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_with_reason`, and returns just the decision. + + """ + canonical, reason = self._should_trace_with_reason(filename, frame) + if self.debug.should('trace'): + if not canonical: + msg = "Not tracing %r: %s" % (filename, reason) + else: + msg = "Tracing %r" % (filename,) + self.debug.write(msg) + return canonical + + def _warn(self, msg): + """Use `msg` as a warning.""" + self._warnings.append(msg) + sys.stderr.write("Coverage.py warning: %s\n" % msg) + + def _check_for_packages(self): + """Update the source_match matcher with latest imported packages.""" + # Our self.source_pkgs attribute is a list of package names we want to + # measure. Each time through here, we see if we've imported any of + # them yet. If so, we add its file to source_match, and we don't have + # to look for that package any more. + if self.source_pkgs: + found = [] + for pkg in self.source_pkgs: + try: + mod = sys.modules[pkg] + except KeyError: + continue + + found.append(pkg) + + try: + pkg_file = mod.__file__ + except AttributeError: + pkg_file = None + else: + d, f = os.path.split(pkg_file) + if f.startswith('__init__'): + # This is actually a package, return the directory. + pkg_file = d + else: + pkg_file = self._source_for_file(pkg_file) + pkg_file = self.file_locator.canonical_filename(pkg_file) + if not os.path.exists(pkg_file): + pkg_file = None + + if pkg_file: + self.source.append(pkg_file) + self.source_match.add(pkg_file) + else: + self._warn("Module %s has no Python source." % pkg) + + for pkg in found: + self.source_pkgs.remove(pkg) + + def use_cache(self, usecache): + """Control the use of a data file (incorrectly called a cache). + + `usecache` is true or false, whether to read and write data on disk. + + """ + self.data.usefile(usecache) + + def load(self): + """Load previously-collected coverage data from the data file.""" + self.collector.reset() + self.data.read() + + def start(self): + """Start measuring code coverage. + + Coverage measurement actually occurs in functions called after `start` + is invoked. Statements in the same scope as `start` won't be measured. + + Once you invoke `start`, you must also call `stop` eventually, or your + process might not shut down cleanly. + + """ + if self.run_suffix: + # Calling start() means we're running code, so use the run_suffix + # as the data_suffix when we eventually save the data. + self.data_suffix = self.run_suffix + if self.auto_data: + self.load() + + # Create the matchers we need for _should_trace + if self.source or self.source_pkgs: + self.source_match = TreeMatcher(self.source) + else: + if self.cover_dir: + self.cover_match = TreeMatcher([self.cover_dir]) + if self.pylib_dirs: + self.pylib_match = TreeMatcher(self.pylib_dirs) + if self.include: + self.include_match = FnmatchMatcher(self.include) + if self.omit: + self.omit_match = FnmatchMatcher(self.omit) + + # The user may want to debug things, show info if desired. 
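+ # (Debug option names used in this module include 'config', 'sys' and
+ # 'trace'; the data and debug modules also recognize 'dataio' and 'pid'.)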
+ if self.debug.should('config'): + self.debug.write("Configuration values:") + config_info = sorted(self.config.__dict__.items()) + self.debug.write_formatted_info(config_info) + + if self.debug.should('sys'): + self.debug.write("Debugging info:") + self.debug.write_formatted_info(self.sysinfo()) + + self.collector.start() + self._started = True + self._measured = True + + def stop(self): + """Stop measuring code coverage.""" + self._started = False + self.collector.stop() + + def _atexit(self): + """Clean up on process shutdown.""" + if self._started: + self.stop() + if self.auto_data: + self.save() + + def erase(self): + """Erase previously-collected coverage data. + + This removes the in-memory data collected in this session as well as + discarding the data file. + + """ + self.collector.reset() + self.data.erase() + + def clear_exclude(self, which='exclude'): + """Clear the exclude list.""" + setattr(self.config, which + "_list", []) + self._exclude_regex_stale() + + def exclude(self, regex, which='exclude'): + """Exclude source lines from execution consideration. + + A number of lists of regular expressions are maintained. Each list + selects lines that are treated differently during reporting. + + `which` determines which list is modified. The "exclude" list selects + lines that are not considered executable at all. The "partial" list + indicates lines with branches that are not taken. + + `regex` is a regular expression. The regex is added to the specified + list. If any of the regexes in the list is found in a line, the line + is marked for special treatment during reporting. + + """ + excl_list = getattr(self.config, which + "_list") + excl_list.append(regex) + self._exclude_regex_stale() + + def _exclude_regex_stale(self): + """Drop all the compiled exclusion regexes, a list was modified.""" + self._exclude_re.clear() + + def _exclude_regex(self, which): + """Return a compiled regex for the given exclusion list.""" + if which not in self._exclude_re: + excl_list = getattr(self.config, which + "_list") + self._exclude_re[which] = join_regex(excl_list) + return self._exclude_re[which] + + def get_exclude_list(self, which='exclude'): + """Return a list of excluded regex patterns. + + `which` indicates which list is desired. See `exclude` for the lists + that are available, and their meaning. + + """ + return getattr(self.config, which + "_list") + + def save(self): + """Save the collected coverage data to the data file.""" + data_suffix = self.data_suffix + if data_suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. + extra = "" + if _TEST_NAME_FILE: + f = open(_TEST_NAME_FILE) + test_name = f.read() + f.close() + extra = "." + test_name + data_suffix = "%s%s.%s.%06d" % ( + socket.gethostname(), extra, os.getpid(), + random.randint(0, 999999) + ) + + self._harvest_data() + self.data.write(suffix=data_suffix) + + def combine(self): + """Combine together a number of similarly-named coverage data files. + + All coverage data files whose name starts with `data_file` (from the + coverage() constructor) will be read, and combined together into the + current measurements. 
+ + """ + aliases = None + if self.config.paths: + aliases = PathAliases(self.file_locator) + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + self.data.combine_parallel_data(aliases=aliases) + + def _harvest_data(self): + """Get the collected data and reset the collector. + + Also warn about various problems collecting data. + + """ + if not self._measured: + return + + self.data.add_line_data(self.collector.get_line_data()) + self.data.add_arc_data(self.collector.get_arc_data()) + self.collector.reset() + + # If there are still entries in the source_pkgs list, then we never + # encountered those packages. + if self._warn_unimported_source: + for pkg in self.source_pkgs: + self._warn("Module %s was never imported." % pkg) + + # Find out if we got any data. + summary = self.data.summary() + if not summary and self._warn_no_data: + self._warn("No data was collected.") + + # Find files that were never executed at all. + for src in self.source: + for py_file in find_python_files(src): + py_file = self.file_locator.canonical_filename(py_file) + + if self.omit_match and self.omit_match.match(py_file): + # Turns out this file was omitted, so don't pull it back + # in as unexecuted. + continue + + self.data.touch_file(py_file) + + self._measured = False + + # Backward compatibility with version 1. + def analysis(self, morf): + """Like `analysis2` but doesn't return excluded line numbers.""" + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2(self, morf): + """Analyze a module. + + `morf` is a module or a filename. It will be analyzed to determine + its coverage statistics. The return value is a 5-tuple: + + * The filename for the module. + * A list of line numbers of executable statements. + * A list of line numbers of excluded statements. + * A list of line numbers of statements not run (missing from + execution). + * A readable formatted string of the missing line numbers. + + The analysis uses the source file itself and the current measured + coverage data. + + """ + analysis = self._analyze(morf) + return ( + analysis.filename, + sorted(analysis.statements), + sorted(analysis.excluded), + sorted(analysis.missing), + analysis.missing_formatted(), + ) + + def _analyze(self, it): + """Analyze a single morf or code unit. + + Returns an `Analysis` object. + + """ + self._harvest_data() + if not isinstance(it, CodeUnit): + it = code_unit_factory(it, self.file_locator)[0] + + return Analysis(self, it) + + def report(self, morfs=None, show_missing=True, ignore_errors=None, + file=None, # pylint: disable=W0622 + omit=None, include=None + ): + """Write a summary report to `file`. + + Each module in `morfs` is listed, with counts of statements, executed + statements, missing statements, and a list of lines missed. + + `include` is a list of filename patterns. Modules whose filenames + match those patterns will be included in the report. Modules matching + `omit` will not be included in the report. + + Returns a float, the total percentage covered. + + """ + self._harvest_data() + self.config.from_args( + ignore_errors=ignore_errors, omit=omit, include=include, + show_missing=show_missing, + ) + reporter = SummaryReporter(self, self.config) + return reporter.report(morfs, outfile=file) + + def annotate(self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None): + """Annotate a list of modules. + + Each module in `morfs` is annotated. 
The source is written to a new + file, named with a ",cover" suffix, with each line prefixed with a + marker to indicate the coverage of the line. Covered lines have ">", + excluded lines have "-", and missing lines have "!". + + See `coverage.report()` for other arguments. + + """ + self._harvest_data() + self.config.from_args( + ignore_errors=ignore_errors, omit=omit, include=include + ) + reporter = AnnotateReporter(self, self.config) + reporter.report(morfs, directory=directory) + + def html_report(self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None, extra_css=None, title=None): + """Generate an HTML report. + + The HTML is written to `directory`. The file "index.html" is the + overview starting point, with links to more detailed pages for + individual modules. + + `extra_css` is a path to a file of other CSS to apply on the page. + It will be copied into the HTML directory. + + `title` is a text string (not HTML) to use as the title of the HTML + report. + + See `coverage.report()` for other arguments. + + Returns a float, the total percentage covered. + + """ + self._harvest_data() + self.config.from_args( + ignore_errors=ignore_errors, omit=omit, include=include, + html_dir=directory, extra_css=extra_css, html_title=title, + ) + reporter = HtmlReporter(self, self.config) + return reporter.report(morfs) + + def xml_report(self, morfs=None, outfile=None, ignore_errors=None, + omit=None, include=None): + """Generate an XML report of coverage results. + + The report is compatible with Cobertura reports. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See `coverage.report()` for other arguments. + + Returns a float, the total percentage covered. 
+ + """ + self._harvest_data() + self.config.from_args( + ignore_errors=ignore_errors, omit=omit, include=include, + xml_output=outfile, + ) + file_to_close = None + delete_file = False + if self.config.xml_output: + if self.config.xml_output == '-': + outfile = sys.stdout + else: + outfile = open(self.config.xml_output, "w") + file_to_close = outfile + try: + try: + reporter = XmlReporter(self, self.config) + return reporter.report(morfs, outfile=outfile) + except CoverageException: + delete_file = True + raise + finally: + if file_to_close: + file_to_close.close() + if delete_file: + file_be_gone(self.config.xml_output) + + def sysinfo(self): + """Return a list of (key, value) pairs showing internal information.""" + + import coverage as covmod + import platform, re + + try: + implementation = platform.python_implementation() + except AttributeError: + implementation = "unknown" + + info = [ + ('version', covmod.__version__), + ('coverage', covmod.__file__), + ('cover_dir', self.cover_dir), + ('pylib_dirs', self.pylib_dirs), + ('tracer', self.collector.tracer_name()), + ('config_files', self.config.attempted_config_files), + ('configs_read', self.config.config_files), + ('data_path', self.data.filename), + ('python', sys.version.replace('\n', '')), + ('platform', platform.platform()), + ('implementation', implementation), + ('executable', sys.executable), + ('cwd', os.getcwd()), + ('path', sys.path), + ('environment', sorted([ + ("%s = %s" % (k, v)) for k, v in iitems(os.environ) + if re.search(r"^COV|^PY", k) + ])), + ('command_line', " ".join(getattr(sys, 'argv', ['???']))), + ] + if self.source_match: + info.append(('source_match', self.source_match.info())) + if self.include_match: + info.append(('include_match', self.include_match.info())) + if self.omit_match: + info.append(('omit_match', self.omit_match.info())) + if self.cover_match: + info.append(('cover_match', self.cover_match.info())) + if self.pylib_match: + info.append(('pylib_match', self.pylib_match.info())) + + return info + + +def process_startup(): + """Call this at Python startup to perhaps measure coverage. + + If the environment variable COVERAGE_PROCESS_START is defined, coverage + measurement is started. The value of the variable is the config file + to use. + + There are two ways to configure your Python installation to invoke this + function when Python starts: + + #. Create or append to sitecustomize.py to add these lines:: + + import coverage + coverage.process_startup() + + #. Create a .pth file in your Python installation containing:: + + import coverage; coverage.process_startup() + + """ + cps = os.environ.get("COVERAGE_PROCESS_START") + if cps: + cov = coverage(config_file=cps, auto_data=True) + cov.start() + cov._warn_no_data = False + cov._warn_unimported_source = False + + +# A hack for debugging testing in subprocesses. +_TEST_NAME_FILE = "" #"/tmp/covtest.txt" diff --git a/third_party/pycoverage/coverage/data.py b/third_party/pycoverage/coverage/data.py new file mode 100644 index 0000000..fb88c5b --- /dev/null +++ b/third_party/pycoverage/coverage/data.py @@ -0,0 +1,278 @@ +"""Coverage data for Coverage.""" + +import os + +from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622 +from coverage.files import PathAliases +from coverage.misc import file_be_gone + + +class CoverageData(object): + """Manages collected coverage data, including file storage. 
+ + The data file format is a pickled dict, with these keys: + + * collector: a string identifying the collecting software + + * lines: a dict mapping filenames to sorted lists of line numbers + executed: + { 'file1': [17,23,45], 'file2': [1,2,3], ... } + + * arcs: a dict mapping filenames to sorted lists of line number pairs: + { 'file1': [(17,23), (17,25), (25,26)], ... } + + """ + + def __init__(self, basename=None, collector=None, debug=None): + """Create a CoverageData. + + `basename` is the name of the file to use for storing data. + + `collector` is a string describing the coverage measurement software. + + `debug` is a `DebugControl` object for writing debug messages. + + """ + self.collector = collector or 'unknown' + self.debug = debug + + self.use_file = True + + # Construct the filename that will be used for data file storage, if we + # ever do any file storage. + self.filename = basename or ".coverage" + self.filename = os.path.abspath(self.filename) + + # A map from canonical Python source file name to a dictionary in + # which there's an entry for each line number that has been + # executed: + # + # { + # 'filename1.py': { 12: None, 47: None, ... }, + # ... + # } + # + self.lines = {} + + # A map from canonical Python source file name to a dictionary with an + # entry for each pair of line numbers forming an arc: + # + # { + # 'filename1.py': { (12,14): None, (47,48): None, ... }, + # ... + # } + # + self.arcs = {} + + def usefile(self, use_file=True): + """Set whether or not to use a disk file for data.""" + self.use_file = use_file + + def read(self): + """Read coverage data from the coverage data file (if it exists).""" + if self.use_file: + self.lines, self.arcs = self._read_file(self.filename) + else: + self.lines, self.arcs = {}, {} + + def write(self, suffix=None): + """Write the collected coverage data to a file. + + `suffix` is a suffix to append to the base file name. This can be used + for multiple or parallel execution, so that many coverage data files + can exist simultaneously. A dot will be used to join the base name and + the suffix. + + """ + if self.use_file: + filename = self.filename + if suffix: + filename += "." + suffix + self.write_file(filename) + + def erase(self): + """Erase the data, both in this object, and from its file storage.""" + if self.use_file: + if self.filename: + file_be_gone(self.filename) + self.lines = {} + self.arcs = {} + + def line_data(self): + """Return the map from filenames to lists of line numbers executed.""" + return dict( + [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)] + ) + + def arc_data(self): + """Return the map from filenames to lists of line number pairs.""" + return dict( + [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)] + ) + + def write_file(self, filename): + """Write the coverage data to `filename`.""" + + # Create the file data. + data = {} + + data['lines'] = self.line_data() + arcs = self.arc_data() + if arcs: + data['arcs'] = arcs + + if self.collector: + data['collector'] = self.collector + + if self.debug and self.debug.should('dataio'): + self.debug.write("Writing data to %r" % (filename,)) + + # Write the pickle to the file. 
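+ # (Protocol 2 below is the newest pickle protocol that Python 2 can
+ # read, which keeps the data file usable from both Python 2 and 3.)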
+ fdata = open(filename, 'wb') + try: + pickle.dump(data, fdata, 2) + finally: + fdata.close() + + def read_file(self, filename): + """Read the coverage data from `filename`.""" + self.lines, self.arcs = self._read_file(filename) + + def raw_data(self, filename): + """Return the raw pickled data from `filename`.""" + if self.debug and self.debug.should('dataio'): + self.debug.write("Reading data from %r" % (filename,)) + fdata = open(filename, 'rb') + try: + data = pickle.load(fdata) + finally: + fdata.close() + return data + + def _read_file(self, filename): + """Return the stored coverage data from the given file. + + Returns two values, suitable for assigning to `self.lines` and + `self.arcs`. + + """ + lines = {} + arcs = {} + try: + data = self.raw_data(filename) + if isinstance(data, dict): + # Unpack the 'lines' item. + lines = dict([ + (f, dict.fromkeys(linenos, None)) + for f, linenos in iitems(data.get('lines', {})) + ]) + # Unpack the 'arcs' item. + arcs = dict([ + (f, dict.fromkeys(arcpairs, None)) + for f, arcpairs in iitems(data.get('arcs', {})) + ]) + except Exception: + pass + return lines, arcs + + def combine_parallel_data(self, aliases=None): + """Combine a number of data files together. + + Treat `self.filename` as a file prefix, and combine the data from all + of the data files starting with that prefix plus a dot. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + """ + aliases = aliases or PathAliases() + data_dir, local = os.path.split(self.filename) + localdot = local + '.' + for f in os.listdir(data_dir or '.'): + if f.startswith(localdot): + full_path = os.path.join(data_dir, f) + new_lines, new_arcs = self._read_file(full_path) + for filename, file_data in iitems(new_lines): + filename = aliases.map(filename) + self.lines.setdefault(filename, {}).update(file_data) + for filename, file_data in iitems(new_arcs): + filename = aliases.map(filename) + self.arcs.setdefault(filename, {}).update(file_data) + if f != local: + os.remove(full_path) + + def add_line_data(self, line_data): + """Add executed line data. + + `line_data` is { filename: { lineno: None, ... }, ...} + + """ + for filename, linenos in iitems(line_data): + self.lines.setdefault(filename, {}).update(linenos) + + def add_arc_data(self, arc_data): + """Add measured arc data. + + `arc_data` is { filename: { (l1,l2): None, ... }, ...} + + """ + for filename, arcs in iitems(arc_data): + self.arcs.setdefault(filename, {}).update(arcs) + + def touch_file(self, filename): + """Ensure that `filename` appears in the data, empty if needed.""" + self.lines.setdefault(filename, {}) + + def measured_files(self): + """A list of all files that had been measured.""" + return list(self.lines.keys()) + + def executed_lines(self, filename): + """A map containing all the line numbers executed in `filename`. + + If `filename` hasn't been collected at all (because it wasn't executed) + then return an empty map. + + """ + return self.lines.get(filename) or {} + + def executed_arcs(self, filename): + """A map containing all the arcs executed in `filename`.""" + return self.arcs.get(filename) or {} + + def add_to_hash(self, filename, hasher): + """Contribute `filename`'s data to the Md5Hash `hasher`.""" + hasher.update(self.executed_lines(filename)) + hasher.update(self.executed_arcs(filename)) + + def summary(self, fullpath=False): + """Return a dict summarizing the coverage data. 
+ + Keys are based on the filenames, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + """ + summ = {} + if fullpath: + filename_fn = lambda f: f + else: + filename_fn = os.path.basename + for filename, lines in iitems(self.lines): + summ[filename_fn(filename)] = len(lines) + return summ + + def has_arcs(self): + """Does this data have arcs?""" + return bool(self.arcs) + + +if __name__ == '__main__': + # Ad-hoc: show the raw data in a data file. + import pprint, sys + covdata = CoverageData() + if sys.argv[1:]: + fname = sys.argv[1] + else: + fname = covdata.filename + pprint.pprint(covdata.raw_data(fname)) diff --git a/third_party/pycoverage/coverage/debug.py b/third_party/pycoverage/coverage/debug.py new file mode 100644 index 0000000..104f3b1 --- /dev/null +++ b/third_party/pycoverage/coverage/debug.py @@ -0,0 +1,54 @@ +"""Control of and utilities for debugging.""" + +import os + + +# When debugging, it can be helpful to force some options, especially when +# debugging the configuration mechanisms you usually use to control debugging! +# This is a list of forced debugging options. +FORCED_DEBUG = [] + + +class DebugControl(object): + """Control and output for debugging.""" + + def __init__(self, options, output): + """Configure the options and output file for debugging.""" + self.options = options + self.output = output + + def should(self, option): + """Decide whether to output debug information in category `option`.""" + return (option in self.options or option in FORCED_DEBUG) + + def write(self, msg): + """Write a line of debug output.""" + if self.should('pid'): + msg = "pid %5d: %s" % (os.getpid(), msg) + self.output.write(msg+"\n") + self.output.flush() + + def write_formatted_info(self, info): + """Write a sequence of (label,data) pairs nicely.""" + for line in info_formatter(info): + self.write(" %s" % line) + + +def info_formatter(info): + """Produce a sequence of formatted lines from info. + + `info` is a sequence of pairs (label, data). The produced lines are + nicely formatted, ready to print. + + """ + label_len = max([len(l) for l, _d in info]) + for label, data in info: + if data == []: + data = "-none-" + if isinstance(data, (list, tuple)): + prefix = "%*s:" % (label_len, label) + for e in data: + yield "%*s %s" % (label_len+1, prefix, e) + prefix = "" + else: + yield "%*s: %s" % (label_len, label, data) diff --git a/third_party/pycoverage/coverage/execfile.py b/third_party/pycoverage/coverage/execfile.py new file mode 100644 index 0000000..f6ebdf7 --- /dev/null +++ b/third_party/pycoverage/coverage/execfile.py @@ -0,0 +1,171 @@ +"""Execute files of Python code.""" + +import imp, marshal, os, sys + +from coverage.backward import exec_code_object, open_source +from coverage.misc import ExceptionDuringRun, NoCode, NoSource + + +try: + # In Py 2.x, the builtins were in __builtin__ + BUILTINS = sys.modules['__builtin__'] +except KeyError: + # In Py 3.x, they're in builtins + BUILTINS = sys.modules['builtins'] + + +def rsplit1(s, sep): + """The same as s.rsplit(sep, 1), but works in 2.3""" + parts = s.split(sep) + return sep.join(parts[:-1]), parts[-1] + + +def run_python_module(modulename, args): + """Run a python module, as though with ``python -m name args...``. + + `modulename` is the name of the module, possibly a dot-separated name. 
+ `args` is the argument array to present as sys.argv, including the first + element naming the module being executed. + + """ + openfile = None + glo, loc = globals(), locals() + try: + try: + # Search for the module - inside its parent package, if any - using + # standard import mechanics. + if '.' in modulename: + packagename, name = rsplit1(modulename, '.') + package = __import__(packagename, glo, loc, ['__path__']) + searchpath = package.__path__ + else: + packagename, name = None, modulename + searchpath = None # "top-level search" in imp.find_module() + openfile, pathname, _ = imp.find_module(name, searchpath) + + # Complain if this is a magic non-file module. + if openfile is None and pathname is None: + raise NoSource( + "module does not live in a file: %r" % modulename + ) + + # If `modulename` is actually a package, not a mere module, then we + # pretend to be Python 2.7 and try running its __main__.py script. + if openfile is None: + packagename = modulename + name = '__main__' + package = __import__(packagename, glo, loc, ['__path__']) + searchpath = package.__path__ + openfile, pathname, _ = imp.find_module(name, searchpath) + except ImportError: + _, err, _ = sys.exc_info() + raise NoSource(str(err)) + finally: + if openfile: + openfile.close() + + # Finally, hand the file off to run_python_file for execution. + pathname = os.path.abspath(pathname) + args[0] = pathname + run_python_file(pathname, args, package=packagename) + + +def run_python_file(filename, args, package=None): + """Run a python file as if it were the main program on the command line. + + `filename` is the path to the file to execute, it need not be a .py file. + `args` is the argument array to present as sys.argv, including the first + element naming the file being executed. `package` is the name of the + enclosing package, if any. + + """ + # Create a module to serve as __main__ + old_main_mod = sys.modules['__main__'] + main_mod = imp.new_module('__main__') + sys.modules['__main__'] = main_mod + main_mod.__file__ = filename + if package: + main_mod.__package__ = package + main_mod.__builtins__ = BUILTINS + + # Set sys.argv properly. + old_argv = sys.argv + sys.argv = args + + try: + # Make a code object somehow. + if filename.endswith(".pyc") or filename.endswith(".pyo"): + code = make_code_from_pyc(filename) + else: + code = make_code_from_py(filename) + + # Execute the code object. + try: + exec_code_object(code, main_mod.__dict__) + except SystemExit: + # The user called sys.exit(). Just pass it along to the upper + # layers, where it will be handled. + raise + except: + # Something went wrong while executing the user code. + # Get the exc_info, and pack them into an exception that we can + # throw up to the outer loop. We peel two layers off the traceback + # so that the coverage.py code doesn't appear in the final printed + # traceback. + typ, err, tb = sys.exc_info() + raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next) + finally: + # Restore the old __main__ + sys.modules['__main__'] = old_main_mod + + # Restore the old argv and path + sys.argv = old_argv + +def make_code_from_py(filename): + """Get source from `filename` and make a code object of it.""" + # Open the source file. + try: + source_file = open_source(filename) + except IOError: + raise NoSource("No file to run: %r" % filename) + + try: + source = source_file.read() + finally: + source_file.close() + + # We have the source. `compile` still needs the last line to be clean, + # so make sure it is, then compile a code object from it. 
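+ # (Without a trailing newline, compile() raises a SyntaxError on some
+ # Python versions, so one is appended if needed.)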
+ if not source or source[-1] != '\n': + source += '\n' + code = compile(source, filename, "exec") + + return code + + +def make_code_from_pyc(filename): + """Get a code object from a .pyc file.""" + try: + fpyc = open(filename, "rb") + except IOError: + raise NoCode("No file to run: %r" % filename) + + try: + # First four bytes are a version-specific magic number. It has to + # match or we won't run the file. + magic = fpyc.read(4) + if magic != imp.get_magic(): + raise NoCode("Bad magic number in .pyc file") + + # Skip the junk in the header that we don't need. + fpyc.read(4) # Skip the moddate. + if sys.version_info >= (3, 3): + # 3.3 added another long to the header (size), skip it. + fpyc.read(4) + + # The rest of the file is the code object we want. + code = marshal.load(fpyc) + finally: + fpyc.close() + + return code diff --git a/third_party/pycoverage/coverage/files.py b/third_party/pycoverage/coverage/files.py new file mode 100644 index 0000000..464535a --- /dev/null +++ b/third_party/pycoverage/coverage/files.py @@ -0,0 +1,309 @@ +"""File wrangling.""" + +from coverage.backward import to_string +from coverage.misc import CoverageException +import fnmatch, os, os.path, re, sys +import ntpath, posixpath + +class FileLocator(object): + """Understand how filenames work.""" + + def __init__(self): + # The absolute path to our current directory. + self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep) + + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + self.canonical_filename_cache = {} + + def relative_filename(self, filename): + """Return the relative form of `filename`. + + The filename will be relative to the current directory when the + `FileLocator` was constructed. + + """ + fnorm = os.path.normcase(filename) + if fnorm.startswith(self.relative_dir): + filename = filename[len(self.relative_dir):] + return filename + + def canonical_filename(self, filename): + """Return a canonical filename for `filename`. + + An absolute path with no redundant components and normalized case. + + """ + if filename not in self.canonical_filename_cache: + if not os.path.isabs(filename): + for path in [os.curdir] + sys.path: + if path is None: + continue + f = os.path.join(path, filename) + if os.path.exists(f): + filename = f + break + cf = abs_file(filename) + self.canonical_filename_cache[filename] = cf + return self.canonical_filename_cache[filename] + + def get_zip_data(self, filename): + """Get data from `filename` if it is a zip file path. + + Returns the string data read from the zip file, or None if no zip file + could be found or `filename` isn't in it. The data returned will be + an empty string if the file is empty. 
+ + """ + import zipimport + markers = ['.zip'+os.sep, '.egg'+os.sep] + for marker in markers: + if marker in filename: + parts = filename.split(marker) + try: + zi = zipimport.zipimporter(parts[0]+marker[:-1]) + except zipimport.ZipImportError: + continue + try: + data = zi.get_data(parts[1]) + except IOError: + continue + return to_string(data) + return None + + +if sys.platform == 'win32': + + def actual_path(path): + """Get the actual path of `path`, including the correct case.""" + if path in actual_path.cache: + return actual_path.cache[path] + + head, tail = os.path.split(path) + if not tail: + actpath = head + elif not head: + actpath = tail + else: + head = actual_path(head) + if head in actual_path.list_cache: + files = actual_path.list_cache[head] + else: + try: + files = os.listdir(head) + except OSError: + files = [] + actual_path.list_cache[head] = files + normtail = os.path.normcase(tail) + for f in files: + if os.path.normcase(f) == normtail: + tail = f + break + actpath = os.path.join(head, tail) + actual_path.cache[path] = actpath + return actpath + + actual_path.cache = {} + actual_path.list_cache = {} + +else: + def actual_path(filename): + """The actual path for non-Windows platforms.""" + return filename + + +def abs_file(filename): + """Return the absolute normalized form of `filename`.""" + path = os.path.expandvars(os.path.expanduser(filename)) + path = os.path.abspath(os.path.realpath(path)) + path = actual_path(path) + return path + + +def isabs_anywhere(filename): + """Is `filename` an absolute path on any OS?""" + return ntpath.isabs(filename) or posixpath.isabs(filename) + + +def prep_patterns(patterns): + """Prepare the file patterns for use in a `FnmatchMatcher`. + + If a pattern starts with a wildcard, it is used as a pattern + as-is. If it does not start with a wildcard, then it is made + absolute with the current directory. + + If `patterns` is None, an empty list is returned. + + """ + prepped = [] + for p in patterns or []: + if p.startswith("*") or p.startswith("?"): + prepped.append(p) + else: + prepped.append(abs_file(p)) + return prepped + + +class TreeMatcher(object): + """A matcher for files in a tree.""" + def __init__(self, directories): + self.dirs = directories[:] + + def __repr__(self): + return "<TreeMatcher %r>" % self.dirs + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.dirs + + def add(self, directory): + """Add another directory to the list we match for.""" + self.dirs.append(directory) + + def match(self, fpath): + """Does `fpath` indicate a file in one of our trees?""" + for d in self.dirs: + if fpath.startswith(d): + if fpath == d: + # This is the same file! 
+ return True + if fpath[len(d)] == os.sep: + # This is a file in the directory + return True + return False + + +class FnmatchMatcher(object): + """A matcher for files by filename pattern.""" + def __init__(self, pats): + self.pats = pats[:] + + def __repr__(self): + return "<FnmatchMatcher %r>" % self.pats + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.pats + + def match(self, fpath): + """Does `fpath` match one of our filename patterns?""" + for pat in self.pats: + if fnmatch.fnmatch(fpath, pat): + return True + return False + + +def sep(s): + """Find the path separator used in this string, or os.sep if none.""" + sep_match = re.search(r"[\\/]", s) + if sep_match: + the_sep = sep_match.group(0) + else: + the_sep = os.sep + return the_sep + + +class PathAliases(object): + """A collection of aliases for paths. + + When combining data files from remote machines, often the paths to source + code are different, for example, due to OS differences, or because of + serialized checkouts on continuous integration machines. + + A `PathAliases` object tracks a list of pattern/result pairs, and can + map a path through those aliases to produce a unified path. + + `locator` is a FileLocator that is used to canonicalize the results. + + """ + def __init__(self, locator=None): + self.aliases = [] + self.locator = locator + + def add(self, pattern, result): + """Add the `pattern`/`result` pair to the list of aliases. + + `pattern` is an `fnmatch`-style pattern. `result` is a simple + string. When mapping paths, if a path starts with a match against + `pattern`, then that match is replaced with `result`. This models + isomorphic source trees being rooted at different places on two + different machines. + + `pattern` can't end with a wildcard component, since that would + match an entire tree, and not just its root. + + """ + # The pattern can't end with a wildcard component. + pattern = pattern.rstrip(r"\/") + if pattern.endswith("*"): + raise CoverageException("Pattern must not end with wildcards.") + pattern_sep = sep(pattern) + + # The pattern is meant to match a filepath. Let's make it absolute + # unless it already is, or is meant to match any prefix. + if not pattern.startswith('*') and not isabs_anywhere(pattern): + pattern = abs_file(pattern) + pattern += pattern_sep + + # Make a regex from the pattern. fnmatch always adds a \Z or $ to + # match the whole string, which we don't want. + regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(') + if regex_pat.endswith("$"): + regex_pat = regex_pat[:-1] + # We want */a/b.py to match on Windows too, so change slash to match + # either separator. + regex_pat = regex_pat.replace(r"\/", r"[\\/]") + # We want case-insensitive matching, so add that flag. + regex = re.compile(r"(?i)" + regex_pat) + + # Normalize the result: it must end with a path separator. + result_sep = sep(result) + result = result.rstrip(r"\/") + result_sep + self.aliases.append((regex, result, pattern_sep, result_sep)) + + def map(self, path): + """Map `path` through the aliases. + + `path` is checked against all of the patterns. The first pattern to + match is used to replace the root of the path with the result root. + Only one pattern is ever used. If no patterns match, `path` is + returned unchanged. + + The separator style in the result is made to match that of the result + in the alias. 
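+
+ For example (an illustrative pair): after add('/ci/jenkins/build', './src'),
+ mapping '/ci/jenkins/build/proj/main.py' produces './src/proj/main.py'.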
+ + """ + for regex, result, pattern_sep, result_sep in self.aliases: + m = regex.match(path) + if m: + new = path.replace(m.group(0), result) + if pattern_sep != result_sep: + new = new.replace(pattern_sep, result_sep) + if self.locator: + new = self.locator.canonical_filename(new) + return new + return path + + +def find_python_files(dirname): + """Yield all of the importable Python files in `dirname`, recursively. + + To be importable, the files have to be in a directory with a __init__.py, + except for `dirname` itself, which isn't required to have one. The + assumption is that `dirname` was specified directly, so the user knows + best, but subdirectories are checked for a __init__.py to be sure we only + find the importable files. + + """ + for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): + if i > 0 and '__init__.py' not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue + for filename in filenames: + # We're only interested in files that look like reasonable Python + # files: Must end with .py or .pyw, and must not have certain funny + # characters that probably mean they are editor junk. + if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): + yield os.path.join(dirpath, filename) diff --git a/third_party/pycoverage/coverage/fullcoverage/encodings.py b/third_party/pycoverage/coverage/fullcoverage/encodings.py new file mode 100644 index 0000000..6a258d6 --- /dev/null +++ b/third_party/pycoverage/coverage/fullcoverage/encodings.py @@ -0,0 +1,57 @@ +"""Imposter encodings module that installs a coverage-style tracer. + +This is NOT the encodings module; it is an imposter that sets up tracing +instrumentation and then replaces itself with the real encodings module. + +If the directory that holds this file is placed first in the PYTHONPATH when +using "coverage" to run Python's tests, then this file will become the very +first module imported by the internals of Python 3. It installs a +coverage-compatible trace function that can watch Standard Library modules +execute from the very earliest stages of Python's own boot process. This fixes +a problem with coverage - that it starts too late to trace the coverage of many +of the most fundamental modules in the Standard Library. + +""" + +import sys + +class FullCoverageTracer(object): + def __init__(self): + # `traces` is a list of trace events. Frames are tricky: the same + # frame object is used for a whole scope, with new line numbers + # written into it. So in one scope, all the frame objects are the + # same object, and will eventually all will point to the last line + # executed. So we keep the line numbers alongside the frames. + # The list looks like: + # + # traces = [ + # ((frame, event, arg), lineno), ... + # ] + # + self.traces = [] + + def fullcoverage_trace(self, *args): + frame, event, arg = args + self.traces.append((args, frame.f_lineno)) + return self.fullcoverage_trace + +sys.settrace(FullCoverageTracer().fullcoverage_trace) + +# In coverage/files.py is actual_filename(), which uses glob.glob. I don't +# understand why, but that use of glob borks everything if fullcoverage is in +# effect. So here we make an ugly hail-mary pass to switch off glob.glob over +# there. This means when using fullcoverage, Windows path names will not be +# their actual case. 
+ +#sys.fullcoverage = True + +# Finally, remove our own directory from sys.path; remove ourselves from +# sys.modules; and re-import "encodings", which will be the real package +# this time. Note that the delete from sys.modules dictionary has to +# happen last, since all of the symbols in this module will become None +# at that exact moment, including "sys". + +parentdir = max(filter(__file__.startswith, sys.path), key=len) +sys.path.remove(parentdir) +del sys.modules['encodings'] +import encodings diff --git a/third_party/pycoverage/coverage/html.py b/third_party/pycoverage/coverage/html.py new file mode 100644 index 0000000..5242236c --- /dev/null +++ b/third_party/pycoverage/coverage/html.py @@ -0,0 +1,396 @@ +"""HTML reporting for Coverage.""" + +import os, re, shutil, sys + +import coverage +from coverage.backward import pickle +from coverage.misc import CoverageException, Hasher +from coverage.phystokens import source_token_lines, source_encoding +from coverage.report import Reporter +from coverage.results import Numbers +from coverage.templite import Templite + + +# Static files are looked for in a list of places. +STATIC_PATH = [ + # The place Debian puts system Javascript libraries. + "/usr/share/javascript", + + # Our htmlfiles directory. + os.path.join(os.path.dirname(__file__), "htmlfiles"), +] + +def data_filename(fname, pkgdir=""): + """Return the path to a data file of ours. + + The file is searched for on `STATIC_PATH`, and the first place it's found, + is returned. + + Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir` + is provided, at that subdirectory. + + """ + for static_dir in STATIC_PATH: + static_filename = os.path.join(static_dir, fname) + if os.path.exists(static_filename): + return static_filename + if pkgdir: + static_filename = os.path.join(static_dir, pkgdir, fname) + if os.path.exists(static_filename): + return static_filename + raise CoverageException("Couldn't find static file %r" % fname) + + +def data(fname): + """Return the contents of a data file of ours.""" + data_file = open(data_filename(fname)) + try: + return data_file.read() + finally: + data_file.close() + + +class HtmlReporter(Reporter): + """HTML reporting.""" + + # These files will be copied from the htmlfiles dir to the output dir. + STATIC_FILES = [ + ("style.css", ""), + ("jquery.min.js", "jquery"), + ("jquery.hotkeys.js", "jquery-hotkeys"), + ("jquery.isonscreen.js", "jquery-isonscreen"), + ("jquery.tablesorter.min.js", "jquery-tablesorter"), + ("coverage_html.js", ""), + ("keybd_closed.png", ""), + ("keybd_open.png", ""), + ] + + def __init__(self, cov, config): + super(HtmlReporter, self).__init__(cov, config) + self.directory = None + self.template_globals = { + 'escape': escape, + 'title': self.config.html_title, + '__url__': coverage.__url__, + '__version__': coverage.__version__, + } + self.source_tmpl = Templite( + data("pyfile.html"), self.template_globals + ) + + self.coverage = cov + + self.files = [] + self.arcs = self.coverage.data.has_arcs() + self.status = HtmlStatus() + self.extra_css = None + self.totals = Numbers() + + def report(self, morfs): + """Generate an HTML report for `morfs`. + + `morfs` is a list of modules or filenames. + + """ + assert self.config.html_dir, "must give a directory for html reporting" + + # Read the status data. + self.status.read(self.config.html_dir) + + # Check that this run used the same settings as the last run. 
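+ # If they differ, the cached per-file hashes are discarded below so that
+ # every page is regenerated instead of being incrementally reused.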
+ m = Hasher() + m.update(self.config) + these_settings = m.digest() + if self.status.settings_hash() != these_settings: + self.status.reset() + self.status.set_settings_hash(these_settings) + + # The user may have extra CSS they want copied. + if self.config.extra_css: + self.extra_css = os.path.basename(self.config.extra_css) + + # Process all the files. + self.report_files(self.html_file, morfs, self.config.html_dir) + + if not self.files: + raise CoverageException("No data to report.") + + # Write the index file. + self.index_file() + + self.make_local_static_report_files() + + return self.totals.pc_covered + + def make_local_static_report_files(self): + """Make local instances of static files for HTML report.""" + # The files we provide must always be copied. + for static, pkgdir in self.STATIC_FILES: + shutil.copyfile( + data_filename(static, pkgdir), + os.path.join(self.directory, static) + ) + + # The user may have extra CSS they want copied. + if self.extra_css: + shutil.copyfile( + self.config.extra_css, + os.path.join(self.directory, self.extra_css) + ) + + def write_html(self, fname, html): + """Write `html` to `fname`, properly encoded.""" + fout = open(fname, "wb") + try: + fout.write(html.encode('ascii', 'xmlcharrefreplace')) + finally: + fout.close() + + def file_hash(self, source, cu): + """Compute a hash that changes if the file needs to be re-reported.""" + m = Hasher() + m.update(source) + self.coverage.data.add_to_hash(cu.filename, m) + return m.digest() + + def html_file(self, cu, analysis): + """Generate an HTML file for one source file.""" + source_file = cu.source_file() + try: + source = source_file.read() + finally: + source_file.close() + + # Find out if the file on disk is already correct. + flat_rootname = cu.flat_rootname() + this_hash = self.file_hash(source, cu) + that_hash = self.status.file_hash(flat_rootname) + if this_hash == that_hash: + # Nothing has changed to require the file to be reported again. + self.files.append(self.status.index_info(flat_rootname)) + return + + self.status.set_file_hash(flat_rootname, this_hash) + + # If need be, determine the encoding of the source file. We use it + # later to properly write the HTML. + if sys.version_info < (3, 0): + encoding = source_encoding(source) + # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it. + if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf": + source = source[3:] + encoding = "utf-8" + + # Get the numbers for this file. + nums = analysis.numbers + + if self.arcs: + missing_branch_arcs = analysis.missing_branch_arcs() + + # These classes determine which lines are highlighted by default. + c_run = "run hide_run" + c_exc = "exc" + c_mis = "mis" + c_par = "par " + c_run + + lines = [] + + for lineno, line in enumerate(source_token_lines(source)): + lineno += 1 # 1-based line numbers. + # Figure out how to mark this line. 
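+ # Lines are tagged with CSS classes: "stm" for executable statements,
+ # plus one of "exc" (excluded), "mis" (missing), "par" (partial branch)
+ # or "run" (executed), which the report's toggle buttons show or hide.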
+ line_class = [] + annotate_html = "" + annotate_title = "" + if lineno in analysis.statements: + line_class.append("stm") + if lineno in analysis.excluded: + line_class.append(c_exc) + elif lineno in analysis.missing: + line_class.append(c_mis) + elif self.arcs and lineno in missing_branch_arcs: + line_class.append(c_par) + annlines = [] + for b in missing_branch_arcs[lineno]: + if b < 0: + annlines.append("exit") + else: + annlines.append(str(b)) + annotate_html = " ".join(annlines) + if len(annlines) > 1: + annotate_title = "no jumps to these line numbers" + elif len(annlines) == 1: + annotate_title = "no jump to this line number" + elif lineno in analysis.statements: + line_class.append(c_run) + + # Build the HTML for the line + html = [] + for tok_type, tok_text in line: + if tok_type == "ws": + html.append(escape(tok_text)) + else: + tok_html = escape(tok_text) or ' ' + html.append( + "<span class='%s'>%s</span>" % (tok_type, tok_html) + ) + + lines.append({ + 'html': ''.join(html), + 'number': lineno, + 'class': ' '.join(line_class) or "pln", + 'annotate': annotate_html, + 'annotate_title': annotate_title, + }) + + # Write the HTML page for this file. + html = spaceless(self.source_tmpl.render({ + 'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run, + 'arcs': self.arcs, 'extra_css': self.extra_css, + 'cu': cu, 'nums': nums, 'lines': lines, + })) + + if sys.version_info < (3, 0): + html = html.decode(encoding) + + html_filename = flat_rootname + ".html" + html_path = os.path.join(self.directory, html_filename) + self.write_html(html_path, html) + + # Save this file's information for the index file. + index_info = { + 'nums': nums, + 'html_filename': html_filename, + 'name': cu.name, + } + self.files.append(index_info) + self.status.set_index_info(flat_rootname, index_info) + + def index_file(self): + """Write the index.html file for this report.""" + index_tmpl = Templite( + data("index.html"), self.template_globals + ) + + self.totals = sum([f['nums'] for f in self.files]) + + html = index_tmpl.render({ + 'arcs': self.arcs, + 'extra_css': self.extra_css, + 'files': self.files, + 'totals': self.totals, + }) + + if sys.version_info < (3, 0): + html = html.decode("utf-8") + self.write_html( + os.path.join(self.directory, "index.html"), + html + ) + + # Write the latest hashes for next time. 
+ self.status.write(self.directory)
+
+
+class HtmlStatus(object):
+ """The status information we keep to support incremental reporting."""
+
+ STATUS_FILE = "status.dat"
+ STATUS_FORMAT = 1
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ """Initialize to empty."""
+ self.settings = ''
+ self.files = {}
+
+ def read(self, directory):
+ """Read the last status in `directory`."""
+ usable = False
+ try:
+ status_file = os.path.join(directory, self.STATUS_FILE)
+ fstatus = open(status_file, "rb")
+ try:
+ status = pickle.load(fstatus)
+ finally:
+ fstatus.close()
+ except (IOError, ValueError):
+ usable = False
+ else:
+ usable = True
+ if status['format'] != self.STATUS_FORMAT:
+ usable = False
+ elif status['version'] != coverage.__version__:
+ usable = False
+
+ if usable:
+ self.files = status['files']
+ self.settings = status['settings']
+ else:
+ self.reset()
+
+ def write(self, directory):
+ """Write the current status to `directory`."""
+ status_file = os.path.join(directory, self.STATUS_FILE)
+ status = {
+ 'format': self.STATUS_FORMAT,
+ 'version': coverage.__version__,
+ 'settings': self.settings,
+ 'files': self.files,
+ }
+ fout = open(status_file, "wb")
+ try:
+ pickle.dump(status, fout)
+ finally:
+ fout.close()
+
+ def settings_hash(self):
+ """Get the hash of the coverage.py settings."""
+ return self.settings
+
+ def set_settings_hash(self, settings):
+ """Set the hash of the coverage.py settings."""
+ self.settings = settings
+
+ def file_hash(self, fname):
+ """Get the hash of `fname`'s contents."""
+ return self.files.get(fname, {}).get('hash', '')
+
+ def set_file_hash(self, fname, val):
+ """Set the hash of `fname`'s contents."""
+ self.files.setdefault(fname, {})['hash'] = val
+
+ def index_info(self, fname):
+ """Get the information for index.html for `fname`."""
+ return self.files.get(fname, {}).get('index', {})
+
+ def set_index_info(self, fname, info):
+ """Set the information for index.html for `fname`."""
+ self.files.setdefault(fname, {})['index'] = info
+
+
+# Helpers for templates and generating HTML
+
+def escape(t):
+ """HTML-escape the text in `t`."""
+ return (t
+ # Convert HTML special chars into HTML entities.
+ .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+ .replace("'", "&#39;").replace('"', "&quot;")
+ # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
+ .replace("  ", "&nbsp; ")
+ # To deal with odd-length runs, convert the final pair of spaces
+ # so that "....." -> "&nbsp;.&nbsp;.."
+ .replace("  ", " &nbsp;")
+ )
+
+def spaceless(html):
+ """Squeeze out some annoying extra space from an HTML string.
+
+ Nicely-formatted templates mean lots of extra space in the result.
+ Get rid of some.
+
+ """
+ html = re.sub(r">\s+<p ", ">\n<p ", html)
+ return html
diff --git a/third_party/pycoverage/coverage/htmlfiles/coverage_html.js b/third_party/pycoverage/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 0000000..b24006d
--- /dev/null
+++ b/third_party/pycoverage/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,376 @@
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
+/*global coverage: true, document, window, $ */
+
+coverage = {};
+
+// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
+coverage.assign_shortkeys = function () { + $("*[class*='shortkey_']").each(function (i, e) { + $.each($(e).attr("class").split(" "), function (i, c) { + if (/^shortkey_/.test(c)) { + $(document).bind('keydown', c.substr(9), function () { + $(e).click(); + }); + } + }); + }); +}; + +// Create the events for the help panel. +coverage.wire_up_help_panel = function () { + $("#keyboard_icon").click(function () { + // Show the help panel, and position it so the keyboard icon in the + // panel is in the same place as the keyboard icon in the header. + $(".help_panel").show(); + var koff = $("#keyboard_icon").offset(); + var poff = $("#panel_icon").position(); + $(".help_panel").offset({ + top: koff.top-poff.top, + left: koff.left-poff.left + }); + }); + $("#panel_icon").click(function () { + $(".help_panel").hide(); + }); +}; + +// Loaded on index.html +coverage.index_ready = function ($) { + // Look for a cookie containing previous sort settings: + var sort_list = []; + var cookie_name = "COVERAGE_INDEX_SORT"; + var i; + + // This almost makes it worth installing the jQuery cookie plugin: + if (document.cookie.indexOf(cookie_name) > -1) { + var cookies = document.cookie.split(";"); + for (i = 0; i < cookies.length; i++) { + var parts = cookies[i].split("="); + + if ($.trim(parts[0]) === cookie_name && parts[1]) { + sort_list = eval("[[" + parts[1] + "]]"); + break; + } + } + } + + // Create a new widget which exists only to save and restore + // the sort order: + $.tablesorter.addWidget({ + id: "persistentSort", + + // Format is called by the widget before displaying: + format: function (table) { + if (table.config.sortList.length === 0 && sort_list.length > 0) { + // This table hasn't been sorted before - we'll use + // our stored settings: + $(table).trigger('sorton', [sort_list]); + } + else { + // This is not the first load - something has + // already defined sorting so we'll just update + // our stored value to match: + sort_list = table.config.sortList; + } + } + }); + + // Configure our tablesorter to handle the variable number of + // columns produced depending on report options: + var headers = []; + var col_count = $("table.index > thead > tr > th").length; + + headers[0] = { sorter: 'text' }; + for (i = 1; i < col_count-1; i++) { + headers[i] = { sorter: 'digit' }; + } + headers[col_count-1] = { sorter: 'percent' }; + + // Enable the table sorter: + $("table.index").tablesorter({ + widgets: ['persistentSort'], + headers: headers + }); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + + // Watch for page unload events so we can save the final sort settings: + $(window).unload(function () { + document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/"; + }); +}; + +// -- pyfile stuff -- + +coverage.pyfile_ready = function ($) { + // If we're directed to a particular line number, highlight the line. 
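+// The fragment has the form "#nNN" (e.g. "#n42" for line 42), matching the
+// id attributes of the line-number elements.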
+ var frag = location.hash; + if (frag.length > 2 && frag[1] === 'n') { + $(frag).addClass('highlight'); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + $(document) + .bind('keydown', 'j', coverage.to_next_chunk_nicely) + .bind('keydown', 'k', coverage.to_prev_chunk_nicely) + .bind('keydown', '0', coverage.to_top) + .bind('keydown', '1', coverage.to_first_chunk) + ; + + $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");}); + $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");}); + $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");}); + $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");}); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); +}; + +coverage.toggle_lines = function (btn, cls) { + btn = $(btn); + var hide = "hide_"+cls; + if (btn.hasClass(hide)) { + $("#source ."+cls).removeClass(hide); + btn.removeClass(hide); + } + else { + $("#source ."+cls).addClass(hide); + btn.addClass(hide); + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return $("#t" + n); +}; + +// Return the nth line number div. +coverage.num_elt = function (n) { + return $("#n" + n); +}; + +// Return the container of all the code. +coverage.code_container = function () { + return $(".linenos"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.is_transparent = function (color) { + // Different browsers return different colors for "none". + return color === "transparent" || color === "rgba(0, 0, 0, 0)"; +}; + +coverage.to_next_chunk = function () { + var c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + while (true) { + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var color = probe_line.css("background-color"); + if (!c.is_transparent(color)) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_color = color; + while (next_color === color) { + probe++; + probe_line = c.line_elt(probe); + next_color = probe_line.css("background-color"); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + var c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var color = probe_line.css("background-color"); + while (probe > 0 && c.is_transparent(color)) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + color = probe_line.css("background-color"); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. 
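+ // Walk backward while the background color stays the same; the chunk begins on the line after the color changes.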
+ var prev_color = color; + while (prev_color === color) { + probe--; + probe_line = c.line_elt(probe); + prev_color = probe_line.css("background-color"); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Return the line number of the line nearest pixel position pos +coverage.line_at_pos = function (pos) { + var l1 = coverage.line_elt(1), + l2 = coverage.line_elt(2), + result; + if (l1.length && l2.length) { + var l1_top = l1.offset().top, + line_height = l2.offset().top - l1_top, + nlines = (pos - l1_top) / line_height; + if (nlines < 1) { + result = 1; + } + else { + result = Math.ceil(nlines); + } + } + else { + result = 1; + } + return result; +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + var top = coverage.line_elt(coverage.sel_begin); + var next = coverage.line_elt(coverage.sel_end-1); + + return ( + (top.isOnScreen() ? 1 : 0) + + (next.isOnScreen() ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: select the top line on + // the screen. + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop())); + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height())); + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (probe_line.length === 0) { + return; + } + var the_color = probe_line.css("background-color"); + if (!c.is_transparent(the_color)) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var color = the_color; + while (probe > 0 && color === the_color) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + break; + } + color = probe_line.css("background-color"); + } + var begin = probe + 1; + + // Search forward for the last line. + probe = lineno; + color = the_color; + while (color === the_color) { + probe++; + probe_line = c.line_elt(probe); + color = probe_line.css("background-color"); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + var c = coverage; + + // Highlight the lines in the chunk + c.code_container().find(".highlight").removeClass("highlight"); + for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) { + c.num_elt(probe).addClass("highlight"); + } + + c.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + // Need to move the page. 
The html,body trick makes it scroll in all + // browsers, got it from http://stackoverflow.com/questions/3042651 + var top = coverage.line_elt(coverage.sel_begin); + var top_pos = parseInt(top.offset().top, 10); + coverage.scroll_window(top_pos - 30); + } +}; + +coverage.scroll_window = function (to_pos) { + $("html,body").animate({scrollTop: to_pos}, 200); +}; + +coverage.finish_scrolling = function () { + $("html,body").stop(true, true); +}; diff --git a/third_party/pycoverage/coverage/htmlfiles/index.html b/third_party/pycoverage/coverage/htmlfiles/index.html new file mode 100644 index 0000000..c831823 --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/index.html @@ -0,0 +1,104 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> +<html> +<head> + <meta http-equiv='Content-Type' content='text/html; charset=utf-8'> + <title>{{ title|escape }}</title> + <link rel='stylesheet' href='style.css' type='text/css'> + {% if extra_css %} + <link rel='stylesheet' href='{{ extra_css }}' type='text/css'> + {% endif %} + <script type='text/javascript' src='jquery.min.js'></script> + <script type='text/javascript' src='jquery.tablesorter.min.js'></script> + <script type='text/javascript' src='jquery.hotkeys.js'></script> + <script type='text/javascript' src='coverage_html.js'></script> + <script type='text/javascript' charset='utf-8'> + jQuery(document).ready(coverage.index_ready); + </script> +</head> +<body id='indexfile'> + +<div id='header'> + <div class='content'> + <h1>{{ title|escape }}: + <span class='pc_cov'>{{totals.pc_covered_str}}%</span> + </h1> + <img id='keyboard_icon' src='keybd_closed.png'> + </div> +</div> + +<div class='help_panel'> + <img id='panel_icon' src='keybd_open.png'> + <p class='legend'>Hot-keys on this page</p> + <div> + <p class='keyhelp'> + <span class='key'>n</span> + <span class='key'>s</span> + <span class='key'>m</span> + <span class='key'>x</span> + {% if arcs %} + <span class='key'>b</span> + <span class='key'>p</span> + {% endif %} + <span class='key'>c</span> change column sorting + </p> + </div> +</div> + +<div id='index'> + <table class='index'> + <thead> + {# The title='' attr doesn't work in Safari. 
#} + <tr class='tablehead' title='Click to sort'> + <th class='name left headerSortDown shortkey_n'>Module</th> + <th class='shortkey_s'>statements</th> + <th class='shortkey_m'>missing</th> + <th class='shortkey_x'>excluded</th> + {% if arcs %} + <th class='shortkey_b'>branches</th> + <th class='shortkey_p'>partial</th> + {% endif %} + <th class='right shortkey_c'>coverage</th> + </tr> + </thead> + {# HTML syntax requires thead, tfoot, tbody #} + <tfoot> + <tr class='total'> + <td class='name left'>Total</td> + <td>{{totals.n_statements}}</td> + <td>{{totals.n_missing}}</td> + <td>{{totals.n_excluded}}</td> + {% if arcs %} + <td>{{totals.n_branches}}</td> + <td>{{totals.n_partial_branches}}</td> + {% endif %} + <td class='right'>{{totals.pc_covered_str}}%</td> + </tr> + </tfoot> + <tbody> + {% for file in files %} + <tr class='file'> + <td class='name left'><a href='{{file.html_filename}}'>{{file.name}}</a></td> + <td>{{file.nums.n_statements}}</td> + <td>{{file.nums.n_missing}}</td> + <td>{{file.nums.n_excluded}}</td> + {% if arcs %} + <td>{{file.nums.n_branches}}</td> + <td>{{file.nums.n_partial_branches}}</td> + {% endif %} + <td class='right'>{{file.nums.pc_covered_str}}%</td> + </tr> + {% endfor %} + </tbody> + </table> +</div> + +<div id='footer'> + <div class='content'> + <p> + <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a> + </p> + </div> +</div> + +</body> +</html> diff --git a/third_party/pycoverage/coverage/htmlfiles/jquery.hotkeys.js b/third_party/pycoverage/coverage/htmlfiles/jquery.hotkeys.js new file mode 100644 index 0000000..09b21e03c --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/jquery.hotkeys.js @@ -0,0 +1,99 @@ +/* + * jQuery Hotkeys Plugin + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. 
+ * + * Based upon the plugin by Tzury Bar Yochay: + * http://github.com/tzuryby/hotkeys + * + * Original idea by: + * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/ +*/ + +(function(jQuery){ + + jQuery.hotkeys = { + version: "0.8", + + specialKeys: { + 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause", + 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home", + 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del", + 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7", + 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/", + 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8", + 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta" + }, + + shiftNums: { + "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&", + "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<", + ".": ">", "/": "?", "\\": "|" + } + }; + + function keyHandler( handleObj ) { + // Only care when a possible input has been specified + if ( typeof handleObj.data !== "string" ) { + return; + } + + var origHandler = handleObj.handler, + keys = handleObj.data.toLowerCase().split(" "); + + handleObj.handler = function( event ) { + // Don't fire in text-accepting inputs that we didn't directly bind to + if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) || + event.target.type === "text") ) { + return; + } + + // Keypress represents characters, not special keys + var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ], + character = String.fromCharCode( event.which ).toLowerCase(), + key, modif = "", possible = {}; + + // check combinations (alt|ctrl|shift+anything) + if ( event.altKey && special !== "alt" ) { + modif += "alt+"; + } + + if ( event.ctrlKey && special !== "ctrl" ) { + modif += "ctrl+"; + } + + // TODO: Need to make sure this works consistently across platforms + if ( event.metaKey && !event.ctrlKey && special !== "meta" ) { + modif += "meta+"; + } + + if ( event.shiftKey && special !== "shift" ) { + modif += "shift+"; + } + + if ( special ) { + possible[ modif + special ] = true; + + } else { + possible[ modif + character ] = true; + possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true; + + // "$" can be triggered as "Shift+4" or "Shift+$" or just "$" + if ( modif === "shift+" ) { + possible[ jQuery.hotkeys.shiftNums[ character ] ] = true; + } + } + + for ( var i = 0, l = keys.length; i < l; i++ ) { + if ( possible[ keys[i] ] ) { + return origHandler.apply( this, arguments ); + } + } + }; + } + + jQuery.each([ "keydown", "keyup", "keypress" ], function() { + jQuery.event.special[ this ] = { add: keyHandler }; + }); + +})( jQuery ); diff --git a/third_party/pycoverage/coverage/htmlfiles/jquery.isonscreen.js b/third_party/pycoverage/coverage/htmlfiles/jquery.isonscreen.js new file mode 100644 index 0000000..0182ebd --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/jquery.isonscreen.js @@ -0,0 +1,53 @@ +/* Copyright (c) 2010 + * @author Laurence Wheway + * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php) + * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses. 
+ * + * @version 1.2.0 + */ +(function($) { + jQuery.extend({ + isOnScreen: function(box, container) { + // ensure numbers come in as integers (not strings) and remove 'px' if it's there + for(var i in box){box[i] = parseFloat(box[i])}; + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( box.left+box.width-container.left > 0 && + box.left < container.width+container.left && + box.top+box.height-container.top > 0 && + box.top < container.height+container.top + ) return true; + return false; + } + }) + + + jQuery.fn.isOnScreen = function (container) { + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( $(this).offset().left+$(this).width()-container.left > 0 && + $(this).offset().left < container.width+container.left && + $(this).offset().top+$(this).height()-container.top > 0 && + $(this).offset().top < container.height+container.top + ) return true; + return false; + } +})(jQuery); diff --git a/third_party/pycoverage/coverage/htmlfiles/jquery.min.js b/third_party/pycoverage/coverage/htmlfiles/jquery.min.js new file mode 100644 index 0000000..c941a5f --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/jquery.min.js @@ -0,0 +1,166 @@ +/*! + * jQuery JavaScript Library v1.4.3 + * http://jquery.com/ + * + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * Copyright 2010, The Dojo Foundation + * Released under the MIT, BSD, and GPL Licenses.
+ * + * Date: Thu Oct 14 23:10:06 2010 -0400 + */ +(function(E,A){function U(){return false}function ba(){return true}function ja(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ga(a){var b,d,e=[],f=[],h,k,l,n,s,v,B,D;k=c.data(this,this.nodeType?"events":"__events__");if(typeof k==="function")k=k.events;if(!(a.liveFired===this||!k||!k.live||a.button&&a.type==="click")){if(a.namespace)D=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var H=k.live.slice(0);for(n=0;n<H.length;n++){k=H[n];k.origType.replace(X, +"")===a.type?f.push(k.selector):H.splice(n--,1)}f=c(a.target).closest(f,a.currentTarget);s=0;for(v=f.length;s<v;s++){B=f[s];for(n=0;n<H.length;n++){k=H[n];if(B.selector===k.selector&&(!D||D.test(k.namespace))){l=B.elem;h=null;if(k.preType==="mouseenter"||k.preType==="mouseleave"){a.type=k.preType;h=c(a.relatedTarget).closest(k.selector)[0]}if(!h||h!==l)e.push({elem:l,handleObj:k,level:B.level})}}}s=0;for(v=e.length;s<v;s++){f=e[s];if(d&&f.level>d)break;a.currentTarget=f.elem;a.data=f.handleObj.data; +a.handleObj=f.handleObj;D=f.handleObj.origHandler.apply(f.elem,arguments);if(D===false||a.isPropagationStopped()){d=f.level;if(D===false)b=false}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(Ha,"`").replace(Ia,"&")}function ka(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Ja.test(b))return c.filter(b, +e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function la(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var k in e[h])c.event.add(this,h,e[h][k],e[h][k].data)}}})}function Ka(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)} +function ma(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?La:Ma,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function ca(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Na.test(a)?e(a,h):ca(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)? 
+e(a,""):c.each(b,function(f,h){ca(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(na.concat.apply([],na.slice(0,b)),function(){d[this]=a});return d}function oa(a){if(!da[a]){var b=c("<"+a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";da[a]=d}return da[a]}function ea(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var u=E.document,c=function(){function a(){if(!b.isReady){try{u.documentElement.doScroll("left")}catch(i){setTimeout(a, +1);return}b.ready()}}var b=function(i,r){return new b.fn.init(i,r)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,k=/\S/,l=/^\s+/,n=/\s+$/,s=/\W/,v=/\d/,B=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,D=/^[\],:{}\s]*$/,H=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,G=/(?:^|:|,)(?:\s*\[)+/g,M=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,j=/(msie) ([\w.]+)/,o=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false, +q=[],t,x=Object.prototype.toString,C=Object.prototype.hasOwnProperty,P=Array.prototype.push,N=Array.prototype.slice,R=String.prototype.trim,Q=Array.prototype.indexOf,L={};b.fn=b.prototype={init:function(i,r){var y,z,F;if(!i)return this;if(i.nodeType){this.context=this[0]=i;this.length=1;return this}if(i==="body"&&!r&&u.body){this.context=u;this[0]=u.body;this.selector="body";this.length=1;return this}if(typeof i==="string")if((y=h.exec(i))&&(y[1]||!r))if(y[1]){F=r?r.ownerDocument||r:u;if(z=B.exec(i))if(b.isPlainObject(r)){i= +[u.createElement(z[1])];b.fn.attr.call(i,r,true)}else i=[F.createElement(z[1])];else{z=b.buildFragment([y[1]],[F]);i=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,i)}else{if((z=u.getElementById(y[2]))&&z.parentNode){if(z.id!==y[2])return f.find(i);this.length=1;this[0]=z}this.context=u;this.selector=i;return this}else if(!r&&!s.test(i)){this.selector=i;this.context=u;i=u.getElementsByTagName(i);return b.merge(this,i)}else return!r||r.jquery?(r||f).find(i):b(r).find(i); +else if(b.isFunction(i))return f.ready(i);if(i.selector!==A){this.selector=i.selector;this.context=i.context}return b.makeArray(i,this)},selector:"",jquery:"1.4.3",length:0,size:function(){return this.length},toArray:function(){return N.call(this,0)},get:function(i){return i==null?this.toArray():i<0?this.slice(i)[0]:this[i]},pushStack:function(i,r,y){var z=b();b.isArray(i)?P.apply(z,i):b.merge(z,i);z.prevObject=this;z.context=this.context;if(r==="find")z.selector=this.selector+(this.selector?" 
": +"")+y;else if(r)z.selector=this.selector+"."+r+"("+y+")";return z},each:function(i,r){return b.each(this,i,r)},ready:function(i){b.bindReady();if(b.isReady)i.call(u,b);else q&&q.push(i);return this},eq:function(i){return i===-1?this.slice(i):this.slice(i,+i+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(i){return this.pushStack(b.map(this,function(r,y){return i.call(r, +y,r)}))},end:function(){return this.prevObject||b(null)},push:P,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var i=arguments[0]||{},r=1,y=arguments.length,z=false,F,I,K,J,fa;if(typeof i==="boolean"){z=i;i=arguments[1]||{};r=2}if(typeof i!=="object"&&!b.isFunction(i))i={};if(y===r){i=this;--r}for(;r<y;r++)if((F=arguments[r])!=null)for(I in F){K=i[I];J=F[I];if(i!==J)if(z&&J&&(b.isPlainObject(J)||(fa=b.isArray(J)))){if(fa){fa=false;clone=K&&b.isArray(K)?K:[]}else clone= +K&&b.isPlainObject(K)?K:{};i[I]=b.extend(z,clone,J)}else if(J!==A)i[I]=J}return i};b.extend({noConflict:function(i){E.$=e;if(i)E.jQuery=d;return b},isReady:false,readyWait:1,ready:function(i){i===true&&b.readyWait--;if(!b.readyWait||i!==true&&!b.isReady){if(!u.body)return setTimeout(b.ready,1);b.isReady=true;if(!(i!==true&&--b.readyWait>0)){if(q){for(var r=0;i=q[r++];)i.call(u,b);q=null}b.fn.triggerHandler&&b(u).triggerHandler("ready")}}},bindReady:function(){if(!p){p=true;if(u.readyState==="complete")return setTimeout(b.ready, +1);if(u.addEventListener){u.addEventListener("DOMContentLoaded",t,false);E.addEventListener("load",b.ready,false)}else if(u.attachEvent){u.attachEvent("onreadystatechange",t);E.attachEvent("onload",b.ready);var i=false;try{i=E.frameElement==null}catch(r){}u.documentElement.doScroll&&i&&a()}}},isFunction:function(i){return b.type(i)==="function"},isArray:Array.isArray||function(i){return b.type(i)==="array"},isWindow:function(i){return i&&typeof i==="object"&&"setInterval"in i},isNaN:function(i){return i== +null||!v.test(i)||isNaN(i)},type:function(i){return i==null?String(i):L[x.call(i)]||"object"},isPlainObject:function(i){if(!i||b.type(i)!=="object"||i.nodeType||b.isWindow(i))return false;if(i.constructor&&!C.call(i,"constructor")&&!C.call(i.constructor.prototype,"isPrototypeOf"))return false;for(var r in i);return r===A||C.call(i,r)},isEmptyObject:function(i){for(var r in i)return false;return true},error:function(i){throw i;},parseJSON:function(i){if(typeof i!=="string"||!i)return null;i=b.trim(i); +if(D.test(i.replace(H,"@").replace(w,"]").replace(G,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(i):(new Function("return "+i))();else b.error("Invalid JSON: "+i)},noop:function(){},globalEval:function(i){if(i&&k.test(i)){var r=u.getElementsByTagName("head")[0]||u.documentElement,y=u.createElement("script");y.type="text/javascript";if(b.support.scriptEval)y.appendChild(u.createTextNode(i));else y.text=i;r.insertBefore(y,r.firstChild);r.removeChild(y)}},nodeName:function(i,r){return i.nodeName&&i.nodeName.toUpperCase()=== +r.toUpperCase()},each:function(i,r,y){var z,F=0,I=i.length,K=I===A||b.isFunction(i);if(y)if(K)for(z in i){if(r.apply(i[z],y)===false)break}else for(;F<I;){if(r.apply(i[F++],y)===false)break}else if(K)for(z in i){if(r.call(i[z],z,i[z])===false)break}else for(y=i[0];F<I&&r.call(y,F,y)!==false;y=i[++F]);return i},trim:R?function(i){return i==null?"":R.call(i)}:function(i){return 
i==null?"":i.toString().replace(l,"").replace(n,"")},makeArray:function(i,r){var y=r||[];if(i!=null){var z=b.type(i);i.length== +null||z==="string"||z==="function"||z==="regexp"||b.isWindow(i)?P.call(y,i):b.merge(y,i)}return y},inArray:function(i,r){if(r.indexOf)return r.indexOf(i);for(var y=0,z=r.length;y<z;y++)if(r[y]===i)return y;return-1},merge:function(i,r){var y=i.length,z=0;if(typeof r.length==="number")for(var F=r.length;z<F;z++)i[y++]=r[z];else for(;r[z]!==A;)i[y++]=r[z++];i.length=y;return i},grep:function(i,r,y){var z=[],F;y=!!y;for(var I=0,K=i.length;I<K;I++){F=!!r(i[I],I);y!==F&&z.push(i[I])}return z},map:function(i, +r,y){for(var z=[],F,I=0,K=i.length;I<K;I++){F=r(i[I],I,y);if(F!=null)z[z.length]=F}return z.concat.apply([],z)},guid:1,proxy:function(i,r,y){if(arguments.length===2)if(typeof r==="string"){y=i;i=y[r];r=A}else if(r&&!b.isFunction(r)){y=r;r=A}if(!r&&i)r=function(){return i.apply(y||this,arguments)};if(i)r.guid=i.guid=i.guid||r.guid||b.guid++;return r},access:function(i,r,y,z,F,I){var K=i.length;if(typeof r==="object"){for(var J in r)b.access(i,J,r[J],z,F,y);return i}if(y!==A){z=!I&&z&&b.isFunction(y); +for(J=0;J<K;J++)F(i[J],r,z?y.call(i[J],J,F(i[J],r)):y,I);return i}return K?F(i[0],r):A},now:function(){return(new Date).getTime()},uaMatch:function(i){i=i.toLowerCase();i=M.exec(i)||g.exec(i)||j.exec(i)||i.indexOf("compatible")<0&&o.exec(i)||[];return{browser:i[1]||"",version:i[2]||"0"}},browser:{}});b.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(i,r){L["[object "+r+"]"]=r.toLowerCase()});m=b.uaMatch(m);if(m.browser){b.browser[m.browser]=true;b.browser.version= +m.version}if(b.browser.webkit)b.browser.safari=true;if(Q)b.inArray=function(i,r){return Q.call(r,i)};if(!/\s/.test("\u00a0")){l=/^[\s\xA0]+/;n=/[\s\xA0]+$/}f=b(u);if(u.addEventListener)t=function(){u.removeEventListener("DOMContentLoaded",t,false);b.ready()};else if(u.attachEvent)t=function(){if(u.readyState==="complete"){u.detachEvent("onreadystatechange",t);b.ready()}};return E.jQuery=E.$=b}();(function(){c.support={};var a=u.documentElement,b=u.createElement("script"),d=u.createElement("div"), +e="script"+c.now();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],k=u.createElement("select"),l=k.appendChild(u.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")), +hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:l.selected,optDisabled:false,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};k.disabled=true;c.support.optDisabled=!l.disabled;b.type="text/javascript";try{b.appendChild(u.createTextNode("window."+e+"=1;"))}catch(n){}a.insertBefore(b, +a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function s(){c.support.noCloneEvent=false;d.detachEvent("onclick",s)});d.cloneNode(true).fireEvent("onclick")}d=u.createElement("div");d.innerHTML="<input type='radio' name='radiotest' 
checked='checked'/>";a=u.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var s=u.createElement("div"); +s.style.width=s.style.paddingLeft="1px";u.body.appendChild(s);c.boxModel=c.support.boxModel=s.offsetWidth===2;if("zoom"in s.style){s.style.display="inline";s.style.zoom=1;c.support.inlineBlockNeedsLayout=s.offsetWidth===2;s.style.display="";s.innerHTML="<div style='width:4px;'></div>";c.support.shrinkWrapBlocks=s.offsetWidth!==2}s.innerHTML="<table><tr><td style='padding:0;display:none'></td><td>t</td></tr></table>";var v=s.getElementsByTagName("td");c.support.reliableHiddenOffsets=v[0].offsetHeight=== +0;v[0].style.display="";v[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&v[0].offsetHeight===0;s.innerHTML="";u.body.removeChild(s).style.display="none"});a=function(s){var v=u.createElement("div");s="on"+s;var B=s in v;if(!B){v.setAttribute(s,"return;");B=typeof v[s]==="function"}return B};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength", +cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var pa={},Oa=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?pa:a;var e=a.nodeType,f=e?a[c.expando]:null,h=c.cache;if(!(e&&!f&&typeof b==="string"&&d===A)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]= +c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==A)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?pa:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);else if(d)delete f[e];else for(var k in a)delete a[k]}},acceptData:function(a){if(a.nodeName){var b= +c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){if(typeof a==="undefined")return this.length?c.data(this[0]):null;else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===A){var e=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(e===A&&this.length){e=c.data(this[0],a);if(e===A&&this[0].nodeType===1){e=this[0].getAttribute("data-"+a);if(typeof e=== +"string")try{e=e==="true"?true:e==="false"?false:e==="null"?null:!c.isNaN(e)?parseFloat(e):Oa.test(e)?c.parseJSON(e):e}catch(f){}else e=A}}return e===A&&d[1]?this.data(d[0]):e}else return this.each(function(){var h=c(this),k=[d[0],b];h.triggerHandler("setData"+d[1]+"!",k);c.data(this,a,b);h.triggerHandler("changeData"+d[1]+"!",k)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=c.data(a,b);if(!d)return e|| +[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var 
d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===A)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this, +a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var qa=/[\n\t]/g,ga=/\s+/,Pa=/\r/g,Qa=/^(?:href|src|style)$/,Ra=/^(?:button|input)$/i,Sa=/^(?:button|input|object|select|textarea)$/i,Ta=/^a(?:rea)?$/i,ra=/^(?:radio|checkbox)$/i;c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this, +a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(s){var v=c(this);v.addClass(a.call(this,s,v.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1)if(f.className){for(var h=" "+f.className+" ",k=f.className,l=0,n=b.length;l<n;l++)if(h.indexOf(" "+b[l]+" ")<0)k+=" "+b[l];f.className=c.trim(k)}else f.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(n){var s= +c(this);s.removeClass(a.call(this,n,s.attr("class")))});if(a&&typeof a==="string"||a===A)for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1&&f.className)if(a){for(var h=(" "+f.className+" ").replace(qa," "),k=0,l=b.length;k<l;k++)h=h.replace(" "+b[k]+" "," ");f.className=c.trim(h)}else f.className=""}return this},toggleClass:function(a,b){var d=typeof a,e=typeof b==="boolean";if(c.isFunction(a))return this.each(function(f){var h=c(this);h.toggleClass(a.call(this, +f,h.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var f,h=0,k=c(this),l=b,n=a.split(ga);f=n[h++];){l=e?l:!k.hasClass(f);k[l?"addClass":"removeClass"](f)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(qa," ").indexOf(a)>-1)return true;return false}, +val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h<e;h++){var k=f[h];if(k.selected&&(c.support.optDisabled?!k.disabled:k.getAttribute("disabled")===null)&&(!k.parentNode.disabled||!c.nodeName(k.parentNode,"optgroup"))){a=c(k).val();if(b)return a;d.push(a)}}return d}if(ra.test(b.type)&& +!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Pa,"")}return A}var l=c.isFunction(a);return this.each(function(n){var s=c(this),v=a;if(this.nodeType===1){if(l)v=a.call(this,n,s.val());if(v==null)v="";else if(typeof v==="number")v+="";else if(c.isArray(v))v=c.map(v,function(D){return D==null?"":D+""});if(c.isArray(v)&&ra.test(this.type))this.checked=c.inArray(s.val(),v)>=0;else if(c.nodeName(this,"select")){var 
B=c.makeArray(v);c("option",this).each(function(){this.selected= +c.inArray(c(this).val(),B)>=0});if(!B.length)this.selectedIndex=-1}else this.value=v}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return A;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==A;b=e&&c.props[b]||b;if(a.nodeType===1){var h=Qa.test(b);if((b in a||a[b]!==A)&&e&&!h){if(f){b==="type"&&Ra.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed"); +if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:Sa.test(a.nodeName)||Ta.test(a.nodeName)&&a.href?0:A;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return A;a=!c.support.hrefNormalized&&e&& +h?a.getAttribute(b,2):a.getAttribute(b);return a===null?A:a}}});var X=/\.(.*)$/,ha=/^(?:textarea|input|select)$/i,Ha=/\./g,Ia=/ /g,Ua=/[^\w\s.|`]/g,Va=function(a){return a.replace(Ua,"\\$&")},sa={focusin:0,focusout:0};c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var k=a.nodeType?"events":"__events__",l=h[k],n=h.handle;if(typeof l=== +"function"){n=l.handle;l=l.events}else if(!l){a.nodeType||(h[k]=h=function(){});h.events=l={}}if(!n)h.handle=n=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(n.elem,arguments):A};n.elem=a;b=b.split(" ");for(var s=0,v;k=b[s++];){h=f?c.extend({},f):{handler:d,data:e};if(k.indexOf(".")>-1){v=k.split(".");k=v.shift();h.namespace=v.slice(0).sort().join(".")}else{v=[];h.namespace=""}h.type=k;if(!h.guid)h.guid=d.guid;var B=l[k],D=c.event.special[k]||{};if(!B){B=l[k]=[]; +if(!D.setup||D.setup.call(a,e,v,n)===false)if(a.addEventListener)a.addEventListener(k,n,false);else a.attachEvent&&a.attachEvent("on"+k,n)}if(D.add){D.add.call(a,h);if(!h.handler.guid)h.handler.guid=d.guid}B.push(h);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,k=0,l,n,s,v,B,D,H=a.nodeType?"events":"__events__",w=c.data(a),G=w&&w[H];if(w&&G){if(typeof G==="function"){w=G;G=G.events}if(b&&b.type){d=b.handler;b=b.type}if(!b|| +typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in G)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[k++];){v=f;l=f.indexOf(".")<0;n=[];if(!l){n=f.split(".");f=n.shift();s=RegExp("(^|\\.)"+c.map(n.slice(0).sort(),Va).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(B=G[f])if(d){v=c.event.special[f]||{};for(h=e||0;h<B.length;h++){D=B[h];if(d.guid===D.guid){if(l||s.test(D.namespace)){e==null&&B.splice(h--,1);v.remove&&v.remove.call(a,D)}if(e!=null)break}}if(B.length===0||e!=null&&B.length===1){if(!v.teardown|| +v.teardown.call(a,n)===false)c.removeEvent(a,f,w.handle);delete G[f]}}else for(h=0;h<B.length;h++){D=B[h];if(l||s.test(D.namespace)){c.event.remove(a,v,D.handler,h);B.splice(h--,1)}}}if(c.isEmptyObject(G)){if(b=w.handle)b.elem=null;delete w.events;delete w.handle;if(typeof w==="function")c.removeData(a,H);else c.isEmptyObject(w)&&c.removeData(a)}}}}},trigger:function(a,b,d,e){var f=a.type||a;if(!e){a=typeof 
a==="object"?a[c.expando]?a:c.extend(c.Event(f),a):c.Event(f);if(f.indexOf("!")>=0){a.type= +f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return A;a.result=A;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)=== +false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){e=a.target;var k,l=f.replace(X,""),n=c.nodeName(e,"a")&&l==="click",s=c.event.special[l]||{};if((!s._default||s._default.call(d,a)===false)&&!n&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[l]){if(k=e["on"+l])e["on"+l]=null;c.event.triggered=true;e[l]()}}catch(v){}if(k)e["on"+l]=k;c.event.triggered=false}}},handle:function(a){var b,d,e; +d=[];var f,h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var k=d.length;f<k;f++){var l=d[f];if(b||e.test(l.namespace)){a.handler=l.handler;a.data= +l.data;a.handleObj=l;l=l.handler.apply(this,h);if(l!==A){a.result=l;if(l===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "), +fix:function(a){if(a[c.expando])return a;var b=a;a=c.Event(b);for(var d=this.props.length,e;d;){e=this.props[--d];a[e]=b[e]}if(!a.target)a.target=a.srcElement||u;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=u.documentElement;d=u.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop|| +d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(a.which==null&&(a.charCode!=null||a.keyCode!=null))a.which=a.charCode!=null?a.charCode:a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==A)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,Y(a.origType,a.selector),c.extend({},a,{handler:Ga,guid:a.handler.guid}))},remove:function(a){c.event.remove(this, 
+Y(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,d){if(c.isWindow(this))this.onbeforeunload=d},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};c.removeEvent=u.removeEventListener?function(a,b,d){a.removeEventListener&&a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent&&a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=a;this.type=a.type}else this.type=a;this.timeStamp= +c.now();this[c.expando]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=ba;var a=this.originalEvent;if(a)if(a.preventDefault)a.preventDefault();else a.returnValue=false},stopPropagation:function(){this.isPropagationStopped=ba;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=ba;this.stopPropagation()},isDefaultPrevented:U,isPropagationStopped:U,isImmediatePropagationStopped:U}; +var ta=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},ua=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?ua:ta,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?ua:ta)}}});if(!c.support.submitBubbles)c.event.special.submit={setup:function(){if(this.nodeName.toLowerCase()!== +"form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length){a.liveFired=A;return ja("submit",this,arguments)}});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13){a.liveFired=A;return ja("submit",this,arguments)}})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};if(!c.support.changeBubbles){var V, +va=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ha.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=va(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===A||f===e))if(e!=null||f){a.type="change";a.liveFired= +A;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",va(a))}},setup:function(){if(this.type=== +"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ha.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ha.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}u.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return 
c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){sa[b]++===0&&u.addEventListener(a,d,true)},teardown:function(){--sa[b]=== +0&&u.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=A}var k=b==="one"?c.proxy(f,function(n){c(this).unbind(n,k);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var l=this.length;h<l;h++)c.event.add(this[h],d,k,e)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&!a.preventDefault)for(var d in a)this.unbind(d, +a[d]);else{d=0;for(var e=this.length;d<e;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,e){return this.live(b,d,e,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){var d=c.Event(a);d.preventDefault();d.stopPropagation();c.event.trigger(d,b,this[0]);return d.result}},toggle:function(a){for(var b=arguments,d= +1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(e){var f=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,f+1);e.preventDefault();return b[f].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var wa={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,e,f,h){var k,l=0,n,s,v=h||this.selector;h=h?this:c(this.context);if(typeof d=== +"object"&&!d.preventDefault){for(k in d)h[b](k,e,d[k],v);return this}if(c.isFunction(e)){f=e;e=A}for(d=(d||"").split(" ");(k=d[l++])!=null;){n=X.exec(k);s="";if(n){s=n[0];k=k.replace(X,"")}if(k==="hover")d.push("mouseenter"+s,"mouseleave"+s);else{n=k;if(k==="focus"||k==="blur"){d.push(wa[k]+s);k+=s}else k=(wa[k]||k)+s;if(b==="live"){s=0;for(var B=h.length;s<B;s++)c.event.add(h[s],"live."+Y(k,v),{data:e,selector:v,handler:f,origType:k,origHandler:f,preType:n})}else h.unbind("live."+Y(k,v),f)}}return this}}); +c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){c.fn[b]=function(d,e){if(e==null){e=d;d=null}return arguments.length>0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}}); +(function(){function a(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1&&!q){x.sizcache=o;x.sizset=p}if(x.nodeName.toLowerCase()===j){C=x;break}x=x[g]}m[p]=C}}}function b(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1){if(!q){x.sizcache=o;x.sizset=p}if(typeof j!=="string"){if(x===j){C=true;break}}else if(l.filter(j, +[x]).length>0){C=x;break}}x=x[g]}m[p]=C}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,k=true;[0,0].sort(function(){k=false;return 0});var l=function(g,j,o,m){o=o||[];var 
p=j=j||u;if(j.nodeType!==1&&j.nodeType!==9)return[];if(!g||typeof g!=="string")return o;var q=[],t,x,C,P,N=true,R=l.isXML(j),Q=g,L;do{d.exec("");if(t=d.exec(Q)){Q=t[3];q.push(t[1]);if(t[2]){P=t[3]; +break}}}while(t);if(q.length>1&&s.exec(g))if(q.length===2&&n.relative[q[0]])x=M(q[0]+q[1],j);else for(x=n.relative[q[0]]?[j]:l(q.shift(),j);q.length;){g=q.shift();if(n.relative[g])g+=q.shift();x=M(g,x)}else{if(!m&&q.length>1&&j.nodeType===9&&!R&&n.match.ID.test(q[0])&&!n.match.ID.test(q[q.length-1])){t=l.find(q.shift(),j,R);j=t.expr?l.filter(t.expr,t.set)[0]:t.set[0]}if(j){t=m?{expr:q.pop(),set:D(m)}:l.find(q.pop(),q.length===1&&(q[0]==="~"||q[0]==="+")&&j.parentNode?j.parentNode:j,R);x=t.expr?l.filter(t.expr, +t.set):t.set;if(q.length>0)C=D(x);else N=false;for(;q.length;){t=L=q.pop();if(n.relative[L])t=q.pop();else L="";if(t==null)t=j;n.relative[L](C,t,R)}}else C=[]}C||(C=x);C||l.error(L||g);if(f.call(C)==="[object Array]")if(N)if(j&&j.nodeType===1)for(g=0;C[g]!=null;g++){if(C[g]&&(C[g]===true||C[g].nodeType===1&&l.contains(j,C[g])))o.push(x[g])}else for(g=0;C[g]!=null;g++)C[g]&&C[g].nodeType===1&&o.push(x[g]);else o.push.apply(o,C);else D(C,o);if(P){l(P,p,o,m);l.uniqueSort(o)}return o};l.uniqueSort=function(g){if(w){h= +k;g.sort(w);if(h)for(var j=1;j<g.length;j++)g[j]===g[j-1]&&g.splice(j--,1)}return g};l.matches=function(g,j){return l(g,null,null,j)};l.matchesSelector=function(g,j){return l(j,null,null,[g]).length>0};l.find=function(g,j,o){var m;if(!g)return[];for(var p=0,q=n.order.length;p<q;p++){var t=n.order[p],x;if(x=n.leftMatch[t].exec(g)){var C=x[1];x.splice(1,1);if(C.substr(C.length-1)!=="\\"){x[1]=(x[1]||"").replace(/\\/g,"");m=n.find[t](x,j,o);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=j.getElementsByTagName("*")); +return{set:m,expr:g}};l.filter=function(g,j,o,m){for(var p=g,q=[],t=j,x,C,P=j&&j[0]&&l.isXML(j[0]);g&&j.length;){for(var N in n.filter)if((x=n.leftMatch[N].exec(g))!=null&&x[2]){var R=n.filter[N],Q,L;L=x[1];C=false;x.splice(1,1);if(L.substr(L.length-1)!=="\\"){if(t===q)q=[];if(n.preFilter[N])if(x=n.preFilter[N](x,t,o,q,m,P)){if(x===true)continue}else C=Q=true;if(x)for(var i=0;(L=t[i])!=null;i++)if(L){Q=R(L,x,i,t);var r=m^!!Q;if(o&&Q!=null)if(r)C=true;else t[i]=false;else if(r){q.push(L);C=true}}if(Q!== +A){o||(t=q);g=g.replace(n.match[N],"");if(!C)return[];break}}}if(g===p)if(C==null)l.error(g);else break;p=g}return t};l.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=l.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/, +POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},relative:{"+":function(g,j){var o=typeof j==="string",m=o&&!/\W/.test(j);o=o&&!m;if(m)j=j.toLowerCase();m=0;for(var p=g.length,q;m<p;m++)if(q=g[m]){for(;(q=q.previousSibling)&&q.nodeType!==1;);g[m]=o||q&&q.nodeName.toLowerCase()=== +j?q||false:q===j}o&&l.filter(j,g,true)},">":function(g,j){var o=typeof 
j==="string",m,p=0,q=g.length;if(o&&!/\W/.test(j))for(j=j.toLowerCase();p<q;p++){if(m=g[p]){o=m.parentNode;g[p]=o.nodeName.toLowerCase()===j?o:false}}else{for(;p<q;p++)if(m=g[p])g[p]=o?m.parentNode:m.parentNode===j;o&&l.filter(j,g,true)}},"":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=j=j.toLowerCase();p=a}p("parentNode",j,m,g,q,o)},"~":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q= +j=j.toLowerCase();p=a}p("previousSibling",j,m,g,q,o)}},find:{ID:function(g,j,o){if(typeof j.getElementById!=="undefined"&&!o)return(g=j.getElementById(g[1]))&&g.parentNode?[g]:[]},NAME:function(g,j){if(typeof j.getElementsByName!=="undefined"){for(var o=[],m=j.getElementsByName(g[1]),p=0,q=m.length;p<q;p++)m[p].getAttribute("name")===g[1]&&o.push(m[p]);return o.length===0?null:o}},TAG:function(g,j){return j.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,j,o,m,p,q){g=" "+g[1].replace(/\\/g, +"")+" ";if(q)return g;q=0;for(var t;(t=j[q])!=null;q++)if(t)if(p^(t.className&&(" "+t.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))o||m.push(t);else if(o)j[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var j=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=j[1]+(j[2]||1)-0;g[3]=j[3]-0}g[0]=e++;return g},ATTR:function(g,j,o, +m,p,q){j=g[1].replace(/\\/g,"");if(!q&&n.attrMap[j])g[1]=n.attrMap[j];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,j,o,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=l(g[3],null,null,j);else{g=l.filter(g[3],j,o,true^p);o||m.push.apply(m,g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled=== +true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,j,o){return!!l(o[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"=== +g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,j){return j===0},last:function(g,j,o,m){return j===m.length-1},even:function(g,j){return j%2===0},odd:function(g,j){return j%2===1},lt:function(g,j,o){return j<o[3]-0},gt:function(g,j,o){return j>o[3]-0},nth:function(g,j,o){return o[3]- +0===j},eq:function(g,j,o){return o[3]-0===j}},filter:{PSEUDO:function(g,j,o,m){var p=j[1],q=n.filters[p];if(q)return q(g,o,j,m);else if(p==="contains")return(g.textContent||g.innerText||l.getText([g])||"").indexOf(j[3])>=0;else if(p==="not"){j=j[3];o=0;for(m=j.length;o<m;o++)if(j[o]===g)return false;return true}else l.error("Syntax error, unrecognized expression: "+p)},CHILD:function(g,j){var o=j[1],m=g;switch(o){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(o=== 
+"first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":o=j[2];var p=j[3];if(o===1&&p===0)return true;var q=j[0],t=g.parentNode;if(t&&(t.sizcache!==q||!g.nodeIndex)){var x=0;for(m=t.firstChild;m;m=m.nextSibling)if(m.nodeType===1)m.nodeIndex=++x;t.sizcache=q}m=g.nodeIndex-p;return o===0?m===0:m%o===0&&m/o>=0}},ID:function(g,j){return g.nodeType===1&&g.getAttribute("id")===j},TAG:function(g,j){return j==="*"&&g.nodeType===1||g.nodeName.toLowerCase()=== +j},CLASS:function(g,j){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(j)>-1},ATTR:function(g,j){var o=j[1];o=n.attrHandle[o]?n.attrHandle[o](g):g[o]!=null?g[o]:g.getAttribute(o);var m=o+"",p=j[2],q=j[4];return o==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&o!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,j,o,m){var p=n.setFilters[j[2]]; +if(p)return p(g,o,j,m)}}},s=n.match.POS,v=function(g,j){return"\\"+(j-0+1)},B;for(B in n.match){n.match[B]=RegExp(n.match[B].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[B]=RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[B].source.replace(/\\(\d+)/g,v))}var D=function(g,j){g=Array.prototype.slice.call(g,0);if(j){j.push.apply(j,g);return j}return g};try{Array.prototype.slice.call(u.documentElement.childNodes,0)}catch(H){D=function(g,j){var o=j||[],m=0;if(f.call(g)==="[object Array]")Array.prototype.push.apply(o, +g);else if(typeof g.length==="number")for(var p=g.length;m<p;m++)o.push(g[m]);else for(;g[m];m++)o.push(g[m]);return o}}var w,G;if(u.documentElement.compareDocumentPosition)w=function(g,j){if(g===j){h=true;return 0}if(!g.compareDocumentPosition||!j.compareDocumentPosition)return g.compareDocumentPosition?-1:1;return g.compareDocumentPosition(j)&4?-1:1};else{w=function(g,j){var o=[],m=[],p=g.parentNode,q=j.parentNode,t=p;if(g===j){h=true;return 0}else if(p===q)return G(g,j);else if(p){if(!q)return 1}else return-1; +for(;t;){o.unshift(t);t=t.parentNode}for(t=q;t;){m.unshift(t);t=t.parentNode}p=o.length;q=m.length;for(t=0;t<p&&t<q;t++)if(o[t]!==m[t])return G(o[t],m[t]);return t===p?G(g,m[t],-1):G(o[t],j,1)};G=function(g,j,o){if(g===j)return o;for(g=g.nextSibling;g;){if(g===j)return-1;g=g.nextSibling}return 1}}l.getText=function(g){for(var j="",o,m=0;g[m];m++){o=g[m];if(o.nodeType===3||o.nodeType===4)j+=o.nodeValue;else if(o.nodeType!==8)j+=l.getText(o.childNodes)}return j};(function(){var g=u.createElement("div"), +j="script"+(new Date).getTime();g.innerHTML="<a name='"+j+"'/>";var o=u.documentElement;o.insertBefore(g,o.firstChild);if(u.getElementById(j)){n.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:A:[]};n.filter.ID=function(m,p){var q=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}o.removeChild(g); +o=g=null})();(function(){var g=u.createElement("div");g.appendChild(u.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(j,o){var m=o.getElementsByTagName(j[1]);if(j[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="<a href='#'></a>";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(j){return 
j.getAttribute("href",2)};g=null})();u.querySelectorAll&& +function(){var g=l,j=u.createElement("div");j.innerHTML="<p class='TEST'></p>";if(!(j.querySelectorAll&&j.querySelectorAll(".TEST").length===0)){l=function(m,p,q,t){p=p||u;if(!t&&!l.isXML(p))if(p.nodeType===9)try{return D(p.querySelectorAll(m),q)}catch(x){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var C=p.id,P=p.id="__sizzle__";try{return D(p.querySelectorAll("#"+P+" "+m),q)}catch(N){}finally{if(C)p.id=C;else p.removeAttribute("id")}}return g(m,p,q,t)};for(var o in g)l[o]=g[o]; +j=null}}();(function(){var g=u.documentElement,j=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,o=false;try{j.call(u.documentElement,":sizzle")}catch(m){o=true}if(j)l.matchesSelector=function(p,q){try{if(o||!n.match.PSEUDO.test(q))return j.call(p,q)}catch(t){}return l(q,null,null,[p]).length>0}})();(function(){var g=u.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length=== +0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(j,o,m){if(typeof o.getElementsByClassName!=="undefined"&&!m)return o.getElementsByClassName(j[1])};g=null}}})();l.contains=u.documentElement.contains?function(g,j){return g!==j&&(g.contains?g.contains(j):true)}:function(g,j){return!!(g.compareDocumentPosition(j)&16)};l.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var M=function(g, +j){for(var o=[],m="",p,q=j.nodeType?[j]:j;p=n.match.PSEUDO.exec(g);){m+=p[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;p=0;for(var t=q.length;p<t;p++)l(g,q[p],o);return l.filter(m,o)};c.find=l;c.expr=l.selectors;c.expr[":"]=c.expr.filters;c.unique=l.uniqueSort;c.text=l.getText;c.isXMLDoc=l.isXML;c.contains=l.contains})();var Wa=/Until$/,Xa=/^(?:parents|prevUntil|prevAll)/,Ya=/,/,Ja=/^.[^:#\[\.,]*$/,Za=Array.prototype.slice,$a=c.expr.match.POS;c.fn.extend({find:function(a){for(var b=this.pushStack("", +"find",a),d=0,e=0,f=this.length;e<f;e++){d=b.length;c.find(a,this[e],b);if(e>0)for(var h=d;h<b.length;h++)for(var k=0;k<d;k++)if(b[k]===b[h]){b.splice(h--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,e=b.length;d<e;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(ka(this,a,false),"not",a)},filter:function(a){return this.pushStack(ka(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a, +b){var d=[],e,f,h=this[0];if(c.isArray(a)){var k={},l,n=1;if(h&&a.length){e=0;for(f=a.length;e<f;e++){l=a[e];k[l]||(k[l]=c.expr.match.POS.test(l)?c(l,b||this.context):l)}for(;h&&h.ownerDocument&&h!==b;){for(l in k){e=k[l];if(e.jquery?e.index(h)>-1:c(h).is(e))d.push({selector:l,elem:h,level:n})}h=h.parentNode;n++}}return d}k=$a.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e<f;e++)for(h=this[e];h;)if(k?k.index(h)>-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h|| +!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context):c.makeArray(a),e=c.merge(this.get(),d);return 
this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}}); +c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling", +d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Wa.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||Ya.test(e))&&Xa.test(a))f=f.reverse();return this.pushStack(f,a,Za.call(arguments).join(","))}}); +c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===A||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&&e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var xa=/ jQuery\d+="(?:\d+|null)"/g, +$=/^\s+/,ya=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,za=/<([\w:]+)/,ab=/<tbody/i,bb=/<|&#?\w+;/,Aa=/<(?:script|object|embed|option|style)/i,Ba=/checked\s*(?:[^=]|=\s*.checked.)/i,cb=/\=([^="'>\s]+\/)>/g,O={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"], +area:[1,"<map>","</map>"],_default:[0,"",""]};O.optgroup=O.option;O.tbody=O.tfoot=O.colgroup=O.caption=O.thead;O.th=O.td;if(!c.support.htmlSerialize)O._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==A)return this.empty().append((this[0]&&this[0].ownerDocument||u).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this, +d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})}, +unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return 
this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a= +c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*")); +c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(xa,"").replace(cb,'="$1">').replace($, +"")],e)[0]}else return this.cloneNode(true)});if(a===true){la(this,b);la(this.find("*"),b.find("*"))}return b},html:function(a){if(a===A)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(xa,""):null;else if(typeof a==="string"&&!Aa.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!O[(za.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ya,"<$1></$2>");try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(e){this.empty().append(a)}}else c.isFunction(a)? 
+this.each(function(f){var h=c(this);h.html(a.call(this,f,h.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),e=d.html();d.replaceWith(a.call(this,b,e))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a, +true)},domManip:function(a,b,d){var e,f,h=a[0],k=[],l;if(!c.support.checkClone&&arguments.length===3&&typeof h==="string"&&Ba.test(h))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(h))return this.each(function(s){var v=c(this);a[0]=h.call(this,s,b?v.html():A);v.domManip(a,b,d)});if(this[0]){e=h&&h.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:c.buildFragment(a,this,k);l=e.fragment;if(f=l.childNodes.length===1?l=l.firstChild: +l.firstChild){b=b&&c.nodeName(f,"tr");f=0;for(var n=this.length;f<n;f++)d.call(b?c.nodeName(this[f],"table")?this[f].getElementsByTagName("tbody")[0]||this[f].appendChild(this[f].ownerDocument.createElement("tbody")):this[f]:this[f],f>0||e.cacheable||this.length>1?l.cloneNode(true):l)}k.length&&c.each(k,Ka)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:u;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===u&&!Aa.test(a[0])&&(c.support.checkClone|| +!Ba.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h= +d.length;f<h;f++){var k=(f>0?this.clone(true):this).get();c(d[f])[b](k);e=e.concat(k)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||u;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||u;for(var f=[],h=0,k;(k=a[h])!=null;h++){if(typeof k==="number")k+="";if(k){if(typeof k==="string"&&!bb.test(k))k=b.createTextNode(k);else if(typeof k==="string"){k=k.replace(ya,"<$1></$2>");var l=(za.exec(k)||["",""])[1].toLowerCase(),n=O[l]||O._default, +s=n[0],v=b.createElement("div");for(v.innerHTML=n[1]+k+n[2];s--;)v=v.lastChild;if(!c.support.tbody){s=ab.test(k);l=l==="table"&&!s?v.firstChild&&v.firstChild.childNodes:n[1]==="<table>"&&!s?v.childNodes:[];for(n=l.length-1;n>=0;--n)c.nodeName(l[n],"tbody")&&!l[n].childNodes.length&&l[n].parentNode.removeChild(l[n])}!c.support.leadingWhitespace&&$.test(k)&&v.insertBefore(b.createTextNode($.exec(k)[0]),v.firstChild);k=v.childNodes}if(k.nodeType)f.push(k);else f=c.merge(f,k)}}if(d)for(h=0;f[h];h++)if(e&& +c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script"))));d.appendChild(f[h])}return f},cleanData:function(a){for(var 
b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,k=0,l;(l=a[k])!=null;k++)if(!(l.nodeName&&c.noData[l.nodeName.toLowerCase()]))if(d=l[c.expando]){if((b=e[d])&&b.events)for(var n in b.events)f[n]? +c.event.remove(l,n):c.removeEvent(l,n,b.handle);if(h)delete l[c.expando];else l.removeAttribute&&l.removeAttribute(c.expando);delete e[d]}}});var Ca=/alpha\([^)]*\)/i,db=/opacity=([^)]*)/,eb=/-([a-z])/ig,fb=/([A-Z])/g,Da=/^-?\d+(?:px)?$/i,gb=/^-?\d/,hb={position:"absolute",visibility:"hidden",display:"block"},La=["Left","Right"],Ma=["Top","Bottom"],W,ib=u.defaultView&&u.defaultView.getComputedStyle,jb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===A)return this; +return c.access(this,a,b,true,function(d,e,f){return f!==A?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true,zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),k=a.style,l=c.cssHooks[h];b=c.cssProps[h]|| +h;if(d!==A){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!l||!("set"in l)||(d=l.set(a,d))!==A)try{k[b]=d}catch(n){}}}else{if(l&&"get"in l&&(f=l.get(a,false,e))!==A)return f;return k[b]}}},css:function(a,b,d){var e,f=c.camelCase(b),h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==A)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]= +e[f]},camelCase:function(a){return a.replace(eb,jb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=ma(d,b,f);else c.swap(d,hb,function(){h=ma(d,b,f)});return h+"px"}},set:function(d,e){if(Da.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return db.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"": +b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f=d.filter||"";d.filter=Ca.test(f)?f.replace(Ca,e):d.filter+" "+e}};if(ib)W=function(a,b,d){var e;d=d.replace(fb,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return A;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};else if(u.documentElement.currentStyle)W=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b], +h=a.style;if(!Da.test(f)&&gb.test(f)){d=h.left;e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f};if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var kb=c.now(),lb=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, 
+mb=/^(?:select|textarea)/i,nb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,ob=/^(?:GET|HEAD|DELETE)$/,Na=/\[\]$/,T=/\=\?(&|$)/,ia=/\?/,pb=/([?&])_=[^&]*/,qb=/^(\w+:)?\/\/([^\/?#]+)/,rb=/%20/g,sb=/#.*$/,Ea=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ea)return Ea.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d= +b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(k,l){if(l==="success"||l==="notmodified")h.html(f?c("<div>").append(k.responseText.replace(lb,"")).find(f):k.responseText);d&&h.each(d,[k.responseText,l,k])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&& +!this.disabled&&(this.checked||mb.test(this.nodeName)||nb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})}, +getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html", +script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),k=ob.test(h);b.url=b.url.replace(sb,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ia.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data|| +!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+kb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var l=E[d];E[d]=function(m){f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);if(c.isFunction(l))l(m);else{E[d]=A;try{delete E[d]}catch(p){}}v&&v.removeChild(B)}}if(b.dataType==="script"&&b.cache===null)b.cache= +false;if(b.cache===false&&h==="GET"){var n=c.now(),s=b.url.replace(pb,"$1_="+n);b.url=s+(s===b.url?(ia.test(b.url)?"&":"?")+"_="+n:"")}if(b.data&&h==="GET")b.url+=(ia.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");n=(n=qb.exec(b.url))&&(n[1]&&n[1]!==location.protocol||n[2]!==location.host);if(b.dataType==="script"&&h==="GET"&&n){var 
v=u.getElementsByTagName("head")[0]||u.documentElement,B=u.createElement("script");if(b.scriptCharset)B.charset=b.scriptCharset;B.src= +b.url;if(!d){var D=false;B.onload=B.onreadystatechange=function(){if(!D&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){D=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);B.onload=B.onreadystatechange=null;v&&B.parentNode&&v.removeChild(B)}}}v.insertBefore(B,v.firstChild);return A}var H=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!k||a&&a.contentType)w.setRequestHeader("Content-Type", +b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}n||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(G){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&& +c.triggerGlobal(b,"ajaxSend",[w,b]);var M=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){H||c.handleComplete(b,w,e,f);H=true;if(w)w.onreadystatechange=c.noop}else if(!H&&w&&(w.readyState===4||m==="timeout")){H=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d|| +c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&g.call&&g.call(w);M("abort")}}catch(j){}b.async&&b.timeout>0&&setTimeout(function(){w&&!H&&M("timeout")},b.timeout);try{w.send(k||b.data==null?null:b.data)}catch(o){c.handleError(b,w,null,o);c.handleComplete(b,w,e,f)}b.async||M();return w}},param:function(a,b){var d=[],e=function(h,k){k=c.isFunction(k)?k():k;d[d.length]=encodeURIComponent(h)+ +"="+encodeURIComponent(k)};if(b===A)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)ca(f,a[f],b,e);return d.join("&").replace(rb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess",[b,a])},handleComplete:function(a, +b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),e=a.getResponseHeader("Etag"); +if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof 
a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}});if(E.ActiveXObject)c.ajaxSettings.xhr= +function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var da={},tb=/^(?:toggle|show|hide)$/,ub=/^([+\-]=)?([\d+.\-]+)(.*)$/,aa,na=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show",3),a,b,d);else{a= +0;for(b=this.length;a<b;a++){if(!c.data(this[a],"olddisplay")&&this[a].style.display==="none")this[a].style.display="";this[a].style.display===""&&c.css(this[a],"display")==="none"&&c.data(this[a],"olddisplay",oa(this[a].nodeName))}for(a=0;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b,d){if(a||a===0)return this.animate(S("hide",3),a,b,d);else{a=0;for(b=this.length;a<b;a++){d=c.css(this[a],"display");d!=="none"&&c.data(this[a],"olddisplay",d)}for(a= +0;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b,d){var e=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||e?this.each(function(){var f=e?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(S("toggle",3),a,b,d);return this},fadeTo:function(a,b,d,e){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d,e)},animate:function(a,b,d,e){var f=c.speed(b,d,e);if(c.isEmptyObject(a))return this.each(f.complete); +return this[f.queue===false?"each":"queue"](function(){var h=c.extend({},f),k,l=this.nodeType===1,n=l&&c(this).is(":hidden"),s=this;for(k in a){var v=c.camelCase(k);if(k!==v){a[v]=a[k];delete a[k];k=v}if(a[k]==="hide"&&n||a[k]==="show"&&!n)return h.complete.call(this);if(l&&(k==="height"||k==="width")){h.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY];if(c.css(this,"display")==="inline"&&c.css(this,"float")==="none")if(c.support.inlineBlockNeedsLayout)if(oa(this.nodeName)=== +"inline")this.style.display="inline-block";else{this.style.display="inline";this.style.zoom=1}else this.style.display="inline-block"}if(c.isArray(a[k])){(h.specialEasing=h.specialEasing||{})[k]=a[k][1];a[k]=a[k][0]}}if(h.overflow!=null)this.style.overflow="hidden";h.curAnim=c.extend({},a);c.each(a,function(B,D){var H=new c.fx(s,h,B);if(tb.test(D))H[D==="toggle"?n?"show":"hide":D](a);else{var w=ub.exec(D),G=H.cur(true)||0;if(w){var M=parseFloat(w[2]),g=w[3]||"px";if(g!=="px"){c.style(s,B,(M||1)+g); +G=(M||1)/H.cur(true)*G;c.style(s,B,G+g)}if(w[1])M=(w[1]==="-="?-1:1)*M+G;H.custom(G,M,g)}else H.custom(G,D,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);this.each(function(){for(var e=d.length-1;e>=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b, +d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in 
c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a* +Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(h){return f.step(h)} +this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;var f=this;a=c.fx;e.elem=this.elem;if(e()&&c.timers.push(e)&&!aa)aa=setInterval(a.tick,a.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true; +this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(l,n){f.style["overflow"+n]=h.overflow[l]})}this.options.hide&&c(this.elem).hide();if(this.options.hide|| +this.options.show)for(var k in this.options.curAnim)c.style(this.elem,k,this.options.orig[k]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a= +c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||c.fx.stop()},interval:13,stop:function(){clearInterval(aa);aa=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a=== +b.elem}).length};var vb=/^t(?:able|d|h)$/i,Fa=/^(?:body|html)$/i;c.fn.offset="getBoundingClientRect"in u.documentElement?function(a){var b=this[0],d;if(a)return this.each(function(k){c.offset.setOffset(this,a,k)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);try{d=b.getBoundingClientRect()}catch(e){}var f=b.ownerDocument,h=f.documentElement;if(!d||!c.contains(h,b))return d||{top:0,left:0};b=f.body;f=ea(f);return{top:d.top+(f.pageYOffset||c.support.boxModel&& +h.scrollTop||b.scrollTop)-(h.clientTop||b.clientTop||0),left:d.left+(f.pageXOffset||c.support.boxModel&&h.scrollLeft||b.scrollLeft)-(h.clientLeft||b.clientLeft||0)}}:function(a){var 
b=this[0];if(a)return this.each(function(s){c.offset.setOffset(this,a,s)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,e=b.ownerDocument,f,h=e.documentElement,k=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle; +for(var l=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==k&&b!==h;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;f=e?e.getComputedStyle(b,null):b.currentStyle;l-=b.scrollTop;n-=b.scrollLeft;if(b===d){l+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&vb.test(b.nodeName))){l+=parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&f.overflow!=="visible"){l+= +parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}f=f}if(f.position==="relative"||f.position==="static"){l+=k.offsetTop;n+=k.offsetLeft}if(c.offset.supportsFixedPosition&&f.position==="fixed"){l+=Math.max(h.scrollTop,k.scrollTop);n+=Math.max(h.scrollLeft,k.scrollLeft)}return{top:l,left:n}};c.offset={initialize:function(){var a=u.body,b=u.createElement("div"),d,e,f,h=parseFloat(c.css(a,"marginTop"))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px", +height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";a.insertBefore(b,a.firstChild);d=b.firstChild;e=d.firstChild;f=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=e.offsetTop!==5;this.doesAddBorderForTableAndCells= +f.offsetTop===5;e.style.position="fixed";e.style.top="20px";this.supportsFixedPosition=e.offsetTop===20||e.offsetTop===15;e.style.position=e.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=e.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==h;a.removeChild(b);c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.css(a, +"marginTop"))||0;d+=parseFloat(c.css(a,"marginLeft"))||0}return{top:b,left:d}},setOffset:function(a,b,d){var e=c.css(a,"position");if(e==="static")a.style.position="relative";var f=c(a),h=f.offset(),k=c.css(a,"top"),l=c.css(a,"left"),n=e==="absolute"&&c.inArray("auto",[k,l])>-1;e={};var s={};if(n)s=f.position();k=n?s.top:parseInt(k,10)||0;l=n?s.left:parseInt(l,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+k;if(b.left!=null)e.left=b.left-h.left+l;"using"in b?b.using.call(a, +e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),e=Fa.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||u.body;a&&!Fa.test(a.nodeName)&& +c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var 
f=this[0],h;if(!f)return null;if(e!==A)return this.each(function(){if(h=ea(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=ea(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase(); +c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(h){var k=c(this);k[d](e.call(this,h,k[d]()))});return c.isWindow(f)?f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b]:f.nodeType===9?Math.max(f.documentElement["client"+ +b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]):e===A?parseFloat(c.css(f,d)):this.css(d,typeof e==="string"?e:e+"px")}})})(window); diff --git a/third_party/pycoverage/coverage/htmlfiles/jquery.tablesorter.min.js b/third_party/pycoverage/coverage/htmlfiles/jquery.tablesorter.min.js new file mode 100644 index 0000000..64c7007 --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/jquery.tablesorter.min.js @@ -0,0 +1,2 @@ + +(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var 
j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var 
s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var $this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var 
i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? 
[0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery);
\ No newline at end of file diff --git a/third_party/pycoverage/coverage/htmlfiles/keybd_closed.png b/third_party/pycoverage/coverage/htmlfiles/keybd_closed.png Binary files differnew file mode 100644 index 0000000..f2b0418 --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/keybd_closed.png diff --git a/third_party/pycoverage/coverage/htmlfiles/keybd_open.png b/third_party/pycoverage/coverage/htmlfiles/keybd_open.png Binary files differnew file mode 100644 index 0000000..a77961d --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/keybd_open.png diff --git a/third_party/pycoverage/coverage/htmlfiles/pyfile.html b/third_party/pycoverage/coverage/htmlfiles/pyfile.html new file mode 100644 index 0000000..88c158d --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/pyfile.html @@ -0,0 +1,90 @@ +<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> +<html> +<head> + <meta http-equiv='Content-Type' content='text/html; charset=utf-8'> + {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #} + {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #} + <meta http-equiv='X-UA-Compatible' content='IE=emulateIE7' /> + <title>Coverage for {{cu.name|escape}}: {{nums.pc_covered_str}}%</title> + <link rel='stylesheet' href='style.css' type='text/css'> + {% if extra_css %} + <link rel='stylesheet' href='{{ extra_css }}' type='text/css'> + {% endif %} + <script type='text/javascript' src='jquery.min.js'></script> + <script type='text/javascript' src='jquery.hotkeys.js'></script> + <script type='text/javascript' src='jquery.isonscreen.js'></script> + <script type='text/javascript' src='coverage_html.js'></script> + <script type='text/javascript' charset='utf-8'> + jQuery(document).ready(coverage.pyfile_ready); + </script> +</head> +<body id='pyfile'> + +<div id='header'> + <div class='content'> + <h1>Coverage for <b>{{cu.name|escape}}</b> : + <span class='pc_cov'>{{nums.pc_covered_str}}%</span> + </h1> + <img id='keyboard_icon' src='keybd_closed.png'> + <h2 class='stats'> + {{nums.n_statements}} statements + <span class='{{c_run}} shortkey_r button_toggle_run'>{{nums.n_executed}} run</span> + <span class='{{c_mis}} shortkey_m button_toggle_mis'>{{nums.n_missing}} missing</span> + <span class='{{c_exc}} shortkey_x button_toggle_exc'>{{nums.n_excluded}} excluded</span> + {% if arcs %} + <span class='{{c_par}} shortkey_p button_toggle_par'>{{nums.n_partial_branches}} partial</span> + {% endif %} + </h2> + </div> +</div> + +<div class='help_panel'> + <img id='panel_icon' src='keybd_open.png'> + <p class='legend'>Hot-keys on this page</p> + <div> + <p class='keyhelp'> + <span class='key'>r</span> + <span class='key'>m</span> + <span class='key'>x</span> + <span class='key'>p</span> toggle line displays + </p> + <p class='keyhelp'> + <span class='key'>j</span> + <span class='key'>k</span> next/prev highlighted chunk + </p> + <p class='keyhelp'> + <span class='key'>0</span> (zero) top of page + </p> + <p class='keyhelp'> + <span class='key'>1</span> (one) first highlighted chunk + </p> + </div> +</div> + +<div id='source'> + <table cellspacing='0' cellpadding='0'> + <tr> + <td class='linenos' valign='top'> + {% for line in lines %} + <p id='n{{line.number}}' class='{{line.class}}'><a href='#n{{line.number}}'>{{line.number}}</a></p> + {% endfor %} + </td> + <td class='text' valign='top'> + {% for line in lines %} + <p id='t{{line.number}}' 
class='{{line.class}}'>{% if line.annotate %}<span class='annotate' title='{{line.annotate_title}}'>{{line.annotate}}</span>{% endif %}{{line.html}}<span class='strut'> </span></p> + {% endfor %} + </td> + </tr> + </table> +</div> + +<div id='footer'> + <div class='content'> + <p> + <a class='nav' href='index.html'>« index</a> <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a> + </p> + </div> +</div> + +</body> +</html> diff --git a/third_party/pycoverage/coverage/htmlfiles/style.css b/third_party/pycoverage/coverage/htmlfiles/style.css new file mode 100644 index 0000000..811c640 --- /dev/null +++ b/third_party/pycoverage/coverage/htmlfiles/style.css @@ -0,0 +1,300 @@ +/* CSS styles for Coverage. */ +/* Page-wide styles */ +html, body, h1, h2, h3, p, td, th { + margin: 0; + padding: 0; + border: 0; + outline: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; + } + +/* Set baseline grid to 16 pt. */ +body { + font-family: georgia, serif; + font-size: 1em; + } + +html>body { + font-size: 16px; + } + +/* Set base font size to 12/16 */ +p { + font-size: .75em; /* 12/16 */ + line-height: 1.33333333em; /* 16/12 */ + } + +table { + border-collapse: collapse; + } + +a.nav { + text-decoration: none; + color: inherit; + } +a.nav:hover { + text-decoration: underline; + color: inherit; + } + +/* Page structure */ +#header { + background: #f8f8f8; + width: 100%; + border-bottom: 1px solid #eee; + } + +#source { + padding: 1em; + font-family: "courier new", monospace; + } + +#indexfile #footer { + margin: 1em 3em; + } + +#pyfile #footer { + margin: 1em 1em; + } + +#footer .content { + padding: 0; + font-size: 85%; + font-family: verdana, sans-serif; + color: #666666; + font-style: italic; + } + +#index { + margin: 1em 0 0 3em; + } + +/* Header styles */ +#header .content { + padding: 1em 3em; + } + +h1 { + font-size: 1.25em; +} + +h2.stats { + margin-top: .5em; + font-size: 1em; +} +.stats span { + border: 1px solid; + padding: .1em .25em; + margin: 0 .1em; + cursor: pointer; + border-color: #999 #ccc #ccc #999; +} +.stats span.hide_run, .stats span.hide_exc, +.stats span.hide_mis, .stats span.hide_par, +.stats span.par.hide_run.hide_par { + border-color: #ccc #999 #999 #ccc; +} +.stats span.par.hide_run { + border-color: #999 #ccc #ccc #999; +} + +.stats span.run { + background: #ddffdd; +} +.stats span.exc { + background: #eeeeee; +} +.stats span.mis { + background: #ffdddd; +} +.stats span.hide_run { + background: #eeffee; +} +.stats span.hide_exc { + background: #f5f5f5; +} +.stats span.hide_mis { + background: #ffeeee; +} +.stats span.par { + background: #ffffaa; +} +.stats span.hide_par { + background: #ffffcc; +} + +/* Help panel */ +#keyboard_icon { + float: right; + cursor: pointer; +} + +.help_panel { + position: absolute; + background: #ffc; + padding: .5em; + border: 1px solid #883; + display: none; +} + +#indexfile .help_panel { + width: 20em; height: 4em; +} + +#pyfile .help_panel { + width: 16em; height: 8em; +} + +.help_panel .legend { + font-style: italic; + margin-bottom: 1em; +} + +#panel_icon { + float: right; + cursor: pointer; +} + +.keyhelp { + margin: .75em; +} + +.keyhelp .key { + border: 1px solid black; + border-color: #888 #333 #333 #888; + padding: .1em .35em; + font-family: monospace; + font-weight: bold; + background: #eee; +} + +/* Source file styles */ +.linenos p { + text-align: right; + margin: 0; + padding: 0 .5em; + color: #999999; + font-family: verdana, sans-serif; + font-size: 
.625em; /* 10/16 */ + line-height: 1.6em; /* 16/10 */ + } +.linenos p.highlight { + background: #ffdd00; + } +.linenos p a { + text-decoration: none; + color: #999999; + } +.linenos p a:hover { + text-decoration: underline; + color: #999999; + } + +td.text { + width: 100%; + } +.text p { + margin: 0; + padding: 0 0 0 .5em; + border-left: 2px solid #ffffff; + white-space: nowrap; + } + +.text p.mis { + background: #ffdddd; + border-left: 2px solid #ff0000; + } +.text p.run, .text p.run.hide_par { + background: #ddffdd; + border-left: 2px solid #00ff00; + } +.text p.exc { + background: #eeeeee; + border-left: 2px solid #808080; + } +.text p.par, .text p.par.hide_run { + background: #ffffaa; + border-left: 2px solid #eeee99; + } +.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par, +.text p.hide_run.hide_par { + background: inherit; + } + +.text span.annotate { + font-family: georgia; + font-style: italic; + color: #666; + float: right; + padding-right: .5em; + } +.text p.hide_par span.annotate { + display: none; + } + +/* Syntax coloring */ +.text .com { + color: green; + font-style: italic; + line-height: 1px; + } +.text .key { + font-weight: bold; + line-height: 1px; + } +.text .str { + color: #000080; + } + +/* index styles */ +#index td, #index th { + text-align: right; + width: 5em; + padding: .25em .5em; + border-bottom: 1px solid #eee; + } +#index th { + font-style: italic; + color: #333; + border-bottom: 1px solid #ccc; + cursor: pointer; + } +#index th:hover { + background: #eee; + border-bottom: 1px solid #999; + } +#index td.left, #index th.left { + padding-left: 0; + } +#index td.right, #index th.right { + padding-right: 0; + } +#index th.headerSortDown, #index th.headerSortUp { + border-bottom: 1px solid #000; + } +#index td.name, #index th.name { + text-align: left; + width: auto; + } +#index td.name a { + text-decoration: none; + color: #000; + } +#index td.name a:hover { + text-decoration: underline; + color: #000; + } +#index tr.total { + } +#index tr.total td { + font-weight: bold; + border-top: 1px solid #ccc; + border-bottom: none; + } +#index tr.file:hover { + background: #eeeeee; + } diff --git a/third_party/pycoverage/coverage/misc.py b/third_party/pycoverage/coverage/misc.py new file mode 100644 index 0000000..0378173 --- /dev/null +++ b/third_party/pycoverage/coverage/misc.py @@ -0,0 +1,167 @@ +"""Miscellaneous stuff for Coverage.""" + +import errno +import inspect +import os +import sys + +from coverage.backward import md5, sorted # pylint: disable=W0622 +from coverage.backward import string_class, to_bytes + + +def nice_pair(pair): + """Make a nice string representation of a pair of numbers. + + If the numbers are equal, just return the number, otherwise return the pair + with a dash between them, indicating the range. + + """ + start, end = pair + if start == end: + return "%d" % start + else: + return "%d-%d" % (start, end) + + +def format_lines(statements, lines): + """Nicely format a list of line numbers. + + Format a list of line numbers for printing by coalescing groups of lines as + long as the lines represent consecutive statements. This will coalesce + even if there are gaps between statements. + + For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and + `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". 
+ + """ + pairs = [] + i = 0 + j = 0 + start = None + statements = sorted(statements) + lines = sorted(lines) + while i < len(statements) and j < len(lines): + if statements[i] == lines[j]: + if start == None: + start = lines[j] + end = lines[j] + j += 1 + elif start: + pairs.append((start, end)) + start = None + i += 1 + if start: + pairs.append((start, end)) + ret = ', '.join(map(nice_pair, pairs)) + return ret + + +def short_stack(): + """Return a string summarizing the call stack.""" + stack = inspect.stack()[:0:-1] + return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack]) + + +def expensive(fn): + """A decorator to cache the result of an expensive operation. + + Only applies to methods with no arguments. + + """ + attr = "_cache_" + fn.__name__ + def _wrapped(self): + """Inner fn that checks the cache.""" + if not hasattr(self, attr): + setattr(self, attr, fn(self)) + return getattr(self, attr) + return _wrapped + + +def bool_or_none(b): + """Return bool(b), but preserve None.""" + if b is None: + return None + else: + return bool(b) + + +def join_regex(regexes): + """Combine a list of regexes into one that matches any of them.""" + if len(regexes) > 1: + return "|".join(["(%s)" % r for r in regexes]) + elif regexes: + return regexes[0] + else: + return "" + + +def file_be_gone(path): + """Remove a file, and don't get annoyed if it doesn't exist.""" + try: + os.remove(path) + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.ENOENT: + raise + + +class Hasher(object): + """Hashes Python data into md5.""" + def __init__(self): + self.md5 = md5() + + def update(self, v): + """Add `v` to the hash, recursively if needed.""" + self.md5.update(to_bytes(str(type(v)))) + if isinstance(v, string_class): + self.md5.update(to_bytes(v)) + elif v is None: + pass + elif isinstance(v, (int, float)): + self.md5.update(to_bytes(str(v))) + elif isinstance(v, (tuple, list)): + for e in v: + self.update(e) + elif isinstance(v, dict): + keys = v.keys() + for k in sorted(keys): + self.update(k) + self.update(v[k]) + else: + for k in dir(v): + if k.startswith('__'): + continue + a = getattr(v, k) + if inspect.isroutine(a): + continue + self.update(k) + self.update(a) + + def digest(self): + """Retrieve the digest of the hash.""" + return self.md5.digest() + + +class CoverageException(Exception): + """An exception specific to Coverage.""" + pass + +class NoSource(CoverageException): + """We couldn't find the source for a module.""" + pass + +class NoCode(NoSource): + """We couldn't find any code at all.""" + pass + +class NotPython(CoverageException): + """A source file turned out not to be parsable Python.""" + pass + +class ExceptionDuringRun(CoverageException): + """An exception happened while running customer code. + + Construct it with three arguments, the values from `sys.exc_info`. 
+ + """ + pass diff --git a/third_party/pycoverage/coverage/parser.py b/third_party/pycoverage/coverage/parser.py new file mode 100644 index 0000000..7a145a2 --- /dev/null +++ b/third_party/pycoverage/coverage/parser.py @@ -0,0 +1,700 @@ +"""Code parsing for Coverage.""" + +import dis, re, sys, token, tokenize + +from coverage.backward import set, sorted, StringIO # pylint: disable=W0622 +from coverage.backward import open_source, range # pylint: disable=W0622 +from coverage.backward import reversed # pylint: disable=W0622 +from coverage.backward import bytes_to_ints +from coverage.bytecode import ByteCodes, CodeObjects +from coverage.misc import nice_pair, expensive, join_regex +from coverage.misc import CoverageException, NoSource, NotPython + + +class CodeParser(object): + """Parse code to find executable lines, excluded lines, etc.""" + + def __init__(self, text=None, filename=None, exclude=None): + """ + Source can be provided as `text`, the text itself, or `filename`, from + which the text will be read. Excluded lines are those that match + `exclude`, a regex. + + """ + assert text or filename, "CodeParser needs either text or filename" + self.filename = filename or "<code>" + self.text = text + if not self.text: + try: + sourcef = open_source(self.filename) + try: + self.text = sourcef.read() + finally: + sourcef.close() + except IOError: + _, err, _ = sys.exc_info() + raise NoSource( + "No source for code: '%s': %s" % (self.filename, err) + ) + + # Scrap the BOM if it exists. + if self.text and ord(self.text[0]) == 0xfeff: + self.text = self.text[1:] + + self.exclude = exclude + + self.show_tokens = False + + # The text lines of the parsed code. + self.lines = self.text.split('\n') + + # The line numbers of excluded lines of code. + self.excluded = set() + + # The line numbers of docstring lines. + self.docstrings = set() + + # The line numbers of class definitions. + self.classdefs = set() + + # A dict mapping line numbers to (lo,hi) for multi-line statements. + self.multiline = {} + + # The line numbers that start statements. + self.statement_starts = set() + + # Lazily-created ByteParser + self._byte_parser = None + + def _get_byte_parser(self): + """Create a ByteParser on demand.""" + if not self._byte_parser: + self._byte_parser = \ + ByteParser(text=self.text, filename=self.filename) + return self._byte_parser + byte_parser = property(_get_byte_parser) + + def lines_matching(self, *regexes): + """Find the lines matching one of a list of regexes. + + Returns a set of line numbers, the lines that contain a match for one + of the regexes in `regexes`. The entire line needn't match, just a + part of it. + + """ + regex_c = re.compile(join_regex(regexes)) + matches = set() + for i, ltext in enumerate(self.lines): + if regex_c.search(ltext): + matches.add(i+1) + return matches + + def _raw_parse(self): + """Parse the source to find the interesting facts about its lines. + + A handful of member fields are updated. + + """ + # Find lines which match an exclusion pattern. + if self.exclude: + self.excluded = self.lines_matching(self.exclude) + + # Tokenize, to find excluded suites, to find docstrings, and to find + # multi-line statements. 
+ indent = 0 + exclude_indent = 0 + excluding = False + prev_toktype = token.INDENT + first_line = None + empty = True + + tokgen = generate_tokens(self.text) + for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: + if self.show_tokens: # pragma: not covered + print("%10s %5s %-20r %r" % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), ttext, ltext + )) + if toktype == token.INDENT: + indent += 1 + elif toktype == token.DEDENT: + indent -= 1 + elif toktype == token.NAME and ttext == 'class': + # Class definitions look like branches in the byte code, so + # we need to exclude them. The simplest way is to note the + # lines with the 'class' keyword. + self.classdefs.add(slineno) + elif toktype == token.OP and ttext == ':': + if not excluding and elineno in self.excluded: + # Start excluding a suite. We trigger off of the colon + # token so that the #pragma comment will be recognized on + # the same line as the colon. + exclude_indent = indent + excluding = True + elif toktype == token.STRING and prev_toktype == token.INDENT: + # Strings that are first on an indented line are docstrings. + # (a trick from trace.py in the stdlib.) This works for + # 99.9999% of cases. For the rest (!) see: + # http://stackoverflow.com/questions/1769332/x/1769794#1769794 + self.docstrings.update(range(slineno, elineno+1)) + elif toktype == token.NEWLINE: + if first_line is not None and elineno != first_line: + # We're at the end of a line, and we've ended on a + # different line than the first line of the statement, + # so record a multi-line range. + rng = (first_line, elineno) + for l in range(first_line, elineno+1): + self.multiline[l] = rng + first_line = None + + if ttext.strip() and toktype != tokenize.COMMENT: + # A non-whitespace token. + empty = False + if first_line is None: + # The token is not whitespace, and is the first in a + # statement. + first_line = slineno + # Check whether to end an excluded suite. + if excluding and indent <= exclude_indent: + excluding = False + if excluding: + self.excluded.add(elineno) + + prev_toktype = toktype + + # Find the starts of the executable statements. + if not empty: + self.statement_starts.update(self.byte_parser._find_statements()) + + def first_line(self, line): + """Return the first line number of the statement including `line`.""" + rng = self.multiline.get(line) + if rng: + first_line = rng[0] + else: + first_line = line + return first_line + + def first_lines(self, lines, *ignores): + """Map the line numbers in `lines` to the correct first line of the + statement. + + Skip any line mentioned in any of the sequences in `ignores`. + + Returns a set of the first lines. + + """ + ignore = set() + for ign in ignores: + ignore.update(ign) + lset = set() + for l in lines: + if l in ignore: + continue + new_l = self.first_line(l) + if new_l not in ignore: + lset.add(new_l) + return lset + + def parse_source(self): + """Parse source text to find executable lines, excluded lines, etc. + + Return values are 1) a set of executable line numbers, and 2) a set of + excluded line numbers. + + Reported line numbers are normalized to the first line of multi-line + statements. 
+ + """ + try: + self._raw_parse() + except (tokenize.TokenError, IndentationError): + _, tokerr, _ = sys.exc_info() + msg, lineno = tokerr.args + raise NotPython( + "Couldn't parse '%s' as Python source: '%s' at %s" % + (self.filename, msg, lineno) + ) + + excluded_lines = self.first_lines(self.excluded) + lines = self.first_lines( + self.statement_starts, + excluded_lines, + self.docstrings + ) + + return lines, excluded_lines + + def arcs(self): + """Get information about the arcs available in the code. + + Returns a sorted list of line number pairs. Line numbers have been + normalized to the first line of multiline statements. + + """ + all_arcs = [] + for l1, l2 in self.byte_parser._all_arcs(): + fl1 = self.first_line(l1) + fl2 = self.first_line(l2) + if fl1 != fl2: + all_arcs.append((fl1, fl2)) + return sorted(all_arcs) + arcs = expensive(arcs) + + def exit_counts(self): + """Get a mapping from line numbers to count of exits from that line. + + Excluded lines are excluded. + + """ + excluded_lines = self.first_lines(self.excluded) + exit_counts = {} + for l1, l2 in self.arcs(): + if l1 < 0: + # Don't ever report -1 as a line number + continue + if l1 in excluded_lines: + # Don't report excluded lines as line numbers. + continue + if l2 in excluded_lines: + # Arcs to excluded lines shouldn't count. + continue + if l1 not in exit_counts: + exit_counts[l1] = 0 + exit_counts[l1] += 1 + + # Class definitions have one extra exit, so remove one for each: + for l in self.classdefs: + # Ensure key is there: classdefs can include excluded lines. + if l in exit_counts: + exit_counts[l] -= 1 + + return exit_counts + exit_counts = expensive(exit_counts) + + +## Opcodes that guide the ByteParser. + +def _opcode(name): + """Return the opcode by name from the dis module.""" + return dis.opmap[name] + +def _opcode_set(*names): + """Return a set of opcodes by the names in `names`.""" + s = set() + for name in names: + try: + s.add(_opcode(name)) + except KeyError: + pass + return s + +# Opcodes that leave the code object. +OPS_CODE_END = _opcode_set('RETURN_VALUE') + +# Opcodes that unconditionally end the code chunk. +OPS_CHUNK_END = _opcode_set( + 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS', + 'BREAK_LOOP', 'CONTINUE_LOOP', + ) + +# Opcodes that unconditionally begin a new code chunk. By starting new chunks +# with unconditional jump instructions, we neatly deal with jumps to jumps +# properly. +OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD') + +# Opcodes that push a block on the block stack. +OPS_PUSH_BLOCK = _opcode_set( + 'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH' + ) + +# Block types for exception handling. +OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY') + +# Opcodes that pop a block from the block stack. +OPS_POP_BLOCK = _opcode_set('POP_BLOCK') + +# Opcodes that have a jump destination, but aren't really a jump. +OPS_NO_JUMP = OPS_PUSH_BLOCK + +# Individual opcodes we need below. +OP_BREAK_LOOP = _opcode('BREAK_LOOP') +OP_END_FINALLY = _opcode('END_FINALLY') +OP_COMPARE_OP = _opcode('COMPARE_OP') +COMPARE_EXCEPTION = 10 # just have to get this const from the code. 
+OP_LOAD_CONST = _opcode('LOAD_CONST') +OP_RETURN_VALUE = _opcode('RETURN_VALUE') + + +class ByteParser(object): + """Parse byte codes to understand the structure of code.""" + + def __init__(self, code=None, text=None, filename=None): + if code: + self.code = code + self.text = text + else: + if not text: + assert filename, "If no code or text, need a filename" + sourcef = open_source(filename) + try: + text = sourcef.read() + finally: + sourcef.close() + self.text = text + + try: + # Python 2.3 and 2.4 don't like partial last lines, so be sure + # the text ends nicely for them. + self.code = compile(text + '\n', filename, "exec") + except SyntaxError: + _, synerr, _ = sys.exc_info() + raise NotPython( + "Couldn't parse '%s' as Python source: '%s' at line %d" % + (filename, synerr.msg, synerr.lineno) + ) + + # Alternative Python implementations don't always provide all the + # attributes on code objects that we need to do the analysis. + for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']: + if not hasattr(self.code, attr): + raise CoverageException( + "This implementation of Python doesn't support code " + "analysis.\n" + "Run coverage.py under CPython for this command." + ) + + def child_parsers(self): + """Iterate over all the code objects nested within this one. + + The iteration includes `self` as its first value. + + """ + children = CodeObjects(self.code) + return [ByteParser(code=c, text=self.text) for c in children] + + def _bytes_lines(self): + """Map byte offsets to line numbers in `code`. + + Uses co_lnotab described in Python/compile.c to map byte offsets to + line numbers. Produces a sequence: (b0, l0), (b1, l1), ... + + Only byte offsets that correspond to line numbers are included in the + results. + + """ + # Adapted from dis.py in the standard library. + byte_increments = bytes_to_ints(self.code.co_lnotab[0::2]) + line_increments = bytes_to_ints(self.code.co_lnotab[1::2]) + + last_line_num = None + line_num = self.code.co_firstlineno + byte_num = 0 + for byte_incr, line_incr in zip(byte_increments, line_increments): + if byte_incr: + if line_num != last_line_num: + yield (byte_num, line_num) + last_line_num = line_num + byte_num += byte_incr + line_num += line_incr + if line_num != last_line_num: + yield (byte_num, line_num) + + def _find_statements(self): + """Find the statements in `self.code`. + + Produce a sequence of line numbers that start statements. Recurses + into all code objects reachable from `self.code`. + + """ + for bp in self.child_parsers(): + # Get all of the lineno information from this code. + for _, l in bp._bytes_lines(): + yield l + + def _block_stack_repr(self, block_stack): + """Get a string version of `block_stack`, for debugging.""" + blocks = ", ".join( + ["(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack] + ) + return "[" + blocks + "]" + + def _split_into_chunks(self): + """Split the code object into a list of `Chunk` objects. + + Each chunk is only entered at its first instruction, though there can + be many exits from a chunk. + + Returns a list of `Chunk` objects. + + """ + # The list of chunks so far, and the one we're working on. + chunks = [] + chunk = None + + # A dict mapping byte offsets of line starts to the line numbers. + bytes_lines_map = dict(self._bytes_lines()) + + # The block stack: loops and try blocks get pushed here for the + # implicit jumps that can occur. + # Each entry is a tuple: (block type, destination) + block_stack = [] + + # Some op codes are followed by branches that should be ignored. 
This + # is a count of how many ignores are left. + ignore_branch = 0 + + # We have to handle the last two bytecodes specially. + ult = penult = None + + # Get a set of all of the jump-to points. + jump_to = set() + bytecodes = list(ByteCodes(self.code.co_code)) + for bc in bytecodes: + if bc.jump_to >= 0: + jump_to.add(bc.jump_to) + + chunk_lineno = 0 + + # Walk the byte codes building chunks. + for bc in bytecodes: + # Maybe have to start a new chunk + start_new_chunk = False + first_chunk = False + if bc.offset in bytes_lines_map: + # Start a new chunk for each source line number. + start_new_chunk = True + chunk_lineno = bytes_lines_map[bc.offset] + first_chunk = True + elif bc.offset in jump_to: + # To make chunks have a single entrance, we have to make a new + # chunk when we get to a place some bytecode jumps to. + start_new_chunk = True + elif bc.op in OPS_CHUNK_BEGIN: + # Jumps deserve their own unnumbered chunk. This fixes + # problems with jumps to jumps getting confused. + start_new_chunk = True + + if not chunk or start_new_chunk: + if chunk: + chunk.exits.add(bc.offset) + chunk = Chunk(bc.offset, chunk_lineno, first_chunk) + chunks.append(chunk) + + # Look at the opcode + if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP: + if ignore_branch: + # Someone earlier wanted us to ignore this branch. + ignore_branch -= 1 + else: + # The opcode has a jump, it's an exit for this chunk. + chunk.exits.add(bc.jump_to) + + if bc.op in OPS_CODE_END: + # The opcode can exit the code object. + chunk.exits.add(-self.code.co_firstlineno) + if bc.op in OPS_PUSH_BLOCK: + # The opcode adds a block to the block_stack. + block_stack.append((bc.op, bc.jump_to)) + if bc.op in OPS_POP_BLOCK: + # The opcode pops a block from the block stack. + block_stack.pop() + if bc.op in OPS_CHUNK_END: + # This opcode forces the end of the chunk. + if bc.op == OP_BREAK_LOOP: + # A break is implicit: jump where the top of the + # block_stack points. + chunk.exits.add(block_stack[-1][1]) + chunk = None + if bc.op == OP_END_FINALLY: + # For the finally clause we need to find the closest exception + # block, and use its jump target as an exit. + for block in reversed(block_stack): + if block[0] in OPS_EXCEPT_BLOCKS: + chunk.exits.add(block[1]) + break + if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION: + # This is an except clause. We want to overlook the next + # branch, so that except's don't count as branches. + ignore_branch += 1 + + penult = ult + ult = bc + + if chunks: + # The last two bytecodes could be a dummy "return None" that + # shouldn't be counted as real code. Every Python code object seems + # to end with a return, and a "return None" is inserted if there + # isn't an explicit return in the source. + if ult and penult: + if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE: + if self.code.co_consts[penult.arg] is None: + # This is "return None", but is it dummy? A real line + # would be a last chunk all by itself. + if chunks[-1].byte != penult.offset: + ex = -self.code.co_firstlineno + # Split the last chunk + last_chunk = chunks[-1] + last_chunk.exits.remove(ex) + last_chunk.exits.add(penult.offset) + chunk = Chunk( + penult.offset, last_chunk.line, False + ) + chunk.exits.add(ex) + chunks.append(chunk) + + # Give all the chunks a length. 
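# [Editor's aside -- illustrative only, not part of the upstream coverage.py
#  sources being imported.] The two assignments just below size each chunk as
# the distance to the start of the next chunk, with the final chunk running
# to the end of the last bytecode seen. For instance, chunks starting at byte
# offsets 0, 6 and 13, with the last instruction ending at offset 20, get
# lengths 6, 7 and 7 respectively.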
+ chunks[-1].length = bc.next_offset - chunks[-1].byte # pylint: disable=W0631,C0301 + for i in range(len(chunks)-1): + chunks[i].length = chunks[i+1].byte - chunks[i].byte + + #self.validate_chunks(chunks) + return chunks + + def validate_chunks(self, chunks): + """Validate the rule that chunks have a single entrance.""" + # starts is the entrances to the chunks + starts = set([ch.byte for ch in chunks]) + for ch in chunks: + assert all([(ex in starts or ex < 0) for ex in ch.exits]) + + def _arcs(self): + """Find the executable arcs in the code. + + Yields pairs: (from,to). From and to are integer line numbers. If + from is < 0, then the arc is an entrance into the code object. If to + is < 0, the arc is an exit from the code object. + + """ + chunks = self._split_into_chunks() + + # A map from byte offsets to chunks jumped into. + byte_chunks = dict([(c.byte, c) for c in chunks]) + + # There's always an entrance at the first chunk. + yield (-1, byte_chunks[0].line) + + # Traverse from the first chunk in each line, and yield arcs where + # the trace function will be invoked. + for chunk in chunks: + if not chunk.first: + continue + + chunks_considered = set() + chunks_to_consider = [chunk] + while chunks_to_consider: + # Get the chunk we're considering, and make sure we don't + # consider it again + this_chunk = chunks_to_consider.pop() + chunks_considered.add(this_chunk) + + # For each exit, add the line number if the trace function + # would be triggered, or add the chunk to those being + # considered if not. + for ex in this_chunk.exits: + if ex < 0: + yield (chunk.line, ex) + else: + next_chunk = byte_chunks[ex] + if next_chunk in chunks_considered: + continue + + # The trace function is invoked if visiting the first + # bytecode in a line, or if the transition is a + # backward jump. + backward_jump = next_chunk.byte < this_chunk.byte + if next_chunk.first or backward_jump: + if next_chunk.line != chunk.line: + yield (chunk.line, next_chunk.line) + else: + chunks_to_consider.append(next_chunk) + + def _all_chunks(self): + """Returns a list of `Chunk` objects for this code and its children. + + See `_split_into_chunks` for details. + + """ + chunks = [] + for bp in self.child_parsers(): + chunks.extend(bp._split_into_chunks()) + + return chunks + + def _all_arcs(self): + """Get the set of all arcs in this code object and its children. + + See `_arcs` for details. + + """ + arcs = set() + for bp in self.child_parsers(): + arcs.update(bp._arcs()) + + return arcs + + +class Chunk(object): + """A sequence of byte codes with a single entrance. + + To analyze byte code, we have to divide it into chunks, sequences of byte + codes such that each chunk has only one entrance, the first instruction in + the block. + + This is almost the CS concept of `basic block`_, except that we're willing + to have many exits from a chunk, and "basic block" is a more cumbersome + term. + + .. _basic block: http://en.wikipedia.org/wiki/Basic_block + + `line` is the source line number containing this chunk. + + `first` is true if this is the first chunk in the source line. + + An exit < 0 means the chunk can leave the code (return). The exit is + the negative of the starting line number of the code block. + + """ + def __init__(self, byte, line, first): + self.byte = byte + self.line = line + self.first = first + self.length = 0 + self.exits = set() + + def __repr__(self): + if self.first: + bang = "!" 
+ else: + bang = "" + return "<%d+%d @%d%s %r>" % ( + self.byte, self.length, self.line, bang, list(self.exits) + ) + + +class CachedTokenizer(object): + """A one-element cache around tokenize.generate_tokens. + + When reporting, coverage.py tokenizes files twice, once to find the + structure of the file, and once to syntax-color it. Tokenizing is + expensive, and easily cached. + + This is a one-element cache so that our twice-in-a-row tokenizing doesn't + actually tokenize twice. + + """ + def __init__(self): + self.last_text = None + self.last_tokens = None + + def generate_tokens(self, text): + """A stand-in for `tokenize.generate_tokens`.""" + if text != self.last_text: + self.last_text = text + self.last_tokens = list( + tokenize.generate_tokens(StringIO(text).readline) + ) + return self.last_tokens + +# Create our generate_tokens cache as a callable replacement function. +generate_tokens = CachedTokenizer().generate_tokens diff --git a/third_party/pycoverage/coverage/phystokens.py b/third_party/pycoverage/coverage/phystokens.py new file mode 100644 index 0000000..99b1d5b --- /dev/null +++ b/third_party/pycoverage/coverage/phystokens.py @@ -0,0 +1,210 @@ +"""Better tokenizing for coverage.py.""" + +import codecs, keyword, re, sys, token, tokenize +from coverage.backward import set # pylint: disable=W0622 +from coverage.parser import generate_tokens + + +def phys_tokens(toks): + """Return all physical tokens, even line continuations. + + tokenize.generate_tokens() doesn't return a token for the backslash that + continues lines. This wrapper provides those tokens so that we can + re-create a faithful representation of the original source. + + Returns the same values as generate_tokens() + + """ + last_line = None + last_lineno = -1 + last_ttype = None + for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: + if last_lineno != elineno: + if last_line and last_line.endswith("\\\n"): + # We are at the beginning of a new line, and the last line + # ended with a backslash. We probably have to inject a + # backslash token into the stream. Unfortunately, there's more + # to figure out. This code:: + # + # usage = """\ + # HEY THERE + # """ + # + # triggers this condition, but the token text is:: + # + # '"""\\\nHEY THERE\n"""' + # + # so we need to figure out if the backslash is already in the + # string token or not. + inject_backslash = True + if last_ttype == tokenize.COMMENT: + # Comments like this \ + # should never result in a new token. + inject_backslash = False + elif ttype == token.STRING: + if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\': + # It's a multiline string and the first line ends with + # a backslash, so we don't need to inject another. + inject_backslash = False + if inject_backslash: + # Figure out what column the backslash is in. + ccol = len(last_line.split("\n")[-2]) - 1 + # Yield the token, with a fake token type. + yield ( + 99999, "\\\n", + (slineno, ccol), (slineno, ccol+2), + last_line + ) + last_line = ltext + last_ttype = ttype + yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext + last_lineno = elineno + + +def source_token_lines(source): + """Generate a series of lines, one for each line in `source`. + + Each line is a list of pairs, each pair is a token:: + + [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] + + Each pair has a token class, and the token text. 
+ + If you concatenate all the token texts, and then join them with newlines, + you should have your original `source` back, with two differences: + trailing whitespace is not preserved, and a final line with no newline + is indistinguishable from a final line with a newline. + + """ + ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]) + line = [] + col = 0 + source = source.expandtabs(8).replace('\r\n', '\n') + tokgen = generate_tokens(source) + for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen): + mark_start = True + for part in re.split('(\n)', ttext): + if part == '\n': + yield line + line = [] + col = 0 + mark_end = False + elif part == '': + mark_end = False + elif ttype in ws_tokens: + mark_end = False + else: + if mark_start and scol > col: + line.append(("ws", " " * (scol - col))) + mark_start = False + tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3] + if ttype == token.NAME and keyword.iskeyword(ttext): + tok_class = "key" + line.append((tok_class, part)) + mark_end = True + scol = 0 + if mark_end: + col = ecol + + if line: + yield line + +def source_encoding(source): + """Determine the encoding for `source` (a string), according to PEP 263. + + Returns a string, the name of the encoding. + + """ + # Note: this function should never be called on Python 3, since py3 has + # built-in tools to do this. + assert sys.version_info < (3, 0) + + # This is mostly code adapted from Py3.2's tokenize module. + + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + # Do this so the detect_encode code we copied will work. + readline = iter(source.splitlines(True)).next + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if re.match(r"^utf-8($|-)", enc): + return "utf-8" + if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc): + return "iso-8859-1" + return orig_enc + + # From detect_encode(): + # It detects the encoding from the presence of a utf-8 bom or an encoding + # cookie as specified in pep-0263. If both a bom and a cookie are present, + # but disagree, a SyntaxError will be raised. If the encoding cookie is an + # invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + # 'utf-8-sig' is returned. + + # If no encoding is specified, then the default will be returned. The + # default varied with version. + + if sys.version_info <= (2, 4): + default = 'iso-8859-1' + else: + default = 'ascii' + + bom_found = False + encoding = None + + def read_or_stop(): + """Get the next source line, or ''.""" + try: + return readline() + except StopIteration: + return '' + + def find_cookie(line): + """Find an encoding cookie in `line`.""" + try: + line_string = line.decode('ascii') + except UnicodeDecodeError: + return None + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = codecs.lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + raise SyntaxError("unknown encoding: " + encoding) + + if bom_found: + # codecs in 2.3 were raw tuples of functions, assume the best. 
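# [Editor's aside -- illustrative only, not part of the upstream coverage.py
#  sources being imported.] Taken together, the cookie regex and
# _get_normal_name() mean a PEP 263 declaration is both found and normalised
# (under Python 2, as the assert at the top of source_encoding requires):
#
#     >>> source_encoding("# -*- coding: latin-1 -*-\nx = 1\n")
#     'iso-8859-1'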
+ codec_name = getattr(codec, 'name', encoding) + if codec_name != 'utf-8': + # This behaviour mimics the Python interpreter + raise SyntaxError('encoding problem: utf-8') + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(codecs.BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default + + encoding = find_cookie(first) + if encoding: + return encoding + + second = read_or_stop() + if not second: + return default + + encoding = find_cookie(second) + if encoding: + return encoding + + return default diff --git a/third_party/pycoverage/coverage/report.py b/third_party/pycoverage/coverage/report.py new file mode 100644 index 0000000..34f4442 --- /dev/null +++ b/third_party/pycoverage/coverage/report.py @@ -0,0 +1,92 @@ +"""Reporter foundation for Coverage.""" + +import fnmatch, os +from coverage.codeunit import code_unit_factory +from coverage.files import prep_patterns +from coverage.misc import CoverageException, NoSource, NotPython + +class Reporter(object): + """A base class for all reporters.""" + + def __init__(self, coverage, config): + """Create a reporter. + + `coverage` is the coverage instance. `config` is an instance of + CoverageConfig, for controlling all sorts of behavior. + + """ + self.coverage = coverage + self.config = config + + # The code units to report on. Set by find_code_units. + self.code_units = [] + + # The directory into which to place the report, used by some derived + # classes. + self.directory = None + + def find_code_units(self, morfs): + """Find the code units we'll report on. + + `morfs` is a list of modules or filenames. + + """ + morfs = morfs or self.coverage.data.measured_files() + file_locator = self.coverage.file_locator + self.code_units = code_unit_factory(morfs, file_locator) + + if self.config.include: + patterns = prep_patterns(self.config.include) + filtered = [] + for cu in self.code_units: + for pattern in patterns: + if fnmatch.fnmatch(cu.filename, pattern): + filtered.append(cu) + break + self.code_units = filtered + + if self.config.omit: + patterns = prep_patterns(self.config.omit) + filtered = [] + for cu in self.code_units: + for pattern in patterns: + if fnmatch.fnmatch(cu.filename, pattern): + break + else: + filtered.append(cu) + self.code_units = filtered + + self.code_units.sort() + + def report_files(self, report_fn, morfs, directory=None): + """Run a reporting function on a number of morfs. + + `report_fn` is called for each relative morf in `morfs`. It is called + as:: + + report_fn(code_unit, analysis) + + where `code_unit` is the `CodeUnit` for the morf, and `analysis` is + the `Analysis` for the morf. + + """ + self.find_code_units(morfs) + + if not self.code_units: + raise CoverageException("No data to report.") + + self.directory = directory + if self.directory and not os.path.exists(self.directory): + os.makedirs(self.directory) + + for cu in self.code_units: + try: + report_fn(cu, self.coverage._analyze(cu)) + except NoSource: + if not self.config.ignore_errors: + raise + except NotPython: + # Only report errors for .py files, and only if we didn't + # explicitly suppress those errors. 
+ if cu.should_be_python() and not self.config.ignore_errors: + raise diff --git a/third_party/pycoverage/coverage/results.py b/third_party/pycoverage/coverage/results.py new file mode 100644 index 0000000..db6df0d --- /dev/null +++ b/third_party/pycoverage/coverage/results.py @@ -0,0 +1,286 @@ +"""Results of coverage measurement.""" + +import os + +from coverage.backward import iitems, set, sorted # pylint: disable=W0622 +from coverage.misc import format_lines, join_regex, NoSource +from coverage.parser import CodeParser + + +class Analysis(object): + """The results of analyzing a code unit.""" + + def __init__(self, cov, code_unit): + self.coverage = cov + self.code_unit = code_unit + + self.filename = self.code_unit.filename + actual_filename, source = self.find_source(self.filename) + + self.parser = CodeParser( + text=source, filename=actual_filename, + exclude=self.coverage._exclude_regex('exclude') + ) + self.statements, self.excluded = self.parser.parse_source() + + # Identify missing statements. + executed = self.coverage.data.executed_lines(self.filename) + exec1 = self.parser.first_lines(executed) + self.missing = self.statements - exec1 + + if self.coverage.data.has_arcs(): + self.no_branch = self.parser.lines_matching( + join_regex(self.coverage.config.partial_list), + join_regex(self.coverage.config.partial_always_list) + ) + n_branches = self.total_branches() + mba = self.missing_branch_arcs() + n_partial_branches = sum( + [len(v) for k,v in iitems(mba) if k not in self.missing] + ) + n_missing_branches = sum([len(v) for k,v in iitems(mba)]) + else: + n_branches = n_partial_branches = n_missing_branches = 0 + self.no_branch = set() + + self.numbers = Numbers( + n_files=1, + n_statements=len(self.statements), + n_excluded=len(self.excluded), + n_missing=len(self.missing), + n_branches=n_branches, + n_partial_branches=n_partial_branches, + n_missing_branches=n_missing_branches, + ) + + def find_source(self, filename): + """Find the source for `filename`. + + Returns two values: the actual filename, and the source. + + The source returned depends on which of these cases holds: + + * The filename seems to be a non-source file: returns None + + * The filename is a source file, and actually exists: returns None. + + * The filename is a source file, and is in a zip file or egg: + returns the source. + + * The filename is a source file, but couldn't be found: raises + `NoSource`. + + """ + source = None + + base, ext = os.path.splitext(filename) + TRY_EXTS = { + '.py': ['.py', '.pyw'], + '.pyw': ['.pyw'], + } + try_exts = TRY_EXTS.get(ext) + if not try_exts: + return filename, None + + for try_ext in try_exts: + try_filename = base + try_ext + if os.path.exists(try_filename): + return try_filename, None + source = self.coverage.file_locator.get_zip_data(try_filename) + if source: + return try_filename, source + raise NoSource("No source for code: '%s'" % filename) + + def missing_formatted(self): + """The missing line numbers, formatted nicely. + + Returns a string like "1-2, 5-11, 13-14". 
+ + """ + return format_lines(self.statements, self.missing) + + def has_arcs(self): + """Were arcs measured in this result?""" + return self.coverage.data.has_arcs() + + def arc_possibilities(self): + """Returns a sorted list of the arcs in the code.""" + arcs = self.parser.arcs() + return arcs + + def arcs_executed(self): + """Returns a sorted list of the arcs actually executed in the code.""" + executed = self.coverage.data.executed_arcs(self.filename) + m2fl = self.parser.first_line + executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed] + return sorted(executed) + + def arcs_missing(self): + """Returns a sorted list of the arcs in the code not executed.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + missing = [ + p for p in possible + if p not in executed + and p[0] not in self.no_branch + ] + return sorted(missing) + + def arcs_unpredicted(self): + """Returns a sorted list of the executed arcs missing from the code.""" + possible = self.arc_possibilities() + executed = self.arcs_executed() + # Exclude arcs here which connect a line to itself. They can occur + # in executed data in some cases. This is where they can cause + # trouble, and here is where it's the least burden to remove them. + unpredicted = [ + e for e in executed + if e not in possible + and e[0] != e[1] + ] + return sorted(unpredicted) + + def branch_lines(self): + """Returns a list of line numbers that have more than one exit.""" + exit_counts = self.parser.exit_counts() + return [l1 for l1,count in iitems(exit_counts) if count > 1] + + def total_branches(self): + """How many total branches are there?""" + exit_counts = self.parser.exit_counts() + return sum([count for count in exit_counts.values() if count > 1]) + + def missing_branch_arcs(self): + """Return arcs that weren't executed from branch lines. + + Returns {l1:[l2a,l2b,...], ...} + + """ + missing = self.arcs_missing() + branch_lines = set(self.branch_lines()) + mba = {} + for l1, l2 in missing: + if l1 in branch_lines: + if l1 not in mba: + mba[l1] = [] + mba[l1].append(l2) + return mba + + def branch_stats(self): + """Get stats about branches. + + Returns a dict mapping line numbers to a tuple: + (total_exits, taken_exits). + """ + + exit_counts = self.parser.exit_counts() + missing_arcs = self.missing_branch_arcs() + stats = {} + for lnum in self.branch_lines(): + exits = exit_counts[lnum] + try: + missing = len(missing_arcs[lnum]) + except KeyError: + missing = 0 + stats[lnum] = (exits, exits - missing) + return stats + + +class Numbers(object): + """The numerical results of measuring coverage. + + This holds the basic statistics from `Analysis`, and is used to roll + up statistics across files. + + """ + # A global to determine the precision on coverage percentages, the number + # of decimal places. + _precision = 0 + _near0 = 1.0 # These will change when _precision is changed. 
+ _near100 = 99.0 + + def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0, + n_branches=0, n_partial_branches=0, n_missing_branches=0 + ): + self.n_files = n_files + self.n_statements = n_statements + self.n_excluded = n_excluded + self.n_missing = n_missing + self.n_branches = n_branches + self.n_partial_branches = n_partial_branches + self.n_missing_branches = n_missing_branches + + def set_precision(cls, precision): + """Set the number of decimal places used to report percentages.""" + assert 0 <= precision < 10 + cls._precision = precision + cls._near0 = 1.0 / 10**precision + cls._near100 = 100.0 - cls._near0 + set_precision = classmethod(set_precision) + + def _get_n_executed(self): + """Returns the number of executed statements.""" + return self.n_statements - self.n_missing + n_executed = property(_get_n_executed) + + def _get_n_executed_branches(self): + """Returns the number of executed branches.""" + return self.n_branches - self.n_missing_branches + n_executed_branches = property(_get_n_executed_branches) + + def _get_pc_covered(self): + """Returns a single percentage value for coverage.""" + if self.n_statements > 0: + pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) / + (self.n_statements + self.n_branches)) + else: + pc_cov = 100.0 + return pc_cov + pc_covered = property(_get_pc_covered) + + def _get_pc_covered_str(self): + """Returns the percent covered, as a string, without a percent sign. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + pc = self.pc_covered + if 0 < pc < self._near0: + pc = self._near0 + elif self._near100 < pc < 100: + pc = self._near100 + else: + pc = round(pc, self._precision) + return "%.*f" % (self._precision, pc) + pc_covered_str = property(_get_pc_covered_str) + + def pc_str_width(cls): + """How many characters wide can pc_covered_str be?""" + width = 3 # "100" + if cls._precision > 0: + width += 1 + cls._precision + return width + pc_str_width = classmethod(pc_str_width) + + def __add__(self, other): + nums = Numbers() + nums.n_files = self.n_files + other.n_files + nums.n_statements = self.n_statements + other.n_statements + nums.n_excluded = self.n_excluded + other.n_excluded + nums.n_missing = self.n_missing + other.n_missing + nums.n_branches = self.n_branches + other.n_branches + nums.n_partial_branches = ( + self.n_partial_branches + other.n_partial_branches + ) + nums.n_missing_branches = ( + self.n_missing_branches + other.n_missing_branches + ) + return nums + + def __radd__(self, other): + # Implementing 0+Numbers allows us to sum() a list of Numbers. + if other == 0: + return self + return NotImplemented diff --git a/third_party/pycoverage/coverage/summary.py b/third_party/pycoverage/coverage/summary.py new file mode 100644 index 0000000..c99c530 --- /dev/null +++ b/third_party/pycoverage/coverage/summary.py @@ -0,0 +1,86 @@ +"""Summary reporting""" + +import sys + +from coverage.report import Reporter +from coverage.results import Numbers +from coverage.misc import NotPython + + +class SummaryReporter(Reporter): + """A reporter for writing the summary report.""" + + def __init__(self, coverage, config): + super(SummaryReporter, self).__init__(coverage, config) + self.branches = coverage.data.has_arcs() + + def report(self, morfs, outfile=None): + """Writes a report summarizing coverage statistics per module. + + `outfile` is a file object to write the summary to. 
+ + """ + self.find_code_units(morfs) + + # Prepare the formatting strings + max_name = max([len(cu.name) for cu in self.code_units] + [5]) + fmt_name = "%%- %ds " % max_name + fmt_err = "%s %s: %s\n" + header = (fmt_name % "Name") + " Stmts Miss" + fmt_coverage = fmt_name + "%6d %6d" + if self.branches: + header += " Branch BrMiss" + fmt_coverage += " %6d %6d" + width100 = Numbers.pc_str_width() + header += "%*s" % (width100+4, "Cover") + fmt_coverage += "%%%ds%%%%" % (width100+3,) + if self.config.show_missing: + header += " Missing" + fmt_coverage += " %s" + rule = "-" * len(header) + "\n" + header += "\n" + fmt_coverage += "\n" + + if not outfile: + outfile = sys.stdout + + # Write the header + outfile.write(header) + outfile.write(rule) + + total = Numbers() + + for cu in self.code_units: + try: + analysis = self.coverage._analyze(cu) + nums = analysis.numbers + args = (cu.name, nums.n_statements, nums.n_missing) + if self.branches: + args += (nums.n_branches, nums.n_missing_branches) + args += (nums.pc_covered_str,) + if self.config.show_missing: + args += (analysis.missing_formatted(),) + outfile.write(fmt_coverage % args) + total += nums + except KeyboardInterrupt: # pragma: not covered + raise + except: + report_it = not self.config.ignore_errors + if report_it: + typ, msg = sys.exc_info()[:2] + if typ is NotPython and not cu.should_be_python(): + report_it = False + if report_it: + outfile.write(fmt_err % (cu.name, typ.__name__, msg)) + + if total.n_files > 1: + outfile.write(rule) + args = ("TOTAL", total.n_statements, total.n_missing) + if self.branches: + args += (total.n_branches, total.n_missing_branches) + args += (total.pc_covered_str,) + if self.config.show_missing: + args += ("",) + outfile.write(fmt_coverage % args) + + return total.pc_covered diff --git a/third_party/pycoverage/coverage/templite.py b/third_party/pycoverage/coverage/templite.py new file mode 100644 index 0000000..e5c0baf --- /dev/null +++ b/third_party/pycoverage/coverage/templite.py @@ -0,0 +1,208 @@ +"""A simple Python template renderer, for a nano-subset of Django syntax.""" + +# Coincidentally named the same as http://code.activestate.com/recipes/496702/ + +import re + +from coverage.backward import set # pylint: disable=W0622 + + +class CodeBuilder(object): + """Build source code conveniently.""" + + def __init__(self, indent=0): + self.code = [] + self.indent_amount = indent + + def add_line(self, line): + """Add a line of source to the code. + + Don't include indentations or newlines. + + """ + self.code.append(" " * self.indent_amount) + self.code.append(line) + self.code.append("\n") + + def add_section(self): + """Add a section, a sub-CodeBuilder.""" + sect = CodeBuilder(self.indent_amount) + self.code.append(sect) + return sect + + def indent(self): + """Increase the current indent for following lines.""" + self.indent_amount += 4 + + def dedent(self): + """Decrease the current indent for following lines.""" + self.indent_amount -= 4 + + def __str__(self): + return "".join([str(c) for c in self.code]) + + def get_function(self, fn_name): + """Compile the code, and return the function `fn_name`.""" + assert self.indent_amount == 0 + g = {} + code_text = str(self) + exec(code_text, g) + return g[fn_name] + + +class Templite(object): + """A simple template renderer, for a nano-subset of Django syntax. 
+ + Supported constructs are extended variable access:: + + {{var.modifer.modifier|filter|filter}} + + loops:: + + {% for var in list %}...{% endfor %} + + and ifs:: + + {% if var %}...{% endif %} + + Comments are within curly-hash markers:: + + {# This will be ignored #} + + Construct a Templite with the template text, then use `render` against a + dictionary context to create a finished string. + + """ + def __init__(self, text, *contexts): + """Construct a Templite with the given `text`. + + `contexts` are dictionaries of values to use for future renderings. + These are good for filters and global values. + + """ + self.text = text + self.context = {} + for context in contexts: + self.context.update(context) + + # We construct a function in source form, then compile it and hold onto + # it, and execute it to render the template. + code = CodeBuilder() + + code.add_line("def render(ctx, dot):") + code.indent() + vars_code = code.add_section() + self.all_vars = set() + self.loop_vars = set() + code.add_line("result = []") + code.add_line("a = result.append") + code.add_line("e = result.extend") + code.add_line("s = str") + + buffered = [] + def flush_output(): + """Force `buffered` to the code builder.""" + if len(buffered) == 1: + code.add_line("a(%s)" % buffered[0]) + elif len(buffered) > 1: + code.add_line("e([%s])" % ",".join(buffered)) + del buffered[:] + + # Split the text to form a list of tokens. + toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) + + ops_stack = [] + for tok in toks: + if tok.startswith('{{'): + # An expression to evaluate. + buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip())) + elif tok.startswith('{#'): + # Comment: ignore it and move on. + continue + elif tok.startswith('{%'): + # Action tag: split into words and parse further. + flush_output() + words = tok[2:-2].strip().split() + if words[0] == 'if': + # An if statement: evaluate the expression to determine if. + assert len(words) == 2 + ops_stack.append('if') + code.add_line("if %s:" % self.expr_code(words[1])) + code.indent() + elif words[0] == 'for': + # A loop: iterate over expression result. + assert len(words) == 4 and words[2] == 'in' + ops_stack.append('for') + self.loop_vars.add(words[1]) + code.add_line( + "for c_%s in %s:" % ( + words[1], + self.expr_code(words[3]) + ) + ) + code.indent() + elif words[0].startswith('end'): + # Endsomething. Pop the ops stack + end_what = words[0][3:] + if ops_stack[-1] != end_what: + raise SyntaxError("Mismatched end tag: %r" % end_what) + ops_stack.pop() + code.dedent() + else: + raise SyntaxError("Don't understand tag: %r" % words[0]) + else: + # Literal content. If it isn't empty, output it. + if tok: + buffered.append("%r" % tok) + flush_output() + + for var_name in self.all_vars - self.loop_vars: + vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name)) + + if ops_stack: + raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1]) + + code.add_line("return ''.join(result)") + code.dedent() + self.render_function = code.get_function('render') + + def expr_code(self, expr): + """Generate a Python expression for `expr`.""" + if "|" in expr: + pipes = expr.split("|") + code = self.expr_code(pipes[0]) + for func in pipes[1:]: + self.all_vars.add(func) + code = "c_%s(%s)" % (func, code) + elif "." 
in expr: + dots = expr.split(".") + code = self.expr_code(dots[0]) + args = [repr(d) for d in dots[1:]] + code = "dot(%s, %s)" % (code, ", ".join(args)) + else: + self.all_vars.add(expr) + code = "c_%s" % expr + return code + + def render(self, context=None): + """Render this template by applying it to `context`. + + `context` is a dictionary of values to use in this rendering. + + """ + # Make the complete context we'll use. + ctx = dict(self.context) + if context: + ctx.update(context) + return self.render_function(ctx, self.do_dots) + + def do_dots(self, value, *dots): + """Evaluate dotted expressions at runtime.""" + for dot in dots: + try: + value = getattr(value, dot) + except AttributeError: + value = value[dot] + if hasattr(value, '__call__'): + value = value() + return value diff --git a/third_party/pycoverage/coverage/tracer.c b/third_party/pycoverage/coverage/tracer.c new file mode 100644 index 0000000..97dd113 --- /dev/null +++ b/third_party/pycoverage/coverage/tracer.c @@ -0,0 +1,730 @@ +/* C-based Tracer for Coverage. */ + +#include "Python.h" +#include "compile.h" /* in 2.3, this wasn't part of Python.h */ +#include "eval.h" /* or this. */ +#include "structmember.h" +#include "frameobject.h" + +/* Compile-time debugging helpers */ +#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */ +#undef TRACE_LOG /* Define to log our bookkeeping. */ +#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */ + +#if COLLECT_STATS +#define STATS(x) x +#else +#define STATS(x) +#endif + +/* Py 2.x and 3.x compatibility */ + +#ifndef Py_TYPE +#define Py_TYPE(o) (((PyObject*)(o))->ob_type) +#endif + +#if PY_MAJOR_VERSION >= 3 + +#define MyText_Type PyUnicode_Type +#define MyText_Check(o) PyUnicode_Check(o) +#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o) +#define MyText_AS_STRING(o) PyBytes_AS_STRING(o) +#define MyInt_FromLong(l) PyLong_FromLong(l) + +#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0) + +#else + +#define MyText_Type PyString_Type +#define MyText_Check(o) PyString_Check(o) +#define MyText_AS_BYTES(o) (Py_INCREF(o), o) +#define MyText_AS_STRING(o) PyString_AS_STRING(o) +#define MyInt_FromLong(l) PyInt_FromLong(l) + +#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0, + +#endif /* Py3k */ + +/* The values returned to indicate ok or error. */ +#define RET_OK 0 +#define RET_ERROR -1 + +/* An entry on the data stack. For each call frame, we need to record the + dictionary to capture data, and the last line number executed in that + frame. +*/ +typedef struct { + PyObject * file_data; /* PyMem_Malloc'ed, a borrowed ref. */ + int last_line; +} DataStackEntry; + +/* The CTracer type. */ + +typedef struct { + PyObject_HEAD + + /* Python objects manipulated directly by the Collector class. */ + PyObject * should_trace; + PyObject * warn; + PyObject * data; + PyObject * should_trace_cache; + PyObject * arcs; + + /* Has the tracer been started? */ + int started; + /* Are we tracing arcs, or just lines? */ + int tracing_arcs; + + /* + The data stack is a stack of dictionaries. Each dictionary collects + data for a single source file. The data stack parallels the call stack: + each call pushes the new frame's file data onto the data stack, and each + return pops file data off. + + The file data is a dictionary whose form depends on the tracing options. + If tracing arcs, the keys are line number pairs. If not tracing arcs, + the keys are line numbers. In both cases, the value is irrelevant + (None). 
+ */ + /* The index of the last-used entry in data_stack. */ + int depth; + /* The file data at each level, or NULL if not recording. */ + DataStackEntry * data_stack; + int data_stack_alloc; /* number of entries allocated at data_stack. */ + + /* The current file_data dictionary. Borrowed. */ + PyObject * cur_file_data; + + /* The line number of the last line recorded, for tracing arcs. + -1 means there was no previous line, as when entering a code object. + */ + int last_line; + + /* The parent frame for the last exception event, to fix missing returns. */ + PyFrameObject * last_exc_back; + int last_exc_firstlineno; + +#if COLLECT_STATS + struct { + unsigned int calls; + unsigned int lines; + unsigned int returns; + unsigned int exceptions; + unsigned int others; + unsigned int new_files; + unsigned int missed_returns; + unsigned int stack_reallocs; + unsigned int errors; + } stats; +#endif /* COLLECT_STATS */ +} CTracer; + +#define STACK_DELTA 100 + +static int +CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused) +{ +#if COLLECT_STATS + self->stats.calls = 0; + self->stats.lines = 0; + self->stats.returns = 0; + self->stats.exceptions = 0; + self->stats.others = 0; + self->stats.new_files = 0; + self->stats.missed_returns = 0; + self->stats.stack_reallocs = 0; + self->stats.errors = 0; +#endif /* COLLECT_STATS */ + + self->should_trace = NULL; + self->warn = NULL; + self->data = NULL; + self->should_trace_cache = NULL; + self->arcs = NULL; + + self->started = 0; + self->tracing_arcs = 0; + + self->depth = -1; + self->data_stack = PyMem_Malloc(STACK_DELTA*sizeof(DataStackEntry)); + if (self->data_stack == NULL) { + STATS( self->stats.errors++; ) + PyErr_NoMemory(); + return RET_ERROR; + } + self->data_stack_alloc = STACK_DELTA; + + self->cur_file_data = NULL; + self->last_line = -1; + + self->last_exc_back = NULL; + + return RET_OK; +} + +static void +CTracer_dealloc(CTracer *self) +{ + if (self->started) { + PyEval_SetTrace(NULL, NULL); + } + + Py_XDECREF(self->should_trace); + Py_XDECREF(self->warn); + Py_XDECREF(self->data); + Py_XDECREF(self->should_trace_cache); + + PyMem_Free(self->data_stack); + + Py_TYPE(self)->tp_free((PyObject*)self); +} + +#if TRACE_LOG +static const char * +indent(int n) +{ + static const char * spaces = + " " + " " + " " + " " + ; + return spaces + strlen(spaces) - n*2; +} + +static int logging = 0; +/* Set these constants to be a file substring and line number to start logging. */ +static const char * start_file = "tests/views"; +static int start_line = 27; + +static void +showlog(int depth, int lineno, PyObject * filename, const char * msg) +{ + if (logging) { + printf("%s%3d ", indent(depth), depth); + if (lineno) { + printf("%4d", lineno); + } + else { + printf(" "); + } + if (filename) { + PyObject *ascii = MyText_AS_BYTES(filename); + printf(" %s", MyText_AS_STRING(ascii)); + Py_DECREF(ascii); + } + if (msg) { + printf(" %s", msg); + } + printf("\n"); + } +} + +#define SHOWLOG(a,b,c,d) showlog(a,b,c,d) +#else +#define SHOWLOG(a,b,c,d) +#endif /* TRACE_LOG */ + +#if WHAT_LOG +static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "}; +#endif + +/* Record a pair of integers in self->cur_file_data. 
*/ +static int +CTracer_record_pair(CTracer *self, int l1, int l2) +{ + int ret = RET_OK; + + PyObject * t = Py_BuildValue("(ii)", l1, l2); + if (t != NULL) { + if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) { + STATS( self->stats.errors++; ) + ret = RET_ERROR; + } + Py_DECREF(t); + } + else { + STATS( self->stats.errors++; ) + ret = RET_ERROR; + } + return ret; +} + +/* + * The Trace Function + */ +static int +CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused) +{ + int ret = RET_OK; + PyObject * filename = NULL; + PyObject * tracename = NULL; + #if WHAT_LOG || TRACE_LOG + PyObject * ascii = NULL; + #endif + + #if WHAT_LOG + if (what <= sizeof(what_sym)/sizeof(const char *)) { + ascii = MyText_AS_BYTES(frame->f_code->co_filename); + printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(ascii), frame->f_lineno); + Py_DECREF(ascii); + } + #endif + + #if TRACE_LOG + ascii = MyText_AS_BYTES(frame->f_code->co_filename); + if (strstr(MyText_AS_STRING(ascii), start_file) && frame->f_lineno == start_line) { + logging = 1; + } + Py_DECREF(ascii); + #endif + + /* See below for details on missing-return detection. */ + if (self->last_exc_back) { + if (frame == self->last_exc_back) { + /* Looks like someone forgot to send a return event. We'll clear + the exception state and do the RETURN code here. Notice that the + frame we have in hand here is not the correct frame for the RETURN, + that frame is gone. Our handling for RETURN doesn't need the + actual frame, but we do log it, so that will look a little off if + you're looking at the detailed log. + + If someday we need to examine the frame when doing RETURN, then + we'll need to keep more of the missed frame's state. + */ + STATS( self->stats.missed_returns++; ) + if (self->depth >= 0) { + if (self->tracing_arcs && self->cur_file_data) { + if (CTracer_record_pair(self, self->last_line, -self->last_exc_firstlineno) < 0) { + return RET_ERROR; + } + } + SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn"); + self->cur_file_data = self->data_stack[self->depth].file_data; + self->last_line = self->data_stack[self->depth].last_line; + self->depth--; + } + } + self->last_exc_back = NULL; + } + + + switch (what) { + case PyTrace_CALL: /* 0 */ + STATS( self->stats.calls++; ) + /* Grow the stack. */ + self->depth++; + if (self->depth >= self->data_stack_alloc) { + STATS( self->stats.stack_reallocs++; ) + /* We've outgrown our data_stack array: make it bigger. */ + int bigger = self->data_stack_alloc + STACK_DELTA; + DataStackEntry * bigger_data_stack = PyMem_Realloc(self->data_stack, bigger * sizeof(DataStackEntry)); + if (bigger_data_stack == NULL) { + STATS( self->stats.errors++; ) + PyErr_NoMemory(); + self->depth--; + return RET_ERROR; + } + self->data_stack = bigger_data_stack; + self->data_stack_alloc = bigger; + } + + /* Push the current state on the stack. */ + self->data_stack[self->depth].file_data = self->cur_file_data; + self->data_stack[self->depth].last_line = self->last_line; + + /* Check if we should trace this line. */ + filename = frame->f_code->co_filename; + tracename = PyDict_GetItem(self->should_trace_cache, filename); + if (tracename == NULL) { + STATS( self->stats.new_files++; ) + /* We've never considered this file before. */ + /* Ask should_trace about it. 
*/ + PyObject * args = Py_BuildValue("(OO)", filename, frame); + tracename = PyObject_Call(self->should_trace, args, NULL); + Py_DECREF(args); + if (tracename == NULL) { + /* An error occurred inside should_trace. */ + STATS( self->stats.errors++; ) + return RET_ERROR; + } + if (PyDict_SetItem(self->should_trace_cache, filename, tracename) < 0) { + STATS( self->stats.errors++; ) + return RET_ERROR; + } + } + else { + Py_INCREF(tracename); + } + + /* If tracename is a string, then we're supposed to trace. */ + if (MyText_Check(tracename)) { + PyObject * file_data = PyDict_GetItem(self->data, tracename); + if (file_data == NULL) { + file_data = PyDict_New(); + if (file_data == NULL) { + STATS( self->stats.errors++; ) + return RET_ERROR; + } + ret = PyDict_SetItem(self->data, tracename, file_data); + Py_DECREF(file_data); + if (ret < 0) { + STATS( self->stats.errors++; ) + return RET_ERROR; + } + } + self->cur_file_data = file_data; + /* Make the frame right in case settrace(gettrace()) happens. */ + Py_INCREF(self); + frame->f_trace = (PyObject*)self; + SHOWLOG(self->depth, frame->f_lineno, filename, "traced"); + } + else { + self->cur_file_data = NULL; + SHOWLOG(self->depth, frame->f_lineno, filename, "skipped"); + } + + Py_DECREF(tracename); + + self->last_line = -1; + break; + + case PyTrace_RETURN: /* 3 */ + STATS( self->stats.returns++; ) + /* A near-copy of this code is above in the missing-return handler. */ + if (self->depth >= 0) { + if (self->tracing_arcs && self->cur_file_data) { + int first = frame->f_code->co_firstlineno; + if (CTracer_record_pair(self, self->last_line, -first) < 0) { + return RET_ERROR; + } + } + + SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "return"); + self->cur_file_data = self->data_stack[self->depth].file_data; + self->last_line = self->data_stack[self->depth].last_line; + self->depth--; + } + break; + + case PyTrace_LINE: /* 2 */ + STATS( self->stats.lines++; ) + if (self->depth >= 0) { + SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "line"); + if (self->cur_file_data) { + /* We're tracing in this frame: record something. */ + if (self->tracing_arcs) { + /* Tracing arcs: key is (last_line,this_line). */ + if (CTracer_record_pair(self, self->last_line, frame->f_lineno) < 0) { + return RET_ERROR; + } + } + else { + /* Tracing lines: key is simply this_line. */ + PyObject * this_line = MyInt_FromLong(frame->f_lineno); + if (this_line == NULL) { + STATS( self->stats.errors++; ) + return RET_ERROR; + } + ret = PyDict_SetItem(self->cur_file_data, this_line, Py_None); + Py_DECREF(this_line); + if (ret < 0) { + STATS( self->stats.errors++; ) + return RET_ERROR; + } + } + } + self->last_line = frame->f_lineno; + } + break; + + case PyTrace_EXCEPTION: + /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event + without a return event. To detect that, we'll keep a copy of the + parent frame for an exception event. If the next event is in that + frame, then we must have returned without a return event. We can + synthesize the missing event then. + + Python itself fixed this problem in 2.4. Pyexpat still has the bug. + I've reported the problem with pyexpat as http://bugs.python.org/issue6359 . + If it gets fixed, this code should still work properly. Maybe some day + the bug will be fixed everywhere coverage.py is supported, and we can + remove this missing-return detection. 
+ + More about this fix: http://nedbatchelder.com/blog/200907/a_nasty_little_bug.html + */ + STATS( self->stats.exceptions++; ) + self->last_exc_back = frame->f_back; + self->last_exc_firstlineno = frame->f_code->co_firstlineno; + break; + + default: + STATS( self->stats.others++; ) + break; + } + + return RET_OK; +} + +/* + * Python has two ways to set the trace function: sys.settrace(fn), which + * takes a Python callable, and PyEval_SetTrace(func, obj), which takes + * a C function and a Python object. The way these work together is that + * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the + * Python callable as the object in PyEval_SetTrace. So sys.gettrace() + * simply returns the Python object used as the second argument to + * PyEval_SetTrace. So sys.gettrace() will return our self parameter, which + * means it must be callable to be used in sys.settrace(). + * + * So we make our self callable, equivalent to invoking our trace function. + * + * To help with the process of replaying stored frames, this function has an + * optional keyword argument: + * + * def CTracer_call(frame, event, arg, lineno=0) + * + * If provided, the lineno argument is used as the line number, and the + * frame's f_lineno member is ignored. + */ +static PyObject * +CTracer_call(CTracer *self, PyObject *args, PyObject *kwds) +{ + PyFrameObject *frame; + PyObject *what_str; + PyObject *arg; + int lineno = 0; + int what; + int orig_lineno; + PyObject *ret = NULL; + + static char *what_names[] = { + "call", "exception", "line", "return", + "c_call", "c_exception", "c_return", + NULL + }; + + #if WHAT_LOG + printf("pytrace\n"); + #endif + + static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist, + &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) { + goto done; + } + + /* In Python, the what argument is a string, we need to find an int + for the C function. */ + for (what = 0; what_names[what]; what++) { + PyObject *ascii = MyText_AS_BYTES(what_str); + int should_break = !strcmp(MyText_AS_STRING(ascii), what_names[what]); + Py_DECREF(ascii); + if (should_break) { + break; + } + } + + /* Save off the frame's lineno, and use the forced one, if provided. */ + orig_lineno = frame->f_lineno; + if (lineno > 0) { + frame->f_lineno = lineno; + } + + /* Invoke the C function, and return ourselves. */ + if (CTracer_trace(self, frame, what, arg) == RET_OK) { + Py_INCREF(self); + ret = (PyObject *)self; + } + + /* Clean up. 
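   Restore the frame's original line number in case we overrode it above.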
*/ + frame->f_lineno = orig_lineno; + +done: + return ret; +} + +static PyObject * +CTracer_start(CTracer *self, PyObject *args_unused) +{ + PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self); + self->started = 1; + self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs); + self->last_line = -1; + + /* start() returns a trace function usable with sys.settrace() */ + Py_INCREF(self); + return (PyObject *)self; +} + +static PyObject * +CTracer_stop(CTracer *self, PyObject *args_unused) +{ + if (self->started) { + PyEval_SetTrace(NULL, NULL); + self->started = 0; + } + + return Py_BuildValue(""); +} + +static PyObject * +CTracer_get_stats(CTracer *self) +{ +#if COLLECT_STATS + return Py_BuildValue( + "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI}", + "calls", self->stats.calls, + "lines", self->stats.lines, + "returns", self->stats.returns, + "exceptions", self->stats.exceptions, + "others", self->stats.others, + "new_files", self->stats.new_files, + "missed_returns", self->stats.missed_returns, + "stack_reallocs", self->stats.stack_reallocs, + "stack_alloc", self->data_stack_alloc, + "errors", self->stats.errors + ); +#else + return Py_BuildValue(""); +#endif /* COLLECT_STATS */ +} + +static PyMemberDef +CTracer_members[] = { + { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0, + PyDoc_STR("Function indicating whether to trace a file.") }, + + { "warn", T_OBJECT, offsetof(CTracer, warn), 0, + PyDoc_STR("Function for issuing warnings.") }, + + { "data", T_OBJECT, offsetof(CTracer, data), 0, + PyDoc_STR("The raw dictionary of trace data.") }, + + { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0, + PyDoc_STR("Dictionary caching should_trace results.") }, + + { "arcs", T_OBJECT, offsetof(CTracer, arcs), 0, + PyDoc_STR("Should we trace arcs, or just lines?") }, + + { NULL } +}; + +static PyMethodDef +CTracer_methods[] = { + { "start", (PyCFunction) CTracer_start, METH_VARARGS, + PyDoc_STR("Start the tracer") }, + + { "stop", (PyCFunction) CTracer_stop, METH_VARARGS, + PyDoc_STR("Stop the tracer") }, + + { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS, + PyDoc_STR("Get statistics about the tracing") }, + + { NULL } +}; + +static PyTypeObject +CTracerType = { + MyType_HEAD_INIT + "coverage.CTracer", /*tp_name*/ + sizeof(CTracer), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor)CTracer_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + (ternaryfunc)CTracer_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + "CTracer objects", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + CTracer_methods, /* tp_methods */ + CTracer_members, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)CTracer_init, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ +}; + +/* Module definition */ + +#define MODULE_DOC PyDoc_STR("Fast coverage tracer.") + +#if PY_MAJOR_VERSION >= 3 + +static PyModuleDef +moduledef = { + PyModuleDef_HEAD_INIT, + "coverage.tracer", + MODULE_DOC, + -1, + NULL, /* methods */ + NULL, + NULL, /* traverse */ + NULL, /* clear */ + NULL +}; + + +PyObject * 
+PyInit_tracer(void) +{ + PyObject * mod = PyModule_Create(&moduledef); + if (mod == NULL) { + return NULL; + } + + CTracerType.tp_new = PyType_GenericNew; + if (PyType_Ready(&CTracerType) < 0) { + Py_DECREF(mod); + return NULL; + } + + Py_INCREF(&CTracerType); + PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType); + + return mod; +} + +#else + +void +inittracer(void) +{ + PyObject * mod; + + mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC); + if (mod == NULL) { + return; + } + + CTracerType.tp_new = PyType_GenericNew; + if (PyType_Ready(&CTracerType) < 0) { + return; + } + + Py_INCREF(&CTracerType); + PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType); +} + +#endif /* Py3k */ diff --git a/third_party/pycoverage/coverage/version.py b/third_party/pycoverage/coverage/version.py new file mode 100644 index 0000000..a43bde8 --- /dev/null +++ b/third_party/pycoverage/coverage/version.py @@ -0,0 +1,9 @@ +"""The version and URL for coverage.py""" +# This file is exec'ed in setup.py, don't import anything! + +__version__ = "3.7.1" # see detailed history in CHANGES.txt + +__url__ = "http://nedbatchelder.com/code/coverage" +if max(__version__).isalpha(): + # For pre-releases, use a version-specific URL. + __url__ += "/" + __version__ diff --git a/third_party/pycoverage/coverage/xmlreport.py b/third_party/pycoverage/coverage/xmlreport.py new file mode 100644 index 0000000..26ac02a --- /dev/null +++ b/third_party/pycoverage/coverage/xmlreport.py @@ -0,0 +1,155 @@ +"""XML reporting for coverage.py""" + +import os, sys, time +import xml.dom.minidom + +from coverage import __url__, __version__ +from coverage.backward import sorted, rpartition # pylint: disable=W0622 +from coverage.report import Reporter + +def rate(hit, num): + """Return the fraction of `hit`/`num`, as a string.""" + return "%.4g" % (float(hit) / (num or 1.0)) + + +class XmlReporter(Reporter): + """A reporter for writing Cobertura-style XML coverage results.""" + + def __init__(self, coverage, config): + super(XmlReporter, self).__init__(coverage, config) + + self.packages = None + self.xml_out = None + self.arcs = coverage.data.has_arcs() + + def report(self, morfs, outfile=None): + """Generate a Cobertura-compatible XML report for `morfs`. + + `morfs` is a list of modules or filenames. + + `outfile` is a file object to write the XML to. + + """ + # Initial setup. + outfile = outfile or sys.stdout + + # Create the DOM that will store the data. + impl = xml.dom.minidom.getDOMImplementation() + docType = impl.createDocumentType( + "coverage", None, + "http://cobertura.sourceforge.net/xml/coverage-03.dtd" + ) + self.xml_out = impl.createDocument(None, "coverage", docType) + + # Write header stuff. + xcoverage = self.xml_out.documentElement + xcoverage.setAttribute("version", __version__) + xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) + xcoverage.appendChild(self.xml_out.createComment( + " Generated by coverage.py: %s " % __url__ + )) + xpackages = self.xml_out.createElement("packages") + xcoverage.appendChild(xpackages) + + # Call xml_file for each file in the data. + self.packages = {} + self.report_files(self.xml_file, morfs) + + lnum_tot, lhits_tot = 0, 0 + bnum_tot, bhits_tot = 0, 0 + + # Populate the XML DOM with the package info. 
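+        # Each entry in self.packages is [class_elts, line_hits, line_count, branch_hits, branch_count].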
+ for pkg_name in sorted(self.packages.keys()): + pkg_data = self.packages[pkg_name] + class_elts, lhits, lnum, bhits, bnum = pkg_data + xpackage = self.xml_out.createElement("package") + xpackages.appendChild(xpackage) + xclasses = self.xml_out.createElement("classes") + xpackage.appendChild(xclasses) + for class_name in sorted(class_elts.keys()): + xclasses.appendChild(class_elts[class_name]) + xpackage.setAttribute("name", pkg_name.replace(os.sep, '.')) + xpackage.setAttribute("line-rate", rate(lhits, lnum)) + xpackage.setAttribute("branch-rate", rate(bhits, bnum)) + xpackage.setAttribute("complexity", "0") + + lnum_tot += lnum + lhits_tot += lhits + bnum_tot += bnum + bhits_tot += bhits + + xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) + xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) + + # Use the DOM to write the output file. + outfile.write(self.xml_out.toprettyxml()) + + # Return the total percentage. + denom = lnum_tot + bnum_tot + if denom == 0: + pct = 0.0 + else: + pct = 100.0 * (lhits_tot + bhits_tot) / denom + return pct + + def xml_file(self, cu, analysis): + """Add to the XML report for a single file.""" + + # Create the 'lines' and 'package' XML elements, which + # are populated later. Note that a package == a directory. + package_name = rpartition(cu.name, ".")[0] + className = cu.name + + package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0]) + + xclass = self.xml_out.createElement("class") + + xclass.appendChild(self.xml_out.createElement("methods")) + + xlines = self.xml_out.createElement("lines") + xclass.appendChild(xlines) + + xclass.setAttribute("name", className) + filename = cu.file_locator.relative_filename(cu.filename) + xclass.setAttribute("filename", filename.replace("\\", "/")) + xclass.setAttribute("complexity", "0") + + branch_stats = analysis.branch_stats() + + # For each statement, create an XML 'line' element. + for line in sorted(analysis.statements): + xline = self.xml_out.createElement("line") + xline.setAttribute("number", str(line)) + + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + xline.setAttribute("hits", str(int(line not in analysis.missing))) + + if self.arcs: + if line in branch_stats: + total, taken = branch_stats[line] + xline.setAttribute("branch", "true") + xline.setAttribute("condition-coverage", + "%d%% (%d/%d)" % (100*taken/total, taken, total) + ) + xlines.appendChild(xline) + + class_lines = len(analysis.statements) + class_hits = class_lines - len(analysis.missing) + + if self.arcs: + class_branches = sum([t for t,k in branch_stats.values()]) + missing_branches = sum([t-k for t,k in branch_stats.values()]) + class_br_hits = class_branches - missing_branches + else: + class_branches = 0.0 + class_br_hits = 0.0 + + # Finalize the statistics that are collected in the XML DOM. + xclass.setAttribute("line-rate", rate(class_hits, class_lines)) + xclass.setAttribute("branch-rate", rate(class_br_hits, class_branches)) + package[0][className] = xclass + package[1] += class_hits + package[2] += class_lines + package[3] += class_br_hits + package[4] += class_branches diff --git a/third_party/pycoverage/igor.py b/third_party/pycoverage/igor.py new file mode 100644 index 0000000..12ec6c8 --- /dev/null +++ b/third_party/pycoverage/igor.py @@ -0,0 +1,262 @@ +"""Helper for building, testing, and linting coverage.py. 
+ +To get portability, all these operations are written in Python here instead +of in shell scripts, batch files, or Makefiles. + +""" + +import fnmatch +import glob +import inspect +import os +import platform +import socket +import sys +import zipfile + + +# Functions named do_* are executable from the command line: do_blah is run +# by "python igor.py blah". + + +def do_remove_extension(): + """Remove the compiled C extension, no matter what its name.""" + + so_patterns = """ + tracer.so + tracer.*.so + tracer.pyd + """.split() + + for pattern in so_patterns: + pattern = os.path.join("coverage", pattern) + for filename in glob.glob(pattern): + try: + os.remove(filename) + except OSError: + pass + +def run_tests(tracer, *nose_args): + """The actual running of tests.""" + import nose.core + if tracer == "py": + label = "with Python tracer" + else: + label = "with C tracer" + if os.environ.get("COVERAGE_NO_EXTENSION"): + print("Skipping tests, no C extension in this environment") + return + print_banner(label) + os.environ["COVERAGE_TEST_TRACER"] = tracer + nose_args = ["nosetests"] + list(nose_args) + nose.core.main(argv=nose_args) + +def run_tests_with_coverage(tracer, *nose_args): + """Run tests, but with coverage.""" + import coverage + + os.environ['COVERAGE_PROCESS_START'] = os.path.abspath('metacov.ini') + os.environ['COVERAGE_HOME'] = os.getcwd() + + # Create the .pth file that will let us measure coverage in sub-processes. + import nose + pth_dir = os.path.dirname(os.path.dirname(nose.__file__)) + pth_path = os.path.join(pth_dir, "covcov.pth") + pth_file = open(pth_path, "w") + try: + pth_file.write("import coverage; coverage.process_startup()\n") + finally: + pth_file.close() + + version = "%s%s" % sys.version_info[:2] + suffix = "%s_%s_%s" % (version, tracer, socket.gethostname()) + + cov = coverage.coverage(config_file="metacov.ini", data_suffix=suffix) + # Cheap trick: the coverage code itself is excluded from measurement, but + # if we clobber the cover_prefix in the coverage object, we can defeat the + # self-detection. + cov.cover_prefix = "Please measure coverage.py!" + cov.erase() + cov.start() + + try: + # Re-import coverage to get it coverage tested! I don't understand all + # the mechanics here, but if I don't carry over the imported modules + # (in covmods), then things go haywire (os == None, eventually). + covmods = {} + covdir = os.path.split(coverage.__file__)[0] + # We have to make a list since we'll be deleting in the loop. + modules = list(sys.modules.items()) + for name, mod in modules: + if name.startswith('coverage'): + if getattr(mod, '__file__', "??").startswith(covdir): + covmods[name] = mod + del sys.modules[name] + import coverage # don't warn about re-import: pylint: disable=W0404 + sys.modules.update(covmods) + + # Run nosetests, with the arguments from our command line. + try: + run_tests(tracer, *nose_args) + except SystemExit: + # nose3 seems to raise SystemExit, not sure why? 
+ pass + finally: + cov.stop() + os.remove(pth_path) + + cov.save() + +def do_combine_html(): + """Combine data from a meta-coverage run, and make the HTML report.""" + import coverage + os.environ['COVERAGE_HOME'] = os.getcwd() + cov = coverage.coverage(config_file="metacov.ini") + cov.load() + cov.combine() + cov.save() + cov.html_report() + +def do_test_with_tracer(tracer, *noseargs): + """Run nosetests with a particular tracer.""" + if os.environ.get("COVERAGE_COVERAGE", ""): + return run_tests_with_coverage(tracer, *noseargs) + else: + return run_tests(tracer, *noseargs) + +def do_zip_mods(): + """Build the zipmods.zip file.""" + zf = zipfile.ZipFile("tests/zipmods.zip", "w") + zf.write("tests/covmodzip1.py", "covmodzip1.py") + zf.close() + +def do_install_egg(): + """Install the egg1 egg for tests.""" + # I am pretty certain there are easier ways to install eggs... + # pylint: disable=F0401,E0611,E1101 + import distutils.core + cur_dir = os.getcwd() + os.chdir("tests/eggsrc") + distutils.core.run_setup("setup.py", ["--quiet", "bdist_egg"]) + egg = glob.glob("dist/*.egg")[0] + distutils.core.run_setup( + "setup.py", ["--quiet", "easy_install", "--no-deps", "--zip-ok", egg] + ) + os.chdir(cur_dir) + +def do_check_eol(): + """Check files for incorrect newlines and trailing whitespace.""" + + ignore_dirs = [ + '.svn', '.hg', '.tox', '.tox_kits', 'coverage.egg-info', + '_build', 'covtestegg1.egg-info', + ] + checked = set([]) + + def check_file(fname, crlf=True, trail_white=True): + """Check a single file for whitespace abuse.""" + fname = os.path.relpath(fname) + if fname in checked: + return + checked.add(fname) + + line = None + for n, line in enumerate(open(fname, "rb")): + if crlf: + if "\r" in line: + print("%s@%d: CR found" % (fname, n+1)) + return + if trail_white: + line = line[:-1] + if not crlf: + line = line.rstrip('\r') + if line.rstrip() != line: + print("%s@%d: trailing whitespace found" % (fname, n+1)) + return + + if line is not None and not line.strip(): + print("%s: final blank line" % (fname,)) + + def check_files(root, patterns, **kwargs): + """Check a number of files for whitespace abuse.""" + for root, dirs, files in os.walk(root): + for f in files: + fname = os.path.join(root, f) + for p in patterns: + if fnmatch.fnmatch(fname, p): + check_file(fname, **kwargs) + break + for dir_name in ignore_dirs: + if dir_name in dirs: + dirs.remove(dir_name) + + check_files("coverage", ["*.py", "*.c"]) + check_files("coverage/htmlfiles", ["*.html", "*.css", "*.js"]) + check_file("tests/farm/html/src/bom.py", crlf=False) + check_files("tests", ["*.py"]) + check_files("tests", ["*,cover"], trail_white=False) + check_files("tests/js", ["*.js", "*.html"]) + check_file("setup.py") + check_file("igor.py") + check_file("Makefile") + check_file(".hgignore") + check_file(".travis.yml") + check_files("doc", ["*.rst"]) + check_files(".", ["*.txt"]) + + +def print_banner(label): + """Print the version of Python.""" + try: + impl = platform.python_implementation() + except AttributeError: + impl = "Python" + + version = platform.python_version() + + if '__pypy__' in sys.builtin_module_names: + pypy_version = sys.pypy_version_info # pylint: disable=E1101 + version += " (pypy %s)" % ".".join([str(v) for v in pypy_version]) + + print('=== %s %s %s (%s) ===' % (impl, version, label, sys.executable)) + + +def do_help(): + """List the available commands""" + items = list(globals().items()) + items.sort() + for name, value in items: + if name.startswith('do_'): + print("%-20s%s" % (name[3:], 
value.__doc__)) + + +def main(args): + """Main command-line execution for igor. + + Verbs are taken from the command line, and extra words taken as directed + by the arguments needed by the handler. + + """ + while args: + verb = args.pop(0) + handler = globals().get('do_'+verb) + if handler is None: + print("*** No handler for %r" % verb) + return 1 + argspec = inspect.getargspec(handler) + if argspec[1]: + # Handler has *args, give it all the rest of the command line. + handler_args = args + args = [] + else: + # Handler has specific arguments, give it only what it needs. + num_args = len(argspec[0]) + handler_args = args[:num_args] + args = args[num_args:] + ret = handler(*handler_args) + # If a handler returns a failure-like value, stop. + if ret: + return ret + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/third_party/pycoverage/requirements.txt b/third_party/pycoverage/requirements.txt new file mode 100644 index 0000000..4c66774 --- /dev/null +++ b/third_party/pycoverage/requirements.txt @@ -0,0 +1,5 @@ +nose +mock +tox +pylint +sphinx diff --git a/third_party/pycoverage/setup.cfg b/third_party/pycoverage/setup.cfg new file mode 100644 index 0000000..861a9f5 --- /dev/null +++ b/third_party/pycoverage/setup.cfg @@ -0,0 +1,5 @@ +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/third_party/pycoverage/setup.py b/third_party/pycoverage/setup.py new file mode 100644 index 0000000..f0e83e6 --- /dev/null +++ b/third_party/pycoverage/setup.py @@ -0,0 +1,202 @@ +# setup.py for coverage.py + +"""Code coverage measurement for Python + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +Coverage.py runs on Pythons 2.3 through 3.3, and PyPy 1.9. + +Documentation is at `nedbatchelder.com <%s>`_. Code repository and issue +tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_, with a +mirrored repo on `Github <https://github.com/nedbat/coveragepy>`_. + +New in 3.7: ``--debug``, and 12 bugs closed. + +New in 3.6: ``--fail-under``, and >20 bugs closed. + +New in 3.5: Branch coverage exclusions, keyboard shortcuts in HTML report. + +New in 3.4: Better control over source to measure, and unexecuted files +can be reported. + +New in 3.3: .coveragerc files. + +New in 3.2: Branch coverage! +""" + +# This file is used unchanged under all versions of Python, 2.x and 3.x. + +classifiers = """\ +Environment :: Console +Intended Audience :: Developers +License :: OSI Approved :: BSD License +Operating System :: OS Independent +Programming Language :: Python :: 2 +Programming Language :: Python :: 3 +Topic :: Software Development :: Quality Assurance +Topic :: Software Development :: Testing +""" + +# Pull in the tools we need. +import os, sys + +from setuptools import setup +from distutils.core import Extension # pylint: disable=E0611,F0401 +from distutils.command.build_ext import build_ext # pylint: disable=E0611,F0401,C0301 +from distutils import errors # pylint: disable=E0611,F0401 + +# Get or massage our metadata. We exec coverage/version.py so we can avoid +# importing the product code into setup.py. + +doc = __doc__ # __doc__ will be overwritten by version.py. +__version__ = __url__ = "" # Keep pylint happy. 
+ +cov_ver_py = os.path.join(os.path.split(__file__)[0], "coverage/version.py") +version_file = open(cov_ver_py) +try: + exec(compile(version_file.read(), cov_ver_py, 'exec')) +finally: + version_file.close() + +doclines = (doc % __url__).splitlines() +classifier_list = classifiers.splitlines() + +if 'a' in __version__: + devstat = "3 - Alpha" +elif 'b' in __version__: + devstat = "4 - Beta" +else: + devstat = "5 - Production/Stable" +classifier_list.append("Development Status :: " + devstat) + +# Install a script as "coverage", and as "coverage[23]", and as +# "coverage-2.7" (or whatever). +scripts = [ + 'coverage = coverage:main', + 'coverage%d = coverage:main' % sys.version_info[:1], + 'coverage-%d.%d = coverage:main' % sys.version_info[:2], + ] + +# Create the keyword arguments for setup() + +setup_args = dict( + name = 'coverage', + version = __version__, + + packages = [ + 'coverage', + ], + + package_data = { + 'coverage': [ + 'htmlfiles/*.*', + ] + }, + + entry_points = {'console_scripts': scripts}, + + # We need to get HTML assets from our htmlfiles dir. + zip_safe = False, + + author = 'Ned Batchelder and others', + author_email = 'ned@nedbatchelder.com', + description = doclines[0], + long_description = '\n'.join(doclines[2:]), + keywords = 'code coverage testing', + license = 'BSD', + classifiers = classifier_list, + url = __url__, + ) + +# A replacement for the build_ext command which raises a single exception +# if the build fails, so we can fallback nicely. + +ext_errors = ( + errors.CCompilerError, + errors.DistutilsExecError, + errors.DistutilsPlatformError, +) +if sys.platform == 'win32' and sys.version_info > (2, 6): + # 2.6's distutils.msvc9compiler can raise an IOError when failing to + # find the compiler + ext_errors += (IOError,) + +class BuildFailed(Exception): + """Raise this to indicate the C extension wouldn't build.""" + def __init__(self): + Exception.__init__(self) + self.cause = sys.exc_info()[1] # work around py 2/3 different syntax + +class ve_build_ext(build_ext): + """Build C extensions, but fail with a straightforward exception.""" + + def run(self): + """Wrap `run` with `BuildFailed`.""" + try: + build_ext.run(self) + except errors.DistutilsPlatformError: + raise BuildFailed() + + def build_extension(self, ext): + """Wrap `build_extension` with `BuildFailed`.""" + try: + # Uncomment to test compile failures: + # raise errors.CCompilerError("OOPS") + build_ext.build_extension(self, ext) + except ext_errors: + raise BuildFailed() + except ValueError: + # this can happen on Windows 64 bit, see Python issue 7511 + if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3 + raise BuildFailed() + raise + +# There are a few reasons we might not be able to compile the C extension. +# Figure out if we should attempt the C extension or not. + +compile_extension = True + +if sys.platform.startswith('java'): + # Jython can't compile C extensions + compile_extension = False + +if '__pypy__' in sys.builtin_module_names: + # Pypy can't compile C extensions + compile_extension = False + +if compile_extension: + setup_args.update(dict( + ext_modules = [ + Extension("coverage.tracer", sources=["coverage/tracer.c"]) + ], + cmdclass = { + 'build_ext': ve_build_ext, + }, + )) + +# Py3.x-specific details. + +if sys.version_info >= (3, 0): + setup_args.update(dict( + use_2to3 = False, + )) + +def main(): + """Actually invoke setup() with the arguments we built above.""" + # For a variety of reasons, it might not be possible to install the C + # extension. 
Try it with, and if it fails, try it without. + try: + setup(**setup_args) + except BuildFailed: + msg = "Couldn't install with extension module, trying without it..." + exc = sys.exc_info()[1] + exc_msg = "%s: %s" % (exc.__class__.__name__, exc.cause) + print("**\n** %s\n** %s\n**" % (msg, exc_msg)) + + del setup_args['ext_modules'] + setup(**setup_args) + +if __name__ == '__main__': + main() diff --git a/third_party/pycoverage/tox.ini b/third_party/pycoverage/tox.ini new file mode 100644 index 0000000..7e78ea4 --- /dev/null +++ b/third_party/pycoverage/tox.ini @@ -0,0 +1,32 @@ +# Tox (http://tox.testrun.org/) is a tool for running tests +# in multiple virtualenvs. This configuration file will run the +# test suite on all supported python versions. To use it, "pip install tox" +# and then run "tox" from this directory. + +[tox] +envlist = py25, py26, py27, py31, py32, py33, pypy + +[testenv] +commands = + {envpython} setup.py --quiet clean develop + + # Create tests/zipmods.zip + # Install the egg1 egg + # Remove the C extension so that we can test the PyTracer + {envpython} igor.py zip_mods install_egg remove_extension + + # Test with the PyTracer + {envpython} igor.py test_with_tracer py {posargs} + + # Build the C extension and test with the CTracer + {envpython} setup.py --quiet build_ext --inplace + {envpython} igor.py test_with_tracer c {posargs} + +deps = + nose + mock + +[testenv:pypy] +# PyPy has no C extensions +setenv = + COVERAGE_NO_EXTENSION=1 |
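The comment above CTracer_call explains why the tracer object must itself be callable: sys.gettrace() returns whatever object was handed to PyEval_SetTrace, and that object has to be usable again with sys.settrace(). A minimal pure-Python sketch of the same trace-function protocol, for illustration only (ToyTracer and demo are assumed names, not part of the imported sources):

import sys

class ToyTracer:
    """Record executed lines per file, loosely mirroring what CTracer does."""

    def __init__(self):
        self.data = {}  # {filename: set of executed line numbers}

    def __call__(self, frame, event, arg):
        # sys.settrace delivers 'call', 'line', 'return' and 'exception'
        # events.  Returning self keeps this same object installed as the
        # local trace function for the new frame.
        if event == "line":
            lines = self.data.setdefault(frame.f_code.co_filename, set())
            lines.add(frame.f_lineno)
        return self

    def start(self):
        sys.settrace(self)

    def stop(self):
        sys.settrace(None)


def demo():
    total = 0
    for i in range(3):
        total += i
    return total


tracer = ToyTracer()
tracer.start()
demo()
tracer.stop()
print(tracer.data)  # executed line numbers of demo(), keyed by file name

Running the sketch prints the line numbers executed inside demo(), keyed by file name, which is the same shape of line data CTracer accumulates in self->cur_file_data.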