author     maruel@chromium.org <maruel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2009-03-05 12:46:38 +0000
committer  maruel@chromium.org <maruel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2009-03-05 12:46:38 +0000
commit     f0a51fb571f46531025fa09240bbc3e1af925e84 (patch)
tree       558b4f0e737fda4b9ab60f252c9c23b8a4ca523e /tools
parent     6390be368205705f49ead3cec40396519f13b889 (diff)
download   chromium_src-f0a51fb571f46531025fa09240bbc3e1af925e84.zip
           chromium_src-f0a51fb571f46531025fa09240bbc3e1af925e84.tar.gz
           chromium_src-f0a51fb571f46531025fa09240bbc3e1af925e84.tar.bz2
Fixes CRLF and trailing white spaces.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@10982 0039d316-1c4b-4281-b951-d872f2087c98
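
For reference, below is a minimal sketch of how trailing whitespace and CRLF line endings might be stripped across the tools tree. It is an illustration only, not the actual script used for this change; the clean_file/clean_tree helpers and the extension list are assumptions.

#!/usr/bin/env python
# Hypothetical cleanup helper (illustration only, not the tool used for this commit):
# walk a tree, convert CRLF line endings to LF, and strip trailing whitespace in place.
import os

# Assumed set of text-file extensions to touch.
EXTENSIONS = ('.py', '.cc', '.cpp', '.h')

def clean_file(path):
  # Read raw bytes so the line-ending conversion is explicit.
  with open(path, 'rb') as f:
    data = f.read()
  lines = data.replace(b'\r\n', b'\n').split(b'\n')
  # rstrip() removes trailing spaces, tabs and any stray carriage returns.
  cleaned = b'\n'.join(line.rstrip() for line in lines)
  if cleaned != data:
    with open(path, 'wb') as f:
      f.write(cleaned)

def clean_tree(root):
  for dirpath, _, filenames in os.walk(root):
    for name in filenames:
      if name.endswith(EXTENSIONS):
        clean_file(os.path.join(dirpath, name))

if __name__ == '__main__':
  clean_tree('tools')

After such a cleanup, a check like git diff --check (on a git checkout) can flag any trailing whitespace that was missed.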
Diffstat (limited to 'tools')
-rw-r--r--  tools/channel_changer/resource.h  2
-rw-r--r--  tools/code_coverage/coverage.py  70
-rw-r--r--  tools/code_coverage/process_coverage.py  24
-rw-r--r--  tools/grit/grit/clique.py  116
-rw-r--r--  tools/grit/grit/clique_unittest.py  38
-rw-r--r--  tools/grit/grit/extern/FP.py  2
-rw-r--r--  tools/grit/grit/extern/tclib.py  4
-rwxr-xr-x  tools/grit/grit/format/html_inline.py  22
-rw-r--r--  tools/grit/grit/format/interface.py  6
-rw-r--r--  tools/grit/grit/format/rc.py  78
-rw-r--r--  tools/grit/grit/format/rc_header.py  34
-rw-r--r--  tools/grit/grit/format/rc_header_unittest.py  8
-rw-r--r--  tools/grit/grit/format/rc_unittest.py  24
-rw-r--r--  tools/grit/grit/gather/admin_template.py  18
-rw-r--r--  tools/grit/grit/gather/admin_template_unittest.py  16
-rw-r--r--  tools/grit/grit/gather/interface.py  38
-rw-r--r--  tools/grit/grit/gather/muppet_strings.py  32
-rw-r--r--  tools/grit/grit/gather/muppet_strings_unittest.py  4
-rw-r--r--  tools/grit/grit/gather/rc.py  76
-rw-r--r--  tools/grit/grit/gather/rc_unittest.py  42
-rw-r--r--  tools/grit/grit/gather/regexp.py  38
-rw-r--r--  tools/grit/grit/gather/tr_html.py  148
-rw-r--r--  tools/grit/grit/gather/tr_html_unittest.py  92
-rw-r--r--  tools/grit/grit/gather/txt.py  12
-rw-r--r--  tools/grit/grit/grd_reader.py  38
-rw-r--r--  tools/grit/grit/grd_reader_unittest.py  6
-rw-r--r--  tools/grit/grit/grit_runner.py  22
-rw-r--r--  tools/grit/grit/grit_runner_unittest.py  4
-rw-r--r--  tools/grit/grit/node/base.py  102
-rw-r--r--  tools/grit/grit/node/base_unittest.py  26
-rw-r--r--  tools/grit/grit/node/custom/__init__.py  2
-rw-r--r--  tools/grit/grit/node/custom/filename.py  8
-rw-r--r--  tools/grit/grit/node/custom/filename_unittest.py  2
-rw-r--r--  tools/grit/grit/node/empty.py  2
-rw-r--r--  tools/grit/grit/node/include.py  14
-rw-r--r--  tools/grit/grit/node/io.py  30
-rw-r--r--  tools/grit/grit/node/io_unittest.py  4
-rw-r--r--  tools/grit/grit/node/mapping.py  4
-rw-r--r--  tools/grit/grit/node/message.py  48
-rw-r--r--  tools/grit/grit/node/message_unittest.py  4
-rw-r--r--  tools/grit/grit/node/misc.py  62
-rw-r--r--  tools/grit/grit/node/misc_unittest.py  20
-rw-r--r--  tools/grit/grit/node/structure.py  44
-rw-r--r--  tools/grit/grit/node/structure_unittest.py  6
-rw-r--r--  tools/grit/grit/node/variant.py  8
-rw-r--r--  tools/grit/grit/pseudo.py  12
-rw-r--r--  tools/grit/grit/pseudo_unittest.py  6
-rw-r--r--  tools/grit/grit/shortcuts.py  16
-rw-r--r--  tools/grit/grit/shortcuts_unittests.py  22
-rw-r--r--  tools/grit/grit/tclib.py  62
-rw-r--r--  tools/grit/grit/tclib_unittest.py  10
-rw-r--r--  tools/grit/grit/test_suite_all.py  2
-rw-r--r--  tools/grit/grit/tool/build.py  24
-rw-r--r--  tools/grit/grit/tool/count.py  8
-rw-r--r--  tools/grit/grit/tool/diff_structures.py  18
-rw-r--r--  tools/grit/grit/tool/interface.py  16
-rw-r--r--  tools/grit/grit/tool/menu_from_parts.py  16
-rw-r--r--  tools/grit/grit/tool/newgrd.py  2
-rw-r--r--  tools/grit/grit/tool/postprocess_interface.py  4
-rw-r--r--  tools/grit/grit/tool/postprocess_unittest.py  14
-rw-r--r--  tools/grit/grit/tool/preprocess_interface.py  2
-rw-r--r--  tools/grit/grit/tool/preprocess_unittest.py  12
-rw-r--r--  tools/grit/grit/tool/rc2grd.py  72
-rw-r--r--  tools/grit/grit/tool/rc2grd_unittest.py  10
-rw-r--r--  tools/grit/grit/tool/resize.py  36
-rw-r--r--  tools/grit/grit/tool/test.py  4
-rw-r--r--  tools/grit/grit/tool/toolbar_postprocess.py  10
-rw-r--r--  tools/grit/grit/tool/toolbar_preprocess.py  12
-rw-r--r--  tools/grit/grit/tool/transl2tc.py  52
-rw-r--r--  tools/grit/grit/tool/transl2tc_unittest.py  16
-rw-r--r--  tools/grit/grit/tool/unit.py  4
-rw-r--r--  tools/grit/grit/util.py  38
-rw-r--r--  tools/grit/grit/util_unittest.py  8
-rw-r--r--  tools/grit/grit/xtb_reader.py  16
-rw-r--r--  tools/grit/grit/xtb_reader_unittest.py  8
-rw-r--r--  tools/measure_page_load_time/ie_bho/MeasurePageLoadTime.cpp  2
-rw-r--r--  tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.cpp  16
-rw-r--r--  tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.h  4
-rw-r--r--  tools/measure_page_load_time/ie_bho/resource.h  2
-rw-r--r--  tools/measure_page_load_time/ie_bho/stdafx.h  6
-rw-r--r--  tools/memory_watcher/ia32_modrm_map.cc  18
-rw-r--r--  tools/memory_watcher/ia32_opcode_map.cc  60
-rw-r--r--  tools/memory_watcher/memory_watcher.cc  4
-rw-r--r--  tools/memory_watcher/memory_watcher.h  2
-rw-r--r--  tools/memory_watcher/mini_disassembler.cc  50
-rw-r--r--  tools/memory_watcher/mini_disassembler.h  14
-rw-r--r--  tools/memory_watcher/mini_disassembler_types.h  14
-rw-r--r--  tools/memory_watcher/preamble_patcher.cc  52
-rw-r--r--  tools/memory_watcher/preamble_patcher.h  40
-rw-r--r--  tools/memory_watcher/preamble_patcher_with_stub.cc  6
-rw-r--r--  tools/purify/chrome_tests.py  24
-rw-r--r--  tools/purify/common.py  26
-rw-r--r--  tools/purify/purify_analyze.py  48
-rw-r--r--  tools/purify/purify_coverage.py  10
-rw-r--r--  tools/purify/purify_inuse.py  8
-rw-r--r--  tools/purify/purify_message.py  16
-rw-r--r--  tools/purify/purify_test.py  10
-rw-r--r--  tools/purify/quantify_test.py  12
-rw-r--r--  tools/python/google/httpd_utils.py  2
-rw-r--r--  tools/python/google/logging_utils.py  10
-rw-r--r--  tools/python/google/path_utils.py  4
-rw-r--r--  tools/python/google/platform_utils_win.py  2
-rw-r--r--  tools/site_compare/command_line.py  292
-rw-r--r--  tools/site_compare/commands/compare2.py  38
-rw-r--r--  tools/site_compare/commands/maskmaker.py  46
-rw-r--r--  tools/site_compare/commands/measure.py  4
-rw-r--r--  tools/site_compare/commands/scrape.py  6
-rw-r--r--  tools/site_compare/commands/timeload.py  38
-rw-r--r--  tools/site_compare/drivers/__init__.py  2
-rw-r--r--  tools/site_compare/drivers/win32/keyboard.py  66
-rw-r--r--  tools/site_compare/drivers/win32/mouse.py  80
-rw-r--r--  tools/site_compare/drivers/win32/windowing.py  110
-rw-r--r--  tools/site_compare/operators/__init__.py  10
-rw-r--r--  tools/site_compare/operators/equals.py  16
-rw-r--r--  tools/site_compare/operators/equals_with_mask.py  26
-rw-r--r--  tools/site_compare/scrapers/__init__.py  12
-rw-r--r--  tools/site_compare/scrapers/chrome/__init__.py  14
-rw-r--r--  tools/site_compare/scrapers/chrome/chrome011010.py  8
-rw-r--r--  tools/site_compare/scrapers/chrome/chrome01970.py  12
-rw-r--r--  tools/site_compare/scrapers/chrome/chromebase.py  70
-rw-r--r--  tools/site_compare/scrapers/firefox/__init__.py  16
-rw-r--r--  tools/site_compare/scrapers/firefox/firefox2.py  86
-rw-r--r--  tools/site_compare/scrapers/ie/__init__.py  12
-rw-r--r--  tools/site_compare/scrapers/ie/ie7.py  66
-rw-r--r--  tools/site_compare/site_compare.py  58
-rw-r--r--  tools/site_compare/utils/browser_iterate.py  22
-rwxr-xr-x  tools/traceline/traceline/assembler.h  2
-rwxr-xr-x  tools/traceline/traceline/main.cc  22
-rwxr-xr-x  tools/traceline/traceline/sym_resolver.h  4
129 files changed, 1766 insertions, 1766 deletions
diff --git a/tools/channel_changer/resource.h b/tools/channel_changer/resource.h
index fe16158..6316987 100644
--- a/tools/channel_changer/resource.h
+++ b/tools/channel_changer/resource.h
@@ -12,7 +12,7 @@
#define IDC_CUTTING_EDGE 1003
#define IDC_SECONDARY_LABEL 1004
// Next default values for new objects
-//
+//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NO_MFC 1
diff --git a/tools/code_coverage/coverage.py b/tools/code_coverage/coverage.py
index c28c03d..976fb86 100644
--- a/tools/code_coverage/coverage.py
+++ b/tools/code_coverage/coverage.py
@@ -6,12 +6,12 @@
"""Module to setup and generate code coverage data
-This module first sets up the environment for code coverage, instruments the
+This module first sets up the environment for code coverage, instruments the
binaries, runs the tests and collects the code coverage data.
Usage:
- coverage.py --upload=<upload_location>
+ coverage.py --upload=<upload_location>
--revision=<revision_number>
--src_root=<root_of_source_tree>
[--tools_path=<tools_path>]
@@ -50,13 +50,13 @@ windows_binaries = [#'chrome.exe',
'test_shell_tests.exe',
'test_shell.exe',
'activex_test_control.dll']
-
-# The list of [tests, args] that will be run.
+
+# The list of [tests, args] that will be run.
# Failing tests have been commented out.
# TODO(niranjan): Need to add layout tests that excercise the test shell.
windows_tests = [
['unit_tests.exe', ''],
-# ['automated_ui_tests.exe', ''],
+# ['automated_ui_tests.exe', ''],
['ui_tests.exe', '--no-sandbox'],
['installer_unittests.exe', ''],
['ipc_tests.exe', ''],
@@ -78,7 +78,7 @@ def IsWindows():
class Coverage(object):
"""Class to set up and generate code coverage.
- This class contains methods that are useful to set up the environment for
+ This class contains methods that are useful to set up the environment for
code coverage.
Attributes:
@@ -86,7 +86,7 @@ class Coverage(object):
instrumented.
"""
- def __init__(self,
+ def __init__(self,
revision,
src_path = None,
tools_path = None,
@@ -106,13 +106,13 @@ class Coverage(object):
self.src_path = src_path
self._dir = tempfile.mkdtemp()
self._archive = archive
-
-
+
+
def SetUp(self, binaries):
"""Set up the platform specific environment and instrument the binaries for
coverage.
- This method sets up the environment, instruments all the compiled binaries
+ This method sets up the environment, instruments all the compiled binaries
and sets up the code coverage counters.
Args:
@@ -127,7 +127,7 @@ class Coverage(object):
return False
if IsWindows():
# Stop all previous instance of VSPerfMon counters
- counters_command = ('%s -shutdown' %
+ counters_command = ('%s -shutdown' %
(os.path.join(self.tools_path, 'vsperfcmd.exe')))
(retcode, output) = proc.RunCommandFull(counters_command,
collect_output=True)
@@ -156,20 +156,20 @@ class Coverage(object):
# We are now ready to run tests and measure code coverage.
self.instrumented = True
return True
-
+
def TearDown(self):
"""Tear down method.
This method shuts down the counters, and cleans up all the intermediate
- artifacts.
+ artifacts.
"""
if self.instrumented == False:
return
-
+
if IsWindows():
# Stop counters
- counters_command = ('%s -shutdown' %
+ counters_command = ('%s -shutdown' %
(os.path.join(self.tools_path, 'vsperfcmd.exe')))
(retcode, output) = proc.RunCommandFull(counters_command,
collect_output=True)
@@ -187,12 +187,12 @@ class Coverage(object):
logging.info('Cleaned up temporary files and folders')
# Reset the instrumented flag.
self.instrumented = False
-
+
def RunTest(self, src_root, test):
"""Run tests and collect the .coverage file
- Args:
+ Args:
src_root: Path to the root of the source.
test: Path to the test to be run.
@@ -203,13 +203,13 @@ class Coverage(object):
# Generate the intermediate file name for the coverage results
test_name = os.path.split(test[0])[1].strip('.exe')
# test_command = binary + args
- test_command = '%s %s' % (os.path.join(src_root,
+ test_command = '%s %s' % (os.path.join(src_root,
'chrome',
'Release',
- test[0]),
+ test[0]),
test[1])
-
- coverage_file = os.path.join(self._dir, '%s_win32_%s.coverage' %
+
+ coverage_file = os.path.join(self._dir, '%s_win32_%s.coverage' %
(test_name, self.revision))
logging.info('.coverage file for test %s: %s' % (test_name, coverage_file))
@@ -221,16 +221,16 @@ class Coverage(object):
# VSPerfCmd spawns another process before terminating and this confuses
# the subprocess.Popen() used by RunCommandFull.
retcode = subprocess.call(counters_command)
-
+
# Run the test binary
logging.info('Executing test %s: ' % test_command)
(retcode, output) = proc.RunCommandFull(test_command, collect_output=True)
if retcode != 0: # Return error if the tests fail
logging.error('One or more tests failed in %s.' % test_command)
return None
-
+
# Stop the counters
- counters_command = ('%s -shutdown' %
+ counters_command = ('%s -shutdown' %
(os.path.join(self.tools_path, 'vsperfcmd.exe')))
(retcode, output) = proc.RunCommandFull(counters_command,
collect_output=True)
@@ -238,7 +238,7 @@ class Coverage(object):
# Return the intermediate .coverage file
return coverage_file
-
+
def Upload(self, list_coverage, upload_path, sym_path=None, src_root=None):
"""Upload the results to the dashboard.
@@ -251,7 +251,7 @@ class Coverage(object):
upload_path: Destination where the coverage data will be processed.
sym_path: Symbol path for the build (Win32 only)
src_root: Root folder of the source tree (Win32 only)
-
+
Returns:
True on success.
False on failure.
@@ -259,15 +259,15 @@ class Coverage(object):
if upload_path == None:
logging.info('Upload path not specified. Will not convert to LCOV')
return True
-
+
if IsWindows():
# Stop counters
- counters_command = ('%s -shutdown' %
+ counters_command = ('%s -shutdown' %
(os.path.join(self.tools_path, 'vsperfcmd.exe')))
(retcode, output) = proc.RunCommandFull(counters_command,
collect_output=True)
logging.info('Counters shut down: %s' % (output))
- lcov_file = os.path.join(upload_path, 'chrome_win32_%s.lcov' %
+ lcov_file = os.path.join(upload_path, 'chrome_win32_%s.lcov' %
(self.revision))
lcov = open(lcov_file, 'w')
for coverage_file in list_coverage:
@@ -276,8 +276,8 @@ class Coverage(object):
logging.error('Lcov converter tool not found')
return False
self.tools_path = self.tools_path.rstrip('\\')
- convert_command = ('%s -sym_path=%s -src_root=%s %s' %
- (os.path.join(self.tools_path,
+ convert_command = ('%s -sym_path=%s -src_root=%s %s' %
+ (os.path.join(self.tools_path,
'coverage_analyzer.exe'),
sym_path,
src_root,
@@ -333,12 +333,12 @@ def main():
help='Archive location of the intermediate .coverage data')
(options, args) = parser.parse_args()
-
+
if options.revision == None:
parser.error('Revision number not specified')
if options.src_root == None:
parser.error('Source root not specified')
-
+
if IsWindows():
# Initialize coverage
cov = Coverage(options.revision,
@@ -355,7 +355,7 @@ def main():
return 1
# Collect the intermediate file
list_coverage.append(coverage)
- else:
+ else:
logging.error('Error during instrumentation.')
sys.exit(1)
@@ -368,4 +368,4 @@ def main():
if __name__ == '__main__':
sys.exit(main())
-
+
diff --git a/tools/code_coverage/process_coverage.py b/tools/code_coverage/process_coverage.py
index e42e95b..a1f3c1e 100644
--- a/tools/code_coverage/process_coverage.py
+++ b/tools/code_coverage/process_coverage.py
@@ -54,7 +54,7 @@ win32_srcs_exclude = ['parse.y',
'cssgrammar.cpp',
'csspropertynames.gperf']
-# Number of lines of a new coverage data set
+# Number of lines of a new coverage data set
# to send at a time to the dashboard.
POST_CHUNK_SIZE = 50
@@ -190,7 +190,7 @@ def CleanWin32Lcov(lcov_path, src_root):
stderr=subprocess.STDOUT).communicate()[0]
if output.rfind('error:'):
return None
-
+
tmp_buf1 = output.split('=')
tmp_buf2 = tmp_buf1[len(tmp_buf1) - 2].split('x')[0].split(' ')
loc = tmp_buf2[len(tmp_buf2) - 2]
@@ -220,7 +220,7 @@ def ParseCoverageDataForDashboard(lcov_path):
Args:
lcov_path: File path to lcov coverage data.
-
+
Returns:
List of strings with comma separated source node and coverage.
"""
@@ -241,7 +241,7 @@ def ParseCoverageDataForDashboard(lcov_path):
instrumented_set = {}
executed_set = {}
srcfile_name = line[len('SF:'):]
-
+
# Mark coverage data points hashlist style for the current src file.
if line[:len('DA:')] == 'DA:':
line_info = line[len('DA:'):].split(',')
@@ -251,18 +251,18 @@ def ParseCoverageDataForDashboard(lcov_path):
# line_was_executed is '0' or '1'
if int(line_was_executed):
executed_set[line_num] = True
-
+
# Update results for the current src file at record end.
if line == 'end_of_record':
instrumented = len(instrumented_set.keys())
executed = len(executed_set.keys())
parent_directory = srcfile_name[:srcfile_name.rfind('/') + 1]
linecount_point = linecounts[srcfile_index].strip().split(',')
- assert(len(linecount_point) == 2,
+ assert(len(linecount_point) == 2,
'lintcount format unexpected - %s' % linecounts[srcfile_index])
(linecount_path, linecount_count) = linecount_point
srcfile_index += 1
-
+
# Sanity check that path names in the lcov and linecount are lined up.
if linecount_path[-10:] != srcfile_name[-10:]:
print 'NAME MISMATCH: %s :: %s' % (srcfile_name, linecount_path)
@@ -282,7 +282,7 @@ def ParseCoverageDataForDashboard(lcov_path):
# The first key (sorted) will be the base directory '/'
# but its full path may be '/mnt/chrome_src/src/'
# using this offset will ignore the part '/mnt/chrome_src/src'.
- # Offset is the last '/' that isn't the last character for the
+ # Offset is the last '/' that isn't the last character for the
# first directory name in results (position 1 in keys).
offset = len(keys[1][:keys[1][:-1].rfind('/')])
lines = []
@@ -299,7 +299,7 @@ def ParseCoverageDataForDashboard(lcov_path):
def AddResults(results, location, lines_total, lines_executed):
"""Add resulting line tallies to a location's total.
-
+
Args:
results: Map of node location to corresponding coverage data.
location: Source node string.
@@ -315,7 +315,7 @@ def AddResults(results, location, lines_total, lines_executed):
def PostResultsToDashboard(lcov_path, results, post_url):
"""Post coverage results to coverage dashboard.
-
+
Args:
lcov_path: File path for lcov data in the expected format:
<project>_<platform>_<cl#>.coverage.lcov
@@ -346,7 +346,7 @@ num_fails = 0
def SendPost(req):
"""Execute a post request and retry for up to MAX_FAILURES.
-
+
Args:
req: A urllib2 request object.
@@ -428,7 +428,7 @@ def main():
else:
print 'Unsupported platform'
os.exit(1)
-
+
# Prep coverage results for dashboard and post new set.
parsed_data = ParseCoverageDataForDashboard(options.lcov_path)
PostResultsToDashboard(options.lcov_path, parsed_data, options.post_url)
diff --git a/tools/grit/grit/clique.py b/tools/grit/grit/clique.py
index 2f6fe40..52a50c6 100644
--- a/tools/grit/grit/clique.py
+++ b/tools/grit/grit/clique.py
@@ -19,7 +19,7 @@ class UberClique(object):
'''A factory (NOT a singleton factory) for making cliques. It has several
methods for working with the cliques created using the factory.
'''
-
+
def __init__(self):
# A map from message ID to list of cliques whose source messages have
# that ID. This will contain all cliques created using this factory.
@@ -27,14 +27,14 @@ class UberClique(object):
# same translateable portion and placeholder names, but occur in different
# places in the resource tree.
self.cliques_ = {}
-
+
# A map of clique IDs to list of languages to indicate translations where we
# fell back to English.
self.fallback_translations_ = {}
-
+
# A map of clique IDs to list of languages to indicate missing translations.
self.missing_translations_ = {}
-
+
def _AddMissingTranslation(self, lang, clique, is_error):
tl = self.fallback_translations_
if is_error:
@@ -44,10 +44,10 @@ class UberClique(object):
tl[id] = {}
if lang not in tl[id]:
tl[id][lang] = 1
-
+
def HasMissingTranslations(self):
return len(self.missing_translations_) > 0
-
+
def MissingTranslationsReport(self):
'''Returns a string suitable for printing to report missing
and fallback translations to the user.
@@ -78,13 +78,13 @@ class UberClique(object):
def MakeClique(self, message, translateable=True):
'''Create a new clique initialized with a message.
-
+
Args:
message: tclib.Message()
translateable: True | False
'''
clique = MessageClique(self, message, translateable)
-
+
# Enable others to find this clique by its message ID
if message.GetId() in self.cliques_:
presentable_text = clique.GetMessage().GetPresentableContent()
@@ -93,17 +93,17 @@ class UberClique(object):
self.cliques_[message.GetId()].append(clique)
else:
self.cliques_[message.GetId()] = [clique]
-
+
return clique
def FindCliqueAndAddTranslation(self, translation, language):
'''Adds the specified translation to the clique with the source message
it is a translation of.
-
+
Args:
translation: tclib.Translation()
language: 'en' | 'fr' ...
-
+
Return:
True if the source message was found, otherwise false.
'''
@@ -113,7 +113,7 @@ class UberClique(object):
return True
else:
return False
-
+
def BestClique(self, id):
'''Returns the "best" clique from a list of cliques. All the cliques
must have the same ID. The "best" clique is chosen in the following
@@ -127,14 +127,14 @@ class UberClique(object):
for clique in clique_list:
if not clique_to_ret:
clique_to_ret = clique
-
+
description = clique.GetMessage().GetDescription()
if description and len(description) > 0:
clique_to_ret = clique
if not description.startswith('ID:'):
break # this is the preferred case so we exit right away
return clique_to_ret
-
+
def BestCliquePerId(self):
'''Iterates over the list of all cliques and returns the best clique for
each ID. This will be the first clique with a source message that has a
@@ -143,7 +143,7 @@ class UberClique(object):
'''
for id in self.cliques_:
yield self.BestClique(id)
-
+
def BestCliqueByOriginalText(self, text, meaning):
'''Finds the "best" (as in BestClique()) clique that has original text
'text' and meaning 'meaning'. Returns None if there is no such clique.
@@ -160,7 +160,7 @@ class UberClique(object):
'''Returns a list of all defined message IDs.
'''
return self.cliques_.keys()
-
+
def AllCliques(self):
'''Iterates over all cliques. Note that this can return multiple cliques
with the same ID.
@@ -168,14 +168,14 @@ class UberClique(object):
for cliques in self.cliques_.values():
for c in cliques:
yield c
-
+
def GenerateXtbParserCallback(self, lang, debug=False):
'''Creates a callback function as required by grit.xtb_reader.Parse().
This callback will create Translation objects for each message from
the XTB that exists in this uberclique, and add them as translations for
the relevant cliques. The callback will add translations to the language
specified by 'lang'
-
+
Args:
lang: 'fr'
debug: True | False
@@ -184,13 +184,13 @@ class UberClique(object):
if id not in self.cliques_:
if debug: print "Ignoring translation #%s" % id
return
-
+
if debug: print "Adding translation #%s" % id
-
+
# We fetch placeholder information from the original message (the XTB file
# only contains placeholder names).
original_msg = self.BestClique(id).GetMessage()
-
+
translation = tclib.Translation(id=id)
for is_ph,text in structure:
if not is_ph:
@@ -215,27 +215,27 @@ class CustomType(object):
'''A base class you should implement if you wish to specify a custom type
for a message clique (i.e. custom validation and optional modification of
translations).'''
-
+
def Validate(self, message):
'''Returns true if the message (a tclib.Message object) is valid,
otherwise false.
'''
raise NotImplementedError()
-
+
def ValidateAndModify(self, lang, translation):
'''Returns true if the translation (a tclib.Translation object) is valid,
otherwise false. The language is also passed in. This method may modify
the translation that is passed in, if it so wishes.
'''
raise NotImplementedError()
-
+
def ModifyTextPart(self, lang, text):
'''If you call ModifyEachTextPart, it will turn around and call this method
for each text part of the translation. You should return the modified
version of the text, or just the original text to not change anything.
'''
raise NotImplementedError()
-
+
def ModifyEachTextPart(self, lang, translation):
'''Call this to easily modify one or more of the textual parts of a
translation. It will call ModifyTextPart for each part of the
@@ -271,18 +271,18 @@ class OneOffCustomType(CustomType):
class MessageClique(object):
'''A message along with all of its translations. Also code to bring
translations together with their original message.'''
-
+
# change this to the language code of Messages you add to cliques_.
# TODO(joi) Actually change this based on the <grit> node's source language
source_language = 'en'
-
+
# A constant translation we use when asked for a translation into the
# special language constants.CONSTANT_LANGUAGE.
CONSTANT_TRANSLATION = tclib.Translation(text='TTTTTT')
-
+
def __init__(self, uber_clique, message, translateable=True, custom_type=None):
'''Create a new clique initialized with just a message.
-
+
Args:
uber_clique: Our uber-clique (collection of cliques)
message: tclib.Message()
@@ -305,21 +305,21 @@ class MessageClique(object):
# be used to validate the original message and translations thereof, and
# will also get a chance to modify translations of the message.
self.SetCustomType(custom_type)
-
+
def GetMessage(self):
'''Retrieves the tclib.Message that is the source for this clique.'''
return self.clique[MessageClique.source_language]
-
+
def GetId(self):
'''Retrieves the message ID of the messages in this clique.'''
return self.GetMessage().GetId()
-
+
def IsTranslateable(self):
return self.translateable
-
+
def AddToShortcutGroup(self, group):
self.shortcut_groups.append(group)
-
+
def SetCustomType(self, custom_type):
'''Makes this clique use custom_type for validating messages and
translations, and optionally modifying translations.
@@ -327,53 +327,53 @@ class MessageClique(object):
self.custom_type = custom_type
if custom_type and not custom_type.Validate(self.GetMessage()):
raise exception.InvalidMessage(self.GetMessage().GetRealContent())
-
+
def MessageForLanguage(self, lang, pseudo_if_no_match=True, fallback_to_english=False):
'''Returns the message/translation for the specified language, providing
a pseudotranslation if there is no available translation and a pseudo-
translation is requested.
-
+
The translation of any message whatsoever in the special language
'x_constant' is the message "TTTTTT".
-
+
Args:
lang: 'en'
pseudo_if_no_match: True
fallback_to_english: False
-
+
Return:
tclib.BaseMessage
'''
if not self.translateable:
return self.GetMessage()
-
+
if lang == constants.CONSTANT_LANGUAGE:
return self.CONSTANT_TRANSLATION
-
+
for msglang in self.clique.keys():
if lang == msglang:
return self.clique[msglang]
-
+
if fallback_to_english:
self.uber_clique._AddMissingTranslation(lang, self, is_error=False)
return self.GetMessage()
-
+
# If we're not supposed to generate pseudotranslations, we add an error
# report to a list of errors, then fail at a higher level, so that we
# get a list of all messages that are missing translations.
if not pseudo_if_no_match:
self.uber_clique._AddMissingTranslation(lang, self, is_error=True)
-
+
return pseudo.PseudoMessage(self.GetMessage())
def AllMessagesThatMatch(self, lang_re, include_pseudo = True):
'''Returns a map of all messages that match 'lang', including the pseudo
translation if requested.
-
+
Args:
lang_re: re.compile('fr|en')
include_pseudo: True
-
+
Return:
{ 'en' : tclib.Message,
'fr' : tclib.Translation,
@@ -381,27 +381,27 @@ class MessageClique(object):
'''
if not self.translateable:
return [self.GetMessage()]
-
+
matches = {}
for msglang in self.clique:
if lang_re.match(msglang):
matches[msglang] = self.clique[msglang]
-
+
if include_pseudo:
matches[pseudo.PSEUDO_LANG] = pseudo.PseudoMessage(self.GetMessage())
-
+
return matches
-
+
def AddTranslation(self, translation, language):
'''Add a translation to this clique. The translation must have the same
ID as the message that is the source for this clique.
-
+
If this clique is not translateable, the function just returns.
-
+
Args:
translation: tclib.Translation()
language: 'en'
-
+
Throws:
grit.exception.InvalidTranslation if the translation you're trying to add
doesn't have the same message ID as the source message of this clique.
@@ -411,9 +411,9 @@ class MessageClique(object):
if translation.GetId() != self.GetId():
raise exception.InvalidTranslation(
'Msg ID %s, transl ID %s' % (self.GetId(), translation.GetId()))
-
+
assert not language in self.clique
-
+
# Because two messages can differ in the original content of their
# placeholders yet share the same ID (because they are otherwise the
# same), the translation we are getting may have different original
@@ -424,20 +424,20 @@ class MessageClique(object):
#
# See grit.clique_unittest.MessageCliqueUnittest.testSemiIdenticalCliques
# for a concrete explanation of why this is necessary.
-
+
original = self.MessageForLanguage(self.source_language, False)
if len(original.GetPlaceholders()) != len(translation.GetPlaceholders()):
print ("ERROR: '%s' translation of message id %s does not match" %
(language, translation.GetId()))
assert False
-
+
transl_msg = tclib.Translation(id=self.GetId(),
text=translation.GetPresentableContent(),
placeholders=original.GetPlaceholders())
-
+
if self.custom_type and not self.custom_type.ValidateAndModify(language, transl_msg):
print "WARNING: %s translation failed validation: %s" % (
language, transl_msg.GetId())
-
+
self.clique[language] = transl_msg
diff --git a/tools/grit/grit/clique_unittest.py b/tools/grit/grit/clique_unittest.py
index a1bc487..537f1b7 100644
--- a/tools/grit/grit/clique_unittest.py
+++ b/tools/grit/grit/clique_unittest.py
@@ -28,24 +28,24 @@ class MessageCliqueUnittest(unittest.TestCase):
placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
c = factory.MakeClique(msg)
-
+
self.failUnless(c.GetMessage() == msg)
self.failUnless(c.GetId() == msg.GetId())
-
+
msg_fr = tclib.Translation(text='Bonjour USERNAME, comment ca va?',
id=msg.GetId(), placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
msg_de = tclib.Translation(text='Guten tag USERNAME, wie geht es dir?',
id=msg.GetId(), placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
-
+
c.AddTranslation(msg_fr, 'fr')
factory.FindCliqueAndAddTranslation(msg_de, 'de')
-
+
# sort() sorts lists in-place and does not return them
for lang in ('en', 'fr', 'de'):
self.failUnless(lang in c.clique)
-
+
self.failUnless(c.MessageForLanguage('fr').GetRealContent() ==
msg_fr.GetRealContent())
@@ -56,11 +56,11 @@ class MessageCliqueUnittest(unittest.TestCase):
pass
self.failUnless(c.MessageForLanguage('zh-CN', True) != None)
-
+
rex = re.compile('fr|de|bingo')
self.failUnless(len(c.AllMessagesThatMatch(rex, False)) == 2)
self.failUnless(c.AllMessagesThatMatch(rex, True)[pseudo.PSEUDO_LANG] != None)
-
+
def testBestClique(self):
factory = clique.UberClique()
factory.MakeClique(tclib.Message(text='Alfur', description='alfaholl'))
@@ -72,7 +72,7 @@ class MessageCliqueUnittest(unittest.TestCase):
factory.MakeClique(tclib.Message(text='Gryla', description='vondakerling'))
factory.MakeClique(tclib.Message(text='Leppaludi', description='ID: IDS_LL'))
factory.MakeClique(tclib.Message(text='Leppaludi', description=''))
-
+
count_best_cliques = 0
for c in factory.BestCliquePerId():
count_best_cliques += 1
@@ -111,7 +111,7 @@ class MessageCliqueUnittest(unittest.TestCase):
self.failUnless('Hello %s, how are you doing today?' in content_list)
self.failUnless('Jack "Black" Daniels' in content_list)
self.failUnless('Hello!' in content_list)
-
+
def testCorrectExceptionIfWrongEncodingOnResourceFile(self):
'''This doesn't really belong in this unittest file, but what the heck.'''
resources = grd_reader.Parse(util.WrapInputStream(
@@ -133,37 +133,37 @@ class MessageCliqueUnittest(unittest.TestCase):
placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')]),
]
self.failUnless(messages[0].GetId() == messages[1].GetId())
-
+
# Both of the above would share a translation.
translation = tclib.Translation(id=messages[0].GetId(),
text='Bonjour USERNAME',
placeholders=[tclib.Placeholder(
'USERNAME', '$1', 'Joi')])
-
+
factory = clique.UberClique()
cliques = [factory.MakeClique(msg) for msg in messages]
-
+
for clq in cliques:
clq.AddTranslation(translation, 'fr')
-
+
self.failUnless(cliques[0].MessageForLanguage('fr').GetRealContent() ==
'Bonjour $1')
self.failUnless(cliques[1].MessageForLanguage('fr').GetRealContent() ==
'Bonjour %s')
-
+
def testMissingTranslations(self):
messages = [ tclib.Message(text='Hello'), tclib.Message(text='Goodbye') ]
factory = clique.UberClique()
cliques = [factory.MakeClique(msg) for msg in messages]
-
+
cliques[1].MessageForLanguage('fr', False, True)
-
+
self.failUnless(not factory.HasMissingTranslations())
cliques[0].MessageForLanguage('de', False, False)
-
+
self.failUnless(factory.HasMissingTranslations())
-
+
report = factory.MissingTranslationsReport()
self.failUnless(report.count('WARNING') == 1)
self.failUnless(report.count('8053599568341804890 "Goodbye" fr') == 1)
@@ -179,7 +179,7 @@ class MessageCliqueUnittest(unittest.TestCase):
self.fail()
except:
pass # expected case - 'Bingo bongo' does not start with 'jjj'
-
+
message = tclib.Message(text='jjjBingo bongo')
c = factory.MakeClique(message)
c.SetCustomType(util.NewClassInstance(
diff --git a/tools/grit/grit/extern/FP.py b/tools/grit/grit/extern/FP.py
index 1e0bce8..77b1adc 100644
--- a/tools/grit/grit/extern/FP.py
+++ b/tools/grit/grit/extern/FP.py
@@ -31,4 +31,4 @@ def FingerPrint(str, encoding='utf-8'):
if fp & 0x8000000000000000L:
fp = - ((~fp & 0xFFFFFFFFFFFFFFFFL) + 1)
return fp
-
+
diff --git a/tools/grit/grit/extern/tclib.py b/tools/grit/grit/extern/tclib.py
index 8438835..c9dba2b 100644
--- a/tools/grit/grit/extern/tclib.py
+++ b/tools/grit/grit/extern/tclib.py
@@ -358,7 +358,7 @@ class Message(BaseMessage):
self.__meaning = meaning
self.__time_created = time_created
self.SetIsHidden(is_hidden)
-
+
# String representation
def __str__(self):
s = 'source: %s, id: %s, content: "%s", meaning: "%s", ' \
@@ -443,7 +443,7 @@ class Message(BaseMessage):
is_hidden : 0 or 1 - if the message should be hidden, 0 otherwise
"""
if is_hidden not in [0, 1]:
- raise MessageTranslationError, "is_hidden must be 0 or 1, got %s"
+ raise MessageTranslationError, "is_hidden must be 0 or 1, got %s"
self.__is_hidden = is_hidden
def IsHidden(self):
diff --git a/tools/grit/grit/format/html_inline.py b/tools/grit/grit/format/html_inline.py
index f66737c..cf8effe 100755
--- a/tools/grit/grit/format/html_inline.py
+++ b/tools/grit/grit/format/html_inline.py
@@ -25,10 +25,10 @@ DIST_SUBSTR = '%DISTRIBUTION%'
def ReadFile(input_filename):
"""Helper function that returns input_filename as a string.
-
+
Args:
input_filename: name of file to be read
-
+
Returns:
string
"""
@@ -40,7 +40,7 @@ def ReadFile(input_filename):
def SrcInline(src_match, base_path, distribution):
"""regex replace function.
- Takes a regex match for src="filename", attempts to read the file
+ Takes a regex match for src="filename", attempts to read the file
at 'filename' and returns the src attribute with the file inlined
as a data URI. If it finds DIST_SUBSTR string in file name, replaces
it with distribution.
@@ -60,37 +60,37 @@ def SrcInline(src_match, base_path, distribution):
return src_match.group(0)
filename = filename.replace('%DISTRIBUTION%', distribution)
- filepath = os.path.join(base_path, filename)
+ filepath = os.path.join(base_path, filename)
mimetype = mimetypes.guess_type(filename)[0] or 'text/plain'
inline_data = base64.standard_b64encode(ReadFile(filepath))
prefix = src_match.string[src_match.start():src_match.start('filename')-1]
return "%s\"data:%s;base64,%s\"" % (prefix, mimetype, inline_data)
-
+
def InlineFile(input_filename, output_filename):
"""Inlines the resources in a specified file.
-
+
Reads input_filename, finds all the src attributes and attempts to
inline the files they are referring to, then writes the result
to output_filename.
-
+
Args:
input_filename: name of file to read in
output_filename: name of file to be written to
"""
print "inlining %s to %s" % (input_filename, output_filename)
- input_filepath = os.path.dirname(input_filename)
-
+ input_filepath = os.path.dirname(input_filename)
+
distribution = DIST_DEFAULT
if DIST_ENV_VAR in os.environ.keys():
distribution = os.environ[DIST_ENV_VAR]
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:].lower()
-
+
def SrcReplace(src_match):
"""Helper function to provide SrcInline with the base file path"""
return SrcInline(src_match, input_filepath, distribution)
-
+
# TODO(glen): Make this regex not match src="" text that is not inside a tag
flat_text = re.sub('src="(?P<filename>[^"\']*)"',
SrcReplace,
diff --git a/tools/grit/grit/format/interface.py b/tools/grit/grit/format/interface.py
index a9395eb..3f91ab2 100644
--- a/tools/grit/grit/format/interface.py
+++ b/tools/grit/grit/format/interface.py
@@ -12,19 +12,19 @@ import re
class ItemFormatter(object):
'''Base class for a formatter that knows how to format a single item.'''
-
+
def Format(self, item, lang='', begin_item=True, output_dir='.'):
'''Returns a Unicode string representing 'item' in the format known by this
item formatter, for the language 'lang'. May be called once at the
start of the item (begin_item == True) and again at the end
(begin_item == False), or only at the start of the item (begin_item == True)
-
+
Args:
item: anything
lang: 'en'
begin_item: True | False
output_dir: '.'
-
+
Return:
u'hello'
'''
diff --git a/tools/grit/grit/format/rc.py b/tools/grit/grit/format/rc.py
index 1975acf..d7656a0 100644
--- a/tools/grit/grit/format/rc.py
+++ b/tools/grit/grit/format/rc.py
@@ -19,20 +19,20 @@ _LINEBREAKS = re.compile('\r\n|\n|\r')
'''
This dictionary defines the langauge charset pair lookup table, which is used
-for replacing the GRIT expand variables for language info in Product Version
-resource. The key is the language ISO country code, and the value
+for replacing the GRIT expand variables for language info in Product Version
+resource. The key is the language ISO country code, and the value
is the language and character-set pair, which is a hexadecimal string
consisting of the concatenation of the language and character-set identifiers.
-The first 4 digit of the value is the hex value of LCID, the remaining
+The first 4 digit of the value is the hex value of LCID, the remaining
4 digits is the hex value of character-set id(code page)of the language.
-
+
We have defined three GRIT expand_variables to be used in the version resource
-file to set the language info. Here is an example how they should be used in
-the VS_VERSION_INFO section of the resource file to allow GRIT to localize
+file to set the language info. Here is an example how they should be used in
+the VS_VERSION_INFO section of the resource file to allow GRIT to localize
the language info correctly according to product locale.
VS_VERSION_INFO VERSIONINFO
-...
+...
BEGIN
BLOCK "StringFileInfo"
BEGIN
@@ -90,7 +90,7 @@ _LANGUAGE_CHARSET_PAIR = {
'sk' : '041b04e2',
'et' : '042504e9',
'ja' : '041103a4',
- 'sl' : '042404e2',
+ 'sl' : '042404e2',
'en' : '040904b0',
}
@@ -135,28 +135,28 @@ _LANGUAGE_DIRECTIVE_PAIR = {
'sk' : 'LANG_SLOVAK, SUBLANG_DEFAULT',
'et' : 'LANG_ESTONIAN, SUBLANG_DEFAULT',
'ja' : 'LANG_JAPANESE, SUBLANG_DEFAULT',
- 'sl' : 'LANG_SLOVENIAN, SUBLANG_DEFAULT',
+ 'sl' : 'LANG_SLOVENIAN, SUBLANG_DEFAULT',
'en' : 'LANG_ENGLISH, SUBLANG_ENGLISH_US',
}
-def GetLangCharsetPair(language) :
+def GetLangCharsetPair(language) :
if _LANGUAGE_CHARSET_PAIR.has_key(language) :
return _LANGUAGE_CHARSET_PAIR[language]
else :
print 'Warning:GetLangCharsetPair() found undefined language %s' %(language)
return ''
-def GetLangDirectivePair(language) :
+def GetLangDirectivePair(language) :
if _LANGUAGE_DIRECTIVE_PAIR.has_key(language) :
return _LANGUAGE_DIRECTIVE_PAIR[language]
else :
print 'Warning:GetLangDirectivePair() found undefined language %s' %(language)
return 'unknown language: see tools/grit/format/rc.py'
-def GetLangIdHex(language) :
+def GetLangIdHex(language) :
if _LANGUAGE_CHARSET_PAIR.has_key(language) :
- langcharset = _LANGUAGE_CHARSET_PAIR[language]
- lang_id = '0x' + langcharset[0:4]
+ langcharset = _LANGUAGE_CHARSET_PAIR[language]
+ lang_id = '0x' + langcharset[0:4]
return lang_id
else :
print 'Warning:GetLangIdHex() found undefined language %s' %(language)
@@ -165,8 +165,8 @@ def GetLangIdHex(language) :
def GetCharsetIdDecimal(language) :
if _LANGUAGE_CHARSET_PAIR.has_key(language) :
- langcharset = _LANGUAGE_CHARSET_PAIR[language]
- charset_decimal = int(langcharset[4:], 16)
+ langcharset = _LANGUAGE_CHARSET_PAIR[language]
+ charset_decimal = int(langcharset[4:], 16)
return str(charset_decimal)
else :
print 'Warning:GetCharsetIdDecimal() found undefined language %s' %(language)
@@ -181,15 +181,15 @@ def GetUnifiedLangCode(language) :
else :
return language
-
+
def _MakeRelativePath(base_path, path_to_make_relative):
'''Returns a relative path such from the base_path to
the path_to_make_relative.
-
+
In other words, os.join(base_path,
MakeRelativePath(base_path, path_to_make_relative))
is the same location as path_to_make_relative.
-
+
Args:
base_path: the root path
path_to_make_relative: an absolute path that is on the same drive
@@ -199,7 +199,7 @@ def _MakeRelativePath(base_path, path_to_make_relative):
def _GetPathAfterPrefix(prefix_path, path_with_prefix):
'''Gets the subpath within in prefix_path for the path_with_prefix
with no beginning or trailing path separators.
-
+
Args:
prefix_path: the base path
path_with_prefix: a path that starts with prefix_path
@@ -210,7 +210,7 @@ def _MakeRelativePath(base_path, path_to_make_relative):
if normalized_path == '.':
normalized_path = ''
return normalized_path
-
+
def _GetCommonBaseDirectory(*args):
'''Returns the common prefix directory for the given paths
@@ -253,13 +253,13 @@ def _MakeRelativePath(base_path, path_to_make_relative):
# common to all paths, so we can quit going through all of
# the paths.
break
- return prefix
+ return prefix
prefix = _GetCommonBaseDirectory(base_path, path_to_make_relative)
# If the paths had no commonality at all, then return the absolute path
# because it is the best that can be done. If the path had to be relative
# then eventually this absolute path will be discovered (when a build breaks)
- # and an appropriate fix can be made, but having this allows for the best
+ # and an appropriate fix can be made, but having this allows for the best
# backward compatibility with the absolute path behavior in the past.
if len(prefix) <= 0:
return path_to_make_relative
@@ -270,7 +270,7 @@ def _MakeRelativePath(base_path, path_to_make_relative):
path_pieces = remaining_base_path.split(os.path.sep)
base_depth_from_prefix = len([d for d in path_pieces if len(d)])
base_to_prefix = (".." + os.path.sep) * base_depth_from_prefix
-
+
# Put add in the path from the prefix to the path_to_make_relative
remaining_other_path = _GetPathAfterPrefix(prefix, path_to_make_relative)
return base_to_prefix + remaining_other_path
@@ -297,11 +297,11 @@ class TopLevel(interface.ItemFormatter):
continue
if output.attrs['language_section'] == '':
# If no language_section is requested, no directive is added
- # (Used when the generated rc will be included from another rc
+ # (Used when the generated rc will be included from another rc
# file that will have the appropriate language directive)
language_directive = ''
elif output.attrs['language_section'] == 'neutral':
- # If a neutral language section is requested (default), add a
+ # If a neutral language section is requested (default), add a
# neutral language directive
language_directive = 'LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL'
elif output.attrs['language_section'] == 'lang':
@@ -337,37 +337,37 @@ class StringTable(interface.ItemFormatter):
class Message(interface.ItemFormatter):
'''Writes out a single message to a string table.'''
-
+
def Format(self, item, lang='en', begin_item=True, output_dir='.'):
from grit.node import message
if not begin_item:
return ''
-
+
assert isinstance(lang, types.StringTypes)
assert isinstance(item, message.MessageNode)
-
+
message = item.ws_at_start + item.Translate(lang) + item.ws_at_end
# Escape quotation marks (RC format uses doubling-up
message = message.replace('"', '""')
# Replace linebreaks with a \n escape
message = _LINEBREAKS.sub(r'\\n', message)
-
+
name_attr = item.GetTextualIds()[0]
-
+
return ' %-15s "%s"\n' % (name_attr, message)
class RcSection(interface.ItemFormatter):
'''Writes out an .rc file section.'''
-
+
def Format(self, item, lang='en', begin_item=True, output_dir='.'):
if not begin_item:
return ''
-
+
assert isinstance(lang, types.StringTypes)
from grit.node import structure
assert isinstance(item, structure.StructureNode)
-
+
if item.IsExcludedFromRc():
return ''
else:
@@ -378,7 +378,7 @@ class RcSection(interface.ItemFormatter):
# Replace the language expand_variables in version rc info.
unified_lang_code = GetUnifiedLangCode(lang)
- text = text.replace('[GRITVERLANGCHARSETHEX]',
+ text = text.replace('[GRITVERLANGCHARSETHEX]',
GetLangCharsetPair(unified_lang_code))
text = text.replace('[GRITVERLANGID]', GetLangIdHex(unified_lang_code))
text = text.replace('[GRITVERCHARSETID]',
@@ -389,13 +389,13 @@ class RcSection(interface.ItemFormatter):
class RcInclude(interface.ItemFormatter):
'''Writes out an item that is included in an .rc file (e.g. an ICON)'''
-
+
def __init__(self, type, filenameWithoutPath = 0, relative_path = 0,
flatten_html = 0):
'''Indicates to the instance what the type of the resource include is,
e.g. 'ICON' or 'HTML'. Case must be correct, i.e. if the type is all-caps
the parameter should be all-caps.
-
+
Args:
type: 'ICON'
'''
@@ -403,7 +403,7 @@ class RcInclude(interface.ItemFormatter):
self.filenameWithoutPath = filenameWithoutPath
self.relative_path_ = relative_path
self.flatten_html = flatten_html
-
+
def Format(self, item, lang='en', begin_item=True, output_dir='.'):
if not begin_item:
return ''
@@ -414,7 +414,7 @@ class RcInclude(interface.ItemFormatter):
assert isinstance(item, (structure.StructureNode, include.IncludeNode))
assert (isinstance(item, include.IncludeNode) or
item.attrs['type'] in ['tr_html', 'admin_template', 'txt', 'muppet'])
-
+
# By default, we use relative pathnames to included resources so that
# sharing the resulting .rc files is possible.
#
diff --git a/tools/grit/grit/format/rc_header.py b/tools/grit/grit/format/rc_header.py
index b67a1cb..4da974a 100644
--- a/tools/grit/grit/format/rc_header.py
+++ b/tools/grit/grit/format/rc_header.py
@@ -18,7 +18,7 @@ from grit.extern import FP
class TopLevel(interface.ItemFormatter):
'''Writes the necessary preamble for a resource.h file.'''
-
+
def Format(self, item, lang='', begin_item=True, output_dir='.'):
if not begin_item:
return ''
@@ -38,9 +38,9 @@ class TopLevel(interface.ItemFormatter):
for child in output_node.children:
if child.name == 'emit':
if child.attrs['emit_type'] == 'prepend':
- return header_string
+ return header_string
# else print out the default header with include
- return header_string + '''
+ return header_string + '''
#include <atlres.h>
'''
@@ -61,7 +61,7 @@ class Item(interface.ItemFormatter):
dialog resources) it should define a function GetTextIds(self) that returns
a list of textual IDs (strings). Otherwise the formatter will use the
'name' attribute of the node.'''
-
+
# All IDs allocated so far, mapped to the textual ID they represent.
# Used to detect and resolve collisions.
ids_ = {}
@@ -70,7 +70,7 @@ class Item(interface.ItemFormatter):
# represent. Used when literal IDs are being defined in the 'identifiers'
# section of the GRD file to define other message IDs.
tids_ = {}
-
+
def _VerifyId(self, id, tid, msg_if_error):
if id in self.ids_ and self.ids_[id] != tid:
raise exception.IdRangeOverlap(msg_if_error +
@@ -79,7 +79,7 @@ class Item(interface.ItemFormatter):
if id < 101:
print ('WARNING: Numeric resource IDs should be greater than 100 to avoid\n'
'conflicts with system-defined resource IDs.')
-
+
def Format(self, item, lang='', begin_item=True, output_dir='.'):
if not begin_item:
return ''
@@ -91,7 +91,7 @@ class Item(interface.ItemFormatter):
if 'generateid' in item.attrs:
if item.attrs['generateid'] == 'false':
return ''
-
+
text_ids = item.GetTextualIds()
# We consider the "parent" of the item to be the GroupingNode containing
@@ -101,7 +101,7 @@ class Item(interface.ItemFormatter):
while item_parent and not isinstance(item_parent,
grit.node.empty.GroupingNode):
item_parent = item_parent.parent
-
+
lines = []
for tid in text_ids:
if util.SYSTEM_IDENTIFIERS.match(tid):
@@ -113,11 +113,11 @@ class Item(interface.ItemFormatter):
if hasattr(item, 'GetId') and item.GetId():
id = long(item.GetId())
- elif ('offset' in item.attrs and item_parent and
+ elif ('offset' in item.attrs and item_parent and
'first_id' in item_parent.attrs and item_parent.attrs['first_id'] != ''):
offset_text = item.attrs['offset']
parent_text = item_parent.attrs['first_id']
-
+
try:
offset_id = long(offset_text)
except ValueError:
@@ -125,11 +125,11 @@ class Item(interface.ItemFormatter):
try:
parent_id = long(parent_text)
- except ValueError:
+ except ValueError:
parent_id = self.tids_[parent_text]
-
+
id = parent_id + offset_id
-
+
# We try to allocate IDs sequentially for blocks of items that might
# be related, for instance strings in a stringtable (as their IDs might be
# used e.g. as IDs for some radio buttons, in which case the IDs must
@@ -151,7 +151,7 @@ class Item(interface.ItemFormatter):
# Automatically generate the ID based on the first clique from the
# first child of the first child node of our parent (i.e. when we
# first get to this location in the code).
-
+
# According to
# http://msdn.microsoft.com/en-us/library/t2zechd4(VS.71).aspx
# the safe usable range for resource IDs in Windows is from decimal
@@ -160,11 +160,11 @@ class Item(interface.ItemFormatter):
id = FP.UnsignedFingerPrint(tid)
id = id % (0x7FFF - 101)
id += 101
-
+
self._VerifyId(id, tid,
'Automatic (fingerprint-based) numeric ID for %s (%d) overlapped\n'
'with a previously allocated range.' % (tid, id))
-
+
if item_parent:
item_parent._last_id_ = id
else:
@@ -173,7 +173,7 @@ class Item(interface.ItemFormatter):
self._VerifyId(id, tid,
'Wanted to make numeric value for ID %s (%d) follow the numeric value of\n'
'the previous ID in the .grd file, but it was already used.' % (tid, id))
-
+
if tid not in self.ids_.values():
self.ids_[id] = tid
self.tids_[tid] = id
diff --git a/tools/grit/grit/format/rc_header_unittest.py b/tools/grit/grit/format/rc_header_unittest.py
index b1ffba3..3f2a228 100644
--- a/tools/grit/grit/format/rc_header_unittest.py
+++ b/tools/grit/grit/format/rc_header_unittest.py
@@ -26,7 +26,7 @@ class RcHeaderFormatterUnittest(unittest.TestCase):
def setUp(self):
self.formatter = rc_header.Item()
self.formatter.ids_ = {} # need to reset this between tests
-
+
def FormatAll(self, grd):
output = []
for node in grd:
@@ -35,7 +35,7 @@ class RcHeaderFormatterUnittest(unittest.TestCase):
output.append(self.formatter.Format(node))
output = ''.join(output)
return output.replace(' ', '')
-
+
def testFormatter(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3" base_dir=".">
@@ -60,7 +60,7 @@ class RcHeaderFormatterUnittest(unittest.TestCase):
output = self.FormatAll(grd)
self.failUnless(output.count('IDS_GREETING10000'))
self.failUnless(output.count('ID_LOGO300'))
-
+
def testExplicitFirstIdOverlaps(self):
# second first_id will overlap preexisting range
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
@@ -79,7 +79,7 @@ class RcHeaderFormatterUnittest(unittest.TestCase):
</release>
</grit>'''), '.')
self.assertRaises(exception.IdRangeOverlap, self.FormatAll, grd)
-
+
def testImplicitOverlapsPreexisting(self):
# second message in <messages> will overlap preexisting range
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
diff --git a/tools/grit/grit/format/rc_unittest.py b/tools/grit/grit/format/rc_unittest.py
index 4e97575..c7b0ee8 100644
--- a/tools/grit/grit/format/rc_unittest.py
+++ b/tools/grit/grit/format/rc_unittest.py
@@ -50,7 +50,7 @@ Sting sting
</messages>
'''), flexible_root = True)
util.FixRootForUnittest(root)
-
+
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
output = buf.getvalue()
@@ -73,12 +73,12 @@ END'''.strip())
</structures>'''), flexible_root = True)
util.FixRootForUnittest(root)
root.RunGatherers(recursive = True)
-
+
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
output = buf.getvalue()
self.failUnless(output.strip() == u'''
-IDC_KLONKMENU MENU
+IDC_KLONKMENU MENU
BEGIN
POPUP "&File"
BEGIN
@@ -150,7 +150,7 @@ END'''.strip())
</structures>'''), flexible_root = True)
util.FixRootForUnittest(root, '/temp')
# We do not run gatherers as it is not needed and wouldn't find the file
-
+
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
output = buf.getvalue()
@@ -159,7 +159,7 @@ END'''.strip())
% (util.normpath('/temp/bingo.html').replace('\\', '\\\\'),
util.normpath('/temp/bingo2.html').replace('\\', '\\\\')))
# hackety hack to work on win32&lin
- output = re.sub('"[c-zC-Z]:', '"', output)
+ output = re.sub('"[c-zC-Z]:', '"', output)
self.failUnless(output.strip() == expected)
def testRcIncludeFile(self):
@@ -169,7 +169,7 @@ END'''.strip())
<include type="TXT" name="TEXT_TWO" file="bingo2.txt" filenameonly="true" />
</includes>'''), flexible_root = True)
util.FixRootForUnittest(root, '/temp')
-
+
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf)
output = buf.getvalue()
@@ -191,21 +191,21 @@ END'''.strip())
# We must run the gatherers since we'll be wanting the translation of the
# file. The file exists in the location pointed to.
root.RunGatherers(recursive=True)
-
+
output_dir = tempfile.gettempdir()
en_file = root.FileForLanguage('en', output_dir)
self.failUnless(en_file == input_file)
fr_file = root.FileForLanguage('fr', output_dir)
self.failUnless(fr_file == os.path.join(output_dir, 'fr_simple.html'))
-
+
fo = file(fr_file)
contents = fo.read()
fo.close()
-
+
self.failUnless(contents.find('<p>') != -1) # should contain the markup
self.failUnless(contents.find('Hello!') == -1) # should be translated
-
-
+
+
def testFallbackToEnglish(self):
root = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
@@ -217,7 +217,7 @@ END'''.strip())
</grit>'''), util.PathFromRoot('.'))
util.FixRootForUnittest(root)
root.RunGatherers(recursive = True)
-
+
node = root.GetNodeById("IDD_ABOUTBOX")
formatter = node.ItemFormatter('rc_all')
output = formatter.Format(node, 'bingobongo')
diff --git a/tools/grit/grit/gather/admin_template.py b/tools/grit/grit/gather/admin_template.py
index ae63904..fc07ed8 100644
--- a/tools/grit/grit/gather/admin_template.py
+++ b/tools/grit/grit/gather/admin_template.py
@@ -23,7 +23,7 @@ class MalformedAdminTemplateException(exception.Base):
class AdmGatherer(regexp.RegexpGatherer):
'''Gatherer for the translateable portions of an admin template.
-
+
This gatherer currently makes the following assumptions:
- there is only one [strings] section and it is always the last section
of the file
@@ -33,17 +33,17 @@ class AdmGatherer(regexp.RegexpGatherer):
# Finds the strings section as the group named 'strings'
_STRINGS_SECTION = re.compile('(?P<first_part>.+^\[strings\])(?P<strings>.+)\Z',
re.MULTILINE | re.DOTALL)
-
+
# Finds the translateable sections from within the [strings] section.
_TRANSLATEABLES = re.compile('^\s*[A-Za-z0-9_]+\s*=\s*"(?P<text>.+)"\s*$',
re.MULTILINE)
-
+
def __init__(self, text):
regexp.RegexpGatherer.__init__(self, text)
-
+
def Escape(self, text):
return text.replace('\n', '\\n')
-
+
def UnEscape(self, text):
return text.replace('\\n', '\n')
@@ -57,18 +57,18 @@ class AdmGatherer(regexp.RegexpGatherer):
self._AddNontranslateableChunk(m.group('first_part'))
# Then parse the rest using the _TRANSLATEABLES regexp.
self._RegExpParse(self._TRANSLATEABLES, m.group('strings'))
-
+
# static method
def FromFile(adm_file, ext_key=None, encoding='cp1252'):
'''Loads the contents of 'adm_file' in encoding 'encoding' and creates
an AdmGatherer instance that gathers from those contents.
-
+
The 'ext_key' parameter is ignored.
-
+
Args:
adm_file: file('bingo.rc') | 'filename.rc'
encoding: 'utf-8'
-
+
Return:
AdmGatherer(contents_of_file)
'''
diff --git a/tools/grit/grit/gather/admin_template_unittest.py b/tools/grit/grit/gather/admin_template_unittest.py
index 249349a..1889b72 100644
--- a/tools/grit/grit/gather/admin_template_unittest.py
+++ b/tools/grit/grit/gather/admin_template_unittest.py
@@ -34,10 +34,10 @@ class AdmGathererUnittest(unittest.TestCase):
self.failUnless(len(gatherer.GetCliques()) == 2)
self.failUnless(gatherer.GetCliques()[1].GetMessage().GetRealContent() ==
'bingolabongola "the wise" fingulafongula')
-
+
translation = gatherer.Translate('en')
self.failUnless(translation == gatherer.GetText().strip())
-
+
def testErrorHandling(self):
pseudofile = StringIO.StringIO(
'bingo bongo\n'
@@ -47,7 +47,7 @@ class AdmGathererUnittest(unittest.TestCase):
gatherer = admin_template.AdmGatherer.FromFile(pseudofile)
self.assertRaises(admin_template.MalformedAdminTemplateException,
gatherer.Parse)
-
+
_TRANSLATABLES_FROM_FILE = (
'Google', 'Google Desktop Search', 'Preferences',
'Controls Google Deskop Search preferences',
@@ -56,20 +56,20 @@ class AdmGathererUnittest(unittest.TestCase):
'Prevent indexing of e-mail',
# there are lots more but we don't check any further
)
-
+
def VerifyCliquesFromAdmFile(self, cliques):
self.failUnless(len(cliques) > 20)
for ix in range(len(self._TRANSLATABLES_FROM_FILE)):
text = cliques[ix].GetMessage().GetRealContent()
self.failUnless(text == self._TRANSLATABLES_FROM_FILE[ix])
-
+
def testFromFile(self):
fname = util.PathFromRoot('grit/test/data/GoogleDesktopSearch.adm')
gatherer = admin_template.AdmGatherer.FromFile(fname)
gatherer.Parse()
cliques = gatherer.GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
-
+
def MakeGrd(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3">
@@ -92,7 +92,7 @@ class AdmGathererUnittest(unittest.TestCase):
grd = self.MakeGrd()
cliques = grd.children[0].children[0].children[0].GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
-
+
def testFileIsOutput(self):
grd = self.MakeGrd()
dirname = tempfile.mkdtemp()
@@ -102,7 +102,7 @@ class AdmGathererUnittest(unittest.TestCase):
tool.output_directory = dirname
tool.res = grd
tool.Process()
-
+
self.failUnless(os.path.isfile(
os.path.join(dirname, 'de_GoogleDesktopSearch.adm')))
self.failUnless(os.path.isfile(
diff --git a/tools/grit/grit/gather/interface.py b/tools/grit/grit/gather/interface.py
index 984dea4..8a6532e 100644
--- a/tools/grit/grit/gather/interface.py
+++ b/tools/grit/grit/gather/interface.py
@@ -27,58 +27,58 @@ class GathererBase(object):
become part of the uberclique supplied by the user.
'''
self.uberclique = uberclique
-
+
def SetSkeleton(self, is_skeleton):
self.is_skeleton = is_skeleton
-
+
def IsSkeleton(self):
return self.is_skeleton
-
+
def Parse(self):
'''Parses the contents of what is being gathered.'''
raise NotImplementedError()
-
+
def GetText(self):
'''Returns the text of what is being gathered.'''
raise NotImplementedError()
-
+
def GetTextualIds(self):
'''Returns the mnemonic IDs that need to be defined for the resource
being gathered to compile correctly.'''
return []
-
+
def GetCliques(self):
'''Returns the MessageClique objects for all translateable portions.'''
return []
-
+
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
'''Returns the resource being gathered, with translateable portions filled
with the translation for language 'lang'.
-
+
If pseudo_if_not_available is true, a pseudotranslation will be used for any
message that doesn't have a real translation available.
-
+
If no translation is available and pseudo_if_not_available is false,
fallback_to_english controls the behavior. If it is false, throw an error.
If it is true, use the English version of the message as its own
"translation".
-
+
If skeleton_gatherer is specified, the translation will use the nontranslateable
parts from the gatherer 'skeleton_gatherer', which must be of the same type
as 'self'.
-
- If fallback_to_english
-
+
+ If fallback_to_english
+
Args:
lang: 'en'
pseudo_if_not_available: True | False
skeleton_gatherer: other_gatherer
fallback_to_english: True | False
-
+
Return:
e.g. 'ID_THIS_SECTION TYPE\n...BEGIN\n "Translated message"\n......\nEND'
-
+
Raises:
grit.exception.NotReady() if used before Parse() has been successfully
called.
@@ -87,19 +87,19 @@ class GathererBase(object):
requested language.
'''
raise NotImplementedError()
-
+
def FromFile(rc_file, extkey=None, encoding = 'cp1252'):
'''Loads the resource from the file 'rc_file'. Optionally an external key
(which gets passed to the gatherer's constructor) can be specified.
-
+
If 'rc_file' is a filename, it will be opened for reading using 'encoding'.
Otherwise the 'encoding' parameter is ignored.
-
+
Args:
rc_file: file('') | 'filename.rc'
extkey: e.g. 'ID_MY_DIALOG'
encoding: 'utf-8'
-
+
Return:
grit.gather.interface.GathererBase subclass
'''
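
The contract above boils down to: Parse() once, then GetText(), GetCliques() and Translate(). The docstring's dangling "If fallback_to_english" sentence is spelled out a few lines earlier: when no translation exists and pseudotranslation is disallowed, fallback_to_english chooses between reusing the English text and raising an error. A minimal hypothetical subclass, essentially what the txt.py gatherer later in this change does, might look like this:

from grit.gather import interface
from grit import tclib

class SingleMessageGatherer(interface.GathererBase):
  '''Illustrative gatherer that treats its whole input as one message.'''

  def __init__(self, text):
    super(SingleMessageGatherer, self).__init__()
    self.text_ = text
    self.clique_ = None

  def Parse(self):
    self.clique_ = self.uberclique.MakeClique(tclib.Message(text=self.text_))

  def GetText(self):
    return self.text_

  def GetCliques(self):
    return [self.clique_]

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    return self.clique_.MessageForLanguage(
        lang, pseudo_if_not_available, fallback_to_english).GetRealContent()
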
diff --git a/tools/grit/grit/gather/muppet_strings.py b/tools/grit/grit/gather/muppet_strings.py
index bade44f..545b129 100644
--- a/tools/grit/grit/gather/muppet_strings.py
+++ b/tools/grit/grit/gather/muppet_strings.py
@@ -24,7 +24,7 @@ PLACEHOLDER_RE = re.compile('(\[!\[|\]!\])')
class MuppetStringsContentHandler(xml.sax.handler.ContentHandler):
'''A very dumb parser for splitting the strings.xml file into translateable
and nontranslateable chunks.'''
-
+
def __init__(self, parent):
self.curr_elem = ''
self.curr_text = ''
@@ -32,11 +32,11 @@ class MuppetStringsContentHandler(xml.sax.handler.ContentHandler):
self.description = ''
self.meaning = ''
self.translateable = True
-
+
def startElement(self, name, attrs):
if (name != 'strings'):
self.curr_elem = name
-
+
attr_names = attrs.getQNames()
if 'desc' in attr_names:
self.description = attrs.getValueByQName('desc')
@@ -46,7 +46,7 @@ class MuppetStringsContentHandler(xml.sax.handler.ContentHandler):
value = attrs.getValueByQName('translateable')
if value.lower() not in ['true', 'yes']:
self.translateable = False
-
+
att_text = []
for attr_name in attr_names:
att_text.append(' ')
@@ -54,14 +54,14 @@ class MuppetStringsContentHandler(xml.sax.handler.ContentHandler):
att_text.append('=')
att_text.append(
xml.sax.saxutils.quoteattr(attrs.getValueByQName(attr_name)))
-
+
self.parent._AddNontranslateableChunk("<%s%s>" %
(name, ''.join(att_text)))
-
+
def characters(self, content):
if self.curr_elem != '':
self.curr_text += content
-
+
def endElement(self, name):
if name != 'strings':
self.parent.AddMessage(self.curr_text, self.description,
@@ -72,13 +72,13 @@ class MuppetStringsContentHandler(xml.sax.handler.ContentHandler):
self.description = ''
self.meaning = ''
self.translateable = True
-
+
def ignorableWhitespace(self, whitespace):
pass
class MuppetStrings(regexp.RegexpGatherer):
'''Supports the strings.xml format used by Muppet gadgets.'''
-
+
def __init__(self, text):
if util.IsExtraVerbose():
print text
@@ -87,9 +87,9 @@ class MuppetStrings(regexp.RegexpGatherer):
def AddMessage(self, msgtext, description, meaning, translateable):
if msgtext == '':
return
-
+
msg = tclib.Message(description=description, meaning=meaning)
-
+
unescaped_text = self.UnEscape(msgtext)
parts = PLACEHOLDER_RE.split(unescaped_text)
in_placeholder = False
@@ -106,15 +106,15 @@ class MuppetStrings(regexp.RegexpGatherer):
'(placeholder)'))
else:
msg.AppendText(part)
-
+
self.skeleton_.append(
self.uberclique.MakeClique(msg, translateable=translateable))
-
+
# if statement needed because this is supposed to be idempotent (so never
# set back to false)
if translateable:
self.translatable_chunk_ = True
-
+
# Although we use the RegexpGatherer base class, we do not use the
# _RegExpParse method of that class to implement Parse(). Instead, we
# parse using a SAX parser.
@@ -126,10 +126,10 @@ class MuppetStrings(regexp.RegexpGatherer):
handler = MuppetStringsContentHandler(self)
xml.sax.parse(stream, handler)
self._AddNontranslateableChunk(u'</strings>\n')
-
+
def Escape(self, text):
return util.EncodeCdata(text)
-
+
def FromFile(filename_or_stream, extkey=None, encoding='cp1252'):
if isinstance(filename_or_stream, types.StringTypes):
if util.IsVerbose():
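
The AddMessage() logic above leans on the fact that re.split() keeps the delimiters when the pattern has a capturing group; a quick illustration with a made-up string:

import re

PLACEHOLDER_RE = re.compile(r'(\[!\[|\]!\])')

parts = PLACEHOLDER_RE.split('Hello [![USER]!], you have [![COUNT]!] messages')
# ['Hello ', '[![', 'USER', ']!]', ', you have ', '[![', 'COUNT', ']!]', ' messages']
# AddMessage() flips in_placeholder on each '[![' / ']!]' token, wraps the names in
# between into tclib.Placeholder objects, and appends everything else as plain text.
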
diff --git a/tools/grit/grit/gather/muppet_strings_unittest.py b/tools/grit/grit/gather/muppet_strings_unittest.py
index 97b7eb1..434d9f8 100644
--- a/tools/grit/grit/gather/muppet_strings_unittest.py
+++ b/tools/grit/grit/gather/muppet_strings_unittest.py
@@ -21,7 +21,7 @@ class MuppetStringsUnittest(unittest.TestCase):
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 2)
self.failUnless(gatherer.Translate('en').replace('\n', '') == original)
-
+
def testEscapingAndLinebreaks(self):
original = ('''\
<strings>
@@ -52,7 +52,7 @@ you?</LINEBREAK> <ESCAPED meaning="bingo">4 &lt; 6</ESCAPED>
ph = msg.GetPlaceholders()[0]
self.failUnless(ph.GetOriginal() == '[![USER]!]')
self.failUnless(ph.GetPresentation() == 'USER')
-
+
def testTranslateable(self):
original = "<strings><BINGO translateable='false'>Yo yo hi there</BINGO></strings>"
gatherer = muppet_strings.MuppetStrings(original)
diff --git a/tools/grit/grit/gather/rc.py b/tools/grit/grit/gather/rc.py
index e7cdf5d..f0af109 100644
--- a/tools/grit/grit/gather/rc.py
+++ b/tools/grit/grit/gather/rc.py
@@ -44,15 +44,15 @@ _UNESCAPE_CHARS = dict([[value, key] for key, value in _ESCAPE_CHARS.items()])
class Section(regexp.RegexpGatherer):
'''A section from a resource file.'''
-
+
def __init__(self, section_text):
'''Creates a new object.
-
+
Args:
section_text: 'ID_SECTION_ID SECTIONTYPE\n.....\nBEGIN\n.....\nEND'
'''
regexp.RegexpGatherer.__init__(self, section_text)
-
+
# static method
def Escape(text):
'''Returns a version of 'text' with characters escaped that need to be
@@ -61,26 +61,26 @@ class Section(regexp.RegexpGatherer):
return _ESCAPE_CHARS[match.group()]
return _NEED_ESCAPE.sub(Replace, text)
Escape = staticmethod(Escape)
-
- # static method
+
+ # static method
def UnEscape(text):
'''Returns a version of 'text' with escaped characters unescaped.'''
def Replace(match):
return _UNESCAPE_CHARS[match.group()]
return _NEED_UNESCAPE.sub(Replace, text)
UnEscape = staticmethod(UnEscape)
-
+
def _RegExpParse(self, rexp, text_to_parse):
'''Overrides _RegExpParse to add shortcut group handling. Otherwise
the same.
'''
regexp.RegexpGatherer._RegExpParse(self, rexp, text_to_parse)
-
+
if not self.IsSkeleton() and len(self.GetTextualIds()) > 0:
group_name = self.GetTextualIds()[0]
for c in self.GetCliques():
c.AddToShortcutGroup(group_name)
-
+
# Static method
def FromFileImpl(rc_file, extkey, encoding, type):
'''Implementation of FromFile. Need to keep separate so we can have
@@ -88,14 +88,14 @@ class Section(regexp.RegexpGatherer):
'''
if isinstance(rc_file, types.StringTypes):
rc_file = util.WrapInputStream(file(rc_file, 'r'), encoding)
-
+
out = ''
begin_count = 0
for line in rc_file.readlines():
if len(out) > 0 or (line.strip().startswith(extkey) and
line.strip().split()[0] == extkey):
out += line
-
+
# we stop once we reach the END for the outermost block.
begin_count_was = begin_count
if len(out) > 0 and line.strip() == 'BEGIN':
@@ -104,31 +104,31 @@ class Section(regexp.RegexpGatherer):
begin_count -= 1
if begin_count_was == 1 and begin_count == 0:
break
-
+
if len(out) == 0:
raise exception.SectionNotFound('%s in file %s' % (extkey, rc_file))
return type(out)
FromFileImpl = staticmethod(FromFileImpl)
-
+
# static method
def FromFile(rc_file, extkey, encoding='cp1252'):
'''Retrieves the section of 'rc_file' that has the key 'extkey'. This is
matched against the start of a line, and that line and the rest of that
section in the RC file is returned.
-
+
If 'rc_file' is a filename, it will be opened for reading using 'encoding'.
Otherwise the 'encoding' parameter is ignored.
-
+
This method instantiates an object of type 'type' with the text from the
file.
-
+
Args:
rc_file: file('') | 'filename.rc'
extkey: 'ID_MY_DIALOG'
encoding: 'utf-8'
type: class to instantiate with text of section
-
+
Return:
type(text_of_section)
'''
@@ -154,7 +154,7 @@ class Dialog(Section):
# CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
# BS_AUTORADIOBUTTON,46,51,84,10
# END
-
+
# We are using a sorted set of keys, and we assume that the
# group name used for descriptions (type) will come after the "text"
# group in alphabetical order. We also assume that there cannot be
@@ -184,11 +184,11 @@ class Dialog(Section):
# Lines for controls that have only an ID and then just numbers
\s+[A-Z]+\s+(?P<id4>[A-Z0-9_]*[A-Z][A-Z0-9_]*)\s*,
''', re.MULTILINE | re.VERBOSE)
-
+
def Parse(self):
'''Knows how to parse dialog resource sections.'''
self._RegExpParse(self.dialog_re_, self.text_)
-
+
# static method
def FromFile(rc_file, extkey, encoding = 'cp1252'):
return Section.FromFileImpl(rc_file, extkey, encoding, Dialog)
@@ -197,10 +197,10 @@ class Dialog(Section):
class Menu(Section):
'''A resource section that contains a menu resource.'''
-
+
# A typical menu resource section looks something like this:
#
- # IDC_KLONK MENU
+ # IDC_KLONK MENU
# BEGIN
# POPUP "&File"
# BEGIN
@@ -227,7 +227,7 @@ class Menu(Section):
'in the menu. Please make sure that no two items in the same menu share '
'the same shortcut.'
)
-
+
# A dandy regexp to suck all the IDs and translateables out of a menu
# resource
menu_re_ = re.compile('''
@@ -240,7 +240,7 @@ class Menu(Section):
# Match the caption & ID of a MENUITEM
MENUITEM\s+"(?P<text2>.*?([^"]|""))"\s*,\s*(?P<id2>[A-Z0-9_]+)
''', re.MULTILINE | re.VERBOSE)
-
+
def Parse(self):
'''Knows how to parse menu resource sections. Because it is important that
menu shortcuts are unique within the menu, we return each menu as a single
@@ -249,7 +249,7 @@ class Menu(Section):
with instructions for the translators.'''
self.single_message_ = tclib.Message(description=self.MENU_MESSAGE_DESCRIPTION)
self._RegExpParse(self.menu_re_, self.text_)
-
+
# static method
def FromFile(rc_file, extkey, encoding = 'cp1252'):
return Section.FromFileImpl(rc_file, extkey, encoding, Menu)
@@ -258,7 +258,7 @@ class Menu(Section):
class Version(Section):
'''A resource section that contains a VERSIONINFO resource.'''
-
+
# A typical version info resource can look like this:
#
# VS_VERSION_INFO VERSIONINFO
@@ -297,7 +297,7 @@ class Version(Section):
#
# In addition to the above fields, VALUE fields named "Comments" and
# "LegalTrademarks" may also be translateable.
-
+
version_re_ = re.compile('''
# Match the ID on the first line
^(?P<id1>[A-Z0-9_]+)\s+VERSIONINFO
@@ -309,7 +309,7 @@ class Version(Section):
ProductName|Comments|LegalTrademarks
)",\s+"(?P<text1>.*?([^"]|""))"\s
''', re.MULTILINE | re.VERBOSE)
-
+
def Parse(self):
'''Knows how to parse VERSIONINFO resource sections.'''
self._RegExpParse(self.version_re_, self.text_)
@@ -321,18 +321,18 @@ class Version(Section):
def FromFile(rc_file, extkey, encoding = 'cp1252'):
return Section.FromFileImpl(rc_file, extkey, encoding, Version)
FromFile = staticmethod(FromFile)
-
+
class RCData(Section):
'''A resource section that contains some data .'''
# A typical rcdataresource section looks like this:
#
# IDR_BLAH RCDATA { 1, 2, 3, 4 }
-
+
dialog_re_ = re.compile('''
^(?P<id1>[A-Z0-9_]+)\s+RCDATA\s+(DISCARDABLE)?\s+\{.*?\}
''', re.MULTILINE | re.VERBOSE | re.DOTALL)
-
+
def Parse(self):
'''Knows how to parse RCDATA resource sections.'''
self._RegExpParse(self.dialog_re_, self.text_)
@@ -343,14 +343,14 @@ class RCData(Section):
'''
if isinstance(rc_file, types.StringTypes):
rc_file = util.WrapInputStream(file(rc_file, 'r'), encoding)
-
+
out = ''
begin_count = 0
openbrace_count = 0
for line in rc_file.readlines():
if len(out) > 0 or line.strip().startswith(extkey):
out += line
-
+
# we stop once balance the braces (could happen on one line)
begin_count_was = begin_count
if len(out) > 0:
@@ -360,10 +360,10 @@ class RCData(Section):
if ((begin_count_was == 1 and begin_count == 0) or
(openbrace_count > 0 and begin_count == 0)):
break
-
+
if len(out) == 0:
raise exception.SectionNotFound('%s in file %s' % (extkey, rc_file))
-
+
return RCData(out)
FromFile = staticmethod(FromFile)
@@ -371,7 +371,7 @@ class RCData(Section):
class Accelerators(Section):
'''An ACCELERATORS table.
'''
-
+
# A typical ACCELERATORS section looks like this:
#
# IDR_ACCELERATOR1 ACCELERATORS
@@ -380,7 +380,7 @@ class Accelerators(Section):
# "^V", ID_ACCELERATOR32771, ASCII, NOINVERT
# VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT
# END
-
+
accelerators_re_ = re.compile('''
# Match the ID on the first line
^(?P<id1>[A-Z0-9_]+)\s+ACCELERATORS\s+
@@ -391,11 +391,11 @@ class Accelerators(Section):
# Match accelerators specified as e.g. "^C"
\s+"[^"]*",\s+(?P<id3>[A-Z0-9_]+)\s*,
''', re.MULTILINE | re.VERBOSE)
-
+
def Parse(self):
'''Knows how to parse ACCELERATORS resource sections.'''
self._RegExpParse(self.accelerators_re_, self.text_)
-
+
# static method
def FromFile(rc_file, extkey, encoding = 'cp1252'):
return Section.FromFileImpl(rc_file, extkey, encoding, Accelerators)
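
As a reference point, the BEGIN/END bookkeeping in Section.FromFileImpl reduces to the loop below (a hypothetical helper; the real method also handles encodings and raises SectionNotFound when nothing matches):

def ExtractSection(lines, extkey):
  '''Returns the text from the line starting with extkey through the END that
  closes its outermost BEGIN (stripped-down sketch of Section.FromFileImpl).'''
  out = ''
  begin_count = 0
  for line in lines:
    if out or line.strip().split()[:1] == [extkey]:
      out += line
      begin_count_was = begin_count
      if line.strip() == 'BEGIN':
        begin_count += 1
      elif line.strip() == 'END':
        begin_count -= 1
      if begin_count_was == 1 and begin_count == 0:
        break
  return out

rc_lines = ['IDC_KLONKACC ACCELERATORS\n', 'BEGIN\n',
            '    "?", IDM_ABOUT, ASCII, ALT\n', 'END\n',
            'IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75\n']
ExtractSection(rc_lines, 'IDC_KLONKACC')   # stops before IDD_ABOUTBOX
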
diff --git a/tools/grit/grit/gather/rc_unittest.py b/tools/grit/grit/gather/rc_unittest.py
index 051b26f..636b421 100644
--- a/tools/grit/grit/gather/rc_unittest.py
+++ b/tools/grit/grit/gather/rc_unittest.py
@@ -20,7 +20,7 @@ from grit import util
class RcUnittest(unittest.TestCase):
- part_we_want = '''IDC_KLONKACC ACCELERATORS
+ part_we_want = '''IDC_KLONKACC ACCELERATORS
BEGIN
"?", IDM_ABOUT, ASCII, ALT
"/", IDM_ABOUT, ASCII, ALT
@@ -41,7 +41,7 @@ END
''' % self.part_we_want
f = StringIO.StringIO(buf)
-
+
out = rc.Section.FromFile(f, 'IDC_KLONKACC')
self.failUnless(out.GetText() == self.part_we_want)
@@ -74,7 +74,7 @@ END
self.failUnless(len(dlg.GetCliques()) == 6)
self.failUnless(dlg.GetCliques()[1].GetMessage().GetRealContent() ==
'klonk Version "yibbee" 1.0')
-
+
transl = dlg.Translate('en')
self.failUnless(transl.strip() == dlg.GetText().strip())
@@ -89,7 +89,7 @@ BEGIN
END
''')
dlg.Parse()
-
+
alt_dlg = rc.Dialog('''IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "XXXXXXXXX"
@@ -100,14 +100,14 @@ BEGIN
END
''')
alt_dlg.Parse()
-
+
transl = dlg.Translate('en', skeleton_gatherer=alt_dlg)
self.failUnless(transl.count('040704') and
transl.count('110978'))
self.failUnless(transl.count('Yipee skippy'))
def testMenu(self):
- menu = rc.Menu('''IDC_KLONK MENU
+ menu = rc.Menu('''IDC_KLONK MENU
BEGIN
POPUP "&File """
BEGIN
@@ -117,7 +117,7 @@ BEGIN
BEGIN
MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
END
- MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
+ MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
ID_FILE_THISISAVERYLONGMENUCAPTIONTOTRYTOSEEIFWECANMAKETHEIDGOTOACONTINUATIONLINE
END
POPUP "&Help"
@@ -125,13 +125,13 @@ BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END''')
-
+
menu.Parse()
self.failUnless(len(menu.GetTextualIds()) == 6)
self.failUnless(len(menu.GetCliques()) == 1)
self.failUnless(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) ==
9)
-
+
transl = menu.Translate('en')
self.failUnless(transl.strip() == menu.GetText().strip())
@@ -173,7 +173,7 @@ END
version.Parse()
self.failUnless(len(version.GetTextualIds()) == 1)
self.failUnless(len(version.GetCliques()) == 4)
-
+
transl = version.Translate('en')
self.failUnless(transl.strip() == version.GetText().strip())
@@ -200,8 +200,8 @@ BEGIN
END'''.strip())
dialog.Parse()
self.failUnless(len(dialog.GetTextualIds()) == 10)
-
-
+
+
def testRegressionDialogBox2(self):
dialog = rc.Dialog('''
IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE DIALOG DISCARDABLE 0, 0, 264, 220
@@ -213,7 +213,7 @@ BEGIN
PUSHBUTTON "Add Filter...",IDC_SIDEBAR_EMAIL_ADD_FILTER,196,38,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_EMAIL_REMOVE,196,174,50,14
PUSHBUTTON "", IDC_SIDEBAR_EMAIL_HIDDEN, 200, 178, 5, 5, NOT WS_VISIBLE
- LISTBOX IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
+ LISTBOX IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
LTEXT "You can prevent certain emails from showing up in the sidebar with a filter.",
IDC_STATIC,16,18,234,18
@@ -221,7 +221,7 @@ END'''.strip())
dialog.Parse()
self.failUnless('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds())
-
+
def testRegressionMenuId(self):
menu = rc.Menu('''
IDR_HYPERMENU_FOLDER MENU
@@ -247,7 +247,7 @@ END'''.strip())
transl = menu.Translate('en')
# Shouldn't find \\n (the \n shouldn't be changed to \\n)
self.failUnless(transl.find('\\\\n') == -1)
-
+
def testRegressionTabs(self):
menu = rc.Menu('''
IDR_HYPERMENU_FOLDER MENU
@@ -273,7 +273,7 @@ END'''.strip())
original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif'
unescaped = rc.Section.UnEscape(original)
self.failUnless(unescaped == '..\\..\\trs\\res\\nav_first.gif')
-
+
def testRegressionDialogItemsTextOnly(self):
dialog = rc.Dialog('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
@@ -294,7 +294,7 @@ END''')
for c in dialog.GetCliques()]
self.failUnless('Select search buttons and options' in translateables)
self.failUnless('Use Google site:' in translateables)
-
+
def testAccelerators(self):
acc = rc.Accelerators('''\
IDR_ACCELERATOR1 ACCELERATORS
@@ -315,7 +315,7 @@ END
def testRegressionEmptyString(self):
dlg = rc.Dialog('''\
IDD_CONFIRM_QUIT_GD_DLG DIALOGEX 0, 0, 267, 108
-STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_CAPTION
EXSTYLE WS_EX_TOPMOST
CAPTION "Google Desktop"
@@ -326,15 +326,15 @@ BEGIN
ICON 32514,IDC_STATIC,7,9,21,20
EDITTEXT IDC_TEXTBOX,34,7,231,60,ES_MULTILINE | ES_READONLY | NOT WS_BORDER
CONTROL "",
- IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
+ IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
WS_TABSTOP,33,70,231,10
END''')
dlg.Parse()
-
+
def Check():
self.failUnless(transl.count('IDC_ENABLE_GD_AUTOSTART'))
self.failUnless(transl.count('END'))
-
+
transl = dlg.Translate('de', pseudo_if_not_available=True,
fallback_to_english=True)
Check()
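
The skeleton_gatherer path exercised in the dialog test above can be shown in isolation: translateable strings come from the first gatherer, everything else (coordinates, styles) from the second. The two dialog snippets below are made up but mirror the test's shape:

from grit.gather import rc

dlg = rc.Dialog('IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75\n'
                'CAPTION "About Klonk"\n'
                'BEGIN\n'
                '    LTEXT "Yipee skippy",IDC_STATIC,49,10,119,8\n'
                'END\n')
alt = rc.Dialog('IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75\n'
                'CAPTION "XXXXXXXXX"\n'
                'BEGIN\n'
                '    LTEXT "ZZZZ",IDC_STATIC,49,10,119,8\n'
                'END\n')
dlg.Parse()
alt.Parse()
transl = dlg.Translate('en', skeleton_gatherer=alt)
# transl keeps alt's nontranslateable pieces (e.g. '040704') but dlg's captions.
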
diff --git a/tools/grit/grit/gather/regexp.py b/tools/grit/grit/gather/regexp.py
index c1e1a93..88c7413 100644
--- a/tools/grit/grit/gather/regexp.py
+++ b/tools/grit/grit/gather/regexp.py
@@ -30,7 +30,7 @@ class RegexpGatherer(interface.GathererBase):
'LTEXT': 'This is the text for a label',
'PUSHBUTTON': 'This is the text for a button',
}
-
+
def __init__(self, text):
interface.GathererBase.__init__(self)
# Original text of what we're parsing
@@ -53,38 +53,38 @@ class RegexpGatherer(interface.GathererBase):
# Number to use for the next placeholder name. Used only if single_message
# is not None
self.ph_counter_ = 1
-
+
def GetText(self):
'''Returns the original text of the section'''
return self.text_
-
+
def Escape(self, text):
'''Subclasses can override. Base impl is identity.
'''
return text
-
+
def UnEscape(self, text):
'''Subclasses can override. Base impl is identity.
'''
return text
-
+
def GetTextualIds(self):
'''Returns the list of textual IDs that need to be defined for this
resource section to compile correctly.'''
return self.ids_
-
+
def GetCliques(self):
'''Returns the message cliques for each translateable message in the
resource section.'''
return filter(lambda x: isinstance(x, clique.MessageClique), self.skeleton_)
-
+
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
if len(self.skeleton_) == 0:
raise exception.NotReady()
if skeleton_gatherer:
assert len(skeleton_gatherer.skeleton_) == len(self.skeleton_)
-
+
out = []
for ix in range(len(self.skeleton_)):
if isinstance(self.skeleton_[ix], types.StringTypes):
@@ -101,7 +101,7 @@ class RegexpGatherer(interface.GathererBase):
msg = self.skeleton_[ix].MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english)
-
+
def MyEscape(text):
return self.Escape(text)
text = msg.GetRealContent(escaping_function=MyEscape)
@@ -121,7 +121,7 @@ class RegexpGatherer(interface.GathererBase):
if (self.translatable_chunk_):
message = self.skeleton_[len(self.skeleton_) - 1].GetMessage()
message.SetDescription(description)
-
+
def Parse(self):
'''Parses the section. Implemented by subclasses. Idempotent.'''
raise NotImplementedError()
@@ -134,14 +134,14 @@ class RegexpGatherer(interface.GathererBase):
self.single_message_.AppendPlaceholder(ph)
else:
self.skeleton_.append(chunk)
-
+
def _AddTranslateableChunk(self, chunk):
'''Adds a translateable chunk. It will be unescaped before being added.'''
# We don't want empty messages since they are redundant and the TC
# doesn't allow them.
if chunk == '':
return
-
+
unescaped_text = self.UnEscape(chunk)
if self.single_message_:
self.single_message_.AppendText(unescaped_text)
@@ -149,23 +149,23 @@ class RegexpGatherer(interface.GathererBase):
self.skeleton_.append(self.uberclique.MakeClique(
tclib.Message(text=unescaped_text)))
self.translatable_chunk_ = True
-
+
def _AddTextualId(self, id):
self.ids_.append(id)
-
+
def _RegExpParse(self, regexp, text_to_parse):
'''An implementation of Parse() that can be used for resource sections that
can be parsed using a single multi-line regular expression.
-
+
All translateables must be in named groups that have names starting with
'text'. All textual IDs must be in named groups that have names starting
with 'id'. All type definitions that can be included in the description
field for contextualization purposes should have a name that starts with
'type'.
-
+
Args:
regexp: re.compile('...', re.MULTILINE)
- text_to_parse:
+ text_to_parse:
'''
if self.have_parsed_:
return
@@ -192,9 +192,9 @@ class RegexpGatherer(interface.GathererBase):
# group in alphabetical order. We also assume that there cannot be
# more than one description per regular expression match.
self.AddDescriptionElement(groups[group])
-
+
self._AddNontranslateableChunk(text_to_parse[chunk_start:])
-
+
if self.single_message_:
self.skeleton_.append(self.uberclique.MakeClique(self.single_message_))
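
The naming convention _RegExpParse() relies on (groups starting with 'text' become cliques, 'id' become textual IDs, 'type' feeds the description) is easiest to see with a toy pattern; neither the regular expression nor the RC line below is grit's real one:

import re

label_re = re.compile(
    r'^\s*(?P<type1>LTEXT)\s+"(?P<text1>[^"]*)"\s*,\s*(?P<id1>[A-Z0-9_]+)',
    re.MULTILINE)
m = label_re.search('    LTEXT "Please wait...",IDC_STATIC_WAIT,7,14,100,8\n')
m.group('text1')   # 'Please wait...'   -> unescaped, turned into a MessageClique
m.group('id1')     # 'IDC_STATIC_WAIT'  -> recorded via _AddTextualId()
m.group('type1')   # 'LTEXT'            -> used for the description (cf. DescriptionMapping_)
# Text between matches is appended verbatim to the nontranslateable skeleton.
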
diff --git a/tools/grit/grit/gather/tr_html.py b/tools/grit/grit/gather/tr_html.py
index 35c70b2..269e6f2 100644
--- a/tools/grit/grit/gather/tr_html.py
+++ b/tools/grit/grit/gather/tr_html.py
@@ -118,7 +118,7 @@ _NONTRANSLATEABLES = re.compile(r'''
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
- <\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
+ <\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
@@ -202,13 +202,13 @@ class HtmlChunks(object):
chunks, where each chunk is either translateable or non-translateable.
The chunks are unmodified sections of the original document, so concatenating
the text of all chunks would result in the original document.'''
-
+
def InTranslateable(self):
return self.last_translateable != -1
-
+
def Rest(self):
return self.text_[self.current:]
-
+
def StartTranslateable(self):
assert not self.InTranslateable()
if self.current != 0:
@@ -220,7 +220,7 @@ class HtmlChunks(object):
self.chunk_start = self.last_nontranslateable + 1
self.last_translateable = self.current
self.last_nontranslateable = -1
-
+
def EndTranslateable(self):
assert self.InTranslateable()
# Append a translateable chunk
@@ -229,10 +229,10 @@ class HtmlChunks(object):
self.chunk_start = self.last_translateable + 1
self.last_translateable = -1
self.last_nontranslateable = self.current
-
+
def AdvancePast(self, match):
self.current += match.end()
-
+
def AddChunk(self, translateable, text):
'''Adds a chunk to self, removing linebreaks and duplicate whitespace
if appropriate.
@@ -242,68 +242,68 @@ class HtmlChunks(object):
text = text.replace('\r', ' ')
text = text.replace(' ', ' ')
text = text.replace(' ', ' ')
-
+
m = _DESCRIPTION_COMMENT.search(text)
if m:
self.last_description = m.group('description')
# remove the description from the output text
text = _DESCRIPTION_COMMENT.sub('', text)
-
+
if translateable:
description = self.last_description
self.last_description = ''
else:
description = ''
-
+
if text != '':
self.chunks_.append((translateable, text, description))
-
+
def Parse(self, text):
'''Parses self.text_ into an intermediate format stored in self.chunks_
which is translateable and nontranslateable chunks. Also returns
self.chunks_
-
+
Return:
[chunk1, chunk2, chunk3, ...] (instances of class Chunk)
'''
#
# Chunker state
#
-
+
self.text_ = text
-
+
# A list of tuples (is_translateable, text) which represents the document
# after chunking.
self.chunks_ = []
-
+
# Start index of the last chunk, whether translateable or not
self.chunk_start = 0
-
+
# Index of the last for-sure translateable character if we are parsing
# a translateable chunk, -1 to indicate we are not in a translateable chunk.
# This is needed so that we don't include trailing whitespace in the
# translateable chunk (whitespace is neutral).
self.last_translateable = -1
-
+
# Index of the last for-sure nontranslateable character if we are parsing
# a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
# This is needed to make sure we can group e.g. "<b>Hello</b> there"
# together instead of just "Hello</b> there" which would be much worse
# for translation.
self.last_nontranslateable = -1
-
+
# Index of the character we're currently looking at.
self.current = 0
-
+
# The name of the last block element parsed.
self.last_element_ = ''
-
+
# The last explicit description we found.
self.last_description = ''
-
+
while self.current < len(self.text_):
_DebugPrint('REST: %s' % self.text_[self.current:self.current+60])
-
+
# First try to match whitespace
m = _WHITESPACE.match(self.Rest())
if m:
@@ -319,7 +319,7 @@ class HtmlChunks(object):
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
-
+
# Then we try to match nontranslateables
m = _NONTRANSLATEABLES.match(self.Rest())
if m:
@@ -328,7 +328,7 @@ class HtmlChunks(object):
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
-
+
# Now match all other HTML element tags (opening, closing, or empty, we
# don't care).
m = _ELEMENT.match(self.Rest())
@@ -338,7 +338,7 @@ class HtmlChunks(object):
self.last_element_ = element_name
if self.InTranslateable():
self.EndTranslateable()
-
+
# Check for "special" elements, i.e. ones that have a translateable
# attribute, and handle them correctly. Note that all of the
# "special" elements are block tags, so no need to check for this
@@ -349,7 +349,7 @@ class HtmlChunks(object):
for group in sm.groupdict().keys():
if sm.groupdict()[group]:
break
-
+
# First make a nontranslateable chunk up to and including the
# quote before the translateable attribute value
self.AddChunk(False, self.text_[
@@ -358,7 +358,7 @@ class HtmlChunks(object):
self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
# Finally correct the data invariant for the parser
self.chunk_start = self.current + sm.end(group)
-
+
self.last_nontranslateable = self.current + m.end() - 1
elif self.InTranslateable():
# We're in a translateable and the tag is an inline tag, so we
@@ -366,7 +366,7 @@ class HtmlChunks(object):
self.last_translateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
-
+
# Anything else we find must be translateable, so we advance one character
# at a time until one of the above matches.
if not self.InTranslateable():
@@ -374,13 +374,13 @@ class HtmlChunks(object):
else:
self.last_translateable = self.current
self.current += 1
-
+
# Close the final chunk
if self.InTranslateable():
self.AddChunk(True, self.text_[self.chunk_start : ])
else:
self.AddChunk(False, self.text_[self.chunk_start : ])
-
+
return self.chunks_
@@ -388,14 +388,14 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
'''Takes a bit of HTML, which must contain only "inline" HTML elements,
and changes it into a tclib.Message. This involves escaping any entities and
replacing any HTML code with placeholders.
-
+
If include_block_tags is true, no error will be given if block tags (e.g.
<p> or <br>) are included in the HTML.
-
+
Args:
html: 'Hello <b>[USERNAME]</b>, how&nbsp;<i>are</i> you?'
include_block_tags: False
-
+
Return:
tclib.Message('Hello START_BOLD1USERNAMEEND_BOLD, '
'howNBSPSTART_ITALICareEND_ITALIC you?',
@@ -408,26 +408,26 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
# Approach is:
# - first placeholderize, finding <elements>, [REPLACEABLES] and &nbsp;
# - then escape all character entities in text in-between placeholders
-
+
parts = [] # List of strings (for text chunks) and tuples (ID, original)
# for placeholders
-
+
count_names = {} # Map of base names to number of times used
end_names = {} # Map of base names to stack of end tags (for correct nesting)
-
+
def MakeNameClosure(base, type = ''):
'''Returns a closure that can be called once all names have been allocated
to return the final name of the placeholder. This allows us to minimally
number placeholders for non-overlap.
-
+
Also ensures that END_XXX_Y placeholders have the same Y as the
corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
type.
-
+
Args:
base: 'phname'
type: '' | 'begin' | 'end'
-
+
Return:
Closure()
'''
@@ -439,7 +439,7 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
count_names[name] += 1
else:
count_names[name] = 1
-
+
def MakeFinalName(name_ = name, index = count_names[name] - 1):
if (type.lower() == 'end' and
base in end_names.keys() and len(end_names[base])):
@@ -455,20 +455,20 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
end_names[base].append(end_name)
else:
end_names[base] = [end_name]
-
+
return name_
-
+
return MakeFinalName
-
+
current = 0
-
+
while current < len(html):
m = _NBSP.match(html[current:])
if m:
parts.append((MakeNameClosure('SPACE'), m.group()))
current += m.end()
continue
-
+
m = _REPLACEABLE.match(html[current:])
if m:
# Replaceables allow - but placeholders don't, so replace - with _
@@ -476,7 +476,7 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
parts.append((ph_name, m.group()))
current += m.end()
continue
-
+
m = _SPECIAL_ELEMENT.match(html[current:])
if m:
if not include_block_tags:
@@ -493,7 +493,7 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
html[current + m.end(group) : current + m.end()]))
current += m.end()
continue
-
+
m = _ELEMENT.match(html[current:])
if m:
element_name = m.group('element').lower()
@@ -501,7 +501,7 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
raise exception.BlockTagInTranslateableChunk(html[current:])
if element_name in _HTML_PLACEHOLDER_NAMES: # use meaningful names
element_name = _HTML_PLACEHOLDER_NAMES[element_name]
-
+
# Make a name for the placeholder
type = ''
if not m.group('empty'):
@@ -512,13 +512,13 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
parts.append((MakeNameClosure(element_name, type), m.group()))
current += m.end()
continue
-
+
if len(parts) and isinstance(parts[-1], types.StringTypes):
parts[-1] += html[current]
else:
parts.append(html[current])
current += 1
-
+
msg_text = ''
placeholders = []
for part in parts:
@@ -529,36 +529,36 @@ def HtmlToMessage(html, include_block_tags=False, description=''):
placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
else:
msg_text += part
-
+
msg = tclib.Message(text=msg_text, placeholders=placeholders,
description=description)
content = msg.GetContent()
for ix in range(len(content)):
if isinstance(content[ix], types.StringTypes):
content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)
-
+
return msg
class TrHtml(interface.GathererBase):
'''Represents a document or message in the template format used by
Total Recall for HTML documents.'''
-
+
def __init__(self, text):
'''Creates a new object that represents 'text'.
Args:
text: '<html>...</html>'
'''
super(type(self), self).__init__()
-
- self.text_ = text
+
+ self.text_ = text
self.have_parsed_ = False
self.skeleton_ = [] # list of strings and MessageClique objects
-
+
def GetText(self):
'''Returns the original text of the HTML document'''
return self.text_
-
+
def GetCliques(self):
'''Returns the message cliques for each translateable message in the
document.'''
@@ -568,14 +568,14 @@ class TrHtml(interface.GathererBase):
skeleton_gatherer=None, fallback_to_english=False):
'''Returns this document with translateable messages filled with
the translation for language 'lang'.
-
+
Args:
lang: 'en'
pseudo_if_not_available: True
-
+
Return:
'ID_THIS_SECTION TYPE\n...BEGIN\n "Translated message"\n......\nEND
-
+
Raises:
grit.exception.NotReady() if used before Parse() has been successfully
called.
@@ -584,9 +584,9 @@ class TrHtml(interface.GathererBase):
'''
if len(self.skeleton_) == 0:
raise exception.NotReady()
-
+
# TODO(joi) Implement support for skeleton gatherers here.
-
+
out = []
for item in self.skeleton_:
if isinstance(item, types.StringTypes):
@@ -602,21 +602,21 @@ class TrHtml(interface.GathererBase):
# We escape " characters to increase the chance that attributes
# will be properly escaped.
out.append(util.EscapeHtml(content, True))
-
+
return ''.join(out)
-
-
+
+
# Parsing is done in two phases: First, we break the document into
# translateable and nontranslateable chunks. Second, we run through each
# translateable chunk and insert placeholders for any HTML elements, unescape
# escaped characters, etc.
- def Parse(self):
+ def Parse(self):
if self.have_parsed_:
return
self.have_parsed_ = True
text = self.text_
-
+
# First handle the silly little [!]-prefixed header because it's not
# handled by our HTML parsers.
m = _SILLY_HEADER.match(text)
@@ -626,16 +626,16 @@ class TrHtml(interface.GathererBase):
tclib.Message(text=text[m.start('title'):m.end('title')])))
self.skeleton_.append(text[m.end('title') : m.end()])
text = text[m.end():]
-
+
chunks = HtmlChunks().Parse(text)
-
+
for chunk in chunks:
if chunk[0]: # Chunk is translateable
self.skeleton_.append(self.uberclique.MakeClique(
HtmlToMessage(chunk[1], description=chunk[2])))
else:
self.skeleton_.append(chunk[1])
-
+
# Go through the skeleton and change any messages that consist solely of
# placeholders and whitespace into nontranslateable strings.
for ix in range(len(self.skeleton_)):
@@ -649,20 +649,20 @@ class TrHtml(interface.GathererBase):
break
if not got_text:
self.skeleton_[ix] = msg.GetRealContent()
-
-
+
+
# Static method
def FromFile(html, extkey=None, encoding = 'utf-8'):
'''Creates a TrHtml object from the contents of 'html' which are decoded
using 'encoding'. Returns a new TrHtml object, upon which Parse() has not
been called.
-
+
Args:
html: file('') | 'filename.html'
extkey: ignored
encoding: 'utf-8' (note that encoding is ignored if 'html' is not a file
name but instead an open file or file-like object)
-
+
Return:
TrHtml(text_of_file)
'''
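
Taken together, the chunker and HtmlToMessage behave as the unit tests further down in this change exercise them:

from grit.gather import tr_html

chunks = tr_html.HtmlChunks().Parse('<p>Hello <b>dear</b> how <i>are</i>you?<p>Fine!')
# [(False, '<p>', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
#  (False, '<p>', ''), (True, 'Fine!', '')]

msg = tr_html.HtmlToMessage(
    'Hello <b>[USERNAME]</b>, &lt;how&gt;&nbsp;<i>are</i> you?')
msg.GetPresentableContent()
# 'Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, <how>&nbsp;BEGIN_ITALICareEND_ITALIC you?'
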
diff --git a/tools/grit/grit/gather/tr_html_unittest.py b/tools/grit/grit/gather/tr_html_unittest.py
index b02305d..e0c96ba 100644
--- a/tools/grit/grit/gather/tr_html_unittest.py
+++ b/tools/grit/grit/gather/tr_html_unittest.py
@@ -26,24 +26,24 @@ class ParserUnittest(unittest.TestCase):
self.failUnless(chunks == [
(False, '<p>', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, '<p>', ''), (True, 'Fine!', '')])
-
+
chunks = p.Parse('<p> Hello <b>dear</b> how <i>are</i>you? <p>Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, ' <p>', ''), (True, 'Fine!', '')])
-
+
chunks = p.Parse('<p> Hello <b>dear how <i>are you? <p> Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear how <i>are you?', ''),
(False, ' <p> ', ''), (True, 'Fine!', '')])
-
+
# Ensure translateable sections that start with inline tags contain
# the starting inline tag.
chunks = p.Parse('<b>Hello!</b> how are you?<p><i>I am fine.</i>')
self.failUnless(chunks == [
(True, '<b>Hello!</b> how are you?', ''), (False, '<p>', ''),
(True, '<i>I am fine.</i>', '')])
-
+
# Ensure translateable sections that end with inline tags contain
# the ending inline tag.
chunks = p.Parse("Hello! How are <b>you?</b><p><i>I'm fine!</i>")
@@ -65,14 +65,14 @@ class ParserUnittest(unittest.TestCase):
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bi ngo !'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
-
+
# In this case, because the explicit description appears after the first
# translateable, it will actually apply to the second translateable.
chunks = p.Parse('<B>Hello!</B> how are you?<!-- desc=bingo! --><P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', ''), (False, '<P>', ''),
(True, '<I>I am fine.</I>', 'bingo!')])
-
+
# Check that replaceables within block tags (where attributes would go) are
# handled correctly.
chunks = p.Parse('<b>Hello!</b> how are you?<p [BINGO] [$~BONGO~$]>'
@@ -81,17 +81,17 @@ class ParserUnittest(unittest.TestCase):
(True, '<b>Hello!</b> how are you?', ''),
(False, '<p [BINGO] [$~BONGO~$]>', ''),
(True, '<i>I am fine.</i>', '')])
-
+
# Check that the contents of preformatted tags preserve line breaks.
chunks = p.Parse('<textarea>Hello\nthere\nhow\nare\nyou?</textarea>')
self.failUnless(chunks == [(False, '<textarea>', ''),
(True, 'Hello\nthere\nhow\nare\nyou?', ''), (False, '</textarea>', '')])
-
+
# ...and that other tags' line breaks are converted to spaces
chunks = p.Parse('<p>Hello\nthere\nhow\nare\nyou?</p>')
self.failUnless(chunks == [(False, '<p>', ''),
(True, 'Hello there how are you?', ''), (False, '</p>', '')])
-
+
def testTranslateableAttributes(self):
p = tr_html.HtmlChunks()
@@ -107,8 +107,8 @@ class ParserUnittest(unittest.TestCase):
(False, '"><input type="button" value="', ''), (True, 'hello', ''),
(False, '"><input type=\'text\' value=\'', ''), (True, 'Howdie', ''),
(False, '\'>', '')])
-
-
+
+
def testTranslateableHtmlToMessage(self):
msg = tr_html.HtmlToMessage(
'Hello <b>[USERNAME]</b>, &lt;how&gt;&nbsp;<i>are</i> you?')
@@ -116,13 +116,13 @@ class ParserUnittest(unittest.TestCase):
self.failUnless(pres ==
'Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, '
'<how>&nbsp;BEGIN_ITALICareEND_ITALIC you?')
-
+
msg = tr_html.HtmlToMessage('<b>Hello</b><I>Hello</I><b>Hello</b>')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_BOLD_1HelloEND_BOLD_1BEGIN_ITALICHelloEND_ITALIC'
'BEGIN_BOLD_2HelloEND_BOLD_2')
-
+
# Check that nesting (of the <font> tags) is handled correctly - i.e. that
# the closing placeholder numbers match the opening placeholders.
msg = tr_html.HtmlToMessage(
@@ -135,11 +135,11 @@ class ParserUnittest(unittest.TestCase):
'BEGIN_FONT_1BEGIN_FONT_2Update!END_FONT_2 BEGIN_LINK'
'New FeaturesEND_LINK: Now search PDFs, MP3s, Firefox '
'web history, and moreEND_FONT_1')
-
+
msg = tr_html.HtmlToMessage('''<a href='[$~URL~$]'><b>[NUM][CAT]</b></a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres == 'BEGIN_LINKBEGIN_BOLDX_NUM_XX_CAT_XEND_BOLDEND_LINK')
-
+
msg = tr_html.HtmlToMessage(
'''<font size=-1><a class=q onClick='return window.qs?qs(this):1' '''
'''href='http://[WEBSERVER][SEARCH_URI]'>Desktop</a></font>&nbsp;&nbsp;'''
@@ -147,20 +147,20 @@ class ParserUnittest(unittest.TestCase):
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'''BEGIN_FONTBEGIN_LINKDesktopEND_LINKEND_FONTSPACE''')
-
+
msg = tr_html.HtmlToMessage(
'''<br><br><center><font size=-2>&copy;2005 Google </font></center>''', 1)
pres = msg.GetPresentableContent()
self.failUnless(pres ==
u'BEGIN_BREAK_1BEGIN_BREAK_2BEGIN_CENTERBEGIN_FONT\xa92005'
u' Google END_FONTEND_CENTER')
-
+
msg = tr_html.HtmlToMessage(
'''&nbsp;-&nbsp;<a class=c href=[$~CACHE~$]>Cached</a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'&nbsp;-&nbsp;BEGIN_LINKCachedEND_LINK')
-
+
# Check that upper-case tags are handled correctly.
msg = tr_html.HtmlToMessage(
'''You can read the <A HREF='http://desktop.google.com/privacypolicy.'''
@@ -170,7 +170,7 @@ class ParserUnittest(unittest.TestCase):
self.failUnless(pres ==
'You can read the BEGIN_LINK_1Privacy PolicyEND_LINK_1 and '
'BEGIN_LINK_2Privacy FAQEND_LINK_2 online.')
-
+
# Check that tags with linebreaks immediately preceding them are handled
# correctly.
msg = tr_html.HtmlToMessage(
@@ -195,7 +195,7 @@ class TrHtmlUnittest(unittest.TestCase):
html.Parse()
self.failUnless(html.skeleton_[3].GetMessage().GetPresentableContent() ==
'BEGIN_LINKPreferences&nbsp;HelpEND_LINK')
-
+
def testSubmitAttribute(self):
html = tr_html.TrHtml('''</td>
<td class="header-element"><input type=submit value="Save Preferences"
@@ -204,7 +204,7 @@ name=submit2></td>
html.Parse()
self.failUnless(html.skeleton_[1].GetMessage().GetPresentableContent() ==
'Save Preferences')
-
+
def testWhitespaceAfterInlineTag(self):
'''Test that even if there is whitespace after an inline tag at the start
of a translateable section the inline tag will be included.
@@ -213,7 +213,7 @@ name=submit2></td>
html.Parse()
self.failUnless(html.skeleton_[1].GetMessage().GetRealContent() ==
'<font size=-1> Hello</font>')
-
+
def testSillyHeader(self):
html = tr_html.TrHtml('''[!]
title\tHello
@@ -229,16 +229,16 @@ bla
# Right after the translateable the nontranslateable should start with
# a linebreak (this catches a bug we had).
self.failUnless(html.skeleton_[2][0] == '\n')
-
-
+
+
def testExplicitDescriptions(self):
html = tr_html.TrHtml('Hello [USER]<br/><!-- desc=explicit --><input type="button">Go!</input>')
html.Parse()
msg = html.GetCliques()[1].GetMessage()
self.failUnless(msg.GetDescription() == 'explicit')
self.failUnless(msg.GetRealContent() == 'Go!')
-
-
+
+
def testRegressionInToolbarAbout(self):
html = tr_html.TrHtml.FromFile(
util.PathFromRoot(r'grit/test/data/toolbar_about.html'))
@@ -249,11 +249,11 @@ bla
if content.count('De parvis grandis acervus erit'):
self.failIf(content.count('$/translate'))
-
+
def HtmlFromFileWithManualCheck(self, f):
html = tr_html.TrHtml.FromFile(f)
html.Parse()
-
+
# For manual results inspection only...
list = []
for item in html.skeleton_:
@@ -261,40 +261,40 @@ bla
list.append(item)
else:
list.append(item.GetMessage().GetPresentableContent())
-
+
return html
def testPrivacyHtml(self):
html = self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/test/data/privacy.html'))
-
+
self.failUnless(html.skeleton_[1].GetMessage().GetRealContent() ==
'Privacy and Google Desktop Search')
- self.failUnless(html.skeleton_[3].startswith('<'))
+ self.failUnless(html.skeleton_[3].startswith('<'))
self.failUnless(len(html.skeleton_) > 10)
def testPreferencesHtml(self):
html = self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/test/data/preferences.html'))
-
+
# Verify that we don't get '[STATUS-MESSAGE]' as the original content of
# one of the MessageClique objects (it would be a placeholder-only message
# and we're supposed to have stripped those).
-
+
for item in filter(lambda x: isinstance(x, clique.MessageClique),
html.skeleton_):
if (item.GetMessage().GetRealContent() == '[STATUS-MESSAGE]' or
item.GetMessage().GetRealContent() == '[ADDIN-DO] [ADDIN-OPTIONS]'):
self.fail()
-
+
self.failUnless(len(html.skeleton_) > 100)
-
+
def AssertNumberOfTranslateables(self, files, num):
'''Fails if any of the files in files don't have exactly
num translateable sections.
-
+
Args:
files: ['file1', 'file2']
num: 3
@@ -303,14 +303,14 @@ bla
f = util.PathFromRoot(r'grit/test/data/%s' % f)
html = self.HtmlFromFileWithManualCheck(f)
self.failUnless(len(html.GetCliques()) == num)
-
+
def testFewTranslateables(self):
self.AssertNumberOfTranslateables(['browser.html', 'email_thread.html',
'header.html', 'mini.html',
'oneclick.html', 'script.html',
'time_related.html', 'versions.html'], 0)
self.AssertNumberOfTranslateables(['footer.html', 'hover.html'], 1)
-
+
def testOtherHtmlFilesForManualInspection(self):
files = [
'about.html', 'bad_browser.html', 'cache_prefix.html',
@@ -326,7 +326,7 @@ bla
for f in files:
self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/test/data/%s' % f))
-
+
def testTranslate(self):
# Note that the English translation of documents that use character
# literals (e.g. &copy;) will not be the same as the original document
@@ -363,27 +363,27 @@ bla
trans = html.Translate('en')
if (html.GetText() != trans):
self.fail()
-
-
+
+
def testHtmlToMessageWithBlockTags(self):
msg = tr_html.HtmlToMessage(
'Hello<p>Howdie<img alt="bingo" src="image.gif">', True)
result = msg.GetPresentableContent()
self.failUnless(
result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')
-
+
msg = tr_html.HtmlToMessage(
'Hello<p>Howdie<input type="button" value="bingo">', True)
result = msg.GetPresentableContent()
self.failUnless(
result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')
-
-
+
+
def testHtmlToMessageRegressions(self):
msg = tr_html.HtmlToMessage(' - ', True)
result = msg.GetPresentableContent()
self.failUnless(result == ' - ')
-
+
def testEscapeUnescaped(self):
text = '&copy;&nbsp; & &quot;&lt;hello&gt;&quot;'
@@ -401,7 +401,7 @@ bla
html = self.HtmlFromFileWithManualCheck(util.PathFromRoot(
r'grit/test/data/ko_oem_enable_bug.html'))
self.failUnless(True)
-
+
def testRegressionCpuHang(self):
# If this regression occurs, the unit test will never return
html = tr_html.TrHtml(
diff --git a/tools/grit/grit/gather/txt.py b/tools/grit/grit/gather/txt.py
index 5452e9d..428c202 100644
--- a/tools/grit/grit/gather/txt.py
+++ b/tools/grit/grit/gather/txt.py
@@ -17,7 +17,7 @@ class TxtFile(interface.GathererBase):
'''A text file gatherer. Very simple, all text from the file becomes a
single clique.
'''
-
+
def __init__(self, contents):
super(type(self), self).__init__()
self.text_ = contents
@@ -26,24 +26,24 @@ class TxtFile(interface.GathererBase):
def Parse(self):
self.clique_ = self.uberclique.MakeClique(tclib.Message(text=self.text_))
pass
-
+
def GetText(self):
'''Returns the text of what is being gathered.'''
return self.text_
-
+
def GetTextualIds(self):
return []
-
+
def GetCliques(self):
'''Returns the MessageClique objects for all translateable portions.'''
return [self.clique_]
-
+
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
return self.clique_.MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english).GetRealContent()
-
+
def FromFile(filename_or_stream, extkey=None, encoding = 'cp1252'):
if isinstance(filename_or_stream, types.StringTypes):
filename_or_stream = util.WrapInputStream(file(filename_or_stream, 'rb'), encoding)
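
A short usage sketch for the gatherer above; the sample text is made up:

from grit.gather import txt

gatherer = txt.TxtFile('First run? See the product site for setup help.\n')
gatherer.Parse()
len(gatherer.GetCliques())   # 1, the whole file is a single message
gatherer.Translate('fr')     # full text, translated or pseudotranslated
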
diff --git a/tools/grit/grit/grd_reader.py b/tools/grit/grit/grd_reader.py
index d1d2753..4f03fab1 100644
--- a/tools/grit/grit/grd_reader.py
+++ b/tools/grit/grit/grd_reader.py
@@ -34,10 +34,10 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
self.stack = []
self.stop_after = stop_after
self.debug = debug
-
+
def startElement(self, name, attrs):
assert not self.root or len(self.stack) > 0
-
+
if self.debug:
attr_list = []
for attr in attrs.getNames():
@@ -45,28 +45,28 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
if len(attr_list) == 0: attr_list = ['(none)']
attr_list = ' '.join(attr_list)
print "Starting parsing of element %s with attributes %r" % (name, attr_list)
-
+
typeattr = None
if 'type' in attrs.getNames():
typeattr = attrs.getValue('type')
-
+
node = mapping.ElementToClass(name, typeattr)()
-
+
if not self.root:
self.root = node
-
+
if len(self.stack) > 0:
self.stack[-1].AddChild(node)
node.StartParsing(name, self.stack[-1])
else:
node.StartParsing(name, None)
-
+
# Push
self.stack.append(node)
-
+
for attr in attrs.getNames():
node.HandleAttribute(attr, attrs.getValue(attr))
-
+
def endElement(self, name):
if self.debug:
print "End parsing of element %s" % name
@@ -76,11 +76,11 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
self.stack = self.stack[:-1]
if self.stop_after and name == self.stop_after:
raise StopParsingException()
-
+
def characters(self, content):
if self.stack[-1]:
self.stack[-1].AppendContent(content)
-
+
def ignorableWhitespace(self, whitespace):
# TODO(joi) This is not supported by expat. Should use a different XML parser?
pass
@@ -89,33 +89,33 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
def Parse(filename_or_stream, dir = None, flexible_root = False,
stop_after=None, debug=False):
'''Parses a GRD file into a tree of nodes (from grit.node).
-
+
If flexible_root is False, the root node must be a <grit> element. Otherwise
it can be any element. The "own" directory of the file will only be fixed up
if the root node is a <grit> element.
-
+
'dir' should point to the directory of the input file, or be the full path
to the input file (the filename will be stripped).
-
+
If 'stop_after' is provided, the parsing will stop once the first node
with this name has been fully parsed (including all its contents).
-
+
If 'debug' is true, lots of information about the parsing events will be
printed out during parsing of the file.
-
+
Args:
filename_or_stream: './bla.xml' (must be filename if dir is None)
dir: '.' or None (only if filename_or_stream is a filename)
flexible_root: True | False
stop_after: 'inputs'
debug: False
-
+
Return:
Subclass of grit.node.base.Node
-
+
Throws:
grit.exception.Parsing
- '''
+ '''
handler = GrdContentHandler(stop_after=stop_after, debug=debug)
try:
xml.sax.parse(filename_or_stream, handler)
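
Typical invocation, in the same style as the unit tests that follow; the GRD snippet is trimmed down and illustrative:

import StringIO
from grit import grd_reader

pseudo_file = StringIO.StringIO(
    '<?xml version="1.0" encoding="UTF-8"?>\n'
    '<grit latest_public_release="2" source_lang_id="en-US"'
    ' current_release="3" base_dir="."><outputs></outputs></grit>')
tree = grd_reader.Parse(pseudo_file, '.')
tree.name                                # 'grit'
[child.name for child in tree.children]  # ['outputs']
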
diff --git a/tools/grit/grit/grd_reader_unittest.py b/tools/grit/grit/grd_reader_unittest.py
index 49c3e4a..4612bbf 100644
--- a/tools/grit/grit/grd_reader_unittest.py
+++ b/tools/grit/grit/grd_reader_unittest.py
@@ -75,7 +75,7 @@ class GrdReaderUnittest(unittest.TestCase):
# only an <outputs> child
self.failUnless(len(tree.children) == 1)
self.failUnless(tree.children[0].name == 'outputs')
-
+
def testLongLinesWithComments(self):
input = u'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
@@ -91,7 +91,7 @@ class GrdReaderUnittest(unittest.TestCase):
</grit>'''
pseudo_file = StringIO.StringIO(input)
tree = grd_reader.Parse(pseudo_file, '.')
-
+
greeting = tree.GetNodeById('IDS_GREETING')
self.failUnless(greeting.GetCliques()[0].GetMessage().GetRealContent() ==
'This is a very long line with no linebreaks yes yes it '
@@ -99,4 +99,4 @@ class GrdReaderUnittest(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
-
+
diff --git a/tools/grit/grit/grit_runner.py b/tools/grit/grit/grit_runner.py
index 723d701..d1ff9be 100644
--- a/tools/grit/grit/grit_runner.py
+++ b/tools/grit/grit/grit_runner.py
@@ -66,7 +66,7 @@ def PrintUsage():
for (tool, info) in _TOOLS:
if not _HIDDEN in info.keys():
tool_list += ' %-12s %s\n' % (tool, info[_CLASS]().ShortDescription())
-
+
# TODO(joi) Put these back into the usage when appropriate:
#
# -d Work disconnected. This causes GRIT not to attempt connections with
@@ -83,12 +83,12 @@ Global options:
named 'resource.grd' in the current working directory.
-v Print more verbose runtime information.
-
+
-x Print extremely verbose runtime information. Implies -v
-
+
-p FNAME Specifies that GRIT should profile its execution and output the
results to the file FNAME.
-
+
Tools:
TOOL can be one of the following:
@@ -100,7 +100,7 @@ Tools:
class Options(object):
'''Option storage and parsing.'''
-
+
def __init__(self):
self.disconnected = False
self.client = ''
@@ -109,7 +109,7 @@ class Options(object):
self.extra_verbose = False
self.output_stream = sys.stdout
self.profile_dest = None
-
+
def ReadOptions(self, args):
'''Reads options from the start of args and returns the remainder.'''
(opts, args) = getopt.getopt(args, 'g:dvxc:i:p:')
@@ -126,15 +126,15 @@ class Options(object):
self.extra_verbose = True
util.extra_verbose = True
elif key == '-p': self.profile_dest = val
-
+
if not self.input:
if 'GRIT_INPUT' in os.environ:
self.input = os.environ['GRIT_INPUT']
else:
self.input = 'resource.grd'
-
+
return args
-
+
def __repr__(self):
return '(disconnected: %d, verbose: %d, client: %s, input: %s)' % (
self.disconnected, self.verbose, self.client, self.input)
@@ -154,7 +154,7 @@ def Main(args):
'''Parses arguments and does the appropriate thing.'''
util.ChangeStdoutEncoding()
print _COPYRIGHT
-
+
if not len(args) or len(args) == 1 and args[0] == 'help':
PrintUsage()
return 0
@@ -163,7 +163,7 @@ def Main(args):
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
-
+
print ("Help for 'grit %s' (for general help, run 'grit help'):\n"
% (tool))
print _GetToolInfo(tool)[_CLASS].__doc__
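
A hypothetical illustration of ReadOptions(): getopt consumes global flags up to the first non-option token, the remainder is handed to the selected tool, and the input file falls back to $GRIT_INPUT and then resource.grd:

from grit import grit_runner

opts = grit_runner.Options()
rest = opts.ReadOptions(['-i', 'my_resources.grd', 'build', '-o', 'out'])
opts.input   # 'my_resources.grd' (otherwise $GRIT_INPUT, else 'resource.grd')
rest         # ['build', '-o', 'out'], passed on to the chosen tool
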
diff --git a/tools/grit/grit/grit_runner_unittest.py b/tools/grit/grit/grit_runner_unittest.py
index 057f646..39e1f23 100644
--- a/tools/grit/grit/grit_runner_unittest.py
+++ b/tools/grit/grit/grit_runner_unittest.py
@@ -21,10 +21,10 @@ class OptionArgsUnittest(unittest.TestCase):
self.buf = StringIO.StringIO()
self.old_stdout = sys.stdout
sys.stdout = self.buf
-
+
def tearDown(self):
sys.stdout = self.old_stdout
-
+
def testSimple(self):
grit.grit_runner.Main(['-i',
util.PathFromRoot('grit/test/data/simple-input.xml'),
diff --git a/tools/grit/grit/node/base.py b/tools/grit/grit/node/base.py
index 76739c3..b35c200 100644
--- a/tools/grit/grit/node/base.py
+++ b/tools/grit/grit/node/base.py
@@ -25,7 +25,7 @@ class Node(grit.format.interface.ItemFormatter):
_CONTENT_TYPE_NONE = 0 # No CDATA content but may have children
_CONTENT_TYPE_CDATA = 1 # Only CDATA, no children.
_CONTENT_TYPE_MIXED = 2 # CDATA and children, possibly intermingled
-
+
def __init__(self):
self.children = [] # A list of child elements
self.mixed_content = [] # A list of u'' and/or child elements (this
@@ -35,12 +35,12 @@ class Node(grit.format.interface.ItemFormatter):
self.attrs = {} # The set of attributes (keys to values)
self.parent = None # Our parent unless we are the root element.
self.uberclique = None # Allows overriding uberclique for parts of tree
-
+
def __iter__(self):
'''An in-order iteration through the tree that this node is the
root of.'''
return self.inorder()
-
+
def inorder(self):
'''Generator that generates first this node, then the same generator for
any child nodes.'''
@@ -48,14 +48,14 @@ class Node(grit.format.interface.ItemFormatter):
for child in self.children:
for iterchild in child.inorder():
yield iterchild
-
+
def GetRoot(self):
'''Returns the root Node in the tree this Node belongs to.'''
curr = self
while curr.parent:
curr = curr.parent
return curr
-
+
# TODO(joi) Use this (currently untested) optimization?:
#if hasattr(self, '_root'):
# return self._root
@@ -67,10 +67,10 @@ class Node(grit.format.interface.ItemFormatter):
#else:
# self._root = curr
#return self._root
-
+
def StartParsing(self, name, parent):
'''Called at the start of parsing.
-
+
Args:
name: u'elementname'
parent: grit.node.base.Node or subclass or None
@@ -111,13 +111,13 @@ class Node(grit.format.interface.ItemFormatter):
self.mixed_content.pop(index)
break
index += 1
-
+
def AppendContent(self, content):
'''Appends a chunk of text as content of this node.
-
+
Args:
content: u'hello'
-
+
Return:
None
'''
@@ -126,15 +126,15 @@ class Node(grit.format.interface.ItemFormatter):
self.mixed_content.append(content)
elif content.strip() != '':
raise exception.UnexpectedContent()
-
+
def HandleAttribute(self, attrib, value):
'''Informs the node of an attribute that was parsed out of the GRD file
for it.
-
+
Args:
attrib: 'name'
value: 'fooblat'
-
+
Return:
None
'''
@@ -144,10 +144,10 @@ class Node(grit.format.interface.ItemFormatter):
self.attrs[attrib] = value
else:
raise exception.UnexpectedAttribute(attrib)
-
+
def EndParsing(self):
'''Called at the end of parsing.'''
-
+
# TODO(joi) Rewrite this, it's extremely ugly!
if len(self.mixed_content):
if isinstance(self.mixed_content[0], types.StringTypes):
@@ -180,15 +180,15 @@ class Node(grit.format.interface.ItemFormatter):
isinstance(self.mixed_content[-1], types.StringTypes)):
if self.mixed_content[-1].endswith("'''"):
self.mixed_content[-1] = self.mixed_content[-1][:-3]
-
+
# Check that all mandatory attributes are there.
for node_mandatt in self.MandatoryAttributes():
- mandatt_list = []
+ mandatt_list = []
if node_mandatt.find('|') >= 0:
mandatt_list = node_mandatt.split('|')
else:
mandatt_list.append(node_mandatt)
-
+
mandatt_option_found = False
for mandatt in mandatt_list:
assert mandatt not in self.DefaultAttributes().keys()
@@ -197,34 +197,34 @@ class Node(grit.format.interface.ItemFormatter):
mandatt_option_found = True
else:
raise exception.MutuallyExclusiveMandatoryAttribute(mandatt)
-
- if not mandatt_option_found:
+
+ if not mandatt_option_found:
raise exception.MissingMandatoryAttribute(mandatt)
-
+
# Add default attributes if not specified in input file.
for defattr in self.DefaultAttributes():
if not defattr in self.attrs:
self.attrs[defattr] = self.DefaultAttributes()[defattr]
-
+
def GetCdata(self):
'''Returns all CDATA of this element, concatenated into a single
string. Note that this ignores any elements embedded in CDATA.'''
return ''.join(filter(lambda c: isinstance(c, types.StringTypes),
self.mixed_content))
-
+
def __unicode__(self):
'''Returns this node and all nodes below it as an XML document in a Unicode
string.'''
header = u'<?xml version="1.0" encoding="UTF-8"?>\n'
return header + self.FormatXml()
-
+
# Compliance with ItemFormatter interface.
def Format(self, item, lang_re = None, begin_item=True):
if not begin_item:
return ''
else:
return item.FormatXml()
-
+
def FormatXml(self, indent = u'', one_line = False):
'''Returns this node and all nodes below it as an XML
element in a Unicode string. This differs from __unicode__ in that it does
@@ -233,11 +233,11 @@ class Node(grit.format.interface.ItemFormatter):
whitespace.
'''
assert isinstance(indent, types.StringTypes)
-
+
content_one_line = (one_line or
self._ContentType() == self._CONTENT_TYPE_MIXED)
inside_content = self.ContentsAsXml(indent, content_one_line)
-
+
# Then the attributes for this node.
attribs = u' '
for (attrib, value) in self.attrs.iteritems():
@@ -247,7 +247,7 @@ class Node(grit.format.interface.ItemFormatter):
attribs += u'%s=%s ' % (attrib, saxutils.quoteattr(value))
attribs = attribs.rstrip() # if no attribs, we end up with '', otherwise
# we end up with a space-prefixed string
-
+
# Finally build the XML for our node and return it
if len(inside_content) > 0:
if one_line:
@@ -264,12 +264,12 @@ class Node(grit.format.interface.ItemFormatter):
indent, self.name)
else:
return u'%s<%s%s />' % (indent, self.name, attribs)
-
+
def ContentsAsXml(self, indent, one_line):
'''Returns the contents of this node (CDATA and child elements) in XML
format. If 'one_line' is true, the content will be laid out on one line.'''
assert isinstance(indent, types.StringTypes)
-
+
# Build the contents of the element.
inside_parts = []
last_item = None
@@ -291,7 +291,7 @@ class Node(grit.format.interface.ItemFormatter):
# trailing \n
if len(inside_parts) and inside_parts[-1] == '\n':
inside_parts = inside_parts[:-1]
-
+
# If the last item is a string (not a node) and ends with whitespace,
# we need to add the ''' delimiter.
if (isinstance(last_item, types.StringTypes) and
@@ -299,13 +299,13 @@ class Node(grit.format.interface.ItemFormatter):
inside_parts[-1] = inside_parts[-1] + u"'''"
return u''.join(inside_parts)
-
+
def RunGatherers(self, recursive=0, debug=False):
'''Runs all gatherers on this object, which may add to the data stored
by the object. If 'recursive' is true, will call RunGatherers() recursively
on all child nodes first. If 'debug' is True, will print out information
as it is running each nodes' gatherers.
-
+
Gatherers for <translations> child nodes will always be run after all other
child nodes have been gathered.
'''
@@ -318,14 +318,14 @@ class Node(grit.format.interface.ItemFormatter):
child.RunGatherers(recursive=recursive, debug=debug)
for child in process_last:
child.RunGatherers(recursive=recursive, debug=debug)
-
+
def ItemFormatter(self, type):
'''Returns an instance of the item formatter for this object of the
specified type, or None if not supported.
-
+
Args:
type: 'rc-header'
-
+
Return:
(object RcHeaderItemFormatter)
'''
@@ -333,12 +333,12 @@ class Node(grit.format.interface.ItemFormatter):
return self
else:
return None
-
+
def SatisfiesOutputCondition(self):
'''Returns true if this node is either not a child of an <if> element
or if it is a child of an <if> element and the conditions for it being
output are satisfied.
-
+
Used to determine whether to return item formatters for formats that
obey conditional output of resources (e.g. the RC formatters).
'''
@@ -359,7 +359,7 @@ class Node(grit.format.interface.ItemFormatter):
subclasses unless they have only mandatory attributes.'''
return (name in self.MandatoryAttributes() or
name in self.DefaultAttributes())
-
+
def _ContentType(self):
'''Returns the type of content this element can have. Overridden by
subclasses. The content type can be one of the _CONTENT_TYPE_XXX constants
@@ -368,7 +368,7 @@ class Node(grit.format.interface.ItemFormatter):
def MandatoryAttributes(self):
'''Returns a list of attribute names that are mandatory (non-optional)
- on the current element. One can specify a list of
+ on the current element. One can specify a list of
"mutually exclusive mandatory" attributes by specifying them as one
element in the list, separated by a "|" character.
'''
@@ -378,11 +378,11 @@ class Node(grit.format.interface.ItemFormatter):
'''Returns a dictionary of attribute names that have defaults, mapped to
the default value. Overridden by subclasses.'''
return {}
-
+
def GetCliques(self):
'''Returns all MessageClique objects belonging to this node. Overridden
by subclasses.
-
+
Return:
[clique1, clique2] or []
'''
@@ -392,10 +392,10 @@ class Node(grit.format.interface.ItemFormatter):
'''Returns a real path (which can be absolute or relative to the current
working directory), given a path that is relative to the base directory
set for the GRIT input file.
-
+
Args:
path_from_basedir: '..'
-
+
Return:
'resource'
'''
@@ -426,7 +426,7 @@ class Node(grit.format.interface.ItemFormatter):
if not node.uberclique:
node.uberclique = clique.UberClique()
return node.uberclique
-
+
def IsTranslateable(self):
'''Returns false if the node has contents that should not be translated,
otherwise returns false (even if the node has no contents).
@@ -451,12 +451,12 @@ class Node(grit.format.interface.ItemFormatter):
'''
if 'name' in self.attrs:
return [self.attrs['name']]
- return None
+ return None
def EvaluateCondition(self, expr):
'''Returns true if and only if the Python expression 'expr' evaluates
to true.
-
+
The expression is given a few local variables:
- 'lang' is the language currently being output
- 'defs' is a map of C preprocessor-style define names to their values
@@ -483,10 +483,10 @@ class Node(grit.format.interface.ItemFormatter):
'os': sys.platform,
'pp_ifdef' : pp_ifdef,
'pp_if' : pp_if})
-
+
def OnlyTheseTranslations(self, languages):
'''Turns off loading of translations for languages not in the provided list.
-
+
Attrs:
languages: ['fr', 'zh_cn']
'''
@@ -495,7 +495,7 @@ class Node(grit.format.interface.ItemFormatter):
node.IsTranslation() and
node.GetLang() not in languages):
node.DisableLoading()
-
+
def PseudoIsAllowed(self):
'''Returns true if this node is allowed to use pseudo-translations. This
is true by default, unless this node is within a <release> node that has
@@ -507,7 +507,7 @@ class Node(grit.format.interface.ItemFormatter):
return (p.attrs['allow_pseudo'].lower() == 'true')
p = p.parent
return True
-
+
def ShouldFallbackToEnglish(self):
'''Returns true iff this node should fall back to English when
pseudotranslations are disabled and no translation is available for a
diff --git a/tools/grit/grit/node/base_unittest.py b/tools/grit/grit/node/base_unittest.py
index dcbc4d6..958e715 100644
--- a/tools/grit/grit/node/base_unittest.py
+++ b/tools/grit/grit/node/base_unittest.py
@@ -36,7 +36,7 @@ class NodeUnittest(unittest.TestCase):
node.AppendContent(u" ''' two spaces ")
node.EndParsing()
self.failUnless(node.GetCdata() == u' two spaces')
-
+
node = message.MessageNode()
node.StartParsing(u'message', None)
node.HandleAttribute(u'name', u'bla')
@@ -66,7 +66,7 @@ class NodeUnittest(unittest.TestCase):
node.StartParsing(u'message', None)
node.HandleAttribute(u'name', u'name')
node.AppendContent(u'Hello <young> ')
-
+
ph = message.PhNode()
ph.StartParsing(u'ph', None)
ph.HandleAttribute(u'name', u'USERNAME')
@@ -77,15 +77,15 @@ class NodeUnittest(unittest.TestCase):
ex.EndParsing()
ph.AddChild(ex)
ph.EndParsing()
-
+
node.AddChild(ph)
node.EndParsing()
-
+
non_indented_xml = node.Format(node)
self.failUnless(non_indented_xml == u'<message name="name">\n Hello '
u'&lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
u'\n</message>')
-
+
indented_xml = node.FormatXml(u' ')
self.failUnless(indented_xml == u' <message name="name">\n Hello '
u'&lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
@@ -98,7 +98,7 @@ class NodeUnittest(unittest.TestCase):
node.StartParsing(u'message', None)
node.HandleAttribute(u'name', u'name')
node.AppendContent(u"''' Hello <young> ")
-
+
ph = message.PhNode()
ph.StartParsing(u'ph', None)
ph.HandleAttribute(u'name', u'USERNAME')
@@ -109,25 +109,25 @@ class NodeUnittest(unittest.TestCase):
ex.EndParsing()
ph.AddChild(ex)
ph.EndParsing()
-
+
node.AddChild(ph)
node.AppendContent(u" yessiree '''")
node.EndParsing()
-
+
non_indented_xml = node.Format(node)
self.failUnless(non_indented_xml ==
u"<message name=\"name\">\n ''' Hello"
u' &lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
u" yessiree '''\n</message>")
-
+
indented_xml = node.FormatXml(u' ')
self.failUnless(indented_xml ==
u" <message name=\"name\">\n ''' Hello"
u' &lt;young&gt; <ph name="USERNAME">$1<ex>Joi</ex></ph>'
u" yessiree '''\n </message>")
-
+
self.failUnless(node.GetNodeById('name'))
-
+
def testXmlFormatContentWithEntities(self):
'''Tests a bug where &nbsp; would not be escaped correctly.'''
from grit import tclib
@@ -140,7 +140,7 @@ class NodeUnittest(unittest.TestCase):
'BINGOBONGO')
xml = msg_node.FormatXml()
self.failUnless(xml.find('&nbsp;') == -1, 'should have no entities')
-
+
def testIter(self):
# First build a little tree of message and ph nodes.
node = message.MessageNode()
@@ -156,7 +156,7 @@ class NodeUnittest(unittest.TestCase):
node.AddChild(ph)
node.AddChild(message.PhNode())
node.AppendContent(u" space before two after '''")
-
+
order = [message.MessageNode, message.PhNode, message.ExNode, message.PhNode]
for n in node:
self.failUnless(type(n) == order[0])
diff --git a/tools/grit/grit/node/custom/__init__.py b/tools/grit/grit/node/custom/__init__.py
index 0a30448..f8479b9 100644
--- a/tools/grit/grit/node/custom/__init__.py
+++ b/tools/grit/grit/node/custom/__init__.py
@@ -6,4 +6,4 @@
'''Package 'grit.node.custom'
'''
-pass
\ No newline at end of file
+pass
diff --git a/tools/grit/grit/node/custom/filename.py b/tools/grit/grit/node/custom/filename.py
index 8e2eb56..d6de1d0 100644
--- a/tools/grit/grit/node/custom/filename.py
+++ b/tools/grit/grit/node/custom/filename.py
@@ -14,16 +14,16 @@ class WindowsFilename(clique.CustomType):
'''Validates that messages can be used as Windows filenames, and strips
illegal characters out of translations.
'''
-
+
BANNED = re.compile('\+|:|\/|\\\\|\*|\?|\"|\<|\>|\|')
-
+
def Validate(self, message):
return not self.BANNED.search(message.GetPresentableContent())
-
+
def ValidateAndModify(self, lang, translation):
is_ok = self.Validate(translation)
self.ModifyEachTextPart(lang, translation)
return is_ok
-
+
def ModifyTextPart(self, lang, text):
return self.BANNED.sub(' ', text)
diff --git a/tools/grit/grit/node/custom/filename_unittest.py b/tools/grit/grit/node/custom/filename_unittest.py
index f3d9b48..69ac0bd 100644
--- a/tools/grit/grit/node/custom/filename_unittest.py
+++ b/tools/grit/grit/node/custom/filename_unittest.py
@@ -18,7 +18,7 @@ from grit import tclib
class WindowsFilenameUnittest(unittest.TestCase):
-
+
def testValidate(self):
factory = clique.UberClique()
msg = tclib.Message(text='Bingo bongo')
diff --git a/tools/grit/grit/node/empty.py b/tools/grit/grit/node/empty.py
index 95c2195..516bdde 100644
--- a/tools/grit/grit/node/empty.py
+++ b/tools/grit/grit/node/empty.py
@@ -36,7 +36,7 @@ class MessagesNode(GroupingNode):
'''The <messages> element.'''
def _IsValidChild(self, child):
return isinstance(child, (message.MessageNode, misc.IfNode))
-
+
def ItemFormatter(self, t):
'''Return the stringtable itemformatter if an RC is being formatted.'''
if t in ['rc_all', 'rc_translateable', 'rc_nontranslateable']:
diff --git a/tools/grit/grit/node/include.py b/tools/grit/grit/node/include.py
index e9ea31c..2175240 100644
--- a/tools/grit/grit/node/include.py
+++ b/tools/grit/grit/node/include.py
@@ -16,7 +16,7 @@ from grit import util
class IncludeNode(base.Node):
'''An <include> element.'''
-
+
def _IsValidChild(self, child):
return False
@@ -24,8 +24,8 @@ class IncludeNode(base.Node):
return ['name', 'type', 'file']
def DefaultAttributes(self):
- return {'translateable' : 'true',
- 'generateid': 'true',
+ return {'translateable' : 'true',
+ 'generateid': 'true',
'filenameonly': 'false',
'flattenhtml': 'false',
'relativepath': 'false',
@@ -36,13 +36,13 @@ class IncludeNode(base.Node):
return grit.format.rc_header.Item()
elif (t in ['rc_all', 'rc_translateable', 'rc_nontranslateable'] and
self.SatisfiesOutputCondition()):
- return grit.format.rc.RcInclude(self.attrs['type'].upper(),
+ return grit.format.rc.RcInclude(self.attrs['type'].upper(),
self.attrs['filenameonly'] == 'true',
self.attrs['relativepath'] == 'true',
self.attrs['flattenhtml'] == 'true')
else:
return super(type(self), self).ItemFormatter(t)
-
+
def FileForLanguage(self, lang, output_dir):
'''Returns the file for the specified language. This allows us to return
different files for different language variants of the include file.
@@ -68,7 +68,7 @@ class IncludeNode(base.Node):
return id, data
# static method
- def Construct(parent, name, type, file, translateable=True,
+ def Construct(parent, name, type, file, translateable=True,
filenameonly=False, relativepath=False):
'''Creates a new node which is a child of 'parent', with attributes set
by parameters of the same name.
@@ -77,7 +77,7 @@ class IncludeNode(base.Node):
translateable = util.BoolToString(translateable)
filenameonly = util.BoolToString(filenameonly)
relativepath = util.BoolToString(relativepath)
-
+
node = IncludeNode()
node.StartParsing('include', parent)
node.HandleAttribute('name', name)
diff --git a/tools/grit/grit/node/io.py b/tools/grit/grit/node/io.py
index 3de4e7f..81d2f4c 100644
--- a/tools/grit/grit/node/io.py
+++ b/tools/grit/grit/node/io.py
@@ -18,28 +18,28 @@ from grit import xtb_reader
class FileNode(base.Node):
'''A <file> element.'''
-
+
def __init__(self):
super(type(self), self).__init__()
self.re = None
self.should_load_ = True
-
+
def IsTranslation(self):
return True
-
+
def GetLang(self):
return self.attrs['lang']
-
+
def DisableLoading(self):
self.should_load_ = False
-
+
def MandatoryAttributes(self):
return ['path', 'lang']
-
+
def RunGatherers(self, recursive=False, debug=False):
if not self.should_load_:
return
-
+
xtb_file = file(self.GetFilePath())
try:
lang = xtb_reader.Parse(xtb_file,
@@ -51,22 +51,22 @@ class FileNode(base.Node):
assert lang == self.attrs['lang'], ('The XTB file you '
'reference must contain messages in the language specified\n'
'by the \'lang\' attribute.')
-
+
def GetFilePath(self):
return self.ToRealPath(os.path.expandvars(self.attrs['path']))
class OutputNode(base.Node):
'''An <output> element.'''
-
+
def MandatoryAttributes(self):
return ['filename', 'type']
-
+
def DefaultAttributes(self):
return { 'lang' : '', # empty lang indicates all languages
'language_section' : 'neutral' # defines a language neutral section
- }
-
+ }
+
def GetType(self):
return self.attrs['type']
@@ -98,9 +98,9 @@ class EmitNode(base.ContentNode):
def ItemFormatter(self, t):
if t == 'rc_header':
- return grit.format.rc_header.EmitAppender()
+ return grit.format.rc_header.EmitAppender()
else:
- return super(type(self), self).ItemFormatter(t)
-
+ return super(type(self), self).ItemFormatter(t)
+
diff --git a/tools/grit/grit/node/io_unittest.py b/tools/grit/grit/node/io_unittest.py
index d421045..853c985 100644
--- a/tools/grit/grit/node/io_unittest.py
+++ b/tools/grit/grit/node/io_unittest.py
@@ -36,11 +36,11 @@ class FileNodeUnittest(unittest.TestCase):
file_node.HandleAttribute(u'path', ur'flugel\kugel.pdf')
translations.AddChild(file_node)
root.EndParsing()
-
+
self.failUnless(file_node.GetFilePath() ==
util.normpath(
os.path.join(ur'../resource', ur'flugel/kugel.pdf')))
-
+
def testLoadTranslations(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
diff --git a/tools/grit/grit/node/mapping.py b/tools/grit/grit/node/mapping.py
index b02673d..6e6be93 100644
--- a/tools/grit/grit/node/mapping.py
+++ b/tools/grit/grit/node/mapping.py
@@ -44,11 +44,11 @@ _ELEMENT_TO_CLASS = {
def ElementToClass(name, typeattr):
'''Maps an element to a class that handles the element.
-
+
Args:
name: 'element' (the name of the element)
typeattr: 'type' (the value of the type attribute, if present, else None)
-
+
Return:
type
'''
diff --git a/tools/grit/grit/node/message.py b/tools/grit/grit/node/message.py
index 7086538..a6346b1 100644
--- a/tools/grit/grit/node/message.py
+++ b/tools/grit/grit/node/message.py
@@ -27,23 +27,23 @@ _WHITESPACE = re.compile('(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
class MessageNode(base.ContentNode):
'''A <message> element.'''
-
+
# For splitting a list of things that can be separated by commas or
# whitespace
_SPLIT_RE = re.compile('\s*,\s*|\s+')
-
+
def __init__(self):
super(type(self), self).__init__()
# Valid after EndParsing, this is the MessageClique that contains the
# source message and any translations of it that have been loaded.
self.clique = None
-
+
# We don't send leading and trailing whitespace into the translation
# console, but rather tack it onto the source message and any
# translations when formatting them into RC files or what have you.
self.ws_at_start = '' # Any whitespace characters at the start of the text
self.ws_at_end = '' # --"-- at the end of the text
-
+
# A list of "shortcut groups" this message is in. We check to make sure
# that shortcut keys (e.g. &J) within each shortcut group are unique.
self.shortcut_groups_ = []
@@ -59,10 +59,10 @@ class MessageNode(base.ContentNode):
if name == 'translateable' and value not in ['true', 'false']:
return False
return True
-
+
def MandatoryAttributes(self):
return ['name|offset']
-
+
def DefaultAttributes(self):
return {
'translateable' : 'true',
@@ -89,12 +89,12 @@ class MessageNode(base.ContentNode):
while grouping_parent and not isinstance(grouping_parent,
grit.node.empty.GroupingNode):
grouping_parent = grouping_parent.parent
-
+
assert 'first_id' in grouping_parent.attrs
return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
else:
return super(type(self), self).GetTextualIds()
-
+
def IsTranslateable(self):
return self.attrs['translateable'] == 'true'
@@ -109,11 +109,11 @@ class MessageNode(base.ContentNode):
def EndParsing(self):
super(type(self), self).EndParsing()
-
+
# Make the text (including placeholder references) and list of placeholders,
# then strip and store leading and trailing whitespace and create the
# tclib.Message() and a clique to contain it.
-
+
text = ''
placeholders = []
for item in self.mixed_content:
@@ -127,16 +127,16 @@ class MessageNode(base.ContentNode):
ex = item.children[0].GetCdata()
original = item.GetCdata()
placeholders.append(tclib.Placeholder(presentation, original, ex))
-
+
m = _WHITESPACE.match(text)
if m:
self.ws_at_start = m.group('start')
self.ws_at_end = m.group('end')
text = m.group('body')
-
+
self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']
-
+
description_or_id = self.attrs['desc']
if description_or_id == '' and 'name' in self.attrs:
description_or_id = 'ID: %s' % self.attrs['name']
@@ -157,13 +157,13 @@ class MessageNode(base.ContentNode):
elif self.attrs['validation_expr'] != '':
self.clique.SetCustomType(
clique.OneOffCustomType(self.attrs['validation_expr']))
-
+
def GetCliques(self):
if self.clique:
return [self.clique]
else:
return []
-
+
def Translate(self, lang):
'''Returns a translated version of this message.
'''
@@ -173,7 +173,7 @@ class MessageNode(base.ContentNode):
self.ShouldFallbackToEnglish()
).GetRealContent()
return msg.replace('[GRITLANGCODE]', lang)
-
+
def NameOrOffset(self):
if 'name' in self.attrs:
return self.attrs['name']
@@ -205,49 +205,49 @@ class MessageNode(base.ContentNode):
translateable = 'true'
else:
translateable = 'false'
-
+
node = MessageNode()
node.StartParsing('message', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('desc', desc)
node.HandleAttribute('meaning', meaning)
node.HandleAttribute('translateable', translateable)
-
+
items = message.GetContent()
for ix in range(len(items)):
if isinstance(items[ix], types.StringTypes):
text = items[ix]
-
+
# Ensure whitespace at front and back of message is correctly handled.
if ix == 0:
text = "'''" + text
if ix == len(items) - 1:
text = text + "'''"
-
+
node.AppendContent(text)
else:
phnode = PhNode()
phnode.StartParsing('ph', node)
phnode.HandleAttribute('name', items[ix].GetPresentation())
phnode.AppendContent(items[ix].GetOriginal())
-
+
if len(items[ix].GetExample()) and items[ix].GetExample() != ' ':
exnode = ExNode()
exnode.StartParsing('ex', phnode)
exnode.AppendContent(items[ix].GetExample())
exnode.EndParsing()
phnode.AddChild(exnode)
-
+
phnode.EndParsing()
node.AddChild(phnode)
-
+
node.EndParsing()
return node
Construct = staticmethod(Construct)
class PhNode(base.ContentNode):
'''A <ph> element.'''
-
+
def _IsValidChild(self, child):
return isinstance(child, ExNode)
diff --git a/tools/grit/grit/node/message_unittest.py b/tools/grit/grit/node/message_unittest.py
index 5722927..4255dcb 100644
--- a/tools/grit/grit/node/message_unittest.py
+++ b/tools/grit/grit/node/message_unittest.py
@@ -28,7 +28,7 @@ class MessageUnittest(unittest.TestCase):
cliques = res.GetCliques()
content = cliques[0].GetMessage().GetPresentableContent()
self.failUnless(content == 'Hello USERNAME, how are you doing today?')
-
+
def testMessageWithWhitespace(self):
buf = StringIO.StringIO('<message name="IDS_BLA" desc="">'
'\'\'\' Hello there <ph name="USERNAME">%s</ph> \'\'\''
@@ -50,7 +50,7 @@ class MessageUnittest(unittest.TestCase):
self.failUnless(msg_node.children[1].children[0].GetCdata() == '11')
self.failUnless(msg_node.ws_at_start == ' ')
self.failUnless(msg_node.ws_at_end == '\t\t')
-
+
def testUnicodeConstruct(self):
text = u'Howdie \u00fe'
msg = tclib.Message(text=text)
diff --git a/tools/grit/grit/node/misc.py b/tools/grit/grit/node/misc.py
index 27f8b66..d3441f2 100644
--- a/tools/grit/grit/node/misc.py
+++ b/tools/grit/grit/node/misc.py
@@ -22,7 +22,7 @@ import grit.format.rc_header
class IfNode(base.Node):
'''A node for conditional inclusion of resources.
'''
-
+
def _IsValidChild(self, child):
from grit.node import empty
assert self.parent, '<if> node should never be root.'
@@ -37,10 +37,10 @@ class IfNode(base.Node):
return isinstance(child, structure.StructureNode)
else:
return False
-
+
def MandatoryAttributes(self):
return ['expr']
-
+
def IsConditionSatisfied(self):
'''Returns true if and only if the Python expression stored in attribute
'expr' evaluates to true.
@@ -55,7 +55,7 @@ class ReleaseNode(base.Node):
from grit.node import empty
return isinstance(child, (empty.IncludesNode, empty.MessagesNode,
empty.StructuresNode, empty.IdentifiersNode))
-
+
def _IsValidAttribute(self, name, value):
return (
(name == 'seq' and int(value) <= self.GetRoot().GetCurrentRelease()) or
@@ -64,10 +64,10 @@ class ReleaseNode(base.Node):
def MandatoryAttributes(self):
return ['seq']
-
+
def DefaultAttributes(self):
return { 'allow_pseudo' : 'true' }
-
+
def GetReleaseNumber():
'''Returns the sequence number of this release.'''
return self.attribs['seq']
@@ -81,17 +81,17 @@ class ReleaseNode(base.Node):
class GritNode(base.Node):
'''The <grit> root element.'''
-
+
def __init__(self):
base.Node.__init__(self)
self.output_language = ''
self.defines = {}
-
+
def _IsValidChild(self, child):
from grit.node import empty
return isinstance(child, (ReleaseNode, empty.TranslationsNode,
empty.OutputsNode))
-
+
def _IsValidAttribute(self, name, value):
if name not in ['base_dir', 'source_lang_id',
'latest_public_release', 'current_release',
@@ -101,10 +101,10 @@ class GritNode(base.Node):
'0123456789') != '':
return False
return True
-
+
def MandatoryAttributes(self):
return ['latest_public_release', 'current_release']
-
+
def DefaultAttributes(self):
return {
'base_dir' : '.',
@@ -119,9 +119,9 @@ class GritNode(base.Node):
> int(self.attrs['current_release'])):
raise exception.Parsing('latest_public_release cannot have a greater '
'value than current_release')
-
+
self.ValidateUniqueIds()
-
+
# Add the encoding check if it's not present (should ensure that it's always
# present in all .grd files generated by GRIT). If it's present, assert if
# it's not correct.
@@ -130,7 +130,7 @@ class GritNode(base.Node):
else:
assert self.attrs['enc_check'] == constants.ENCODING_CHECK, (
'Are you sure your .grd file is in the correct encoding (UTF-8)?')
-
+
def ValidateUniqueIds(self):
'''Validate that 'name' attribute is unique in all nodes in this tree
except for nodes that are children of <if> nodes.
@@ -140,55 +140,55 @@ class GritNode(base.Node):
for node in self:
if isinstance(node, message.PhNode):
continue # PhNode objects have a 'name' attribute which is not an ID
-
+
node_ids = node.GetTextualIds()
if node_ids:
for node_id in node_ids:
if util.SYSTEM_IDENTIFIERS.match(node_id):
continue # predefined IDs are sometimes used more than once
-
+
# Don't complain about duplicate IDs if they occur in a node that is
# inside an <if> node.
if (node_id in unique_names and node_id not in duplicate_names and
(not node.parent or not isinstance(node.parent, IfNode))):
duplicate_names.append(node_id)
unique_names[node_id] = 1
-
+
if len(duplicate_names):
raise exception.DuplicateKey(', '.join(duplicate_names))
-
+
def GetCurrentRelease(self):
'''Returns the current release number.'''
return int(self.attrs['current_release'])
-
+
def GetLatestPublicRelease(self):
'''Returns the latest public release number.'''
return int(self.attrs['latest_public_release'])
-
+
def GetSourceLanguage(self):
'''Returns the language code of the source language.'''
return self.attrs['source_lang_id']
-
+
def GetTcProject(self):
'''Returns the name of this project in the TranslationConsole, or
'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined.'''
return self.attrs['tc_project']
-
+
def SetOwnDir(self, dir):
'''Informs the 'grit' element of the directory the file it is in resides.
This allows it to calculate relative paths from the input file, which is
what we desire (rather than from the current path).
-
+
Args:
dir: r'c:\bla'
-
+
Return:
None
'''
assert dir
self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir']))
-
+
def GetBaseDir(self):
'''Returns the base directory, relative to the working directory. To get
the base directory as set in the .grd file, use GetOriginalBaseDir()
@@ -197,12 +197,12 @@ class GritNode(base.Node):
return self.base_dir
else:
return self.GetOriginalBaseDir()
-
+
def GetOriginalBaseDir(self):
'''Returns the base directory, as set in the .grd file.
'''
return self.attrs['base_dir']
-
+
def GetOutputFiles(self):
'''Returns the list of <file> nodes that are children of this node's
<outputs> child.'''
@@ -231,10 +231,10 @@ class IdentifierNode(base.Node):
header file, and be unique amongst all other resource identifiers, but don't
have any other attributes or reference any resources.
'''
-
+
def MandatoryAttributes(self):
return ['name']
-
+
def DefaultAttributes(self):
return { 'comment' : '', 'id' : '' }
@@ -247,8 +247,8 @@ class IdentifierNode(base.Node):
'''
if 'id' in self.attrs:
return self.attrs['id']
- return None
-
+ return None
+
# static method
def Construct(parent, name, id, comment):
'''Creates a new node which is a child of 'parent', with attributes set
diff --git a/tools/grit/grit/node/misc_unittest.py b/tools/grit/grit/node/misc_unittest.py
index b7e2c2e..b7ee4de 100644
--- a/tools/grit/grit/node/misc_unittest.py
+++ b/tools/grit/grit/node/misc_unittest.py
@@ -54,7 +54,7 @@ class IfNodeUnittest(unittest.TestCase):
</messages>
</release>
</grit>'''), dir='.')
-
+
messages_node = grd.children[0].children[0]
bingo_message = messages_node.children[0].children[0]
hello_message = messages_node.children[1].children[0]
@@ -62,18 +62,18 @@ class IfNodeUnittest(unittest.TestCase):
assert bingo_message.name == 'message'
assert hello_message.name == 'message'
assert french_message.name == 'message'
-
+
grd.SetOutputContext('fr', {'hello' : '1'})
self.failUnless(not bingo_message.SatisfiesOutputCondition())
self.failUnless(hello_message.SatisfiesOutputCondition())
self.failUnless(french_message.SatisfiesOutputCondition())
-
+
grd.SetOutputContext('en', {'bingo' : 1})
self.failUnless(bingo_message.SatisfiesOutputCondition())
self.failUnless(not hello_message.SatisfiesOutputCondition())
self.failUnless(not french_message.SatisfiesOutputCondition())
-
- grd.SetOutputContext('en', {'FORCE_FRENCH' : '1', 'bingo' : '1'})
+
+ grd.SetOutputContext('en', {'FORCE_FRENCH' : '1', 'bingo' : '1'})
self.failUnless(bingo_message.SatisfiesOutputCondition())
self.failUnless(not hello_message.SatisfiesOutputCondition())
self.failUnless(french_message.SatisfiesOutputCondition())
@@ -105,18 +105,18 @@ class ReleaseNodeUnittest(unittest.TestCase):
</release>
</grit>'''), util.PathFromRoot('grit/test/data'))
grd.RunGatherers(recursive=True)
-
+
hello = grd.GetNodeById('IDS_HELLO')
aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
bingo = grd.GetNodeById('IDS_BINGO')
menu = grd.GetNodeById('IDC_KLONKMENU')
-
+
for node in [hello, aboutbox]:
self.failUnless(not node.PseudoIsAllowed())
-
+
for node in [bingo, menu]:
self.failUnless(node.PseudoIsAllowed())
-
+
for node in [hello, aboutbox]:
try:
formatter = node.ItemFormatter('rc_all')
@@ -124,7 +124,7 @@ class ReleaseNodeUnittest(unittest.TestCase):
self.fail('Should have failed during Format since pseudo is not allowed')
except:
pass # expected case
-
+
for node in [bingo, menu]:
try:
formatter = node.ItemFormatter('rc_all')
diff --git a/tools/grit/grit/node/structure.py b/tools/grit/grit/node/structure.py
index 30cfb6d..7058cd3 100644
--- a/tools/grit/grit/node/structure.py
+++ b/tools/grit/grit/node/structure.py
@@ -26,7 +26,7 @@ import grit.format.rc_header
# RTL languages
# TODO(jennyz): remove this fixed set of RTL language array
-# when generic expand_variable code is added by grit team.
+# when generic expand_variable code is added by grit team.
_RTL_LANGS = [
'ar',
'iw',
@@ -68,12 +68,12 @@ _RC_FORMATTERS = {
class StructureNode(base.Node):
'''A <structure> element.'''
-
+
def __init__(self):
base.Node.__init__(self)
self.gatherer = None
self.skeletons = {} # expressions to skeleton gatherers
-
+
def _IsValidChild(self, child):
return isinstance(child, variant.SkeletonNode)
@@ -95,10 +95,10 @@ class StructureNode(base.Node):
# dependencies.
'sconsdep' : 'false',
}
-
+
def IsExcludedFromRc(self):
return self.attrs['exclude_from_rc'] == 'true'
-
+
def GetLineEnd(self):
'''Returns the end-of-line character or characters for files output because
of this node ('\r\n', '\n', or '\r' depending on the 'line_end' attribute).
@@ -112,13 +112,13 @@ class StructureNode(base.Node):
else:
raise exception.UnexpectedAttribute(
"Attribute 'line_end' must be one of 'linux' (default), 'windows' or 'mac'")
-
+
def GetCliques(self):
if self.gatherer:
return self.gatherer.GetCliques()
else:
return []
-
+
def GetTextualIds(self):
if self.gatherer and self.attrs['type'] not in ['tr_html', 'admin_template', 'txt']:
return self.gatherer.GetTextualIds()
@@ -137,18 +137,18 @@ class StructureNode(base.Node):
def RunGatherers(self, recursive=False, debug=False):
if self.gatherer:
return # idempotent
-
+
gathertype = _GATHERERS[self.attrs['type']]
if debug:
print 'Running gatherer %s for file %s' % (str(gathertype), self.FilenameToOpen())
-
+
self.gatherer = gathertype.FromFile(self.FilenameToOpen(),
self.attrs['name'],
self.attrs['encoding'])
self.gatherer.SetUberClique(self.UberClique())
self.gatherer.Parse()
-
+
for child in self.children:
assert isinstance(child, variant.SkeletonNode)
skel = gathertype.FromFile(child.FilenameToOpen(),
@@ -158,7 +158,7 @@ class StructureNode(base.Node):
skel.SetSkeleton(True)
skel.Parse()
self.skeletons[child.attrs['expr']] = skel
-
+
def GetSkeletonGatherer(self):
'''Returns the gatherer for the alternate skeleton that should be used,
based on the expressions for selecting skeletons, or None if the skeleton
@@ -168,25 +168,25 @@ class StructureNode(base.Node):
if self.EvaluateCondition(expr):
return self.skeletons[expr]
return None
-
+
def GetFilePath(self):
return self.ToRealPath(self.attrs['file'])
-
+
def HasFileForLanguage(self):
return self.attrs['type'] in ['tr_html', 'admin_template', 'txt', 'muppet']
-
+
def FileForLanguage(self, lang, output_dir, create_file=True,
return_if_not_generated=True):
'''Returns the filename of the file associated with this structure,
for the specified language.
-
+
Args:
lang: 'fr'
output_dir: 'c:\temp'
create_file: True
'''
assert self.HasFileForLanguage()
- if (lang == self.GetRoot().GetSourceLanguage() and
+ if (lang == self.GetRoot().GetSourceLanguage() and
self.attrs['expand_variables'] != 'true'):
if return_if_not_generated:
return self.GetFilePath()
@@ -200,14 +200,14 @@ class StructureNode(base.Node):
assert len(filename)
filename = '%s_%s' % (lang, filename)
filename = os.path.join(output_dir, filename)
-
+
if create_file:
text = self.gatherer.Translate(
lang,
pseudo_if_not_available=self.PseudoIsAllowed(),
fallback_to_english=self.ShouldFallbackToEnglish(),
skeleton_gatherer=self.GetSkeletonGatherer())
-
+
file_object = util.WrapOutputStream(file(filename, 'wb'),
self._GetOutputEncoding())
file_contents = util.FixLineEnd(text, self.GetLineEnd())
@@ -224,9 +224,9 @@ class StructureNode(base.Node):
file_object.write(constants.BOM)
file_object.write(file_contents)
file_object.close()
-
+
return filename
-
+
def _GetOutputEncoding(self):
'''Python doesn't natively support UTF encodings with a BOM signature,
so we add support by allowing you to append '-sig' to the encoding name.
@@ -237,12 +237,12 @@ class StructureNode(base.Node):
return enc[0:len(enc) - len('-sig')]
else:
return enc
-
+
def _ShouldAddBom(self):
'''Returns true if output files should have the Unicode BOM prepended.
'''
return self.attrs['output_encoding'].endswith('-sig')
-
+
# static method
def Construct(parent, name, type, file, encoding='cp1252'):
'''Creates a new node which is a child of 'parent', with attributes set
diff --git a/tools/grit/grit/node/structure_unittest.py b/tools/grit/grit/node/structure_unittest.py
index a2ce9c2..0bb9757 100644
--- a/tools/grit/grit/node/structure_unittest.py
+++ b/tools/grit/grit/node/structure_unittest.py
@@ -34,15 +34,15 @@ class StructureUnittest(unittest.TestCase):
</grit>'''), dir=util.PathFromRoot('grit\\test\\data'))
grd.RunGatherers(recursive=True)
grd.output_language = 'fr'
-
+
node = grd.GetNodeById('IDD_ABOUTBOX')
formatter = node.ItemFormatter('rc_all')
self.failUnless(formatter)
transl = formatter.Format(node, 'fr')
-
+
self.failUnless(transl.count('040704') and transl.count('110978'))
self.failUnless(transl.count('2005",IDC_STATIC'))
-
+
def testOutputEncoding(self):
grd = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
diff --git a/tools/grit/grit/node/variant.py b/tools/grit/grit/node/variant.py
index d18b815..0249d17 100644
--- a/tools/grit/grit/node/variant.py
+++ b/tools/grit/grit/node/variant.py
@@ -12,13 +12,13 @@ from grit.node import base
class SkeletonNode(base.Node):
'''A <skeleton> element.'''
-
+
# TODO(joi) Support inline skeleton variants as CDATA instead of requiring
# a 'file' attribute.
-
+
def MandatoryAttributes(self):
return ['expr', 'variant_of_revision', 'file']
-
+
def DefaultAttributes(self):
'''If not specified, 'encoding' will actually default to the parent node's
encoding.
@@ -30,7 +30,7 @@ class SkeletonNode(base.Node):
return self._CONTENT_TYPE_NONE
else:
return self._CONTENT_TYPE_CDATA
-
+
def GetEncodingToUse(self):
if self.attrs['encoding'] == '':
return self.parent.attrs['encoding']
diff --git a/tools/grit/grit/pseudo.py b/tools/grit/grit/pseudo.py
index a6dc859..4c3db07 100644
--- a/tools/grit/grit/pseudo.py
+++ b/tools/grit/grit/pseudo.py
@@ -86,7 +86,7 @@ def PseudoString(str):
P-language.'''
if str in _existing_translations:
return _existing_translations[str]
-
+
outstr = u''
ix = 0
while ix < len(str):
@@ -104,27 +104,27 @@ def PseudoString(str):
outstr += changed_vowels
outstr += _QOF
outstr += changed_vowels
-
+
_existing_translations[str] = outstr
return outstr
def PseudoMessage(message):
'''Returns a pseudotranslation of the provided message.
-
+
Args:
message: tclib.Message()
-
+
Return:
tclib.Translation()
'''
transl = tclib.Translation()
-
+
for part in message.GetContent():
if isinstance(part, tclib.Placeholder):
transl.AppendPlaceholder(part)
else:
transl.AppendText(PseudoString(part))
-
+
return transl
diff --git a/tools/grit/grit/pseudo_unittest.py b/tools/grit/grit/pseudo_unittest.py
index 97d4538..6191b20 100644
--- a/tools/grit/grit/pseudo_unittest.py
+++ b/tools/grit/grit/pseudo_unittest.py
@@ -22,16 +22,16 @@ class PseudoUnittest(unittest.TestCase):
u'\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd')
self.failUnless(pseudo.MapVowels('ABEBIBOBUBY') ==
u'\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd')
-
+
def testPseudoString(self):
out = pseudo.PseudoString('hello')
self.failUnless(out == pseudo.MapVowels(u'hePelloPo', True))
-
+
def testConsecutiveVowels(self):
out = pseudo.PseudoString("beautiful weather, ain't it?")
self.failUnless(out == pseudo.MapVowels(
u"beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1))
-
+
def testCapitals(self):
out = pseudo.PseudoString("HOWDIE DOODIE, DR. JONES")
self.failUnless(out == pseudo.MapVowels(
diff --git a/tools/grit/grit/shortcuts.py b/tools/grit/grit/shortcuts.py
index de18e09..c1f7fb1 100644
--- a/tools/grit/grit/shortcuts.py
+++ b/tools/grit/grit/shortcuts.py
@@ -26,7 +26,7 @@ class ShortcutGroup(object):
self.keys_by_lang = {}
# List of cliques in this group
self.cliques = []
-
+
def AddClique(self, c):
for existing_clique in self.cliques:
if existing_clique.GetId() == c.GetId():
@@ -34,13 +34,13 @@ class ShortcutGroup(object):
# <if expr1><structure 1></if> <if expr2><structure 2></if>
# where only one will really be included in the output.
return
-
+
self.cliques.append(c)
for (lang, msg) in c.clique.items():
if lang not in self.keys_by_lang:
self.keys_by_lang[lang] = {}
keymap = self.keys_by_lang[lang]
-
+
content = msg.GetRealContent()
keys = [groups[1] for groups in self.SHORTCUT_RE.findall(content)]
for key in keys:
@@ -49,7 +49,7 @@ class ShortcutGroup(object):
keymap[key] += 1
else:
keymap[key] = 1
-
+
def GenerateWarnings(self, tc_project):
# For any language that has more than one occurrence of any shortcut,
# make a list of the conflicting shortcuts.
@@ -60,7 +60,7 @@ class ShortcutGroup(object):
if lang not in problem_langs:
problem_langs[lang] = []
problem_langs[lang].append(key)
-
+
warnings = []
if len(problem_langs):
warnings.append("WARNING - duplicate keys exist in shortcut group %s" %
@@ -74,11 +74,11 @@ def GenerateDuplicateShortcutsWarnings(uberclique, tc_project):
'''Given an UberClique and a project name, will print out helpful warnings
if there are conflicting shortcuts within shortcut groups in the provided
UberClique.
-
+
Args:
uberclique: clique.UberClique()
tc_project: 'MyProjectNameInTheTranslationConsole'
-
+
Returns:
['warning line 1', 'warning line 2', ...]
'''
@@ -90,6 +90,6 @@ def GenerateDuplicateShortcutsWarnings(uberclique, tc_project):
groups[group] = ShortcutGroup(group)
groups[group].AddClique(c)
for group in groups.values():
- warnings += group.GenerateWarnings(tc_project)
+ warnings += group.GenerateWarnings(tc_project)
return warnings
diff --git a/tools/grit/grit/shortcuts_unittests.py b/tools/grit/grit/shortcuts_unittests.py
index 10c42eb..9517346 100644
--- a/tools/grit/grit/shortcuts_unittests.py
+++ b/tools/grit/grit/shortcuts_unittests.py
@@ -19,28 +19,28 @@ from grit import tclib
from grit.gather import rc
class ShortcutsUnittest(unittest.TestCase):
-
+
def setUp(self):
self.uq = clique.UberClique()
-
+
def testFunctionality(self):
c = self.uq.MakeClique(tclib.Message(text="Hello &there"))
c.AddToShortcutGroup('group_name')
c = self.uq.MakeClique(tclib.Message(text="Howdie &there partner"))
c.AddToShortcutGroup('group_name')
-
+
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
self.failUnless(warnings)
-
+
def testAmpersandEscaping(self):
c = self.uq.MakeClique(tclib.Message(text="Hello &there"))
c.AddToShortcutGroup('group_name')
c = self.uq.MakeClique(tclib.Message(text="S&&T are the &letters S and T"))
c.AddToShortcutGroup('group_name')
-
+
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
self.failUnless(len(warnings) == 0)
-
+
def testDialog(self):
dlg = rc.Dialog('''\
IDD_SIDEBAR_RSS_PANEL_PROPPAGE DIALOGEX 0, 0, 239, 221
@@ -52,14 +52,14 @@ BEGIN
PUSHBUTTON "&Remove",IDC_SIDEBAR_RSS_REMOVE,183,200,56,14
PUSHBUTTON "&Edit",IDC_SIDEBAR_RSS_EDIT,123,200,56,14
CONTROL "&Automatically add commonly viewed clips",
- IDC_SIDEBAR_RSS_AUTO_ADD,"Button",BS_AUTOCHECKBOX |
+ IDC_SIDEBAR_RSS_AUTO_ADD,"Button",BS_AUTOCHECKBOX |
BS_MULTILINE | WS_TABSTOP,0,200,120,17
PUSHBUTTON "",IDC_SIDEBAR_RSS_HIDDEN,179,208,6,6,NOT WS_VISIBLE
LTEXT "You can display clips from blogs, news sites, and other online sources.",
IDC_STATIC,0,0,239,10
- LISTBOX IDC_SIDEBAR_DISPLAYED_FEED_LIST,0,69,239,127,LBS_SORT |
- LBS_OWNERDRAWFIXED | LBS_HASSTRINGS |
- LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_HSCROLL |
+ LISTBOX IDC_SIDEBAR_DISPLAYED_FEED_LIST,0,69,239,127,LBS_SORT |
+ LBS_OWNERDRAWFIXED | LBS_HASSTRINGS |
+ LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_HSCROLL |
WS_TABSTOP
LTEXT "Add a clip from a recently viewed website by clicking Add Recent Clips.",
IDC_STATIC,0,13,141,19
@@ -70,7 +70,7 @@ BEGIN
END''')
dlg.SetUberClique(self.uq)
dlg.Parse()
-
+
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT')
self.failUnless(len(warnings) == 0)
diff --git a/tools/grit/grit/tclib.py b/tools/grit/grit/tclib.py
index bfacb94..388c4b9 100644
--- a/tools/grit/grit/tclib.py
+++ b/tools/grit/grit/tclib.py
@@ -20,7 +20,7 @@ def Identity(i):
class BaseMessage(object):
'''Base class with methods shared by Message and Translation.
'''
-
+
def __init__(self, text='', placeholders=[], description='', meaning=''):
self.parts = []
self.placeholders = []
@@ -28,7 +28,7 @@ class BaseMessage(object):
self.meaning = meaning
self.dirty = True # True if self.id is (or might be) wrong
self.id = 0
-
+
if text != '':
if not placeholders or placeholders == []:
self.AppendText(text)
@@ -48,11 +48,11 @@ class BaseMessage(object):
self.AppendText(chunk)
for key in tag_map.keys():
assert tag_map[key][1] != 0
-
+
def GetRealContent(self, escaping_function=Identity):
'''Returns the original content, i.e. what your application and users
will see.
-
+
Specify a function to escape each translateable bit, if you like.
'''
bits = []
@@ -62,7 +62,7 @@ class BaseMessage(object):
else:
bits.append(item.GetOriginal())
return ''.join(bits)
-
+
def GetPresentableContent(self):
presentable_content = []
for part in self.parts:
@@ -71,7 +71,7 @@ class BaseMessage(object):
else:
presentable_content.append(part)
return ''.join(presentable_content)
-
+
def AppendPlaceholder(self, placeholder):
assert isinstance(placeholder, Placeholder)
dup = False
@@ -79,19 +79,19 @@ class BaseMessage(object):
if other.presentation == placeholder.presentation:
assert other.original == placeholder.original
dup = True
-
+
if not dup:
self.placeholders.append(placeholder)
self.parts.append(placeholder)
self.dirty = True
-
+
def AppendText(self, text):
assert isinstance(text, types.StringTypes)
assert text != ''
-
+
self.parts.append(text)
self.dirty = True
-
+
def GetContent(self):
'''Returns the parts of the message. You may modify parts if you wish.
Note that you must not call GetId() on this object until you have finished
@@ -99,34 +99,34 @@ class BaseMessage(object):
'''
self.dirty = True # user might modify content
return self.parts
-
+
def GetDescription(self):
return self.description
-
+
def SetDescription(self, description):
self.description = description
-
+
def GetMeaning(self):
return self.meaning
-
+
def GetId(self):
if self.dirty:
self.id = self.GenerateId()
self.dirty = False
return self.id
-
+
def GenerateId(self):
# Must use a UTF-8 encoded version of the presentable content, along with
# the meaning attribute, to match the TC.
return grit.extern.tclib.GenerateMessageId(
self.GetPresentableContent().encode('utf-8'), self.meaning)
-
+
def GetPlaceholders(self):
return self.placeholders
-
+
def FillTclibBaseMessage(self, msg):
msg.SetDescription(self.description.encode('utf-8'))
-
+
for part in self.parts:
if isinstance(part, Placeholder):
ph = grit.extern.tclib.Placeholder(
@@ -139,13 +139,13 @@ class BaseMessage(object):
class Message(BaseMessage):
- '''A message.'''
-
+ '''A message.'''
+
def __init__(self, text='', placeholders=[], description='', meaning='',
assigned_id=None):
BaseMessage.__init__(self, text, placeholders, description, meaning)
self.assigned_id = assigned_id
-
+
def ToTclibMessage(self):
msg = grit.extern.tclib.Message('utf-8', meaning=self.meaning)
self.FillTclibBaseMessage(msg)
@@ -161,18 +161,18 @@ class Message(BaseMessage):
class Translation(BaseMessage):
'''A translation.'''
-
+
def __init__(self, text='', id='', placeholders=[], description='', meaning=''):
BaseMessage.__init__(self, text, placeholders, description, meaning)
self.id = id
-
+
def GetId(self):
assert id != '', "ID has not been set."
return self.id
-
+
def SetId(self, id):
self.id = id
-
+
def ToTclibMessage(self):
msg = grit.extern.tclib.Message(
'utf-8', id=self.id, meaning=self.meaning)
@@ -183,13 +183,13 @@ class Translation(BaseMessage):
class Placeholder(grit.extern.tclib.Placeholder):
'''Modifies constructor to accept a Unicode string
'''
-
+
# Must match placeholder presentation names
_NAME_RE = re.compile('[A-Za-z0-9_]+')
-
+
def __init__(self, presentation, original, example):
'''Creates a new placeholder.
-
+
Args:
presentation: 'USERNAME'
original: '%s'
@@ -203,13 +203,13 @@ class Placeholder(grit.extern.tclib.Placeholder):
self.presentation = presentation
self.original = original
self.example = example
-
+
def GetPresentation(self):
return self.presentation
-
+
def GetOriginal(self):
return self.original
-
+
def GetExample(self):
return self.example
diff --git a/tools/grit/grit/tclib_unittest.py b/tools/grit/grit/tclib_unittest.py
index 352d917..0d20f1a 100644
--- a/tools/grit/grit/tclib_unittest.py
+++ b/tools/grit/grit/tclib_unittest.py
@@ -24,19 +24,19 @@ class TclibUnittest(unittest.TestCase):
msg = tclib.Message(text=u'Hello Earthlings')
self.failUnless(msg.GetPresentableContent() == 'Hello Earthlings')
self.failUnless(isinstance(msg.GetPresentableContent(), types.StringTypes))
-
+
def testGetAttr(self):
msg = tclib.Message()
msg.AppendText(u'Hello') # Tests __getattr__
self.failUnless(msg.GetPresentableContent() == 'Hello')
self.failUnless(isinstance(msg.GetPresentableContent(), types.StringTypes))
-
+
def testAll(self):
text = u'Howdie USERNAME'
phs = [tclib.Placeholder(u'USERNAME', u'%s', 'Joi')]
msg = tclib.Message(text=text, placeholders=phs)
self.failUnless(msg.GetPresentableContent() == 'Howdie USERNAME')
-
+
trans = tclib.Translation(text=text, placeholders=phs)
self.failUnless(trans.GetPresentableContent() == 'Howdie USERNAME')
self.failUnless(isinstance(trans.GetPresentableContent(), types.StringTypes))
@@ -47,7 +47,7 @@ class TclibUnittest(unittest.TestCase):
self.failUnless(msg.GetPresentableContent() == text)
from_list = msg.GetContent()[0]
self.failUnless(from_list == text)
-
+
def testRegressionTranslationInherited(self):
'''Regression tests a bug that was caused by grit.tclib.Translation
inheriting from the translation console's Translation object
@@ -62,7 +62,7 @@ class TclibUnittest(unittest.TestCase):
placeholders=msg.GetPlaceholders())
content = transl.GetContent()
self.failUnless(isinstance(content[3], types.UnicodeType))
-
+
def testFingerprint(self):
# This has Windows line endings. That is on purpose.
id = grit.extern.tclib.GenerateMessageId(
diff --git a/tools/grit/grit/test_suite_all.py b/tools/grit/grit/test_suite_all.py
index a94b160..83add26 100644
--- a/tools/grit/grit/test_suite_all.py
+++ b/tools/grit/grit/test_suite_all.py
@@ -45,7 +45,7 @@ class TestSuiteAll(unittest.TestSuite):
from grit import shortcuts_unittests
from grit.gather import muppet_strings_unittest
from grit.node.custom import filename_unittest
-
+
test_classes = [
base_unittest.NodeUnittest,
io_unittest.FileNodeUnittest,
diff --git a/tools/grit/grit/tool/build.py b/tools/grit/grit/tool/build.py
index a8488e8..a2683fd 100644
--- a/tools/grit/grit/tool/build.py
+++ b/tools/grit/grit/tool/build.py
@@ -47,7 +47,7 @@ Options:
-o OUTPUTDIR Specify what directory output paths are relative to.
Defaults to the current directory.
-
+
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
@@ -90,7 +90,7 @@ are exported to translation interchange files (e.g. XMB files), etc.
# Default file-creation function is built-in file(). Only done to allow
# overriding by unit test.
self.fo_create = file
-
+
# key/value pairs of C-preprocessor like defines that are used for
# conditional output of resources
self.defines = {}
@@ -98,24 +98,24 @@ are exported to translation interchange files (e.g. XMB files), etc.
# self.res is a fully-populated resource tree if Run()
# has been called, otherwise None.
self.res = None
-
+
# Set to a list of filenames for the output nodes that are relative
# to the current working directory. They are in the same order as the
# output nodes in the file.
self.scons_targets = None
-
+
# static method
def ProcessNode(node, output_node, outfile):
'''Processes a node in-order, calling its formatter before and after
recursing to its children.
-
+
Args:
node: grit.node.base.Node subclass
output_node: grit.node.io.File
outfile: open filehandle
'''
base_dir = util.dirname(output_node.GetOutputFilename())
-
+
try:
formatter = node.ItemFormatter(output_node.GetType())
if formatter:
@@ -153,7 +153,7 @@ are exported to translation interchange files (e.g. XMB files), etc.
for output in self.res.GetOutputFiles():
output.output_filename = os.path.abspath(os.path.join(
self.output_directory, output.GetFilename()))
-
+
for output in self.res.GetOutputFiles():
self.VerboseOut('Creating %s...' % output.GetFilename())
@@ -173,25 +173,25 @@ are exported to translation interchange files (e.g. XMB files), etc.
if output.GetType() != 'data_package':
outfile = util.WrapOutputStream(outfile, encoding)
-
+
# Set the context, for conditional inclusion of resources
self.res.SetOutputContext(output.GetLanguage(), self.defines)
-
+
# TODO(joi) Handle this more gracefully
import grit.format.rc_header
grit.format.rc_header.Item.ids_ = {}
-
+
# Iterate in-order through entire resource tree, calling formatters on
# the entry into a node and on exit out of it.
self.ProcessNode(self.res, output, outfile)
outfile.close()
self.VerboseOut(' done.\n')
-
+
# Print warnings if there are any duplicate shortcuts.
print '\n'.join(shortcuts.GenerateDuplicateShortcutsWarnings(
self.res.UberClique(), self.res.GetTcProject()))
-
+
# Print out any fallback warnings, and missing translation errors, and
# exit with an error code if there are missing translations in a non-pseudo
# build
diff --git a/tools/grit/grit/tool/count.py b/tools/grit/grit/tool/count.py
index 5beaaa3..52e6bbe 100644
--- a/tools/grit/grit/tool/count.py
+++ b/tools/grit/grit/tool/count.py
@@ -26,19 +26,19 @@ class CountMessage(interface.Tool):
def ShortDescription(self):
return 'Exports all translateable messages into an XMB file.'
-
+
def Run(self, opts, args):
self.SetOptions(opts)
-
+
id = args[0]
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
res_tree.OnlyTheseTranslations([])
res_tree.RunGatherers(True)
-
+
count = 0
for c in res_tree.UberClique().AllCliques():
if c.GetId() == id:
count += 1
-
+
print "There are %d occurrences of message %s." % (count, id)
diff --git a/tools/grit/grit/tool/diff_structures.py b/tools/grit/grit/tool/diff_structures.py
index 55b144a..b4d6b624 100644
--- a/tools/grit/grit/tool/diff_structures.py
+++ b/tools/grit/grit/tool/diff_structures.py
@@ -37,7 +37,7 @@ for gatherer in structure._GATHERERS:
class DiffStructures(interface.Tool):
__doc__ = _class_doc
-
+
def __init__(self):
self.section = None
self.left_encoding = 'cp1252'
@@ -66,24 +66,24 @@ class DiffStructures(interface.Tool):
if len(args) != 2:
print "Incorrect usage - 'grit help sdiff' for usage details."
return 2
-
+
if 'P4DIFF' not in os.environ:
print "Environment variable P4DIFF not set; defaulting to 'windiff'."
diff_program = 'windiff'
else:
diff_program = os.environ['P4DIFF']
-
+
left_trans = self.MakeStaticTranslation(args[0], self.left_encoding)
try:
try:
right_trans = self.MakeStaticTranslation(args[1], self.right_encoding)
-
+
os.system('%s %s %s' % (diff_program, left_trans, right_trans))
finally:
os.unlink(right_trans)
finally:
os.unlink(left_trans)
-
+
def MakeStaticTranslation(self, original_filename, encoding):
"""Given the name of the structure type (self.structure_type), the filename
of the file holding the original structure, and optionally the "section" key
@@ -92,10 +92,10 @@ class DiffStructures(interface.Tool):
(i.e. one where all translateable parts have been replaced with "TTTTTT")
and returns the temporary file name. It is the caller's responsibility to
delete the file when finished.
-
+
Args:
original_filename: 'c:\\bingo\\bla.rc'
-
+
Return:
'c:\\temp\\werlkjsdf334.tmp'
"""
@@ -103,12 +103,12 @@ class DiffStructures(interface.Tool):
original_filename, extkey=self.section, encoding=encoding)
original.Parse()
translated = original.Translate(constants.CONSTANT_LANGUAGE, False)
-
+
fname = tempfile.mktemp()
fhandle = file(fname, 'w')
writer = util.WrapOutputStream(fhandle)
writer.write("Original filename: %s\n=============\n\n" % original_filename)
writer.write(translated) # write in UTF-8
fhandle.close()
-
+
return fname
diff --git a/tools/grit/grit/tool/interface.py b/tools/grit/grit/tool/interface.py
index 0223db0..8470b37 100644
--- a/tools/grit/grit/tool/interface.py
+++ b/tools/grit/grit/tool/interface.py
@@ -20,14 +20,14 @@ class Tool(object):
def ShortDescription(self):
'''Returns a short description of the functionality of the tool.'''
raise NotImplementedError()
-
+
def Run(self, global_options, my_arguments):
'''Runs the tool.
-
+
Args:
global_options: object grit_runner.Options
my_arguments: [arg1 arg2 ...]
-
+
Return:
0 for success, non-0 for error
'''
@@ -36,22 +36,22 @@ class Tool(object):
#
# Base class implementation
#
-
+
def __init__(self):
self.o = None
-
+
def SetOptions(self, opts):
self.o = opts
-
+
def Out(self, text):
'''Always writes out 'text'.'''
self.o.output_stream.write(text)
-
+
def VerboseOut(self, text):
'''Writes out 'text' if the verbose option is on.'''
if self.o.verbose:
self.o.output_stream.write(text)
-
+
def ExtraVerboseOut(self, text):
'''Writes out 'text' if the extra-verbose option is on.
'''
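
For reference, a minimal hypothetical subclass of the Tool contract shown above (not part of this change; Python 2 idioms to match the tree):

from grit.tool import interface

class HelloTool(interface.Tool):
  '''Hypothetical example tool, only to illustrate the base-class contract.'''

  def ShortDescription(self):
    return 'Prints a greeting and exits.'

  def Run(self, global_options, my_arguments):
    self.SetOptions(global_options)               # stores opts on self.o
    self.Out('Hello from a grit tool\n')          # always written
    self.VerboseOut('args: %s\n' % my_arguments)  # only when verbose is on
    return 0                                      # 0 for success, non-0 for error
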
diff --git a/tools/grit/grit/tool/menu_from_parts.py b/tools/grit/grit/tool/menu_from_parts.py
index 370678e..7fa77b6 100644
--- a/tools/grit/grit/tool/menu_from_parts.py
+++ b/tools/grit/grit/tool/menu_from_parts.py
@@ -29,18 +29,18 @@ to being one message for the whole menu.'''
def ShortDescription(self):
return ('Create translations of whole menus from existing translations of '
'menu items.')
-
+
def Run(self, globopt, args):
self.SetOptions(globopt)
assert len(args) == 2, "Need exactly two arguments, the XTB file and the output file"
-
+
xtb_file = args[0]
output_file = args[1]
-
+
grd = grd_reader.Parse(self.o.input, debug=self.o.extra_verbose)
grd.OnlyTheseTranslations([]) # don't load translations
grd.RunGatherers(recursive = True)
-
+
xtb = {}
def Callback(msg_id, parts):
msg = []
@@ -55,14 +55,14 @@ to being one message for the whole menu.'''
f = file(xtb_file)
xtb_reader.Parse(f, Callback)
f.close()
-
+
translations = [] # list of translations as per transl2tc.WriteTranslations
for node in grd:
if node.name == 'structure' and node.attrs['type'] == 'menu':
assert len(node.GetCliques()) == 1
message = node.GetCliques()[0].GetMessage()
translation = []
-
+
contents = message.GetContent()
for part in contents:
if isinstance(part, types.StringTypes):
@@ -74,10 +74,10 @@ to being one message for the whole menu.'''
translation.append(xtb[id])
else:
translation.append(part.GetPresentation())
-
+
if len(translation):
translations.append([message.GetId(), ''.join(translation)])
-
+
f = util.WrapOutputStream(file(output_file, 'w'))
transl2tc.TranslationToTc.WriteTranslations(f, translations)
f.close()
diff --git a/tools/grit/grit/tool/newgrd.py b/tools/grit/grit/tool/newgrd.py
index c5db92b..060f29b 100644
--- a/tools/grit/grit/tool/newgrd.py
+++ b/tools/grit/grit/tool/newgrd.py
@@ -59,7 +59,7 @@ where in the file.'''
def ShortDescription(self):
return 'Create a new empty .grd file.'
-
+
def Run(self, global_options, my_arguments):
if not len(my_arguments) == 1:
print 'This tool requires exactly one argument, the name of the output file.'
diff --git a/tools/grit/grit/tool/postprocess_interface.py b/tools/grit/grit/tool/postprocess_interface.py
index 7f057a6..c0381ee 100644
--- a/tools/grit/grit/tool/postprocess_interface.py
+++ b/tools/grit/grit/tool/postprocess_interface.py
@@ -21,9 +21,9 @@ class PostProcessor(object):
Args:
rctext: string containing the contents of the RC file being processed.
rcpath: the path used to access the file.
- grdtext: the root node of the grd xml data generated by
+ grdtext: the root node of the grd xml data generated by
the rc2grd tool.
-
+
Return:
The root node of the processed GRD tree.
'''
diff --git a/tools/grit/grit/tool/postprocess_unittest.py b/tools/grit/grit/tool/postprocess_unittest.py
index 92bbbed..e408e74 100644
--- a/tools/grit/grit/tool/postprocess_unittest.py
+++ b/tools/grit/grit/tool/postprocess_unittest.py
@@ -21,28 +21,28 @@ from grit.tool import rc2grd
class PostProcessingUnittest(unittest.TestCase):
-
+
def testPostProcessing(self):
rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
- // Some random description
+ // Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
-END
+END
'''
tool = rc2grd.Rc2Grd()
class DummyOpts(object):
verbose = False
extra_verbose = False
- tool.o = DummyOpts()
+ tool.o = DummyOpts()
tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor'
result = tool.Process(rctext, '.\resource.rc')
-
+
self.failUnless(
result.children[2].children[2].children[0].attrs['name'] == 'SMART_STRING_1')
self.failUnless(
result.children[2].children[2].children[1].attrs['name'] == 'SMART_STRING_2')
-
+
class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
'''
Post processing replaces all message name attributes containing "DUMMY" to
@@ -56,7 +56,7 @@ class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
m = smarter.search(name_attr)
if m:
node.attrs['name'] = 'SMART' + m.group(2)
- return grdnode
+ return grdnode
if __name__ == '__main__':
unittest.main()
diff --git a/tools/grit/grit/tool/preprocess_interface.py b/tools/grit/grit/tool/preprocess_interface.py
index 0efc329..6d4166a 100644
--- a/tools/grit/grit/tool/preprocess_interface.py
+++ b/tools/grit/grit/tool/preprocess_interface.py
@@ -19,7 +19,7 @@ class PreProcessor(object):
Args:
rctext: string containing the contents of the RC file being processed
rcpath: the path used to access the file.
-
+
Return:
The processed text.
'''
diff --git a/tools/grit/grit/tool/preprocess_unittest.py b/tools/grit/grit/tool/preprocess_unittest.py
index 6fe55fb..ff9ad39 100644
--- a/tools/grit/grit/tool/preprocess_unittest.py
+++ b/tools/grit/grit/tool/preprocess_unittest.py
@@ -20,27 +20,27 @@ from grit.tool import rc2grd
class PreProcessingUnittest(unittest.TestCase):
-
+
def testPreProcessing(self):
tool = rc2grd.Rc2Grd()
class DummyOpts(object):
verbose = False
extra_verbose = False
- tool.o = DummyOpts()
+ tool.o = DummyOpts()
tool.pre_process = 'grit.tool.preprocess_unittest.DummyPreProcessor'
result = tool.Process('', '.\resource.rc')
-
+
self.failUnless(
result.children[2].children[2].children[0].attrs['name'] == 'DUMMY_STRING_1')
-
+
class DummyPreProcessor(grit.tool.preprocess_interface.PreProcessor):
def Process(self, rctext, rcpath):
rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
- // Some random description
+ // Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
-END
+END
'''
return rctext
diff --git a/tools/grit/grit/tool/rc2grd.py b/tools/grit/grit/tool/rc2grd.py
index ac7b90b..638ca2f 100644
--- a/tools/grit/grit/tool/rc2grd.py
+++ b/tools/grit/grit/tool/rc2grd.py
@@ -101,7 +101,7 @@ cleaned up manually.
OPTIONS may be any of the following:
-e ENCODING Specify the ENCODING of the .rc file. Default is 'cp1252'.
-
+
-h TYPE Specify the TYPE attribute for HTML structures.
Default is 'tr_html'.
@@ -139,10 +139,10 @@ is #if 0-ed out it will still be included in the output of this tool
Therefore, if your .rc file contains sections like this, you should run the
C preprocessor on the .rc file or manually edit it before using this tool.
'''
-
+
def ShortDescription(self):
return 'A tool for converting .rc source files to .grd files.'
-
+
def __init__(self):
self.input_encoding = 'cp1252'
self.html_type = 'tr_html'
@@ -151,7 +151,7 @@ C preprocessor on the .rc file or manually edit it before using this tool.
self.role_model = None
self.pre_process = None
self.post_process = None
-
+
def ParseOptions(self, args):
'''Given a list of arguments, set this object's options and return
all non-option arguments.
@@ -181,32 +181,32 @@ C preprocessor on the .rc file or manually edit it before using this tool.
'.rc file to process.')
return 2
self.SetOptions(opts)
-
+
path = args[0]
out_path = os.path.join(util.dirname(path),
os.path.splitext(os.path.basename(path))[0] + '.grd')
-
+
rcfile = util.WrapInputStream(file(path, 'r'), self.input_encoding)
rctext = rcfile.read()
-
+
grd_text = unicode(self.Process(rctext, path))
rcfile.close()
-
+
outfile = util.WrapOutputStream(file(out_path, 'w'), 'utf-8')
outfile.write(grd_text)
outfile.close()
-
+
print 'Wrote output file %s.\nPlease check for TODO items in the file.' % out_path
-
+
def Process(self, rctext, rc_path):
'''Processes 'rctext' and returns a resource tree corresponding to it.
-
+
Args:
rctext: complete text of the rc file
rc_path: 'resource\resource.rc'
-
+
Return:
grit.node.base.Node subclass
'''
@@ -239,15 +239,15 @@ C preprocessor on the .rc file or manually edit it before using this tool.
assert (isinstance(includes, grit.node.empty.IncludesNode) and
isinstance(structures, grit.node.empty.StructuresNode) and
isinstance(messages, grit.node.empty.MessagesNode))
-
+
self.AddIncludes(rctext, includes)
self.AddStructures(rctext, structures, os.path.basename(rc_path))
self.AddMessages(rctext, messages)
-
+
self.VerboseOut('Validating that all IDs are unique...\n')
root.ValidateUniqueIds()
self.ExtraVerboseOut('Done validating that all IDs are unique.\n')
-
+
if self.post_process:
postprocess_class = util.NewClassInstance(self.post_process,
postprocess_interface.PostProcessor)
@@ -271,8 +271,8 @@ C preprocessor on the .rc file or manually edit it before using this tool.
if type != 'HTML':
self.VerboseOut('Processing %s with ID %s (filename: %s)\n' % (type, id, fname))
node.AddChild(include.IncludeNode.Construct(node, id, type, fname))
-
-
+
+
def AddStructures(self, rctext, node, rc_filename):
'''Scans 'rctext' for structured resources (e.g. menus, dialogs, version
information resources and HTML templates) and adds each as a <structure>
@@ -285,7 +285,7 @@ C preprocessor on the .rc file or manually edit it before using this tool.
if type == 'HTML':
node.AddChild(structure.StructureNode.Construct(
node, id, self.html_type, fname, self.html_encoding))
-
+
# Then add all RC includes
def AddStructure(type, id):
self.VerboseOut('Processing %s with ID %s\n' % (type, id))
@@ -298,8 +298,8 @@ C preprocessor on the .rc file or manually edit it before using this tool.
AddStructure('dialog', m.group('id'))
for m in _VERSIONINFO.finditer(rctext):
AddStructure('version', m.group('id'))
-
-
+
+
def AddMessages(self, rctext, node):
'''Scans 'rctext' for all messages in string tables, preprocesses them as
much as possible for placeholders (e.g. messages containing $1, $2 or %s, %d
@@ -314,27 +314,27 @@ C preprocessor on the .rc file or manually edit it before using this tool.
for cm in _COMMENT_TEXT.finditer(comment_block):
comment_text.append(cm.group('text'))
comment_text = ' '.join(comment_text)
-
+
id = mm.group('id')
text = rc.Section.UnEscape(mm.group('text'))
-
+
self.VerboseOut('Processing message %s (text: "%s")\n' % (id, text))
-
+
msg_obj = self.Placeholderize(text)
-
+
# Messages that contain only placeholders do not need translation.
is_translateable = False
for item in msg_obj.GetContent():
if isinstance(item, types.StringTypes):
if not _WHITESPACE_ONLY.match(item):
is_translateable = True
-
+
if self.not_localizable_re.search(comment_text):
is_translateable = False
-
+
message_meaning = ''
internal_comment = ''
-
+
# If we have a "role model" (existing GRD file) and this node exists
# in the role model, use the description, meaning and translateable
# attributes from the role model.
@@ -345,29 +345,29 @@ C preprocessor on the .rc file or manually edit it before using this tool.
message_meaning = role_node.attrs['meaning']
comment_text = role_node.attrs['desc']
internal_comment = role_node.attrs['internal_comment']
-
+
# For nontranslateable messages, we don't want the complexity of
# placeholderizing everything.
if not is_translateable:
msg_obj = tclib.Message(text=text)
-
+
msg_node = message.MessageNode.Construct(node, msg_obj, id,
desc=comment_text,
translateable=is_translateable,
meaning=message_meaning)
msg_node.attrs['internal_comment'] = internal_comment
-
+
node.AddChild(msg_node)
self.ExtraVerboseOut('Done processing message %s\n' % id)
-
-
+
+
def Placeholderize(self, text):
'''Creates a tclib.Message object from 'text', attempting to recognize
a few different formats of text that can be automatically placeholderized
(HTML code, printf-style format strings, and FormatMessage-style format
strings).
'''
-
+
try:
# First try HTML placeholderizing.
# TODO(joi) Allow use of non-TotalRecall flavors of HTML placeholderizing
@@ -375,7 +375,7 @@ C preprocessor on the .rc file or manually edit it before using this tool.
for item in msg.GetContent():
if not isinstance(item, types.StringTypes):
return msg # Contained at least one placeholder, so we're done
-
+
# HTML placeholderization didn't do anything, so try to find printf or
# FormatMessage format specifiers and change them into placeholders.
msg = tclib.Message()
@@ -388,14 +388,14 @@ C preprocessor on the .rc file or manually edit it before using this tool.
todo_counter += 1
elif part != '':
msg.AppendText(part)
-
+
if self.role_model and len(parts) > 1: # there are TODO placeholders
role_model_msg = self.role_model.UberClique().BestCliqueByOriginalText(
msg.GetRealContent(), '')
if role_model_msg:
# replace wholesale to get placeholder names and examples
msg = role_model_msg
-
+
return msg
except:
print 'Exception processing message with text "%s"' % text
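
The Process() entry point documented above can also be driven directly; a sketch mirroring the DummyOpts pattern from the unit tests elsewhere in this change (filenames are hypothetical):

from grit import util
from grit.tool import rc2grd

class DummyOpts(object):
  verbose = False
  extra_verbose = False

tool = rc2grd.Rc2Grd()
tool.o = DummyOpts()
rcfile = util.WrapInputStream(file('resource.rc', 'r'), tool.input_encoding)
root = tool.Process(rcfile.read(), 'resource.rc')  # returns a grit.node.base.Node subclass
rcfile.close()
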
diff --git a/tools/grit/grit/tool/rc2grd_unittest.py b/tools/grit/grit/tool/rc2grd_unittest.py
index ab3f884..836d196 100644
--- a/tools/grit/grit/tool/rc2grd_unittest.py
+++ b/tools/grit/grit/tool/rc2grd_unittest.py
@@ -27,7 +27,7 @@ class Rc2GrdUnittest(unittest.TestCase):
msg = tool.Placeholderize(original)
self.failUnless(msg.GetPresentableContent() == "Hello TODO_0001, how are you? I'm TODO_0002 years old!")
self.failUnless(msg.GetRealContent() == original)
-
+
def testHtmlPlaceholderize(self):
tool = rc2grd.Rc2Grd()
original = "Hello <b>[USERNAME]</b>, how are you? I'm [AGE] years old!"
@@ -35,7 +35,7 @@ class Rc2GrdUnittest(unittest.TestCase):
self.failUnless(msg.GetPresentableContent() ==
"Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, how are you? I'm X_AGE_X years old!")
self.failUnless(msg.GetRealContent() == original)
-
+
def testMenuWithoutWhitespaceRegression(self):
# There was a problem in the original regular expression for parsing out
# menu sections, that would parse the following block of text as a single
@@ -72,11 +72,11 @@ END
extra_verbose = False
tool.not_localizable_re = re.compile('')
tool.o = DummyNode()
-
+
rc_text = '''STRINGTABLE\nBEGIN\nID_BINGO "<SPAN id=hp style='BEHAVIOR: url(#default#homepage)'></SPAN><script>if (!hp.isHomePage('[$~HOMEPAGE~$]')) {document.write(""<a href=\\""[$~SETHOMEPAGEURL~$]\\"" >Set As Homepage</a> - "");}</script>"\nEND\n'''
tool.AddMessages(rc_text, tool.o)
self.failUnless(tool.o.node.GetCdata().find('Set As Homepage') != -1)
-
+
# TODO(joi) Improve the HTML parser to support translateables inside
# <script> blocks?
self.failUnless(tool.o.node.attrs['translateable'] == 'false')
@@ -111,7 +111,7 @@ The installation will not proceed if you choose to cancel.
</messages>
</release>
</grit>'''), dir='.')
-
+
# test rig
class DummyOpts(object):
verbose = False
diff --git a/tools/grit/grit/tool/resize.py b/tools/grit/grit/tool/resize.py
index 23451fd..93e3c1c 100644
--- a/tools/grit/grit/tool/resize.py
+++ b/tools/grit/grit/tool/resize.py
@@ -170,7 +170,7 @@ near the top of the file, before you open it in Visual Studio.
# TODO(joi) It would be cool to have this tool note the Perforce revision
# of the original RC file somewhere, such that the <skeleton> node could warn
# if the original RC file gets updated without the skeleton file being updated.
-
+
# TODO(joi) Would be cool to have option to add the files to Perforce
def __init__(self):
@@ -179,7 +179,7 @@ near the top of the file, before you open it in Visual Studio.
self.base_folder = '.'
self.codepage_number = 1252
self.codepage_number_specified_explicitly = False
-
+
def SetLanguage(self, lang):
'''Sets the language code to output things in.
'''
@@ -196,10 +196,10 @@ near the top of the file, before you open it in Visual Studio.
def ShortDescription(self):
return 'Generate a file where you can resize a given dialog.'
-
+
def Run(self, opts, args):
self.SetOptions(opts)
-
+
own_opts, args = getopt.getopt(args, 'l:f:c:D:')
for key, val in own_opts:
if key == '-l':
@@ -212,11 +212,11 @@ near the top of the file, before you open it in Visual Studio.
if key == '-D':
name, val = build.ParseDefine(val)
self.defines[name] = val
-
+
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
res_tree.OnlyTheseTranslations([self.lang])
res_tree.RunGatherers(True)
-
+
# Dialog IDs are either explicitly listed, or we output all dialogs from the
# .grd file
dialog_ids = args
@@ -224,31 +224,31 @@ near the top of the file, before you open it in Visual Studio.
for node in res_tree:
if node.name == 'structure' and node.attrs['type'] == 'dialog':
dialog_ids.append(node.attrs['name'])
-
+
self.Process(res_tree, dialog_ids)
-
+
def Process(self, grd, dialog_ids):
'''Outputs an RC file and header file for the dialog 'dialog_id' stored in
resource tree 'grd', to self.base_folder, as discussed in this class's
documentation.
-
+
Arguments:
grd: grd = grd_reader.Parse(...); grd.RunGatherers()
dialog_ids: ['IDD_MYDIALOG', 'IDD_OTHERDIALOG']
'''
grd.SetOutputContext(self.lang, self.defines)
-
+
project_name = dialog_ids[0]
-
+
dir_path = os.path.join(self.base_folder, project_name)
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
-
+
# If this fails then we're not on Windows (or you don't have the required
# win32all Python libraries installed), so what are you doing mucking
# about with RC files anyway? :)
import pythoncom
-
+
# Create the .vcproj file
project_text = PROJECT_TEMPLATE.replace(
'[[PROJECT_GUID]]', str(pythoncom.CreateGuid())
@@ -256,7 +256,7 @@ near the top of the file, before you open it in Visual Studio.
fname = os.path.join(dir_path, '%s.vcproj' % project_name)
self.WriteFile(fname, project_text)
print "Wrote %s" % fname
-
+
# Create the .rc file
# Output all <include> nodes since the dialogs might depend on them (e.g.
# for icons and bitmaps).
@@ -269,7 +269,7 @@ near the top of the file, before you open it in Visual Studio.
rc_text = RC_TEMPLATE.replace('[[CODEPAGE_NUM]]',
str(self.codepage_number))
rc_text = rc_text.replace('[[INCLUDES]]', ''.join(include_items))
-
+
# Then output the dialogs we have been asked to output.
dialogs = []
for dialog_id in dialog_ids:
@@ -278,11 +278,11 @@ near the top of the file, before you open it in Visual Studio.
formatter = node.ItemFormatter('rc_all')
dialogs.append(formatter.Format(node, self.lang))
rc_text = rc_text.replace('[[DIALOGS]]', ''.join(dialogs))
-
+
fname = os.path.join(dir_path, '%s.rc' % project_name)
self.WriteFile(fname, rc_text, self.GetEncoding())
print "Wrote %s" % fname
-
+
# Create the resource.h file
header_defines = []
for node in grd:
@@ -293,7 +293,7 @@ near the top of the file, before you open it in Visual Studio.
fname = os.path.join(dir_path, 'resource.h')
self.WriteFile(fname, header_text)
print "Wrote %s" % fname
-
+
def WriteFile(self, filename, contents, encoding='cp1252'):
f = util.WrapOutputStream(file(filename, 'wb'), encoding)
f.write(contents)
diff --git a/tools/grit/grit/tool/test.py b/tools/grit/grit/tool/test.py
index 92e387c..aa02c4a 100644
--- a/tools/grit/grit/tool/test.py
+++ b/tools/grit/grit/tool/test.py
@@ -10,10 +10,10 @@ class TestTool(interface.Tool):
tool-specific arguments that it receives. It is intended only for testing,
hence the name :)
'''
-
+
def ShortDescription(self):
return 'A do-nothing tool for testing command-line parsing.'
-
+
def Run(self, global_options, my_arguments):
print 'NOTE This tool is only for testing the parsing of global options and'
print 'tool-specific arguments that it receives. You may have intended to'
diff --git a/tools/grit/grit/tool/toolbar_postprocess.py b/tools/grit/grit/tool/toolbar_postprocess.py
index 5fb2fae..3408985 100644
--- a/tools/grit/grit/tool/toolbar_postprocess.py
+++ b/tools/grit/grit/tool/toolbar_postprocess.py
@@ -28,16 +28,16 @@ class ToolbarPostProcessor(postprocess_interface.PostProcessor):
Args:
rctext: string containing the contents of the RC file being processed.
rcpath: the path used to access the file.
- grdnode: the root node of the grd xml data generated by
+ grdnode: the root node of the grd xml data generated by
the rc2grd tool.
-
+
Return:
The root node of the processed GRD tree.
'''
release = grdnode.children[2]
messages = release.children[2]
-
+
identifiers = grit.node.empty.IdentifiersNode()
identifiers.StartParsing('identifiers', release)
identifiers.EndParsing()
@@ -97,8 +97,8 @@ class ToolbarPostProcessor(postprocess_interface.PostProcessor):
previous_name_attr = name_attr
previous_node = node
-
- self.AddIdentifiers(rctext, identifiers)
+
+ self.AddIdentifiers(rctext, identifiers)
return grdnode
def ConstructNewMessages(self, parent):
diff --git a/tools/grit/grit/tool/toolbar_preprocess.py b/tools/grit/grit/tool/toolbar_preprocess.py
index e453059..ee566f6 100644
--- a/tools/grit/grit/tool/toolbar_preprocess.py
+++ b/tools/grit/grit/tool/toolbar_preprocess.py
@@ -15,7 +15,7 @@ import sys
import codecs
class ToolbarPreProcessor(preprocess_interface.PreProcessor):
- ''' Toolbar PreProcessing class.
+ ''' Toolbar PreProcessing class.
'''
_IDS_COMMAND_MACRO = re.compile(r'(.*IDS_COMMAND)\s*\(([a-zA-Z0-9_]*)\s*,\s*([a-zA-Z0-9_]*)\)(.*)')
@@ -29,7 +29,7 @@ class ToolbarPreProcessor(preprocess_interface.PreProcessor):
Args:
rctext: string containing the contents of the RC file being processed
rcpath: the path used to access the file.
-
+
Return:
The processed text.
'''
@@ -45,8 +45,8 @@ class ToolbarPreProcessor(preprocess_interface.PreProcessor):
mm = self._COMMENT.search(line)
if mm:
line = '%s//' % mm.group(1)
-
- else:
+
+ else:
# Replace $lf by the right linefeed character
line = self._LINE_FEED_PH.sub(r'\\n', line)
@@ -54,8 +54,8 @@ class ToolbarPreProcessor(preprocess_interface.PreProcessor):
mo = self._IDS_COMMAND_MACRO.search(line)
if mo:
line = '%s_%s_%s%s' % (mo.group(1), mo.group(2), mo.group(3), mo.group(4))
-
- ret += (line + '\n')
+
+ ret += (line + '\n')
return ret
diff --git a/tools/grit/grit/tool/transl2tc.py b/tools/grit/grit/tool/transl2tc.py
index 833f415..d0602af 100644
--- a/tools/grit/grit/tool/transl2tc.py
+++ b/tools/grit/grit/tool/transl2tc.py
@@ -48,10 +48,10 @@ refer to, and match together the source messages and translated messages. It
will output a file (OUTPUT_FILE) you can import directly into the TC using the
Bulk Translation Upload tool.
'''
-
+
def ShortDescription(self):
return 'Import existing translations in RC format into the TC'
-
+
def Setup(self, globopt, args):
'''Sets the instance up for use.
'''
@@ -65,20 +65,20 @@ Bulk Translation Upload tool.
limit_file.close()
args = args[2:]
return self.rc2grd.ParseOptions(args)
-
+
def Run(self, globopt, args):
args = self.Setup(globopt, args)
-
+
if len(args) != 3:
self.Out('This tool takes exactly three arguments:\n'
' 1. The path to the original RC file\n'
' 2. The path to the translated RC file\n'
' 3. The output file path.\n')
return 2
-
+
grd = grd_reader.Parse(self.o.input, debug=self.o.extra_verbose)
grd.RunGatherers(recursive = True)
-
+
source_rc = util.WrapInputStream(file(args[0], 'r'), self.rc2grd.input_encoding)
transl_rc = util.WrapInputStream(file(args[1], 'r'), self.rc2grd.input_encoding)
translations = self.ExtractTranslations(grd,
@@ -90,26 +90,26 @@ Bulk Translation Upload tool.
output_file = util.WrapOutputStream(file(args[2], 'w'))
self.WriteTranslations(output_file, translations.items())
output_file.close()
-
+
self.Out('Wrote output file %s' % args[2])
-
+
def ExtractTranslations(self, current_grd, source_rc, source_path, transl_rc, transl_path):
'''Extracts translations from the translated RC file, matching them with
translations in the source RC file to calculate their ID, and correcting
placeholders, limiting output to translateables, etc. using the supplied
.grd file which is the current .grd file for your project.
-
+
If this object's 'limits' attribute is not None but a list, the output of
this function will be further limited to include only messages that have
message IDs in the 'limits' list.
-
+
Args:
current_grd: grit.node.base.Node child, that has had RunGatherers(True) run on it
source_rc: Complete text of source RC file
source_path: Path to the source RC file
transl_rc: Complete text of translated RC file
transl_path: Path to the translated RC file
-
+
Return:
{ id1 : text1, '12345678' : 'Hello USERNAME, howzit?' }
'''
@@ -120,7 +120,7 @@ Bulk Translation Upload tool.
self.VerboseOut('Read %s into GRIT format, running gatherers.\n' % transl_path)
transl_grd.RunGatherers(recursive=True, debug=self.o.extra_verbose)
self.VerboseOut('Done running gatherers for %s.\n' % transl_path)
-
+
# Proceed to create a map from ID to translation, getting the ID from the
# source GRD and the translation from the translated GRD.
id2transl = {}
@@ -128,12 +128,12 @@ Bulk Translation Upload tool.
source_cliques = source_node.GetCliques()
if not len(source_cliques):
continue
-
+
assert 'name' in source_node.attrs, 'All nodes with cliques should have an ID'
node_id = source_node.attrs['name']
self.ExtraVerboseOut('Processing node %s\n' % node_id)
transl_node = transl_grd.GetNodeById(node_id)
-
+
if transl_node:
transl_cliques = transl_node.GetCliques()
if not len(transl_cliques) == len(source_cliques):
@@ -144,7 +144,7 @@ Bulk Translation Upload tool.
else:
self.Out('Warning: No translation for %s, skipping.\n' % node_id)
continue
-
+
if source_node.name == 'message':
# Fixup placeholders as well as possible based on information from
# the current .grd file if they are 'TODO_XXXX' placeholders. We need
@@ -154,10 +154,10 @@ Bulk Translation Upload tool.
current_node = current_grd.GetNodeById(node_id)
if current_node:
assert len(source_cliques) == 1 and len(current_node.GetCliques()) == 1
-
+
source_msg = source_cliques[0].GetMessage()
current_msg = current_node.GetCliques()[0].GetMessage()
-
+
# Only do this for messages whose source version has not changed.
if (source_msg.GetRealContent() != current_msg.GetRealContent()):
self.VerboseOut('Info: Message %s has changed; skipping\n' % node_id)
@@ -166,7 +166,7 @@ Bulk Translation Upload tool.
transl_content = transl_msg.GetContent()
current_content = current_msg.GetContent()
source_content = source_msg.GetContent()
-
+
ok_to_fixup = True
if (len(transl_content) != len(current_content)):
# message structure of translation is different, don't try fixup
@@ -185,7 +185,7 @@ Bulk Translation Upload tool.
if isinstance(current_content[ix], tclib.Placeholder):
ok_to_fixup = False # placeholders have likely been reordered
break
-
+
if not ok_to_fixup:
self.VerboseOut(
'Info: Structure of message %s has changed; skipping.\n' % node_id)
@@ -199,7 +199,7 @@ Bulk Translation Upload tool.
for ix in range(len(transl_content)):
Fixup(transl_content, ix)
Fixup(source_content, ix)
-
+
# Only put each translation once into the map. Warn if translations
# for the same message are different.
for ix in range(len(transl_cliques)):
@@ -207,7 +207,7 @@ Bulk Translation Upload tool.
source_msg.GenerateId() # needed to refresh ID based on new placeholders
message_id = source_msg.GetId()
translated_content = transl_cliques[ix].GetMessage().GetPresentableContent()
-
+
if message_id in id2transl:
existing_translation = id2transl[message_id]
if existing_translation != translated_content:
@@ -218,7 +218,7 @@ Bulk Translation Upload tool.
(original_text, existing_translation, translated_content))
else:
id2transl[message_id] = translated_content
-
+
# Remove translations for messages that do not occur in the current .grd
# or have been marked as not translateable, or do not occur in the 'limits'
# list (if it has been set).
@@ -228,19 +228,19 @@ Bulk Translation Upload tool.
not current_grd.UberClique().BestClique(message_id).IsTranslateable() or
(self.limits and message_id not in self.limits)):
del id2transl[message_id]
-
+
return id2transl
-
+
# static method
def WriteTranslations(output_file, translations):
'''Writes the provided list of translations to the provided output file
in the format used by the TC's Bulk Translation Upload tool. The file
must be UTF-8 encoded.
-
+
Args:
output_file: util.WrapOutputStream(file('bingo.out', 'w'))
translations: [ [id1, text1], ['12345678', 'Hello USERNAME, howzit?'] ]
-
+
Return:
None
'''
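
WriteTranslations() is the same call the menu_from_parts tool makes earlier in this change; a minimal sketch with made-up data and the output filename taken from the docstring's example:

from grit import util
from grit.tool import transl2tc

translations = [['12345678', 'Hello USERNAME, howzit?']]  # [id, text] pairs
out = util.WrapOutputStream(file('bingo.out', 'w'))       # docstring: file must be UTF-8
transl2tc.TranslationToTc.WriteTranslations(out, translations)
out.close()
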
diff --git a/tools/grit/grit/tool/transl2tc_unittest.py b/tools/grit/grit/tool/transl2tc_unittest.py
index 4205de2..cc65020 100644
--- a/tools/grit/grit/tool/transl2tc_unittest.py
+++ b/tools/grit/grit/tool/transl2tc_unittest.py
@@ -25,7 +25,7 @@ def MakeOptions():
class TranslationToTcUnittest(unittest.TestCase):
-
+
def testOutput(self):
buf = StringIO.StringIO()
tool = transl2tc.TranslationToTc()
@@ -85,12 +85,12 @@ how are you?
</release>
</grit>'''), path)
current_grd.RunGatherers(recursive=True)
-
+
source_rc_path = util.PathFromRoot('grit/test/data/source.rc')
source_rc = file(source_rc_path).read()
transl_rc_path = util.PathFromRoot('grit/test/data/transl.rc')
transl_rc = file(transl_rc_path).read()
-
+
tool = transl2tc.TranslationToTc()
output_buf = StringIO.StringIO()
globopts = MakeOptions()
@@ -100,10 +100,10 @@ how are you?
translations = tool.ExtractTranslations(current_grd,
source_rc, source_rc_path,
transl_rc, transl_rc_path)
-
+
values = translations.values()
output = output_buf.getvalue()
-
+
self.failUnless('Ein' in values)
self.failUnless('NUMBIRDS Vogeln' in values)
self.failUnless('ITEM von COUNT' in values)
@@ -114,11 +114,11 @@ how are you?
self.failIf('Nicht verwendet' in values)
self.failUnless(('Howdie' in values or 'Hallo sagt man' in values) and not
('Howdie' in values and 'Hallo sagt man' in values))
-
+
self.failUnless('XX01XX&SkraXX02XX&HaettaXX03XXThetta er "Klonk" sem eg fylaXX04XXgonkurinnXX05XXKlonk && er "gott"XX06XX&HjalpXX07XX&Um...XX08XX' in values)
-
+
self.failUnless('I lagi' in values)
-
+
self.failUnless(output.count('Structure of message IDS_REORDERED_PLACEHOLDERS has changed'))
self.failUnless(output.count('Message IDS_CHANGED has changed'))
self.failUnless(output.count('Structure of message IDS_LONGER_TRANSLATED has changed'))
diff --git a/tools/grit/grit/tool/unit.py b/tools/grit/grit/tool/unit.py
index ba424dd..7eb94ba 100644
--- a/tools/grit/grit/tool/unit.py
+++ b/tools/grit/grit/tool/unit.py
@@ -16,10 +16,10 @@ class UnitTestTool(interface.Tool):
'''By using this tool (e.g. 'grit unit') you run all the unit tests for GRIT.
This happens in the environment that is set up by the basic GRIT runner, i.e.
whether to run disconnected has been specified, etc.'''
-
+
def ShortDescription(self):
return 'Use this tool to run all the unit tests for GRIT.'
-
+
def Run(self, opts, args):
return unittest.TextTestRunner(verbosity=2).run(
grit.test_suite_all.TestSuiteAll())
diff --git a/tools/grit/grit/util.py b/tools/grit/grit/util.py
index b837d52..a0aac74 100644
--- a/tools/grit/grit/util.py
+++ b/tools/grit/grit/util.py
@@ -24,9 +24,9 @@ _root_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
SYSTEM_IDENTIFIERS = re.compile(
r'''\bIDOK\b | \bIDCANCEL\b | \bIDC_STATIC\b | \bIDYES\b | \bIDNO\b |
\bID_FILE_NEW\b | \bID_FILE_OPEN\b | \bID_FILE_CLOSE\b | \bID_FILE_SAVE\b |
- \bID_FILE_SAVE_AS\b | \bID_FILE_PAGE_SETUP\b | \bID_FILE_PRINT_SETUP\b |
- \bID_FILE_PRINT\b | \bID_FILE_PRINT_DIRECT\b | \bID_FILE_PRINT_PREVIEW\b |
- \bID_FILE_UPDATE\b | \bID_FILE_SAVE_COPY_AS\b | \bID_FILE_SEND_MAIL\b |
+ \bID_FILE_SAVE_AS\b | \bID_FILE_PAGE_SETUP\b | \bID_FILE_PRINT_SETUP\b |
+ \bID_FILE_PRINT\b | \bID_FILE_PRINT_DIRECT\b | \bID_FILE_PRINT_PREVIEW\b |
+ \bID_FILE_UPDATE\b | \bID_FILE_SAVE_COPY_AS\b | \bID_FILE_SEND_MAIL\b |
\bID_FILE_MRU_FIRST\b | \bID_FILE_MRU_LAST\b |
\bID_EDIT_CLEAR\b | \bID_EDIT_CLEAR_ALL\b | \bID_EDIT_COPY\b |
\bID_EDIT_CUT\b | \bID_EDIT_FIND\b | \bID_EDIT_PASTE\b | \bID_EDIT_PASTE_LINK\b |
@@ -87,7 +87,7 @@ def UnescapeHtml(text, replace_nbsp=True):
'''Returns 'text' with all HTML character entities (both named character
entities and those specified by decimal or hexadecimal Unicode ordinal)
replaced by their Unicode characters (or latin1 characters if possible).
-
+
The only exception is that &nbsp; will not be escaped if 'replace_nbsp' is
False.
'''
@@ -106,7 +106,7 @@ def UnescapeHtml(text, replace_nbsp=True):
return unichr(htmlentitydefs.name2codepoint[name])
else:
return match.group() # Unknown HTML character entity - don't replace
-
+
out = _HTML_ENTITY.sub(Replace, text)
return out
@@ -115,10 +115,10 @@ def EncodeCdata(cdata):
'''Returns the provided cdata in either escaped format or <![CDATA[xxx]]>
format, depending on which is more appropriate for easy editing. The data
is escaped for inclusion in an XML element's body.
-
+
Args:
cdata: 'If x < y and y < z then x < z'
-
+
Return:
'<![CDATA[If x < y and y < z then x < z]]>'
'''
@@ -132,12 +132,12 @@ def FixupNamedParam(function, param_name, param_value):
'''Returns a closure that is identical to 'function' but ensures that the
named parameter 'param_name' is always set to 'param_value' unless explicitly
set by the caller.
-
+
Args:
function: callable
param_name: 'bingo'
param_value: 'bongo' (any type)
-
+
Return:
callable
'''
@@ -152,10 +152,10 @@ def PathFromRoot(path):
'''Takes a path relative to the root directory for GRIT (the one that grit.py
resides in) and returns a path that is either absolute or relative to the
current working directory (i.e .a path you can use to open the file).
-
+
Args:
path: 'rel_dir\file.ext'
-
+
Return:
'c:\src\tools\rel_dir\file.ext
'''
@@ -200,10 +200,10 @@ _LANGUAGE_SPLIT_RE = re.compile('-|_|/')
def CanonicalLanguage(code):
'''Canonicalizes two-part language codes by using a dash and making the
second part upper case. Returns one-part language codes unchanged.
-
+
Args:
code: 'zh_cn'
-
+
Return:
code: 'zh-CN'
'''
@@ -237,12 +237,12 @@ _LANG_TO_CODEPAGE = {
def LanguageToCodepage(lang):
'''Returns the codepage _number_ that can be used to represent 'lang', which
may be either in formats such as 'en', 'pt_br', 'pt-BR', etc.
-
+
The codepage returned will be one of the 'cpXXXX' codepage numbers.
-
+
Args:
lang: 'de'
-
+
Return:
1252
'''
@@ -266,7 +266,7 @@ def NewClassInstance(class_name, class_type):
'''
lastdot = class_name.rfind('.')
module_name = ''
- if lastdot >= 0:
+ if lastdot >= 0:
module_name = class_name[0:lastdot]
if module_name:
class_name = class_name[lastdot+1:]
@@ -276,7 +276,7 @@ def NewClassInstance(class_name, class_type):
class_instance = class_()
if isinstance(class_instance, class_type):
return class_instance
- return None
+ return None
def FixLineEnd(text, line_end):
@@ -293,7 +293,7 @@ def BoolToString(bool):
return 'true'
else:
return 'false'
-
+
verbose = False
extra_verbose = False
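
The helper docstrings above carry their own worked examples; collected into a short runnable sketch (values copied from those docstrings):

from grit import util

print util.CanonicalLanguage('zh_cn')   # -> 'zh-CN'
print util.LanguageToCodepage('de')     # -> 1252
print util.EncodeCdata('If x < y and y < z then x < z')
# -> '<![CDATA[If x < y and y < z then x < z]]>'
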
diff --git a/tools/grit/grit/util_unittest.py b/tools/grit/grit/util_unittest.py
index 54104d0..cd8c97b 100644
--- a/tools/grit/grit/util_unittest.py
+++ b/tools/grit/grit/util_unittest.py
@@ -19,17 +19,17 @@ from grit import util
class UtilUnittest(unittest.TestCase):
''' Tests functions from util
'''
-
+
def testNewClassInstance(self):
# Test short class name with no fully qualified package name
# Should fail, it is not supported by the function now (as documented)
cls = util.NewClassInstance('grit.util.TestClassToLoad',
TestBaseClassToLoad)
self.failUnless(cls == None)
-
+
# Test non existent class name
cls = util.NewClassInstance('grit.util_unittest.NotExistingClass',
- TestBaseClassToLoad)
+ TestBaseClassToLoad)
self.failUnless(cls == None)
# Test valid class name and valid base class
@@ -41,7 +41,7 @@ class UtilUnittest(unittest.TestCase):
cls = util.NewClassInstance('grit.util_unittest.TestClassNoBase',
TestBaseClassToLoad)
self.failUnless(cls == None)
-
+
def testCanonicalLanguage(self):
self.failUnless(util.CanonicalLanguage('en') == 'en')
self.failUnless(util.CanonicalLanguage('pt_br') == 'pt-BR')
diff --git a/tools/grit/grit/xtb_reader.py b/tools/grit/grit/xtb_reader.py
index 3d1a42a..f785879 100644
--- a/tools/grit/grit/xtb_reader.py
+++ b/tools/grit/grit/xtb_reader.py
@@ -15,7 +15,7 @@ class XtbContentHandler(xml.sax.handler.ContentHandler):
'''A content handler that calls a given callback function for each
translation in the XTB file.
'''
-
+
def __init__(self, callback, debug=False):
self.callback = callback
self.debug = debug
@@ -28,7 +28,7 @@ class XtbContentHandler(xml.sax.handler.ContentHandler):
self.current_structure = []
# Set to the language ID when we see the <translationbundle> node.
self.language = ''
-
+
def startElement(self, name, attrs):
if name == 'translation':
assert self.current_id == 0 and len(self.current_structure) == 0, (
@@ -61,10 +61,10 @@ class XtbContentHandler(xml.sax.handler.ContentHandler):
class XtbErrorHandler(xml.sax.handler.ErrorHandler):
def error(self, exception):
pass
-
+
def fatalError(self, exception):
raise exception
-
+
def warning(self, exception):
pass
@@ -72,16 +72,16 @@ class XtbErrorHandler(xml.sax.handler.ErrorHandler):
def Parse(xtb_file, callback_function, debug=False):
'''Parse xtb_file, making a call to callback_function for every translation
in the XTB file.
-
+
The callback function must have the signature as described below. The 'parts'
parameter is a list of tuples (is_placeholder, text). The 'text' part is
either the raw text (if is_placeholder is False) or the name of the placeholder
(if is_placeholder is True).
-
+
Args:
xtb_file: file('fr.xtb')
callback_function: def Callback(msg_id, parts): pass
-
+
Return:
The language of the XTB, e.g. 'fr'
'''
@@ -91,7 +91,7 @@ def Parse(xtb_file, callback_function, debug=False):
# XTB files somehow?
front_of_file = xtb_file.read(1024)
xtb_file.seek(front_of_file.find('<translationbundle'))
-
+
handler = XtbContentHandler(callback=callback_function, debug=debug)
xml.sax.parse(xtb_file, handler)
assert handler.language != ''
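
A minimal sketch of driving the callback-based parser described above, following the call pattern used by menu_from_parts and the unit test below (the .xtb filename is hypothetical):

from grit import xtb_reader

def Callback(msg_id, parts):
  # 'parts' is a list of (is_placeholder, text) tuples
  text = ''.join([t for (is_placeholder, t) in parts])
  print '%s: %s' % (msg_id, text)

f = file('fr.xtb')
lang = xtb_reader.Parse(f, Callback)  # per the docstring, returns e.g. 'fr'
f.close()
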
diff --git a/tools/grit/grit/xtb_reader_unittest.py b/tools/grit/grit/xtb_reader_unittest.py
index 16e701e..f978876 100644
--- a/tools/grit/grit/xtb_reader_unittest.py
+++ b/tools/grit/grit/xtb_reader_unittest.py
@@ -36,7 +36,7 @@ and another
and another after a blank line.</translation>
</translationbundle>''')
-
+
messages = []
def Callback(id, structure):
messages.append((id, structure))
@@ -52,19 +52,19 @@ and another after a blank line.</translation>
<message name="ID_MEGA">Fantastic!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>'''), dir='.', flexible_root=True)
-
+
clique_mega = grd.children[0].GetCliques()[0]
msg_mega = clique_mega.GetMessage()
clique_hello_user = grd.children[1].GetCliques()[0]
msg_hello_user = clique_hello_user.GetMessage()
-
+
xtb_file = StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE translationbundle>
<translationbundle lang="is">
<translation id="%s">Meirihattar!</translation>
<translation id="%s">Saelir <ph name="USERNAME"/></translation>
</translationbundle>''' % (msg_mega.GetId(), msg_hello_user.GetId()))
-
+
xtb_reader.Parse(xtb_file, grd.UberClique().GenerateXtbParserCallback('is'))
self.failUnless(clique_mega.MessageForLanguage('is').GetRealContent() ==
'Meirihattar!')
diff --git a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTime.cpp b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTime.cpp
index 55f4de2..bb2fa51 100644
--- a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTime.cpp
+++ b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTime.cpp
@@ -55,7 +55,7 @@ extern "C" BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpRes
{
DisableThreadLibraryCalls(hInstance);
}
- return _AtlModule.DllMain(dwReason, lpReserved);
+ return _AtlModule.DllMain(dwReason, lpReserved);
}
#ifdef _MANAGED
diff --git a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.cpp b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.cpp
index 7e9d5c0..a88f1bf 100644
--- a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.cpp
+++ b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.cpp
@@ -38,7 +38,7 @@
#define MAX_URL 1024 // size of URL buffer
#define MAX_PAGELOADTIME (4*60*1000) // assume all pages take < 4 minutes
-#define PORT 42492 // port to listen on. Also jhaas's
+#define PORT 42492 // port to listen on. Also jhaas's
// old MSFT employee number
@@ -99,7 +99,7 @@ STDMETHODIMP CMeasurePageLoadTimeBHO::SetSite(IUnknown* pUnkSite)
void STDMETHODCALLTYPE CMeasurePageLoadTimeBHO::OnDocumentComplete(IDispatch *pDisp, VARIANT *pvarURL)
{
- if (pDisp == m_spWebBrowser)
+ if (pDisp == m_spWebBrowser)
{
// Fire the event when the page is done loading
// to unblock the other thread.
@@ -108,7 +108,7 @@ void STDMETHODCALLTYPE CMeasurePageLoadTimeBHO::OnDocumentComplete(IDispatch *pD
}
-void CMeasurePageLoadTimeBHO::ProcessPageTimeRequests()
+void CMeasurePageLoadTimeBHO::ProcessPageTimeRequests()
{
CoInitialize(NULL);
@@ -205,7 +205,7 @@ void CMeasurePageLoadTimeBHO::ProcessPageTimeRequests()
fReceivedCR = true;
}
- // The below call will not block, since we determined with
+ // The below call will not block, since we determined with
// MSG_PEEK that at least cbRead bytes are in the TCP receive buffer
recv(m_sockTransport, pbBuffer, cbRead, 0);
pbBuffer[cbRead] = '\0';
@@ -250,8 +250,8 @@ void CMeasurePageLoadTimeBHO::ProcessPageTimeRequests()
);
// The main browser thread will call OnDocumentComplete() when
- // the page is done loading, which will in turn trigger
- // m_hEvent. Wait here until then; the event will reset itself
+ // the page is done loading, which will in turn trigger
+ // m_hEvent. Wait here until then; the event will reset itself
// once this thread is released
if (WaitForSingleObject(m_hEvent, MAX_PAGELOADTIME) == WAIT_TIMEOUT)
{
@@ -297,7 +297,7 @@ void CMeasurePageLoadTimeBHO::ProcessPageTimeRequests()
void CMeasurePageLoadTimeBHO::ErrorExit()
{
- // Unlink from IE, close the sockets, then terminate this
+ // Unlink from IE, close the sockets, then terminate this
// thread
SetSite(NULL);
@@ -314,4 +314,4 @@ void CMeasurePageLoadTimeBHO::ErrorExit()
}
TerminateThread(GetCurrentThread(), -1);
-} \ No newline at end of file
+}
diff --git a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.h b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.h
index b6e518a..8fe07ac 100644
--- a/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.h
+++ b/tools/measure_page_load_time/ie_bho/MeasurePageLoadTimeBHO.h
@@ -50,7 +50,7 @@ BEGIN_SINK_MAP(CMeasurePageLoadTimeBHO)
END_SINK_MAP()
// DWebBrowserEvents2
- void STDMETHODCALLTYPE OnDocumentComplete(IDispatch *pDisp, VARIANT *pvarURL);
+ void STDMETHODCALLTYPE OnDocumentComplete(IDispatch *pDisp, VARIANT *pvarURL);
STDMETHOD(SetSite)(IUnknown *pUnkSite);
DECLARE_PROTECT_FINAL_CONSTRUCT()
@@ -70,7 +70,7 @@ END_SINK_MAP()
private:
CComPtr<IWebBrowser2> m_spWebBrowser;
- BOOL m_fAdvised;
+ BOOL m_fAdvised;
// Handle to global interface table
DWORD m_dwCookie;
diff --git a/tools/measure_page_load_time/ie_bho/resource.h b/tools/measure_page_load_time/ie_bho/resource.h
index a8004dc..38dc825 100644
--- a/tools/measure_page_load_time/ie_bho/resource.h
+++ b/tools/measure_page_load_time/ie_bho/resource.h
@@ -7,7 +7,7 @@
#define IDR_MEASUREPAGELOADTIMEBHO 102
// Next default values for new objects
-//
+//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 201
diff --git a/tools/measure_page_load_time/ie_bho/stdafx.h b/tools/measure_page_load_time/ie_bho/stdafx.h
index 3f26087..a2727ce 100644
--- a/tools/measure_page_load_time/ie_bho/stdafx.h
+++ b/tools/measure_page_load_time/ie_bho/stdafx.h
@@ -14,9 +14,9 @@
#define WINVER 0x0501 // Change this to the appropriate value to target other versions of Windows.
#endif
-#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
+#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows.
-#endif
+#endif
#ifndef _WIN32_WINDOWS // Allow use of features specific to Windows 98 or later.
#define _WIN32_WINDOWS 0x0410 // Change this to the appropriate value to target Windows Me or later.
@@ -36,4 +36,4 @@
#include <atlbase.h>
#include <atlcom.h>
-using namespace ATL; \ No newline at end of file
+using namespace ATL;
diff --git a/tools/memory_watcher/ia32_modrm_map.cc b/tools/memory_watcher/ia32_modrm_map.cc
index eaae07c..c2f9625 100644
--- a/tools/memory_watcher/ia32_modrm_map.cc
+++ b/tools/memory_watcher/ia32_modrm_map.cc
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -48,7 +48,7 @@ const ModrmEntry MiniDisassembler::s_ia16_modrm_map_[] = {
/* r/m == 100 */ { false, false, OS_ZERO },
/* r/m == 101 */ { false, false, OS_ZERO },
/* r/m == 110 */ { true, false, OS_WORD },
- /* r/m == 111 */ { false, false, OS_ZERO },
+ /* r/m == 111 */ { false, false, OS_ZERO },
// mod == 01
/* r/m == 000 */ { true, false, OS_BYTE },
/* r/m == 001 */ { true, false, OS_BYTE },
@@ -57,7 +57,7 @@ const ModrmEntry MiniDisassembler::s_ia16_modrm_map_[] = {
/* r/m == 100 */ { true, false, OS_BYTE },
/* r/m == 101 */ { true, false, OS_BYTE },
/* r/m == 110 */ { true, false, OS_BYTE },
- /* r/m == 111 */ { true, false, OS_BYTE },
+ /* r/m == 111 */ { true, false, OS_BYTE },
// mod == 10
/* r/m == 000 */ { true, false, OS_WORD },
/* r/m == 001 */ { true, false, OS_WORD },
@@ -66,7 +66,7 @@ const ModrmEntry MiniDisassembler::s_ia16_modrm_map_[] = {
/* r/m == 100 */ { true, false, OS_WORD },
/* r/m == 101 */ { true, false, OS_WORD },
/* r/m == 110 */ { true, false, OS_WORD },
- /* r/m == 111 */ { true, false, OS_WORD },
+ /* r/m == 111 */ { true, false, OS_WORD },
// mod == 11
/* r/m == 000 */ { false, false, OS_ZERO },
/* r/m == 001 */ { false, false, OS_ZERO },
@@ -87,7 +87,7 @@ const ModrmEntry MiniDisassembler::s_ia32_modrm_map_[] = {
/* r/m == 100 */ { false, true, OS_ZERO },
/* r/m == 101 */ { true, false, OS_DOUBLE_WORD },
/* r/m == 110 */ { false, false, OS_ZERO },
- /* r/m == 111 */ { false, false, OS_ZERO },
+ /* r/m == 111 */ { false, false, OS_ZERO },
// mod == 01
/* r/m == 000 */ { true, false, OS_BYTE },
/* r/m == 001 */ { true, false, OS_BYTE },
@@ -96,7 +96,7 @@ const ModrmEntry MiniDisassembler::s_ia32_modrm_map_[] = {
/* r/m == 100 */ { true, true, OS_BYTE },
/* r/m == 101 */ { true, false, OS_BYTE },
/* r/m == 110 */ { true, false, OS_BYTE },
- /* r/m == 111 */ { true, false, OS_BYTE },
+ /* r/m == 111 */ { true, false, OS_BYTE },
// mod == 10
/* r/m == 000 */ { true, false, OS_DOUBLE_WORD },
/* r/m == 001 */ { true, false, OS_DOUBLE_WORD },
@@ -105,7 +105,7 @@ const ModrmEntry MiniDisassembler::s_ia32_modrm_map_[] = {
/* r/m == 100 */ { true, true, OS_DOUBLE_WORD },
/* r/m == 101 */ { true, false, OS_DOUBLE_WORD },
/* r/m == 110 */ { true, false, OS_DOUBLE_WORD },
- /* r/m == 111 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 111 */ { true, false, OS_DOUBLE_WORD },
// mod == 11
/* r/m == 000 */ { false, false, OS_ZERO },
/* r/m == 001 */ { false, false, OS_ZERO },
diff --git a/tools/memory_watcher/ia32_opcode_map.cc b/tools/memory_watcher/ia32_opcode_map.cc
index df57b2a..aa10efa 100644
--- a/tools/memory_watcher/ia32_opcode_map.cc
+++ b/tools/memory_watcher/ia32_opcode_map.cc
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -262,10 +262,10 @@ const Opcode s_first_opcode_byte[] = {
/* 0xD5 */ { 0, IT_GENERIC, AM_I | OT_B, AM_NOT_USED, AM_NOT_USED, "aad", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xD6 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xD7 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "xlat", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
-
+
// The following 8 lines would be references to the FPU tables, but we currently
// do not support the FPU instructions in this disassembler.
-
+
/* 0xD8 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xD9 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xDA */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
@@ -274,8 +274,8 @@ const Opcode s_first_opcode_byte[] = {
/* 0xDD */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xDE */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xDF */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
-
-
+
+
/* 0xE0 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loopnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xE1 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loopz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0xE2 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
@@ -587,7 +587,7 @@ const Opcode s_opcode_byte_after_0f[] = {
/* F3h */ { 0 },
/* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpeqd" } },
/* 0x77 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "emms", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
-
+
// The following six opcodes are escapes into the MMX stuff, which this disassembler does not support.
/* 0x78 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0x79 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
@@ -595,7 +595,7 @@ const Opcode s_opcode_byte_after_0f[] = {
/* 0x7B */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0x7C */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
/* 0x7D */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
-
+
/* 0x7E */ { 0, IT_GENERIC, AM_E | OT_D, AM_P | OT_D, AM_NOT_USED, "movd", true,
/* F2h */ { 0 },
/* F3h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movq" },
@@ -1160,27 +1160,27 @@ const OpcodeTable MiniDisassembler::s_ia32_opcode_map_[]={
/* 1 */ {s_opcode_byte_after_0f, 0, 0xff, 0, 0xff},
// Start of tables for opcodes using ModR/M bits as extension
/* 2 */ {s_opcode_byte_after_80, 3, 0x07, 0, 0x07},
- /* 3 */ {s_opcode_byte_after_81, 3, 0x07, 0, 0x07},
- /* 4 */ {s_opcode_byte_after_82, 3, 0x07, 0, 0x07},
- /* 5 */ {s_opcode_byte_after_83, 3, 0x07, 0, 0x07},
- /* 6 */ {s_opcode_byte_after_c0, 3, 0x07, 0, 0x07},
- /* 7 */ {s_opcode_byte_after_c1, 3, 0x07, 0, 0x07},
- /* 8 */ {s_opcode_byte_after_d0, 3, 0x07, 0, 0x07},
- /* 9 */ {s_opcode_byte_after_d1, 3, 0x07, 0, 0x07},
- /* 10 */ {s_opcode_byte_after_d2, 3, 0x07, 0, 0x07},
- /* 11 */ {s_opcode_byte_after_d3, 3, 0x07, 0, 0x07},
- /* 12 */ {s_opcode_byte_after_f6, 3, 0x07, 0, 0x07},
- /* 13 */ {s_opcode_byte_after_f7, 3, 0x07, 0, 0x07},
- /* 14 */ {s_opcode_byte_after_fe, 3, 0x07, 0, 0x01},
- /* 15 */ {s_opcode_byte_after_ff, 3, 0x07, 0, 0x07},
- /* 16 */ {s_opcode_byte_after_0f00, 3, 0x07, 0, 0x07},
- /* 17 */ {s_opcode_byte_after_0f01, 3, 0x07, 0, 0x07},
- /* 18 */ {s_opcode_byte_after_0f18, 3, 0x07, 0, 0x07},
- /* 19 */ {s_opcode_byte_after_0f71, 3, 0x07, 0, 0x07},
- /* 20 */ {s_opcode_byte_after_0f72, 3, 0x07, 0, 0x07},
- /* 21 */ {s_opcode_byte_after_0f73, 3, 0x07, 0, 0x07},
- /* 22 */ {s_opcode_byte_after_0fae, 3, 0x07, 0, 0x07},
- /* 23 */ {s_opcode_byte_after_0fba, 3, 0x07, 0, 0x07},
+ /* 3 */ {s_opcode_byte_after_81, 3, 0x07, 0, 0x07},
+ /* 4 */ {s_opcode_byte_after_82, 3, 0x07, 0, 0x07},
+ /* 5 */ {s_opcode_byte_after_83, 3, 0x07, 0, 0x07},
+ /* 6 */ {s_opcode_byte_after_c0, 3, 0x07, 0, 0x07},
+ /* 7 */ {s_opcode_byte_after_c1, 3, 0x07, 0, 0x07},
+ /* 8 */ {s_opcode_byte_after_d0, 3, 0x07, 0, 0x07},
+ /* 9 */ {s_opcode_byte_after_d1, 3, 0x07, 0, 0x07},
+ /* 10 */ {s_opcode_byte_after_d2, 3, 0x07, 0, 0x07},
+ /* 11 */ {s_opcode_byte_after_d3, 3, 0x07, 0, 0x07},
+ /* 12 */ {s_opcode_byte_after_f6, 3, 0x07, 0, 0x07},
+ /* 13 */ {s_opcode_byte_after_f7, 3, 0x07, 0, 0x07},
+ /* 14 */ {s_opcode_byte_after_fe, 3, 0x07, 0, 0x01},
+ /* 15 */ {s_opcode_byte_after_ff, 3, 0x07, 0, 0x07},
+ /* 16 */ {s_opcode_byte_after_0f00, 3, 0x07, 0, 0x07},
+ /* 17 */ {s_opcode_byte_after_0f01, 3, 0x07, 0, 0x07},
+ /* 18 */ {s_opcode_byte_after_0f18, 3, 0x07, 0, 0x07},
+ /* 19 */ {s_opcode_byte_after_0f71, 3, 0x07, 0, 0x07},
+ /* 20 */ {s_opcode_byte_after_0f72, 3, 0x07, 0, 0x07},
+ /* 21 */ {s_opcode_byte_after_0f73, 3, 0x07, 0, 0x07},
+ /* 22 */ {s_opcode_byte_after_0fae, 3, 0x07, 0, 0x07},
+ /* 23 */ {s_opcode_byte_after_0fba, 3, 0x07, 0, 0x07},
/* 24 */ {s_opcode_byte_after_0fc7, 3, 0x07, 0, 0x01}
};
diff --git a/tools/memory_watcher/memory_watcher.cc b/tools/memory_watcher/memory_watcher.cc
index d5e11db..9b6e3ed 100644
--- a/tools/memory_watcher/memory_watcher.cc
+++ b/tools/memory_watcher/memory_watcher.cc
@@ -99,7 +99,7 @@ void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) {
AutoLock lock(block_map_lock_);
- // Ideally, we'd like to verify that the block being added
+ // Ideally, we'd like to verify that the block being added
// here is not already in our list of tracked blocks. However,
// the lookup in our hash table is expensive and slows us too
// much. Uncomment this line if you think you need it.
@@ -175,7 +175,7 @@ void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) {
void MemoryWatcher::SetLogName(char* log_name) {
if (!log_name)
return;
-
+
log_name_ = log_name;
}
diff --git a/tools/memory_watcher/memory_watcher.h b/tools/memory_watcher/memory_watcher.h
index f6281b5..d5a1d10 100644
--- a/tools/memory_watcher/memory_watcher.h
+++ b/tools/memory_watcher/memory_watcher.h
@@ -33,7 +33,7 @@ class MemoryWatcher : MemoryObserver {
// MemoryObserver interface.
virtual void OnTrack(HANDLE heap, int32 id, int32 size);
virtual void OnUntrack(HANDLE heap, int32 id, int32 size);
-
+
// Sets a name that appears in the generated file name.
void SetLogName(char* log_name);
diff --git a/tools/memory_watcher/mini_disassembler.cc b/tools/memory_watcher/mini_disassembler.cc
index 6b1dec8..a5fcf54 100644
--- a/tools/memory_watcher/mini_disassembler.cc
+++ b/tools/memory_watcher/mini_disassembler.cc
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -110,7 +110,7 @@ void MiniDisassembler::Initialize() {
InstructionType MiniDisassembler::ProcessPrefixes(unsigned char* start_byte,
unsigned int& size) {
- InstructionType instruction_type = IT_GENERIC;
+ InstructionType instruction_type = IT_GENERIC;
const Opcode& opcode = s_ia32_opcode_map_[0].table_[*start_byte];
switch (opcode.type_) {
@@ -121,14 +121,14 @@ InstructionType MiniDisassembler::ProcessPrefixes(unsigned char* start_byte,
operand_is_32_bits_ = !operand_default_is_32_bits_;
nochangeoperand:
case IT_PREFIX:
-
+
if (0xF2 == (*start_byte))
got_f2_prefix_ = true;
else if (0xF3 == (*start_byte))
got_f3_prefix_ = true;
else if (0x66 == (*start_byte))
got_66_prefix_ = true;
-
+
instruction_type = opcode.type_;
size ++;
// we got a prefix, so add one and check next byte
@@ -146,7 +146,7 @@ InstructionType MiniDisassembler::ProcessOpcode(unsigned char* start_byte,
const OpcodeTable& table = s_ia32_opcode_map_[table_index]; // Get our table
unsigned char current_byte = (*start_byte) >> table.shift_;
current_byte = current_byte & table.mask_; // Mask out the bits we will use
-
+
// Check whether the byte we have is inside the table we have.
if (current_byte < table.min_lim_ || current_byte > table.max_lim_) {
instruction_type_ = IT_UNKNOWN;
@@ -234,10 +234,10 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
case AM_F: // EFLAGS register
case AM_X: // Memory addressed by the DS:SI register pair
case AM_Y: // Memory addressed by the ES:DI register pair
- case AM_IMPLICIT: // Parameter is implicit, occupies no space in
+ case AM_IMPLICIT: // Parameter is implicit, occupies no space in
// instruction
break;
-
+
// There is a ModR/M byte but it does not necessarily need
// to be decoded.
case AM_C: // reg field of ModR/M selects a control register
@@ -250,20 +250,20 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
case AM_V: // reg field of ModR/M selects a 128-bit XMM register
have_modrm_ = true;
break;
-
+
// In these addressing modes, there is a ModR/M byte and it needs to be
// decoded. No other (e.g. immediate) params than indicated in ModR/M.
- case AM_E: // Operand is either a general-purpose register or memory,
+ case AM_E: // Operand is either a general-purpose register or memory,
// specified by ModR/M byte
case AM_M: // ModR/M byte will refer only to memory
- case AM_Q: // Operand is either an MMX register or memory (complex
+ case AM_Q: // Operand is either an MMX register or memory (complex
// evaluation), specified by ModR/M byte
- case AM_W: // Operand is either a 128-bit XMM register or memory (complex
+ case AM_W: // Operand is either a 128-bit XMM register or memory (complex
// eval), specified by ModR/M byte
have_modrm_ = true;
should_decode_modrm_ = true;
break;
-
+
// These addressing modes specify an immediate or an offset value
// directly, so we need to look at the operand type to see how many
// bytes.
@@ -286,7 +286,7 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
case OT_DQ: // Double-quadword, regardless of operand-size attribute.
operand_bytes_ += OS_DOUBLE_QUAD_WORD;
break;
- case OT_P: // 32-bit or 48-bit pointer, depending on operand-size
+ case OT_P: // 32-bit or 48-bit pointer, depending on operand-size
// attribute.
if (operand_is_32_bits_)
operand_bytes_ += OS_48_BIT_POINTER;
@@ -307,9 +307,9 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
operand_bytes_ += OS_DOUBLE_PRECISION_FLOATING;
break;
case OT_SS:
- // Scalar element of a 128-bit packed single-precision
+ // Scalar element of a 128-bit packed single-precision
// floating data.
- // We simply return enItUnknown since we don't have to support
+ // We simply return enItUnknown since we don't have to support
// floating point
succeeded = false;
break;
@@ -322,19 +322,19 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
case OT_W: // Word, regardless of operand-size attribute.
operand_bytes_ += OS_WORD;
break;
-
+
// Can safely ignore these.
- case OT_A: // Two one-word operands in memory or two double-word
+ case OT_A: // Two one-word operands in memory or two double-word
// operands in memory
case OT_PI: // Quadword MMX technology register (e.g. mm0)
case OT_SI: // Doubleword integer register (e.g., eax)
break;
-
+
default:
break;
}
break;
-
+
default:
break;
}
@@ -342,7 +342,7 @@ bool MiniDisassembler::ProcessOperand(int flag_operand) {
return succeeded;
}
-bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
+bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
unsigned int& size) {
// If we don't need to decode, we just return the size of the ModR/M
// byte (there is never a SIB byte in this case).
@@ -372,7 +372,7 @@ bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
// Invariant: modrm_entry points to information that we need to decode
// the ModR/M byte.
-
+
// Add to the count of operand bytes, if the ModR/M byte indicates
// that some operands are encoded in the instruction.
if (modrm_entry->is_encoded_in_instruction_)
@@ -389,8 +389,8 @@ bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
}
}
-bool MiniDisassembler::ProcessSib(unsigned char* start_byte,
- unsigned char mod,
+bool MiniDisassembler::ProcessSib(unsigned char* start_byte,
+ unsigned char mod,
unsigned int& size) {
// get the mod field from the 2..0 bits of the SIB byte
unsigned char sib_base = (*start_byte) & 0x07;
diff --git a/tools/memory_watcher/mini_disassembler.h b/tools/memory_watcher/mini_disassembler.h
index 8af2c42..1a75c54 100644
--- a/tools/memory_watcher/mini_disassembler.h
+++ b/tools/memory_watcher/mini_disassembler.h
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -51,8 +51,8 @@ namespace sidestep {
//
// The limitations include at least the following:
// -# No support for coprocessor opcodes, MMX, etc.
-// -# No machine-readable identification of opcodes or decoding of
-// assembly parameters. The name of the opcode (as a string) is given,
+// -# No machine-readable identification of opcodes or decoding of
+// assembly parameters. The name of the opcode (as a string) is given,
// however, to aid debugging.
//
// You may ask what this little disassembler actually does, then? The answer is
@@ -115,8 +115,8 @@ class MiniDisassembler {
// Sets the flag for whether we have ModR/M, and increments
// operand_bytes_ if any are specifies by the opcode directly.
// @return Number of opcode bytes.
- InstructionType ProcessOpcode(unsigned char * start,
- unsigned int table,
+ InstructionType ProcessOpcode(unsigned char * start,
+ unsigned int table,
unsigned int& size);
// Checks the type of the supplied operand. Increments
diff --git a/tools/memory_watcher/mini_disassembler_types.h b/tools/memory_watcher/mini_disassembler_types.h
index 3abc85d..d4fef2a 100644
--- a/tools/memory_watcher/mini_disassembler_types.h
+++ b/tools/memory_watcher/mini_disassembler_types.h
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -81,7 +81,7 @@ enum OperandSize {
// is a mask for the rest. The other enumeration values are named for the
// names given to the addressing methods in the manual, e.g. enAm_D is for
// the D addressing method.
-//
+//
// The reason we use a full 4 bytes and a mask, is that we need to combine
// these flags with the enOperandType to store the details
// on the operand in a single integer.
@@ -137,7 +137,7 @@ enum OperandType {
OT_W = 0x0E000000,
OT_SD = 0x0F000000, // scalar double-precision floating-point value
OT_PD = 0x10000000, // double-precision floating point
- // dummy "operand type" for address mode M - which doesn't specify
+ // dummy "operand type" for address mode M - which doesn't specify
// operand type
OT_ADDRESS_MODE_M = 0x80000000
};
@@ -147,7 +147,7 @@ enum OperandType {
struct SpecificOpcode {
// Index to continuation table, or 0 if this is the last
// byte in the opcode.
- int table_index_;
+ int table_index_;
// The opcode type
InstructionType type_;
@@ -168,7 +168,7 @@ struct SpecificOpcode {
struct Opcode {
// Index to continuation table, or 0 if this is the last
// byte in the opcode.
- int table_index_;
+ int table_index_;
// The opcode type
InstructionType type_;
diff --git a/tools/memory_watcher/preamble_patcher.cc b/tools/memory_watcher/preamble_patcher.cc
index 2beb555..78dbc7e 100644
--- a/tools/memory_watcher/preamble_patcher.cc
+++ b/tools/memory_watcher/preamble_patcher.cc
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -64,9 +64,9 @@ SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
return SIDESTEP_ACCESS_DENIED;
}
- SideStepError error_code = RawPatchWithStub(target_function,
- replacement_function,
- preamble_stub,
+ SideStepError error_code = RawPatchWithStub(target_function,
+ replacement_function,
+ preamble_stub,
stub_size,
bytes_needed);
if (SIDESTEP_SUCCESS != error_code) {
@@ -76,9 +76,9 @@ SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
// Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
// pTargetFunction to what they were before we started goofing around.
- succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
- MAX_PREAMBLE_STUB_SIZE,
- old_target_function_protect,
+ succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
+ MAX_PREAMBLE_STUB_SIZE,
+ old_target_function_protect,
&old_target_function_protect);
if (!succeeded) {
ASSERT(false, "Failed to restore protection to target function.");
@@ -94,8 +94,8 @@ SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
// XP machines. I'm not sure why this is so, but it is, yet I want to keep the
// call to the API here for correctness in case there is a difference in
// some variants of Windows/hardware.
- succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
- target_function,
+ succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
+ target_function,
MAX_PREAMBLE_STUB_SIZE);
if (!succeeded) {
ASSERT(false, "Failed to flush instruction cache.");
@@ -116,9 +116,9 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,
return SIDESTEP_INVALID_PARAMETER;
}
- // @see MAX_PREAMBLE_STUB_SIZE for an explanation of how we arrives at
+ // @see MAX_PREAMBLE_STUB_SIZE for an explanation of how we arrives at
// this size
- unsigned char* preamble_stub =
+ unsigned char* preamble_stub =
reinterpret_cast<unsigned char*>(
MemoryHook::Alloc(sizeof(unsigned char) * MAX_PREAMBLE_STUB_SIZE));
if (!preamble_stub) {
@@ -139,9 +139,9 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,
return SIDESTEP_ACCESS_DENIED;
}
- SideStepError error_code = RawPatchWithStubAndProtections(target_function,
- replacement_function,
- preamble_stub,
+ SideStepError error_code = RawPatchWithStubAndProtections(target_function,
+ replacement_function,
+ preamble_stub,
MAX_PREAMBLE_STUB_SIZE,
NULL);
if (SIDESTEP_SUCCESS != error_code) {
@@ -149,14 +149,14 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,
delete[] preamble_stub;
return error_code;
}
-
+
*original_function_stub = reinterpret_cast<void*>(preamble_stub);
- // NOTE: For hooking malloc/free, we don't want to use streams which
- // allocate. Basically, we've hooked malloc, but not necessarily
+ // NOTE: For hooking malloc/free, we don't want to use streams which
+ // allocate. Basically, we've hooked malloc, but not necessarily
// hooked free yet. To do anything which uses the heap could crash
// with a mismatched malloc/free!
- //LOG(INFO) << "PreamblePatcher::RawPatch successfully patched 0x" <<
+ //LOG(INFO) << "PreamblePatcher::RawPatch successfully patched 0x" <<
// target_function;
return SIDESTEP_SUCCESS;
@@ -175,7 +175,7 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
MiniDisassembler disassembler;
unsigned int preamble_bytes = 0;
while (preamble_bytes < 5) {
- InstructionType instruction_type =
+ InstructionType instruction_type =
disassembler.Disassemble(
reinterpret_cast<unsigned char*>(original_function_stub) + preamble_bytes,
preamble_bytes);
@@ -234,17 +234,17 @@ SideStepError PreamblePatcher::Unpatch(void* target_function,
// Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
// target to what they were before we started goofing around.
- succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
- MAX_PREAMBLE_STUB_SIZE,
- old_target_function_protect,
+ succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
+ MAX_PREAMBLE_STUB_SIZE,
+ old_target_function_protect,
&old_target_function_protect);
// Flush the instruction cache to make sure the processor doesn't execute the
// old version of the instructions (before our patch).
//
// See comment on FlushInstructionCache elsewhere in this file.
- succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
- target,
+ succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
+ target,
MAX_PREAMBLE_STUB_SIZE);
if (!succeeded) {
ASSERT(false, "Failed to flush instruction cache.");
diff --git a/tools/memory_watcher/preamble_patcher.h b/tools/memory_watcher/preamble_patcher.h
index eafe88a..ccacb72 100644
--- a/tools/memory_watcher/preamble_patcher.h
+++ b/tools/memory_watcher/preamble_patcher.h
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -46,7 +46,7 @@
// bytes of the function. Considering the worst case scenario, we need 4
// bytes + the max instruction size + 5 more bytes for our jump back to
// the original code. With that in mind, 32 is a good number :)
-#define MAX_PREAMBLE_STUB_SIZE (32)
+#define MAX_PREAMBLE_STUB_SIZE (32)
namespace sidestep {
@@ -77,28 +77,28 @@ enum SideStepError {
// See the TODO in preamble_patcher_with_stub.cc for instructions on what
// we need to do before using it in production code; it's fairly simple
// but unnecessary for now since we only intend to use it in unit tests.
-//
+//
// To patch a function, use either of the typesafe Patch() methods. You
// can unpatch a function using Unpatch().
-//
+//
// Typical usage goes something like this:
// @code
// typedef int (*MyTypesafeFuncPtr)(int x);
// MyTypesafeFuncPtr original_func_stub;
// int MyTypesafeFunc(int x) { return x + 1; }
// int HookMyTypesafeFunc(int x) { return 1 + original_func_stub(x); }
-//
+//
// void MyPatchInitializingFunction() {
// original_func_stub = PreamblePatcher::Patch(
// MyTypesafeFunc, HookMyTypesafeFunc);
// if (!original_func_stub) {
// // ... error handling ...
// }
-//
+//
// // ... continue - you have patched the function successfully ...
// }
// @endcode
-//
+//
// Note that there are a number of ways that this method of patching can
// fail. The most common are:
// - If there is a jump (jxx) instruction in the first 5 bytes of
@@ -113,7 +113,7 @@ enum SideStepError {
// - If there is another thread currently executing within the bytes
// that are copied to the preamble stub, it will crash in an undefined
// way.
-//
+//
// If you get any other error than the above, you're either pointing the
// patcher at an invalid instruction (e.g. into the middle of a multi-
// byte instruction, or not at memory containing executable instructions)
@@ -286,9 +286,9 @@ class PreamblePatcher {
// exactly the same calling convention and parameters as the original
// function.
//
- // @param preamble_stub A pointer to a buffer where the preamble stub
+ // @param preamble_stub A pointer to a buffer where the preamble stub
// should be copied. The size of the buffer should be sufficient to
- // hold the preamble bytes.
+ // hold the preamble bytes.
//
// @param stub_size Size in bytes of the buffer allocated for the
// preamble_stub
@@ -298,19 +298,19 @@ class PreamblePatcher {
// not interested.
//
// @return An error code indicating the result of patching.
- static SideStepError RawPatchWithStubAndProtections(void* target_function,
- void *replacement_function,
- unsigned char* preamble_stub,
- unsigned long stub_size,
+ static SideStepError RawPatchWithStubAndProtections(void* target_function,
+ void *replacement_function,
+ unsigned char* preamble_stub,
+ unsigned long stub_size,
unsigned long* bytes_needed);
// A helper function used by RawPatchWithStubAndProtections -- it does
// everything but the VirtualProtect wsork. Defined in
// preamble_patcher_with_stub.cc.
- static SideStepError RawPatchWithStub(void* target_function,
- void *replacement_function,
- unsigned char* preamble_stub,
- unsigned long stub_size,
+ static SideStepError RawPatchWithStub(void* target_function,
+ void *replacement_function,
+ unsigned char* preamble_stub,
+ unsigned long stub_size,
unsigned long* bytes_needed);
};
diff --git a/tools/memory_watcher/preamble_patcher_with_stub.cc b/tools/memory_watcher/preamble_patcher_with_stub.cc
index d2ce6af..dc38872 100644
--- a/tools/memory_watcher/preamble_patcher_with_stub.cc
+++ b/tools/memory_watcher/preamble_patcher_with_stub.cc
@@ -1,10 +1,10 @@
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
diff --git a/tools/purify/chrome_tests.py b/tools/purify/chrome_tests.py
index 6831fa06..a9ce16c 100644
--- a/tools/purify/chrome_tests.py
+++ b/tools/purify/chrome_tests.py
@@ -66,7 +66,7 @@ class ChromeTests:
# On the buildbot, we archive to a specific location on chrome-web
# with a directory based on the test name and the current svn revision.
# NOTE: These modules are located in trunk/tools/buildbot, which is not
- # in the default config. You'll need to check this out and add
+ # in the default config. You'll need to check this out and add
# scripts/* to your PYTHONPATH to test outside of the buildbot.
import slave_utils
import chromium_config
@@ -79,7 +79,7 @@ class ChromeTests:
os.makedirs(self._report_dir)
purify_test = os.path.join(script_dir, "purify_test.py")
- self._command_preamble = ["python.exe", purify_test, "--echo_to_stdout",
+ self._command_preamble = ["python.exe", purify_test, "--echo_to_stdout",
"--source_dir=%s" % (self._source_dir),
"--save_cache"]
@@ -131,7 +131,7 @@ class ChromeTests:
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test]()
-
+
def _ReadGtestFilterFile(self, name, cmd):
'''Read a file which is a list of tests to filter out with --gtest_filter
and append the command-line option to cmd.
@@ -170,13 +170,13 @@ class ChromeTests:
def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None,
out_dir_extra=None):
- '''Purify a target exe, which will be executed one or more times via a
+ '''Purify a target exe, which will be executed one or more times via a
script or driver program.
Args:
module - which top level component this test is from (webkit, base, etc.)
exe - the name of the exe (it's assumed to exist in build_dir)
name - the name of this test (used to name output files)
- script - the driver program or script. If it's python.exe, we use
+ script - the driver program or script. If it's python.exe, we use
search-path behavior to execute, otherwise we assume that it is in
build_dir.
multi - a boolean hint that the exe will be run multiple times, generating
@@ -227,7 +227,7 @@ class ChromeTests:
def TestIpc(self):
return self.SimpleTest("chrome", "ipc_tests.exe")
-
+
def TestNet(self):
return self.SimpleTest("net", "net_unittests.exe")
@@ -245,8 +245,8 @@ class ChromeTests:
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
- # to continuously run small slices of the layout tests under purify rather
- # than having to run all of them in one shot.
+ # to continuously run small slices of the layout tests under purify rather
+ # than having to run all of them in one shot.
chunk_num = 0
# Tests currently seem to take about 20-30s each.
chunk_size = 120 # so about 40-60 minutes per run
@@ -265,7 +265,7 @@ class ChromeTests:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
- logging.error("error reading from file %s (%d, %s)" % (chunk_file,
+ logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
@@ -282,7 +282,7 @@ class ChromeTests:
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
-
+
if run_all:
ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
script_cmd, multi=True, cmd_args=["--timeout=0"])
@@ -323,7 +323,7 @@ class ChromeTests:
instrumentation_error = self.InstrumentDll()
if instrumentation_error:
return instrumentation_error
- return self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
+ return self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
["ui_tests.exe",
"--single-process",
"--ui-test-timeout=180000",
@@ -351,7 +351,7 @@ def _main(argv):
help="Don't force a re-instrumentation for ui_tests")
parser.add_option("", "--run-singly", action="store_true", default=False,
help="run tests independently of each other so that they "
- "don't interfere with each other and so that errors "
+ "don't interfere with each other and so that errors "
"can be accurately attributed to their source");
parser.add_option("", "--report_dir",
help="path where report files are saved")
diff --git a/tools/purify/common.py b/tools/purify/common.py
index 9bd55ae..102c4af6 100644
--- a/tools/purify/common.py
+++ b/tools/purify/common.py
@@ -10,17 +10,17 @@ running of Rational Purify and Quantify in a consistent manner.
"""
# Purify and Quantify have a front-end (e.g. quantifyw.exe) which talks to a
-# back-end engine (e.g. quantifye.exe). The back-end seems to handle
-# instrumentation, while the front-end controls program execution and
+# back-end engine (e.g. quantifye.exe). The back-end seems to handle
+# instrumentation, while the front-end controls program execution and
# measurement. The front-end will dynamically launch the back-end if
-# instrumentation is needed (sometimes in the middle of a run if a dll is
+# instrumentation is needed (sometimes in the middle of a run if a dll is
# loaded dynamically).
# In an ideal world, this script would simply execute the front-end and check
# the output. However, purify is not the most reliable or well-documented app
# on the planet, and my attempts to get it to run this way led to the back-end
# engine hanging during instrumentation. The workaround to this was to run two
-# passes, first running the engine to do instrumentation rather than letting
-# the front-end do it for you, then running the front-end to actually do the
+# passes, first running the engine to do instrumentation rather than letting
+# the front-end do it for you, then running the front-end to actually do the
# run. Each time through we're deleting all of the instrumented files in the
# cache to ensure that we're testing that instrumentation works from scratch.
# (although this can be changed with an option)
@@ -60,7 +60,7 @@ def _print_line(line, flush=True):
def RunSubprocess(proc, timeout=0, detach=False):
""" Runs a subprocess, until it finishes or |timeout| is exceeded and the
process is killed with taskkill. A |timeout| <= 0 means no timeout.
-
+
Args:
proc: list of process components (exe + args)
timeout: how long to wait before killing, <= 0 means wait forever
@@ -156,13 +156,13 @@ class Rational(object):
common argument parsing as well as the general program flow of Instrument,
Execute, Analyze.
'''
-
+
def __init__(self):
google.logging_utils.config_root()
self._out_file = None
def Run(self):
- '''Call this to run through the whole process:
+ '''Call this to run through the whole process:
Setup, Instrument, Execute, Analyze'''
start = datetime.datetime.now()
retcode = -1
@@ -208,7 +208,7 @@ class Rational(object):
parser.add_option("-o", "--out_file", dest="out_file", metavar="OUTFILE",
default="",
help="output data is written to OUTFILE")
- parser.add_option("-s", "--save_cache",
+ parser.add_option("-s", "--save_cache",
dest="save_cache", action="store_true", default=False,
help="don't delete instrumentation cache")
parser.add_option("-c", "--cache_dir", dest="cache_dir", metavar="CACHEDIR",
@@ -231,10 +231,10 @@ class Rational(object):
if self.ParseArgv():
logging.info("instrumentation cache in %s" % self._cache_dir)
logging.info("output saving to %s" % self._out_file)
- # Ensure that Rational's common dir and cache dir are in the front of the
+ # Ensure that Rational's common dir and cache dir are in the front of the
# path. The common dir is required for purify to run in any case, and
# the cache_dir is required when using the /Replace=yes option.
- os.environ["PATH"] = (COMMON_PATH + ";" + self._cache_dir + ";" +
+ os.environ["PATH"] = (COMMON_PATH + ";" + self._cache_dir + ";" +
os.environ["PATH"])
# clear the cache to make sure we're starting clean
self.__ClearInstrumentationCache()
@@ -262,7 +262,7 @@ class Rational(object):
return False
def Execute(self, proc):
- ''' Execute the app to be tested after successful instrumentation.
+ ''' Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc.'''
logging.info("starting execution...")
# note that self._args begins with the exe to be run
@@ -330,5 +330,5 @@ class Rational(object):
try:
os.remove(file)
except:
- logging.warning("unable to delete file %s: %s" % (file,
+ logging.warning("unable to delete file %s: %s" % (file,
sys.exc_info()[0]))
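For context on the RunSubprocess docstring touched above, the pattern is: start the child, poll it, and kill it once a positive timeout has been exceeded. A rough, self-contained sketch of that pattern (a hypothetical helper, not the real common.py code, which shells out to taskkill on Windows):

    import subprocess
    import time

    def run_with_timeout(cmd, timeout=0):
        """Run |cmd|, polling until it exits; kill it if |timeout| seconds
        is positive and exceeded. Returns the process return code."""
        proc = subprocess.Popen(cmd)
        start = time.time()
        while proc.poll() is None:
            if timeout > 0 and time.time() - start > timeout:
                proc.kill()  # the real script uses taskkill instead
                return -1
            time.sleep(0.5)
        return proc.returncode

A timeout <= 0 simply means the loop waits forever, matching the behavior described in the docstring.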
diff --git a/tools/purify/purify_analyze.py b/tools/purify/purify_analyze.py
index 70a3610..2c02b0c 100644
--- a/tools/purify/purify_analyze.py
+++ b/tools/purify/purify_analyze.py
@@ -70,7 +70,7 @@ class MemoryTreeNode(object):
return root
def __init__(self, function, bytes, blocks):
- '''
+ '''
Args:
function: A string representing a unique method or function.
bytes: initial number of bytes allocated in this node
@@ -105,15 +105,15 @@ class MemoryTreeNode(object):
(other._bytes, other._blocks, other._function))
def __str__(self):
- return "(%d bytes, %d blocks, %d allocs) %s" % (
+ return "(%d bytes, %d blocks, %d allocs) %s" % (
self._bytes, self._blocks, self._allocs, self._function)
def PrintRecursive(self, padding="", byte_filter=0):
'''Print the tree and all of its children recursively (depth-first). All
nodes at a given level of the tree are sorted in descending order by size.
-
+
Args:
- padding: Printed at the front of the line. Each recursive call adds a
+ padding: Printed at the front of the line. Each recursive call adds a
single space character.
byte_filter: a number of bytes below which we'll prune the tree
'''
@@ -174,7 +174,7 @@ class PurifyAnalyze:
# A symbolic name for the run being analyzed, often the name of the
# exe which was purified.
self._name = name
-
+
# The top of the source code tree of the code we're analyzing.
# This prefix is stripped from all filenames in stacks for normalization.
if source_dir:
@@ -224,7 +224,7 @@ class PurifyAnalyze:
return False
# check ignore patterns against title and top-most visible stack frames
- strings = [msg._title]
+ strings = [msg._title]
err = msg.GetErrorStack()
if err:
line = err.GetTopVisibleStackLine().get('function', None)
@@ -403,7 +403,7 @@ class PurifyAnalyze:
# Purify output should never end with a real message
if message:
logging.error("Unexpected message at end of file %s" % file)
-
+
return fatal_errors == 0
def GetMessageList(self, key):
@@ -433,13 +433,13 @@ class PurifyAnalyze:
count = 0
for msg in all:
count += msg._count
- self._PrintAndSave("%s(%s) unique:%d total:%d" % (self._name,
+ self._PrintAndSave("%s(%s) unique:%d total:%d" % (self._name,
purify_message.GetMessageType(key), len(unique), count), file)
if key not in ["MIU"]:
ignore_file = "%s_%s_ignore.txt" % (self._name, key)
ignore_hashes = self._MessageHashesFromFile(ignore_file)
ignored = 0
-
+
groups = list.UniqueMessageGroups()
group_keys = groups.keys()
group_keys.sort(cmp=lambda x,y: len(groups[y]) - len(groups[x]))
@@ -449,11 +449,11 @@ class PurifyAnalyze:
ignored += len(groups[group]) - len(kept_msgs)
groups[group] = kept_msgs
if ignored:
- self._PrintAndSave("%s(%s) ignored:%d" % (self._name,
+ self._PrintAndSave("%s(%s) ignored:%d" % (self._name,
purify_message.GetMessageType(key), ignored), file)
total = reduce(lambda x, y: x + len(groups[y]), group_keys, 0)
if total:
- self._PrintAndSave("%s(%s) group summary:" % (self._name,
+ self._PrintAndSave("%s(%s) group summary:" % (self._name,
purify_message.GetMessageType(key)), file)
self._PrintAndSave(" TOTAL: %d" % total, file)
for group in group_keys:
@@ -491,7 +491,7 @@ class PurifyAnalyze:
for sublist in sublists:
tree = MemoryTreeNode.CreateTree(sublist)
trees.append(tree)
-
+
# while the tree is a hierarchical assignment from the root/bottom of the
# stack down, the summary is simply adding the total of the top-most
# stack item from our code
@@ -550,7 +550,7 @@ class PurifyAnalyze:
sys.stderr.flush()
sys.stdout.flush()
logging.info("summary of Purify bugs:")
-
+
# This is a specialized set of counters for unit tests, with some
# unfortunate hard-coded knowledge.
test_counts = {}
@@ -596,7 +596,7 @@ class PurifyAnalyze:
prog_args = prog.split(" ")
arg_prefix = "--test-name="
test_name = "UNKNOWN"
- for arg in prog_args:
+ for arg in prog_args:
index = arg.find(arg_prefix)
if index >= 0:
test_name = arg[len(arg_prefix):]
@@ -636,7 +636,7 @@ class PurifyAnalyze:
def SaveStrings(self, string_list, key, fname_extra=""):
'''Output a list of strings to a file in the report dir.
'''
- out = os.path.join(self._report_dir,
+ out = os.path.join(self._report_dir,
"%s_%s%s.txt" % (self._name, key, fname_extra))
logging.info("saving %s" % (out))
try:
@@ -778,7 +778,7 @@ class PurifyAnalyze:
# type of message which is used to generate filenames and descriptive
# error messages
type_name = "%s_%s" % (self._name, type)
-
+
# open the baseline file to compare against
baseline_file = "%s.txt" % type_name
baseline_hashes = self._MessageHashesFromFile(baseline_file)
@@ -795,7 +795,7 @@ class PurifyAnalyze:
current_list = self.GetMessageList(type)
if current_list:
# Since we're looking at the list of unique messages,
- # if the number of occurrances of a given unique message
+ # if the number of occurrances of a given unique message
# changes, it won't show up as an error.
current_messages = current_list.UniqueMessages()
else:
@@ -825,10 +825,10 @@ class PurifyAnalyze:
len(type_errors), len(type_fixes)))
if len(type_errors):
- strs = [current_hashes[x].NormalizedStr(verbose=True)
+ strs = [current_hashes[x].NormalizedStr(verbose=True)
for x in type_errors]
logging.error("%d new '%s(%s)' errors found\n%s" % (len(type_errors),
- purify_message.GetMessageType(type), type,
+ purify_message.GetMessageType(type), type,
'\n'.join(strs)))
strs = [current_hashes[x].NormalizedStr() for x in type_errors]
self.SaveStrings(strs, type, "_NEW")
@@ -838,7 +838,7 @@ class PurifyAnalyze:
# we don't have access to the original message, so all we can do is log
# the non-verbose normalized text
logging.warning("%d new '%s(%s)' unexpected fixes found\n%s" % (
- len(type_fixes), purify_message.GetMessageType(type),
+ len(type_fixes), purify_message.GetMessageType(type),
type, '\n'.join(type_fixes)))
self.SaveStrings(type_fixes, type, "_FIXED")
fixes += len(type_fixes)
@@ -872,10 +872,10 @@ def _main():
parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
parser.add_option("-b", "--baseline", action="store_true", default=False,
help="save output to baseline files")
- parser.add_option("-m", "--memory_in_use",
+ parser.add_option("-m", "--memory_in_use",
action="store_true", default=False,
help="print memory in use summary")
- parser.add_option("", "--validate",
+ parser.add_option("", "--validate",
action="store_true", default=False,
help="validate results vs. baseline")
parser.add_option("-e", "--echo_to_stdout",
@@ -908,7 +908,7 @@ def _main():
google.logging_utils.config_root(level=logging.DEBUG)
else:
google.logging_utils.config_root(level=logging.INFO)
- pa = PurifyAnalyze(filenames, options.echo_to_stdout, options.name,
+ pa = PurifyAnalyze(filenames, options.echo_to_stdout, options.name,
options.source_dir, options.data_dir, options.report_dir)
execute_crash = not pa.ReadFile()
if options.bug_report:
@@ -934,6 +934,6 @@ def _main():
sys.exit(retcode)
if __name__ == "__main__":
- _main()
+ _main()
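To make the MemoryTreeNode comments above a little more concrete: the structure is a tree keyed by function name, with byte/block totals accumulated along each stack, printed depth-first with the largest children first and small nodes pruned. A simplified illustrative sketch (field names are approximations, not the actual purify_analyze.py class):

    class TreeNode(object):
        def __init__(self, function):
            self.function = function
            self.bytes = 0
            self.blocks = 0
            self.children = {}  # function name -> TreeNode

        def add_alloc(self, stack, bytes, blocks):
            """Walk |stack| from the bottom frame down, accumulating totals."""
            self.bytes += bytes
            self.blocks += blocks
            if stack:
                frame = stack[0]
                if frame not in self.children:
                    self.children[frame] = TreeNode(frame)
                self.children[frame].add_alloc(stack[1:], bytes, blocks)

        def print_recursive(self, padding="", byte_filter=0):
            """Depth-first dump, largest children first, pruning tiny nodes."""
            print "%s(%d bytes, %d blocks) %s" % (
                padding, self.bytes, self.blocks, self.function)
            kids = sorted(self.children.values(),
                          key=lambda n: n.bytes, reverse=True)
            for kid in kids:
                if kid.bytes >= byte_filter:
                    kid.print_recursive(padding + " ", byte_filter)

Typical use would be to create a root node, call add_alloc() once per reported allocation with its stack of function names, and then print_recursive() with a byte filter to prune noise.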
diff --git a/tools/purify/purify_coverage.py b/tools/purify/purify_coverage.py
index e2fef1c..e88af50 100644
--- a/tools/purify/purify_coverage.py
+++ b/tools/purify/purify_coverage.py
@@ -43,16 +43,16 @@ class PurifyCoverage(common.Rational):
self._name = os.path.basename(self._exe)
# _out_file can be set in common.Rational.ParseArgv
if not self._out_file:
- self._out_file = os.path.join(self._latest_dir,
+ self._out_file = os.path.join(self._latest_dir,
"%s_coverage.txt" % (self._name))
self._source_dir = self._options.source_dir
return True
return False
-
+
def _PurifyCommand(self):
- cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
+ cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
"/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
- "/AllocCallStackLength=30", "/Coverage",
+ "/AllocCallStackLength=30", "/Coverage",
"/CoverageDefaultInstrumentationType=line"]
return cmd
@@ -62,7 +62,7 @@ class PurifyCoverage(common.Rational):
cmd.append("/Run=no")
cmd.append(os.path.abspath(self._exe))
return common.Rational.Instrument(self, cmd)
-
+
def Execute(self):
cmd = self._PurifyCommand()
cmd.append("/SaveTextData=" + self._out_file)
diff --git a/tools/purify/purify_inuse.py b/tools/purify/purify_inuse.py
index ed708a7..12d13f2 100644
--- a/tools/purify/purify_inuse.py
+++ b/tools/purify/purify_inuse.py
@@ -50,9 +50,9 @@ class PurifyInUse(common.Rational):
self._byte_filter = int(self._options.byte_filter)
return True
return False
-
+
def _PurifyCommand(self):
- cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
+ cmd = [common.PURIFYW_PATH, "/CacheDir=" + self._cache_dir,
"/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
"/AllocCallStackLength=30", "/ErrorCallStackLength=30",
"/LeaksAtExit=no", "/InUseAtExit=yes"]
@@ -64,7 +64,7 @@ class PurifyInUse(common.Rational):
cmd.append("/Run=no")
cmd.append(os.path.abspath(self._exe))
return common.Rational.Instrument(self, cmd)
-
+
def Execute(self):
cmd = self._PurifyCommand()
cmd.append("/SaveTextData=" + self._out_file)
@@ -74,7 +74,7 @@ class PurifyInUse(common.Rational):
if not os.path.isfile(self._out_file):
logging.info("no output file %s" % self._out_file)
return -1
- pa = purify_analyze.PurifyAnalyze(self._out_file, False,
+ pa = purify_analyze.PurifyAnalyze(self._out_file, False,
self._name, self._source_dir)
if not pa.ReadFile():
logging.warning("inuse summary suspect due to fatal error during run")
diff --git a/tools/purify/purify_message.py b/tools/purify/purify_message.py
index 9ff107f..83ed039 100644
--- a/tools/purify/purify_message.py
+++ b/tools/purify/purify_message.py
@@ -117,7 +117,7 @@ class Stack:
# if functions match the following, elide them from the stack
pat_func_elide = (re.compile('^std::'), re.compile('^new\('))
# if files match the following, elide them from the stack
- pat_file_elide = (re.compile('.*platformsdk_win2008.*'),
+ pat_file_elide = (re.compile('.*platformsdk_win2008.*'),
re.compile('.*.(dll|DLL)$'),
# bug 1069902
re.compile('webkit/pending/wtf/fastmalloc\.h'),
@@ -162,7 +162,7 @@ class Stack:
(e.g. group.subgroup.subsubgroup)
'''
return self._group;
-
+
def _ComputeStackLine(self, line):
line = line.lstrip()
m = Stack.pat_stack_line.match(line)
@@ -249,7 +249,7 @@ class Stack:
self._stack.append(stack_line)
self._eliding = False
self._all_external = False
-
+
# when we reach one of the known common stack entry points, truncate
# the stack to avoid printing overly redundant information
if len(self._stack) > 1:
@@ -313,7 +313,7 @@ class Stack:
len_self = len(self._stack)
len_other = len(other._stack)
min_len = min(len_self, len_other)
- # sort stacks from the bottom up
+ # sort stacks from the bottom up
for i in range(-1, -(min_len + 1), -1):
# compare file, then func, but omit line number
ret = cmp((self._stack[i]['file'], self._stack[i]['function']),
@@ -530,11 +530,11 @@ class MessageList:
self._unique_messages = None
self._sublists = None
self._bytes = 0
-
+
def GetType(self):
return self._type
- def BeginNewSublist(self):
+ def BeginNewSublist(self):
'''Some message types are logically grouped into sets of messages which
should not be mixed in the same list. Specifically, Memory In Use (MIU),
Memory Leak (MLK) and Potential Memory Leak (MPK) are generated in a set
@@ -547,7 +547,7 @@ class MessageList:
When the caller determines that one list of messages of a type has ended
and a new list has begun, it calls BeginNewSublist() which takes the current
set of messages, puts them into a new MessageList and puts that into the
- sublists array. Later, when the caller needs to get at these messages,
+ sublists array. Later, when the caller needs to get at these messages,
GetSublists() should be called.
'''
if len(self._messages):
@@ -584,7 +584,7 @@ class MessageList:
return self._messages
def UniqueMessages(self):
- '''Returns an array of the unique normalized Message objects in this
+ '''Returns an array of the unique normalized Message objects in this
MessageList.
'''
# the list is lazily computed since we have to create a sorted list,
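The bottom-up stack comparison referred to in the Stack code above (compare file and function, ignore line numbers so recompiles don't make identical stacks look different) can be sketched roughly as a standalone function; this is an illustration, not the real method:

    def compare_stacks(stack_a, stack_b):
        """Compare two stacks from the bottom frame up, using (file, function)
        only, so differing line numbers don't affect the ordering."""
        min_len = min(len(stack_a), len(stack_b))
        # Walk from the last entry (bottom of the stack) toward the top.
        for i in range(-1, -(min_len + 1), -1):
            ret = cmp((stack_a[i]['file'], stack_a[i]['function']),
                      (stack_b[i]['file'], stack_b[i]['function']))
            if ret:
                return ret
        # One stack is a prefix of the other: the shorter one sorts first.
        return cmp(len(stack_a), len(stack_b))

Here each stack is assumed to be a list of dicts with 'file', 'function' and 'line' keys, mirroring how the parsed stack lines appear to be stored.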
diff --git a/tools/purify/purify_test.py b/tools/purify/purify_test.py
index 1928df5..ae74738 100644
--- a/tools/purify/purify_test.py
+++ b/tools/purify/purify_test.py
@@ -6,9 +6,9 @@
# purify_test.py
'''Runs an exe through Purify and verifies that Purify was
-able to successfully instrument and run it. The original purpose was
-to be able to identify when a change to our code breaks our ability to Purify
-the app. This can happen with seemingly innocuous changes to code due to bugs
+able to successfully instrument and run it. The original purpose was
+to be able to identify when a change to our code breaks our ability to Purify
+the app. This can happen with seemingly innocuous changes to code due to bugs
in Purify, and is notoriously difficult to track down when it does happen.
Perhaps more importantly in the long run, this can also automate detection of
leaks and other memory bugs. It also may be useful to allow people to run
@@ -117,7 +117,7 @@ class Purify(common.Rational):
# seems to lose its mind, and we have a number of tests that use
# much larger than the default of 5.
"option -number-of-puts=30",
- # With our large pdbs, purify's default timeout (30) isn't always
+ # With our large pdbs, purify's default timeout (30) isn't always
# enough. If this isn't enough, -1 means no timeout.
"option -server-comm-timeout=120",
# check stack memory loads for UMRs, etc.
@@ -224,7 +224,7 @@ class Purify(common.Rational):
if self._instrument_only:
return
cmd = self._PurifyCommand()
- # undo the /Replace=yes that was done in Instrument(), which means to
+ # undo the /Replace=yes that was done in Instrument(), which means to
# remove the instrumented exe, and then rename exe.Original back to exe.
cmd.append("/UndoReplace")
cmd.append(os.path.abspath(self._exe))
diff --git a/tools/purify/quantify_test.py b/tools/purify/quantify_test.py
index 29691578..0c8a700 100644
--- a/tools/purify/quantify_test.py
+++ b/tools/purify/quantify_test.py
@@ -5,9 +5,9 @@
# quantify_test.py
-'''Runs an app through Quantify and verifies that Quantify was able to
+'''Runs an app through Quantify and verifies that Quantify was able to
successfully instrument and run it. The original purpose was to allow people
-to run Quantify in a consistent manner without having to worry about broken
+to run Quantify in a consistent manner without having to worry about broken
PATHs, corrupt instrumentation, or other per-machine flakiness that Quantify is
sometimes subject to. Unlike purify_test, the output from quantify_test is
a binary file, which is much more useful in manual analysis. As such, this
@@ -23,7 +23,7 @@ import common
class Quantify(common.Rational):
def __init__(self):
common.Rational.__init__(self)
-
+
def CreateOptionParser(self):
common.Rational.CreateOptionParser(self)
self._parser.description = __doc__
@@ -42,11 +42,11 @@ class Quantify(common.Rational):
"/CacheDir=" + self._cache_dir,
"-first-search-dir=" + self._exe_dir, self._exe]
return common.Rational.Instrument(self, proc)
-
+
def Execute(self):
# TODO(erikkay): add an option to also do /SaveTextData and add an
# Analyze method for automated analysis of that data.
- proc = [common.QUANTIFYW_PATH, "/CacheDir=" + self._cache_dir,
+ proc = [common.QUANTIFYW_PATH, "/CacheDir=" + self._cache_dir,
"/ShowInstrumentationProgress=no", "/ShowLoadLibraryProgress=no",
"/SaveData=" + self._out_file]
return common.Rational.Execute(self, proc)
@@ -57,5 +57,5 @@ if __name__ == "__main__":
if rational.Run():
retcode = 0
sys.exit(retcode)
-
+
diff --git a/tools/python/google/httpd_utils.py b/tools/python/google/httpd_utils.py
index 0281228..28952ed 100644
--- a/tools/python/google/httpd_utils.py
+++ b/tools/python/google/httpd_utils.py
@@ -32,7 +32,7 @@ def GetCygserverPath(start_dir, apache2=False):
'cygwin', 'usr', 'sbin')
return cygserver_path
-
+
def StartServer(document_root=None, output_dir=None, apache2=False):
"""Starts a local server on port 8000 using the basic configuration files.
diff --git a/tools/python/google/logging_utils.py b/tools/python/google/logging_utils.py
index 673097b..5fd95d3 100644
--- a/tools/python/google/logging_utils.py
+++ b/tools/python/google/logging_utils.py
@@ -26,11 +26,11 @@ class StdoutStderrHandler(logging.Handler):
sys.stdout
'''
logging.Handler.__init__(self)
- self._err = logging.StreamHandler(err)
+ self._err = logging.StreamHandler(err)
self._out = logging.StreamHandler(out)
self._threshold = threshold
self._last_was_err = False
-
+
def setLevel(self, lvl):
logging.Handler.setLevel(self, lvl)
self._err.setLevel(lvl)
@@ -62,17 +62,17 @@ class StdoutStderrHandler(logging.Handler):
FORMAT = "%(asctime)s %(filename)s [%(levelname)s] %(message)s"
DATEFMT = "%H:%M:%S"
-def config_root(level=logging.INFO, threshold=logging.WARNING, format=FORMAT,
+def config_root(level=logging.INFO, threshold=logging.WARNING, format=FORMAT,
datefmt=DATEFMT):
''' Configure the root logger to use a StdoutStderrHandler and some default
- formatting.
+ formatting.
Args:
level: messages below this level are ignored
threshold: below this logging level messages are sent to stdout,
otherwise they are sent to stderr
format: format for log messages, see logger.Format
datefmt: format for date in log messages
-
+
'''
# to set the handler of the root logging object, we need to do setup
# manually rather than using basicConfig
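A condensed sketch of the stdout/stderr split that StdoutStderrHandler implements, for reference: records below the threshold level go to stdout, records at or above it go to stderr. This is an illustrative reimplementation, not the patched class itself:

    import logging
    import sys

    class SplitHandler(logging.Handler):
        """Send records below |threshold| to stdout, the rest to stderr."""
        def __init__(self, threshold=logging.WARNING):
            logging.Handler.__init__(self)
            self._threshold = threshold
            self._out = logging.StreamHandler(sys.stdout)
            self._err = logging.StreamHandler(sys.stderr)

        def setFormatter(self, fmt):
            logging.Handler.setFormatter(self, fmt)
            self._out.setFormatter(fmt)
            self._err.setFormatter(fmt)

        def emit(self, record):
            if record.levelno < self._threshold:
                self._out.emit(record)
            else:
                self._err.emit(record)

Attaching it to the root logger (logging.getLogger().addHandler(SplitHandler())) gives the same effect the config_root() helper above sets up manually.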
diff --git a/tools/python/google/path_utils.py b/tools/python/google/path_utils.py
index 751cfb5..6f94a84 100644
--- a/tools/python/google/path_utils.py
+++ b/tools/python/google/path_utils.py
@@ -12,7 +12,7 @@ import os
import sys
class PathNotFound(Exception): pass
-
+
def ScriptDir():
"""Get the full path to the directory containing the current script."""
script_filename = os.path.abspath(sys.argv[0])
@@ -20,7 +20,7 @@ def ScriptDir():
def FindAncestor(start_dir, ancestor):
"""Finds an ancestor dir in a path.
-
+
For example, FindAncestor('c:\foo\bar\baz', 'bar') would return
'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
"""
diff --git a/tools/python/google/platform_utils_win.py b/tools/python/google/platform_utils_win.py
index 45419d7..eef2b50 100644
--- a/tools/python/google/platform_utils_win.py
+++ b/tools/python/google/platform_utils_win.py
@@ -157,7 +157,7 @@ class PlatformUtility(object):
' -C \'ServerRoot "%(server_root)s"\''
)
if apache2:
- httpd_cmd_string = ('export CYGWIN=server;' + httpd_cmd_string +
+ httpd_cmd_string = ('export CYGWIN=server;' + httpd_cmd_string +
' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\'')
if document_root:
httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''
diff --git a/tools/site_compare/command_line.py b/tools/site_compare/command_line.py
index b474abf..2c87fb9 100644
--- a/tools/site_compare/command_line.py
+++ b/tools/site_compare/command_line.py
@@ -56,11 +56,11 @@ class Command(object):
"""Encapsulates an argument to a command."""
VALID_TYPES = ['string', 'readfile', 'int', 'flag', 'coords']
TYPES_WITH_VALUES = ['string', 'readfile', 'int', 'coords']
-
+
def __init__(self, names, helptext, type, metaname,
required, default, positional):
"""Command-line argument to a command.
-
+
Args:
names: argument name, or list of synonyms
helptext: brief description of the argument
@@ -76,28 +76,28 @@ class Command(object):
required: True if argument must be specified
default: Default value if not specified
positional: Argument specified by location, not name
-
+
Raises:
ValueError: the argument name is invalid for some reason
"""
if type not in Command.Argument.VALID_TYPES:
raise ValueError("Invalid type: %r" % type)
-
+
if required and default is not None:
raise ValueError("required and default are mutually exclusive")
-
+
if required and type == 'flag':
raise ValueError("A required flag? Give me a break.")
-
+
if metaname and type not in Command.Argument.TYPES_WITH_VALUES:
raise ValueError("Type %r can't have a metaname" % type)
-
+
# If no metaname is provided, infer it: use the alphabetical characters
# of the last provided name
if not metaname and type in Command.Argument.TYPES_WITH_VALUES:
metaname = (
names[-1].lstrip(string.punctuation + string.whitespace).upper())
-
+
self.names = names
self.helptext = helptext
self.type = type
@@ -105,31 +105,31 @@ class Command(object):
self.default = default
self.positional = positional
self.metaname = metaname
-
+
self.mutex = [] # arguments that are mutually exclusive with
# this one
self.depends = [] # arguments that must be present for this
# one to be valid
self.present = False # has this argument been specified?
-
+
def AddDependency(self, arg):
"""Makes this argument dependent on another argument.
-
+
Args:
arg: name of the argument this one depends on
"""
if arg not in self.depends:
self.depends.append(arg)
-
+
def AddMutualExclusion(self, arg):
"""Makes this argument invalid if another is specified.
-
+
Args:
arg: name of the mutually exclusive argument.
"""
if arg not in self.mutex:
self.mutex.append(arg)
-
+
def GetUsageString(self):
"""Returns a brief string describing the argument's usage."""
if not self.positional:
@@ -138,49 +138,49 @@ class Command(object):
string += "="+self.metaname
else:
string = self.metaname
-
+
if not self.required:
string = "["+string+"]"
-
+
return string
-
+
def GetNames(self):
"""Returns a string containing a list of the arg's names."""
if self.positional:
return self.metaname
else:
return ", ".join(self.names)
-
+
def GetHelpString(self, width=80, indent=5, names_width=20, gutter=2):
"""Returns a help string including help for all the arguments."""
names = [" "*indent + line +" "*(names_width-len(line)) for line in
textwrap.wrap(self.GetNames(), names_width)]
-
+
helpstring = textwrap.wrap(self.helptext, width-indent-names_width-gutter)
-
+
if len(names) < len(helpstring):
names += [" "*(indent+names_width)]*(len(helpstring)-len(names))
-
+
if len(helpstring) < len(names):
helpstring += [""]*(len(names)-len(helpstring))
-
+
return "\n".join([name_line + " "*gutter + help_line for
name_line, help_line in zip(names, helpstring)])
-
+
def __repr__(self):
if self.present:
string = '= %r' % self.value
else:
string = "(absent)"
-
+
return "Argument %s '%s'%s" % (self.type, self.names[0], string)
-
+
# end of nested class Argument
-
+
def AddArgument(self, names, helptext, type="string", metaname=None,
required=False, default=None, positional=False):
"""Command-line argument to a command.
-
+
Args:
names: argument name, or list of synonyms
helptext: brief description of the argument
@@ -189,82 +189,82 @@ class Command(object):
required: True if argument must be specified
default: Default value if not specified
positional: Argument specified by location, not name
-
+
Raises:
ValueError: the argument already exists or is invalid
-
+
Returns:
The newly-created argument
"""
if IsString(names): names = [names]
-
+
names = [name.lower() for name in names]
-
+
for name in names:
if name in self.arg_dict:
raise ValueError("%s is already an argument"%name)
-
+
if (positional and required and
[arg for arg in self.args if arg.positional] and
not [arg for arg in self.args if arg.positional][-1].required):
raise ValueError(
"A required positional argument may not follow an optional one.")
-
+
arg = Command.Argument(names, helptext, type, metaname,
required, default, positional)
-
+
self.args.append(arg)
-
+
for name in names:
self.arg_dict[name] = arg
-
+
return arg
-
+
def GetArgument(self, name):
"""Return an argument from a name."""
return self.arg_dict[name.lower()]
-
+
def AddMutualExclusion(self, args):
"""Specifies that a list of arguments are mutually exclusive."""
if len(args) < 2:
raise ValueError("At least two arguments must be specified.")
-
+
args = [arg.lower() for arg in args]
-
+
for index in xrange(len(args)-1):
for index2 in xrange(index+1, len(args)):
self.arg_dict[args[index]].AddMutualExclusion(self.arg_dict[args[index2]])
-
+
def AddDependency(self, dependent, depends_on):
"""Specifies that one argument may only be present if another is.
-
+
Args:
dependent: the name of the dependent argument
depends_on: the name of the argument on which it depends
"""
self.arg_dict[dependent.lower()].AddDependency(
self.arg_dict[depends_on.lower()])
-
+
def AddMutualDependency(self, args):
"""Specifies that a list of arguments are all mutually dependent."""
if len(args) < 2:
raise ValueError("At least two arguments must be specified.")
-
+
args = [arg.lower() for arg in args]
-
+
for (arg1, arg2) in [(arg1, arg2) for arg1 in args for arg2 in args]:
if arg1 == arg2: continue
self.arg_dict[arg1].AddDependency(self.arg_dict[arg2])
-
+
def AddRequiredGroup(self, args):
"""Specifies that at least one of the named arguments must be present."""
if len(args) < 2:
raise ValueError("At least two arguments must be in a required group.")
-
+
args = [self.arg_dict[arg.lower()] for arg in args]
-
+
self.required_groups.append(args)
-
+
def ParseArguments(self):
"""Given a command line, parse and validate the arguments."""
@@ -272,70 +272,70 @@ class Command(object):
for arg in self.args:
arg.present = False
arg.value = None
-
+
self.parse_errors = []
-
+
# look for arguments remaining on the command line
while len(self.cmdline.rargs):
try:
self.ParseNextArgument()
except ParseError, e:
self.parse_errors.append(e.args[0])
-
+
# after all the arguments are parsed, check for problems
for arg in self.args:
if not arg.present and arg.required:
self.parse_errors.append("'%s': required parameter was missing"
% arg.names[0])
-
+
if not arg.present and arg.default:
arg.present = True
arg.value = arg.default
-
+
if arg.present:
for mutex in arg.mutex:
if mutex.present:
self.parse_errors.append(
"'%s', '%s': arguments are mutually exclusive" %
(arg.argstr, mutex.argstr))
-
+
for depend in arg.depends:
if not depend.present:
self.parse_errors.append("'%s': '%s' must be specified as well" %
(arg.argstr, depend.names[0]))
-
+
# check for required groups
for group in self.required_groups:
if not [arg for arg in group if arg.present]:
self.parse_errors.append("%s: at least one must be present" %
(", ".join(["'%s'" % arg.names[-1] for arg in group])))
-
+
# if we have any validators, invoke them
if not self.parse_errors and self.validator:
try:
self.validator(self)
except ParseError, e:
self.parse_errors.append(e.args[0])
-
+
# Helper methods so you can treat the command like a dict
def __getitem__(self, key):
arg = self.arg_dict[key.lower()]
-
+
if arg.type == 'flag':
return arg.present
else:
return arg.value
-
+
def __iter__(self):
return [arg for arg in self.args if arg.present].__iter__()
-
+
def ArgumentPresent(self, key):
"""Tests if an argument exists and has been specified."""
return key.lower() in self.arg_dict and self.arg_dict[key.lower()].present
-
+
def __contains__(self, key):
return self.ArgumentPresent(key)
-
+
def ParseNextArgument(self):
"""Find the next argument in the command line and parse it."""
arg = None
@@ -348,26 +348,26 @@ class Command(object):
if arg.type in Command.Argument.TYPES_WITH_VALUES:
if len(self.cmdline.rargs):
value = self.cmdline.rargs.pop(0)
-
+
# Second check: is this of the form "arg=val" or "arg:val"?
if arg is None:
delimiter_pos = -1
-
+
for delimiter in [':', '=']:
pos = argstr.find(delimiter)
if pos >= 0:
if delimiter_pos < 0 or pos < delimiter_pos:
delimiter_pos = pos
-
+
if delimiter_pos >= 0:
testarg = argstr[:delimiter_pos]
testval = argstr[delimiter_pos+1:]
-
+
if testarg.lower() in self.arg_dict:
arg = self.arg_dict[testarg.lower()]
argstr = testarg
value = testval
-
+
# Third check: does this begin an argument?
if arg is None:
for key in self.arg_dict.iterkeys():
@@ -377,7 +377,7 @@ class Command(object):
value = argstr[len(key):]
argstr = argstr[:len(key)]
arg = self.arg_dict[argstr]
-
+
# Fourth check: do we have any positional arguments available?
if arg is None:
for positional_arg in [
@@ -391,40 +391,40 @@ class Command(object):
# Push the retrieved argument/value onto the largs stack
if argstr: self.cmdline.largs.append(argstr)
if value: self.cmdline.largs.append(value)
-
+
# If we've made it this far and haven't found an arg, give up
if arg is None:
raise ParseError("Unknown argument: '%s'" % argstr)
-
+
# Convert the value, if necessary
if arg.type in Command.Argument.TYPES_WITH_VALUES and value is None:
raise ParseError("Argument '%s' requires a value" % argstr)
-
+
if value is not None:
value = self.StringToValue(value, arg.type, argstr)
arg.argstr = argstr
arg.value = value
arg.present = True
-
+
# end method ParseNextArgument
-
+
def StringToValue(self, value, type, argstr):
"""Convert a string from the command line to a value type."""
try:
if type == 'string':
pass # leave it be
-
+
elif type == 'int':
try:
value = int(value)
except ValueError:
raise ParseError
-
+
elif type == 'readfile':
if not os.path.isfile(value):
raise ParseError("'%s': '%s' does not exist" % (argstr, value))
-
+
elif type == 'coords':
try:
value = [int(val) for val in
@@ -432,10 +432,10 @@ class Command(object):
groups()]
except AttributeError:
raise ParseError
-
+
else:
raise ValueError("Unknown type: '%s'" % type)
-
+
except ParseError, e:
# The bare exception is raised in the generic case; more specific errors
# will arrive with arguments and should just be reraised
@@ -443,23 +443,23 @@ class Command(object):
e = ParseError("'%s': unable to convert '%s' to type '%s'" %
(argstr, value, type))
raise e
-
+
return value
-
+
def SortArgs(self):
"""Returns a method that can be passed to sort() to sort arguments."""
-
+
def ArgSorter(arg1, arg2):
"""Helper for sorting arguments in the usage string.
-
+
Positional arguments come first, then required arguments,
then optional arguments. Pylint demands this trivial function
have both Args: and Returns: sections, sigh.
-
+
Args:
arg1: the first argument to compare
arg2: the second argument to compare
-
+
Returns:
-1 if arg1 should be sorted first, +1 if it should be sorted second,
and 0 if arg1 and arg2 have the same sort level.
@@ -467,56 +467,56 @@ class Command(object):
return ((arg2.positional-arg1.positional)*2 +
(arg2.required-arg1.required))
return ArgSorter
-
+
def GetUsageString(self, width=80, name=None):
"""Gets a string describing how the command is used."""
if name is None: name = self.names[0]
-
+
initial_indent = "Usage: %s %s " % (self.cmdline.prog, name)
subsequent_indent = " " * len(initial_indent)
-
+
sorted_args = self.args[:]
sorted_args.sort(self.SortArgs())
-
+
return textwrap.fill(
" ".join([arg.GetUsageString() for arg in sorted_args]), width,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
-
+
def GetHelpString(self, width=80):
"""Returns a list of help strings for all this command's arguments."""
sorted_args = self.args[:]
sorted_args.sort(self.SortArgs())
-
+
return "\n".join([arg.GetHelpString(width) for arg in sorted_args])
-
+
# end class Command
-
-
+
+
class CommandLine(object):
"""Parse a command line, extracting a command and its arguments."""
-
+
def __init__(self):
self.commands = []
self.cmd_dict = {}
-
+
# Add the help command to the parser
help_cmd = self.AddCommand(["help", "--help", "-?", "-h"],
"Displays help text for a command",
ValidateHelpCommand,
DoHelpCommand)
-
+
help_cmd.AddArgument(
"command", "Command to retrieve help for", positional=True)
help_cmd.AddArgument(
"--width", "Width of the output", type='int', default=80)
-
+
self.Exit = sys.exit # override this if you don't want the script to halt
# on error or on display of help
-
+
self.out = sys.stdout # override these if you want to redirect
self.err = sys.stderr # output or error messages
-
+
def AddCommand(self, names, helptext, validator=None, impl=None):
"""Add a new command to the parser.
@@ -525,56 +525,56 @@ class CommandLine(object):
helptext: brief string description of the command
validator: method to validate a command's arguments
impl: callable to be invoked when command is called
-
+
Raises:
ValueError: raised if command already added
-
+
Returns:
The new command
"""
if IsString(names): names = [names]
-
+
for name in names:
if name in self.cmd_dict:
raise ValueError("%s is already a command"%name)
-
+
cmd = Command(names, helptext, validator, impl)
cmd.cmdline = self
-
+
self.commands.append(cmd)
for name in names:
self.cmd_dict[name.lower()] = cmd
-
+
return cmd
-
+
def GetUsageString(self):
"""Returns simple usage instructions."""
return "Type '%s help' for usage." % self.prog
-
+
def ParseCommandLine(self, argv=None, prog=None, execute=True):
"""Does the work of parsing a command line.
-
+
Args:
argv: list of arguments, defaults to sys.argv[1:]
prog: name of the command, defaults to the base name of the script
execute: if false, just parse, don't invoke the 'impl' member
-
+
Returns:
The command that was executed
"""
if argv is None: argv = sys.argv[1:]
if prog is None: prog = os.path.basename(sys.argv[0]).split('.')[0]
-
+
# Store off our parameters, we may need them someday
self.argv = argv
self.prog = prog
-
+
# We shouldn't be invoked without arguments, that's just lame
if not len(argv):
self.out.writelines(self.GetUsageString())
self.Exit()
return None # in case the client overrides Exit
-
+
# Is it a valid command?
self.command_string = argv[0].lower()
if not self.command_string in self.cmd_dict:
@@ -582,33 +582,33 @@ class CommandLine(object):
self.out.write(self.GetUsageString())
self.Exit()
return None # in case the client overrides Exit
-
+
self.command = self.cmd_dict[self.command_string]
-
+
# "rargs" = remaining (unparsed) arguments
# "largs" = already parsed, "left" of the read head
self.rargs = argv[1:]
self.largs = []
-
+
# let the command object do the parsing
self.command.ParseArguments()
-
+
if self.command.parse_errors:
# there were errors, output the usage string and exit
self.err.write(self.command.GetUsageString()+"\n\n")
self.err.write("\n".join(self.command.parse_errors))
self.err.write("\n\n")
-
+
self.Exit()
-
+
elif execute and self.command.impl:
self.command.impl(self.command)
-
+
return self.command
-
+
def __getitem__(self, key):
return self.cmd_dict[key]
-
+
def __iter__(self):
return self.cmd_dict.__iter__()
@@ -618,25 +618,25 @@ def ValidateHelpCommand(command):
if 'command' in command and command['command'] not in command.cmdline:
raise ParseError("'%s': unknown command" % command['command'])
-
+
def DoHelpCommand(command):
"""Executed when the command is 'help'."""
out = command.cmdline.out
width = command['--width']
-
+
if 'command' not in command:
out.write(command.GetUsageString())
out.write("\n\n")
-
+
indent = 5
gutter = 2
-
+
command_width = (
max([len(cmd.names[0]) for cmd in command.cmdline.commands]) + gutter)
-
+
for cmd in command.cmdline.commands:
cmd_name = cmd.names[0]
-
+
initial_indent = (" "*indent + cmd_name + " "*
(command_width+gutter-len(cmd_name)))
subsequent_indent = " "*(indent+command_width+gutter)
@@ -645,9 +645,9 @@ def DoHelpCommand(command):
initial_indent=initial_indent,
subsequent_indent=subsequent_indent))
out.write("\n")
-
+
out.write("\n")
-
+
else:
help_cmd = command.cmdline[command['command']]
@@ -657,21 +657,21 @@ def DoHelpCommand(command):
out.write("\n\n")
out.write(help_cmd.GetHelpString(width=width))
out.write("\n")
-
+
command.cmdline.Exit()
-
+
if __name__ == "__main__":
# If we're invoked rather than imported, run some tests
cmdline = CommandLine()
-
+
# Since we're testing, override Exit()
def TestExit():
pass
cmdline.Exit = TestExit
-
+
# Actually, while we're at it, let's override error output too
cmdline.err = open(os.path.devnull, "w")
-
+
test = cmdline.AddCommand(["test", "testa", "testb"], "test command")
test.AddArgument(["-i", "--int", "--integer", "--optint", "--optionalint"],
"optional integer parameter", type='int')
@@ -688,25 +688,25 @@ if __name__ == "__main__":
test.AddArgument("--mutdep2", "mutually dependent parameter 2")
test.AddArgument("--mutdep3", "mutually dependent parameter 3")
test.AddMutualDependency(["--mutdep1", "--mutdep2", "--mutdep3"])
-
+
# mutually exclusive arguments
test.AddArgument("--mutex1", "mutually exclusive parameter 1")
test.AddArgument("--mutex2", "mutually exclusive parameter 2")
test.AddArgument("--mutex3", "mutually exclusive parameter 3")
test.AddMutualExclusion(["--mutex1", "--mutex2", "--mutex3"])
-
+
# dependent argument
test.AddArgument("--dependent", "dependent argument")
test.AddDependency("--dependent", "--int")
-
+
# other argument types
test.AddArgument("--file", "filename argument", type='readfile')
test.AddArgument("--coords", "coordinate argument", type='coords')
test.AddArgument("--flag", "flag argument", type='flag')
-
+
test.AddArgument("--req1", "part of a required group", type='flag')
test.AddArgument("--req2", "part 2 of a required group", type='flag')
-
+
test.AddRequiredGroup(["--req1", "--req2"])
# a few failure cases
@@ -742,7 +742,7 @@ if __name__ == "__main__":
# Let's do some parsing! first, the minimal success line:
MIN = "test --reqint 123 param1 --req1 "
-
+
# tuples of (command line, expected error count)
test_lines = [
("test --int 3 foo --req1", 1), # missing required named parameter
@@ -781,19 +781,19 @@ if __name__ == "__main__":
(MIN+"--coords (123,456)", 0), # finally!
("test --int 123 --reqint=456 foo bar --coords(42,88) baz --req1", 0)
]
-
+
badtests = 0
-
+
for (test, expected_failures) in test_lines:
cmdline.ParseCommandLine([x.strip() for x in test.strip().split(" ")])
-
+
if not len(cmdline.command.parse_errors) == expected_failures:
print "FAILED:\n issued: '%s'\n expected: %d\n received: %d\n\n" % (
test, expected_failures, len(cmdline.command.parse_errors))
badtests += 1
-
+
print "%d failed out of %d tests" % (badtests, len(test_lines))
-
+
cmdline.ParseCommandLine(["help", "test"])
diff --git a/tools/site_compare/commands/compare2.py b/tools/site_compare/commands/compare2.py
index e970c24..045141b 100644
--- a/tools/site_compare/commands/compare2.py
+++ b/tools/site_compare/commands/compare2.py
@@ -29,7 +29,7 @@ def CreateCommand(cmdline):
"Compares the output of two browsers on the same URL or list of URLs",
ValidateCompare2,
ExecuteCompare2)
-
+
cmd.AddArgument(
["-b1", "--browser1"], "Full path to first browser's executable",
type="readfile", metaname="PATH", required=True)
@@ -81,7 +81,7 @@ def CreateCommand(cmdline):
cmd.AddArgument(
["-d", "--diffdir"], "Path to hold the difference of comparisons that fail")
-
+
def ValidateCompare2(command):
"""Validate the arguments to compare2. Raises ParseError if failed."""
executables = [".exe", ".com", ".bat"]
@@ -102,68 +102,68 @@ def ExecuteCompare2(command):
endline = command["--endline"]
url_list = [url.strip() for url in
open(command["--list"], "r").readlines()[startline:endline]]
-
+
log_file = open(command["--logfile"], "w")
outdir = command["--outdir"]
if not outdir: outdir = tempfile.gettempdir()
-
+
scrape_info_list = []
-
+
class ScrapeInfo(object):
"""Helper class to hold information about a scrape."""
__slots__ = ["browser_path", "scraper", "outdir", "result"]
-
+
for index in xrange(1, 3):
scrape_info = ScrapeInfo()
scrape_info.browser_path = command["--browser%d" % index]
scrape_info.scraper = scrapers.GetScraper(
(command["--browser"], command["--browser%dver" % index]))
-
+
if command["--browser%dname" % index]:
scrape_info.outdir = os.path.join(outdir,
command["--browser%dname" % index])
else:
scrape_info.outdir = os.path.join(outdir, str(index))
-
+
drivers.windowing.PreparePath(scrape_info.outdir)
scrape_info_list.append(scrape_info)
-
+
compare = operators.GetOperator("equals_with_mask")
-
+
for url in url_list:
success = True
-
+
for scrape_info in scrape_info_list:
scrape_info.result = scrape_info.scraper.Scrape(
[url], scrape_info.outdir, command["--size"], (0, 0),
command["--timeout"], path=scrape_info.browser_path)
-
+
if not scrape_info.result:
scrape_info.result = "success"
else:
success = False
-
+
result = "unknown"
-
+
if success:
result = "equal"
-
+
file1 = drivers.windowing.URLtoFilename(
url, scrape_info_list[0].outdir, ".bmp")
file2 = drivers.windowing.URLtoFilename(
url, scrape_info_list[1].outdir, ".bmp")
-
+
comparison_result = compare.Compare(file1, file2,
maskdir=command["--maskdir"])
-
+
if comparison_result is not None:
result = "not-equal"
-
+
if command["--diffdir"]:
comparison_result[1].save(
drivers.windowing.URLtoFilename(url, command["--diffdir"], ".bmp"))
-
+
# TODO(jhaas): maybe use the logging module rather than raw file writes
log_file.write("%s %s %s %s\n" % (url,
scrape_info_list[0].result,
diff --git a/tools/site_compare/commands/maskmaker.py b/tools/site_compare/commands/maskmaker.py
index a5bf6e4..73b732c 100644
--- a/tools/site_compare/commands/maskmaker.py
+++ b/tools/site_compare/commands/maskmaker.py
@@ -96,7 +96,7 @@ def ValidateMaskmaker(command):
def ExecuteMaskmaker(command):
"""Performs automatic mask generation."""
-
+
# Get the list of URLs to generate masks for
class MaskmakerURL(object):
"""Helper class for holding information about a URL passed to maskmaker."""
@@ -105,7 +105,7 @@ def ExecuteMaskmaker(command):
self.url = url
self.consecutive_successes = 0
self.errors = 0
-
+
if command["--url"]:
url_list = [MaskmakerURL(command["--url"])]
else:
@@ -116,22 +116,22 @@ def ExecuteMaskmaker(command):
endline = command["--endline"]
url_list = [MaskmakerURL(url.strip()) for url in
open(command["--list"], "r").readlines()[startline:endline]]
-
+
complete_list = []
error_list = []
-
+
outdir = command["--outdir"]
scrapes = command["--scrapes"]
errors = command["--errors"]
size = command["--size"]
scrape_pass = 0
-
+
scrapedir = command["--scrapedir"]
if not scrapedir: scrapedir = tempfile.gettempdir()
-
+
# Get the scraper
scraper = scrapers.GetScraper((command["--browser"], command["--browserver"]))
-
+
# Repeatedly iterate through the list of URLs until either every URL has
# a successful mask or too many errors, or we've exceeded the giveup limit
while url_list and scrape_pass < command["--giveup"]:
@@ -157,31 +157,31 @@ def ExecuteMaskmaker(command):
print " %r does not exist, creating" % mask_filename
mask = Image.new("1", size, 1)
mask.save(mask_filename)
-
+
# Find the stored scrape path
mask_scrape_dir = os.path.join(
scrapedir, os.path.splitext(os.path.basename(mask_filename))[0])
drivers.windowing.PreparePath(mask_scrape_dir)
-
+
# Find the baseline image
mask_scrapes = os.listdir(mask_scrape_dir)
mask_scrapes.sort()
-
+
if not mask_scrapes:
print " No baseline image found, mask will not be updated"
baseline = None
else:
baseline = Image.open(os.path.join(mask_scrape_dir, mask_scrapes[0]))
-
+
mask_scrape_filename = os.path.join(mask_scrape_dir,
time.strftime("%y%m%d-%H%M%S.bmp"))
-
+
# Do the scrape
result = scraper.Scrape(
[url.url], mask_scrape_dir, size, (0, 0),
command["--timeout"], path=command["--browserpath"],
filename=mask_scrape_filename)
-
+
if result:
# Return value other than None means an error
print " Scrape failed with error '%r'" % result
@@ -189,16 +189,16 @@ def ExecuteMaskmaker(command):
if url.errors >= errors:
print " ** Exceeded maximum error count for this URL, giving up"
continue
-
+
# Load the new scrape
scrape = Image.open(mask_scrape_filename)
-
+
# Calculate the difference between the new scrape and the baseline,
# subject to the current mask
if baseline:
diff = ImageChops.multiply(ImageChops.difference(scrape, baseline),
mask.convert(scrape.mode))
-
+
# If the difference is none, there's nothing to update
if max(diff.getextrema()) == (0, 0):
print " Scrape identical to baseline, no change in mask"
@@ -221,10 +221,10 @@ def ExecuteMaskmaker(command):
# a monochrome bitmap. If the original RGB image were converted
# directly to monochrome, PIL would dither it.
diff = diff.convert("L").point([255]+[0]*255, "1")
-
+
# count the number of different pixels
diff_pixels = diff.getcolors()[0][0]
-
+
# is this too much?
diff_pixel_percent = diff_pixels * 100.0 / (mask.size[0]*mask.size[1])
if diff_pixel_percent > command["--threshhold"]:
@@ -234,10 +234,10 @@ def ExecuteMaskmaker(command):
print " Scrape differed in %d pixels, updating mask" % diff_pixels
mask = ImageChops.multiply(mask, diff)
mask.save(mask_filename)
-
+
# reset the number of consecutive "good" scrapes
url.consecutive_successes = 0
-
+
# Remove URLs whose mask is deemed done
complete_list.extend(
[url for url in url_list if url.consecutive_successes >= scrapes])
@@ -247,16 +247,16 @@ def ExecuteMaskmaker(command):
url for url in url_list if
url.consecutive_successes < scrapes and
url.errors < errors]
-
+
scrape_pass += 1
print "**Done with scrape pass %d\n" % scrape_pass
-
+
if scrape_pass >= command["--giveup"]:
print "**Exceeded giveup threshhold. Giving up."
else:
print "Waiting %d seconds..." % command["--wait"]
time.sleep(command["--wait"])
-
+
print
print "*** MASKMAKER COMPLETE ***"
print "Summary report:"
diff --git a/tools/site_compare/commands/measure.py b/tools/site_compare/commands/measure.py
index 1815a3d2..086fcbe 100644
--- a/tools/site_compare/commands/measure.py
+++ b/tools/site_compare/commands/measure.py
@@ -40,14 +40,14 @@ def CreateCommand(cmdline):
def ExecuteMeasure(command):
"""Executes the Measure command."""
-
+
def LogResult(url, proc, wnd, result):
"""Write the result of the browse to the log file."""
log_file.write(result)
log_file = open(command["--logfile"], "w")
- browser_iterate.Iterate(command, LogResult)
+ browser_iterate.Iterate(command, LogResult)
# Close the log file and return. We're done.
log_file.close()
diff --git a/tools/site_compare/commands/scrape.py b/tools/site_compare/commands/scrape.py
index 21a00ce..1c47cab 100644
--- a/tools/site_compare/commands/scrape.py
+++ b/tools/site_compare/commands/scrape.py
@@ -41,7 +41,7 @@ def CreateCommand(cmdline):
def ExecuteScrape(command):
"""Executes the Scrape command."""
-
+
def ScrapeResult(url, proc, wnd, result):
"""Capture and save the scrape."""
if log_file: log_file.write(result)
@@ -49,12 +49,12 @@ def ExecuteScrape(command):
# Scrape the page
image = windowing.ScrapeWindow(wnd)
filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
- image.save(filename)
+ image.save(filename)
if command["--logfile"]: log_file = open(command["--logfile"], "w")
else: log_file = None
- browser_iterate.Iterate(command, ScrapeResult)
+ browser_iterate.Iterate(command, ScrapeResult)
# Close the log file and return. We're done.
if log_file: log_file.close()
diff --git a/tools/site_compare/commands/timeload.py b/tools/site_compare/commands/timeload.py
index 554d3b6..ca5b0db 100644
--- a/tools/site_compare/commands/timeload.py
+++ b/tools/site_compare/commands/timeload.py
@@ -6,7 +6,7 @@
"""SiteCompare command to time page loads
Loads a series of URLs in a series of browsers (and browser versions)
-and measures how long the page takes to load in each. Outputs a
+and measures how long the page takes to load in each. Outputs a
comma-delimited file. The first line is "URL,[browser names]", each
additional line is a URL followed by comma-delimited times (in seconds),
or the string "timeout" or "crashed".
@@ -67,44 +67,44 @@ def CreateCommand(cmdline):
cmd.AddArgument(
["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
-
+
def ExecuteTimeLoad(command):
"""Executes the TimeLoad command."""
browsers = command["--browsers"].split(",")
num_browsers = len(browsers)
-
+
if command["--browserversions"]:
browser_versions = command["--browserversions"].split(",")
else:
browser_versions = [None] * num_browsers
-
+
if command["--browserpaths"]:
browser_paths = command["--browserpaths"].split(",")
else:
browser_paths = [None] * num_browsers
-
+
if len(browser_versions) != num_browsers:
raise ValueError(
"--browserversions must be same length as --browser_paths")
if len(browser_paths) != num_browsers:
raise ValueError(
"--browserversions must be same length as --browser_paths")
-
+
if [b for b in browsers if b not in ["chrome", "ie", "firefox"]]:
raise ValueError("unknown browsers: %r" % b)
-
+
scraper_list = []
-
+
for b in xrange(num_browsers):
version = browser_versions[b]
if not version: version = None
-
+
scraper = scrapers.GetScraper( (browsers[b], version) )
if not scraper:
- raise ValueError("could not find scraper for (%r, %r)" %
+ raise ValueError("could not find scraper for (%r, %r)" %
(browsers[b], version))
scraper_list.append(scraper)
-
+
if command["--url"]:
url_list = [command["--url"]]
else:
@@ -115,32 +115,32 @@ def ExecuteTimeLoad(command):
endline = command["--endline"]
url_list = [url.strip() for url in
open(command["--list"], "r").readlines()[startline:endline]]
-
+
log_file = open(command["--logfile"], "w")
-
+
log_file.write("URL")
for b in xrange(num_browsers):
log_file.write(",%s" % browsers[b])
-
+
if browser_versions[b]: log_file.write(" %s" % browser_versions[b])
log_file.write("\n")
-
+
results = {}
for url in url_list:
results[url] = [None] * num_browsers
-
+
for b in xrange(num_browsers):
result = scraper_list[b].Time(url_list, command["--size"],
command["--timeout"],
path=browser_paths[b])
-
+
for (url, time) in result:
results[url][b] = time
-
+
# output the results
for url in url_list:
log_file.write(url)
for b in xrange(num_browsers):
log_file.write(",%r" % results[url][b])
-
+
diff --git a/tools/site_compare/drivers/__init__.py b/tools/site_compare/drivers/__init__.py
index befc1353..fa9f1c2 100644
--- a/tools/site_compare/drivers/__init__.py
+++ b/tools/site_compare/drivers/__init__.py
@@ -9,7 +9,7 @@ __author__ = 'jhaas@google.com (Jonathan Haas)'
import sys
platform_dir = sys.platform
-
+
keyboard = __import__(platform_dir+".keyboard", globals(), locals(), [''])
mouse = __import__(platform_dir+".mouse", globals(), locals(), [''])
windowing = __import__(platform_dir+".windowing", globals(), locals(), [''])
diff --git a/tools/site_compare/drivers/win32/keyboard.py b/tools/site_compare/drivers/win32/keyboard.py
index a25df5e..246e14c 100644
--- a/tools/site_compare/drivers/win32/keyboard.py
+++ b/tools/site_compare/drivers/win32/keyboard.py
@@ -25,45 +25,45 @@ import win32con # Windows constants
def PressKey(down, key):
"""Presses or unpresses a key.
-
+
Uses keybd_event to simulate either depressing or releasing
a key
-
+
Args:
down: Whether the key is to be pressed or released
key: Virtual key code of key to press or release
"""
-
- # keybd_event injects key events at a very low level (it's the
+
+ # keybd_event injects key events at a very low level (it's the
# Windows API keyboard device drivers call) so this is a very
# reliable way of simulating user input
win32api.keybd_event(key, 0, (not down) * win32con.KEYEVENTF_KEYUP)
-
-
+
+
def TypeKey(key, keystroke_time=0):
"""Simulate a keypress of a virtual key.
-
+
Args:
key: which key to press
keystroke_time: length of time (in seconds) to "hold down" the key
Note that zero works just fine
-
+
Returns:
None
"""
-
+
# This just wraps a pair of PressKey calls with an intervening delay
PressKey(True, key)
time.sleep(keystroke_time)
PressKey(False, key)
-
+
def TypeString(string_to_type,
use_modifiers=False,
keystroke_time=0,
time_between_keystrokes=0):
"""Simulate typing a string on the keyboard.
-
+
Args:
string_to_type: the string to print
use_modifiers: specifies whether the following modifier characters
@@ -79,27 +79,27 @@ def TypeString(string_to_type,
nonprintable keys (F-keys, ESC, arrow keys, etc),
support for explicit control of left vs. right ALT or SHIFT,
support for Windows key
-
+
keystroke_time: length of time (in seconds) to "hold down" the key
time_between_keystrokes: length of time (seconds) to pause between keys
-
+
Returns:
None
"""
-
+
shift_held = win32api.GetAsyncKeyState(win32con.VK_SHIFT ) < 0
ctrl_held = win32api.GetAsyncKeyState(win32con.VK_CONTROL) < 0
alt_held = win32api.GetAsyncKeyState(win32con.VK_MENU ) < 0
-
+
next_escaped = False
escape_chars = {
'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v'}
-
+
for char in string_to_type:
vk = None
handled = False
-
- # Check to see if this is the start or end of a modified block (that is,
+
+ # Check to see if this is the start or end of a modified block (that is,
# {abc} for ALT-modified keys or [abc] for CTRL-modified keys
if use_modifiers and not next_escaped:
handled = True
@@ -117,17 +117,17 @@ def TypeString(string_to_type,
PressKey(False, win32con.VK_CONTROL)
else:
handled = False
-
+
# If this is an explicitly-escaped character, replace it with the
# appropriate code
if next_escaped and char in escape_chars: char = escape_chars[char]
-
+
# If this is \p, pause for one second.
if next_escaped and char == 'p':
time.sleep(1)
next_escaped = False
handled = True
-
+
# If this is \(d), press F key
if next_escaped and char.isdigit():
fkey = int(char)
@@ -139,28 +139,28 @@ def TypeString(string_to_type,
if not next_escaped and char == "\\":
next_escaped = True
handled = True
-
+
# If we make it here, it's not a special character, or it's an
# escaped special character which should be treated as a literal
if not handled:
next_escaped = False
if not vk: vk = win32api.VkKeyScan(char)
-
+
# VkKeyScan() returns the scan code in the low byte. The upper
# byte specifies modifiers necessary to produce the given character
# from the given scan code. The only one we're concerned with at the
- # moment is Shift. Determine the shift state and compare it to the
+ # moment is Shift. Determine the shift state and compare it to the
# current state... if it differs, press or release the shift key.
new_shift_held = bool(vk & (1<<8))
-
+
if new_shift_held != shift_held:
PressKey(new_shift_held, win32con.VK_SHIFT)
shift_held = new_shift_held
-
+
# Type the key with the specified length, then wait the specified delay
TypeKey(vk & 0xFF, keystroke_time)
time.sleep(time_between_keystrokes)
-
+
# Release the modifier keys, if held
if shift_held: PressKey(False, win32con.VK_SHIFT)
if ctrl_held: PressKey(False, win32con.VK_CONTROL)
@@ -168,18 +168,18 @@ def TypeString(string_to_type,
if __name__ == "__main__":
# We're being invoked rather than imported. Let's do some tests
-
+
# Press Win-R to bring up the Run dialog
PressKey(True, win32con.VK_LWIN)
TypeKey(ord('R'))
PressKey(False, win32con.VK_LWIN)
-
+
# Wait a sec to make sure it comes up
time.sleep(1)
-
+
# Invoke WordPad through the Run dialog
TypeString("wordpad\n")
-
+
# Wait another sec, then start typing
time.sleep(1)
TypeString("This is a test of SiteCompare's Keyboard.py module.\n\n")
@@ -194,5 +194,5 @@ if __name__ == "__main__":
use_modifiers=True,
keystroke_time=0.05,
time_between_keystrokes=0.05)
-
-
+
+
diff --git a/tools/site_compare/drivers/win32/mouse.py b/tools/site_compare/drivers/win32/mouse.py
index afdb2ea..bd16272 100644
--- a/tools/site_compare/drivers/win32/mouse.py
+++ b/tools/site_compare/drivers/win32/mouse.py
@@ -19,25 +19,25 @@ import win32gui # for window functions
def ScreenToMouse(pt):
"""Convert a value in screen coordinates to mouse coordinates.
-
+
Mouse coordinates are specified as a percentage of screen dimensions,
normalized to 16 bits. 0 represents the far left/top of the screen,
65535 represents the far right/bottom. This function assumes that
the size of the screen is fixed at module load time and does not change
-
+
Args:
pt: the point of the coords to convert
-
+
Returns:
the converted point
"""
-
+
# Initialize the screen dimensions on first execution. Note that this
# function assumes that the screen dimensions do not change during run.
if not ScreenToMouse._SCREEN_DIMENSIONS:
desktop = win32gui.GetClientRect(win32gui.GetDesktopWindow())
ScreenToMouse._SCREEN_DIMENSIONS = (desktop[2], desktop[3])
-
+
return ((65535 * pt[0]) / ScreenToMouse._SCREEN_DIMENSIONS[0],
(65535 * pt[1]) / ScreenToMouse._SCREEN_DIMENSIONS[1])
@@ -46,11 +46,11 @@ ScreenToMouse._SCREEN_DIMENSIONS = None
def PressButton(down, button='left'):
"""Simulate a mouse button press or release at the current mouse location.
-
+
Args:
down: whether the button is pressed or released
button: which button is pressed
-
+
Returns:
None
"""
@@ -61,127 +61,127 @@ def PressButton(down, button='left'):
'middle': (win32con.MOUSEEVENTF_MIDDLEUP, win32con.MOUSEEVENTF_MIDDLEDOWN),
'right': (win32con.MOUSEEVENTF_RIGHTUP, win32con.MOUSEEVENTF_RIGHTDOWN)
}
-
+
# hit the button
win32api.mouse_event(flags[button][down], 0, 0)
-
+
def ClickButton(button='left', click_time=0):
"""Press and release a mouse button at the current mouse location.
-
+
Args:
button: which button to click
click_time: duration between press and release
-
+
Returns:
None
"""
PressButton(True, button)
time.sleep(click_time)
PressButton(False, button)
-
-
+
+
def DoubleClickButton(button='left', click_time=0, time_between_clicks=0):
"""Double-click a mouse button at the current mouse location.
-
+
Args:
button: which button to click
click_time: duration between press and release
time_between_clicks: time to pause between clicks
-
+
Returns:
None
"""
ClickButton(button, click_time)
time.sleep(time_between_clicks)
ClickButton(button, click_time)
-
+
def MoveToLocation(pos, duration=0, tick=0.01):
"""Move the mouse cursor to a specified location, taking the specified time.
-
+
Args:
pos: position (in screen coordinates) to move to
duration: amount of time the move should take
tick: amount of time between successive moves of the mouse
-
+
Returns:
None
"""
# calculate the number of moves to reach the destination
num_steps = (duration/tick)+1
-
+
# get the current and final mouse position in mouse coords
current_location = ScreenToMouse(win32gui.GetCursorPos())
end_location = ScreenToMouse(pos)
-
+
# Calculate the step size
step_size = ((end_location[0]-current_location[0])/num_steps,
(end_location[1]-current_location[1])/num_steps)
step = 0
-
+
while step < num_steps:
# Move the mouse one step
current_location = (current_location[0]+step_size[0],
current_location[1]+step_size[1])
-
+
# Coerce the coords to int to avoid a warning from pywin32
win32api.mouse_event(
win32con.MOUSEEVENTF_MOVE|win32con.MOUSEEVENTF_ABSOLUTE,
int(current_location[0]), int(current_location[1]))
-
+
step += 1
time.sleep(tick)
-
-
+
+
def ClickAtLocation(pos, button='left', click_time=0):
"""Simulate a mouse click in a particular location, in screen coordinates.
-
+
Args:
pos: position in screen coordinates (x,y)
button: which button to click
click_time: duration of the click
-
+
Returns:
None
"""
MoveToLocation(pos)
ClickButton(button, click_time)
-
+
def ClickInWindow(hwnd, offset=None, button='left', click_time=0):
"""Simulate a user mouse click in the center of a window.
-
+
Args:
hwnd: handle of the window to click in
offset: where to click, defaults to dead center
button: which button to click
click_time: duration of the click
-
+
Returns:
Nothing
"""
-
+
rect = win32gui.GetClientRect(hwnd)
if offset is None: offset = (rect[2]/2, rect[3]/2)
# get the screen coordinates of the window's center
pos = win32gui.ClientToScreen(hwnd, offset)
-
+
ClickAtLocation(pos, button, click_time)
-
+
def DoubleClickInWindow(
hwnd, offset=None, button='left', click_time=0, time_between_clicks=0.1):
"""Simulate a user mouse double click in the center of a window.
-
+
Args:
hwnd: handle of the window to click in
offset: where to click, defaults to dead center
button: which button to click
click_time: duration of the clicks
time_between_clicks: length of time to pause between clicks
-
+
Returns:
Nothing
"""
@@ -191,13 +191,13 @@ def DoubleClickInWindow(
if __name__ == "__main__":
# We're being invoked rather than imported. Let's do some tests
-
+
screen_size = win32gui.GetClientRect(win32gui.GetDesktopWindow())
screen_size = (screen_size[2], screen_size[3])
-
+
# move the mouse (instantly) to the upper right corner
MoveToLocation((screen_size[0], 0))
-
+
# move the mouse (over five seconds) to the lower left corner
MoveToLocation((0, screen_size[1]), 5)
@@ -209,10 +209,10 @@ if __name__ == "__main__":
# wait a bit, then click the right button to open the context menu
time.sleep(3)
ClickButton('right')
-
+
# move the mouse away and then click the left button to dismiss the
# context menu
MoveToLocation((screen_size[0]/2, screen_size[1]/2), 3)
MoveToLocation((0, 0), 3)
ClickButton()
-
+
diff --git a/tools/site_compare/drivers/win32/windowing.py b/tools/site_compare/drivers/win32/windowing.py
index 5bc37f8..fe77c56 100644
--- a/tools/site_compare/drivers/win32/windowing.py
+++ b/tools/site_compare/drivers/win32/windowing.py
@@ -24,7 +24,7 @@ import win32process
def FindChildWindows(hwnd, path):
"""Find a set of windows through a path specification.
-
+
Args:
hwnd: Handle of the parent window
path: Path to the window to find. Has the following form:
@@ -32,12 +32,12 @@ def FindChildWindows(hwnd, path):
The slashes specify the "path" to the child window.
The text is the window class, a pipe (if present) is a title.
* is a wildcard and will find all child windows at that level
-
+
Returns:
A list of the windows that were found
"""
windows_to_check = [hwnd]
-
+
# The strategy will be to take windows_to_check and use it
# to find a list of windows that match the next specification
# in the path, then repeat with the list of found windows as the
@@ -45,7 +45,7 @@ def FindChildWindows(hwnd, path):
for segment in path.split("/"):
windows_found = []
check_values = segment.split("|")
-
+
# check_values is now a list with the first element being
# the window class, the second being the window caption.
# If the class is absent (or wildcarded) set it to None
@@ -53,7 +53,7 @@ def FindChildWindows(hwnd, path):
# If the window caption is also absent, force it to None as well
if len(check_values) == 1: check_values.append(None)
-
+
# Loop through the list of windows to check
for window_check in windows_to_check:
window_found = None
@@ -70,26 +70,26 @@ def FindChildWindows(hwnd, path):
window_found = 0
else:
raise e
-
+
# If FindWindowEx struck gold, add to our list of windows found
if window_found: windows_found.append(window_found)
-
+
# The windows we found become the windows to check for the next segment
windows_to_check = windows_found
-
+
return windows_found
def FindChildWindow(hwnd, path):
"""Find a window through a path specification.
-
+
This method is a simple wrapper for FindChildWindows() for the
case (the majority case) where you expect to find a single window
-
+
Args:
hwnd: Handle of the parent window
path: Path to the window to find. See FindChildWindows()
-
+
Returns:
The window that was found
"""
@@ -98,36 +98,36 @@ def FindChildWindow(hwnd, path):
def ScrapeWindow(hwnd, rect=None):
"""Scrape a visible window and return its contents as a bitmap.
-
+
Args:
hwnd: handle of the window to scrape
rect: rectangle to scrape in client coords, defaults to the whole thing
If specified, it's a 4-tuple of (left, top, right, bottom)
-
+
Returns:
An Image containing the scraped data
"""
# Activate the window
SetForegroundWindow(hwnd)
-
+
# If no rectangle was specified, use the full client rectangle
if not rect: rect = win32gui.GetClientRect(hwnd)
-
+
upper_left = win32gui.ClientToScreen(hwnd, (rect[0], rect[1]))
lower_right = win32gui.ClientToScreen(hwnd, (rect[2], rect[3]))
rect = upper_left+lower_right
-
+
return PIL.ImageGrab.grab(rect)
-
+
def SetForegroundWindow(hwnd):
"""Bring a window to the foreground."""
win32gui.SetForegroundWindow(hwnd)
-
-
+
+
def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
"""Invoke an application and wait for it to bring up a window.
-
+
Args:
path: full path to the executable to invoke
cmdline: command line to pass to executable
@@ -138,7 +138,7 @@ def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
A tuple of handles to the process and the application's window,
or (None, None) if it timed out waiting for the process
"""
-
+
def EnumWindowProc(hwnd, ret):
"""Internal enumeration func, checks for visibility and proper PID."""
if win32gui.IsWindowVisible(hwnd): # don't bother even checking hidden wnds
@@ -147,12 +147,12 @@ def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
ret[1] = hwnd
return 0 # 0 means stop enumeration
return 1 # 1 means continue enumeration
-
+
# We don't need to change anything about the startupinfo structure
# (the default is quite sufficient) but we need to create it just the
# same.
sinfo = win32process.STARTUPINFO()
-
+
proc = win32process.CreateProcess(
path, # path to new process's executable
cmdline, # application's command line
@@ -168,16 +168,16 @@ def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
# some point we may care about the other members, but for now, all
# we're after is the pid
pid = proc[2]
-
+
# Enumeration APIs can take an arbitrary integer, usually a pointer,
# to be passed to the enumeration function. We'll pass a pointer to
# a structure containing the PID we're looking for, and an empty out
# parameter to hold the found window ID
ret = [pid, None]
-
+
tries_until_timeout = timeout/tick
num_tries = 0
-
+
# Enumerate top-level windows, look for one with our PID
while num_tries < tries_until_timeout and ret[1] is None:
try:
@@ -186,7 +186,7 @@ def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
# error 0 isn't an error, it just meant the enumeration was
# terminated early
if e[0]: raise e
-
+
time.sleep(tick)
num_tries += 1
@@ -197,11 +197,11 @@ def InvokeAndWait(path, cmdline="", timeout=10, tick=1.):
def WaitForProcessExit(proc, timeout=None):
"""Waits for a given process to terminate.
-
+
Args:
proc: handle to process
timeout: timeout (in seconds). None = wait indefinitely
-
+
Returns:
True if process ended, False if timed out
"""
@@ -210,26 +210,26 @@ def WaitForProcessExit(proc, timeout=None):
else:
# convert sec to msec
timeout *= 1000
-
+
return (win32event.WaitForSingleObject(proc, timeout) ==
win32event.WAIT_OBJECT_0)
def WaitForThrobber(hwnd, rect=None, timeout=20, tick=0.1, done=10):
"""Wait for a browser's "throbber" (loading animation) to complete.
-
+
Args:
hwnd: window containing the throbber
rect: rectangle of the throbber, in client coords. If None, whole window
timeout: if the throbber is still throbbing after this long, give up
tick: how often to check the throbber
done: how long the throbber must be unmoving to be considered done
-
+
Returns:
Number of seconds waited, -1 if timed out
"""
if not rect: rect = win32gui.GetClientRect(hwnd)
-
+
# last_throbber will hold the results of the preceding scrape;
# we'll compare it against the current scrape to see if we're throbbing
last_throbber = ScrapeWindow(hwnd, rect)
@@ -239,7 +239,7 @@ def WaitForThrobber(hwnd, rect=None, timeout=20, tick=0.1, done=10):
while time.clock() < timeout_clock:
time.sleep(tick)
-
+
current_throbber = ScrapeWindow(hwnd, rect)
if current_throbber.tostring() != last_throbber.tostring():
last_throbber = current_throbber
@@ -247,27 +247,27 @@ def WaitForThrobber(hwnd, rect=None, timeout=20, tick=0.1, done=10):
else:
if time.clock() - last_changed_clock > done:
return last_changed_clock - start_clock
-
+
return -1
def MoveAndSizeWindow(wnd, position=None, size=None, child=None):
"""Moves and/or resizes a window.
-
+
Repositions and resizes a window. If a child window is provided,
the parent window is resized so the child window has the given size
-
+
Args:
wnd: handle of the frame window
position: new location for the frame window
size: new size for the frame window (or the child window)
child: handle of the child window
-
+
Returns:
None
"""
rect = win32gui.GetWindowRect(wnd)
-
+
if position is None: position = (rect[0], rect[1])
if size is None:
size = (rect[2]-rect[0], rect[3]-rect[1])
@@ -276,7 +276,7 @@ def MoveAndSizeWindow(wnd, position=None, size=None, child=None):
slop = (rect[2]-rect[0]-child_rect[2]+child_rect[0],
rect[3]-rect[1]-child_rect[3]+child_rect[1])
size = (size[0]+slop[0], size[1]+slop[1])
-
+
win32gui.MoveWindow(wnd, # window to move
position[0], # new x coord
position[1], # new y coord
@@ -287,46 +287,46 @@ def MoveAndSizeWindow(wnd, position=None, size=None, child=None):
def EndProcess(proc, code=0):
"""Ends a process.
-
+
Wraps the OS TerminateProcess call for platform-independence
-
+
Args:
proc: process ID
code: process exit code
-
+
Returns:
None
"""
win32process.TerminateProcess(proc, code)
-
-
+
+
def URLtoFilename(url, path=None, extension=None):
"""Converts a URL to a filename, given a path.
-
+
This in theory could cause collisions if two URLs differ only
in unprintable characters (e.g. http://www.foo.com/?bar and
http://www.foo.com/:bar). In practice this shouldn't be a problem.
-
+
Args:
url: The URL to convert
path: path to the directory to store the file
extension: string to append to filename
-
+
Returns:
filename
"""
trans = string.maketrans(r'\/:*?"<>|', '_________')
-
+
if path is None: path = ""
if extension is None: extension = ""
if len(path) > 0 and path[-1] != '\\': path += '\\'
url = url.translate(trans)
return "%s%s%s" % (path, url, extension)
-
+
def PreparePath(path):
"""Ensures that a given path exists, making subdirectories if necessary.
-
+
Args:
path: fully-qualified path of directory to ensure exists
@@ -341,11 +341,11 @@ def PreparePath(path):
if __name__ == "__main__":
PreparePath(r"c:\sitecompare\scrapes\ie7")
# We're being invoked rather than imported. Let's do some tests
-
+
# Hardcode IE's location for the purpose of this test
(proc, wnd) = InvokeAndWait(
r"c:\program files\internet explorer\iexplore.exe")
-
+
# Find the browser pane in the IE window
browser = FindChildWindow(
wnd, "TabWindowClass/Shell DocObject View/Internet Explorer_Server")
@@ -355,8 +355,8 @@ if __name__ == "__main__":
# Take a screenshot
i = ScrapeWindow(browser)
-
+
i.show()
-
+
EndProcess(proc, 0)
diff --git a/tools/site_compare/operators/__init__.py b/tools/site_compare/operators/__init__.py
index 02eac07..f60e8e8 100644
--- a/tools/site_compare/operators/__init__.py
+++ b/tools/site_compare/operators/__init__.py
@@ -9,18 +9,18 @@ __author__ = 'jhaas@google.com (Jonathan Haas)'
def GetOperator(operator):
"""Given an operator by name, returns its module.
-
+
Args:
operator: string describing the comparison
-
+
Returns:
module
"""
-
+
# TODO(jhaas): come up with a happy way of integrating multiple operators
# with different, possibly divergent and possibly convergent, operators.
-
+
module = __import__(operator, globals(), locals(), [''])
-
+
return module
diff --git a/tools/site_compare/operators/equals.py b/tools/site_compare/operators/equals.py
index c7654e9..4054fa6 100644
--- a/tools/site_compare/operators/equals.py
+++ b/tools/site_compare/operators/equals.py
@@ -11,31 +11,31 @@ from PIL import ImageChops
def Compare(file1, file2, **kwargs):
"""Compares two images to see if they're identical.
-
+
Args:
file1: path to first image to compare
file2: path to second image to compare
kwargs: unused for this operator
-
+
Returns:
None if the images are identical
A tuple of (errorstring, image) if they're not
"""
kwargs = kwargs # unused parameter
-
+
im1 = Image.open(file1)
im2 = Image.open(file2)
-
+
if im1.size != im2.size:
return ("The images are of different size (%s vs %s)" %
(im1.size, im2.size), im1)
diff = ImageChops.difference(im1, im2)
-
+
if max(diff.getextrema()) != (0, 0):
return ("The images differ", diff)
else:
return None
-
-
-
+
+
+
diff --git a/tools/site_compare/operators/equals_with_mask.py b/tools/site_compare/operators/equals_with_mask.py
index fd4000b..d6abd53 100644
--- a/tools/site_compare/operators/equals_with_mask.py
+++ b/tools/site_compare/operators/equals_with_mask.py
@@ -13,49 +13,49 @@ import os.path
def Compare(file1, file2, **kwargs):
"""Compares two images to see if they're identical subject to a mask.
-
+
An optional directory containing masks is supplied. If a mask exists
which matches file1's name, areas under the mask where it's black
are ignored.
-
+
Args:
file1: path to first image to compare
file2: path to second image to compare
kwargs: ["maskdir"] contains the directory holding the masks
-
+
Returns:
None if the images are identical
A tuple of (errorstring, image) if they're not
"""
-
+
maskdir = None
if "maskdir" in kwargs:
maskdir = kwargs["maskdir"]
-
+
im1 = Image.open(file1)
im2 = Image.open(file2)
-
+
if im1.size != im2.size:
return ("The images are of different size (%r vs %r)" %
(im1.size, im2.size), im1)
diff = ImageChops.difference(im1, im2)
-
+
if maskdir:
maskfile = os.path.join(maskdir, os.path.basename(file1))
if os.path.exists(maskfile):
mask = Image.open(maskfile)
-
+
if mask.size != im1.size:
return ("The mask is of a different size than the images (%r vs %r)" %
(mask.size, im1.size), mask)
-
+
diff = ImageChops.multiply(diff, mask.convert(diff.mode))
-
+
if max(diff.getextrema()) != (0, 0):
return ("The images differ", diff)
else:
return None
-
-
-
+
+
+
diff --git a/tools/site_compare/scrapers/__init__.py b/tools/site_compare/scrapers/__init__.py
index 08790aa..cb82b2b 100644
--- a/tools/site_compare/scrapers/__init__.py
+++ b/tools/site_compare/scrapers/__init__.py
@@ -12,23 +12,23 @@ import types
def GetScraper(browser):
"""Given a browser and an optional version, returns the scraper module.
-
+
Args:
browser: either a string (browser name) or a tuple (name, version)
-
+
Returns:
module
"""
-
+
if type(browser) == types.StringType: browser = (browser, None)
-
+
package = __import__(browser[0], globals(), locals(), [''])
module = package.GetScraper(browser[1])
if browser[1] is not None: module.version = browser[1]
-
+
return module
# if invoked rather than imported, do some tests
if __name__ == "__main__":
print GetScraper("IE")
- \ No newline at end of file
+
diff --git a/tools/site_compare/scrapers/chrome/__init__.py b/tools/site_compare/scrapers/chrome/__init__.py
index 2ba76c4..6342525 100644
--- a/tools/site_compare/scrapers/chrome/__init__.py
+++ b/tools/site_compare/scrapers/chrome/__init__.py
@@ -5,19 +5,19 @@
"""Selects the appropriate scraper for Chrome."""
__author__ = 'jhaas@google.com (Jonathan Haas)'
-
+
def GetScraper(version):
"""Returns the scraper module for the given version.
-
+
Args:
version: version string of Chrome, or None for most recent
-
+
Returns:
scrape module for given version
"""
if version is None:
version = "0.1.101.0"
-
+
parsed_version = [int(x) for x in version.split(".")]
if (parsed_version[0] > 0 or
@@ -29,10 +29,10 @@ def GetScraper(version):
scraper_version = "chrome01970"
return __import__(scraper_version, globals(), locals(), [''])
-
+
# if invoked rather than imported, test
if __name__ == "__main__":
version = "0.1.101.0"
-
+
print GetScraper(version).version
- \ No newline at end of file
+
diff --git a/tools/site_compare/scrapers/chrome/chrome011010.py b/tools/site_compare/scrapers/chrome/chrome011010.py
index 3d62d07..b4f816f 100644
--- a/tools/site_compare/scrapers/chrome/chrome011010.py
+++ b/tools/site_compare/scrapers/chrome/chrome011010.py
@@ -19,7 +19,7 @@ def GetChromeRenderPane(wnd):
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
-
+
Args:
urls: list of URLs to scrape
outdir: directory to place output
@@ -27,18 +27,18 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
None if succeeded, else an error code
"""
chromebase.GetChromeRenderPane = GetChromeRenderPane
-
+
return chromebase.Scrape(urls, outdir, size, pos, timeout, kwargs)
def Time(urls, size, timeout, **kwargs):
"""Forwards the Time command to chromebase."""
chromebase.GetChromeRenderPane = GetChromeRenderPane
-
+
return chromebase.Time(urls, size, timeout, kwargs)
diff --git a/tools/site_compare/scrapers/chrome/chrome01970.py b/tools/site_compare/scrapers/chrome/chrome01970.py
index c1ef79f..54bc670 100644
--- a/tools/site_compare/scrapers/chrome/chrome01970.py
+++ b/tools/site_compare/scrapers/chrome/chrome01970.py
@@ -19,7 +19,7 @@ def GetChromeRenderPane(wnd):
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
-
+
Args:
urls: list of URLs to scrape
outdir: directory to place output
@@ -27,18 +27,18 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
None if succeeded, else an error code
"""
chromebase.GetChromeRenderPane = GetChromeRenderPane
-
+
return chromebase.Scrape(urls, outdir, size, pos, timeout, kwargs)
-
+
def Time(urls, size, timeout, **kwargs):
"""Forwards the Time command to chromebase."""
chromebase.GetChromeRenderPane = GetChromeRenderPane
-
+
return chromebase.Time(urls, size, timeout, kwargs)
-
+
diff --git a/tools/site_compare/scrapers/chrome/chromebase.py b/tools/site_compare/scrapers/chrome/chromebase.py
index 085f376..aba17c1 100644
--- a/tools/site_compare/scrapers/chrome/chromebase.py
+++ b/tools/site_compare/scrapers/chrome/chromebase.py
@@ -18,17 +18,17 @@ DEFAULT_PATH = r"k:\chrome.exe"
def InvokeBrowser(path):
"""Invoke the Chrome browser.
-
+
Args:
path: full path to browser
-
+
Returns:
A tuple of (main window, process handle, address bar, render pane)
"""
-
+
# Reuse an existing instance of the browser if we can find one. This
# may not work correctly, especially if the window is behind other windows.
-
+
# TODO(jhaas): make this work with Vista
wnds = windowing.FindChildWindows(0, "Chrome_XPFrame")
if len(wnds):
@@ -37,17 +37,17 @@ def InvokeBrowser(path):
else:
# Invoke Chrome
(proc, wnd) = windowing.InvokeAndWait(path)
-
+
# Get windows we'll need
address_bar = windowing.FindChildWindow(wnd, "Chrome_AutocompleteEdit")
render_pane = GetChromeRenderPane(wnd)
-
+
return (wnd, proc, address_bar, render_pane)
-
+
def Scrape(urls, outdir, size, pos, timeout, kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
-
+
Args:
urls: list of URLs to scrape
outdir: directory to place output
@@ -55,39 +55,39 @@ def Scrape(urls, outdir, size, pos, timeout, kwargs):
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
None if success, else an error string
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
-
+
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
timedout = False
-
+
for url in urls:
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
-
+
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
-
+
if timedout:
break
-
+
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
-
+
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
@@ -97,68 +97,68 @@ def Scrape(urls, outdir, size, pos, timeout, kwargs):
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
-
+
if proc:
windowing.SetForegroundWindow(wnd)
-
+
# Send Alt-F4, then wait for process to end
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return "crashed"
-
+
if timedout:
return "timeout"
-
+
return None
def Time(urls, size, timeout, kwargs):
"""Measure how long it takes to load each of a series of URLs
-
+
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
-
+
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
-
+
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
-
+
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
-
+
timedout = load_time < 0
-
+
if timedout:
load_time = "timeout"
-
+
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
windowing.SetForegroundWindow(wnd)
-
+
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
@@ -167,10 +167,10 @@ def Time(urls, size, timeout, kwargs):
except pywintypes.error:
proc = None
load_time = "crashed"
-
+
ret.append( (url, load_time) )
- if proc:
+ if proc:
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
@@ -183,7 +183,7 @@ if __name__ == "__main__":
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\chrome\0.1.97.0"
windowing.PreparePath(path)
-
+
# Scrape three sites and save the results
Scrape([
"http://www.microsoft.com",
diff --git a/tools/site_compare/scrapers/firefox/__init__.py b/tools/site_compare/scrapers/firefox/__init__.py
index 255dc4b..7eb9291 100644
--- a/tools/site_compare/scrapers/firefox/__init__.py
+++ b/tools/site_compare/scrapers/firefox/__init__.py
@@ -3,29 +3,29 @@
# Copyright 2007 Google Inc. All Rights Reserved.
"""Selects the appropriate scraper for Firefox."""
-
+
__author__ = 'jhaas@google.com (Jonathan Haas)'
def GetScraper(version):
"""Returns the scraper module for the given version.
-
+
Args:
version: version string of Firefox, or None for most recent
-
+
Returns:
scrape module for given version
"""
-
+
# Pychecker will warn that the parameter is unused; we only
# support one version of Firefox at this time
-
+
# We only have one version of the Firefox scraper for now
return __import__("firefox2", globals(), locals(), [''])
-
+
# if invoked rather than imported, test
if __name__ == "__main__":
version = "2.0.0.6"
-
+
print GetScraper("2.0.0.6").version
- \ No newline at end of file
+
diff --git a/tools/site_compare/scrapers/firefox/firefox2.py b/tools/site_compare/scrapers/firefox/firefox2.py
index 0fdec98..fa0d620 100644
--- a/tools/site_compare/scrapers/firefox/firefox2.py
+++ b/tools/site_compare/scrapers/firefox/firefox2.py
@@ -29,29 +29,29 @@ def GetBrowser(path):
Args:
path: full path to browser
-
+
Returns:
A tuple of (process handle, main window, render pane)
"""
if not path: path = DEFAULT_PATH
-
+
# Invoke Firefox
(proc, wnd) = windowing.InvokeAndWait(path)
-
+
# Get the content pane
render_pane = windowing.FindChildWindow(
wnd,
"MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")
-
+
return (proc, wnd, render_pane)
def InvokeBrowser(path):
"""Invoke the Firefox browser.
-
+
Args:
path: full path to browser
-
+
Returns:
A tuple of (main window, process handle, render pane)
"""
@@ -64,18 +64,18 @@ def InvokeBrowser(path):
else:
# Invoke Firefox
(proc, wnd) = windowing.InvokeAndWait(path)
-
+
# Get the content pane
render_pane = windowing.FindChildWindow(
wnd,
"MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")
-
+
return (wnd, proc, render_pane)
-
+
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
-
+
Args:
urls: list of URLs to scrape
outdir: directory to place output
@@ -83,7 +83,7 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
None if success, else an error string
"""
@@ -91,28 +91,28 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
else: path = DEFAULT_PATH
(wnd, proc, render_pane) = InvokeBrowser(path)
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
-
+
time.sleep(3)
-
+
# Firefox is a bit of a pain: it doesn't use standard edit controls,
- # and it doesn't display a throbber when there's no tab. Let's make
+ # and it doesn't display a throbber when there's no tab. Let's make
# sure there's at least one tab, then select the first one
-
+
mouse.ClickInWindow(wnd)
keyboard.TypeString("[t]", True)
mouse.ClickInWindow(wnd, (30, 115))
time.sleep(2)
timedout = False
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
-
+
for url in urls:
-
+
# Use keyboard shortcuts
keyboard.TypeString("{d}", True)
keyboard.TypeString(url)
@@ -124,10 +124,10 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
if timedout:
break
-
+
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
-
+
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
@@ -137,58 +137,58 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
-
+
# Close all the tabs, cheesily
mouse.ClickInWindow(wnd)
-
+
while len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
keyboard.TypeString("[w]", True)
time.sleep(1)
-
+
if timedout:
return "timeout"
def Time(urls, size, timeout, **kwargs):
"""Measure how long it takes to load each of a series of URLs
-
+
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
-
+
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, render_pane) = InvokeBrowser(path)
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
time.sleep(3)
-
+
# Firefox is a bit of a pain: it doesn't use standard edit controls,
- # and it doesn't display a throbber when there's no tab. Let's make
+ # and it doesn't display a throbber when there's no tab. Let's make
# sure there's at least one tab, then select the first one
-
+
mouse.ClickInWindow(wnd)
keyboard.TypeString("[t]", True)
mouse.ClickInWindow(wnd, (30, 115))
time.sleep(2)
-
+
# Use keyboard shortcuts
keyboard.TypeString("{d}", True)
keyboard.TypeString(url)
@@ -197,34 +197,34 @@ def Time(urls, size, timeout, **kwargs):
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
timedout = load_time < 0
-
+
if timedout:
load_time = "timeout"
-
+
# Try to close the browser; if this fails it's probably a crash
mouse.ClickInWindow(wnd)
-
+
count = 0
- while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
+ while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
and count < 5):
keyboard.TypeString("[w]", True)
time.sleep(1)
count = count + 1
-
+
if len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
windowing.EndProcess(proc)
load_time = "crashed"
-
+
proc = None
except pywintypes.error:
proc = None
load_time = "crashed"
-
+
ret.append( (url, load_time) )
-
+
if proc:
count = 0
- while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
+ while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
and count < 5):
keyboard.TypeString("[w]", True)
time.sleep(1)
@@ -236,7 +236,7 @@ if __name__ == "__main__":
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\Firefox\2.0.0.6"
windowing.PreparePath(path)
-
+
# Scrape three sites and save the results
Scrape(
["http://www.microsoft.com", "http://www.google.com",
diff --git a/tools/site_compare/scrapers/ie/__init__.py b/tools/site_compare/scrapers/ie/__init__.py
index 4b8949b..8fb95db 100644
--- a/tools/site_compare/scrapers/ie/__init__.py
+++ b/tools/site_compare/scrapers/ie/__init__.py
@@ -9,23 +9,23 @@ __author__ = 'jhaas@google.com (Jonathan Haas)'
def GetScraper(version):
"""Returns the scraper module for the given version.
-
+
Args:
version: version string of IE, or None for most recent
-
+
Returns:
scrape module for given version
"""
# Pychecker will warn that the parameter is unused; we only
# support one version of IE at this time
-
+
# We only have one version of the IE scraper for now
return __import__("ie7", globals(), locals(), [''])
-
+
# if invoked rather than imported, test
if __name__ == "__main__":
version = "7.0.5370.1"
-
+
print GetScraper(version).version
- \ No newline at end of file
+
diff --git a/tools/site_compare/scrapers/ie/ie7.py b/tools/site_compare/scrapers/ie/ie7.py
index f5d7583..da26d9b 100644
--- a/tools/site_compare/scrapers/ie/ie7.py
+++ b/tools/site_compare/scrapers/ie/ie7.py
@@ -23,29 +23,29 @@ def GetBrowser(path):
Args:
path: full path to browser
-
+
Returns:
A tuple of (process handle, render pane)
"""
if not path: path = DEFAULT_PATH
-
+
(iewnd, ieproc, address_bar, render_pane, tab_window) = InvokeBrowser(path)
return (ieproc, iewnd, render_pane)
def InvokeBrowser(path):
"""Invoke the IE browser.
-
+
Args:
path: full path to browser
-
+
Returns:
A tuple of (main window, process handle, address bar,
render_pane, tab_window)
"""
# Invoke IE
(ieproc, iewnd) = windowing.InvokeAndWait(path)
-
+
# Get windows we'll need
for tries in xrange(10):
try:
@@ -60,13 +60,13 @@ def InvokeBrowser(path):
time.sleep(1)
continue
break
-
+
return (iewnd, ieproc, address_bar, render_pane, tab_window)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
-
+
Args:
urls: list of URLs to scrape
outdir: directory to place output
@@ -74,32 +74,32 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
None if success, else an error string
"""
path = r"c:\program files\internet explorer\iexplore.exe"
-
+
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
(iewnd, ieproc, address_bar, render_pane, tab_window) = (
InvokeBrowser(path) )
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(iewnd, pos, size, render_pane)
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
-
+
timedout = False
-
+
for url in urls:
-
+
# Double-click in the address bar, type the name, and press Enter
mouse.DoubleClickInWindow(address_bar)
keyboard.TypeString(url)
keyboard.TypeString("\n")
-
+
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(
tab_window, (6, 8, 22, 24), timeout)
@@ -107,10 +107,10 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
if timedout:
break
-
+
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
-
+
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
@@ -120,55 +120,55 @@ def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
-
+
windowing.EndProcess(ieproc)
-
+
if timedout:
return "timeout"
-
-
+
+
def Time(urls, size, timeout, **kwargs):
"""Measure how long it takes to load each of a series of URLs
-
+
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
-
+
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
-
+
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
-
+
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane, tab_window) = InvokeBrowser(path)
-
+
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
-
+
# Double-click in the address bar, type the name, and press Enter
mouse.DoubleClickInWindow(address_bar)
keyboard.TypeString(url)
keyboard.TypeString("\n")
-
+
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(
tab_window, (6, 8, 22, 24), timeout)
timedout = load_time < 0
-
+
if timedout:
load_time = "timeout"
-
+
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
keyboard.TypeString(r"{\4}", use_modifiers=True)
@@ -179,9 +179,9 @@ def Time(urls, size, timeout, **kwargs):
except pywintypes.error:
load_time = "crashed"
proc = None
-
+
ret.append( (url, load_time) )
-
+
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
if proc:
@@ -191,7 +191,7 @@ def Time(urls, size, timeout, **kwargs):
return ret
-
+
if __name__ == "__main__":
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\ie7\7.0.5380.11"
diff --git a/tools/site_compare/site_compare.py b/tools/site_compare/site_compare.py
index 976f0ef..15359fa 100644
--- a/tools/site_compare/site_compare.py
+++ b/tools/site_compare/site_compare.py
@@ -37,7 +37,7 @@ import commands.scrape # scrape a URL or series of URLs to a bitmap
def Scrape(browsers, urls, window_size=(1024, 768),
window_pos=(0, 0), timeout=20, save_path=None, **kwargs):
"""Invoke one or more browsers over one or more URLs, scraping renders.
-
+
Args:
browsers: browsers to invoke with optional version strings
urls: URLs to visit
@@ -49,43 +49,43 @@ def Scrape(browsers, urls, window_size=(1024, 768),
kwargs: miscellaneous keyword args, passed to scraper
Returns:
None
-
+
@TODO(jhaas): more parameters, or perhaps an indefinite dictionary
parameter, for things like length of time to wait for timeout, speed
of mouse clicks, etc. Possibly on a per-browser, per-URL, or
per-browser-per-URL basis
"""
-
+
if type(browsers) in types.StringTypes: browsers = [browsers]
-
+
if save_path is None:
# default save path is "scrapes" off the current root
save_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
-
+
for browser in browsers:
# Browsers should be tuples of (browser, version)
if type(browser) in types.StringTypes: browser = (browser, None)
scraper = scrapers.GetScraper(browser)
-
+
full_path = os.path.join(save_path, browser[0], scraper.version)
drivers.windowing.PreparePath(full_path)
-
+
scraper.Scrape(urls, full_path, window_size, window_pos, timeout, kwargs)
-
-
+
+
def Compare(base, compare, ops, root_path=None, out_path=None):
"""Compares a series of scrapes using a series of operators.
-
+
Args:
base: (browser, version) tuple of version to consider the baseline
compare: (browser, version) tuple of version to compare to
ops: list of operators plus operator arguments
root_path: root of the scrapes
out_path: place to put any output from the operators
-
+
Returns:
None
-
+
@TODO(jhaas): this method will likely change, to provide a robust and
well-defined way of chaining operators, applying operators conditionally,
and full-featured scripting of the operator chain. There also needs
@@ -95,28 +95,28 @@ def Compare(base, compare, ops, root_path=None, out_path=None):
if root_path is None:
# default save path is "scrapes" off the current root
root_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
-
+
if out_path is None:
out_path = os.path.join(os.path.split(__file__)[0], "Compares")
-
+
if type(base) in types.StringTypes: base = (base, None)
if type(compare) in types.StringTypes: compare = (compare, None)
if type(ops) in types.StringTypes: ops = [ops]
-
+
base_dir = os.path.join(root_path, base[0])
compare_dir = os.path.join(root_path, compare[0])
-
+
if base[1] is None:
# base defaults to earliest capture
base = (base[0], max(os.listdir(base_dir)))
-
+
if compare[1] is None:
# compare defaults to latest capture
compare = (compare[0], min(os.listdir(compare_dir)))
-
+
out_path = os.path.join(out_path, base[0], base[1], compare[0], compare[1])
drivers.windowing.PreparePath(out_path)
-
+
# TODO(jhaas): right now we're just dumping output to a log file
# (and the console), which works as far as it goes but isn't nearly
# robust enough. Change this after deciding exactly what we want to
@@ -126,10 +126,10 @@ def Compare(base, compare, ops, root_path=None, out_path=None):
(base[0], base[1], compare[0], compare[1]))
out_file.write(description_string)
print description_string
-
+
base_dir = os.path.join(base_dir, base[1])
compare_dir = os.path.join(compare_dir, compare[1])
-
+
for filename in os.listdir(base_dir):
out_file.write("%s: " % filename)
@@ -137,15 +137,15 @@ def Compare(base, compare, ops, root_path=None, out_path=None):
out_file.write("Does not exist in target directory\n")
print "File %s does not exist in target directory" % filename
continue
-
+
base_filename = os.path.join(base_dir, filename)
compare_filename = os.path.join(compare_dir, filename)
-
+
for op in ops:
if type(op) in types.StringTypes: op = (op, None)
-
+
module = operators.GetOperator(op[0])
-
+
ret = module.Compare(base_filename, compare_filename)
if ret is None:
print "%s: OK" % (filename,)
@@ -154,24 +154,24 @@ def Compare(base, compare, ops, root_path=None, out_path=None):
print "%s: %s" % (filename, ret[0])
out_file.write("%s\n" % (ret[0]))
ret[1].save(os.path.join(out_path, filename))
-
+
out_file.close()
def main():
"""Main executable. Parse the command line and invoke the command."""
cmdline = command_line.CommandLine()
-
+
# The below two commands are currently unstable so have been disabled
# commands.compare2.CreateCommand(cmdline)
# commands.maskmaker.CreateCommand(cmdline)
commands.measure.CreateCommand(cmdline)
commands.scrape.CreateCommand(cmdline)
-
+
cmdline.ParseCommandLine()
if __name__ == "__main__":
main()
-
+
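The site_compare.py hunks above cover the module's two entry points. Scrape() accepts browser specs either as bare names or (browser, version) tuples, a URL list, window geometry, and a timeout. A minimal usage sketch with placeholder browsers and URLs (sketch only, not part of this patch):

  import site_compare

  site_compare.Scrape(
      [("chrome", None), ("firefox", None)],  # (browser, version) pairs; None = default version
      ["http://www.google.com", "http://www.microsoft.com"],
      window_size=(1024, 768),
      timeout=30)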
diff --git a/tools/site_compare/utils/browser_iterate.py b/tools/site_compare/utils/browser_iterate.py
index 50ed411..6ea4e5f 100644
--- a/tools/site_compare/utils/browser_iterate.py
+++ b/tools/site_compare/utils/browser_iterate.py
@@ -32,7 +32,7 @@ PORT = 42492
def SetupIterationCommandLine(cmd):
"""Adds the necessary flags for iteration to a command.
-
+
Args:
cmd: an object created by cmdline.AddCommand
"""
@@ -71,15 +71,15 @@ def SetupIterationCommandLine(cmd):
def Iterate(command, iteration_func):
"""Iterates over a list of URLs, calling a function on each.
-
+
Args:
command: the command line containing the iteration flags
iteration_func: called for each URL with (proc, wnd, url, result)
"""
-
+
# Retrieve the browser scraper to use to invoke the browser
scraper = scrapers.GetScraper((command["--browser"], command["--browserver"]))
-
+
def AttachToBrowser(path, timeout):
"""Invoke the browser process and connect to the socket."""
(proc, frame, wnd) = scraper.GetBrowser(path)
@@ -106,7 +106,7 @@ def Iterate(command, iteration_func):
if command["--size"]:
# Resize and reposition the frame
windowing.MoveAndSizeWindow(frame, (0, 0), command["--size"], wnd)
-
+
s.settimeout(timeout)
Iterate.proc = proc
@@ -133,7 +133,7 @@ def Iterate(command, iteration_func):
browser = command["--browserpath"]
else:
browser = None
-
+
# Read the URLs from the file
if command["--url"]:
url_list = [command["--url"]]
@@ -174,13 +174,13 @@ def Iterate(command, iteration_func):
try:
recv = Iterate.s.recv(MAX_URL)
response = response + recv
-
+
# Workaround for an oddity: when Firefox closes
# gracefully, somehow Python doesn't detect it.
# (Telnet does)
- if not recv:
+ if not recv:
raise socket.error
-
+
except socket.timeout:
response = url + ",hang\n"
DetachFromBrowser()
@@ -192,10 +192,10 @@ def Iterate(command, iteration_func):
# If we received a timeout response, restart the browser
if response[-9:] == ",timeout\n":
DetachFromBrowser()
-
+
# Invoke the iteration function
iteration_func(url, Iterate.proc, Iterate.wnd, response)
- # We're done
+ # We're done
DetachFromBrowser()
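Iterate() in browser_iterate.py drives a scraper-launched browser over a URL list and hands each result to a callback; per the call site in the hunk above, the callback is invoked as iteration_func(url, proc, wnd, response). A minimal sketch of such a callback (the name is made up; sketch only, not part of this patch):

  def LogResponse(url, proc, wnd, response):
    """Hypothetical iteration_func: print the raw response line for each URL."""
    print "%s -> %s" % (url, response.strip())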
diff --git a/tools/traceline/traceline/assembler.h b/tools/traceline/traceline/assembler.h
index 32f12bc4..232fb36 100755
--- a/tools/traceline/traceline/assembler.h
+++ b/tools/traceline/traceline/assembler.h
@@ -536,7 +536,7 @@ class CodeBuffer {
void stosd() {
emit(0xab);
}
-
+
void sysenter() {
emit(0x0f); emit(0x34);
}
diff --git a/tools/traceline/traceline/main.cc b/tools/traceline/traceline/main.cc
index d0e220b..f149853 100755
--- a/tools/traceline/traceline/main.cc
+++ b/tools/traceline/traceline/main.cc
@@ -67,9 +67,9 @@ class Playground {
: stack_unwind_depth_(0),
log_heap_(false),
log_lock_(false),
- vista_(false) { }
+ vista_(false) { }
+
-
// The maximum amount of frames we should unwind from the call stack.
int stack_unwind_depth() { return stack_unwind_depth_; }
void set_stack_unwind_depth(int depth) { stack_unwind_depth_ = depth; }
@@ -102,7 +102,7 @@ class Playground {
// fields that we expect to be zero. TODO this could be a lot better.
memset(buf_, 0, sizeof(buf_));
}
-
+
void AllocateInRemote() {
// Try to get something out of the way and easy to debug.
static void* kPlaygroundAddr = reinterpret_cast<void*>(0x66660000);
@@ -276,7 +276,7 @@ class Playground {
(remote_addr_ + stub_offset + cb->size() + 5);
cb->jmp_rel(off);
}
-
+
// Makes a call to NtQueryPerformanceCounter, writing the timestamp to the
// buffer pointed to by EDI. EDI it not incremented. EAX is not preserved.
void AssembleQueryPerformanceCounter(CodeBuffer* cb) {
@@ -682,7 +682,7 @@ class Playground {
cb.mov(EDX, ESP);
cb.sysenter();
- if (cb.size() > 200) {
+ if (cb.size() > 200) {
NOTREACHED("code too big: %d", cb.size());
}
}
@@ -718,7 +718,7 @@ class Playground {
cb.pop(EDI); // restore EDI that was saved in the record
cb.ret(); // jmp back to the real ret ...
- if (cb.size() > 56) {
+ if (cb.size() > 56) {
NOTREACHED("ug");
}
}
@@ -742,7 +742,7 @@ class Playground {
// can the same lock have multiple different copies, I would assume not.
{
CodeBuffer cb(buf_ + kStubOffset);
-
+
// Set up an additional frame so that we capture the return.
// TODO use memory instructions instead of using registers.
cb.pop(EAX); // return address
@@ -760,7 +760,7 @@ class Playground {
{
CodeBuffer cb(buf_ + kStubOffset + 40);
-
+
cb.push(ESI);
cb.mov(ESI, ESP);
cb.push(EAX);
@@ -789,7 +789,7 @@ class Playground {
{
CodeBuffer cb(buf_ + kStubOffset);
-
+
// Set up an additional frame so that we capture the return.
// TODO use memory instructions instead of using registers.
cb.pop(EAX); // return address
@@ -806,7 +806,7 @@ class Playground {
{
CodeBuffer cb(buf_ + kStubOffset + 40);
-
+
cb.push(ESI);
cb.mov(ESI, ESP);
cb.push(EDI);
@@ -836,7 +836,7 @@ class Playground {
std::string moved_instructions = PatchPreamble(kFuncName, kStubOffset);
CodeBuffer cb(buf_ + kStubOffset);
-
+
// TODO use memory instructions instead of using registers.
cb.pop(EDX); // return address
cb.pop(EAX); // first argument (critical section pointer)
diff --git a/tools/traceline/traceline/sym_resolver.h b/tools/traceline/traceline/sym_resolver.h
index 53948b0..c3d93c2 100755
--- a/tools/traceline/traceline/sym_resolver.h
+++ b/tools/traceline/traceline/sym_resolver.h
@@ -17,7 +17,7 @@
#include <string>
#include <map>
-static BOOL CALLBACK SymEnumer(PCSTR name, DWORD64 base, PVOID context) {
+static BOOL CALLBACK SymEnumer(PCSTR name, DWORD64 base, PVOID context) {
reinterpret_cast<std::vector<DWORD64>*>(context)->push_back(base);
return TRUE;
}
@@ -153,7 +153,7 @@ class SymResolver {
NOTREACHED("SymCleanup failed: %d", GetLastError());
}
}
-
+
private:
HANDLE proc_;
ULONG64 base_;