summaryrefslogtreecommitdiffstats
path: root/third_party/google_input_tools/builder.py
blob: 0048012d3d22c80adfc699f437532ba3a07529a3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import json
import os
import re
import sys

# Template for matching Closure-style dependency declarations, e.g.
# goog.require('foo.bar') or goog.provide("foo.bar").  A raw string is
# required: '\s' and '\(' are invalid escape sequences in a plain string
# literal (a SyntaxWarning, and eventually an error, in modern Python).
_BASE_REGEX_STRING = r'^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
require_regex = re.compile(_BASE_REGEX_STRING % 'require')
provide_regex = re.compile(_BASE_REGEX_STRING % 'provide')

# Relative path to the Closure library bootstrap file; it must be exported
# into the target before any other source file.
base = os.path.join('third_party',
                    'closure_library',
                    'closure',
                    'goog',
                    'base.js')

def ProcessFile(filename):
  """Extracts provided and required namespaces.

  Description:
    Scans Javascript file for provided and required namespaces.

  Args:
    filename: name of the file to process.

  Returns:
    Pair of lists, where the first list contains namespaces provided by the
    file and the second contains a list of requirements.
  """

  provides = []
  requires = []
  with open(filename, 'r') as file_handle:
    for line in file_handle:
      # Match each pattern once per line instead of the former
      # re.match-then-re.search pair, which scanned every line twice.
      require_match = require_regex.match(line)
      if require_match:
        requires.append(require_match.group(1))
      provide_match = provide_regex.match(line)
      if provide_match:
        provides.append(provide_match.group(1))
  return provides, requires


def ExtractDependencies(filename, providers, requirements):
  """Extracts provided and required namespaces for a file.

  Description:
    Updates maps for namespace providers and file prerequisites.

  Args:
    filename: Path of the file to process.
    providers: Mapping of namespace to filename that provides the namespace.
    requirements: Mapping of filename to a list of prerequisite namespaces.
  """

  provides, requires = ProcessFile(filename)

  for name in provides:
    providers[name] = filename
  for name in requires:
    # setdefault replaces the explicit 'not filename in requirements'
    # membership test and manual list initialization.
    requirements.setdefault(filename, []).append(name)


def Export(target_file, source_filename, providers, requirements, processed):
  """Writes the contents of a file.

  Description:
    Appends the contents of the source file to the target file. In order to
    preserve proper dependencies, each file has its required namespaces
    processed before exporting the source file itself. The set of exported
    files is tracked to guard against multiple exports of the same file.
    Comments and 'require' statements are removed during export to reduce
    file size ('provide' statements are kept, since they create the
    namespace objects at runtime).

  Args:
    target_file: Handle to target file for export.
    source_filename: Name of the file to export.
    providers: Map of namespace to filename.
    requirements: Map of filename to required namespaces.
    processed: Set of processed files.
  """

  # Filename may have already been processed if it was a requirement of a
  # previous exported file.
  if source_filename in processed:
    return

  # Export requirements before the file itself: depth-first walk of the
  # dependency graph; 'processed' guards against duplicate exports.
  if source_filename in requirements:
    for namespace in requirements[source_filename]:
      if namespace in providers:
        dependency = providers[namespace]
        if dependency:
          Export(target_file, dependency, providers, requirements, processed)

  processed.add(source_filename)

  # Emit a comment naming each namespace this file provides.
  for name in providers:
    if providers[name] == source_filename:
      target_file.write('// %s%s' % (name, os.linesep))

  # 'with' guarantees the source file is closed (replaces try/finally).
  with open(source_filename, 'r') as source_file:
    # True while inside an unterminated /* ... */ comment from a prior line.
    comment_block = False
    for line in source_file:
      # Skip require statements.
      if not re.match(require_regex, line):
        formatted = line.rstrip()
        if comment_block:
          # Scan for trailing */ in multi-line comment.
          index = formatted.find('*/')
          if index >= 0:
            formatted = formatted[index + 2:]
            comment_block = False
          else:
            formatted = ''
        # Remove full-line // style comments.
        if formatted.lstrip().startswith('//'):
          formatted = ''
        # Remove /* */ style comments.
        start_comment = formatted.find('/*')
        end_comment = formatted.find('*/')
        while start_comment >= 0:
          if end_comment > start_comment:
            formatted = (formatted[:start_comment]
                + formatted[end_comment + 2:])
            start_comment = formatted.find('/*')
            end_comment = formatted.find('*/')
          else:
            # Comment runs past end of line: drop the remainder and keep
            # stripping on subsequent lines via comment_block.
            formatted = formatted[:start_comment]
            comment_block = True
            start_comment = -1
        if formatted.strip():
          target_file.write('%s%s' % (formatted, os.linesep))
  target_file.write('\n')


def ExtractSources(options):
  """Returns the list of source files selected by the command line options.

  Args:
    options: Parsed command line options.
  Returns:
    List of source files.  If the path option is specified then file paths
    are absolute.  Otherwise, relative paths may be used.
  """

  if not options.json_file:
    return options.sources

  # The list of source files may be supplied in a json file, which is useful
  # when it is too long for the command line.
  with open(options.json_file, 'r') as json_file:
    # Drop leading comment lines before parsing.
    data = [line for line in json_file if not line.startswith('#')]
  json_object = json.loads(os.linesep.join(data).replace('\'', '\"'))

  # Walk the dotted key path down to the list of sources.
  sources = json_object
  for key in options.json_sources.split('.'):
    sources = sources[key]
  if options.path:
    sources = [os.path.join(options.path, source) for source in sources]
  return sources


def main():
  """The entrypoint for this script."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--sources', nargs='*')
  parser.add_argument('--target', nargs=1)
  parser.add_argument('--json_file', nargs='?')
  parser.add_argument('--json_sources', nargs='?')
  parser.add_argument('--path', nargs='?')
  options = parser.parse_args()

  sources = ExtractSources(options)
  # sys.exit instead of assert: asserts are stripped under 'python -O',
  # which would let an empty source list through silently.
  if not sources:
    sys.exit('Missing source files.')

  # First pass: map each namespace to its providing file, and each file to
  # the namespaces it requires.
  providers = {}
  requirements = {}
  # Named 'source' rather than 'file' to avoid shadowing the builtin.
  for source in sources:
    ExtractDependencies(source, providers, requirements)

  # Second pass: write the Closure base file first, then each source in
  # dependency order.
  with open(options.target[0], 'w') as target_file:
    target_file.write('var CLOSURE_NO_DEPS=true;%s' % os.linesep)
    processed = set()
    base_path = base
    if options.path:
      base_path = os.path.join(options.path, base_path)
    Export(target_file, base_path, providers, requirements, processed)
    for source_filename in sources:
      Export(target_file, source_filename, providers, requirements, processed)

if __name__ == '__main__':
  main()