path: root/chrome/common/extensions/docs/server2/schema_util.py
blob: c5d1456a4dbc4ee8de6899a7e63364717987602d
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from collections import defaultdict, Mapping
import traceback

from third_party.json_schema_compiler import json_parse, idl_schema, idl_parser


def RemoveNoDocs(item):
  '''Removes nodes marked 'nodoc' from an API schema so that they are not
  rendered. Returns True if |item| itself should be removed entirely.
  '''
  if json_parse.IsDict(item):
    if item.get('nodoc', False):
      return True
    for key, value in item.items():
      if RemoveNoDocs(value):
        del item[key]
  elif isinstance(item, list):
    to_remove = []
    for i in item:
      if RemoveNoDocs(i):
        to_remove.append(i)
    for i in to_remove:
      item.remove(i)
  return False
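
# For example, given
#   {'functions': [{'name': 'hidden', 'nodoc': True}, {'name': 'shown'}]}
# RemoveNoDocs mutates the dict in place, leaving
#   {'functions': [{'name': 'shown'}]}
# and returns False; it returns True only when |item| itself carries 'nodoc'.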


def DetectInlineableTypes(schema):
  '''Looks for types that are only referenced once and marks them with
  'inline_doc'. The actual inlining is done by InlineDocs.
  '''
  if not schema.get('types'):
    return

  ignore = frozenset(('value', 'choices'))
  refcounts = defaultdict(int)
  # Use an explicit stack instead of recursion.
  stack = [schema]

  while stack:
    node = stack.pop()
    if isinstance(node, list):
      stack.extend(node)
    elif isinstance(node, Mapping):
      if '$ref' in node:
        refcounts[node['$ref']] += 1
      stack.extend(v for k, v in node.iteritems() if k not in ignore)

  for type_ in schema['types']:
    if 'noinline_doc' not in type_ and refcounts[type_['id']] == 1:
      type_['inline_doc'] = True

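# For example, given
#   {'types': [{'id': 'Details'}],
#    'functions': [{'parameters': [{'$ref': 'Details'}]}]}
# DetectInlineableTypes sets 'inline_doc': True on the 'Details' type because
# it is referenced exactly once ('$ref's reached only through 'value' or
# 'choices' keys are not counted).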

def InlineDocs(schema, retain_inlined_types):
  '''Replaces '$ref's that refer to types marked 'inline_doc' with the JSON
  for those types. If |retain_inlined_types| is False, the inlined types are
  removed from the schema.
  '''
  types = schema.get('types')
  if types is None:
    return

  inline_docs = {}
  types_without_inline_doc = []

  # Gather the types with inline_doc.
  for type_ in types:
    if type_.get('inline_doc'):
      inline_docs[type_['id']] = type_
      if not retain_inlined_types:
        for k in ('description', 'id', 'inline_doc'):
          type_.pop(k, None)
    else:
      types_without_inline_doc.append(type_)
  if not retain_inlined_types:
    schema['types'] = types_without_inline_doc

  def apply_inline(node):
    if isinstance(node, list):
      for i in node:
        apply_inline(i)
    elif isinstance(node, Mapping):
      ref = node.get('$ref')
      if ref and ref in inline_docs:
        node.update(inline_docs[ref])
        del node['$ref']
      for k, v in node.iteritems():
        apply_inline(v)

  apply_inline(schema)
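
# For example, with
#   schema['types'] == [{'id': 'Point', 'inline_doc': True, 'type': 'object'}]
# a node {'name': 'origin', '$ref': 'Point'} becomes
#   {'name': 'origin', 'type': 'object'}
# and, when |retain_inlined_types| is False, the 'Point' entry is also dropped
# from schema['types'].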


def ProcessSchema(path, file_data, retain_inlined_types=False):
  '''Parses |file_data| as JSON or IDL depending on the extension of the file
  at |path|. Then trims 'nodoc' nodes and, unless |retain_inlined_types| is
  True, removes the inlined types from the parsed schema data.
  '''
  def trim_and_inline(schema, is_idl=False):
    '''Modifies an API schema in place by removing nodes that shouldn't be
    documented and inlining schema types that are only referenced once.
    '''
    if RemoveNoDocs(schema):
      # A return of True signifies that the entire schema should not be
      # documented. Otherwise, only nodes that request 'nodoc' are removed.
      return None
    if is_idl:
      DetectInlineableTypes(schema)
    InlineDocs(schema, retain_inlined_types)
    return schema

  if path.endswith('.idl'):
    idl = idl_schema.IDLSchema(idl_parser.IDLParser().ParseData(file_data))
    # Wrap the result in a list so that it behaves like JSON API data.
    return [trim_and_inline(idl.process()[0], is_idl=True)]

  try:
    schemas = json_parse.Parse(file_data)
  except Exception:
    raise ValueError('Cannot parse "%s" as JSON:\n%s' %
                     (path, traceback.format_exc()))
  # The parsed data may hold a single API schema (data for a specific API
  # file) or multiple schemas (data from extension_api.json).
  for schema in schemas:
    trim_and_inline(schema)
  return schemas
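

if __name__ == '__main__':
  # Minimal manual smoke test: a sketch showing how ProcessSchema is called.
  # It assumes this module's third_party imports resolve (i.e. it is run from
  # the server2 directory); the path 'example.json' and the sample schema
  # below are made up for illustration.
  import json
  _SAMPLE = '''[{
    "namespace": "example",
    "types": [{"id": "Options", "type": "object", "inline_doc": true}],
    "functions": [
      {"name": "hidden", "nodoc": true},
      {"name": "configure",
       "parameters": [{"name": "options", "$ref": "Options"}]}
    ]
  }]'''
  # Expect the 'hidden' function to be removed and 'Options' to be inlined
  # into the 'options' parameter.
  print json.dumps(ProcessSchema('example.json', _SAMPLE), indent=2)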