path: root/tools/idl_parser/idl_lexer_test.py
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from idl_lexer import IDLLexer
from idl_ppapi_lexer import IDLPPAPILexer
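
# IDLLexer tokenizes WebIDL source text.  IDLPPAPILexer is assumed here to
# expose the same Tokenize()/GetTokens() interface for Pepper (PPAPI) IDL,
# which is what lets PepperIDLLexer below reuse these tests unchanged.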

#
# FileToTokens
#
# From a source file, generate a list of tokens.
#
def FileToTokens(lexer, filename):
  with open(filename, 'rb') as srcfile:
    lexer.Tokenize(srcfile.read(), filename)
    return lexer.GetTokens()


#
# TextToTokens
#
# From a string of source text, generate a list of tokens.
#
def TextToTokens(lexer, text):
  lexer.Tokenize(text)
  return lexer.GetTokens()
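
# Example usage (a minimal sketch; the snippet below is illustrative only,
# and tokens are assumed to expose .type, .value and .lineno, as the tests
# below rely on):
#
#   lexer = IDLLexer()
#   tokens = TextToTokens(lexer, 'interface Foo { };')
#   for t in tokens:
#     print t.type, t.value, t.lineno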


class WebIDLLexer(unittest.TestCase):
  def setUp(self):
    self.lexer = IDLLexer()
    self.filenames = [
        'test_lexer/values.in',
        'test_lexer/keywords.in'
    ]
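    # Both files are assumed to hold whitespace-separated
    # '<expected type> <token>' pairs as consumed by testExpectedType below,
    # with values.in covering literal values and keywords.in the reserved
    # words.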

  #
  # testRebuildText
  #
  # From a set of tokens, generate a new source text by joining the token
  # values with newlines.  The new source is then tokenized and compared
  # against the old set.
  #
  def testRebuildText(self):
    for filename in self.filenames:
      tokens1 = FileToTokens(self.lexer, filename)
      to_text = '\n'.join(['%s' % t.value for t in tokens1])
      tokens2 = TextToTokens(self.lexer, to_text)

      count1 = len(tokens1)
      count2 = len(tokens2)
      self.assertEqual(count1, count2)

      for i in range(count1):
        msg = 'Value %s does not match original %s on line %d of %s.' % (
              tokens2[i].value, tokens1[i].value, tokens1[i].lineno, filename)
        self.assertEqual(tokens1[i].value, tokens2[i].value, msg)

  #
  # testExpectedType
  #
  # From a list of token pairs, verify that the type of the second token in
  # each pair matches the value of the first.  For input such as:
  #   integer 123 float 1.1 ...
  # the test passes when the first token of a pair has both the type and
  # value of the keyword 'integer', and the second token has the type
  # 'integer' and the value 123, and so on.
  #
  def testExpectedType(self):
    for filename in self.filenames:
      tokens = FileToTokens(self.lexer, filename)
      count = len(tokens)
      self.assertTrue(count > 0)
      self.assertFalse(count & 1)

      index = 0
      while index < count:
        expect_type = tokens[index].value
        actual_type = tokens[index + 1].type
        msg = 'Type %s does not match expected %s on line %d of %s.' % (
              actual_type, expect_type, tokens[index].lineno, filename)
        index += 2
        self.assertEqual(expect_type, actual_type, msg)


class PepperIDLLexer(WebIDLLexer):
  def setUp(self):
    self.lexer = IDLPPAPILexer()
    self.filenames = [
        'test_lexer/values_ppapi.in',
        'test_lexer/keywords_ppapi.in'
    ]


if __name__ == '__main__':
  unittest.main()