import itertools
2
def generate_tokens(tokens):
    """Yield (token_name, token_number) pairs from Grammar/Tokens lines.

    Blank lines and comment lines (starting with ``#``) are skipped.
    Token numbers are assigned sequentially from 0 in order of
    appearance.  After all real tokens, two sentinels are emitted:
    ``('N_TOKENS', <next number>)`` and ``('NT_OFFSET', 256)``.
    """
    numbers = itertools.count(0)
    for line in tokens:
        line = line.strip()

        # Skip blank lines and comments.  (The line is already
        # stripped, so a second strip before startswith is redundant.)
        if not line or line.startswith('#'):
            continue

        # The token name is the first whitespace-separated field; any
        # trailing fields (e.g. the operator text) are ignored here.
        name = line.split()[0]
        yield (name, next(numbers))

    yield ('N_TOKENS', next(numbers))
    yield ('NT_OFFSET', 256)
18
def generate_opmap(tokens):
    """Yield (operator_string, token_name) pairs from Grammar/Tokens lines.

    Only lines with exactly two fields -- a token name followed by a
    quoted operator -- produce a pair; blank lines, comments, and lines
    with any other field count are skipped.  The single quotes around
    the operator are stripped.  A final ('<>', 'NOTEQUAL') pair is
    always emitted (see comment below).
    """
    for line in tokens:
        line = line.strip()

        # Skip blank lines and comments.  (The line is already
        # stripped, so a second strip before startswith is redundant.)
        if not line or line.startswith('#'):
            continue

        pieces = line.split()

        # Operator lines have exactly two fields: NAME 'op'.
        if len(pieces) != 2:
            continue

        name, op = pieces
        yield (op.strip("'"), name)

    # Yield independently <>. This is needed so it does not collide
    # with the token generation in "generate_tokens" because if this
    # symbol is included in Grammar/Tokens, it will collide with !=
    # as it has the same name (NOTEQUAL).
    yield ('<>', 'NOTEQUAL')