import itertools


def generate_tokens(tokens):
    """Yield (token_name, token_number) pairs for the given token lines.

    Blank lines and comment lines are skipped, and numbering starts at 0.
    The sentinel entries N_TOKENS and NT_OFFSET are emitted last.
    """
    numbers = itertools.count(0)
    for line in tokens:
        line = line.strip()

        if not line:
            continue
        if line.startswith('#'):
            continue

        name = line.split()[0]
        yield (name, next(numbers))

    yield ('N_TOKENS', next(numbers))
    yield ('NT_OFFSET', 256)


def generate_opmap(tokens):
    """Yield (operator_string, token_name) pairs for the given token lines.

    Only lines with exactly two fields (a name and a quoted operator) are
    used; blank lines, comments, and name-only lines are skipped.
    """
    for line in tokens:
        line = line.strip()

        if not line:
            continue
        if line.startswith('#'):
            continue

        pieces = line.split()

        if len(pieces) != 2:
            continue

        name, op = pieces
        yield (op.strip("'"), name)

    # Yield '<>' independently.  This is needed so it does not collide with
    # the token generation in "generate_tokens": if this symbol were
    # included in Grammar/Tokens, it would collide with != because both
    # share the same token name (NOTEQUAL).
    yield ('<>', 'NOTEQUAL')
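

# A minimal usage sketch (an illustrative assumption, not part of the
# original module): it presumes the input is an iterable of lines in the
# Grammar/Tokens format, i.e. a token name optionally followed by its
# quoted operator string.
if __name__ == "__main__":
    sample_lines = [
        "ENDMARKER",
        "# comments and blank lines are ignored",
        "",
        "LPAR                    '('",
        "RPAR                    ')'",
        "NOTEQUAL                '!='",
    ]
    # Token numbering starts at 0 and ends with N_TOKENS and NT_OFFSET.
    print(dict(generate_tokens(sample_lines)))
    # Operator strings map back to token names; '<>' maps to NOTEQUAL too.
    print(dict(generate_opmap(sample_lines)))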