import itertools


def generate_tokens(tokens):
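    """Yield (token_name, token_number) pairs from token definition lines.

    Each non-blank, non-comment line (e.g. a line of Grammar/Tokens)
    contributes its first word as a token name, numbered sequentially
    from 0.  Two sentinel entries, N_TOKENS and NT_OFFSET, are yielded
    at the end.
    """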
    numbers = itertools.count(0)
    for line in tokens:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        name = line.split()[0]
        yield (name, next(numbers))

    yield ("N_TOKENS", next(numbers))
    yield ("NT_OFFSET", 256)


def generate_opmap(tokens):
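    """Yield (operator, token_name) pairs for tokens defined with an operator.

    Only lines with exactly two fields (a token name and a quoted operator
    string) produce an entry; the surrounding quotes are stripped.  The
    legacy <> spelling is yielded by hand at the end (see the comment below).
    """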
    for line in tokens:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        pieces = line.split()

        if len(pieces) != 2:
            continue

        name, op = pieces
        yield (op.strip("'"), name)

    # Yield the <> operator separately. It cannot be listed in Grammar/Tokens
    # because it shares its token name (NOTEQUAL) with !=, so including it
    # there would collide with the numbering done in generate_tokens.
    yield ("<>", "NOTEQUAL")
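

# Example usage (a minimal sketch; the sample below only mimics the format of
# Grammar/Tokens and is not the real file contents):
#
#     sample = [
#         "ENDMARKER",
#         "NAME",
#         "NOTEQUAL '!='",
#         "# comments and blank lines are skipped",
#     ]
#     list(generate_tokens(sample))
#     # [('ENDMARKER', 0), ('NAME', 1), ('NOTEQUAL', 2),
#     #  ('N_TOKENS', 3), ('NT_OFFSET', 256)]
#     list(generate_opmap(sample))
#     # [('!=', 'NOTEQUAL'), ('<>', 'NOTEQUAL')]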