blob: e7e8f3f1b661e34e32e7ae24744d1cd7002072be [file] [log] [blame]
Pablo Galindo1f24a712019-03-01 15:34:44 -08001import itertools
2
Pablo Galindo8bc401a2019-03-04 07:26:13 +00003
def generate_tokens(tokens):
    """Yield ``(token_name, token_number)`` pairs for each token definition.

    *tokens* is an iterable of text lines; blank lines and ``#`` comments
    are ignored, and the first whitespace-separated field of every other
    line is taken as the token name.  Names are numbered sequentially from
    0.  After the real tokens, two sentinel pairs are emitted:
    ``('N_TOKENS', <count>)`` and ``('NT_OFFSET', 256)``.
    """
    count = 0
    for raw in tokens:
        stripped = raw.strip()
        # Skip separators and commented-out definitions.
        if not stripped or stripped.startswith('#'):
            continue
        yield (stripped.split()[0], count)
        count += 1

    # Trailing sentinels: total token count, then the non-terminal offset.
    yield ('N_TOKENS', count)
    yield ('NT_OFFSET', 256)
17
Pablo Galindo8bc401a2019-03-04 07:26:13 +000018
def generate_opmap(tokens):
    """Yield ``(operator_string, token_name)`` pairs.

    *tokens* is an iterable of text lines.  Only lines of exactly two
    whitespace-separated fields — a token name followed by a quoted
    operator — produce a pair; blanks, ``#`` comments, and lines with any
    other field count are ignored.  The operator has surrounding single
    quotes stripped.
    """
    for raw in tokens:
        fields = raw.strip().split()
        # Only "NAME 'op'" lines qualify: exactly two fields and not a
        # comment (a leading '#' makes the first field start with '#').
        if len(fields) != 2 or fields[0].startswith('#'):
            continue
        token_name, quoted_op = fields
        yield (quoted_op.strip("'"), token_name)

    # Yield independently <>. This is needed so it does not collide
    # with the token generation in "generate_tokens" because if this
    # symbol is included in Grammar/Tokens, it will collide with !=
    # as it has the same name (NOTEQUAL).
    yield ('<>', 'NOTEQUAL')