Add support for #pragma

Preprocessor pragmas and their arguments are tokenized (as PPPRAGMA and
PPPRAGMASTR tokens) and represented in the AST as Pragma nodes, with the
argument stored in the node's string attribute. If no argument was given,
the string is empty.
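
For illustration, a minimal sketch of the resulting behavior (using the
public CParser API; the Pragma node itself is defined in the diff below):

    from pycparser import c_parser

    ast = c_parser.CParser().parse('#pragma omp parallel\nint x;\n')
    pragma = ast.ext[0]      # expected: a c_ast.Pragma node
    print(pragma.string)     # expected: 'omp parallel'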

Unit tests for the lexer, parser, and generator have been updated and
extended accordingly.

The previous behavior, in which #pragma lines were silently ignored, is
now obsolete.
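
With the generator change below, emitting C back from such an AST is
expected to reproduce the pragma on its own line (again a sketch):

    from pycparser import c_parser, c_generator

    ast = c_parser.CParser().parse('#pragma once\nint x;\n')
    print(c_generator.CGenerator().visit(ast))
    # expected output:
    #   #pragma once
    #   int x;
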
diff --git a/pycparser/_c_ast.cfg b/pycparser/_c_ast.cfg
index fad5691..10c73e1 100644
--- a/pycparser/_c_ast.cfg
+++ b/pycparser/_c_ast.cfg
@@ -187,3 +187,5 @@
 Union: [name, decls**]
 
 While: [cond*, stmt*]
+
+Pragma: [string]
diff --git a/pycparser/c_ast.py b/pycparser/c_ast.py
index 4989f50..19119e1 100644
--- a/pycparser/c_ast.py
+++ b/pycparser/c_ast.py
@@ -795,3 +795,15 @@
 
     attr_names = ()
 
+class Pragma(Node):
+    __slots__ = ('string', 'coord', '__weakref__')
+    def __init__(self, string, coord=None):
+        self.string = string
+        self.coord = coord
+
+    def children(self):
+        nodelist = []
+        return tuple(nodelist)
+
+    attr_names = ('string', )
+
diff --git a/pycparser/c_generator.py b/pycparser/c_generator.py
index f4a5a12..4c53721 100644
--- a/pycparser/c_generator.py
+++ b/pycparser/c_generator.py
@@ -39,6 +39,12 @@
 
     def visit_ID(self, n):
         return n.name
+
+    def visit_Pragma(self, n):
+        ret = '#pragma'
+        if n.string:
+            ret += ' ' + n.string
+        return ret
 
     def visit_ArrayRef(self, n):
         arrref = self._parenthesize_unless_simple(n.name)
@@ -157,6 +163,8 @@
         for ext in n.ext:
             if isinstance(ext, c_ast.FuncDef):
                 s += self.visit(ext)
+            elif isinstance(ext, c_ast.Pragma):
+                s += self.visit(ext) + '\n'
             else:
                 s += self.visit(ext) + ';\n'
         return s
diff --git a/pycparser/c_lexer.py b/pycparser/c_lexer.py
index cbb9d26..95e3971 100644
--- a/pycparser/c_lexer.py
+++ b/pycparser/c_lexer.py
@@ -171,7 +171,9 @@
         'ELLIPSIS',
 
         # pre-processor
-        'PPHASH',      # '#'
+        'PPHASH',       # '#'
+        'PPPRAGMA',     # 'pragma'
+        'PPPRAGMASTR',
     )
 
     ##
@@ -274,7 +276,6 @@
 
     def t_ppline_NEWLINE(self, t):
         r'\n'
-
         if self.pp_line is None:
             self._error('line number missing in #line', t)
         else:
@@ -304,15 +305,14 @@
 
     def t_pppragma_PPPRAGMA(self, t):
         r'pragma'
-        pass
+        return t
 
-    t_pppragma_ignore = ' \t<>.-{}();=+-*/$%@&^~!?:,0123456789'
+    t_pppragma_ignore = ' \t'
 
-    @TOKEN(string_literal)
-    def t_pppragma_STR(self, t): pass
-
-    @TOKEN(identifier)
-    def t_pppragma_ID(self, t): pass
+    def t_pppragma_STR(self, t):
+        '.+'
+        t.type = 'PPPRAGMASTR'
+        return t
 
     def t_pppragma_error(self, t):
         self._error('invalid #pragma directive', t)
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
index f4f7453..494f501 100644
--- a/pycparser/c_parser.py
+++ b/pycparser/c_parser.py
@@ -537,8 +537,9 @@
 
     def p_external_declaration_3(self, p):
         """ external_declaration    : pp_directive
+                                    | pppragma_directive
         """
-        p[0] = p[1]
+        p[0] = [p[1]]
 
     def p_external_declaration_4(self, p):
         """ external_declaration    : SEMI
@@ -549,7 +550,16 @@
         """ pp_directive  : PPHASH
         """
         self._parse_error('Directives not supported yet',
-            self._coord(p.lineno(1)))
+                          self._coord(p.lineno(1)))
+
+    def p_pppragma_directive(self, p):
+        """ pppragma_directive      : PPPRAGMA
+                                    | PPPRAGMA PPPRAGMASTR
+        """
+        if len(p) == 3:
+            p[0] = c_ast.Pragma(p[2])
+        else:
+            p[0] = c_ast.Pragma("")
 
     # In function definitions, the declarator can be followed by
     # a declaration list, for old "K&R style" function definitios.
@@ -589,6 +599,7 @@
                         | selection_statement
                         | iteration_statement
                         | jump_statement
+                        | pppragma_directive
         """
         p[0] = p[1]
 
diff --git a/tests/test_c_generator.py b/tests/test_c_generator.py
index dd6e5ed..edaced1 100644
--- a/tests/test_c_generator.py
+++ b/tests/test_c_generator.py
@@ -245,5 +245,14 @@
             }
         ''')
 
+    def test_pragma(self):
+        self._assert_ctoc_correct(r'''
+            #pragma foo
+            void f() {
+                #pragma bar
+                i = (a, b, c);
+            }
+        ''')
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_c_lexer.py b/tests/test_c_lexer.py
index 033963e..6a15276 100644
--- a/tests/test_c_lexer.py
+++ b/tests/test_c_lexer.py
@@ -309,28 +309,52 @@
 
 
     def test_preprocessor_pragma(self):
-        str = r'''
+        str = '''
         42
+        #pragma
         #pragma helo me
         #pragma once
         # pragma omp parallel private(th_id)
-        #pragma {pack: 2, smack: 3}
+        #\tpragma {pack: 2, smack: 3}
         #pragma <includeme.h> "nowit.h"
         #pragma "string"
         #pragma somestring="some_other_string"
         #pragma id 124124 and numbers 0235495
         59
         '''
-
-        # Check that pragmas are ignored but the line number advances
+        # Check that pragmas are tokenized, including trailing string
         self.clex.input(str)
         self.clex.reset_lineno()
 
         t1 = self.clex.token()
         self.assertEqual(t1.type, 'INT_CONST_DEC')
+
         t2 = self.clex.token()
-        self.assertEqual(t2.type, 'INT_CONST_DEC')
-        self.assertEqual(t2.lineno, 11)
+        self.assertEqual(t2.type, 'PPPRAGMA')
+
+        t3 = self.clex.token()
+        self.assertEqual(t3.type, 'PPPRAGMA')
+
+        t4 = self.clex.token()
+        self.assertEqual(t4.type, 'PPPRAGMASTR')
+        self.assertEqual(t4.value, 'helo me')
+
+        for i in range(3):
+            t = self.clex.token()
+
+        t5 = self.clex.token()
+        self.assertEqual(t5.type, 'PPPRAGMASTR')
+        self.assertEqual(t5.value, 'omp parallel private(th_id)')
+
+        for i in range(5):
+            ta = self.clex.token()
+            self.assertEqual(ta.type, 'PPPRAGMA')
+            tb = self.clex.token()
+            self.assertEqual(tb.type, 'PPPRAGMASTR')
+
+        t6 = self.clex.token()
+        self.assertEqual(t6.type, 'INT_CONST_DEC')
+        self.assertEqual(t6.lineno, 12)
 
 
 
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
index c1bf5be..ed957dc 100755
--- a/tests/test_c_parser.py
+++ b/tests/test_c_parser.py
@@ -1284,6 +1284,21 @@
         self.assertTrue(isinstance(ps2.ext[0].body.block_items[1].type.dim, Assignment))
         self.assertTrue(isinstance(ps2.ext[0].body.block_items[2].type.dim, ID))
 
+    def test_pragma(self):
+        s1 = r'''
+            #pragma bar
+            void main() {
+                #pragma foo
+                for(;;) {}
+            }
+            '''
+        s1_ast = self.parse(s1)
+        self.assertTrue(isinstance(s1_ast.ext[0], Pragma))
+        self.assertEqual(s1_ast.ext[0].string, 'bar')
+
+        self.assertTrue(isinstance(s1_ast.ext[1].body.block_items[0], Pragma))
+        self.assertEqual(s1_ast.ext[1].body.block_items[0].string, 'foo')
+
 
 class TestCParser_whole_code(TestCParser_base):
     """ Testing of parsing whole chunks of code.
@@ -1818,6 +1833,7 @@
             '''
         self.assertRaises(ParseError, self.parse, s2)
 
+
 if __name__ == '__main__':
     #~ suite = unittest.TestLoader().loadTestsFromNames(
         #~ ['test_c_parser.TestCParser_fundamentals.test_typedef'])