Added an example extension that uses stream filtering, and added unit tests.

--HG--
branch : trunk
diff --git a/docs/extensions.rst b/docs/extensions.rst
index 215a684..1d25a2b 100644
--- a/docs/extensions.rst
+++ b/docs/extensions.rst
@@ -229,6 +229,11 @@
 
         The value of the token.
 
+There is also a utility function in the lexer module that can count newline
+characters in strings:
+
+.. autofunction:: jinja2.lexer.count_newlines
+
 AST
 ~~~
 
diff --git a/ext/inlinegettext.py b/ext/inlinegettext.py
new file mode 100644
index 0000000..049ab3b
--- /dev/null
+++ b/ext/inlinegettext.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""
+    Inline Gettext
+    ~~~~~~~~~~~~~~
+
+    An example extension for Jinja2 that supports inline gettext calls.
+    Requires the i18n extension to be loaded.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+import re
+from jinja2.ext import Extension
+from jinja2.lexer import Token, count_newlines
+from jinja2.exceptions import TemplateSyntaxError
+
+
+_outside_re = re.compile(r'\\?(gettext|_)\(')
+_inside_re = re.compile(r'\\?[()]')
+
+
+class InlineGettext(Extension):
+    """This extension implements support for inline gettext blocks::
+
+        <h1>_(Welcome)</h1>
+        <p>_(This is a paragraph)</p>
+
+    Requires the i18n extension to be loaded and configured.
+    """
+
+    def filter_stream(self, stream):
+        paren_stack = 0
+
+        for token in stream:
+            if token.type is not 'data':
+                yield token
+                continue
+
+            pos = 0
+            lineno = token.lineno
+
+            while 1:
+                if not paren_stack:
+                    match = _outside_re.search(token.value, pos)
+                else:
+                    match = _inside_re.search(token.value, pos)
+                if match is None:
+                    break
+                new_pos = match.start()
+                if new_pos > pos:
+                    preval = token.value[pos:new_pos]
+                    yield Token(lineno, 'data', preval)
+                    lineno += count_newlines(preval)
+                gtok = match.group()
+                if gtok[0] == '\\':
+                    yield Token(lineno, 'data', gtok[1:])
+                elif not paren_stack:
+                    yield Token(lineno, 'block_begin', None)
+                    yield Token(lineno, 'name', 'trans')
+                    yield Token(lineno, 'block_end', None)
+                    paren_stack = 1
+                else:
+                    if gtok == '(' or paren_stack > 1:
+                        yield Token(lineno, 'data', gtok)
+                    paren_stack += gtok == ')' and -1 or 1
+                    if not paren_stack:
+                        yield Token(lineno, 'block_begin', None)
+                        yield Token(lineno, 'name', 'endtrans')
+                        yield Token(lineno, 'block_end', None)
+                pos = match.end()
+
+            if pos < len(token.value):
+                yield Token(lineno, 'data', token.value[pos:])
+
+        if paren_stack:
+            raise TemplateSyntaxError('unclosed gettext expression',
+                                      token.lineno, stream.name,
+                                      stream.filename)
diff --git a/jinja2/ext.py b/jinja2/ext.py
index 9dfa87c..f60aade 100644
--- a/jinja2/ext.py
+++ b/jinja2/ext.py
@@ -16,7 +16,6 @@
 from jinja2.environment import get_spontaneous_environment
 from jinja2.runtime import Undefined, concat
 from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.lexer import Token
 from jinja2.utils import contextfunction, import_string, Markup
 
 
@@ -80,6 +79,10 @@
         to filter tokens returned.  This method has to return an iterable of
         :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
         :class:`~jinja2.lexer.TokenStream`.
+
+        In the `ext` folder of the Jinja2 source distribution there is a file
+        called `inlinegettext.py`, which implements a filter that uses this
+        method.
         """
         return stream
 
@@ -261,6 +264,8 @@
                                 'pluralize section')
                 parser.fail('control structures in translatable sections are '
                             'not allowed')
+            elif parser.stream.eos:
+                parser.fail('unclosed translation block')
             else:
                 assert False, 'internal parser error'
 
diff --git a/jinja2/lexer.py b/jinja2/lexer.py
index 108e49c..0597b7a 100644
--- a/jinja2/lexer.py
+++ b/jinja2/lexer.py
@@ -71,6 +71,13 @@
                          sorted(operators, key=lambda x: -len(x))))
 
 
+def count_newlines(value):
+    """Count the number of newline characters in the string.  This is
+    useful for extensions that filter a stream.
+    """
+    return len(newline_re.findall(value))
+
+
 class Failure(object):
     """Class that raises a `TemplateSyntaxError` if called.
     Used by the `Lexer` to specify known errors.
diff --git a/tests/test_ext.py b/tests/test_ext.py
index 8002789..b425e18 100644
--- a/tests/test_ext.py
+++ b/tests/test_ext.py
@@ -8,14 +8,14 @@
 """
 import re
 from jinja2 import Environment, nodes
-from jinja2.ext import Extension, Token
+from jinja2.ext import Extension
+from jinja2.lexer import Token, count_newlines
 
 
 importable_object = 23
 
 
-_line_re = re.compile(r'(\r\n|\r|\n)')
-_gettext_re = re.compile(r'_\((([^)\\]*(?:\\.[^)\\]*)*))\)(?s)')
+_gettext_re = re.compile(r'_\((.*?)\)')
 
 
 class TestExtension(Extension):
@@ -55,9 +55,6 @@
             else:
                 yield token
 
-    def count_lines(self, value):
-        return len(_line_re.findall(value))
-
     def interpolate(self, token):
         pos = 0
         end = len(token.value)
@@ -69,7 +66,7 @@
             value = token.value[pos:match.start()]
             if value:
                 yield Token(lineno, 'data', value)
-            lineno += self.count_lines(token.value)
+            lineno += count_newlines(token.value)
             yield Token(lineno, 'variable_begin', None)
             yield Token(lineno, 'name', 'gettext')
             yield Token(lineno, 'lparen', None)