Environment.lex returns unicode tokens now, even if the input data was a bytestring.
author     Armin Ronacher <armin.ronacher@active-4.com>
           Fri, 23 May 2008 20:26:45 +0000 (22:26 +0200)
committer  Armin Ronacher <armin.ronacher@active-4.com>
           Fri, 23 May 2008 20:26:45 +0000 (22:26 +0200)
--HG--
branch : trunk

jinja2-debug.py
jinja2/ext.py
jinja2/lexer.py
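
A minimal sketch of the behaviour this commit establishes (Python 2 era API;
the template string is made up for illustration): Environment.lex now accepts
a plain bytestring and still yields unicode token values.

    from jinja2 import Environment

    env = Environment()
    # input is a bytestring; each token value comes back as unicode
    for lineno, token_type, value in env.lex('Hello {{ name }}!'):
        print lineno, token_type, repr(value)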

jinja2-debug.py
index a250a62ffd8835117754475a1aa1d6866d56957e..2a870e8cbe804c2c20b5c62dc7f8b13527ca8f38 100755 (executable)
@@ -13,7 +13,8 @@ import sys
 import jinja2
 from werkzeug import script
 
-env = jinja2.Environment(extensions=['jinja2.ext.i18n', 'jinja2.ext.do'])
+env = jinja2.Environment(extensions=['jinja2.ext.i18n', 'jinja2.ext.do',
+                                     'jinja2.ext.loopcontrols'])
 
 def shell_init_func():
     def _compile(x):
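
The debug helper now also enables the loopcontrols extension, which provides
{% break %} and {% continue %} inside for loops.  A small usage sketch (the
template text is made up for illustration):

    from jinja2 import Environment

    env = Environment(extensions=['jinja2.ext.loopcontrols'])
    tmpl = env.from_string(u'{% for i in range(10) %}'
                           u'{% if i > 2 %}{% break %}{% endif %}'
                           u'{{ i }} {% endfor %}')
    print tmpl.render()   # -> 0 1 2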
jinja2/ext.py
index 63e8f5b0cbca543f1f10010d7c35b0b8d30611c3..701f54d1d26281299e3a3730408bdb59ea023293 100644 (file)
@@ -25,7 +25,7 @@ GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
 
 
 class ExtensionRegistry(type):
-    """Gives the extension a unique identifier."""
+    """Gives the extension an unique identifier."""
 
     def __new__(cls, name, bases, d):
         rv = type.__new__(cls, name, bases, d)
@@ -95,13 +95,18 @@ class Extension(object):
                           dyn_args, dyn_kwargs, lineno=lineno)
 
 
+@contextfunction
+def _gettext_alias(context, string):
+    return context.resolve('gettext')(string)
+
+
 class InternationalizationExtension(Extension):
     """This extension adds gettext support to Jinja2."""
     tags = set(['trans'])
 
     def __init__(self, environment):
         Extension.__init__(self, environment)
-        environment.globals['_'] = contextfunction(lambda c, x: c['gettext'](x))
+        environment.globals['_'] = _gettext_alias
         environment.extend(
             install_gettext_translations=self._install,
             install_null_translations=self._install_null,
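
The new module-level _gettext_alias resolves `gettext` through the active
context with a fallback to the environment globals, where the old lambda did
a plain item lookup.  A hedged sketch of the effect, assuming null
translations are installed first:

    from jinja2 import Environment

    env = Environment(extensions=['jinja2.ext.i18n'])
    env.install_null_translations()
    # the _ alias finds gettext via context.resolve(), so this renders
    # even though gettext only lives in the environment globals
    print env.from_string(u"{{ _('Hello World') }}").render()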
jinja2/lexer.py
index 7f0b33f757a3bb92b068e129edde58ba46baa149..92ff12e0e3cd37c38cda1ae0980ff38495ab8900 100644 (file)
@@ -371,7 +371,6 @@ class Lexer(object):
         converted into types and postprocessed. For example, comments are removed,
         integers and floats are converted, strings are unescaped, etc.
         """
-        source = unicode(source)
         def generate():
             for lineno, token, value in self.tokeniter(source, name, filename):
                 if token in ('comment_begin', 'comment', 'comment_end',
@@ -425,7 +424,7 @@ class Lexer(object):
         wants.  The parser uses the `tokenize` function which returns a
         `TokenStream` of postprocessed tokens.
         """
-        source = '\n'.join(source.splitlines())
+        source = u'\n'.join(unicode(source).splitlines())
         pos = 0
         lineno = 1
         stack = ['root']
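
The rewritten join line above is what makes the unicode guarantee hold: it
decodes bytestring input and normalizes every newline flavour to '\n' in one
pass.  A tiny sketch of that line in isolation:

    source = 'foo\r\nbar\rbaz'             # bytestring, mixed newlines
    normalized = u'\n'.join(unicode(source).splitlines())
    assert normalized == u'foo\nbar\nbaz'  # unicode, '\n' only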