_spontaneous_environments = LRUCache(10)
-def _get_spontaneous_environment(*args):
+def get_spontaneous_environment(*args):
"""Return a new spontaneus environment. A spontaneus environment is an
unnamed and unaccessable (in theory) environment that is used for
template generated from a string and not from the file system.
comment_end_string='#}',
line_statement_prefix=None,
trim_blocks=False,
+ extensions=(),
optimized=True,
undefined=Undefined,
- loader=None,
- extensions=(),
- finalize=unicode):
+ finalize=unicode,
+ loader=None):
+ # !!Important notice!!
+ # The constructor accepts quite a few arguments that should be
+ # passed by keyword rather than position. However it's important to
+ # not change the order of arguments because it's used at least
+ # internally in those cases:
+        # - spontaneous environments (i18n extension and Template)
+        # - unittests
+        # If parameter changes are required only add parameters at the end
+        # and don't change the order (or the defaults!) of the arguments
+        # up to (but excluding) loader.
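+        # (get_spontaneous_environment(), for example, passes all of
+        # these positionally; see the Template code further down.)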
"""Here the possible initialization parameters:
========================= ============================================
`trim_blocks` If this is set to ``True`` the first newline
after a block is removed (block, not
variable tag!). Defaults to ``False``.
+    `extensions`              List of Jinja extensions to use. These can
+                              be import paths as strings or extension
+                              classes.
    `optimized`               Should the optimizer be enabled? Defaults
                              to ``True``.
`undefined` a subclass of `Undefined` that is used to
represent undefined variables.
- `loader` the loader which should be used.
- `extensions` List of Jinja extensions to use.
    `finalize`                A callable that finalizes the variable. Per
                              default this is `unicode`; another useful
                              builtin finalizer is `escape`.
+ `loader` the loader which should be used.
========================= ============================================
"""
self.line_statement_prefix = line_statement_prefix
self.trim_blocks = trim_blocks
- # load extensions
- self.extensions = []
- for extension in extensions:
- if isinstance(extension, basestring):
- extension = import_string(extension)
- # extensions are instanciated early but initalized later.
- self.extensions.append(object.__new__(extension))
-
# runtime information
self.undefined = undefined
self.optimized = optimized
# create lexer
self.lexer = Lexer(self)
- # initialize extensions
- for extension in self.extensions:
- extension.__init__(self)
+ # load extensions
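+        # note the ordering: extensions are instantiated here, after the
+        # lexer exists, so each one receives a fully set up environment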
+ self.extensions = []
+ for extension in extensions:
+ if isinstance(extension, basestring):
+ extension = import_string(extension)
+ self.extensions.append(extension(self))
def subscribe(self, obj, argument):
"""Get an item or attribute of an object."""
comment_end_string='#}',
line_statement_prefix=None,
trim_blocks=False,
+ extensions=(),
optimized=True,
undefined=Undefined,
- extensions=(),
finalize=unicode):
- # make sure extensions are hashable
- extensions = tuple(extensions)
- env = _get_spontaneous_environment(
+ env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, trim_blocks, optimized, undefined,
- None, extensions, finalize)
+ line_statement_prefix, trim_blocks, tuple(extensions), optimized,
+ undefined, finalize)
return env.from_string(source, template_class=cls)
def render(self, *args, **kwargs):
while 1:
try:
- while 1:
+ while c_size < size:
push(next())
c_size += 1
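+                    # a StopIteration from next() is caught by the
+                    # enclosing try/except and only re-raised below if
+                    # nothing was buffered at all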
- if c_size >= size:
- raise StopIteration()
except StopIteration:
if not c_size:
raise
jinja2.ext
~~~~~~~~~~
- Jinja extensions (EXPERIMENAL)
-
- The plan: i18n and caching becomes a parser extension. cache/endcache
- as well as trans/endtrans are not keyword and don't have nodes but
- translate into regular jinja nodes so that the person who writes such
- custom tags doesn't have to generate python code himself.
+    Jinja extensions allow adding custom tags similar to the way Django
+    custom tags work. By default two example extensions exist: an i18n
+    and a cache extension.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
+from collections import deque
from jinja2 import nodes
+from jinja2.environment import get_spontaneous_environment
+from jinja2.runtime import Undefined
+from jinja2.parser import statement_end_tokens
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import import_string
+
+
+# the only really useful gettext functions for a Jinja template. Note
+# that ugettext must be assigned to gettext as Jinja doesn't support
+# non unicode strings.
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class Extension(object):
nodes.Call(nodes.Name('cache_support', 'load'), args, [], None, None),
[], [], body
)
+
+
+class TransExtension(Extension):
+ """This extension adds gettext support to Jinja."""
+ tags = set(['trans'])
+
+ def __init__(self, environment):
+ Extension.__init__(self, environment)
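+        # install no-op translation functions as defaults so templates
+        # still render when the application never provides real gettext
+        # and ngettext callables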
+ environment.globals.update({
+ '_': lambda x: x,
+ 'gettext': lambda x: x,
+ 'ngettext': lambda s, p, n: (s, p)[n != 1]
+ })
+
+ def parse(self, parser):
+ """Parse a translatable tag."""
+ lineno = parser.stream.next().lineno
+
+ # skip colon for python compatibility
+ if parser.stream.current.type is 'colon':
+ parser.stream.next()
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+        # a later stage.
+ plural_expr = None
+ variables = {}
+ while parser.stream.current.type is not 'block_end':
+ if variables:
+ parser.stream.expect('comma')
+ name = parser.stream.expect('name')
+ if name.value in variables:
+ raise TemplateAssertionError('translatable variable %r defined '
+ 'twice.' % name.value, name.lineno,
+ parser.filename)
+
+ # expressions
+ if parser.stream.current.type is 'assign':
+ parser.stream.next()
+ variables[name.value] = var = parser.parse_expression()
+ else:
+ variables[name.value] = var = nodes.Name(name.value, 'load')
+ if plural_expr is None:
+ plural_expr = var
+ parser.stream.expect('block_end')
+
+ plural = plural_names = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], 'load')
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test('name:pluralize'):
+ have_plural = True
+ parser.stream.next()
+ if parser.stream.current.type is not 'block_end':
+ plural_expr = parser.parse_expression()
+ parser.stream.expect('block_end')
+ plural_names, plural = self._parse_block(parser, False)
+ parser.stream.next()
+ referenced.update(plural_names)
+ else:
+ parser.stream.next()
+
+ # register free names as simple name expressions
+ for var in referenced:
+ if var not in variables:
+ variables[var] = nodes.Name(var, 'load')
+
+ # no variables referenced? no need to escape
+ if not referenced:
+ singular = singular.replace('%%', '%')
+ if plural:
+ plural = plural.replace('%%', '%')
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ raise TemplateAssertionError('pluralize without variables',
+ lineno, parser.filename)
+
+ if variables:
+ variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y)
+ for x, y in variables.items()])
+ else:
+ variables = None
+
+ node = self._make_node(singular, plural, variables, plural_expr)
+ node.set_lineno(lineno)
+ return node
+
+ def _parse_block(self, parser, allow_pluralize):
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+ while 1:
+ if parser.stream.current.type is 'data':
+ buf.append(parser.stream.current.value.replace('%', '%%'))
+ parser.stream.next()
+ elif parser.stream.current.type is 'variable_begin':
+ parser.stream.next()
+ name = parser.stream.expect('name').value
+ referenced.append(name)
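+                # the buffer is later used as a %-format string, so a
+                # variable reference becomes a %(name)s placeholder
+                # (literal percent signs in the data were doubled above)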
+ buf.append('%%(%s)s' % name)
+ parser.stream.expect('variable_end')
+ elif parser.stream.current.type is 'block_begin':
+ parser.stream.next()
+ if parser.stream.current.test('name:endtrans'):
+ break
+ elif parser.stream.current.test('name:pluralize'):
+ if allow_pluralize:
+ break
+ raise TemplateSyntaxError('a translatable section can '
+ 'have only one pluralize '
+ 'section',
+ parser.stream.current.lineno,
+ parser.filename)
+ raise TemplateSyntaxError('control structures in translatable'
+ ' sections are not allowed.',
+ parser.stream.current.lineno,
+ parser.filename)
+ else:
+ assert False, 'internal parser error'
+
+ return referenced, u''.join(buf)
+
+ def _make_node(self, singular, plural, variables, plural_expr):
+ """Generates a useful node from the data provided."""
+ # singular only:
+ if plural_expr is None:
+ gettext = nodes.Name('gettext', 'load')
+ node = nodes.Call(gettext, [nodes.Const(singular)],
+ [], None, None)
+ if variables:
+ node = nodes.Mod(node, variables)
+
+ # singular and plural
+ else:
+ ngettext = nodes.Name('ngettext', 'load')
+ node = nodes.Call(ngettext, [
+ nodes.Const(singular),
+ nodes.Const(plural),
+ plural_expr
+ ], [], None, None)
+ if variables:
+ node = nodes.Mod(node, variables)
+ return nodes.Output([node])
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS):
+ """Extract localizable strings from the given template node.
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string itself (a ``unicode`` object, or a tuple
+ of ``unicode`` objects for functions with multiple string arguments).
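+
+    A rough usage sketch (``env`` here stands in for any configured
+    environment)::
+
+        node = env.parse(source)
+        for lineno, funcname, message in extract_from_ast(node):
+            print lineno, funcname, message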
+ """
+ for node in node.find_all(nodes.Call):
+ if not isinstance(node.node, nodes.Name) or \
+ node.node.name not in gettext_functions:
+ continue
+
+ strings = []
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and \
+ isinstance(arg.value, basestring):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ if len(strings) == 1:
+ strings = strings[0]
+ else:
+ strings = tuple(strings)
+ yield node.lineno, node.node.name, strings
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+ """Babel extraction method for Jinja templates.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results. (Unused)
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
+ """
+ encoding = options.get('encoding', 'utf-8')
+
+ have_trans_extension = False
+ extensions = []
+ for extension in options.get('extensions', '').split(','):
+ extension = extension.strip()
+ if not extension:
+ continue
+ extension = import_string(extension)
+ if extension is TransExtension:
+ have_trans_extension = True
+ extensions.append(extension)
+ if not have_trans_extension:
+ extensions.append(TransExtension)
+
+ environment = get_spontaneous_environment(
+ options.get('block_start_string', '{%'),
+ options.get('block_end_string', '%}'),
+ options.get('variable_start_string', '{{'),
+ options.get('variable_end_string', '}}'),
+ options.get('comment_start_string', '{#'),
+ options.get('comment_end_string', '#}'),
+ options.get('line_statement_prefix') or None,
+ options.get('trim_blocks', '').lower() in ('1', 'on', 'yes', 'true'),
+ tuple(extensions),
+ # fill with defaults so that environments are shared
+        # with other spontaneous environments.
+ True, Undefined, unicode
+ )
+
+ node = environment.parse(fileobj.read().decode(encoding))
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, []
+++ /dev/null
-# -*- coding: utf-8 -*-
-"""
- jinja2.i18n
- ~~~~~~~~~~~
-
- i18n support for Jinja.
-
- :copyright: Copyright 2008 by Armin Ronacher.
- :license: BSD.
-"""
-from collections import deque
-from jinja2 import nodes
-from jinja2.environment import Environment
-from jinja2.parser import statement_end_tokens
-from jinja2.ext import Extension
-from jinja2.exceptions import TemplateAssertionError
-
-
-# the only real useful gettext functions for a Jinja template. Note
-# that ugettext must be assigned to gettext as Jinja doesn't support
-# non unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
-
-
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS):
- """Extract localizable strings from the given template node.
-
- For every string found this function yields a ``(lineno, function,
- message)`` tuple, where:
-
- * ``lineno`` is the number of the line on which the string was found,
- * ``function`` is the name of the ``gettext`` function used (if the
- string was extracted from embedded Python code), and
- * ``message`` is the string itself (a ``unicode`` object, or a tuple
- of ``unicode`` objects for functions with multiple string arguments).
- """
- for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
- continue
-
- strings = []
- for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, basestring):
- strings.append(arg.value)
- else:
- strings.append(None)
-
- if len(strings) == 1:
- strings = strings[0]
- else:
- strings = tuple(strings)
- yield node.lineno, node.node.name, strings
-
-
-def babel_extract(fileobj, keywords, comment_tags, options):
- """Babel extraction method for Jinja templates.
-
- :param fileobj: the file-like object the messages should be extracted from
- :param keywords: a list of keywords (i.e. function names) that should be
- recognized as translation functions
- :param comment_tags: a list of translator tags to search for and include
- in the results. (Unused)
- :param options: a dictionary of additional options (optional)
- :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
- (comments will be empty currently)
- """
- encoding = options.get('encoding', 'utf-8')
- extensions = [x.strip() for x in options.get('extensions', '').split(',')]
- environment = Environment(
- options.get('block_start_string', '{%'),
- options.get('block_end_string', '%}'),
- options.get('variable_start_string', '{{'),
- options.get('variable_end_string', '}}'),
- options.get('comment_start_string', '{#'),
- options.get('comment_end_string', '#}'),
- options.get('line_statement_prefix') or None,
- options.get('trim_blocks', '').lower() in ('1', 'on', 'yes', 'true'),
- extensions=[x for x in extensions if x]
- )
-
- # add the i18n extension only if it's not yet in the list. Some people
- # might use a script to sync the babel ini with the Jinja configuration
- # so we want to avoid having the trans extension twice in the list.
- for extension in environment.extensions:
- if isinstance(extension, TransExtension):
- break
- else:
- environment.extensions.append(TransExtension(environment))
-
- node = environment.parse(fileobj.read().decode(encoding))
- for lineno, func, message in extract_from_ast(node, keywords):
- yield lineno, func, message, []
-
-
-class TransExtension(Extension):
- tags = set(['trans'])
-
- def __init__(self, environment):
- Extension.__init__(self, environment)
- environment.globals.update({
- '_': lambda x: x,
- 'gettext': lambda x: x,
- 'ngettext': lambda s, p, n: (s, p)[n != 1]
- })
-
- def parse(self, parser):
- """Parse a translatable tag."""
- lineno = parser.stream.next().lineno
-
- # skip colon for python compatibility
- if parser.stream.current.type is 'colon':
- parser.stream.next()
-
- # find all the variables referenced. Additionally a variable can be
- # defined in the body of the trans block too, but this is checked at
- # a later state.
- plural_expr = None
- variables = {}
- while parser.stream.current.type is not 'block_end':
- if variables:
- parser.stream.expect('comma')
- name = parser.stream.expect('name')
- if name.value in variables:
- raise TemplateAssertionError('translatable variable %r defined '
- 'twice.' % name.value, name.lineno,
- parser.filename)
-
- # expressions
- if parser.stream.current.type is 'assign':
- parser.stream.next()
- variables[name.value] = var = parser.parse_expression()
- else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
- if plural_expr is None:
- plural_expr = var
- parser.stream.expect('block_end')
-
- plural = plural_names = None
- have_plural = False
- referenced = set()
-
- # now parse until endtrans or pluralize
- singular_names, singular = self._parse_block(parser, True)
- if singular_names:
- referenced.update(singular_names)
- if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
-
- # if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
- have_plural = True
- parser.stream.next()
- if parser.stream.current.type is not 'block_end':
- plural_expr = parser.parse_expression()
- parser.stream.expect('block_end')
- plural_names, plural = self._parse_block(parser, False)
- parser.stream.next()
- referenced.update(plural_names)
- else:
- parser.stream.next()
-
- # register free names as simple name expressions
- for var in referenced:
- if var not in variables:
- variables[var] = nodes.Name(var, 'load')
-
- # no variables referenced? no need to escape
- if not referenced:
- singular = singular.replace('%%', '%')
- if plural:
- plural = plural.replace('%%', '%')
-
- if not have_plural:
- plural_expr = None
- elif plural_expr is None:
- raise TemplateAssertionError('pluralize without variables',
- lineno, parser.filename)
-
- if variables:
- variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y)
- for x, y in variables.items()])
- else:
- variables = None
-
- node = self._make_node(singular, plural, variables, plural_expr)
- node.set_lineno(lineno)
- return node
-
- def _parse_block(self, parser, allow_pluralize):
- """Parse until the next block tag with a given name."""
- referenced = []
- buf = []
- while 1:
- if parser.stream.current.type is 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
- parser.stream.next()
- elif parser.stream.current.type is 'variable_begin':
- parser.stream.next()
- name = parser.stream.expect('name').value
- referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type is 'block_begin':
- parser.stream.next()
- if parser.stream.current.test('name:endtrans'):
- break
- elif parser.stream.current.test('name:pluralize'):
- if allow_pluralize:
- break
- raise TemplateSyntaxError('a translatable section can '
- 'have only one pluralize '
- 'section',
- parser.stream.current.lineno,
- parser.filename)
- raise TemplateSyntaxError('control structures in translatable'
- ' sections are not allowed.',
- parser.stream.current.lineno,
- parser.filename)
- else:
- assert False, 'internal parser error'
-
- return referenced, u''.join(buf)
-
- def _make_node(self, singular, plural, variables, plural_expr):
- """Generates a useful node from the data provided."""
- # singular only:
- if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
- if variables:
- node = nodes.Mod(node, variables)
-
- # singular and plural
- else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
- if variables:
- node = nodes.Mod(node, variables)
- return nodes.Output([node])
import unicodedata
from jinja2.datastructure import TokenStream, Token
from jinja2.exceptions import TemplateSyntaxError
-from weakref import WeakValueDictionary
+from jinja2.utils import LRUCache
# cache for the lexers so that multiple environments with the same
# configuration can share one lexer
-_lexer_cache = WeakValueDictionary()
-
+_lexer_cache = LRUCache(10)
# static regular expressions
whitespace_re = re.compile(r'\s+(?um)')
name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
float_re = re.compile(r'\d+\.\d+')
-
# set of used keywords
keywords = set(['and', 'block', 'elif', 'else', 'endblock', 'print',
'endfilter', 'endfor', 'endif', 'endmacro', 'endraw',
def unescape_string(lineno, filename, s):
- r"""
- Unescape a string. Supported escapes:
+ r"""Unescape a string. Supported escapes:
    \a, \n, \r, \f, \v, \\, \", \', \0
\x00, \u0000, \U00000000, \N{...}
-
- Not supported are \101 because imho redundant.
"""
- result = []
- write = result.append
- chariter = iter(s)
- next_char = chariter.next
-
- # faster lookup
- sescapes = simple_escapes
- uescapes = unicode_escapes
-
try:
- for char in chariter:
- if char == '\\':
- char = next_char()
- if char in sescapes:
- write(sescapes[char])
- elif char in uescapes:
- seq = [next_char() for x in xrange(uescapes[char])]
- try:
- write(unichr(int(''.join(seq), 16)))
- except ValueError:
- raise TemplateSyntaxError('invalid unicode codepoint',
- lineno, filename)
- elif char == 'N':
- if next_char() != '{':
- raise TemplateSyntaxError('no name for codepoint',
- lineno, filename)
- seq = []
- while 1:
- char = next_char()
- if char == '}':
- break
- seq.append(char)
- try:
- write(unicodedata.lookup(u''.join(seq)))
- except KeyError:
- raise TemplateSyntaxError('unknown character name',
- lineno, filename)
- else:
- write('\\' + char)
- else:
- write(char)
- except StopIteration:
- raise TemplateSyntaxError('invalid string escape', lineno, filename)
- return u''.join(result)
-
-
-def unescape_regex(s):
- """
- Unescape rules for regular expressions.
- """
- buffer = []
- write = buffer.append
- in_escape = False
- for char in s:
- if in_escape:
- in_escape = False
- if char not in safe_chars:
- write('\\' + char)
- continue
- write(char)
- return u''.join(buffer)
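+        # encode with 'backslashreplace' so non-ASCII characters survive
+        # as escape sequences, then let 'unicode-escape' resolve all
+        # escapes in a single pass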
+ return s.encode('ascii', 'backslashreplace').decode('unicode-escape')
+ except UnicodeError, e:
+ msg = str(e).split(':')[-1].strip()
+ raise TemplateSyntaxError(msg, lineno, filename)
class Failure(object):
- """
- Class that raises a `TemplateSyntaxError` if called.
+ """Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
class LexerMeta(type):
- """
- Metaclass for the lexer that caches instances for
+ """Metaclass for the lexer that caches instances for
    the same configuration.
"""
environment.comment_end_string,
environment.line_statement_prefix,
environment.trim_blocks)
-
- # use the cached lexer if possible
- if key in _lexer_cache:
- return _lexer_cache[key]
-
- # create a new lexer and cache it
- lexer = type.__call__(cls, environment)
- _lexer_cache[key] = lexer
+ lexer = _lexer_cache.get(key)
+ if lexer is None:
+ lexer = type.__call__(cls, environment)
+ _lexer_cache[key] = lexer
return lexer
class Lexer(object):
- """
- Class that implements a lexer for a given environment. Automatically
+ """Class that implements a lexer for a given environment. Automatically
    created by the environment class; usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
return TokenStream(generate(), filename)
def tokeniter(self, source, filename=None):
- """
- This method tokenizes the text and returns the tokens in a generator.
- Use this method if you just want to tokenize a template. The output
- you get is not compatible with the input the jinja parser wants. The
- parser uses the `tokenize` function with returns a `TokenStream` and
- keywords instead of just names.
+ """This method tokenizes the text and returns the tokens in a
+ generator. Use this method if you just want to tokenize a template.
+ The output you get is not compatible with the input the jinja parser
+        wants. The parser uses the `tokenize` function which returns a
+ `TokenStream` and postprocessed tokens.
"""
source = '\n'.join(source.splitlines())
pos = 0
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
- 'in': operator.contains,
- 'notin': lambda a, b: b not in a
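+    # operator.contains takes (container, item), i.e. the operands
+    # reversed, so plain lambdas are both clearer and correct here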
+ 'in': lambda a, b: a in b,
+ 'notin': lambda a, b: a not in b
}
def as_const(self):
result = value = self.expr.as_const()
- for op in self.ops:
- new_value = op.expr.as_const()
- result = _cmpop_to_func[op.op](value, new_value)
- value = new_value
+ try:
+ for op in self.ops:
+ new_value = op.expr.as_const()
+ result = _cmpop_to_func[op.op](value, new_value)
+ value = new_value
+        except Exception:
+ raise Impossible()
return result
'macro', 'include'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq', 'in'])
statement_end_tokens = set(['variable_end', 'block_end', 'in'])
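+# tokens that may close an open (unparenthesized) tuple expression;
+# used by parse_tuple below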
+_tuple_edge_tokens = set(['rparen']) | statement_end_tokens
class Parser(object):
while 1:
if args:
self.stream.expect('comma')
- if self.stream.current.type in statement_end_tokens:
+ if self.stream.current.type in _tuple_edge_tokens:
break
args.append(parse())
- if self.stream.current.type is not 'comma':
+ if self.stream.current.type is 'comma':
+ is_tuple = True
+ else:
break
- is_tuple = True
lineno = self.stream.current.lineno
if not is_tuple and args:
if enforce:
self.stream.next()
elif token.type is 'variable_begin':
self.stream.next()
- want_comma = False
- while not self.stream.current.test_many(statement_end_tokens):
- if want_comma:
- self.stream.expect('comma')
- add_data(self.parse_expression())
- want_comma = True
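+                # parse_tuple handles both a single expression and an
+                # implicit tuple, replacing the manual comma loop that
+                # used to live here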
+ add_data(self.parse_tuple())
self.stream.expect('variable_end')
elif token.type is 'block_begin':
flush_data()
for name, obj in self.parent.iteritems():
if type(obj) is FunctionType:
if getattr(obj, 'contextfunction', 0):
- self.vars[key] = partial(obj, self)
+ self.vars[name] = partial(obj, self)
elif getattr(obj, 'environmentfunction', 0):
- self.vars[key] = partial(obj, environment)
+ self.vars[name] = partial(obj, environment)
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
'called %r.' % block)
return SuperBlock(block, self, last)
+ def get(self, name, default=None):
+ """For dict compatibility"""
+ try:
+ return self[name]
+ except KeyError:
+ return default
+
def update(self, mapping):
"""Update vars from a mapping but don't export them."""
self.vars.update(mapping)
self.vars[key] = value
self.exported_vars.add(key)
+ def __contains__(self, name):
+ return name in self.vars or name in self.parent
+
def __getitem__(self, key):
if key in self.vars:
return self.vars[key]
extras_require={'i18n': ['Babel>=0.8']},
entry_points="""
[babel.extractors]
- jinja2 = jinja.i18n:babel_extract[i18n]
+    jinja2 = jinja2.ext:babel_extract[i18n]
"""
)
i18n_env = Environment(
loader=DictLoader(templates),
- extensions=['jinja2.i18n.TransExtension']
+ extensions=['jinja2.ext.TransExtension']
)
i18n_env.globals.update({
'_': gettext,
def test_extract():
- from jinja2.i18n import babel_extract
+ from jinja2.ext import babel_extract
from StringIO import StringIO
source = StringIO('''
{{ gettext('Hello World') }}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
''')
assert list(babel_extract(source, ('gettext', 'ngettext', '_'), [], {})) == [
- (2, 'gettext', 'Hello World', []),
+ (2, 'gettext', u'Hello World', []),
(3, 'gettext', u'Hello World', []),
(4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
]
"""
SIMPLE = '''\
-{% macro say_hello name %}Hello {{ name }}!{% endmacro %}
+{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}\
'''
-KWARGSFAILURE = '''\
-{% macro foo bar %}...{% endmacro %}
-{{ foo(bar='blub') }}\
-'''
-
SCOPING = '''\
-{% macro level1 data1 %}
-{% macro level2 data2 %}{{ data1 }}|{{ data2 }}{% endmacro %}
+{% macro level1(data1) %}
+{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}|{{ level2('bar') }}\
'''
ARGUMENTS = '''\
-{% macro m a, b, c='c', d='d' %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
+{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}\
'''
-PARENTHESES = '''\
-{% macro foo(a, b) %}{{ a }}|{{ b }}{% endmacro %}\
-{{ foo(1, 2) }}\
-'''
-
VARARGS = '''\
-{% macro test %}{{ varargs|join('|') }}{% endmacro %}\
+{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}\
'''
SIMPLECALL = '''\
-{% macro test %}[[{{ caller() }}]]{% endmacro %}\
+{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}\
'''
COMPLEXCALL = '''\
-{% macro test %}[[{{ caller(data='data') }}]]{% endmacro %}\
+{% macro test() %}[[{{ caller(data='data') }}]]{% endmacro %}\
{% call test() %}{{ data }}{% endcall %}\
'''
assert tmpl.render() == 'Hello Peter!'
-def test_kwargs_failure(env):
- from jinja2.exceptions import TemplateRuntimeError
- tmpl = env.from_string(KWARGSFAILURE)
- try:
- tmpl.render()
- except TemplateRuntimeError, e:
- pass
- else:
- raise AssertionError('kwargs failure test failed')
-
-
def test_scoping(env):
tmpl = env.from_string(SCOPING)
assert tmpl.render() == 'foo|bar|'
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
-def test_parentheses(env):
- tmpl = env.from_string(PARENTHESES)
- assert tmpl.render() == '1|2'
-
-
def test_varargs(env):
tmpl = env.from_string(VARARGS)
assert tmpl.render() == '1|2|3'
'{{ 1, 2 }}',
'{% for foo, bar in seq %}...{% endfor %}',
'{% for x in foo, bar %}...{% endfor %}',
- '{% for x in foo, %}...{% endfor %}',
- '{% for x in foo, recursive %}...{% endfor %}',
- '{% for x in foo, bar recursive %}...{% endfor %}',
- '{% for x, in foo, recursive %}...{% endfor %}'
+ '{% for x in foo, %}...{% endfor %}'
]
TRAILINGCOMMA = '''{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}'''