--- /dev/null
+from jinja2 import nodes
+from jinja2.ext import Extension
+
+
+class CacheExtension(Extension):
+ """Adds support for fragment caching to Jinja2."""
+ tags = set(['cache'])
+
+ def __init__(self, environment):
+ Extension.__init__(self, environment)
+
+ # default dummy implementations. If the environment does not implement
+ # those methods we add some noop defaults.
+ if not hasattr(environment, 'add_fragment_to_cache'):
+ environment.add_fragment_to_cache = lambda n, v, t: None
+ if not hasattr(environment, 'load_fragment_from_cache'):
+ environment.load_fragment_from_cache = lambda n: None
+
+ def parse(self, parser):
+ # the first token is the token that started the tag. In our case
+ # we only listen to ``'cache'`` so this will be a name token with
+ # `cache` as value. We get the line number so that we can give
+ # that line number to the nodes we create by hand.
+ lineno = parser.stream.next().lineno
+
+ # now we parse a single expression that is used as cache key.
+ args = [parser.parse_expression()]
+
+ # if there is a comma, someone provided the timeout. parse the
+ # timeout then
+ if parser.stream.current.type is 'comma':
+ parser.stream.next()
+ args.append(parser.parse_expression())
+
+ # otherwise set the timeout to `None`
+ else:
+ args.append(nodes.Const(None))
+
+ # now we parse the body of the cache block up to `endcache` and
+ # drop the needle (which would always be `endcache` in that case)
+ body = parser.parse_statements(['name:endcache'], drop_needle=True)
+
+ # now return a `CallBlock` node that calls our _cache_support
+ # helper method on this extension.
+ return nodes.CallBlock(
+ nodes.Call(self.attr('_cache_support'), args, [], None, None),
+ [], [], body
+ ).set_lineno(lineno)
+
+ def _cache_support(self, name, timeout, caller):
+ """Helper callback."""
+ # try to load the block from the cache
+ rv = self.environment.load_fragment_from_cache(name)
+ if rv is not None:
+ return rv
+
+ # if there is no fragment in the cache, render it and store
+ # it in the cache.
+ rv = caller()
+ self.environment.add_fragment_to_cache(name, rv, timeout)
+ return rv
jinja_env = Environment(extensions=['jinja2.ext.i18n'])
-Built-in Extensions
--------------------
-
.. _i18n-extension:
-i18n
-~~~~
+i18n Extension
+--------------
-The i18n extension can be used in combination with `gettext`_ or `babel`_.
-If the i18n extension is enabled Jinja2 provides a `trans` statement that
-marks the wrapped string as translatable and calls `gettext`.
+Jinja2 currently comes with one extension, the i18n extension. It can be
+used in combination with `gettext`_ or `babel`_. If the i18n extension is
+enabled, Jinja2 provides a `trans` statement that marks the wrapped string as
+translatable and calls `gettext`.
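+
+For example, a template could mark a string as translatable like this (a
+minimal sketch; the surrounding application still has to provide the actual
+message catalogs)::
+
+    {% trans %}Hello World!{% endtrans %}
+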
After enabling, dummy `_`, `gettext` and `ngettext` functions are added to
the template globals. An internationalized application has to override those
.. _gettext: http://docs.python.org/dev/library/gettext
.. _babel: http://babel.edgewall.org/
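+
+A minimal sketch of such an override using the standard `gettext`_ module
+(the domain and locale directory below are placeholders)::
+
+    import gettext
+    translations = gettext.translation('myapplication', 'locale', ['de'])
+    jinja_env.globals.update(
+        _=translations.ugettext,
+        gettext=translations.ugettext,
+        ngettext=translations.ungettext
+    )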
+
.. _writing-extensions:
Writing Extensions
------------------
-TODO
+By writing extensions you can add custom tags to Jinja2. This is a
+non-trivial task and usually not needed, as the default tags and expressions
+cover all common use cases. The i18n extension is a good example of why
+extensions are useful; another one would be fragment caching.
+
+Example Extension
+~~~~~~~~~~~~~~~~~
+
+The following example implements a `cache` tag for Jinja2:
+
+.. literalinclude:: cache_extension.py
+ :language: python
+
+In order to use the cache extension it makes sense to subclass the environment
+and implement the `add_fragment_to_cache` and `load_fragment_from_cache`
+methods. The following example shows how to use the `Werkzeug`_ caching
+system with the extension from above::
+
+ from jinja2 import Environment
+ from werkzeug.contrib.cache import SimpleCache
+
+ cache = SimpleCache()
+ cache_prefix = 'template_fragment/'
+
+ class MyEnvironment(Environment):
+
+ def __init__(self):
+ Environment.__init__(self, extensions=[CacheExtension])
+
+ def add_fragment_to_cache(self, key, value, timeout):
+ cache.add(cache_prefix + key, value, timeout)
+
+ def load_fragment_from_cache(self, key):
+ return cache.get(cache_prefix + key)
+
+.. _Werkzeug: http://werkzeug.pocoo.org/
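+
+Inside a template the new tag could then be used roughly like this (the cache
+key and the timeout in seconds are hypothetical values chosen by the
+application)::
+
+    {% cache 'sidebar', 300 %}
+    ...expensive sidebar rendering...
+    {% endcache %}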
+
+Extension API
+~~~~~~~~~~~~~
+
+Extensions always have to extend the :class:`jinja2.ext.Extension` class:
+
+.. autoclass:: Extension
+ :members: parse, attr
+
+ .. attribute:: identifier
+
+ The identifier of the extension. This is always the true import name
+ of the extension class and must not be changed.
+
+ .. attribute:: tags
+
+ If the extension implements custom tags this is a set of tag names
+ the extension is listening for.
+
+Parser API
+~~~~~~~~~~
+
+The parser passed to :meth:`Extension.parse` provides ways to parse
+expressions of different types. The following methods may be used by
+extensions:
+
+.. autoclass:: jinja2.parser.Parser
+ :members: parse_expression, parse_tuple, parse_statements, ignore_colon,
+ free_identifier
+
+ .. attribute:: filename
+
+ The filename of the template the parser processes. This is **not**
+ the load name of the template, which is unavailable at parsing time.
+ For templates that were not loaded from the file system this is
+ `None`.
+
+ .. attribute:: stream
+
+ The current :class:`~jinja2.lexer.TokenStream`
+
+.. autoclass:: jinja2.lexer.TokenStream
+ :members: push, look, eos, skip, next, expect
+
+ .. attribute:: current
+
+ The current :class:`~jinja2.lexer.Token`.
+
+.. autoclass:: jinja2.lexer.Token
+ :members: test, test_any
+
+ .. attribute:: lineno
+
+ The line number of the token.
+
+ .. attribute:: type
+
+ The type of the token. This string is interned so you may compare
+ it with arbitrary strings using the `is` operator.
+
+ .. attribute:: value
+
+ The value of the token.
+
+AST
+~~~
+
+The AST (Abstract Syntax Tree) is used to represent a template after parsing.
+It's built of nodes that the compiler then converts into executable Python
+code objects. Extensions that provide custom statements can return nodes to
+execute custom Python code.
+
+The list below describes all nodes that are currently available. The AST may
+change between Jinja2 versions but will stay backwards compatible.
+
+For more information have a look at the repr of :meth:`jinja2.Environment.parse`.
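+
+For example, parsing a tiny template might yield a tree like the following
+(the exact repr is illustrative and may differ between versions)::
+
+    >>> from jinja2 import Environment
+    >>> Environment().parse('{{ foo }}')
+    Template(body=[Output(nodes=[Name(name='foo', ctx='load')])])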
+
+.. module:: jinja2.nodes
+
+.. jinjanodes::
+
+.. autoexception:: Impossible
from jinja2 import Environment, FileSystemLoader
+def parse_rst(state, content_offset, doc):
+ node = nodes.section()
+ # hack around title style bookkeeping
+ surrounding_title_styles = state.memo.title_styles
+ surrounding_section_level = state.memo.section_level
+ state.memo.title_styles = []
+ state.memo.section_level = 0
+ state.nested_parse(doc, content_offset, node, match_titles=1)
+ state.memo.title_styles = surrounding_title_styles
+ state.memo.section_level = surrounding_section_level
+ return node.children
+
+
class JinjaStyle(Style):
title = 'Jinja Style'
default_style = ""
doc.append(line.rstrip(), '<jinjaext>')
finally:
changelog.close()
- node = nodes.section()
- # hack around title style bookkeeping
- surrounding_title_styles = state.memo.title_styles
- surrounding_section_level = state.memo.section_level
- state.memo.title_styles = []
- state.memo.section_level = 0
- state.nested_parse(doc, content_offset, node, match_titles=1)
- state.memo.title_styles = surrounding_title_styles
- state.memo.section_level = surrounding_section_level
- return node.children
+ return parse_rst(state, content_offset, doc)
from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS
jinja_tests = dump_functions(DEFAULT_TESTS)
+def jinja_nodes(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ from jinja2.nodes import Node
+ doc = ViewList()
+ def walk(node, indent):
+ p = ' ' * indent
+ sig = ', '.join(node.fields)
+ doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '')
+ if node.abstract:
+ members = []
+ for key, name in node.__dict__.iteritems():
+ if not key.startswith('_') and callable(name):
+ members.append(key)
+ if members:
+ members.sort()
+ doc.append('%s :members: %s' % (p, ', '.join(members)), '')
+ else:
+ doc.append('', '')
+ doc.append(p + ' :Node type: :class:`%s`' % node.__base__.__name__, '')
+ doc.append('', '')
+ children = node.__subclasses__()
+ children.sort(key=lambda x: x.__name__.lower())
+ for child in children:
+ walk(child, indent)
+ walk(Node, 0)
+ return parse_rst(state, content_offset, doc)
+
+
def setup(app):
app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0))
app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0))
app.add_directive('jinjachangelog', jinja_changelog, 0, (0, 0, 0))
+ app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0))
def generate(node, environment, name, filename, stream=None):
"""Generate the python source for a node tree."""
+ if not isinstance(node, nodes.Template):
+ raise TypeError('Can\'t compile non template nodes')
generator = CodeGenerator(environment, name, filename, stream)
generator.visit(node)
if stream is None:
self.filename = filename
self.stream = stream
+ # aliases for imports
+ self.import_aliases = {}
+
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
- self.writeline('name = %r' % self.name)
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
self.name)
self.blocks[block.name] = block
+ # find all imports and import them
+ for import_ in node.find_all(nodes.ImportedName):
+ if import_.importname not in self.import_aliases:
+ imp = import_.importname
+ self.import_aliases[imp] = alias = self.temporary_identifier()
+ if '.' in imp:
+ module, obj = imp.rsplit('.', 1)
+ self.writeline('from %s import %s as %s' %
+ (module, obj, alias))
+ else:
+ self.writeline('import %s as %s' % (imp, alias))
+
+ # add the load name
+ self.writeline('name = %r' % self.name)
+
# generate the root render function.
self.writeline('def root(context, environment=environment):', extra=1)
self.visit(node.expr, frame)
self.write(')')
+ def visit_EnvironmentAttribute(self, node, frame):
+ self.write('environment.' + node.name)
+
+ def visit_ExtensionAttribute(self, node, frame):
+ self.write('environment.extensions[%r].%s' % (node.identifier, node.attr))
+
+ def visit_ImportedName(self, node, frame):
+ self.write(self.import_aliases[node.importname])
+
+ def visit_InternalName(self, node, frame):
+ self.write(node.name)
+
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
- Returns a new list of instanciated environments.
+ Returns a dict of instantiated extensions.
"""
- result = []
+ result = {}
for extension in extensions:
if isinstance(extension, basestring):
extension = import_string(extension)
- result.append(extension(environment))
+ result[extension.identifier] = extension(environment)
return result
if cache_size is not missing:
rv.cache = create_cache(cache_size)
- rv.extensions = []
- for extension in self.extensions:
- rv.extensions.append(extension.bind(self))
+ rv.extensions = {}
+ for key, value in self.extensions.iteritems():
+ rv.extensions[key] = value.bind(rv)
if extensions is not missing:
- rv.extensions.extend(load_extensions(extensions))
+ rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+class ExtensionRegistry(type):
+ """Gives the extension a unique identifier."""
+
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
+ rv.identifier = rv.__module__ + '.' + rv.__name__
+ return rv
+
+
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. This is a supported but currently
that an extension can be bound to another environment (for overlays) by
creating a copy and reassigning the `environment` attribute.
"""
+ __metaclass__ = ExtensionRegistry
#: if this extension parses this is the list of tags it's listening to.
tags = set()
return rv
def parse(self, parser):
- """Called if one of the tags matched."""
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one node
+ or a list of multiple nodes.
+ """
+
+ def attr(self, name, lineno=None):
+ """Return an attribute node for the current extension. This is useful
+ to pass callbacks to template code::
+
+ nodes.Call(self.attr('_my_callback'), args, kwargs, None, None)
+
+ That would call `self._my_callback` when the template is evaluated.
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
class CacheExtension(Extension):
def __init__(self, environment):
Extension.__init__(self, environment)
- def dummy_cache_support(name, timeout=None, caller=None):
- if caller is not None:
- return caller()
- environment.globals['cache_support'] = dummy_cache_support
+ environment.globals['__cache_ext_support'] = self.cache_support
+
+ def cache_support(self, name, timeout, caller):
+ """Helper for the cache_fragment function."""
+ if not hasattr(self.environment, 'cache_support'):
+ return caller()
+ args = [name]
+ if timeout is not None:
+ args.append(timeout)
+ return self.environment.cache_support(generate=caller, *args)
def parse(self, parser):
lineno = parser.stream.next().lineno
if parser.stream.current.type is 'comma':
parser.stream.next()
args.append(parser.parse_expression())
+ else:
+ args.append(nodes.Const(None, lineno=lineno))
body = parser.parse_statements(('name:endcache',), drop_needle=True)
return nodes.CallBlock(
- nodes.Call(nodes.Name('cache_support', 'load'), args, [], None, None),
- [], [], body
+ nodes.Call(nodes.Name('__cache_ext_support', 'load', lineno=lineno),
+ args, [], None, None), [], [], body, lineno=lineno
)
"""Parse a translatable tag."""
lineno = parser.stream.next().lineno
- # skip colon for python compatibility
- if parser.stream.current.type is 'colon':
- parser.stream.next()
-
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
while parser.stream.current.type is not 'block_end':
if variables:
parser.stream.expect('comma')
+
+ # skip colon for python compatibility
+ if parser.ignore_colon():
+ break
+
name = parser.stream.expect('name')
if name.value in variables:
raise TemplateAssertionError('translatable variable %r defined '
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
+
parser.stream.expect('block_end')
plural = plural_names = None
#: nicer import names
i18n = InternationalizationExtension
-cache = CacheExtension
def test(self, expr):
"""Test a token against a token expression. This can either be a
- token type or 'token_type:token_value'. This can only test against
- string values!
+ token type or ``'token_type:token_value'``. This can only test
+ against string values and types.
"""
# here we do a regular string equality check as test_any is usually
# passed an iterable of not interned strings.
class TokenStream(object):
- """A token stream wraps a generator and supports pushing tokens back.
- It also provides some functions to expect tokens and similar stuff.
-
- Important note: Do never push more than one token back to the
- stream. Although the stream object won't stop you
- from doing so, the behavior is undefined. Multiple
- pushed tokens are only used internally!
+ """A token stream is an iterable that yields :class:`Token`\s. The
+ parser however does not iterate over it but calls :meth:`next` to go
+ one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(self, generator, filename):
return TokenStreamIterator(self)
def __nonzero__(self):
- """Are we at the end of the tokenstream?"""
+ """Are we at the end of the stream?"""
return bool(self._pushed) or self.current.type != 'eof'
eos = property(lambda x: not x.__nonzero__(), doc=__nonzero__.__doc__)
self._next = None
def expect(self, expr):
- """Expect a given token type and return it"""
+ """Expect a given token type and return it. This accepts the same
+ argument as :meth:`jinja2.lexer.Token.test`.
+ """
if not self.current.test(expr):
if ':' in expr:
expr = expr.split(':')[1]
}
+# if this is `True` no new Node classes can be created.
+_node_setup_finished = False
+
+
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
+ if __debug__ and _node_setup_finished:
+ raise TypeError('Can\'t create custom node types.')
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
+ d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class Node(object):
- """Baseclass for all Jinja nodes."""
+ """Baseclass for all Jinja2 nodes. There are a number of nodes available
+ of different types. There are four major types:
+
+ - :class:`Stmt`: statements
+ - :class:`Expr`: expressions
+ - :class:`Helper`: helper nodes
+ - :class:`Template`: the outermost wrapper node
+
+ All nodes have fields and attributes. Fields may be other nodes, lists,
+ or arbitrary values. Fields are passed to the constructor as regular
+ positional arguments, attributes as keyword arguments. Each node has
+ two attributes: `lineno` (the line number of the node) and `environment`.
+ The `environment` attribute is set at the end of the parsing process for
+ all nodes automatically.
+ """
__metaclass__ = NodeType
fields = ()
attributes = ('lineno', 'environment')
+ abstract = True
- def __init__(self, *args, **kw):
- if args:
- if len(args) != len(self.fields):
+ def __init__(self, *fields, **attributes):
+ if fields:
+ if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
- for name, arg in izip(self.fields, args):
+ for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
- setattr(self, attr, kw.pop(attr, None))
- if kw:
- raise TypeError('unknown keyword argument %r' %
- iter(kw).next())
+ setattr(self, attr, attributes.pop(attr, None))
+ if attributes:
+ raise TypeError('unknown attribute %r' %
+ iter(attributes).next())
def iter_fields(self, exclude=()):
- """Iterate over all fields."""
+ """This method iterates over all fields that are defined and yields
+ ``(key, value)`` tuples. Optionally a sequence of field names to
+ skip can be provided with the `exclude` parameter.
+ """
for name in self.fields:
if name not in exclude:
try:
pass
def iter_child_nodes(self, exclude=()):
- """Iterate over all child nodes."""
+ """Iterates over all direct child nodes of the node. This iterates
+ over all fields and yields the values if they are nodes. If the value
+ of a field is a list all the nodes in that list are returned.
+ """
for field, item in self.iter_fields(exclude):
if isinstance(item, list):
for n in item:
yield item
def find(self, node_type):
- """Find the first node of a given type."""
+ """Find the first node of a given type. If no such node exists the
+ return value is `None`.
+ """
for result in self.find_all(node_type):
return result
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
+ return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
+ return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
+ return self
def __repr__(self):
return '%s(%s)' % (
class Stmt(Node):
"""Base node for all statements."""
+ abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
+ abstract = True
class Template(Node):
- """Node that represents a template."""
+ """Node that represents a template. This must be the outermost node that
+ is passed to the compiler.
+ """
fields = ('body',)
class For(Stmt):
- """A node that represents a for loop"""
+ """The for loop. `target` is the target for the iteration (usually a
+ :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
+ of nodes that are used as loop-body, and `else_` a list of nodes for the
+ `else` block. If no else node exists it has to be an empty list.
+
+ For filtered nodes an expression can be stored as `test`, otherwise `None`.
+ """
fields = ('target', 'iter', 'body', 'else_', 'test')
class If(Stmt):
- """A node that represents an if condition."""
+ """If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
- """A node that represents a macro."""
+ """A macro definition. `name` is the name of the macro, `args` a list of
+ arguments and `defaults` a list of defaults if there are any. `body` is
+ a list of nodes for the macro body.
+ """
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
- """A node that represents am extended macro call."""
+ """Like a macro without a name but a call instead. `call` is called with
+ the unnamed macro as `caller` argument this node holds.
+ """
fields = ('call', 'args', 'defaults', 'body')
fields = ('template', 'names', 'with_context')
-class Trans(Stmt):
- """A node for translatable sections."""
- fields = ('singular', 'plural', 'indicator', 'replacements')
-
-
class ExprStmt(Stmt):
- """A statement that evaluates an expression to None."""
+ """A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Expr(Node):
"""Baseclass for all expressions."""
+ abstract = True
def as_const(self):
"""Return the value of the expression as constant or raise
- `Impossible` if this was not possible.
+ :exc:`Impossible` if this was not possible:
+
+ >>> Add(Const(23), Const(42)).as_const()
+ 65
+ >>> Add(Const(23), Name('var', 'load')).as_const()
+ Traceback (most recent call last):
+ ...
+ Impossible
+
+ This requires the `environment` attribute of all nodes to be
+ set to the environment that created the nodes.
"""
raise Impossible()
class Name(Expr):
- """any name such as {{ foo }}"""
+ """Looks up a name or stores a value in a name.
+ The `ctx` of the node can be one of the following values:
+
+ - `store`: store a value in the name
+ - `load`: load that name
+ - `param`: like `store` but if the name was defined as function parameter.
+ """
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none')
-class MarkSafe(Expr):
- """Mark the wrapped expression as safe (Markup)"""
- fields = ('expr',)
-
- def as_const(self):
- return Markup(self.expr.as_const())
-
-
class Literal(Expr):
"""Baseclass for literals."""
class Const(Literal):
- """any constat such as {{ "foo" }}"""
+ """All constant values. The parser will return this node for simple
+ constants such as ``42`` or ``"foo"`` but it can be used to store more
+ complex values such as lists too. Only constants with a safe
+ representation (objects where ``eval(repr(x)) == x`` is true) can
+ be stored this way.
+ """
fields = ('value',)
def as_const(self):
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
- for subscripts.
+ for subscripts. Like for :class:`Name`, `ctx` specifies whether the
+ tuple is used for loading or storing the names.
"""
fields = ('items', 'ctx')
class List(Literal):
- """any list literal such as {{ [1, 2, 3] }}"""
+ """Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self):
class Dict(Literal):
- """any dict literal such as {{ {1: 2, 3: 4} }}"""
+ """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
+ :class:`Pair` nodes.
+ """
fields = ('items',)
def as_const(self):
class Keyword(Helper):
- """A key, value pair for keyword arguments."""
+ """A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
class CondExpr(Expr):
- """{{ foo if bar else baz }}"""
+ """A conditional expression (inline if expression). (``{{
+ foo if bar else baz }}``)
+ """
fields = ('test', 'expr1', 'expr2')
def as_const(self):
class Filter(Expr):
- """{{ foo|bar|baz }}"""
+ """This node applies a filter on an expression. `name` is the name of
+ the filter, the rest of the fields are the same as for :class:`Call`.
+ """
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, obj=None):
class Test(Expr):
- """{{ foo is lower }}"""
+ """Applies a test on an expression. `name` is the name of the test, the
+ rest of the fields are the same as for :class:`Call`.
+ """
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
- """{{ foo(bar) }}"""
+ """Calls an expression. `args` is a list of arguments, `kwargs` a list
+ of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+ and `dyn_kwargs` has to be either `None` or a node that is used as
+ node for dynamic positional (``*args``) or keyword (``**kwargs``)
+ arguments.
+ """
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self):
class Subscript(Expr):
- """{{ foo.bar }} and {{ foo['bar'] }} etc."""
+ """Subscribe an expression by an argument. This node performs a dict
+ and an attribute lookup on the object whatever succeeds.
+ """
fields = ('node', 'arg', 'ctx')
def as_const(self):
class Slice(Expr):
- """1:2:3 etc."""
+ """Represents a slice object. This must only be used as argument for
+ :class:`Subscript`.
+ """
fields = ('start', 'stop', 'step')
def as_const(self):
class Concat(Expr):
- """For {{ foo ~ bar }}. Concatenates strings."""
+ """Concatenates the list of expressions provided after converting them to
+ unicode.
+ """
fields = ('nodes',)
def as_const(self):
class Compare(Expr):
- """{{ foo == bar }}, {{ foo >= bar }} etc."""
+ """Compares an expression with some other expressions. `ops` must be a
+ list of :class:`Operand`\s.
+ """
fields = ('expr', 'ops')
def as_const(self):
class Operand(Helper):
- """Operator + expression."""
+ """Holds an operator and an expression."""
fields = ('op', 'expr')
+if __debug__:
+ Operand.__doc__ += '\nThe following operators are available: ' + \
+ ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+ set(_uaop_to_func) | set(_cmpop_to_func)))
+
class Mul(BinExpr):
- """{{ foo * bar }}"""
+ """Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
- """{{ foo / bar }}"""
+ """Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
- """{{ foo // bar }}"""
+ """Divides the left by the right node and truncates conver the
+ result into an integer by truncating.
+ """
operator = '//'
class Add(BinExpr):
- """{{ foo + bar }}"""
+ """Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
- """{{ foo - bar }}"""
+ """Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
- """{{ foo % bar }}"""
+ """Left modulo right."""
operator = '%'
class Pow(BinExpr):
- """{{ foo ** bar }}"""
+ """Left to the power of right."""
operator = '**'
class And(BinExpr):
- """{{ foo and bar }}"""
+ """Short circuited AND."""
operator = 'and'
def as_const(self):
class Or(BinExpr):
- """{{ foo or bar }}"""
+ """Short circuited OR."""
operator = 'or'
def as_const(self):
class Not(UnaryExpr):
- """{{ not foo }}"""
+ """Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
- """{{ -foo }}"""
+ """Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
- """{{ +foo }}"""
+ """Make the expression positive (noop for most expressions)"""
operator = '+'
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+ """Loads an attribute from the environment object. This is useful for
+ extensions that want to call a callback stored on the environment.
+ """
+ fields = ('name',)
+
+
+class ExtensionAttribute(Expr):
+ """Returns the attribute of an extension bound to the environment.
+ The identifier is the identifier of the :class:`Extension`.
+ """
+ fields = ('identifier', 'attr')
+
+
+class ImportedName(Expr):
+ """If created with an import name the import name is returned on node
+ access. For example ``ImportedName('cgi.escape')`` returns the `escape`
+ function from the cgi module on evaluation. Imports are optimized by the
+ compiler so there is no need to assign them to local variables.
+ """
+ fields = ('importname',)
+
+
+class InternalName(Expr):
+ """An internal name in the compiler. You cannot create these nodes
+ yourself but the parser provides a `free_identifier` method that creates
+ a new identifier for you. This identifier is not available from the
+ template and is not treated specially by the compiler.
+ """
+ fields = ('name',)
+
+ def __init__(self):
+ raise TypeError('Can\'t create internal names. Use the '
+ '`free_identifier` method on a parser.')
+
+
+class MarkSafe(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+ fields = ('expr',)
+
+ def as_const(self):
+ return Markup(self.expr.as_const())
+
+
+# and close down
+_node_setup_finished = True
class Parser(object):
- """The template parser class.
-
- Transforms sourcecode into an abstract syntax tree.
+ """This is the central parsing class Jinja2 uses. It's passed to
+ extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, filename=None):
self.source = unicode(source)
self.filename = filename
self.closed = False
- self.stream = environment.lexer.tokenize(source, filename)
+ self.stream = environment.lexer.tokenize(self.source, filename)
self.extensions = {}
- for extension in environment.extensions:
+ for extension in environment.extensions.itervalues():
for tag in extension.tags:
self.extensions[tag] = extension.parse
-
- def is_statement_end(self):
- """Are we at the end of a statement?"""
- if self.stream.current.type in ('variable_end', 'block_end'):
- return True
- return self.stream.current.test('name:in')
+ self._last_identifier = 0
def is_tuple_end(self):
"""Are we at the end of a tuple?"""
- return self.stream.current.type is 'rparen' or self.is_statement_end()
+ return self.stream.current.type in ('variable_end', 'block_end',
+ 'rparen') or \
+ self.stream.current.test('name:in')
+
+ def ignore_colon(self):
+ """If there is a colon, skip it and return `True`, else `False`."""
+ if self.stream.current.type is 'colon':
+ self.stream.next()
+ return True
+ return False
+
+ def free_identifier(self, lineno=None):
+ """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+ self._last_identifier += 1
+ rv = object.__new__(nodes.InternalName)
+ nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+ return rv
def parse_statement(self):
"""Parse a single statement."""
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
- parses template data if appropriate.
+ parses template data if appropriate. The parser first checks if the
+ current token is a colon and skips it if there is one. Then it checks
+ for the block end and parses until one of the `end_tokens` is
+ reached. By default the active token in the stream at the end of
+ the call is the matched end token. If this is not wanted `drop_needle`
+ can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
- if self.stream.current.type is 'colon':
- self.stream.next()
+ self.ignore_colon()
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
def parse_print(self):
node = nodes.Output(lineno=self.stream.next().lineno)
node.nodes = []
- while not self.is_statement_end():
+ while self.stream.current.type is not 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_expression(self, no_condexpr=False):
- """Parse an expression."""
+ """Parse an expression. Per default all expressions are parsed, if
+ the optional `no_condexpr` parameter is set to `True` conditional
+ expressions are not parsed.
+ """
if no_condexpr:
return self.parse_or()
return self.parse_condexpr()
node = self.parse_postfix(node)
return node
- def parse_tuple(self, enforce=False, simplified=False, no_condexpr=False):
- """Parse multiple expressions into a tuple. This can also return
- just one expression which is not a tuple. If you want to enforce
- a tuple, pass it enforce=True (currently unused).
+ def parse_tuple(self, simplified=False, no_condexpr=False):
+ """Works like `parse_expression` but if multiple expressions are
+ delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+ This method could also return a plain expression instead of a tuple
+ if no commas were found.
+
+ The default parsing mode is a full tuple. If `simplified` is `True`
+ only names and literals are parsed. The `no_condexpr` parameter is
+ forwarded to :meth:`parse_expression`.
"""
lineno = self.stream.current.lineno
if simplified:
break
lineno = self.stream.current.lineno
if not is_tuple and args:
- if enforce:
- raise TemplateSyntaxError('tuple expected', lineno,
- self.filename)
return args[0]
return nodes.Tuple(args, 'load', lineno=lineno)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
- body.append(self.parse_statement())
+ rv = self.parse_statement()
+ if isinstance(rv, list):
+ body.extend(rv)
+ else:
+ body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')