    lexer = property(get_lexer, doc="The lexer for this environment.")

+    def iter_extensions(self):
+        """Iterates over the extensions by priority."""
+        return iter(sorted(self.extensions.values(),
+                           key=lambda x: x.priority))
+
    def getitem(self, obj, argument):
        """Get an item or attribute of an object but prefer the item."""
        try:
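
``iter_extensions`` replaces ad-hoc iteration over the unordered ``extensions`` dict so extensions are always visited in a predictable order. A minimal sketch of the ordering contract it relies on, with a hypothetical ``Dummy`` class standing in for real extensions::

    # Sketch of the ordering contract: plain sorted() with a key function.
    # Dummy is a hypothetical stand-in for an extension instance.
    class Dummy(object):
        def __init__(self, name, priority):
            self.name = name
            self.priority = priority

    exts = {'a': Dummy('a', 200), 'b': Dummy('b', 100)}
    ordered = sorted(exts.values(), key=lambda x: x.priority)
    assert [e.name for e in ordered] == ['b', 'a']  # lower value runs first
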
        because there you usually only want the actual source tokenized.
        """
        return reduce(lambda s, e: e.preprocess(s, name, filename),
-                      self.extensions.itervalues(), unicode(source))
+                      self.iter_extensions(), unicode(source))
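
The ``reduce`` call threads the template source through each extension's ``preprocess`` hook, now in priority order. Written as an explicit loop (a sketch, not the library code), it does this::

    # Equivalent loop form of the reduce() call above (sketch only).
    def preprocess_chain(extensions, source, name, filename):
        for ext in extensions:  # assumed already sorted by priority
            source = ext.preprocess(source, name, filename)
        return source
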

    def _tokenize(self, source, name, filename=None, state=None):
        """Called by the parser to do the preprocessing and filtering
        for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
        """
        source = self.preprocess(source, name, filename)
        stream = self.lexer.tokenize(source, name, filename, state)
-        for ext in self.extensions.itervalues():
+        for ext in self.iter_extensions():
            stream = ext.filter_stream(stream)
        if not isinstance(stream, TokenStream):
            stream = TokenStream(stream, name, filename)
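
``filter_stream`` gives each extension a chance to rewrite the token stream before parsing; a plain generator is fine because the result is re-wrapped in a ``TokenStream`` above. A hedged sketch of such an extension follows; the ``'data'`` token type and the ``Token`` constructor match Jinja2's lexer, but the extension itself is invented for illustration::

    from jinja2.ext import Extension
    from jinja2.lexer import Token

    class UpperDataExtension(Extension):
        """Illustrative only: uppercases raw template data."""
        priority = 50  # lower than the default 100, so it runs early

        def filter_stream(self, stream):
            for token in stream:
                if token.type == 'data':
                    yield Token(token.lineno, 'data', token.value.upper())
                else:
                    yield token
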
    #: if this extension parses, this is the list of tags it's listening to.
    tags = set()

+    #: the priority of that extension. This is especially useful for
+    #: extensions that preprocess values. A lower value means higher
+    #: priority.
+    #:
+    #: .. versionadded:: 2.4
+    priority = 100
+
    def __init__(self, environment):
        self.environment = environment
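
Since the default is 100, a subclass opts in to running earlier or later simply by overriding the class attribute. The subclasses here are hypothetical::

    from jinja2.ext import Extension

    class RunsEarly(Extension):
        priority = 10   # lower value: preprocesses before the others

    class RunsLate(Extension):
        priority = 500  # runs after every default-priority extension
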
        self.filename = filename
        self.closed = False
        self.extensions = {}
-        for extension in environment.extensions.itervalues():
+        for extension in environment.iter_extensions():
            for tag in extension.tags:
                self.extensions[tag] = extension.parse
        self._last_identifier = 0
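
The loop above is why a custom statement only has to declare its tag names in ``tags`` and implement ``parse``. A sketch modeled on Jinja2's documented extension API; the ``shout`` tag and the helper method are invented for illustration::

    from jinja2 import nodes
    from jinja2.ext import Extension

    class ShoutExtension(Extension):
        # the parser maps 'shout' to ShoutExtension.parse through the
        # tag table built in the loop above
        tags = set(['shout'])

        def parse(self, parser):
            lineno = parser.stream.next().lineno  # consume 'shout'
            body = parser.parse_statements(['name:endshout'],
                                           drop_needle=True)
            return nodes.CallBlock(self.call_method('_shout'),
                                   [], [], body).set_lineno(lineno)

        def _shout(self, caller):
            return caller().upper()

Registered with ``Environment(extensions=[ShoutExtension])``, a ``{% shout %}`` block renders its body uppercased.
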
        out = tmpl.render()
        assert out == 'Foo BAR Baz'

+    def test_extension_ordering(self):
+        class T1(Extension):
+            priority = 1
+        class T2(Extension):
+            priority = 2
+        env = Environment(extensions=[T1, T2])
+        ext = list(env.iter_extensions())
+        assert ext[0].__class__ is T1
+        assert ext[1].__class__ is T2
+

class InternationalizationTestCase(JinjaTestCase):