From 215809146c04657188b3617359e330b4fbd2bb12 Mon Sep 17 00:00:00 2001 From: Armin Ronacher Date: Tue, 17 Apr 2007 17:13:10 +0200 Subject: [PATCH] [svn] various updates i haven't checked in so far (see the diff of the changelog) and fixed critical bug reported by Alexey Melchakov --HG-- branch : trunk --- CHANGES | 18 +++ docs/generate.py | 2 +- docs/src/contextenv.txt | 23 +++- docs/src/designerdoc.txt | 14 +++ docs/src/frameworks.txt | 119 +++++++++++++++++++- docs/src/i18n.txt | 5 + docs/src/index.txt | 2 + jinja/datastructure.py | 66 ++++++++++- jinja/environment.py | 13 ++- jinja/filters.py | 7 +- jinja/lexer.py | 218 +++++++++++++++++++++++------------- jinja/parser.py | 131 +++++++++++++++------- jinja/plugin.py | 61 +++++++++- jinja/tests.py | 1 + jinja/translators/python.py | 29 ++--- setup.py | 4 + tests/test_filters.py | 54 +++++++++ tests/test_ifcondition.py | 6 + tests/test_inheritance.py | 17 ++- tests/test_lexer.py | 31 +++++ 20 files changed, 665 insertions(+), 156 deletions(-) create mode 100644 tests/test_lexer.py diff --git a/CHANGES b/CHANGES index 08b316f..de01f65 100644 --- a/CHANGES +++ b/CHANGES @@ -13,6 +13,11 @@ Version 1.1 - ``{{ debug() }}`` can now be used to get a list of filters and tags. +- the template lexer keeps not track of brace, parenthesis and + bracket balance in order to not break variable tags apart if they + are configured to look like this: ``${expr}``. This also fixes + the problem with nested dicts in variable expressions. + - added whitespace management system for the template designer. - some small bugfixes. @@ -25,6 +30,8 @@ Version 1.1 to workaround problems with `datetime.strftime` which only accepts binary strings. +- it's now possible to use newlines in string literals + - developer friendly traceback is now toggleable - silent variable name failure is now toggleable @@ -46,6 +53,17 @@ Version 1.1 - added `striptags` and `xmlattr` filters for easier SGML/XML processing +- the trans tag does not need explicit naming for variables with the same + name any more. You can now use ``{% trans foo %}`` instead of the verbose + version ``{% trans foo=foo %}``. + +- reimplemented Buffet plugin so that it works at least for pylons + +- added `Environment.get_translations_for_string` + +- fixed a bug in the parser that didn't unescape keyword arguments. (thanks + to Alexey Melchakov for reporting) + Version 1.0 ----------- diff --git a/docs/generate.py b/docs/generate.py index d6a4180..8acf5b7 100755 --- a/docs/generate.py +++ b/docs/generate.py @@ -246,7 +246,7 @@ def generate_documentation(data, link_style): data, writer=writer, settings_overrides={ - 'initial_header_level': 3, + 'initial_header_level': 2, 'field_name_limit': 50, } ) diff --git a/docs/src/contextenv.txt b/docs/src/contextenv.txt index f362a64..6257d10 100644 --- a/docs/src/contextenv.txt +++ b/docs/src/contextenv.txt @@ -40,6 +40,12 @@ in the template evaluation code you may want to override: Get the translations for the template `name`. Only works if a loader is present. See the `i18n`_ section for more details. +**def** `get_translations_for_string` *(self, string)*: + + Get the translations for the string `string`. This works also if no + loader is present and can be used to lookup translation strings from + templates that are loaded from dynamic resources like databases. 
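A minimal usage sketch for the method described above (not part of the patch itself; the ``(lineno, singular, plural)`` tuple layout is assumed from the i18n chapter):

.. sourcecode:: python

    from jinja import Environment

    env = Environment()

    # template source fetched from somewhere dynamic, e.g. a database column
    source = ('{% trans count %}One user found.'
              '{% pluralize %}{{ count }} users found.{% endtrans %}')

    # same return value as get_translations(): one tuple per translatable string
    for lineno, singular, plural in env.get_translations_for_string(source):
        print lineno, singular, plural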
+ **def** `apply_filters` *(self, value, context, filters)*: Now this function is a bit tricky and you usually don't have to override @@ -57,7 +63,7 @@ in the template evaluation code you may want to override: unicode. The filters for the names are stored on ``self.filters`` in a dict. Missing filters should raise a `FilterNotFound` exception. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. **def** `perform_test` *(self, context, testname, args, value, invert)*: @@ -72,7 +78,7 @@ in the template evaluation code you may want to override: Missing tests should raise a `TestNotFound` exception. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. **def** `get_attribute` *(self, obj, attribute)*: @@ -80,9 +86,14 @@ in the template evaluation code you may want to override: Get `attribute` from the object provided. The default implementation performs security tests. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. +**def** `get_attributes` *(self, obj, attributes)*: + + Get some attributes from the object. If `attributes` is an empty + sequence the object itself is returned unchanged. + **def** `call_function` *(self, f, context, args, kwargs, dyn_args, dyn_kwargs)*: Call a function `f` with the arguments `args`, `kwargs`, `dyn_args` and @@ -92,21 +103,21 @@ in the template evaluation code you may want to override: The default implementation performs some security checks. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. **def** `call_function_simple` *(self, f, context)*: Like `call_function` but without arguments. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. **def** `finish_var` *(self, value, ctx)*: Postprocess a variable before it's sent to the template. - **Warning** this is a jinja internal method. The actual implementation + **Warning** this is a Jinja internal method. The actual implementation and function signature might change. .. admonition:: Note diff --git a/docs/src/designerdoc.txt b/docs/src/designerdoc.txt index 2f5dcc7..dcdb5ab 100644 --- a/docs/src/designerdoc.txt +++ b/docs/src/designerdoc.txt @@ -922,5 +922,19 @@ defined in the ``trans`` header. Filters must be applied in the header. one user found. {% endif %} +*New in Jinja 1.1*: It's now possible to use the marker name as implicit +default: + +.. sourcecode:: jinja + + instead of this version: + + {% trans username=username %}Hello {{ username }}!{% endtrans %} + + you can now write this: + + {% trans username %}Hello {{ username }}!{% endtrans %} + + .. _slicing chapter: http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice .. 
_range function: http://docs.python.org/tut/node6.html#SECTION006300000000000000000 diff --git a/docs/src/frameworks.txt b/docs/src/frameworks.txt index 6d576db..952dd00 100644 --- a/docs/src/frameworks.txt +++ b/docs/src/frameworks.txt @@ -2,6 +2,119 @@ Framework Integration ===================== -Because the buffet template interface does not support more complex usage -cases there is currently no built in framework support. This however will -hopefully change before the Jinja release. +Starting with Jinja 1.1 it's possible to embed Jinja into some of the existing +frameworks a lot easier. When speaking of frameworks we only refer to `Pylons`_ +which has a working implementation of the TurboGears template specification. + +Since the whole situation is problematic because of various reasons (kid +specific, uses dotted names for template loading, package name prefix etc.) +we worked around some of the problems by using pylons specific workarounds. + +Jinja also ships an implementation for a hypothetical template abstraction layer +called `General Template Interface`_ which isn't implemented by any existing +framework so far. This specification however tries to solve the problems that +exist in Buffet. + +Buffet +====== + +The buffet specification proposes that templates are named in dotted names. That +means `foo.bar` and not `foo/bar.html`. The dotted notation has the disadvantage +that you cannot specify the filename extension. In recent pylons versions it's +however possible to load templates with their native path too if you prefix the +template name with a foreslash (`/foo/bar.html`). If you don't specify the +extension it will assume `.html` for the dotted notation. + +Here the list of configuration values: + +======================= ====================================================== +``jinja.extension`` The template extension when templates are loaded using + the dotted notation. Defaults to ``html``. +``jinja.environment`` If this is provided it must be the only configuration + value and it's used as jinja environment. In that + case all other configuration parameters except of + ``jinja.extension`` are ignored. +``jinja.searchpath`` If provided a new file system loader with this + search path is instanciated. +``jinja.package`` Name of the python package containing the + templates. If this and ``package_path`` is + defined a `PackageLoader` is used. +``jinja.package_path`` Path to the templates inside of a package. +``jinja.loader_func`` Function that takes the name of the template to + load. If it returns a string or unicode object + it's used to load a template. If the return + value is None it's considered missing. +``jinja.getmtime_func`` Function used to check if templates requires + reloading. Has to return the UNIX timestamp of + the last template change or 0 if this template + does not exist or requires updates at any cost. +``jinja.use_memcache`` Set this to ``True`` to enable memory caching. + This is usually a good idea in production mode, + but disable it during development since it won't + reload template changes automatically. + This only works in persistent environments like + FastCGI. +``jinja.memcache_size`` Number of template instance you want to cache. + Defaults to ``40``. +``jinja.cache_folder`` Set this to an existing directory to enable + caching of templates on the file system. Note + that this only affects templates transformed + into python code. Default is ``None`` which means + that caching is disabled. 
+``jinja.auto_reload`` Set this to `False` for a slightly better + performance. In that case of `getmtime_func` + not being provided this won't have an effect. +======================= ====================================================== + +All other options that start with `jinja.` are automatically forwarded to the +environment constructor. + +In pylons for example you can use jinja as buffet plugin like this: + +Edit the `yourproject/config/middleware.py` and add this to `config.init_app`: + +.. sourcecode:: python + + config.add_template_engine('jinja', '', { + 'jinja.package': 'yourapplication', + 'jinja.package_path': 'res/templates', + 'jinja.use_memcache': True + }) + +Note that it's a good idea to set the second parameter to an empty string. +It's meant to be used as replacement for the turbogears package name but +Jinja assumes that the name of the template does not include the package +path. + +You can then render the template in the view like this: + +.. sourcecode:: python + + class ExampleController(BaseController): + + def index(self): + c.title = "Your Page" + c.message = 'hi' + return render_response('jinja', 'test_template') + + def download(self): + c.title = "Downloads" + c.downloads = [1, 2, 3] + return render_response('jinja', '/downloads.html') + +With the settings from above rendering the `index` action will result in +rendering the template ``res/templates/test_template.html`` where res is +a folder in the ``yourapplication`` python package. + +The `downloads` action uses the pylons specific leading foreslash notation. + +General Template Interface +========================== + +Because nobody implemented this specification so far it's not documented here +but in the sourcecode of the `plugin module`_. The specification itself is +explained on the pocoo trac on the `General Template Interface`_ wiki page. + +.. _Pylons: http://www.pylonshq.com/ +.. _General Template Interface: http://trac.pocoo.org/wiki/GeneralTemplateInterface +.. _plugin module: http://trac.pocoo.org/browser/jinja/trunk/jinja/plugin.py diff --git a/docs/src/i18n.txt b/docs/src/i18n.txt index 18bd86f..621cc91 100644 --- a/docs/src/i18n.txt +++ b/docs/src/i18n.txt @@ -70,3 +70,8 @@ singular form and the third is the plural form if given. Because Jinja is not bound to gettext you can now use these strings to create translation files for any translation system. + +*New in Jinja 1.1* You can now extract translations from strings according +to the current envrionment settings too by using the environment method +`get_translations_for_string` which takes a string containing a template +as only argument. The return value is the same as for `get_translations`. diff --git a/docs/src/index.txt b/docs/src/index.txt index a369274..f911449 100644 --- a/docs/src/index.txt +++ b/docs/src/index.txt @@ -30,6 +30,8 @@ Welcome in the Jinja documentation. 
- `Internationalization `_ + - `Alternative Syntax `_ + - `Developer Recipies `_ - Template Designer Documentation: diff --git a/jinja/datastructure.py b/jinja/datastructure.py index 432e9df..4047d72 100644 --- a/jinja/datastructure.py +++ b/jinja/datastructure.py @@ -189,13 +189,25 @@ class Context(object): self.environment = _environment_ self._stack = [_environment_.globals, dict(*args, **kwargs), {}] self.globals, self.initial, self.current = self._stack - - # translator function added by the environment rendering function - self.translate_func = None + self._translate_func = None # cache object used for filters and tests self.cache = {} + def translate_func(self): + """ + Return the translator for this context. + """ + if self._translate_func is None: + translator = self.environment.get_translator(self) + def translate(s, p=None, n=None, r=None): + if p is None: + return translator.gettext(s) % (r or {}) + return translator.ngettext(s, p, r[n]) % (r or {}) + self._translate_func = translate + return self._translate_func + translate_func = property(translate_func, doc=translate_func.__doc__) + def pop(self): """ Pop the last layer from the stack and return it. @@ -404,16 +416,55 @@ class SuperBlock(object): return '' % self.name +class StateTest(object): + """ + Wrapper class for basic lambdas in order to simplify + debugging in the parser. It also provides static helper + functions that replace some lambda expressions + """ + + def __init__(self, func, error_message): + self.func = func + self.error_message = error_message + + def __call__(self, p, t, d): + return self.func(p, t, d) + + def expect_token(token_name, error_message=None): + """Scans until a token types is found.""" + return StateTest(lambda p, t, d: t == token_name, 'expected ' + + (error_message or token_name)) + expect_token = staticmethod(expect_token) + + def expect_name(*names): + """Scans until one of the given names is found.""" + if len(names) == 1: + name = names[0] + return StateTest(lambda p, t, d: t == 'name' and d == name, + "expected '%s'" % name) + else: + return StateTest(lambda p, t, d: t == 'name' and d in names, + 'expected one of %s' % ','.join(["'%s'" % name + for name in names])) + expect_name = staticmethod(expect_name) + + class TokenStream(object): """ A token stream works like a normal generator just that it supports pushing tokens back to the stream. 
""" - def __init__(self, generator): + def __init__(self, generator, filename): self._next = generator.next self._pushed = [] self.last = (1, 'initial', '') + self.filename = filename + + def bound(self): + """Return True if the token stream is bound to a parser.""" + return self.parser is not None + bound = property(bound, doc=bound.__doc__) def __iter__(self): """Return self in order to mark this is iterator.""" @@ -458,7 +509,12 @@ class TokenStream(object): else: yield token except StopIteration: - raise TemplateSyntaxError('end of stream reached') + if isinstance(test, StateTest): + msg = ': ' + test.error_message + else: + msg = '' + raise TemplateSyntaxError('end of stream' + msg, + self.last[0], self.filename) def drop_until(self, test, drop_needle=False): """Fetch tokens until a function matches and drop all diff --git a/jinja/environment.py b/jinja/environment.py index 64c0d4a..e1a057c 100644 --- a/jinja/environment.py +++ b/jinja/environment.py @@ -136,8 +136,10 @@ class Environment(object): self.friendly_traceback = friendly_traceback # global namespace - self.globals = namespace is None and DEFAULT_NAMESPACE.copy() \ - or namespace + if namespace is None: + self.globals = DEFAULT_NAMESPACE.copy() + else: + self.globals = namespace # jinja 1.0 compatibility if auto_escape: @@ -227,6 +229,13 @@ class Environment(object): """ return collect_translations(self.loader.parse(name)) + def get_translations_for_string(self, string): + """ + Like `get_translations`, but the translations are loaded from a + normal string that represents the template. + """ + return collect_translations(self.parse(string)) + def apply_filters(self, value, context, filters): """ Apply a list of filters on the variable. diff --git a/jinja/filters.py b/jinja/filters.py index fccaa02..cbb9cd2 100644 --- a/jinja/filters.py +++ b/jinja/filters.py @@ -660,7 +660,7 @@ def do_capture(name='captured', clean=False): def do_striptags(value, rex=re.compile(r'<[^>]+>')): """ Strip SGML/XML tags and replace adjacent whitespace by one space. - + *new in Jinja 1.1* """ return ' '.join(rex.sub('', value).split()) @@ -800,7 +800,10 @@ def do_round(precision=0, method='common'): return round(value, precision) import math func = getattr(math, method) - return func(value * 10 * precision) / (10 * precision) + if precision: + return func(value * 10 * precision) / (10 * precision) + else: + return func(value) return wrapped diff --git a/jinja/lexer.py b/jinja/lexer.py index 335bbdc..7d488db 100644 --- a/jinja/lexer.py +++ b/jinja/lexer.py @@ -25,6 +25,7 @@ import re from jinja.datastructure import TokenStream from jinja.exceptions import TemplateSyntaxError +from weakref import WeakValueDictionary try: set @@ -35,6 +36,11 @@ except NameError: __all__ = ['Lexer', 'Failure', 'keywords'] +# cache for the lexers. Exists in order to be able to have multiple +# environments with the same lexer +_lexer_cache = WeakValueDictionary() + + # static regular expressions whitespace_re = re.compile(r'\s+(?m)') name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*') @@ -75,12 +81,42 @@ class Failure(object): raise self.error_class(self.message, lineno) +class LexerMeta(type): + """ + Metaclass for the lexer that caches instances for + the same configuration in a weak value dictionary. 
+ """ + + def __call__(cls, environment): + key = hash((environment.block_start_string, + environment.block_end_string, + environment.variable_start_string, + environment.variable_end_string, + environment.comment_start_string, + environment.comment_end_string, + environment.trim_blocks)) + + # use the cached lexer if possible + if key in _lexer_cache: + return _lexer_cache[key] + + # create a new lexer and cache it + lexer = type.__call__(cls, environment) + _lexer_cache[key] = lexer + return lexer + + class Lexer(object): """ Class that implements a lexer for a given environment. Automatically created by the environment class, usually you don't have to do that. + + Note that the lexer is not automatically bound to an environment. + Multiple environments can share the same lexer. """ + __metaclass__ = LexerMeta + def __init__(self, environment): # shortcuts c = lambda x: re.compile(x, re.M | re.S) @@ -137,7 +173,7 @@ class Lexer(object): )), ('comment', 'comment_end'), '#pop'), (c('(.)'), (Failure('Missing end of comment tag'),), None) ], - # directives + # blocks 'block_begin': [ (c('(?:\-%s\s*|%s)%s' % ( e(environment.block_end_string), @@ -165,7 +201,7 @@ class Lexer(object): ] } - def tokenize(self, source): + def tokenize(self, source, filename=None): """ Simple tokenize function that yields ``(position, type, contents)`` tuples. Wrap the generator returned by this function in a @@ -175,13 +211,13 @@ class Lexer(object): Additionally non keywords are escaped. """ def generate(): - for lineno, token, value in self.tokeniter(source): + for lineno, token, value in self.tokeniter(source, filename): if token == 'name' and value not in keywords: value += '_' yield lineno, token, value - return TokenStream(generate()) + return TokenStream(generate(), filename) - def tokeniter(self, source): + def tokeniter(self, source, filename=None): """ This method tokenizes the text and returns the tokens in a generator. Use this method if you just want to tokenize a template. 
The output @@ -196,84 +232,115 @@ class Lexer(object): statetokens = self.rules['root'] source_length = len(source) + balancing_stack = [] + while True: # tokenizer loop for regex, tokens, new_state in statetokens: m = regex.match(source, pos) - if m: - # tuples support more options - if isinstance(tokens, tuple): - for idx, token in enumerate(tokens): - # hidden group - if token is None: - g = m.group(idx) - if g: - lineno += g.count('\n') - continue - # failure group - elif isinstance(token, Failure): - raise token(m.start(idx + 1)) - # bygroup is a bit more complex, in that case we - # yield for the current token the first named - # group that matched - elif token == '#bygroup': - for key, value in m.groupdict().iteritems(): - if value is not None: - yield lineno, key, value - lineno += value.count('\n') - break - else: - raise RuntimeError('%r wanted to resolve ' - 'the token dynamically' - ' but no group matched' - % regex) - # normal group - else: - data = m.group(idx + 1) - if data: - yield lineno, token, data - lineno += data.count('\n') - # strings as token just are yielded as it, but just - # if the data is not empty - else: - data = m.group() - if tokens is not None: - if data: - yield lineno, tokens, data - lineno += data.count('\n') - # fetch new position into new variable so that we can check - # if there is a internal parsing error which would result - # in an infinite loop - pos2 = m.end() - # handle state changes - if new_state is not None: - # remove the uppermost state - if new_state == '#pop': - stack.pop() - # resolve the new state by group checking - elif new_state == '#bygroup': + # if no match we try again with the next rule + if not m: + continue + + # we only match blocks and variables if brances / parentheses + # are balanced. continue parsing with the lower rule which + # is the operator rule. do this only if the end tags look + # like operators + if balancing_stack and \ + tokens in ('variable_end', 'block_end'): + continue + + # tuples support more options + if isinstance(tokens, tuple): + for idx, token in enumerate(tokens): + # hidden group + if token is None: + g = m.group(idx) + if g: + lineno += g.count('\n') + continue + # failure group + elif isinstance(token, Failure): + raise token(m.start(idx + 1)) + # bygroup is a bit more complex, in that case we + # yield for the current token the first named + # group that matched + elif token == '#bygroup': for key, value in m.groupdict().iteritems(): if value is not None: - stack.append(key) + yield lineno, key, value + lineno += value.count('\n') break else: - raise RuntimeError('%r wanted to resolve the ' - 'new state dynamically but' - ' no group matched' % - regex) - # direct state name given + raise RuntimeError('%r wanted to resolve ' + 'the token dynamically' + ' but no group matched' + % regex) + # normal group else: - stack.append(new_state) - statetokens = self.rules[stack[-1]] - # we are still at the same position and no stack change. 
- # this means a loop without break condition, avoid that and - # raise error - elif pos2 == pos: - raise RuntimeError('%r yielded empty string without ' - 'stack change' % regex) - # publish new function and start again - pos = pos2 - break + data = m.group(idx + 1) + if data: + yield lineno, token, data + lineno += data.count('\n') + + # strings as token just are yielded as it, but just + # if the data is not empty + else: + data = m.group() + # update brace/parentheses balance + if tokens == 'operator': + if data == '{': + balancing_stack.append('}') + elif data == '(': + balancing_stack.append(')') + elif data == '[': + balancing_stack.append(']') + elif data in ('}', ')', ']'): + if not balancing_stack or \ + balancing_stack.pop() != data: + raise TemplateSyntaxError('unexpected EOF ' + 'while lexing', + lineno, filename) + # yield items + if tokens is not None: + if data: + yield lineno, tokens, data + lineno += data.count('\n') + + # fetch new position into new variable so that we can check + # if there is a internal parsing error which would result + # in an infinite loop + pos2 = m.end() + + # handle state changes + if new_state is not None: + # remove the uppermost state + if new_state == '#pop': + stack.pop() + # resolve the new state by group checking + elif new_state == '#bygroup': + for key, value in m.groupdict().iteritems(): + if value is not None: + stack.append(key) + break + else: + raise RuntimeError('%r wanted to resolve the ' + 'new state dynamically but' + ' no group matched' % + regex) + # direct state name given + else: + stack.append(new_state) + statetokens = self.rules[stack[-1]] + # we are still at the same position and no stack change. + # this means a loop without break condition, avoid that and + # raise error + elif pos2 == pos: + raise RuntimeError('%r yielded empty string without ' + 'stack change' % regex) + # publish new function and start again + pos = pos2 + break # if loop terminated without break we havn't found a single match # either we are at the end of the file or we have a problem else: @@ -282,4 +349,5 @@ class Lexer(object): return # something went wrong raise TemplateSyntaxError('unexpected char %r at %d' % - (source[pos], pos), lineno) + (source[pos], pos), lineno, + filename) diff --git a/jinja/parser.py b/jinja/parser.py index 033b101..cb3e062 100644 --- a/jinja/parser.py +++ b/jinja/parser.py @@ -18,7 +18,7 @@ import re from compiler import ast, parse from compiler.misc import set_filename from jinja import nodes -from jinja.datastructure import TokenStream +from jinja.datastructure import TokenStream, StateTest from jinja.exceptions import TemplateSyntaxError try: set @@ -29,18 +29,20 @@ except NameError: __all__ = ['Parser'] -# callback functions for the subparse method -end_of_block = lambda p, t, d: t == 'block_end' -end_of_variable = lambda p, t, d: t == 'variable_end' -end_of_comment = lambda p, t, d: t == 'comment_end' -switch_for = lambda p, t, d: t == 'name' and d in ('else', 'endfor') -end_of_for = lambda p, t, d: t == 'name' and d == 'endfor' -switch_if = lambda p, t, d: t == 'name' and d in ('else', 'elif', 'endif') -end_of_if = lambda p, t, d: t == 'name' and d == 'endif' -end_of_filter = lambda p, t, d: t == 'name' and d == 'endfilter' -end_of_macro = lambda p, t, d: t == 'name' and d == 'endmacro' -end_of_block_tag = lambda p, t, d: t == 'name' and d == 'endblock' -end_of_trans = lambda p, t, d: t == 'name' and d == 'endtrans' +# general callback functions for the parser +end_of_block = StateTest.expect_token('block_end', 
'end of block tag') +end_of_variable = StateTest.expect_token('variable_end', 'end of variable') +end_of_comment = StateTest.expect_token('comment_end', 'end of comment') + +# internal tag callbacks +switch_for = StateTest.expect_name('else', 'endfor') +end_of_for = StateTest.expect_name('endfor') +switch_if = StateTest.expect_name('else', 'elif', 'endif') +end_of_if = StateTest.expect_name('endif') +end_of_filter = StateTest.expect_name('endfilter') +end_of_macro = StateTest.expect_name('endmacro') +end_of_block_tag = StateTest.expect_name('endblock') +end_of_trans = StateTest.expect_name('endtrans') class Parser(object): @@ -54,9 +56,10 @@ class Parser(object): self.environment = environment if isinstance(source, str): source = source.decode(environment.template_charset, 'ignore') + if isinstance(filename, unicode): + filename = filename.encode('utf-8') self.source = source self.filename = filename - self.tokenstream = environment.lexer.tokenize(source) #: if this template has a parent template it's stored here #: after parsing @@ -64,6 +67,7 @@ class Parser(object): #: set for blocks in order to keep them unique self.blocks = set() + #: mapping of directives that require special treatment self.directives = { 'raw': self.handle_raw_directive, 'for': self.handle_for_directive, @@ -79,6 +83,15 @@ class Parser(object): 'trans': self.handle_trans_directive } + #: set of directives that are only available in a certain + #: context. + self.context_directives = set(['elif', 'else', 'endblock', + 'endfilter', 'endfor', 'endif', 'endmacro', 'endraw', + 'endtrans', 'pluralize' + ]) + + self.tokenstream = environment.lexer.tokenize(source, filename) + def handle_raw_directive(self, lineno, gen): """ Handle fake raw directive. (real raw directives are handled by @@ -343,19 +356,23 @@ class Parser(object): replacements = {} for arg in self.parse_python(lineno, gen, '_trans(%s)').expr.args: - if arg.__class__ is not ast.Keyword: + if arg.__class__ not in (ast.Keyword, ast.Name): raise TemplateSyntaxError('translation tags need expl' 'icit names for values.', lineno, self.filename) # disallow keywords if not arg.name.endswith('_'): - raise TemplateSyntaxError('illegal use of keyword %r ' - 'as identifier.' % arg.name, - lineno, self.filename) + raise TemplateSyntaxError("illegal use of keyword '%s" + '\' as identifier.' % + arg.name, lineno, + self.filename) # remove the last "_" before writing + name = arg.name[:-1] if first_var is None: - first_var = arg.name[:-1] - replacements[arg.name[:-1]] = arg.expr + first_var = name + # if it's a keyword use the expression as value, + # otherwise just reuse the name node. + replacements[name] = getattr(arg, 'expr', arg) # look for endtrans/pluralize buf = singular = [] @@ -378,13 +395,14 @@ class Parser(object): # plural name without trailing "_"? that's a keyword if not variable_name.endswith('_'): raise TemplateSyntaxError('illegal use of keyword ' - '%r as identifier in trans ' - 'block.' % variable_name, + "'%s' as identifier in " + 'trans block.' % + variable_name, lineno, self.filename) variable_name = variable_name[:-1] if variable_name not in replacements: raise TemplateSyntaxError('unregistered translation ' - 'variable %r.' % + "variable '%s'." 
% variable_name, lineno, self.filename) if self.tokenstream.next()[1] != 'variable_end': @@ -399,9 +417,36 @@ class Parser(object): _, block_token, block_name = self.tokenstream.next() if block_token != 'name' or \ block_name not in ('pluralize', 'endtrans'): - raise TemplateSyntaxError('blocks in translatable ' - 'sections are not ' - 'supported', lineno, + # if we have a block name check if it's a real + # directive or a not existing one (which probably + # is a typo) + if block_token == 'name': + # if this block is a context directive the + # designer has probably misspelled endtrans + # with endfor or something like that. raise + # a nicer error message + if block_name in self.context_directives: + raise TemplateSyntaxError('unexpected directi' + "ve '%s' found" % + block_name, lineno, + self.filename) + # if's not a directive, probably misspelled + # endtrans. Raise the "unknown directive" + # exception rather than the "not allowed" + if block_name not in self.directives: + if block_name.endswith('_'): + block_name = block_name[:-1] + raise TemplateSyntaxError('unknown directive' + "'%s'" % block_name, + lineno, + self.filename) + # if it's indeed a known directive we better + # raise an exception informing the user about + # the fact that we don't support blocks in + # translatable sections. + raise TemplateSyntaxError('directives in translatable' + ' sections are not ' + 'allowed', lineno, self.filename) # pluralize if block_name == 'pluralize': @@ -417,7 +462,7 @@ class Parser(object): # disallow keywords if not plural_name.endswith('_'): raise TemplateSyntaxError('illegal use of ' - 'keyword %r as ' + "keyword '%s' as " 'identifier.' % plural_name, lineno, @@ -426,7 +471,7 @@ class Parser(object): if plural_name not in replacements: raise TemplateSyntaxError('unregistered ' 'translation ' - 'variable %r' % + "variable '%s'" % plural_name, lineno, self.filename) elif self.tokenstream.next()[1] != 'block_end': @@ -487,9 +532,10 @@ class Parser(object): try: ast = parse(source, 'exec') except SyntaxError, e: - raise TemplateSyntaxError('invalid syntax', lineno + e.lineno, + raise TemplateSyntaxError('invalid syntax in expression', + lineno + (e.lineno or 0), self.filename) - assert len(ast.node.nodes) == 1, 'get %d nodes, 1 expected' %\ + assert len(ast.node.nodes) == 1, 'get %d nodes, 1 expected' % \ len(ast.node.nodes) result = ast.node.nodes[0] nodes.inc_lineno(lineno, result) @@ -508,7 +554,7 @@ class Parser(object): # all names excluding keywords have an trailing underline. # if we find a name without trailing underline that's a keyword # and this code raises an error. else strip the underline again - if node.__class__ in (ast.AssName, ast.Name): + if node.__class__ in (ast.AssName, ast.Name, ast.Keyword): if not node.name.endswith('_'): raise TemplateSyntaxError('illegal use of keyword %r ' 'as identifier.' % node.name, @@ -586,9 +632,8 @@ class Parser(object): # template syntax error. if data in self.directives: node = self.directives[data](lineno, gen) - # directive or endtag found, give a proper error message - elif data in self.directives or \ - not data.endswith('_') and data.startswith('end'): + # context depending directive found + elif data in self.context_directives: raise TemplateSyntaxError('unexpected directive %r' % str(data), lineno, self.filename) @@ -617,8 +662,14 @@ class Parser(object): # still here and a test function is provided? 
raise and error if test is not None: - raise TemplateSyntaxError('unexpected end of template', lineno, - self.filename) + # if the callback is a state test lambda wrapper we + # can use the `error_message` property to get the error + if isinstance(test, StateTest): + msg = ': ' + test.error_message + else: + msg = '' + raise TemplateSyntaxError('unexpected end of template' + msg, + lineno, self.filename) return finish() def close_remaining_block(self): @@ -628,12 +679,14 @@ class Parser(object): the stream. If the next token isn't the block end we throw an error. """ - lineno = self.tokenstream.last[0] + lineno, _, tagname = self.tokenstream.last try: lineno, token, data = self.tokenstream.next() except StopIteration: raise TemplateSyntaxError('missing closing tag', lineno, self.filename) if token != 'block_end': - raise TemplateSyntaxError('expected close tag, found %r' % token, - lineno, self.filename) + print token, data, list(self.tokenstream) + raise TemplateSyntaxError('expected empty %s-directive but ' + 'found additional arguments.' % + tagname, lineno, self.filename) diff --git a/jinja/plugin.py b/jinja/plugin.py index 6ee2f10..d05c5f8 100644 --- a/jinja/plugin.py +++ b/jinja/plugin.py @@ -3,7 +3,11 @@ jinja.plugin ~~~~~~~~~~~~ - Support for the `GeneralTemplateInterface`__. + Support for the `GeneralTemplateInterface`__ and the Buffet interface. + + Do not use this module on your own. We don't recommend those interfaces! + If you are able to, you should really use Jinja without those abstraction + layers. __ http://trac.pocoo.org/wiki/GeneralTemplateInterface @@ -15,6 +19,61 @@ from jinja.loaders import FunctionLoader, FileSystemLoader, PackageLoader from jinja.exceptions import TemplateNotFound +class BuffetPlugin(object): + """ + Implements the Jinja buffet plugin. Well. It works for pylons and should + work for TurboGears too if their plugin system would work. + """ + + def __init__(self, extra_vars_func=None, options=None): + if 'jinja.environment' in options: + self.env = options['jinja.environment'] + else: + opt = {} + for key, value in options.iteritems(): + if key.startswith('jinja.') and key != 'jinja.extension': + opt[key[6:]] = value + loader_func = opt.pop('loader_func', None) + getmtime_func = opt.pop('getmtime_func', None) + use_memcache = opt.pop('use_memcache', False) + memcache_size = opt.pop('memcache_size', 40) + cache_folder = opt.pop('cache_folder', None) + auto_reload = opt.pop('auto_reload', True) + if 'searchpath' in options: + opt['loader'] = FileSystemLoader(opt.pop('searchpath'), + use_memcache, memcache_size, + cache_folder, auto_reload) + elif 'package' in options: + opt['loader'] = PackageLoader(opt.pop('package'), + opt.pop('package_path', ''), + use_memcache, memcache_size, + cache_folder, auto_reload) + elif loader_func is not None: + opt['loader'] = FunctionLoader(loader_func, getmtime_func, + use_memcache, memcache_size, + cache_folder, auto_reload) + self.env = Environment(**opt) + + self.extra_vars_func = extra_vars_func + self.extension = options.pop('jinja.extension', 'html') + + def load_template(self, templatename, template_string=None): + if template_string is not None: + return self.env.from_string(template_string) + if templatename.startswith('!'): + jinja_name = templatename[1:] + else: + jinja_name = templatename.replace('.', '/') + '.' 
+ self.extension + return self.env.get_template(jinja_name) + + def render(self, info, format='html', fragment=False, template=None): + if isinstance(template, basestring): + template = self.load_template(template) + if self.extra_vars_func: + info.update(self.extra_vars_func()) + return template.render(info) + + def jinja_plugin_factory(options): """ Basic implementation of the `GeneralTemplateInterface`. diff --git a/jinja/tests.py b/jinja/tests.py index ae293d4..6960a04 100644 --- a/jinja/tests.py +++ b/jinja/tests.py @@ -112,6 +112,7 @@ def test_matching(regex): return regex.search(value) is not None return wrapped + TESTS = { 'odd': test_odd, 'even': test_even, diff --git a/jinja/translators/python.py b/jinja/translators/python.py index cfe4d34..4072f10 100644 --- a/jinja/translators/python.py +++ b/jinja/translators/python.py @@ -314,12 +314,10 @@ class PythonTranslator(Translator): def reset(self): """ - Reset translation variables such as indention, cycle id - or the require_translations flag. + Reset translation variables such as indention or cycle id """ self.indention = 0 self.last_cycle_id = 0 - self.require_translations = False def translate(self): """ @@ -412,17 +410,6 @@ class PythonTranslator(Translator): # the template body body_lines.extend([self.handle_node(n) for n in node]) - # add translation helpers if required - if self.require_translations: - lines.append( - ' translator = environment.get_translator(context)\n' - ' def translate(s, p=None, n=None, r=None):\n' - ' if p is None:\n' - ' return translator.gettext(s) % (r or {})\n' - ' return translator.ngettext(s, p, r[n]) % (r or {})\n' - ' context.translate_func = translate' - ) - # add body lines and "generator hook" lines.extend(body_lines) lines.append(' if False:\n yield None') @@ -492,9 +479,13 @@ class PythonTranslator(Translator): """ In some situations we might have a node list. It's just a collection of multiple statements. + + If the nodelist was empty it will return an empty string """ - return '\n'.join([self.indent(self.nodeinfo(node))] + - [self.handle_node(n) for n in node]) + body = '\n'.join([self.handle_node(n) for n in node]) + if body: + return self.indent(self.nodeinfo(node)) + '\n' + body + return '' def handle_for_loop(self, node): """ @@ -570,13 +561,13 @@ class PythonTranslator(Translator): )) self.indention += 1 write(self.nodeinfo(body)) - buf.append(self.handle_node(body)) + buf.append(self.handle_node(body) or self.indent('pass')) self.indention -= 1 if node.else_ is not None: write('else:') self.indention += 1 write(self.nodeinfo(node.else_)) - buf.append(self.handle_node(node.else_)) + buf.append(self.handle_node(node.else_) or self.indent('pass')) self.indention -= 1 return '\n'.join(buf) @@ -739,7 +730,6 @@ class PythonTranslator(Translator): """ Handle translations. 
""" - self.require_translations = True if node.replacements: replacements = [] for name, n in node.replacements.iteritems(): @@ -777,7 +767,6 @@ class PythonTranslator(Translator): if node.name in self.constants: return self.constants[node.name] elif node.name == '_': - self.require_translations = True return 'context.translate_func' return 'context[%r]' % node.name diff --git a/setup.py b/setup.py index 631bdb2..4943932 100644 --- a/setup.py +++ b/setup.py @@ -47,5 +47,9 @@ setup( ('docs/txt', list(list_files('docs/src'))) ], platforms = 'any', + entry_points=''' + [python.templating.engines] + jinja = jinja.plugin:BuffetPlugin + ''', extras_require = {'plugin': ['setuptools>=0.6a2']} ) diff --git a/tests/test_filters.py b/tests/test_filters.py index 83dfa82..ba3bfba 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -22,7 +22,10 @@ DEFAULT = '''{{ missing|default("no") }}|{{ false|default('no') }}|\ DICTSORT = '''{{ foo|dictsort }}|\ {{ foo|dictsort(true) }}|\ {{ foo|dictsort(false, 'value') }}''' +BATCH = '''{{ foo|batch(3) }}|{{ foo|batch(3, 'X') }}''' +SLICE = '''{{ foo|slice(3) }}|{{ foo|slice(3, 'X') }}''' ESCAPE = '''{{ '<">&'|escape }}|{{ '<">&'|escape(true) }}''' +STRIPTAGS = '''{{ foo|striptags }}''' FILESIZEFORMAT = '{{ 100|filesizeformat }}|\ {{ 1000|filesizeformat }}|\ {{ 1000000|filesizeformat }}|\ @@ -52,6 +55,12 @@ URLIZE = '''{{ "foo http://www.example.com/ bar"|urlize }}''' WORDCOUNT = '''{{ "foo bar baz"|wordcount }}''' BLOCK = '''{% filter lower|escape %}{% endfilter %}''' CHAINING = '''{{ ['', '']|first|upper|escape }}''' +SUM = '''{{ [1, 2, 3, 4, 5, 6]|sum }}''' +ABS = '''{{ -1|abs }}|{{ 1|abs }}''' +ROUND = '''{{ 2.7|round }}|{{ 2.1|round }}|\ +{{ 2.1234|round(2, 'floor') }}|{{ 2.1|round(0, 'ceil') }}''' +XMLATTR = '''{{ {'foo': 42, 'bar': 23, 'fish': none, +'spam': missing, 'blub:blub': ''}|xmlattr }}''' def test_capitalize(env): @@ -82,12 +91,33 @@ def test_dictsort(env): "[('a', 0), ('b', 1), ('c', 2), ('A', 3)]") +def test_batch(env): + tmpl = env.from_string(BATCH) + out = tmpl.render(foo=range(10)) + assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|" + "[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]") + + +def test_slice(env): + tmpl = env.from_string(SLICE) + out = tmpl.render(foo=range(10)) + assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|" + "[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]") + + def test_escape(env): tmpl = env.from_string(ESCAPE) out = tmpl.render() assert out == '<">&|<">&' +def test_striptags(env): + tmpl = env.from_string(STRIPTAGS) + out = tmpl.render(foo='

<p>just a small   \n <a href="#">' + 'example</a> link</p>\n<p>to a webpage</p>
') + assert out == 'just a small example link to a webpage' + + def test_filesizeformat(env): tmpl = env.from_string(FILESIZEFORMAT) out = tmpl.render() @@ -220,3 +250,27 @@ def test_block(env): def test_chaining(env): tmpl = env.from_string(CHAINING) assert tmpl.render() == '<FOO>' + + +def test_sum(env): + tmpl = env.from_string(SUM) + assert tmpl.render() == '21' + + +def test_abs(env): + tmpl = env.from_string(ABS) + return tmpl.render() == '1|1' + + +def test_round(env): + tmpl = env.from_string(ROUND) + return tmpl.render() == '3.0|2.0|2.1|3.0' + + +def test_xmlattr(env): + tmpl = env.from_string(XMLATTR) + out = tmpl.render().split() + assert len(out) == 3 + assert 'foo="42"' in out + assert 'bar="23"' in out + assert 'blub:blub="<?>"' in out diff --git a/tests/test_ifcondition.py b/tests/test_ifcondition.py index 21072fc..12add62 100644 --- a/tests/test_ifcondition.py +++ b/tests/test_ifcondition.py @@ -10,6 +10,7 @@ SIMPLE = '''{% if true %}...{% endif %}''' ELIF = '''{% if false %}XXX{% elif true %}...{% else %}XXX{% endif %}''' ELSE = '''{% if false %}XXX{% else %}...{% endif %}''' +EMPTY = '''[{% if true %}{% else %}{% endif %}]''' def test_simple(env): @@ -25,3 +26,8 @@ def test_elif(env): def test_else(env): tmpl = env.from_string(ELSE) assert tmpl.render() == '...' + + +def test_empty(env): + tmpl = env.from_string(EMPTY) + assert tmpl.render() == '[]' diff --git a/tests/test_inheritance.py b/tests/test_inheritance.py index 96a1065..74d5181 100644 --- a/tests/test_inheritance.py +++ b/tests/test_inheritance.py @@ -6,7 +6,7 @@ :copyright: 2007 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ - +from jinja import Environment, DictLoader LAYOUTTEMPLATE = '''\ |{% block block1 %}block 1 from layout{% endblock %} @@ -35,7 +35,6 @@ LEVEL4TEMPLATE = '''\ {% block block3 %}block 3 from level4{% endblock %} ''' - def test_layout(env): tmpl = env.get_template('layout') assert tmpl.render() == ('|block 1 from layout|block 2 from ' @@ -64,3 +63,17 @@ def test_level4(env): tmpl = env.get_template('level4') assert tmpl.render() == ('|block 1 from level1|block 5 from ' 'level3|block 3 from level4|') + + +def test_super(): + env = Environment(loader=DictLoader({ + 'a': '{% block intro %}INTRO{% endblock %}|' + 'BEFORE|{% block data %}INNER{% endblock %}|AFTER', + 'b': '{% extends "a" %}{% block data %}({{ ' + 'super() }}){% endblock %}', + 'c': '{% extends "b" %}{% block intro %}--{{ ' + 'super() }}--{% endblock %}\n{% block data ' + '%}[{{ super() }}]{% endblock %}' + })) + tmpl = env.get_template('c') + assert tmpl.render() == '--INTRO--|BEFORE|[(INNER)]|AFTER' diff --git a/tests/test_lexer.py b/tests/test_lexer.py new file mode 100644 index 0000000..6ac49d4 --- /dev/null +++ b/tests/test_lexer.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +""" + unit test for the lexer + ~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +BALANCING = '''{% for item in seq %}${{'foo': item}|upper}{% endfor %}''' +COMMENTS = '''\ +
<ul>
+<!--- for item in seq -->
+  <li>{item}</li>
+<!--- endfor -->
+</ul>
''' + + +def test_balancing(): + from jinja import Environment + env = Environment('{%', '%}', '${', '}') + tmpl = env.from_string(BALANCING) + assert tmpl.render(seq=range(3)) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}" + + +def test_comments(): + from jinja import Environment + env = Environment('<!--', '-->', '{', '}') + tmpl = env.from_string(COMMENTS) + assert tmpl.render(seq=range(3)) == ("<ul>\n  <li>0</li>\n  " + "<li>1</li>\n  <li>2</li>\n</ul>
") -- 2.26.2