variable_end_string='}}',
comment_start_string='{#',
comment_end_string='#}',
+ line_statement_prefix=None,
trim_blocks=False,
- template_charset='utf-8',
loader=None):
"""Here the possible initialization parameters:
comment. defaults to ``'{#'``.
`comment_end_string` the string marking the end of a comment.
defaults to ``'#}'``.
+ `line_statement_prefix` If given and a string, this will be used as
+ prefix for line based statements. See the
+ documentation for more details.
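+ For example, with a prefix of ``'#'``
+ a line such as ``# for item in seq``
+ is handled like ``{% for item in seq %}``.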
`trim_blocks` If this is set to ``True`` the first newline
after a block is removed (block, not
variable tag!). Defaults to ``False``.
- `template_charset` the charset of the templates.
`loader` the loader which should be used.
========================= ============================================
"""
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
+ self.line_statement_prefix = line_statement_prefix
self.trim_blocks = trim_blocks
- self.template_charset = template_charset
# defaults
self.filters = DEFAULT_FILTERS.copy()
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
+ environment.line_statement_prefix,
environment.trim_blocks))
# use the cached lexer if possible
# lexing rules for tags
tag_rules = [
- (eol_re, 'eol', None),
(whitespace_re, None, None),
(float_re, 'float', None),
(integer_re, 'integer', None),
if not self.no_variable_block:
root_tag_rules.append(('variable',
environment.variable_start_string))
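+ # sort the rules by length, longest first, so that a delimiter
+ # which is a prefix of a longer one cannot shadow it in the regex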
- root_tag_rules.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+ root_tag_rules.sort(key=lambda x: len(x[1]), reverse=True)
+
+ # now escape the rules. This is done after the sorting above so
+ # that the escaping backslashes don't count towards the tag lengths.
+ root_tag_rules = [(a, e(b)) for a, b in root_tag_rules]
+
+ # if we have a line statement prefix we need an extra rule for
+ # that. It is added here, after the sorting and escaping above,
+ # and inserted at the front so it is tried before the other tags.
+ if environment.line_statement_prefix is not None:
+ prefix = e(environment.line_statement_prefix)
+ root_tag_rules.insert(0, ('linestatement', r'^\s*' + prefix))
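+ # (with a prefix of '#', for example, this yields the pattern
+ # '^\s*\#': a hash at the start of a line after optional whitespace)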
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and '\\n?' or ''
e(environment.block_start_string),
e(environment.block_end_string)
)] + [
- '(?P<%s_begin>\s*%s\-|%s)' % (n, e(r), e(r))
+ '(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
for n, r in root_tag_rules
])), ('data', '#bygroup'), '#bygroup'),
# data
)), 'variable_end', '#pop')
] + tag_rules
+ # the same goes for the line_statement_prefix
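+ # inside a line statement the normal tag rules apply; a bare
+ # newline (or the end of the source) terminates it and pops back
+ # to the root state.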
+ if environment.line_statement_prefix is not None:
+ self.rules['linestatement_begin'] = [
+ (c(r'\s*(\n|$)'), 'linestatement_end', '#pop')
+ ] + tag_rules
+
def tokenize(self, source, filename=None):
"""
Works like `tokeniter` but returns a tokenstream of tokens and not a
generator of token tuples. Additionally all token values are
postprocessed: keywords are already keyword tokens, not named tokens,
comments are removed, integers and floats converted, strings
unescaped etc.
"""
+ source = unicode(source)
def generate():
for lineno, token, value in self.tokeniter(source, filename):
if token in ('comment_begin', 'comment', 'comment_end'):
continue
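+ # line statement delimiters are reported to the parser as normal
+ # block delimiters, so no special handling is needed downstream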
+ elif token == 'linestatement_begin':
+ token = 'block_begin'
+ elif token == 'linestatement_end':
+ token = 'block_end'
elif token == 'data':
try:
value = str(value)