(string_re, 'string', None)
]
+ #: if variables and blocks share the same delimiters (or the
+ #: variable delimiters are unset) the parser never receives
+ #: variable blocks. This flag is `True` in that situation.
+ self.no_variable_block = (
+ (environment.variable_start_string is
+ environment.variable_end_string is None) or
+ (environment.variable_start_string ==
+ environment.block_start_string and
+ environment.variable_end_string ==
+ environment.block_end_string)
+ )
+
# assamble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
+ # the variable rule is only added to the rules below if
+ # variable processing is actually required.
root_tag_rules = [
('comment', environment.comment_start_string),
- ('block', environment.block_start_string),
- ('variable', environment.variable_start_string)
+ ('block', environment.block_start_string)
]
+ if not self.no_variable_block:
+ root_tag_rules.append(('variable',
+ environment.variable_start_string))
root_tag_rules.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
# block suffix if trimming is enabled
block_suffix_re
)), 'block_end', '#pop'),
] + tag_rules,
- # variables
- 'variable_begin': [
- (c('\-%s\s*|%s' % (
- e(environment.variable_end_string),
- e(environment.variable_end_string)
- )), 'variable_end', '#pop')
- ] + tag_rules,
# raw block
'raw': [
(c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
]
}
+ # only add the variable rules to the list if we process variables;
+ # otherwise `variable_end_string` could be None and break things.
+ if not self.no_variable_block:
+ self.rules['variable_begin'] = [
+ (c('\-%s\s*|%s' % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string)
+ )), 'variable_end', '#pop')
+ ] + tag_rules
+
def tokenize(self, source, filename=None):
"""
Simple tokenize function that yields ``(position, type, contents)``
'endmacro', 'endraw', 'endtrans', 'pluralize'
])
+ #: get the `no_variable_block` flag
+ self.no_variable_block = self.environment.lexer.no_variable_block
+
self.tokenstream = environment.lexer.tokenize(source, filename)
def handle_raw_directive(self, lineno, gen):
"""
Handle translatable sections.
"""
+ def process_variable(lineno, token, name):
+ if token != 'name':
+ raise TemplateSyntaxError('can only use variable not '
+ 'constants or expressions '
+ 'in translation variable '
+ 'blocks.', lineno,
+ self.filename)
+ # plural name without trailing "_"? that's a keyword
+ if not name.endswith('_'):
+ raise TemplateSyntaxError('illegal use of keyword '
+ "'%s' as identifier in "
+ 'trans block.' %
+ name, lineno, self.filename)
+ name = name[:-1]
+ if name not in replacements:
+ raise TemplateSyntaxError('unregistered translation '
+ "variable '%s'." %
+ name, lineno,
+ self.filename)
+ # check that we don't have an expression here, thus the
+ # next token *must* be a variable_end token (or a
+ # block_end token when in no_variable_block mode)
+ next_token = self.tokenstream.next()[1]
+ if next_token != 'variable_end' and not \
+ (self.no_variable_block and next_token == 'block_end'):
+ raise TemplateSyntaxError('you cannot use variable '
+ 'expressions inside trans '
+ 'tags. apply filters '
+ 'in the trans header.',
+ lineno, self.filename)
+ buf.append('%%(%s)s' % name)
+
# save the initial line number for the resulting node
flineno = lineno
try:
self.tokenstream.drop_until(end_of_comment, True)
# nested variables
elif token == 'variable_begin':
- _, variable_token, variable_name = self.tokenstream.next()
- if variable_token != 'name':
- raise TemplateSyntaxError('can only use variable not '
- 'constants or expressions '
- 'in translation variable '
- 'blocks.', lineno,
- self.filename)
- # plural name without trailing "_"? that's a keyword
- if not variable_name.endswith('_'):
- raise TemplateSyntaxError('illegal use of keyword '
- "'%s' as identifier in "
- 'trans block.' %
- variable_name,
- lineno, self.filename)
- variable_name = variable_name[:-1]
- if variable_name not in replacements:
- raise TemplateSyntaxError('unregistered translation '
- "variable '%s'." %
- variable_name, lineno,
- self.filename)
- if self.tokenstream.next()[1] != 'variable_end':
- raise TemplateSyntaxError('you cannot use variable '
- 'expressions inside trans '
- 'tags. apply filters '
- 'in the trans header.',
- lineno, self.filename)
- buf.append('%%(%s)s' % variable_name)
+ process_variable(*self.tokenstream.next())
# nested blocks are not supported, just look for end blocks
elif token == 'block_begin':
_, block_token, block_name = self.tokenstream.next()
# exception rather than the "not allowed"
if block_name not in self.directives:
if block_name.endswith('_'):
+ # if we don't have a variable block we
+ # have to process this as variable.
+ if self.no_variable_block:
+ process_variable(_, block_token,
+ block_name)
+ continue
block_name = block_name[:-1]
raise TemplateSyntaxError('unknown directive'
"'%s'" % block_name,
lineno,
self.filename)
+ # we have something different and are in the
+ # special no_variable_block mode. process this
+ # as variable
+ elif self.no_variable_block:
+ process_variable(_, block_token, block_name)
+ continue
# if it's indeed a known directive we better
# raise an exception informing the user about
# the fact that we don't support blocks in
# clear the buffer
del data_buffer[:]
+ def process_variable(gen):
+ data_buffer.append(('variable', lineno, tuple(gen)))
+
lineno = self.tokenstream.last[0]
result = nodes.NodeList(lineno)
data_buffer = []
# parse everything till the end of it.
elif token == 'variable_begin':
gen = self.tokenstream.fetch_until(end_of_variable, True)
- data_buffer.append(('variable', lineno, tuple(gen)))
+ process_variable(gen)
# this token marks the start of a block. like for variables
# just parse everything until the end of the block
if data_buffer:
flush_data_buffer()
+ node = None
gen = self.tokenstream.fetch_until(end_of_block, True)
try:
lineno, token, data = gen.next()
# first token *must* be a name token
if token != 'name':
- raise TemplateSyntaxError('unexpected %r token (%r)' % (
- token, data), lineno,
- self.filename)
+ # well, not always. if we have a lexer without variable
+ # blocks configured we process these tokens as variable
+ # block.
+ if self.no_variable_block:
+ process_variable([(lineno, token, data)] + list(gen))
+ else:
+ raise TemplateSyntaxError('unexpected %r token (%r)' %
+ (token, data), lineno,
+ self.filename)
# if a test function is passed to subparse we check if we
# reached the end of such a requested block.
self.filename)
# keyword or unknown name with trailing slash
else:
+ # non name token in no_variable_block mode.
+ if token != 'name' and self.no_variable_block:
+ process_variable([(lineno, token, data)] +
+ list(gen))
+ continue
if data.endswith('_'):
+ # it was a non-keyword identifier and there is no
+ # variable tag configured, so process this as a
+ # variable tag instead.
+ if self.no_variable_block:
+ process_variable([(lineno, token, data)] +
+ list(gen))
+ continue
+ # otherwise strip the trailing underscore for the
+ # exception that is raised
data = data[:-1]
raise TemplateSyntaxError('unknown directive %r' %
str(data), lineno,