print template.render(the='variables', go='here')
+Unicode
+-------
+
+Jinja2 uses unicode internally, which means that you have to pass unicode
+objects to the render function, or bytestrings that consist only of ASCII
+characters. Additionally, newlines are normalized to one end-of-line
+sequence, which is UNIX style (``\n``) by default.
+
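+For illustration, a minimal sketch of both points, using the
+:class:`Template` shortcut (the template source here is made up)::
+
+    from jinja2 import Template
+
+    tmpl = Template(u'Hello {{ name }}!\r\nBye.')
+    # the Windows newline in the source comes back as '\n' by default
+    print tmpl.render(name=u'World')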
+
High Level API
--------------
.. autoexception:: jinja2.exceptions.TemplateSyntaxError
+ .. attribute:: message
+
+      The error message as a utf-8 bytestring.
+
+ .. attribute:: lineno
+
+      The line number where the error occurred.
+
+ .. attribute:: name
+
+      The load name for the template as a unicode string.
+
+ .. attribute:: filename
+
+      The filename from which the template was loaded, as a bytestring in
+      the encoding of the file system (most likely utf-8, or mbcs on
+      Windows systems).
+
+   The reason the filename and error message are bytestrings and not
+   unicode strings is that Python 2.x does not use unicode for exceptions
+   and tracebacks, and neither does the compiler. This will change with
+   Python 3.
+
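+   A minimal sketch of inspecting these attributes when catching the
+   exception (the broken template source below is made up)::
+
+      from jinja2 import Environment, TemplateSyntaxError
+
+      env = Environment()
+      try:
+          env.from_string(u'{% if %}')
+      except TemplateSyntaxError, e:
+          print 'line %d: %s' % (e.lineno, e.message)
+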
.. autoexception:: jinja2.exceptions.TemplateAssertionError
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
- node.freeze()
generator = CodeGenerator(environment, name, filename, stream)
generator.visit(node)
if stream is None:
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
+ assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+ 'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
- """The core component of Jinja is the `Environment`. It contains
+ r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
+ `newline_sequence`
+        The sequence that newlines in the template are normalized to.
+        Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is
+        ``'\n'``, which is a useful default for Linux and OS X systems
+        as well as web applications.
+
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
trim_blocks=False,
+ newline_sequence='\n',
extensions=(),
optimized=True,
undefined=Undefined,
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.trim_blocks = trim_blocks
+ self.newline_sequence = newline_sequence
# runtime information
self.undefined = undefined
comment_end_string='#}',
line_statement_prefix=None,
trim_blocks=False,
+ newline_sequence='\n',
extensions=(),
optimized=True,
undefined=Undefined,
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, trim_blocks, tuple(extensions), optimized,
- undefined, finalize, autoescape, None, 0, False)
+ line_statement_prefix, trim_blocks, newline_sequence,
+ frozenset(extensions), optimized, undefined, finalize,
+ autoescape, None, 0, False)
return env.from_string(source, template_class=cls)
@classmethod
without arguments but it will evaluate the template on every call
rather than caching the template. It's also possible to provide
a dict which is then used as context. The arguments are the same
- as fo the :meth:`new_context` method.
+ as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared))
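
A minimal sketch of how the returned template module might be used,
assuming the method shown above is exposed as ``make_module`` (the macro
is purely illustrative)::

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(u'{% macro greet(name) %}Hello {{ name }}!{% endmacro %}')
    module = tmpl.make_module()
    print module.greet(u'World')
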
"""Baseclass for all template errors."""
-class UndefinedError(TemplateError):
- """Raised if a template tries to operate on :class:`Undefined`."""
-
-
-class SecurityError(TemplateError):
- """Raised if a template tries to do something insecure if the
- sandbox is enabled.
- """
-
-
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
def __init__(self, message, lineno, name=None, filename=None):
if name is not None:
- extra = '%s, line %d' % (name, lineno)
+ extra = '%s, line %d' % (name.encode('utf-8'), lineno)
else:
extra = 'line %d' % lineno
+ # if the message was provided as unicode we have to encode it
+ # to utf-8 explicitly
+ if isinstance(message, unicode):
+ message = message.encode('utf-8')
+        # otherwise make sure it's in fact valid utf-8
+ else:
+ message = message.decode('utf-8', 'ignore').encode('utf-8')
TemplateError.__init__(self, '%s (%s)' % (message, extra))
self.message = message
self.lineno = lineno
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
- by a syntax error.
+    by a syntax error. However, it's a direct subclass of
+ :exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
- """A runtime error."""
+ """A generic runtime error in the template engine. Under some situations
+ Jinja may raise this exception.
+ """
+
+
+class UndefinedError(TemplateRuntimeError):
+ """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+    """Raised if a template tries to do something insecure while the
+    sandbox is enabled.
+ """
-class FilterArgumentError(Exception):
+class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
options.get('comment_end_string', '#}'),
options.get('line_statement_prefix') or None,
options.get('trim_blocks', '').lower() in ('1', 'on', 'yes', 'true'),
- tuple(extensions),
+ '\n', frozenset(extensions),
# fill with defaults so that environments are shared
    # with other spontaneous environments. The rest of the
# arguments are optimizer, undefined, finalize, autoescape,
integer_re = re.compile(r'\d+')
name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
float_re = re.compile(r'\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')
# bind operators to token types
operators = {
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
- environment.trim_blocks)
+ environment.trim_blocks,
+ environment.newline_sequence)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = type.__call__(cls, environment)
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and '\\n?' or ''
+ self.newline_sequence = environment.newline_sequence
+
# global lexing rules
self.rules = {
'root': [
] + tag_rules
}
+ def _normalize_newlines(self, value):
+        """Called for strings and template data to normalize the newlines."""
+ return newline_re.sub(self.newline_sequence, value)
+
def tokenize(self, source, name=None, filename=None):
"""Works like `tokeniter` but returns a tokenstream of tokens and not
a generator of token tuples. Additionally all token values are already
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
- try:
- value = str(value)
- except UnicodeError:
- pass
+ value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
elif token == 'string':
# try to unescape string
try:
- value = value[1:-1] \
+ value = self._normalize_newlines(value[1:-1]) \
.encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
except Exception, e:
wants. The parser uses the `tokenize` function which returns a
`TokenStream` and postprocessed tokens.
"""
- source = u'\n'.join(unicode(source).splitlines())
+ source = '\n'.join(unicode(source).splitlines())
pos = 0
lineno = 1
stack = ['root']
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
-
- Nodes can be frozen which makes them hashable. The compiler freezes the
- nodes automatically. Modifications on frozen nodes are possible but not
- allowed.
"""
__metaclass__ = NodeType
fields = ()
attributes = ('lineno', 'environment')
abstract = True
- frozen = False
def __init__(self, *fields, **attributes):
if self.abstract:
todo.extend(node.iter_child_nodes())
return self
- def freeze(self):
- """Freeze the complete node tree which makes them hashable.
- This happens automatically on compilation. Frozen nodes must not be
- modified any further. Extensions may not freeze nodes that appear
- in the final node tree (ie: nodes that are returned from the extension
- parse method).
- """
- todo = deque([self])
- while todo:
- node = todo.popleft()
- node.frozen = True
- todo.extend(node.iter_child_nodes())
-
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
- def __hash__(self):
- if not self.frozen:
- raise TypeError('unfrozen nodes are unhashable')
- return hash(tuple(self.iter_fields()))
-
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,