def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
- Returns a dict of instanciated environments.
+ Returns a dict of instantiated extensions.
"""
result = {}
for extension in extensions:
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
- # - spontaneus environments (i18n extension and Template)
+ # - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
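# Hedged illustration (not part of the patch): the comment above is why callers
# should pass Environment options by keyword, so the internal positional order
# never leaks into user code. The delimiter values below are only an example.
from jinja2 import Environment

env = Environment(block_start_string='<%', block_end_string='%>',
                  variable_start_string='<%=', variable_end_string='%>')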
return stream
def _generate(self, source, name, filename, defer_init=False):
- """Internal hook that can be overriden to hook a different generate
+ """Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
- """Internal hook that can be overriden to hook a different compile
+ """Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
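# Hedged sketch (an assumption, not from this patch): since _generate and
# _compile are documented as overridable hooks, a subclass can intercept the
# Python module source that generate() returns before it gets compiled.
# LoggingEnvironment is a made-up name for illustration.
from jinja2 import Environment

class LoggingEnvironment(Environment):
    def _generate(self, source, name, filename, defer_init=False):
        code = super(LoggingEnvironment, self)._generate(
            source, name, filename, defer_init=defer_init)
        print('generated %d characters of code for %r' % (len(code), name))
        return code

LoggingEnvironment().from_string('{{ 1 + 1 }}').render()  # triggers the hook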
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
- Per default unicode strings are written, if you want to encode
+ By default unicode strings are written; if you want to encode
- before writing specifiy an `encoding`.
+ before writing, specify an `encoding`.
Example usage::
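# Illustrative usage (a sketch, not the docstring's own example, which is
# omitted from this excerpt): render a template as a stream and dump it to a
# file, encoding on the way out. 'hello.html' is a made-up filename.
from jinja2 import Template

Template('Hello {{ name }}!').stream(name='World').dump('hello.html',
                                                        encoding='utf-8')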
(operator_re, TOKEN_OPERATOR, None)
]
- # assamble the root lexing rule. because "|" is ungreedy
+ # assemble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
}
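# Small demonstration (not from the patch) of why the delimiters have to be
# sorted by length before being joined with "|": re tries the alternatives
# left to right, so a short prefix such as '<%' would otherwise shadow the
# longer '<%=' and variable tags would never match.
import re

delimiters = ['<%', '<%=', '%>']
root_re = re.compile('|'.join(
    re.escape(x) for x in sorted(delimiters, key=len, reverse=True)))
assert root_re.match('<%= name %>').group() == '<%='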
def _normalize_newlines(self, value):
- """Called for strings and template data to normlize it to unicode."""
+ """Called for strings and template data to normalize it to unicode."""
return newline_re.sub(self.newline_sequence, value)
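# For illustration (an assumption about the surrounding code): a newline_re of
# this shape matches all three newline conventions so that every one of them
# can be rewritten to the configured newline_sequence.
import re

newline_re = re.compile(r'(\r\n|\r|\n)')
assert newline_re.sub('\n', 'a\r\nb\rc\n') == 'a\nb\nc\n'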
def tokenize(self, source, name=None, filename=None, state=None):
if m is None:
continue
- # we only match blocks and variables if brances / parentheses
+ # we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
# publish new function and start again
pos = pos2
break
- # if loop terminated without break we havn't found a single match
+ # if loop terminated without break we haven't found a single match
# either we are at the end of the file or we have a problem
else:
# end of text
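# Minimal sketch (hypothetical, not from the patch) of the for/else idiom the
# loop above relies on: the else clause runs only when the for loop finishes
# without hitting break, which is how "no lexing rule matched" is detected.
import re

def next_token(rules, source, pos):
    for regex, token in rules:
        m = regex.match(source, pos)
        if m is None:
            continue
        result = (token, m.group())
        pos = m.end()
        break
    else:
        # the loop ran to completion without `break`: nothing matched, so we
        # are either at the end of the input or looking at invalid input
        raise SyntaxError('unexpected character at position %d' % pos)
    return result, pos

rules = [(re.compile(r'\d+'), 'integer'), (re.compile(r'[a-z]+'), 'name')]
print(next_token(rules, 'foo 42', 0))   # -> (('name', 'foo'), 3)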
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
- 'identifiers and may not contain hypens, use an '
+ 'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
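# Hedged illustration (not from the patch): the check above fires because a
# hyphen in a block name is tokenized as a subtraction operator.
from jinja2 import Environment, TemplateSyntaxError

env = Environment()
try:
    env.from_string('{% block page-title %}x{% endblock %}')
except TemplateSyntaxError as exc:
    print(exc)  # explains that hyphens are not allowed in block names
env.from_string('{% block page_title %}x{% endblock %}')  # underscore is fine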