From db7985dead0c12df0fbda41e7ccda37fbd09be18 Mon Sep 17 00:00:00 2001
From: Armin Ronacher
Date: Tue, 31 Mar 2009 23:51:56 +0200
Subject: [PATCH] Fixed bug in line-based comments with priority.

--HG--
branch : trunk
---
 jinja2/lexer.py      | 20 ++++++++++++--------
 tests/test_parser.py | 18 +++++++++++++++---
 2 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/jinja2/lexer.py b/jinja2/lexer.py
index 10cfd63..8f63932 100644
--- a/jinja2/lexer.py
+++ b/jinja2/lexer.py
@@ -124,6 +124,8 @@ ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                             TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                             TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
                             TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
+ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
+                             TOKEN_COMMENT, TOKEN_LINECOMMENT])
 
 
 def count_newlines(value):
@@ -147,10 +149,10 @@ def compile_rules(environment):
 
     if environment.line_statement_prefix is not None:
         rules.append((len(environment.line_statement_prefix), 'linestatement',
-                      '^\\s*' + e(environment.line_statement_prefix)))
+                      r'^\s*' + e(environment.line_statement_prefix)))
     if environment.line_comment_prefix is not None:
         rules.append((len(environment.line_comment_prefix), 'linecomment',
-                      '\\s*' + e(environment.line_comment_prefix)))
+                      r'(?:^|(?<=\s))\s*' + e(environment.line_comment_prefix)))
 
     return sorted(rules, reverse=True)
 
@@ -383,16 +385,16 @@ class Lexer(object):
             'root': [
                 # directives
                 (c('(.*?)(?:%s)' % '|'.join(
-                    ['(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*%s)' % (
+                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*%s)' % (
                         e(environment.block_start_string),
                         e(environment.block_start_string),
                         e(environment.block_end_string)
                     )] + [
-                        '(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
+                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
                         for n, r in root_tag_rules
                     ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                 # data
-                (c('.+'), 'data', None)
+                (c('.+'), TOKEN_DATA, None)
             ],
             # comments
             TOKEN_COMMENT_BEGIN: [
@@ -435,7 +437,8 @@ class Lexer(object):
             ] + tag_rules,
             # line comments
             TOKEN_LINECOMMENT_BEGIN: [
-                (c(r'.*?(?=\n|$)'), TOKEN_LINECOMMENT_END, '#pop')
+                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
+                 TOKEN_LINECOMMENT_END), '#pop')
             ]
         }
 
@@ -551,7 +554,7 @@ class Lexer(object):
                         # normal group
                         else:
                             data = m.group(idx + 1)
-                            if data:
+                            if data or token not in ignore_if_empty:
                                 yield lineno, token, data
                             lineno += data.count('\n')
 
@@ -579,7 +582,8 @@ class Lexer(object):
                                                           lineno, name,
                                                           filename)
                     # yield items
-                    yield lineno, tokens, data
+                    if data or tokens not in ignore_if_empty:
+                        yield lineno, tokens, data
                     lineno += data.count('\n')
 
                 # fetch new position into new variable so that we can check
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 640394e..9ee6003 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -46,7 +46,15 @@ and bar comment #}
 {% macro blub() %}foo{% endmacro %}
 {{ blub() }}'''
 
-LINE_SYNTAX_PRIORITY = '''\
+LINE_SYNTAX_PRIORITY1 = '''\
+/* ignore me.
+   I'm a multiline comment */
+## for item in seq:
+* ${item} # this is just extra stuff
+## endfor
+'''
+
+LINE_SYNTAX_PRIORITY2 = '''\
 /* ignore me.
    I'm a multiline comment */
 # for item in seq:
@@ -96,6 +104,10 @@ def test_line_syntax():
 
 
 def test_line_syntax_priority():
+    # XXX: why is the whitespace there in front of the newline?
+    env = Environment('{%', '%}', '${', '}', '/*', '*/', '##', '#')
+    tmpl = env.from_string(LINE_SYNTAX_PRIORITY1)
+    assert tmpl.render(seq=[1, 2]).strip() == '* 1 \n* 2'
     env = Environment('{%', '%}', '${', '}', '/*', '*/', '#', '##')
-    tmpl = env.from_string(LINE_SYNTAX_PRIORITY)
-    assert tmpl.render(seq=[1, 2]).strip() == '* 1\n* 2'
+    tmpl = env.from_string(LINE_SYNTAX_PRIORITY2)
+    assert tmpl.render(seq=[1, 2]).strip() == '* 1 \n* 2'
-- 
2.26.2
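
For illustration only: a minimal usage sketch of the behaviour the new test pins
down, assuming a Jinja2 checkout with this patch applied. The constructor
arguments and the template mirror LINE_SYNTAX_PRIORITY1 in
tests/test_parser.py; nothing below is part of the patch itself.

    from jinja2 import Environment

    # Same call as the new test: the last two positional arguments are
    # line_statement_prefix='##' and line_comment_prefix='#'.
    env = Environment('{%', '%}', '${', '}', '/*', '*/', '##', '#')
    tmpl = env.from_string(
        '/* ignore me.\n'
        "   I'm a multiline comment */\n"
        '## for item in seq:\n'
        '* ${item} # this is just extra stuff\n'
        '## endfor\n'
    )
    # '## endfor' lexes as a line statement (the longer prefix wins the
    # priority), while a lone '#' only discards the rest of its own line.
    assert tmpl.render(seq=[1, 2]).strip() == '* 1 \n* 2'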