Commit 7b71819

fixup! Add option to preserve comments when parsing templates

1 parent 35681ec
5 files changed: +18 -65 lines

CHANGES.rst (+1 -1)

@@ -9,7 +9,7 @@ Unreleased
 - Use modern packaging metadata with ``pyproject.toml`` instead of ``setup.cfg``.
   :pr:`1793`
 - Use ``flit_core`` instead of ``setuptools`` as build backend.
-- Add the ``preserve_comments`` parameter to ``Environment.parse`` to preserve comments in template ASTs. :pr:`2037`
+- Preserve comments in ASTs when parsing templates with ``Environment.parse``. :pr:`2037`


 Version 3.1.5
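
As a usage note on the updated changelog entry: a minimal sketch of the behaviour it describes, assuming the nodes.Comment node type added by the parent change. Environment.parse now keeps comments in the returned AST by default; there is no preserve_comments flag to pass.

from jinja2 import Environment, nodes

env = Environment()
ast = env.parse("{# greeting #}Hello, {{ name }}!")

# Comment nodes sit alongside the Output nodes in the template body.
for comment in ast.find_all(nodes.Comment):
    print(repr(comment.data))  # expected: ' greeting '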

src/jinja2/environment.py (+3 -11)

@@ -600,7 +600,6 @@ def parse(
         source: str,
         name: t.Optional[str] = None,
         filename: t.Optional[str] = None,
-        preserve_comments: bool = False,
     ) -> nodes.Template:
         """Parse the sourcecode and return the abstract syntax tree. This
         tree of nodes is used by the compiler to convert the template into
@@ -609,12 +608,9 @@ def parse(

         If you are :ref:`developing Jinja extensions <writing-extensions>`
         this gives you a good overview of the node tree generated.
-
-        .. versionchanged:: 3.2
-            Added `preserve_comments` parameter.
         """
         try:
-            return self._parse(source, name, filename, preserve_comments)
+            return self._parse(source, name, filename)
         except TemplateSyntaxError:
             self.handle_exception(source=source)

@@ -623,12 +619,9 @@ def _parse(
         source: str,
         name: t.Optional[str],
         filename: t.Optional[str],
-        preserve_comments: bool = False,
     ) -> nodes.Template:
         """Internal parsing function used by `parse` and `compile`."""
-        return Parser(
-            self, source, name, filename, preserve_comments=preserve_comments
-        ).parse()
+        return Parser(self, source, name, filename).parse()

     def lex(
         self,
@@ -673,13 +666,12 @@ def _tokenize(
         name: t.Optional[str],
         filename: t.Optional[str] = None,
         state: t.Optional[str] = None,
-        preserve_comments: bool = False,
     ) -> TokenStream:
         """Called by the parser to do the preprocessing and filtering
         for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
         """
         source = self.preprocess(source, name, filename)
-        stream = self.lexer.tokenize(source, name, filename, state, preserve_comments)
+        stream = self.lexer.tokenize(source, name, filename, state)

         for ext in self.iter_extensions():
             stream = ext.filter_stream(stream)  # type: ignore

src/jinja2/lexer.py (+4 -34)

@@ -146,22 +146,7 @@
     f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
 )

-comment_tokens = frozenset(
-    [
-        TOKEN_COMMENT_BEGIN,
-        TOKEN_COMMENT,
-        TOKEN_COMMENT_END,
-        TOKEN_LINECOMMENT_BEGIN,
-        TOKEN_LINECOMMENT_END,
-        TOKEN_LINECOMMENT,
-    ]
-)
-ignored_tokens = frozenset(
-    [
-        TOKEN_WHITESPACE,
-        *comment_tokens,
-    ]
-)
+ignored_tokens = frozenset([TOKEN_WHITESPACE])
 ignore_if_empty = frozenset(
     [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
 )
@@ -612,37 +597,22 @@ def tokenize(
         name: t.Optional[str] = None,
         filename: t.Optional[str] = None,
         state: t.Optional[str] = None,
-        preserve_comments: bool = False,
     ) -> TokenStream:
-        """Calls tokeniter + tokenize and wraps it in a token stream.
-
-        .. versionchanged:: 3.2
-            Added `preserve_comments` parameter.
-        """
+        """Calls tokeniter + tokenize and wraps it in a token stream."""
         stream = self.tokeniter(source, name, filename, state)
-        return TokenStream(
-            self.wrap(stream, name, filename, preserve_comments), name, filename
-        )
+        return TokenStream(self.wrap(stream, name, filename), name, filename)

     def wrap(
         self,
         stream: t.Iterable[t.Tuple[int, str, str]],
         name: t.Optional[str] = None,
         filename: t.Optional[str] = None,
-        preserve_comments: bool = False,
     ) -> t.Iterator[Token]:
         """This is called with the stream as returned by `tokenize` and wraps
         every token in a :class:`Token` and converts the value.
-
-        .. versionchanged:: 3.2
-            Added `preserve_comments` parameter.
         """
-        ignored = ignored_tokens
-        if preserve_comments:
-            ignored -= comment_tokens
-
         for lineno, token, value_str in stream:
-            if token in ignored:
+            if token in ignored_tokens:
                 continue

             value: t.Any = value_str
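
A rough sketch of what dropping the comment tokens from ignored_tokens means, using Jinja's internal Lexer.tokenize and TokenStream objects (an illustration of the change, not a supported interface): comment tokens are no longer filtered out in wrap(), so they reach the parser.

from jinja2 import Environment
from jinja2.lexer import TOKEN_COMMENT

env = Environment()
stream = env.lexer.tokenize("{# note #}{{ value }}")

# After this change, TOKEN_COMMENT tokens survive wrap() instead of being
# skipped together with whitespace.
comments = [tok.value for tok in stream if tok.type == TOKEN_COMMENT]
print(comments)  # expected to contain ' note '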

src/jinja2/parser.py (+8 -8)

@@ -57,12 +57,9 @@ def __init__(
         name: t.Optional[str] = None,
         filename: t.Optional[str] = None,
         state: t.Optional[str] = None,
-        preserve_comments: bool = False,
     ) -> None:
         self.environment = environment
-        self.stream = environment._tokenize(
-            source, name, filename, state, preserve_comments
-        )
+        self.stream = environment._tokenize(source, name, filename, state)
         self.name = name
         self.filename = filename
         self.closed = False
@@ -318,10 +315,13 @@ def parse_block(self) -> nodes.Block:
         # with whitespace data
         if node.required:
             for body_node in node.body:
-                if not isinstance(body_node, nodes.Output) or any(
-                    not isinstance(output_node, nodes.TemplateData)
-                    or not output_node.data.isspace()
-                    for output_node in body_node.nodes
+                if not isinstance(body_node, (nodes.Output, nodes.Comment)) or (
+                    isinstance(body_node, nodes.Output)
+                    and any(
+                        not isinstance(output_node, nodes.TemplateData)
+                        or not output_node.data.isspace()
+                        for output_node in body_node.nodes
+                    )
                 ):
                     self.fail("Required blocks can only contain comments or whitespace")
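
The parse_block change matters because comments now survive into the node tree: a required block whose body holds only comments and whitespace should still pass validation. A small sketch of the behaviour implied by the new condition:

from jinja2 import Environment

env = Environment()

# The required block's body is whitespace plus a comment; the updated check
# accepts Comment nodes as well as whitespace-only Output nodes.
env.parse(
    "{% block title required %}  {# filled in by child templates #}{% endblock %}"
)

# Any other content in the body would still raise:
# "Required blocks can only contain comments or whitespace"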

tests/test_lexnparse.py (+2 -11)

@@ -315,27 +315,18 @@ def assert_error(code, expected):
         assert_error("{% unknown_tag %}", "Encountered unknown tag 'unknown_tag'.")

     def test_comment_preservation(self, env):
-        ast = env.parse("{# foo #}{{ bar }}", preserve_comments=True)
+        ast = env.parse("{# foo #}{{ bar }}")
         assert len(ast.body) == 2
         assert isinstance(ast.body[0], nodes.Comment)
         assert ast.body[0].data == " foo "

-        ast = env.parse("{# foo #}{{ bar }}", preserve_comments=False)
-        assert len(ast.body) == 1
-        assert not isinstance(ast.body[0], nodes.Comment)
-
     def test_line_comment_preservation(self, env):
         env = Environment(line_comment_prefix="#")
-
-        ast = env.parse("# foo\n{{ bar }}", preserve_comments=True)
+        ast = env.parse("# foo\n{{ bar }}")
         assert len(ast.body) == 2
         assert isinstance(ast.body[0], nodes.Comment)
         assert ast.body[0].data == " foo"

-        ast = env.parse("# foo\n{{ bar }}", preserve_comments=False)
-        assert len(ast.body) == 1
-        assert not isinstance(ast.body[0], nodes.Comment)
-

 class TestSyntax:
     def test_call(self, env):
