From cf880f43d4e5b82510ef7eadd8098bdf7ad96081 Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Sat, 28 Mar 2020 14:41:10 +0100
Subject: [PATCH] Tokenizer: Add error dedents only if parens are not open

---
 parso/python/tokenize.py | 2 +-
 test/test_diff_parser.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/parso/python/tokenize.py b/parso/python/tokenize.py
index eda4c99..3dcb6c4 100644
--- a/parso/python/tokenize.py
+++ b/parso/python/tokenize.py
@@ -498,7 +498,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             pseudomatch = pseudo_token.match(line, pos)
             if not pseudomatch:  # scan for tokens
                 match = whitespace.match(line, pos)
-                if pos == 0:
+                if pos == 0 and paren_level == 0:
                     for t in dedent_if_necessary(match.end()):
                         yield t
                 pos = match.end()
diff --git a/test/test_diff_parser.py b/test/test_diff_parser.py
index 4f4bc1c..b7f5a5c 100644
--- a/test/test_diff_parser.py
+++ b/test/test_diff_parser.py
@@ -1007,7 +1007,7 @@ def test_random_unicode_characters(differ):
     differ.parse(s, parsers=1, expect_error_leaves=True)
     differ.parse('')
     differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
-    differ.parse(u' result = (\r\f\x17\t\x11res)', parsers=2, expect_error_leaves=True)
+    differ.parse(u' result = (\r\f\x17\t\x11res)', parsers=1, expect_error_leaves=True)
     differ.parse('')
     differ.parse(' a( # xx\ndef', parsers=2, expect_error_leaves=True)
 
@@ -1124,7 +1124,7 @@ def test_all_sorts_of_indentation(differ):
         end
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=1, parsers=4, expect_error_leaves=True)
+    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
     differ.parse(code1, copies=1, parsers=3, expect_error_leaves=True)
 
     code3 = dedent('''\
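
Illustration (not part of the patch): a minimal sketch of how the change can be
observed through the tokenizer. It assumes the tokenize_lines() signature shown
in the hunk header above plus parso's split_lines/parse_version_string helpers
from the same era; details may differ between parso versions.

    from parso.python.tokenize import tokenize_lines
    from parso.utils import parse_version_string, split_lines

    # The input from the test above: an indented line whose parenthesis stays
    # open across characters that the pseudo-token regex rejects.
    code = u' result = (\r\f\x17\t\x11res)'
    lines = split_lines(code, keepends=True)

    # Error recovery means this never raises; it yields tokens such as ERRORTOKEN.
    for token in tokenize_lines(lines, parse_version_string('3.8')):
        print(token)

With "and paren_level == 0" in place, error dedents are no longer emitted for
such a line while a bracket is still open, which matches the updated
expectations in the diff-parser tests above (parsers=1 instead of 2 and 4).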