diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index 21a3315e..534386aa 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -156,7 +156,9 @@ class Parser(object):
 
     def _tokenize(self, tokenizer):
         for typ, value, start_pos, prefix in tokenizer:
-            if typ == OP:
+            if typ == ERRORTOKEN:
+                raise Parser.ParserError
+            elif typ == OP:
                 typ = token.opmap[value]
             yield typ, value, prefix, start_pos
 
diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index ebc8a4ee..ac3cabec 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -286,7 +286,7 @@ def generate_tokens(readline):
     if new_line:
         end_pos = lnum + 1, 0
     else:
-        end_pos = lnum, max - 1
+        end_pos = lnum, max
     # As the last position we just take the maximally possible position. We
-    # remove -1 for the last new line.
+    # no longer strip the trailing newline from it.
     for indent in indents[1:]:
diff --git a/test/test_evaluate/test_annotations.py b/test/test_evaluate/test_annotations.py
index 7897ffb0..67fe84e1 100644
--- a/test/test_evaluate/test_annotations.py
+++ b/test/test_evaluate/test_annotations.py
@@ -51,6 +51,7 @@ def test_illegal_forward_references(reference):
     assert not jedi.Script(source).goto_definitions()
 
 
+@pytest.mark.skipif('sys.version_info[0] < 3')
 def test_lambda_forward_references():
     source = 'def foo(bar: "lambda: 3"): bar'