Errortokens should also make the parser fail in the normal parser.

Dave Halter
2015-12-20 23:11:52 +01:00
parent 5791860861
commit c15551ccc1
3 changed files with 5 additions and 2 deletions


@@ -156,7 +156,9 @@ class Parser(object):
     def _tokenize(self, tokenizer):
         for typ, value, start_pos, prefix in tokenizer:
-            if typ == OP:
+            if typ == ERRORTOKEN:
+                raise Parser.ParserError
+            elif typ == OP:
                 typ = token.opmap[value]
             yield typ, value, prefix, start_pos

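For illustration only, a minimal sketch of the kind of input that ends up as an ERRORTOKEN, which the hunk above now turns into a Parser.ParserError instead of passing through. It uses the standard library's tokenize module rather than jedi's own tokenizer (an assumption made purely for the example), and the exact behaviour differs between Python versions:

# Sketch, not part of the commit: show a character that matches no token
# pattern and has historically been reported as an ERRORTOKEN.
import io
import tokenize

source = "a = 1 $\n"   # '$' is not valid Python
try:
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
except (tokenize.TokenError, SyntaxError) as exc:
    # Newer interpreters may raise here instead of emitting an ERRORTOKEN.
    print("tokenizer error:", exc)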

@@ -286,7 +286,7 @@ def generate_tokens(readline):
     if new_line:
         end_pos = lnum + 1, 0
     else:
-        end_pos = lnum, max - 1
+        end_pos = lnum, max
     # As the last position we just take the maximally possible position. We
     # remove -1 for the last new line.
     for indent in indents[1:]:

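A small worked illustration of the off-by-one fix above, assuming (as in the surrounding code of generate_tokens) that `max` holds the length of the current line: for a last line without a trailing newline, the end column now points one past the final character instead of at it.

# Hypothetical values, only to illustrate the end-position change.
line = "x = 1"            # last line of a file without a trailing newline
lnum = 3                  # 1-based line number
length = len(line)        # plays the role of `max` in generate_tokens

old_end_pos = (lnum, length - 1)   # (3, 4): stopped one column short
new_end_pos = (lnum, length)       # (3, 5): one past the last character
print(old_end_pos, new_end_pos)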

@@ -51,6 +51,7 @@ def test_illegal_forward_references(reference):
     assert not jedi.Script(source).goto_definitions()


+@pytest.mark.skipif('sys.version_info[0] < 3')
 def test_lambda_forward_references():
     source = 'def foo(bar: "lambda: 3"): bar'
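The added marker skips the test on Python 2, where function annotations such as def foo(bar: ...) are a SyntaxError. As a side note, a sketch of the same guard written with pytest's boolean condition form instead of the string form used in the diff (an assumption, not part of the commit):

import sys

import pytest

# Reusable marker (hypothetical name): function annotations are Python 3-only
# syntax, so tests that use them are skipped on Python 2.
py3_only = pytest.mark.skipif(sys.version_info[0] < 3,
                              reason="function annotations need Python 3")

Applying it as @py3_only above the test has the same effect as the string condition shown in the hunk.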