Fixed issues with last positions in the tokenizer, which were slightly broken a few commits ago.

This commit is contained in:
Dave Halter
2015-04-27 21:42:40 +02:00
parent 0a96083fde
commit b6ebb2f8bf
2 changed files with 7 additions and 5 deletions

View File

@@ -282,7 +282,9 @@ def generate_tokens(readline):
paren_level -= 1 paren_level -= 1
yield OP, token, spos, prefix yield OP, token, spos, prefix
end_pos = (lnum, max - 1)
# As the last position we just take the maximally possible position. We
# remove -1 for the last new line.
for indent in indents[1:]: for indent in indents[1:]:
yield DEDENT, '', (lnum, max), '' yield DEDENT, '', end_pos, ''
# As the last position we just take the max possible. yield ENDMARKER, '', end_pos, prefix
yield ENDMARKER, '', (lnum, max), prefix

View File

@@ -430,11 +430,11 @@ def test_incomplete_function():
def test_string_literals(): def test_string_literals():
"""Simplified case of jedi-vim#377.""" """Simplified case of jedi-vim#377."""
source = dedent(""" source = dedent("""
x = ur''' ''' x = ur'''
def foo(): def foo():
pass pass
x""") """)
script = jedi.Script(dedent(source)) script = jedi.Script(dedent(source))
assert script.completions() assert script.completions()