diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index 40ecab1b..d988135c 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -282,8 +282,8 @@ def generate_tokens(readline, line_offset=0):
                     token[:3] in single_quoted:
                 if token[-1] == '\n':  # continued string
                     contstr_start = lnum, start
-                    endprog = (endprogs[initial] or endprogs[token[1]] or
-                               endprogs[token[2]])
+                    endprog = (endprogs.get(initial) or endprogs.get(token[1])
+                               or endprogs.get(token[2]))
                     contstr = line[start:]
                     contline = line
                     break
diff --git a/test/test_parser/test_tokenize.py b/test/test_parser/test_tokenize.py
index 0295040d..9c93a70d 100644
--- a/test/test_parser/test_tokenize.py
+++ b/test/test_parser/test_tokenize.py
@@ -21,3 +21,9 @@ asdfasdf""" + "h"
 ''')) 
         tok = parsed.module.subscopes[0].statements[0]._token_list[2]
         self.assertEqual(tok.end_pos, (4, 11))
+
+
+def test_tokenizer_with_string_literal_backslash():
+    import jedi
+    c = jedi.Script("statement = u'foo\\\n'; statement").goto_definitions()
+    assert c[0]._definition.obj == 'foo'
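
For reviewers, a minimal self-contained sketch of the failure this patch addresses. The endprogs values below are placeholders; in jedi they are compiled end-of-string regexes keyed by quote style, following CPython's tokenize module. With a prefixed string such as u'foo', the first character of the token is the prefix letter, which is not a key in endprogs, so plain indexing raises KeyError, while .get() returns None and lets `or` fall through to the next candidate:

    # Placeholder mapping; real jedi code stores compiled regexes here.
    endprogs = {"'": "single-endprog", '"': "double-endprog"}

    token = "u'foo\\\n"   # a u-prefixed string continued via backslash-newline
    initial = token[0]     # 'u' -- not a key in endprogs

    # Old behavior: endprogs[initial] raises KeyError for prefixed strings.
    try:
        endprog = endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]
    except KeyError as e:
        print('old lookup fails:', e)    # old lookup fails: 'u'

    # New behavior: .get() returns None for a missing key, so `or` falls
    # through to the next candidate instead of raising.
    endprog = (endprogs.get(initial) or endprogs.get(token[1])
               or endprogs.get(token[2]))
    print('new lookup finds:', endprog)  # new lookup finds: single-endprog

The added regression test exercises exactly this path: a u-prefixed string literal continued with a backslash-newline, which previously crashed the tokenizer before goto_definitions() could resolve the statement.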