diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index faa8bc77..b9c8e982 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -155,7 +155,7 @@ def generate_tokens(readline, line_offset=0):
     lnum = line_offset
     continued = False
     numchars = '0123456789'
-    contstr, needcont = '', 0
+    contstr, needcont = '', False
     contline = None
     indents = [0]
 
@@ -163,6 +163,10 @@ def generate_tokens(readline, line_offset=0):
         try:
             line = readline()
         except StopIteration:
+            if contstr:
+                yield TokenInfo(ERRORTOKEN, contstr, strstart, (lnum, pos))
+                contstr, needcont = '', False
+                contline = None
             line = b''
         lnum += 1
 
@@ -176,14 +180,15 @@ def generate_tokens(readline, line_offset=0):
             if endmatch:
                 pos = end = endmatch.end(0)
                 yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end))
-                contstr, needcont = '', 0
+                contstr, needcont = '', False
                 contline = None
             elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
-                yield TokenInfo(ERRORTOKEN, contstr + line,
-                                strstart, (lnum, len(line)))
-                contstr = ''
-                contline = None
-                continue
+                #yield TokenInfo(ERRORTOKEN, contstr + line,
+                #                strstart, (lnum, len(line)))
+                #contstr = ''
+                #contline = None
+                #continue
+                pass
             else:
                 contstr = contstr + line
                 contline = contline + line
@@ -264,7 +269,7 @@ def generate_tokens(readline, line_offset=0):
                     strstart = (lnum, start)
                     endprog = (endprogs[initial] or endprogs[token[1]] or
                                endprogs[token[2]])
-                    contstr, needcont = line[start:], 1
+                    contstr, needcont = line[start:], True
                     contline = line
                     break
                 else:                                  # ordinary string
diff --git a/test/completion/basic.py b/test/completion/basic.py
index 41e489e6..8fe39faf 100644
--- a/test/completion/basic.py
+++ b/test/completion/basic.py
@@ -265,3 +265,12 @@ except ImportError, i_b:
     i_b
 #? ImportError()
 i_b
+
+# -----------------
+# continuations
+# -----------------
+
+foo = \
+1
+#? int()
+foo