forked from VimPlug/jedi
lambdas: fix problems with reverse tokenizer

@@ -118,9 +118,13 @@ class ModuleWithCursor(Module):
        string = ''
        level = 0
        force_point = False
        last_type = None
        try:
            for token_type, tok, start, end, line in gen:
                #print 'tok', token_type, tok, force_point
                if last_type == token_type == tokenize.NAME:
                    string += ' '

                if level > 0:
                    if tok in close_brackets:
                        level += 1
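
For context, this hunk belongs to the routine that scans backwards from the cursor to recover the expression under it, so a closing bracket is what opens a nesting level and an opening bracket closes it again. The `string += ' '` guard keeps two adjacent NAME tokens (think of `import os` read right-to-left) from being glued into one word. Below is a minimal, purely illustrative character-level sketch of the same reverse-scan idea; the function name and the simplification to raw characters are assumptions, jedi itself works on a reversed token stream:

# Hypothetical sketch: walk backwards from the cursor and collect the dotted
# "path" under it. Reading right-to-left, ')' ']' '}' open a nesting level
# and '(' '[' '{' close it again.
def path_until_cursor(line, column):
    open_brackets, close_brackets = '([{', ')]}'
    level = 0
    start = column
    i = column - 1
    while i >= 0:
        c = line[i]
        if level > 0:
            if c in close_brackets:
                level += 1
            elif c in open_brackets:
                level -= 1
        elif c in close_brackets:
            level += 1
        elif not (c.isalnum() or c in '._'):
            break  # anything else ends the expression
        start = i
        i -= 1
    return line[start:column]

print(path_until_cursor("x = foo.bar(a, b).baz", 21))  # -> foo.bar(a, b).baz
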
@@ -146,6 +150,7 @@ class ModuleWithCursor(Module):

                self._column_temp = self._line_length - end[1]
                string += tok
                last_type = token_type
        except tokenize.TokenError:
            debug.warning("Tokenize couldn't finish", sys.exc_info)
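
The `except tokenize.TokenError` above is less an error path than the normal exit: tokenizing a reversed and usually incomplete line almost always ends in unbalanced brackets. A standalone sketch of that pattern using the stdlib tokenizer (assumed helper name, not jedi's code):

import io
import tokenize

def reversed_tokens(line):
    # Tokenize the character-reversed line and stop quietly once the
    # tokenizer gives up on the (expectedly) unbalanced input.
    readline = io.StringIO(line[::-1]).readline
    toks = []
    try:
        for token_type, tok, start, end, _ in tokenize.generate_tokens(readline):
            toks.append((tokenize.tok_name[token_type], tok))
    except tokenize.TokenError:
        pass
    return toks

print(reversed_tokens("foo.bar("))  # tokens of "(rab.oof"
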
@@ -1322,6 +1322,7 @@ class PyFuzzyParser(object):

        n = Name(self.module, names, first_pos, self.end_pos) if names \
            else None
        #if self.module.path != '__builtin__': print n
        return n, token_type, tok

    def _parseimportlist(self):
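
The `return n, token_type, tok` above follows a common hand-written parser convention: a helper returns both the node it built (or None when nothing matched) and the token it stopped on, so the caller can keep consuming the same stream without pushing anything back. A hypothetical, self-contained sketch of that convention:

def parse_dotted_name(tokens):
    # Consume identifier and '.' tokens, then hand back both the result
    # (or None) and the first token that did not belong to the name.
    names = []
    tok = next(tokens, None)
    while tok is not None and (tok == '.' or tok.isidentifier()):
        if tok != '.':
            names.append(tok)
        tok = next(tokens, None)
    name = '.'.join(names) if names else None
    return name, tok

print(parse_dotted_name(iter(['foo', '.', 'bar', '(', ')'])))  # ('foo.bar', '(')
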
@@ -1555,8 +1556,9 @@ class PyFuzzyParser(object):

                if ret is not None:
                    ret.parent = lambd
                    lambd.returns.append(ret)
                    lambd.end_pos = ret.end_pos
                lambd.parent = self.scope
                lambd.end_pos = self.end_pos
                #print lambd, added_breaks, ret, param
                tok_list[-1] = lambd
                continue
            elif token_type == tokenize.NAME:
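
This hunk wires the parsed return expression into the freshly built lambda node and keeps the node's end position in step with it. One way to read that bookkeeping, sketched with toy stand-in classes (jedi's real Lambda and Statement classes carry far more state, and the exact end_pos choice here is an assumption):

class Node:
    def __init__(self, end_pos):
        self.end_pos = end_pos  # (line, column)
        self.parent = None

class Lambda(Node):
    def __init__(self, end_pos):
        super().__init__(end_pos)
        self.returns = []

def attach_return(lambd, ret, parser_pos):
    # The return expression, if any, becomes a child of the lambda and
    # dictates where the lambda ends; otherwise fall back to the parser's
    # current position.
    if ret is not None:
        ret.parent = lambd
        lambd.returns.append(ret)
        lambd.end_pos = ret.end_pos
    else:
        lambd.end_pos = parser_pos
    return lambd

lam = attach_return(Lambda((1, 0)), Node((1, 17)), (2, 0))
print(lam.end_pos)  # (1, 17)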