forked from VimPlug/jedi
use iterators instead of some handmade crap in parsing.PyFuzzyParser (it is still a hack, however...)
This commit is contained in:
@@ -28,7 +28,7 @@ All those classes are being generated by PyFuzzyParser, which takes python text
|
||||
as input and ignores just all the non-python stuff. Basically you could feed it
|
||||
a perl script, and it should still work (which means throw no error).
|
||||
"""
|
||||
from _compatibility import (next, literal_eval, StringIO,
|
||||
from _compatibility import (literal_eval, StringIO,
|
||||
property, is_py3k, cleandoc, Python3Method)
|
||||
|
||||
import tokenize
|
||||
@@ -1550,6 +1550,12 @@ class PyFuzzyParser(object):
|
||||
return stmt, tok
|
||||
|
||||
def next(self):
|
||||
return self.__next__()
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
""" Generate the next tokenize pattern. """
|
||||
try:
|
||||
self._current_full = next(self.gen)
|
||||
@@ -1602,10 +1608,10 @@ class PyFuzzyParser(object):
|
||||
|
||||
decorators = []
|
||||
self.freshscope = True
|
||||
while True:
|
||||
try:
|
||||
self.iterator = iter(self)
|
||||
# This iterator stuff is not intentional. It grew historically.
|
||||
for token_type, tok in self.iterator:
|
||||
self.module.temp_used_names = []
|
||||
token_type, tok = self.next()
|
||||
#debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\
|
||||
# % (tok, tokenize.tok_name[token_type], start_position[0]))
|
||||
|
||||
@@ -1787,8 +1793,6 @@ class PyFuzzyParser(object):
|
||||
tokenize.ENDMARKER]:
|
||||
debug.warning('token not classified', tok, token_type,
|
||||
self.start_pos[0])
|
||||
except StopIteration: # thrown on EOF
|
||||
break
|
||||
|
||||
del self.buf
|
||||
return self.module
|
||||
|
||||
Reference in New Issue
Block a user