use iterators instead of some handmade crap in parsing.PyFuzzyParser (it is still a hack, however...)
@@ -28,7 +28,7 @@ All those classes are being generated by PyFuzzyParser, which takes python text
 as input and ignores just all the non-python stuff. Basically you could feed it
 a perl script, and it should still work (which means throw no error).
 """
-from _compatibility import (next, literal_eval, StringIO,
+from _compatibility import (literal_eval, StringIO,
                             property, is_py3k, cleandoc, Python3Method)
 
 import tokenize
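
The only change in this hunk is dropping `next` from the `_compatibility` import list; with the parser about to define a real `__next__`/`next` pair (next hunk), the compatibility shim is no longer needed by name here. For orientation, a `next()` backport of the kind such a module provides for Python versions before 2.6, where the builtin is missing, might look like this sketch (hypothetical; jedi's actual `_compatibility` code may differ):

    # Hypothetical sketch of a next() shim for Python < 2.6; the real
    # _compatibility module may differ in detail.
    try:
        next = next  # Python >= 2.6 already has the builtin.
    except NameError:
        _sentinel = object()

        def next(iterator, default=_sentinel):
            """Mimic the 2.6+ builtin: call iterator.next()."""
            try:
                return iterator.next()
            except StopIteration:
                if default is _sentinel:
                    raise
                return default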
@@ -1550,6 +1550,12 @@ class PyFuzzyParser(object):
         return stmt, tok
 
     def next(self):
+        return self.__next__()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
         """ Generate the next tokenize pattern. """
         try:
             self._current_full = next(self.gen)
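
This is the heart of the change: the old hand-rolled `next()` becomes `__next__()`, a thin `next()` alias keeps Python 2 iteration working, and `__iter__()` returning `self` makes the parser a proper iterator, so it can be driven by a plain `for` loop. Stripped of the parser internals, the 2/3-compatible pattern looks like this (illustrative class and token names, not jedi's):

    class TokenStream(object):      # hypothetical illustration class
        def __init__(self, tokens):
            self.gen = iter(tokens)

        def __iter__(self):
            return self             # an iterator is its own iterable

        def next(self):             # Python 2 iteration protocol
            return self.__next__()

        def __next__(self):         # Python 3 iteration protocol
            return next(self.gen)   # StopIteration propagates at EOF

    for tok in TokenStream(['def', 'f', '(', ')', ':']):
        print(tok)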
@@ -1602,10 +1608,10 @@ class PyFuzzyParser(object):
 
         decorators = []
         self.freshscope = True
-        while True:
-            try:
+        self.iterator = iter(self)
+        # This iterator stuff is not intentional. It grew historically.
+        for token_type, tok in self.iterator:
             self.module.temp_used_names = []
-                token_type, tok = self.next()
             #debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\
             # % (tok, tokenize.tok_name[token_type], start_position[0]))
 
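
With the protocol methods in place, the `while True` / `self.next()` loop collapses into a `for` statement. Note that the iterator is stored on `self.iterator` rather than used anonymously; the in-code comment ("not intentional. It grew historically.") hints at why: presumably other parser code advances the same stream out of band via `next(self.iterator)`, skipping tokens the main loop should never see. A sketch of that shape, with hypothetical names:

    class Parser(object):           # hypothetical, not jedi's real class
        def __init__(self, tokens):
            self.gen = iter(tokens)

        def __iter__(self):
            return self

        def __next__(self):
            return next(self.gen)

        next = __next__             # Python 2 compatibility alias

        def parse(self):
            self.iterator = iter(self)
            for tok in self.iterator:       # was: while True / self.next()
                if tok == 'skip-next':
                    next(self.iterator)     # consume a token out of band
                else:
                    print(tok)

    Parser(['a', 'skip-next', 'b', 'c']).parse()   # prints: a, c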
@@ -1787,8 +1793,6 @@ class PyFuzzyParser(object):
                               tokenize.ENDMARKER]:
                 debug.warning('token not classified', tok, token_type,
                               self.start_pos[0])
-            except StopIteration: # thrown on EOF
-                break
 
         del self.buf
         return self.module
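
The final hunk is the payoff of the loop conversion: the `for` statement terminates itself when the underlying `__next__()` raises `StopIteration`, so the old loop's explicit `except StopIteration: break` is now dead code and goes away:

    it = iter(['x', 'y'])
    for tok in it:      # ends cleanly when next(it) raises StopIteration
        print(tok)      # no manual 'except StopIteration: break' needed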