forked from VimPlug/jedi
refactor use of NoErrorTokenizer
@@ -29,10 +29,11 @@ class PushBackIterator(object):
 
 
 class NoErrorTokenizer(object):
-    def __init__(self, readline, line_offset=0):
+    def __init__(self, readline, line_offset=0, stop_on_scope=False):
         self.readline = readline
         self.gen = PushBackIterator(tokenize.generate_tokens(readline))
         self.line_offset = line_offset
+        self.stop_on_scope = stop_on_scope
 
     def push_last_back(self):
         self.gen.push_back(self.current)
@@ -61,7 +62,13 @@ class NoErrorTokenizer(object):
             self.gen = PushBackIterator(tokenize.generate_tokens(
                 self.readline))
             self.current = self.next()
 
         c = list(self.current)
+
+        # stop if a new class or definition is started at position zero.
+        if self.stop_on_scope and c[1] in ['def', 'class'] and c[2][1] == 0:
+            raise StopIteration()
+
         c[2] = self.line_offset + c[2][0], c[2][1]
         return c
+
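The check added above aborts token generation as soon as a new top-level def or class begins, which (per the docstring change further down) is what the fast parser uses to cut a module into scope-sized chunks. A minimal standalone sketch of the same check; scope_limited_tokens is a hypothetical helper, not part of jedi:

    import tokenize
    from io import StringIO

    def scope_limited_tokens(code, stop_on_scope=False):
        # Mirror the added check: stop before a 'def'/'class' token that
        # starts at column zero, i.e. before a new top-level scope.
        for tok in tokenize.generate_tokens(StringIO(code).readline):
            # tok[1] is the token string, tok[2] its (row, column) start
            if stop_on_scope and tok[1] in ('def', 'class') and tok[2][1] == 0:
                return
            yield tok

    src = "x = 1\ndef f():\n    pass\n"
    print([t[1] for t in scope_limited_tokens(src, stop_on_scope=True)])
    # -> ['x', '=', '1', '\n']  (stops before the top-level 'def')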
@@ -244,6 +244,8 @@ class Module(Scope):
         self._name = None
         self.used_names = {}
         self.temp_used_names = []
+        # this may be changed depending on fast_parser
+        self.line_offset = 0
 
     def add_global(self, name):
         """
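The new Module.line_offset mirrors the offset arithmetic already present in NoErrorTokenizer (c[2] = self.line_offset + c[2][0], c[2][1]): a chunk parsed in isolation still reports positions in whole-file coordinates. A small standalone illustration of that arithmetic, with made-up values:

    import tokenize
    from io import StringIO

    chunk = "def f():\n    pass\n"  # imagine this chunk starts at file line 120
    line_offset = 119               # tokenize rows are 1-based

    for tok in tokenize.generate_tokens(StringIO(chunk).readline):
        row, col = tok[2]
        # same arithmetic as c[2] = self.line_offset + c[2][0], c[2][1]
        print(repr(tok[1]), (line_offset + row, col))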
@@ -1137,10 +1139,10 @@ class PyFuzzyParser(object):
     :param user_position: The line/column, the user is currently on.
     :type user_position: tuple(int, int)
     :param no_docstr: If True, a string at the beginning is not a docstr.
-    :param tokenize_gen: A prepared tokenize generator -> for fast_parser
+    :param stop_on_scope: Stop if a scope appears -> for fast_parser
     """
     def __init__(self, code, module_path=None, user_position=None,
-                 no_docstr=False, line_offset=0, tokenize_gen=None):
+                 no_docstr=False, line_offset=0, stop_on_scope=None):
         self.user_position = user_position
         self.user_scope = None
         self.user_stmt = None
@@ -1157,12 +1159,10 @@ class PyFuzzyParser(object):
         # any errors of tokenize and just parse ahead.
         self._line_offset = line_offset
 
-        if tokenize_gen is None:
-            code = code + '\n'  # end with \n, because the parser needs it
-            buf = StringIO(code)
-            self.gen = common.NoErrorTokenizer(buf.readline, line_offset)
-        else:
-            self.gen = tokenize_gen
+        code = code + '\n'  # end with \n, because the parser needs it
+        buf = StringIO(code)
+        self.gen = common.NoErrorTokenizer(buf.readline, line_offset,
+                                           stop_on_scope)
         self.parse()
 
     def __repr__(self):
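Net effect of this hunk: PyFuzzyParser no longer accepts an injected tokenize_gen; it always builds its own NoErrorTokenizer and merely forwards the stop_on_scope flag. A hedged sketch of that wiring with stand-in classes; MiniTokenizer and MiniParser are hypothetical, not jedi's:

    import tokenize
    from io import StringIO

    class MiniTokenizer(object):
        # Stand-in for common.NoErrorTokenizer, reduced to this
        # commit's constructor interface.
        def __init__(self, readline, line_offset=0, stop_on_scope=False):
            self.gen = tokenize.generate_tokens(readline)
            self.line_offset = line_offset
            self.stop_on_scope = stop_on_scope

    class MiniParser(object):
        # Stand-in for PyFuzzyParser.__init__ after the refactor: no
        # tokenize_gen branch, the tokenizer is always built here.
        def __init__(self, code, line_offset=0, stop_on_scope=False):
            code = code + '\n'  # end with \n, because the parser needs it
            buf = StringIO(code)
            self.gen = MiniTokenizer(buf.readline, line_offset, stop_on_scope)

    p = MiniParser("x = 1", line_offset=119, stop_on_scope=True)
    print(next(p.gen.gen)[1])  # first token from the embedded generator: 'x'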