Mirror of https://github.com/davidhalter/jedi.git, synced 2025-12-06 14:04:26 +08:00

line_offset to offset in Parser, which is important for exact positioning
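
What changes here: everywhere the parser used to be told to shift positions down by a whole number of lines via an integer `line_offset`, it is now handed an `offset` tuple of `(line, column)`. That matters whenever a snippet is re-parsed out of the middle of a line (for example the statement under the cursor), because a pure line shift cannot restore the original column. A rough before/after of the call style, using only names that appear in the hunks below:

    # before: only whole lines could be compensated for
    parsing.Parser(cursor_txt, no_docstr=True, line_offset=self.pos[0] - 1)

    # after: the column where the snippet starts is carried along as well
    parsing.Parser(cursor_txt, no_docstr=True, offset=(self.pos[0] - 1, self.pos[1]))
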
@@ -190,8 +190,8 @@ class Script(object):
         return scopes

     def _get_under_cursor_stmt(self, cursor_txt):
-        offset = self.pos[0] - 1
-        r = parsing.Parser(cursor_txt, no_docstr=True, line_offset=offset)
+        offset = self.pos[0] - 1, self.pos[1]
+        r = parsing.Parser(cursor_txt, no_docstr=True, offset=offset)
         try:
             stmt = r.module.statements[0]
         except IndexError:

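`_get_under_cursor_stmt` re-parses only the text of the current statement, so the throwaway parser reports positions relative to that little buffer, starting at line 1, column 0. The offset is what maps those positions back onto the user's file, and with a bare line offset the column information was simply lost. A minimal sketch of the tuple arithmetic this commit introduces (`shift` is a hypothetical helper, not jedi code):

    def shift(pos, offset):
        # position reported by the snippet parser + (line, column) offset of the snippet
        return offset[0] + pos[0], offset[1] + pos[1]

    # the cursor statement starts at line 12, column 8 of the real file -> offset (11, 8)
    print(shift((1, 3), (11, 8)))   # (12, 11): line and column both restored
    print(shift((1, 3), (11, 0)))   # (12, 3): what a line-only offset used to yield
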
@@ -56,10 +56,10 @@ class PushBackIterator(object):


 class NoErrorTokenizer(object):
-    def __init__(self, readline, line_offset=0, stop_on_scope=False):
+    def __init__(self, readline, offset=(0, 0), stop_on_scope=False):
         self.readline = readline
         self.gen = PushBackIterator(tokenize.generate_tokens(readline))
-        self.line_offset = line_offset
+        self.offset = offset
         self.stop_on_scope = stop_on_scope
         self.first_scope = False
         self.closed = False

@@ -90,7 +90,8 @@ class NoErrorTokenizer(object):
             debug.warning('indentation error on line %s, ignoring it' %
                           self.current[2][0])
             # add the starting line of the last position
-            self.line_offset += self.current[2][0]
+            self.offset = (self.offset[0] + self.current[2][0],
+                           self.current[2][1])
             self.gen = PushBackIterator(tokenize.generate_tokens(
                 self.readline))
             return self.__next__()

@@ -106,8 +107,8 @@ class NoErrorTokenizer(object):
             elif c[1] != '@':
                 self.first_scope = True

-        c[2] = self.line_offset + c[2][0], c[2][1]
-        c[3] = self.line_offset + c[3][0], c[3][1]
+        c[2] = self.offset[0] + c[2][0], self.offset[1] + c[2][1]
+        c[3] = self.offset[0] + c[3][0], self.offset[1] + c[3][1]
         return c

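`NoErrorTokenizer` is where the offset is actually applied: every token coming out of `tokenize` carries `(row, column)` start and end positions relative to the text it was fed (`c[2]` and `c[3]` above), and the wrapper shifts them into coordinates of the full file. A stripped-down, stdlib-only sketch of the same idea (not jedi's class, just the technique):

    import io
    import tokenize

    def offset_tokens(source, offset=(0, 0)):
        """Yield (type, string, start, end) with positions shifted by `offset`."""
        readline = io.StringIO(source).readline
        for tok in tokenize.generate_tokens(readline):
            start = (offset[0] + tok.start[0], offset[1] + tok.start[1])
            end = (offset[0] + tok.end[0], offset[1] + tok.end[1])
            yield tok.type, tok.string, start, end

    # pretend the snippet was cut out of a file at line 20, column 4 -> offset (19, 4)
    for tok in offset_tokens("x = 1\n", offset=(19, 4)):
        print(tok)
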
@@ -282,10 +282,7 @@ def _check_array_additions(compare_array, module, is_list):
         if isinstance(element, er.Array):
             stmt = element._array.parent
         else:
             # must be instance
-            if isinstance(element.var_args, list):
-                return None  # TODO check if this is ok
             stmt = element.var_args.parent
-            return None
         if isinstance(stmt, er.InstanceElement):
             stop_classes = list(stop_classes) + [er.Function]
         return stmt.get_parent_until(stop_classes)

@@ -296,8 +293,6 @@ def _check_array_additions(compare_array, module, is_list):
     search_names = ['append', 'extend', 'insert'] if is_list else \
                                                      ['add', 'update']
     comp_arr_parent = get_execution_parent(compare_array, er.Execution)
-    if comp_arr_parent is None:
-        return []  # TODO check if this is ok

     possible_stmts = []
     res = []

@@ -311,14 +306,14 @@ def _check_array_additions(compare_array, module, is_list):
             # can search for the same statement, that is in the module
             # dict. Executions are somewhat special in jedi, since they
             # literally copy the contents of a function.
-            if isinstance(comp_arr_parent, er.Execution):
+            if compare_array and isinstance(comp_arr_parent, er.Execution):
                 stmt = comp_arr_parent. \
                     get_statement_for_position(stmt.start_pos)
                 if stmt is None:
                     continue
             # InstanceElements are special, because they don't get copied,
             # but have this wrapper around them.
-            if isinstance(comp_arr_parent, er.InstanceElement):
+            if compare_array and isinstance(comp_arr_parent, er.InstanceElement):
                 stmt = er.InstanceElement(comp_arr_parent.instance, stmt)

             if evaluate.follow_statement.push_stmt(stmt):

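Judging by the hunks above, `_check_array_additions` is the dynamic-analysis pass that follows container-mutating calls (the `search_names` listed there: `append`, `extend`, `insert` for lists, `add`, `update` for sets) to work out what actually ends up inside an array. The kind of source it has to understand looks like the snippet below, written in the same `#?` expected-type notation as the test hunk at the end of this commit; the exact expected set is my illustration, not a line from this diff:

    a = []
    a.append(1.0)
    a.append('')
    #? float() str()
    a[0]
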
@@ -256,7 +256,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             else:
                 p = parsing.Parser(code[start:],
                                    self.module_path, self.user_position,
-                                   line_offset=line_offset, stop_on_scope=True,
+                                   offset=(line_offset, 0), stop_on_scope=True,
                                    top_module=self.module)

                 p.hash = h

@@ -264,7 +264,7 @@ class ModuleWithCursor(Module):
         offset = max(self.position[0] - length, 0)
         s = '\n'.join(self.source.splitlines()[offset:offset + length])
         self._part_parser = parsing.Parser(s, self.path, self.position,
-                                           line_offset=offset)
+                                           offset=(offset, 0))
         return self._part_parser

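Both call sites above re-parse slices of the source that begin at the start of a line (`code[start:]` in `FastParser`, a window of whole lines in `ModuleWithCursor`), so only the row needs compensating and the column half of the tuple stays 0, hence `offset=(line_offset, 0)` and `offset=(offset, 0)`. A small stand-alone illustration of that line windowing (hypothetical helper, not jedi's code):

    def line_window(source, position_line, length):
        """Return (window_text, offset) for up to `length` lines ending near position_line."""
        start = max(position_line - length, 0)
        lines = source.splitlines()[start:start + length]
        # the window always begins at column 0, so the column offset is 0
        return '\n'.join(lines), (start, 0)

    text, offset = line_window("a = 1\nb = 2\nc = 3\n", position_line=3, length=2)
    print(offset)   # (1, 0)
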
@@ -47,7 +47,7 @@ class Parser(object):
     :param top_module: Use this module as a parent instead of `self.module`.
     """
     def __init__(self, source, module_path=None, user_position=None,
-                 no_docstr=False, line_offset=0, stop_on_scope=None,
+                 no_docstr=False, offset=(0, 0), stop_on_scope=None,
                  top_module=None):
         self.user_position = user_position
         self.user_scope = None

@@ -55,21 +55,17 @@ class Parser(object):
         self.no_docstr = no_docstr

         # initialize global Scope
-        self.module = pr.SubModule(module_path, (line_offset + 1, 0),
+        self.module = pr.SubModule(module_path, (offset[0] + 1, offset[1]),
                                    top_module)
         self.scope = self.module
         self.current = (None, None)
         self.start_pos = 1, 0
         self.end_pos = 1, 0

         # Stuff to fix tokenize errors. The parser is pretty good in tolerating
         # any errors of tokenize and just parse ahead.
-        self._line_offset = line_offset

         source = source + '\n'  # end with \n, because the parser needs it
         buf = StringIO(source)
-        self._gen = common.NoErrorTokenizer(buf.readline, line_offset,
-                                            stop_on_scope)
+        self._gen = common.NoErrorTokenizer(buf.readline, offset,
+                                            stop_on_scope)
         self.top_module = top_module or self.module
         try:
             self._parse()

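Inside `Parser.__init__` the tuple feeds two places: the generated `SubModule` now starts at `(offset[0] + 1, offset[1])`, since lines are 1-based while the offset counts lines already consumed and the column is taken over unchanged, and the same tuple is passed straight through to `NoErrorTokenizer` instead of the old line count. A quick check of the start-position arithmetic (plain Python, no jedi imports):

    def module_start(offset):
        # mirrors pr.SubModule(module_path, (offset[0] + 1, offset[1]), top_module)
        return offset[0] + 1, offset[1]

    print(module_start((0, 0)))    # ordinary top-level parse -> (1, 0)
    print(module_start((41, 8)))   # snippet cut out at line 42, column 8 -> (42, 8)
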
@@ -82,6 +82,9 @@ for i in list([1,'']):
 #? int() str()
 i

+#? int() str()
+for x in [1,'']: x
+
 a = []
 b = [1.0,'']
 for i in b:

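The `#?` lines are the expected-inference annotations used throughout jedi's integration tests: each one lists the results that should be inferred for the expression on the following line. The added case asserts that iterating a list literal directly in a `for` header still yields both element types,

    #? int() str()
    for x in [1,'']: x

i.e. completing on `x` here should offer both `int` and `str`, exactly like the `for i in list([1,''])` loop in the hunk's context line.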