fix comment problem (code shouldn't be parsed multiple times)
@@ -85,20 +85,21 @@ class NoErrorTokenizer(object):
# (This is a rather unlikely error message, for normal code,
# tokenize seems to be pretty tolerant)
debug.warning('indentation error on line %s, ignoring it' %
(self.start_pos[0]))
self.current[2][0])
# add the starting line of the last position
self.line_offset += self.current[2][0]
self.gen = PushBackIterator(tokenize.generate_tokens(
self.readline))
self.current = self.next()
return self.__next__()

c = list(self.current)

# stop if a new class or definition is started at position zero.
if self.stop_on_scope and c[1] in ['def', 'class'] and c[2][1] == 0:
breaks = ['def', 'class', '@']
if self.stop_on_scope and c[1] in breaks and c[2][1] == 0:
if self.first_scope:
raise StopIteration()
else:
raise StopIteration
elif c[1] != '@':
self.first_scope = True

c[2] = self.line_offset + c[2][0], c[2][1]
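Read as a before/after pair, the hunk above appears to do two things: the guard that ends a tokenizer run now also fires on a top-level '@' (breaks = ['def', 'class', '@']), and a bare decorator no longer counts as having opened the first scope (elif c[1] != '@'). Below is a small, self-contained sketch of that stop-on-scope idea built only on the stdlib tokenize module; the function name, the driver block and everything not copied from the diff are illustrative assumptions, not jedi's actual API.

    # Stop-on-scope sketch: yield tokens until a *second* top-level
    # def/class/@ begins.  Only `breaks`, the column-0 test and the
    # first_scope bookkeeping are taken from the diff; the rest is
    # illustrative.
    import io
    import tokenize

    def tokens_until_next_scope(source):
        first_scope = False
        breaks = ['def', 'class', '@']            # '@' is the new entry
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok[1] in breaks and tok[2][1] == 0:   # token starts at column 0
                if first_scope:
                    return        # generator equivalent of raise StopIteration
                elif tok[1] != '@':
                    # a lone decorator does not open the scope by itself
                    first_scope = True
            yield tok

    if __name__ == '__main__':
        code = "@deco\ndef f():\n    pass\n\ndef g():\n    pass\n"
        print([t[1] for t in tokens_until_next_scope(code) if t[1].strip()])

With the sample source the generator hands back the decorated def f() but stops before the tokens of def g(), which is exactly the point where the real tokenizer raises StopIteration.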
@@ -2,7 +2,6 @@ import re
import operator
from functools import reduce

import common
import parsing
from _compatibility import use_metaclass
@@ -11,9 +10,9 @@ parser_cache = {}

class Module(parsing.Simple, parsing.Module):
def __init__(self, parsers):
self._end_pos = None, None
super(Module, self).__init__((1,0))
self.parsers = parsers
self._end_pos = None, None
self.reset_caches()

def reset_caches(self):
@@ -113,7 +112,8 @@ class Module(parsing.Simple, parsing.Module):

@end_pos.setter
def end_pos(self, value):
if None not in value and self._end_pos < value:
if None in self._end_pos \
or None not in value and self._end_pos < value:
self._end_pos = value

def __repr__(self):
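The two Module hunks read together: the duplicated self._end_pos = None, None lines suggest the initialisation moves ahead of the super(Module, self).__init__((1,0)) call, and the setter now tolerates a still-unset (None, None) value instead of comparing it directly. A self-contained sketch of just that bookkeeping follows; the real base classes parsing.Simple / parsing.Module are deliberately left out, and the class name and driver code are made up for illustration.

    class ModuleEndPos(object):
        """Models only the end_pos bookkeeping visible in the hunks above."""
        def __init__(self):
            # set before anything can assign end_pos, so the setter below
            # can always inspect self._end_pos
            self._end_pos = None, None

        @property
        def end_pos(self):
            return self._end_pos

        @end_pos.setter
        def end_pos(self, value):
            # keep the largest (line, column) seen so far: a still-unset
            # (None, None) is always overwritten, and a value containing
            # None never replaces a real position
            if None in self._end_pos \
                    or None not in value and self._end_pos < value:
                self._end_pos = value

    if __name__ == '__main__':
        m = ModuleEndPos()
        m.end_pos = (10, 4)       # replaces the initial (None, None)
        m.end_pos = (3, 0)        # smaller position, ignored
        m.end_pos = (None, None)  # None never overwrites a real position
        print(m.end_pos)          # -> (10, 4)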
@@ -184,15 +184,20 @@ class FastParser(use_metaclass(CachedFastParser)):
parts = re.findall(r, code, re.DOTALL)

line_offset = 0
for p in parts:
lines = p.count('\n')
p = parsing.PyFuzzyParser(p, self.module_path, self.user_position,
start = 0
p = None
for s in parts:
lines = s.count('\n')
if p is None or line_offset >= p.end_pos[0] - 2:
p = parsing.PyFuzzyParser(code[start:],
self.module_path, self.user_position,
line_offset=line_offset, stop_on_scope=True,
top_module=self.module)

p.module.parent = self.module
p.module.parent = self.module
self.parsers.append(p)
line_offset += lines
self.parsers.append(p)
start += len(s)

def reset_caches(self):
self._user_scope = None
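This loop rewrite looks like the heart of the "code shouldn't be parsed multiple times" fix: instead of handing every regex part to its own PyFuzzyParser, the new loop feeds the parser everything from the current offset onward (code[start:], with stop_on_scope) and then skips the parts the previous run already covered (line_offset >= p.end_pos[0] - 2). The sketch below only models that control flow; DummyParser, split_parts and the sample source are assumptions standing in for PyFuzzyParser and the real splitting regex r.

    class DummyParser(object):
        """Stand-in for PyFuzzyParser(..., stop_on_scope=True): it 'parses'
        its input up to the second top-level def/class it meets."""
        def __init__(self, code, line_offset):
            end = code.count('\n')                 # default: reads to the end
            scopes = 0
            for i, line in enumerate(code.splitlines()):
                if line.startswith(('def ', 'class ')):
                    scopes += 1
                    if scopes == 2:
                        end = i
                        break
            self.end_pos = (line_offset + end, 0)

    def split_parts(code):
        # stand-in for `parts = re.findall(r, code, re.DOTALL)`: cut before
        # every top-level def/class/@ line
        parts, current = [], ''
        for line in code.splitlines(True):
            if line.startswith(('def ', 'class ', '@')) and current:
                parts.append(current)
                current = ''
            current += line
        if current:
            parts.append(current)
        return parts

    def fast_parse(code):
        parsers = []
        line_offset = 0
        start = 0
        p = None
        for s in split_parts(code):
            lines = s.count('\n')
            # only start a new parser once we are past what the previous run
            # already covered, so no region is parsed twice
            if p is None or line_offset >= p.end_pos[0] - 2:
                p = DummyParser(code[start:], line_offset)
                parsers.append(p)
            line_offset += lines
            start += len(s)
        return parsers

    if __name__ == '__main__':
        src = "import os\n\ndef f():\n    pass\n\nclass C:\n    pass\n"
        runs = fast_parse(src)
        print('%d parts, %d parser runs' % (len(split_parts(src)), len(runs)))

Under these stand-ins the three parts of the sample source trigger only two parser runs, whereas a per-part loop like the old one would have started three.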
@@ -1178,7 +1178,11 @@ class PyFuzzyParser(object):
self.gen = common.NoErrorTokenizer(buf.readline, line_offset,
stop_on_scope)
self.top_module = top_module or self.module
self.parse()
try:
self.parse()
except StopIteration:
# sometimes StopIteration isn't catched. Just ignore it.
pass

# clean up unused decorators
for d in self._decorators:
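The last hunk wraps the top-level parse() call so that a StopIteration escaping from the tokenizer (the stop_on_scope signal) ends the run quietly instead of propagating. A minimal illustration of that guard, with flaky_parse standing in for the real parse():

    def guarded_parse(parse):
        try:
            parse()
        except StopIteration:
            # sometimes StopIteration isn't caught inside parse(); ignore it
            pass

    def flaky_parse():
        # stands in for a parse() run that lets the stop_on_scope
        # StopIteration slip through
        raise StopIteration

    guarded_parse(flaky_parse)
    print('parse finished cleanly')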