Mirror of https://github.com/davidhalter/jedi.git (synced 2025-12-16 02:27:06 +08:00)
fix docstr issue in the wrong place, fixes #210
@@ -302,7 +302,7 @@ class FastParser(use_metaclass(CachedFastParser)):
     def _parse(self, code):
         """ :type code: str """
         def empty_parser():
-            new, temp = self._get_parser('', '', 0, [])
+            new, temp = self._get_parser('', '', 0, [], False)
             return new
 
         parts = self._split_parts(code)
@@ -330,7 +330,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             # check if code_part has already been parsed
             #print '#'*45,line_offset, p and p.end_pos, '\n', code_part
             p, node = self._get_parser(code_part, code[start:],
-                                       line_offset, nodes)
+                                       line_offset, nodes, not is_first)
 
             if is_first and p.module.subscopes:
                 # special case, we cannot use a function subscope as a
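
The two hunks above are the call sites: the dummy empty parser passes an explicit False, and each split-off code part passes `not is_first`, so only the first part of the file may still start with the module docstring. A minimal sketch of that caller-side pattern, with hypothetical names (`parse_parts` and the `get_parser` callable are not jedi's API):

    # Hypothetical, simplified sketch of the call-site logic above.
    def parse_parts(parts, get_parser):
        parsed = []
        for i, code_part in enumerate(parts):
            is_first = (i == 0)
            # Only the first split-off part may begin with the module
            # docstring; every later part is parsed with no_docstr=True so
            # a leading string literal is not mistaken for one.
            parsed.append(get_parser(code_part, no_docstr=not is_first))
        return parsed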
@@ -381,7 +381,7 @@ class FastParser(use_metaclass(CachedFastParser)):
         #print(self.parsers[0].module.get_code())
         del code
 
-    def _get_parser(self, code, parser_code, line_offset, nodes):
+    def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):
         h = hash(code)
         hashes = [n.hash for n in nodes]
         node = None
@@ -392,7 +392,8 @@ class FastParser(use_metaclass(CachedFastParser)):
         except ValueError:
             p = parsing.Parser(parser_code, self.module_path,
                                self.user_position, offset=(line_offset, 0),
-                               is_fast_parser=True, top_module=self.module)
+                               is_fast_parser=True, top_module=self.module,
+                               no_docstr=no_docstr)
             p.module.parent = self.module
         else:
             if nodes[index] != self.current_node:
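
Here the flag finally reaches the real parser as a keyword argument instead of being special-cased inside FastParser, which is presumably what the commit title means by "in the wrong place". A simplified, hypothetical sketch of the receiving side, assuming only that the flag is stored on the instance (the last hunk below relies on exactly that attribute):

    # Hypothetical, heavily simplified stand-in for parsing.Parser, which
    # takes many more arguments in jedi itself.
    class Parser:
        def __init__(self, source, no_docstr=False):
            self.source = source
            # Stored on the instance so the token loop can consult it and,
            # as the last hunk shows, clear it again.
            self.no_docstr = no_docstr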
@@ -399,7 +399,7 @@ class Parser(object):
             stmt.parent = self.top_module
             self._check_user_stmt(stmt)
 
-        # Attribute docstring (PEP 257) support
+        # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
         with common.ignored(IndexError, AttributeError):
            # If string literal is being parsed
            first_tok = stmt.token_list[0]
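
The comment change corrects the reference: the dedicated proposal for attribute docstrings is PEP 224 (rejected, but the convention survives in tools such as Sphinx). For example:

    class Settings:
        timeout = 30
        """Number of seconds before a request is aborted.

        A bare string literal directly after an assignment is an attribute
        docstring in the PEP 224 sense; CPython ignores it, but Sphinx and
        similar tools pick it up.
        """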
@@ -673,3 +673,5 @@ class Parser(object):
                                     tokenize.NEWLINE, tokenize.NL]:
                 debug.warning('token not classified', tok, token_type,
                               self.start_pos[0])
                 continue
+
+            self.no_docstr = False
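
The added reset clears the flag once the token loop has processed something other than noise. Read together with the earlier hunks, the apparent intent is that `no_docstr` should only suppress the leading string of a code part, not the docstrings of functions and classes defined later in that part. A hedged, self-contained sketch of that reset pattern (toy token kinds, not jedi's tokenizer loop):

    # Hypothetical sketch of the reset pattern; not jedi's code.
    NOISE = {"comment", "newline", "nl", "indent"}

    def docstrings_in_part(statements, no_docstr=False):
        """Collect docstrings from toy (kind, value) pairs, e.g.
        [("string", "mod doc"), ("def", "f"), ("string", "f doc")]."""
        found = []
        fresh_scope = True                    # module start or a new def/class
        for kind, value in statements:
            if kind in NOISE:
                continue
            if kind == "string" and fresh_scope and not no_docstr:
                found.append(value)
            fresh_scope = kind in ("def", "class")
            # The crucial reset: only the *leading* string of the code part
            # is suppressed; later scopes keep their docstrings.
            no_docstr = False
        return found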