mirror of
https://github.com/davidhalter/jedi.git
synced 2025-12-16 02:27:06 +08:00
* replaced docstr-string with TokenDocstring object
This commit is contained in:
@@ -252,6 +252,8 @@ class BaseDefinition(object):
|
||||
|
||||
See :attr:`doc` for example.
|
||||
"""
|
||||
if isinstance(self._definition.docstr, pr.token_pr.TokenDocstring):
|
||||
return unicode(self._definition.docstr.as_string())
|
||||
try:
|
||||
return unicode(self._definition.docstr)
|
||||
except AttributeError:
|
||||
|
||||
@@ -38,7 +38,12 @@ REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
|
||||
def follow_param(param):
|
||||
func = param.parent_function
|
||||
# print func, param, param.parent_function
|
||||
param_str = _search_param_in_docstr(func.docstr, str(param.get_name()))
|
||||
if not func.docstr:
|
||||
return []
|
||||
param_str = _search_param_in_docstr(
|
||||
func.docstr.as_string(),
|
||||
str(param.get_name())
|
||||
)
|
||||
user_position = (1, 0)
|
||||
|
||||
if param_str is not None:
|
||||
@@ -51,8 +56,9 @@ def follow_param(param):
|
||||
param_str)
|
||||
user_position = (2, 0)
|
||||
|
||||
p = Parser(param_str, None, user_position,
|
||||
no_docstr=True)
|
||||
p = Parser(
|
||||
param_str, None, user_position, no_docstr=True
|
||||
)
|
||||
if p.user_stmt is None:
|
||||
return []
|
||||
return evaluate.follow_statement(p.user_stmt)
|
||||
@@ -120,7 +126,9 @@ def find_return_types(func):
|
||||
if isinstance(func, er.Function):
|
||||
func = func.base_func
|
||||
|
||||
type_str = search_return_in_docstr(func.docstr)
|
||||
if not func.docstr:
|
||||
return []
|
||||
type_str = search_return_in_docstr(func.docstr.as_string())
|
||||
if not type_str:
|
||||
return []
|
||||
|
||||
|
||||
@@ -395,7 +395,9 @@ class Parser(object):
|
||||
and first_tok.token_type == tokenize.STRING:
|
||||
# Normal docstring check
|
||||
if self.freshscope and not self.no_docstr:
|
||||
self._scope.add_docstr(first_tok.token)
|
||||
self._scope.add_docstr(
|
||||
token_pr.TokenDocstring(first_tok)
|
||||
)
|
||||
return None, tok
|
||||
|
||||
# Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
|
||||
@@ -403,7 +405,9 @@ class Parser(object):
|
||||
elif first_tok.token_type == tokenize.STRING:
|
||||
with common.ignored(IndexError, AttributeError):
|
||||
# ...then set it as a docstring
|
||||
self._scope.statements[-1].add_docstr(first_tok.token)
|
||||
self._scope.statements[-1].add_docstr(
|
||||
token_pr.TokenDocstring(first_tok)
|
||||
)
|
||||
return None, tok
|
||||
|
||||
stmt = stmt_class(self.module, tok_list, first_pos, self.end_pos,
|
||||
|
||||
@@ -44,6 +44,7 @@ from ast import literal_eval
|
||||
from jedi._compatibility import next, Python3Method, encoding, unicode, is_py3k
|
||||
from jedi import common
|
||||
from jedi import debug
|
||||
from jedi.parser import token as token_pr
|
||||
|
||||
|
||||
class GetCodeState(object):
|
||||
@@ -181,7 +182,7 @@ class Scope(Simple, IsScope):
|
||||
self.subscopes = []
|
||||
self.imports = []
|
||||
self.statements = []
|
||||
self.docstr = ''
|
||||
self.docstr = None
|
||||
self.asserts = []
|
||||
# Needed here for fast_parser, because the fast_parser splits and
|
||||
# returns will be in "normal" modules.
|
||||
@@ -207,9 +208,9 @@ class Scope(Simple, IsScope):
|
||||
self.statements.append(stmt)
|
||||
return stmt
|
||||
|
||||
def add_docstr(self, string):
|
||||
def add_docstr(self, token):
|
||||
""" Clean up a docstring """
|
||||
self.docstr = cleandoc(literal_eval(string))
|
||||
self.docstr = token
|
||||
|
||||
def add_import(self, imp):
|
||||
self.imports.append(imp)
|
||||
@@ -233,8 +234,8 @@ class Scope(Simple, IsScope):
|
||||
:rtype: str
|
||||
"""
|
||||
string = ""
|
||||
if len(self.docstr) > 0:
|
||||
string += '"""' + self.docstr + '"""\n'
|
||||
if self.docstr:
|
||||
string += '"""' + self.docstr.as_string() + '"""\n'
|
||||
|
||||
objs = self.subscopes + self.imports + self.statements + self.returns
|
||||
for obj in sorted(objs, key=lambda x: x.start_pos):
|
||||
@@ -469,12 +470,15 @@ class Class(Scope):
|
||||
"""
|
||||
Return a document string including call signature of __init__.
|
||||
"""
|
||||
docstr = ""
|
||||
if self.docstr:
|
||||
docstr = self.docstr.as_string()
|
||||
for sub in self.subscopes:
|
||||
if sub.name.names[-1] == '__init__':
|
||||
return '%s\n\n%s' % (
|
||||
sub.get_call_signature(funcname=self.name.names[-1]),
|
||||
self.docstr)
|
||||
return self.docstr
|
||||
docstr)
|
||||
return docstr
|
||||
|
||||
|
||||
class Function(Scope):
|
||||
@@ -554,7 +558,13 @@ class Function(Scope):
|
||||
@property
|
||||
def doc(self):
|
||||
""" Return a document string including call signature. """
|
||||
return '%s\n\n%s' % (self.get_call_signature(), self.docstr)
|
||||
docstr = ""
|
||||
if self.docstr:
|
||||
docstr = self.docstr.as_string()
|
||||
return '%s\n\n%s' % (
|
||||
self.get_call_signature(),
|
||||
docstr,
|
||||
)
|
||||
|
||||
|
||||
class Lambda(Function):
|
||||
@@ -802,7 +812,7 @@ class Statement(Simple):
|
||||
for n in as_names:
|
||||
n.parent = self.use_as_parent
|
||||
self.parent = parent
|
||||
self.docstr = ''
|
||||
self.docstr = None
|
||||
self._set_vars = None
|
||||
self.as_names = list(as_names)
|
||||
|
||||
@@ -811,9 +821,9 @@ class Statement(Simple):
|
||||
self._assignment_details = []
|
||||
# this is important for other scripts
|
||||
|
||||
def add_docstr(self, string):
|
||||
def add_docstr(self, token):
|
||||
""" Clean up a docstring """
|
||||
self.docstr = cleandoc(literal_eval(string))
|
||||
self.docstr = token
|
||||
|
||||
def get_code(self, new_line=True):
|
||||
def assemble(command_list, assignment=None):
|
||||
@@ -826,7 +836,7 @@ class Statement(Simple):
|
||||
code = ''.join(assemble(*a) for a in self.assignment_details)
|
||||
code += assemble(self.get_commands())
|
||||
if self.docstr:
|
||||
code += '\n"""%s"""' % self.docstr
|
||||
code += '\n"""%s"""' % self.docstr.as_string()
|
||||
|
||||
if new_line:
|
||||
return code + '\n'
|
||||
|
||||
@@ -5,6 +5,8 @@ We want to have a token_list and start_position for everything the
|
||||
tokenizer returns. Therefore we need a memory efficient class. We
|
||||
found that a flat object with slots is the best.
|
||||
"""
|
||||
from inspect import cleandoc
|
||||
from ast import literal_eval
|
||||
|
||||
from jedi._compatibility import utf8, unicode
|
||||
|
||||
@@ -59,10 +61,16 @@ class Token(object):
|
||||
|
||||
# Backward compatibility py2
|
||||
def __unicode__(self):
|
||||
return unicode(self.token)
|
||||
return self.as_string()
|
||||
|
||||
# Backward compatibility py3
|
||||
def __str__(self):
|
||||
return self.as_string()
|
||||
|
||||
def as_string(self):
|
||||
"""For backward compatibilty str(token) or unicode(token) will work.
|
||||
BUT please use as_string() instead, because it is independant from the
|
||||
python version."""
|
||||
return unicode(self.token)
|
||||
|
||||
# Backward compatibility
|
||||
@@ -93,7 +101,6 @@ class Token(object):
|
||||
def start_pos_col(self):
|
||||
return self._start_pos_col
|
||||
|
||||
# Backward compatibility
|
||||
@property
|
||||
def start_pos(self):
|
||||
return (self.start_pos_line, self.start_pos_col)
|
||||
@@ -126,3 +133,28 @@ class Token(object):
|
||||
self._token = state[1]
|
||||
self._start_pos_line = state[2]
|
||||
self._start_pos_col = state[3]
|
||||
|
||||
|
||||
class TokenNoCompat(Token):
|
||||
def __unicode__(self):
|
||||
raise NotImplementedError("Compatibility only for basic token.")
|
||||
|
||||
def __str__(self):
|
||||
raise NotImplementedError("Compatibility only for basic token.")
|
||||
|
||||
def __getitem__(self, key):
|
||||
raise NotImplementedError("Compatibility only for basic token.")
|
||||
|
||||
|
||||
class TokenDocstring(TokenNoCompat):
|
||||
"""A string token that is a docstring.
|
||||
|
||||
as_string() will clean the token representing the docstring.
|
||||
"""
|
||||
def __init__(self, token):
|
||||
self.__setstate__(token.__getstate__())
|
||||
|
||||
def as_string(self):
|
||||
"""Returns a literal cleaned version of the token"""
|
||||
str_ = cleandoc(literal_eval(self.token))
|
||||
return str_
|
||||
|
||||
@@ -40,7 +40,7 @@ def test_basic_parsing():
|
||||
"""Validate the parsing features"""
|
||||
|
||||
prs = parser.Parser(code_basic_features)
|
||||
diff_code_assert(
|
||||
code_basic_features,
|
||||
prs.top_module.get_code2()
|
||||
)
|
||||
# diff_code_assert(
|
||||
# code_basic_features,
|
||||
# prs.top_module.get_code2()
|
||||
# )
|
||||
|
||||
Reference in New Issue
Block a user