Merge branch 'get_code_fidelity' of git://github.com/ganwell/jedi into ganwell
@@ -361,7 +361,9 @@ class Parser(object):
                 and first_tok.token_type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
-                self._scope.add_docstr(first_tok.token)
+                self._scope.add_docstr(
+                    token_pr.TokenDocstring(first_tok)
+                )
                 return None, tok

         # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
@@ -369,7 +371,9 @@ class Parser(object):
         elif first_tok.token_type == tokenize.STRING:
             with common.ignored(IndexError, AttributeError):
                 # ...then set it as a docstring
-                self._scope.statements[-1].add_docstr(first_tok.token)
+                self._scope.statements[-1].add_docstr(
+                    token_pr.TokenDocstring(first_tok)
+                )
                 return None, tok

         stmt = stmt_class(self.module, tok_list, first_pos, self.end_pos,
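
Note: the two parser hunks above change what add_docstr() receives. Instead of the pre-evaluated string (first_tok.token), the parser now hands over the raw STRING token wrapped in token_pr.TokenDocstring, so the original source text is preserved and cleaning is deferred until as_string() is called (see the token hunks below). A minimal sketch of that deferred cleaning, using the same cleandoc/literal_eval pair the new TokenDocstring.as_string() uses:

    from inspect import cleandoc
    from ast import literal_eval

    raw = '"""\n    An indented docstring.\n    """'   # raw source form, kept verbatim
    cleandoc(literal_eval(raw))                         # -> 'An indented docstring.'
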
@@ -43,10 +43,21 @@ from jedi import common
 from jedi import debug
 from jedi import cache
 from jedi.parser import tokenize
+from jedi.parser import token as token_pr


 SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns']


+class GetCodeState(object):
+    """A helper class for passing the state of get_code in a thread-safe
+    manner"""
+    __slots__ = ("last_pos")
+
+    def __init__(self):
+        self.last_pos = (0, 0)
+
+
 class Base(object):
     """
     This is just here to have an isinstance check, which is also used on
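
GetCodeState is groundwork for the new get_code2() path below: it threads the last emitted (line, column) position through a code-generation walk so each node can reproduce the exact gap that preceded it. A hypothetical sketch of such a walk (emit(), as_string() and end_pos here are assumptions for illustration, not part of this commit):

    def emit(node, out, state):
        # Rebuild the original gap from the previous token, then the token itself.
        out.append(node.space(state.last_pos, node.start_pos))
        out.append(node.as_string())
        state.last_pos = node.end_pos   # hypothetical end position of the node
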
@@ -61,6 +72,29 @@ class Base(object):
     def isinstance(self, *cls):
         return isinstance(self, cls)

+    @property
+    def newline(self):
+        """Returns the newline type for the current code."""
+        #TODO: we need newline detection
+        return "\n"
+
+    @property
+    def whitespace(self):
+        """Returns the whitespace type for the current code: tab or space."""
+        #TODO: we need tab detection
+        return " "
+
+    def space(self, from_pos, to_pos):
+        """Return the space between two tokens"""
+        linecount = to_pos[0] - from_pos[0]
+        if linecount == 0:
+            return self.whitespace * (to_pos[1] - from_pos[1])
+        else:
+            return "%s%s" % (
+                self.newline * linecount,
+                self.whitespace * to_pos[1],
+            )
+

 class Simple(Base):
     """
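
A worked example of Base.space(), assuming an instance named base and the default newline/whitespace types; positions are (line, column) tuples. Note that once the line changes, only to_pos[1] columns are emitted, i.e. the target column is rebuilt from the left margin regardless of from_pos's column:

    base.space((1, 4), (1, 10))   # same line:     '      '   (10 - 4 = 6 spaces)
    base.space((1, 10), (3, 4))   # 2 lines later: '\n\n    ' (2 newlines + 4 spaces)
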
@@ -150,7 +184,7 @@ class Scope(Simple, IsScope):
         self.subscopes = []
         self.imports = []
         self.statements = []
-        self.docstr = ''
+        self.docstr = None
         self.asserts = []
         # Needed here for fast_parser, because the fast_parser splits and
         # returns will be in "normal" modules.
@@ -176,9 +210,9 @@ class Scope(Simple, IsScope):
         self.statements.append(stmt)
         return stmt

-    def add_docstr(self, string):
-        """ Clean up a docstring """
-        self.docstr = cleandoc(literal_eval(string))
+    def add_docstr(self, token):
+        """ Store the docstring token; it is cleaned lazily by as_string() """
+        self.docstr = token

     def add_import(self, imp):
         self.imports.append(imp)
@@ -192,14 +226,18 @@ class Scope(Simple, IsScope):
             i += s.get_imports()
         return i

+    def get_code2(self, state=GetCodeState()):
+        string = []
+        return "".join(string)
+
     def get_code(self, first_indent=False, indention='    '):
         """
         :return: Returns the code of the current scope.
         :rtype: str
         """
         string = ""
-        if len(self.docstr) > 0:
-            string += '"""' + self.docstr + '"""\n'
+        if self.docstr:
+            string += '"""' + self.docstr.as_string() + '"""\n'

         objs = self.subscopes + self.imports + self.statements + self.returns
         for obj in sorted(objs, key=lambda x: x.start_pos):
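
get_code2() is clearly a stub at this stage (it joins an empty list). One caveat about its signature worth keeping in mind: Python evaluates default arguments once, at definition time, so every call that omits state shares a single GetCodeState instance. The "thread-safe" intent in the class docstring therefore only holds when callers pass a fresh state explicitly. A minimal demonstration:

    def f(state=GetCodeState()):
        return id(state)

    assert f() == f()   # the same default object on every call
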
@@ -431,12 +469,15 @@ class Class(Scope):
         """
         Return a document string including call signature of __init__.
         """
+        docstr = ""
+        if self.docstr:
+            docstr = self.docstr.as_string()
         for sub in self.subscopes:
             if sub.name.names[-1] == '__init__':
                 return '%s\n\n%s' % (
                     sub.get_call_signature(funcname=self.name.names[-1]),
-                    self.docstr)
-        return self.docstr
+                    docstr)
+        return docstr


 class Function(Scope):
@@ -516,7 +557,13 @@ class Function(Scope):
     @property
     def doc(self):
         """ Return a document string including call signature. """
-        return '%s\n\n%s' % (self.get_call_signature(), self.docstr)
+        docstr = ""
+        if self.docstr:
+            docstr = self.docstr.as_string()
+        return '%s\n\n%s' % (
+            self.get_call_signature(),
+            docstr,
+        )


 class Lambda(Function):
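
Class.doc and Function.doc gain the same guard because self.docstr now defaults to None instead of '': interpolating the attribute directly would leak the literal text "None" into the document string. For illustration:

    '%s\n\n%s' % ('f(x)', None)   # -> 'f(x)\n\nNone'   (what the guard prevents)
    '%s\n\n%s' % ('f(x)', '')     # -> 'f(x)\n\n'       (guarded result when no docstring)
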
@@ -764,7 +811,7 @@ class Statement(Simple):
         for n in as_names:
             n.parent = self.use_as_parent
         self.parent = parent
-        self.docstr = ''
+        self.docstr = None
         self._set_vars = None
         self.as_names = list(as_names)

@@ -772,9 +819,9 @@ class Statement(Simple):
         self._assignment_details = []
         # this is important for other scripts

-    def add_docstr(self, string):
-        """ Clean up a docstring """
-        self.docstr = cleandoc(literal_eval(string))
+    def add_docstr(self, token):
+        """ Store the docstring token; it is cleaned lazily by as_string() """
+        self.docstr = token

     def get_code(self, new_line=True):
         def assemble(command_list, assignment=None):
@@ -787,7 +834,7 @@ class Statement(Simple):
         code = ''.join(assemble(*a) for a in self.assignment_details)
         code += assemble(self.expression_list())
         if self.docstr:
-            code += '\n"""%s"""' % self.docstr
+            code += '\n"""%s"""' % self.docstr.as_string()

         if new_line:
             return code + '\n'

@@ -5,6 +5,8 @@ We want to have a token_list and start_position for everything the
 tokenizer returns. Therefore we need a memory efficient class. We
 found that a flat object with slots is the best.
 """
+from inspect import cleandoc
+from ast import literal_eval

 from jedi._compatibility import utf8, unicode

@@ -59,10 +61,16 @@ class Token(object):

     # Backward compatibility py2
     def __unicode__(self):
-        return unicode(self.token)
+        return self.as_string()

     # Backward compatibility py3
     def __str__(self):
-        return unicode(self.token)
+        return self.as_string()
+
+    def as_string(self):
+        """For backward compatibility, str(token) or unicode(token) will
+        work, BUT please use as_string() instead, because it is independent
+        of the Python version."""
+        return unicode(self.token)

     # Backward compatibility
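
With both protocol methods delegating to as_string(), unicode(token) on Python 2 and str(token) on Python 3 now return the same text; the unicode() call inside as_string() itself is the alias imported from jedi._compatibility above, so the method runs on both versions. Preferred usage per the docstring:

    text = token.as_string()   # version-independent
    # unicode(token) / str(token) still work, but each only on its own Python.
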
@@ -93,7 +101,6 @@ class Token(object):
     def start_pos_col(self):
         return self._start_pos_col

-    # Backward compatibility
     @property
     def start_pos(self):
         return (self.start_pos_line, self.start_pos_col)
@@ -126,3 +133,35 @@ class Token(object):
         self._token = state[1]
         self._start_pos_line = state[2]
         self._start_pos_col = state[3]
+
+
+class TokenNoCompat(Token):
+    def __unicode__(self):
+        raise NotImplementedError("Compatibility only for basic token.")
+
+    def __str__(self):
+        raise NotImplementedError("Compatibility only for basic token.")
+
+    def __getitem__(self, key):
+        raise NotImplementedError("Compatibility only for basic token.")
+
+
+class TokenDocstring(TokenNoCompat):
+    """A string token that is a docstring.
+
+    as_string() will clean the token representing the docstring.
+    """
+    def __init__(self, token=None, state=None):
+        if token:
+            self.__setstate__(token.__getstate__())
+        else:
+            self.__setstate__(state)
+
+    @classmethod
+    def fake_docstring(cls, docstr):
+        # TODO: fixme when tests are up again
+        return TokenDocstring(state=(0, '"""\n%s\n"""' % docstr, 0, 0))
+
+    def as_string(self):
+        """Returns a literal cleaned version of the token"""
+        return cleandoc(literal_eval(self.token))
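
A short usage sketch (illustrative, not part of the diff) showing the round trip TokenDocstring enables, via the fake_docstring() helper defined above:

    tok = TokenDocstring.fake_docstring("    Indented summary.")
    tok.token        # '"""\n    Indented summary.\n"""'  -- raw source form
    tok.as_string()  # 'Indented summary.'                -- cleaned form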