forked from VimPlug/jedi
remove token.py, docstrings are now just normal tokens until used
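In short: docstrings used to be wrapped in a dedicated TokenDocstring class at parse time; after this commit the parser stores the plain tokenize.Token and the cleanup happens lazily, in the new docstring_content() helper, only when the docstring is actually read. A rough sketch of the before/after call pattern, using only names that appear in the diff below:

    # before: cleanup logic baked into a wrapper object at parse time
    scope.add_docstr(TokenDocstring(first_tok))
    doc = scope.docstr.as_string()

    # after: the raw token is stored; cleaning is deferred to the reader
    scope.add_docstr(first_tok)
    doc = docstring_content(scope.docstr)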
--- a/jedi/api/classes.py
+++ b/jedi/api/classes.py
@@ -10,6 +10,7 @@ from jedi import settings
 from jedi import common
 from jedi import cache
 from jedi.parser import representation as pr
+from jedi.parser.tokenize import Token
 from jedi.evaluate import representation as er
 from jedi.evaluate import iterable
 from jedi.evaluate import imports
@@ -241,8 +242,10 @@ class BaseDefinition(object):

         See :attr:`doc` for example.
         """
-        if isinstance(self._definition.docstr, pr.token_pr.TokenDocstring):
-            return unicode(self._definition.docstr.as_string())
+        print self._definition.docstr
+        if isinstance(self._definition.docstr, Token):
+            # TODO again ugly, we should have direct access.
+            return unicode(self._definition.docstr.string)
         try:
             return unicode(self._definition.docstr)
         except AttributeError:
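A minimal, self-contained sketch of the dispatch the new lines introduce, using a stand-in Token class (not jedi's real jedi.parser.tokenize.Token) and a hypothetical get_docstring helper in place of the method above:

    class Token(object):  # stand-in; jedi's token carries more state
        def __init__(self, type, string, start_pos):
            self.type, self.string, self.start_pos = type, string, start_pos

    def get_docstring(docstr):
        if isinstance(docstr, Token):
            # A plain token keeps the docstring's raw source text,
            # quotes and indentation included, in .string.
            return docstr.string
        try:
            return str(docstr)
        except AttributeError:
            return ''

    print(get_docstring(Token('STRING', '"""Adds two numbers."""', (1, 0))))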
--- a/jedi/evaluate/compiled/fake.py
+++ b/jedi/evaluate/compiled/fake.py
@@ -9,7 +9,7 @@ import inspect

 from jedi._compatibility import is_py3, builtins, unicode
 from jedi.parser import Parser
-from jedi.parser import token as token_pr
+from jedi.parser import tokenize
 from jedi.parser.representation import Class
 from jedi.evaluate.helpers import FakeName
@@ -105,7 +105,8 @@ def get_faked(module, obj, name=None):
     # contain it).
     result.docstr = None
     if obj.__doc__:
-        result.docstr = token_pr.TokenDocstring.fake_docstring(obj.__doc__)
+        doc = '''"""%s"""''' % obj.__doc__  # TODO need escapes.
+        result.docstr = tokenize.Token(tokenize.STRING, doc, (0, 0))
     return result
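The `# TODO need escapes.` remark deserves a concrete illustration: interpolating obj.__doc__ straight into triple quotes yields broken source whenever the docstring itself contains `"""`, so literal-evaluating it later (as the new docstring_content does) would blow up. A standalone sketch of the failure mode, independent of jedi:

    from ast import literal_eval

    def fake_docstring_source(doc):
        # Same construction as in the hunk above.
        return '''"""%s"""''' % doc

    ok = fake_docstring_source('a well-behaved docstring')
    print(literal_eval(ok))  # round-trips fine

    try:
        literal_eval(fake_docstring_source('contains """ inside'))
    except (SyntaxError, ValueError) as exc:
        print('unescaped quotes break the literal:', exc)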
--- a/jedi/evaluate/docstrings.py
+++ b/jedi/evaluate/docstrings.py
@@ -18,6 +18,7 @@ import re

 from jedi.evaluate.cache import memoize_default
 from jedi.parser import Parser
+from jedi.parser.representation import docstring_content

 DOCSTRING_PARAM_PATTERNS = [
     r'\s*:type\s+%s:\s*([^\n]+)',  # Sphinx
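Each entry in DOCSTRING_PARAM_PATTERNS is a regex template with %s standing in for the parameter name. A worked example of the Sphinx-style pattern shown above, with a hypothetical parameter named `a`:

    import re

    pattern = r'\s*:type\s+%s:\s*([^\n]+)' % 'a'
    doc = 'Adds two numbers.\n\n:type a: int\n:rtype: int\n'
    print(re.search(pattern, doc).group(1))  # -> 'int'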
@@ -39,7 +40,8 @@ def follow_param(evaluator, param):
     if not func.docstr:
         return []
     param_str = _search_param_in_docstr(
-        func.docstr.as_string(),
+        # TODO this is ugly, no direct access?
+        docstring_content(func.docstr),
         str(param.get_name())
     )
     position = (1, 0)
@@ -119,7 +121,7 @@ def find_return_types(evaluator, func):

     if not func.docstr:
         return []
-    type_str = search_return_in_docstr(func.docstr.as_string())
+    type_str = search_return_in_docstr(docstring_content(func.docstr))
     if not type_str:
         return []
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -21,7 +21,6 @@ from jedi._compatibility import next
 from jedi import debug
 from jedi import common
 from jedi.parser import representation as pr
-from jedi.parser import token as token_pr
 from jedi.parser import tokenize
@@ -358,9 +357,7 @@ class Parser(object):
                 and first_tok.type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
-                self._scope.add_docstr(
-                    token_pr.TokenDocstring(first_tok)
-                )
+                self._scope.add_docstr(first_tok)
                 return None, tok

         # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
@@ -368,9 +365,7 @@ class Parser(object):
         elif first_tok.type == tokenize.STRING:
             with common.ignored(IndexError, AttributeError):
                 # ...then set it as a docstring
-                self._scope.statements[-1].add_docstr(
-                    token_pr.TokenDocstring(first_tok)
-                )
+                self._scope.statements[-1].add_docstr(first_tok)
                 return None, tok

         stmt = stmt_class(self.module, tok_list, first_pos, tok.end_pos,
--- a/jedi/parser/representation.py
+++ b/jedi/parser/representation.py
@@ -44,12 +44,16 @@ from jedi import common
 from jedi import debug
 from jedi import cache
 from jedi.parser import tokenize
-from jedi.parser import token as token_pr


 SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns']


+def docstring_content(token):
+    """Returns a literal cleaned version of the ``Token``."""
+    return unicode(cleandoc(literal_eval(token.string)))
+
+
 class GetCodeState(object):
     """A helper class for passing the state of get_code in a thread-safe
     manner"""
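docstring_content() is the lazy replacement for TokenDocstring.as_string(): token.string holds the raw source of the STRING token, ast.literal_eval turns that source into the actual string value, and inspect.cleandoc strips the common indentation and surrounding blank lines. (The hunks shown don't include where cleandoc and literal_eval get imported in representation.py; the deleted token.py imported them from inspect and ast, so presumably the same imports move over.) A quick demonstration of the pipeline on its own:

    from ast import literal_eval
    from inspect import cleandoc

    # Raw source of a STRING token, exactly as a tokenizer records it:
    raw = '"""\n    Adds two numbers.\n\n    :type a: int\n    """'

    print(cleandoc(literal_eval(raw)))
    # Adds two numbers.
    #
    # :type a: int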
@@ -241,7 +245,7 @@ class Scope(Simple, IsScope):
         """
         string = ""
         if self.docstr:
-            string += '"""' + self.docstr.as_string() + '"""\n'
+            string += '"""' + docstring_content(self.docstr) + '"""\n'

         objs = self.subscopes + self.imports + self.statements + self.returns
         for obj in sorted(objs, key=lambda x: x.start_pos):
@@ -481,7 +485,7 @@ class Class(Scope):
         """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         for sub in self.subscopes:
             if sub.name.names[-1] == '__init__':
                 return '%s\n\n%s' % (
@@ -571,7 +575,7 @@ class Function(Scope):
         """ Return a document string including call signature. """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         return '%s\n\n%s' % (
             self.get_call_signature(),
             docstr,
@@ -849,7 +853,7 @@ isinstance(c, tokenize.Token) else unicode(c)
         code = ''.join(assemble(*a) for a in self.assignment_details)
         code += assemble(self.expression_list())
         if self.docstr:
-            code += '\n"""%s"""' % self.docstr.as_string()
+            code += '\n"""%s"""' % docstring_content(self.docstr)

         if new_line:
             return code + '\n'
--- a/jedi/parser/token.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-""" Efficient representation of tokens
-
-We want to have a token_list and start_position for everything the
-tokenizer returns. Therefore we need a memory efficient class. We
-found that a flat object with slots is the best.
-"""
-from inspect import cleandoc
-from ast import literal_eval
-
-from jedi._compatibility import unicode
-from jedi.parser.tokenize import Token
-
-
-class TokenDocstring(Token):
-    """A string token that is a docstring.
-
-    as_string() will clean the token representing the docstring.
-    """
-    __slots__ = ()
-
-    def __init__(self, token=None, state=None):
-        if token:
-            self.__setstate__(token.__getstate__())
-        else:
-            self.__setstate__(state)
-
-    @classmethod
-    def fake_docstring(cls, docstr):
-        # TODO: fixme when tests are up again
-        return TokenDocstring(state=(0, '"""\n%s\n"""' % docstr, 0, 0))
-
-    def as_string(self):
-        """Returns a literal cleaned version of the token"""
-        return unicode(cleandoc(literal_eval(self.string)))
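For reference, the deleted wrapper's one trick: since the tokens use __slots__, TokenDocstring could adopt an existing token wholesale by copying its state tuple through __getstate__/__setstate__ instead of re-running the constructor. A self-contained imitation (the stand-in Token below is simplified; jedi's real token carries a different state tuple, as the four-element state in fake_docstring hints):

    class Token(object):
        __slots__ = ('type', 'string', 'start_pos')

        def __init__(self, type, string, start_pos):
            self.type, self.string, self.start_pos = type, string, start_pos

        def __getstate__(self):
            return (self.type, self.string, self.start_pos)

        def __setstate__(self, state):
            self.type, self.string, self.start_pos = state

    class TokenDocstring(Token):
        __slots__ = ()  # no new attributes, so it stays as compact as Token

        def __init__(self, token):
            # Adopt the wrapped token's state wholesale.
            self.__setstate__(token.__getstate__())

    doc = TokenDocstring(Token('STRING', '"""hi"""', (1, 0)))
    print(doc.string)  # -> """hi"""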