
remove token.py, docstrings are now just normal tokens until used
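
In short: the parser now stores a docstring as the plain tokenize.Token of the string literal instead of wrapping it in the dedicated TokenDocstring class, and the cleanup step (ast.literal_eval plus inspect.cleandoc, formerly TokenDocstring.as_string()) moves into a new docstring_content() helper in jedi.parser.representation, applied lazily only when the docstring is actually read.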

Author: Dave Halter
Date: 2014-02-26 01:13:38 +01:00
parent 40be00826e
commit 5e5bb618ea

6 changed files with 23 additions and 53 deletions

View File

@@ -10,6 +10,7 @@ from jedi import settings
 from jedi import common
 from jedi import cache
 from jedi.parser import representation as pr
+from jedi.parser.tokenize import Token
 from jedi.evaluate import representation as er
 from jedi.evaluate import iterable
 from jedi.evaluate import imports
@@ -241,8 +242,10 @@ class BaseDefinition(object):
         See :attr:`doc` for example.
         """
-        if isinstance(self._definition.docstr, pr.token_pr.TokenDocstring):
-            return unicode(self._definition.docstr.as_string())
+        print self._definition.docstr
+        if isinstance(self._definition.docstr, Token):
+            # TODO again ugly, we should have direct access.
+            return unicode(self._definition.docstr.string)
         try:
             return unicode(self._definition.docstr)
         except AttributeError:
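
Two details stand out in the hunk above: the bare "print self._definition.docstr" is a Python 2 print statement that reads like leftover debugging output, and the TODO exists because Token.string yields the raw source of the literal, quotes and indentation included, whereas the removed as_string() returned the evaluated, cleandoc'd text.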

View File

@@ -9,7 +9,7 @@ import inspect
 from jedi._compatibility import is_py3, builtins, unicode
 from jedi.parser import Parser
-from jedi.parser import token as token_pr
+from jedi.parser import tokenize
 from jedi.parser.representation import Class
 from jedi.evaluate.helpers import FakeName
@@ -105,7 +105,8 @@ def get_faked(module, obj, name=None):
     # contain it).
     result.docstr = None
     if obj.__doc__:
-        result.docstr = token_pr.TokenDocstring.fake_docstring(obj.__doc__)
+        doc = '''"""%s"""''' % obj.__doc__  # TODO need escapes.
+        result.docstr = tokenize.Token(tokenize.STRING, doc, (0, 0))
     return result
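
The "# TODO need escapes." comment is real: if obj.__doc__ itself contains a triple quote, the fabricated literal no longer parses, and the literal_eval inside the new docstring_content() helper (see the representation.py hunk further down) will blow up. A minimal standalone sketch of the failure mode, not jedi code:

    from ast import literal_eval

    # Same wrapping as above: obj.__doc__ pasted between triple quotes.
    doc = '''"""%s"""''' % 'contains """ quotes'
    try:
        literal_eval(doc)  # what docstring_content() will eventually do
    except (SyntaxError, ValueError):
        print('unescaped quotes make the faked token unparsable')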

View File

@@ -18,6 +18,7 @@ import re
 from jedi.evaluate.cache import memoize_default
 from jedi.parser import Parser
+from jedi.parser.representation import docstring_content

 DOCSTRING_PARAM_PATTERNS = [
     r'\s*:type\s+%s:\s*([^\n]+)',  # Sphinx
@@ -39,7 +40,8 @@ def follow_param(evaluator, param):
     if not func.docstr:
         return []
     param_str = _search_param_in_docstr(
-        func.docstr.as_string(),
+        # TODO this is ugly, no direct access?
+        docstring_content(func.docstr),
         str(param.get_name())
     )
     position = (1, 0)
@@ -119,7 +121,7 @@ def find_return_types(evaluator, func):
     if not func.docstr:
         return []
-    type_str = search_return_in_docstr(func.docstr.as_string())
+    type_str = search_return_in_docstr(docstring_content(func.docstr))
     if not type_str:
         return []
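
For context, the Sphinx-style entry of DOCSTRING_PARAM_PATTERNS shown above is applied roughly like this; _search_param_in_docstr itself is not part of this diff, so this is only a sketch of the pattern's behavior:

    import re

    SPHINX_PATTERN = r'\s*:type\s+%s:\s*([^\n]+)'  # from DOCSTRING_PARAM_PATTERNS

    docstr = ':type amount: int'
    match = re.search(SPHINX_PATTERN % 'amount', docstr)
    print(match.group(1))  # -> int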

View File

@@ -21,7 +21,6 @@ from jedi._compatibility import next
 from jedi import debug
 from jedi import common
 from jedi.parser import representation as pr
-from jedi.parser import token as token_pr
 from jedi.parser import tokenize
@@ -358,9 +357,7 @@ class Parser(object):
                 and first_tok.type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
-                self._scope.add_docstr(
-                    token_pr.TokenDocstring(first_tok)
-                )
+                self._scope.add_docstr(first_tok)
                 return None, tok
             # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
@@ -368,9 +365,7 @@ class Parser(object):
             elif first_tok.type == tokenize.STRING:
                 with common.ignored(IndexError, AttributeError):
                     # ...then set it as a docstring
-                    self._scope.statements[-1].add_docstr(
-                        token_pr.TokenDocstring(first_tok)
-                    )
+                    self._scope.statements[-1].add_docstr(first_tok)
                 return None, tok
         stmt = stmt_class(self.module, tok_list, first_pos, tok.end_pos,
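
The second hunk handles attribute docstrings in the PEP 224 style the comment mentions: a bare string literal directly after an assignment. PEP 224 itself was rejected as a language feature, but Sphinx and jedi honor the convention; the string token ends up in add_docstr() of the preceding statement. A hypothetical example of code this triggers on:

    class Server(object):
        host = 'localhost'
        """The host name to bind to."""  # attribute docstring (PEP 224 style)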

View File

@@ -44,12 +44,16 @@ from jedi import common
 from jedi import debug
 from jedi import cache
 from jedi.parser import tokenize
-from jedi.parser import token as token_pr

 SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns']

+
+def docstring_content(token):
+    """Returns a literal cleaned version of the ``Token``."""
+    return unicode(cleandoc(literal_eval(token.string)))
+

 class GetCodeState(object):
     """A helper class for passing the state of get_code in a thread-safe
     manner"""
@@ -241,7 +245,7 @@ class Scope(Simple, IsScope):
         """
         string = ""
         if self.docstr:
-            string += '"""' + self.docstr.as_string() + '"""\n'
+            string += '"""' + docstring_content(self.docstr) + '"""\n'

         objs = self.subscopes + self.imports + self.statements + self.returns
         for obj in sorted(objs, key=lambda x: x.start_pos):
@@ -481,7 +485,7 @@ class Class(Scope):
         """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         for sub in self.subscopes:
             if sub.name.names[-1] == '__init__':
                 return '%s\n\n%s' % (
@@ -571,7 +575,7 @@ class Function(Scope):
         """ Return a document string including call signature. """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         return '%s\n\n%s' % (
             self.get_call_signature(),
             docstr,
@@ -849,7 +853,7 @@ isinstance(c, tokenize.Token) else unicode(c)
         code = ''.join(assemble(*a) for a in self.assignment_details)
         code += assemble(self.expression_list())
         if self.docstr:
-            code += '\n"""%s"""' % self.docstr.as_string()
+            code += '\n"""%s"""' % docstring_content(self.docstr)
         if new_line:
             return code + '\n'
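
What docstring_content() actually produces, sketched here without the unicode() wrapper (which only matters on Python 2): literal_eval turns the raw token source into the real string object, and cleandoc strips the indentation that lines the docstring up with the surrounding code.

    from ast import literal_eval
    from inspect import cleandoc

    raw = '"""\n    First line.\n\n    Second line.\n    """'  # a Token.string
    print(cleandoc(literal_eval(raw)))
    # First line.
    #
    # Second line.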

View File

@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-""" Efficient representation of tokens
-We want to have a token_list and start_position for everything the
-tokenizer returns. Therefore we need a memory efficient class. We
-found that a flat object with slots is the best.
-"""
-
-from inspect import cleandoc
-from ast import literal_eval
-
-from jedi._compatibility import unicode
-from jedi.parser.tokenize import Token
-
-
-class TokenDocstring(Token):
-    """A string token that is a docstring.
-
-    as_string() will clean the token representing the docstring.
-    """
-    __slots__ = ()
-
-    def __init__(self, token=None, state=None):
-        if token:
-            self.__setstate__(token.__getstate__())
-        else:
-            self.__setstate__(state)
-
-    @classmethod
-    def fake_docstring(cls, docstr):
-        # TODO: fixme when tests are up again
-        return TokenDocstring(state=(0, '"""\n%s\n"""' % docstr, 0, 0))
-
-    def as_string(self):
-        """Returns a literal cleaned version of the token"""
-        return unicode(cleandoc(literal_eval(self.string)))
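
For reference, the __getstate__/__setstate__ round-trip in the deleted __init__ is the usual idiom for copying objects that use __slots__, since they have no __dict__ to copy. A minimal sketch of the pattern, not jedi's actual Token:

    class SlottedToken(object):
        """Illustration only; jedi's real Token carries more state."""
        __slots__ = ('type', 'string', 'start_pos')

        def __init__(self, token_type, string, start_pos):
            self.type = token_type
            self.string = string
            self.start_pos = start_pos

        def __getstate__(self):
            return (self.type, self.string, self.start_pos)

        def __setstate__(self, state):
            self.type, self.string, self.start_pos = state


    original = SlottedToken(3, '"""doc"""', (1, 0))
    clone = SlottedToken.__new__(SlottedToken)   # allocate without running __init__
    clone.__setstate__(original.__getstate__())  # copy the slot values, as TokenDocstring did
    print(clone.string)  # -> """doc"""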