
remove token.py, docstrings are now just normal tokens until used

Author: Dave Halter
Date: 2014-02-26 01:13:38 +01:00
parent 40be00826e
commit 5e5bb618ea
6 changed files with 23 additions and 53 deletions
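
In short: before this commit the parser wrapped every docstring token in a TokenDocstring at parse time and later called its as_string() method; afterwards the plain tokenize token is stored and only cleaned when the docstring is actually requested. A minimal, self-contained sketch of that lazy-cleaning pattern (RawToken and DummyScope are illustrative stand-ins, not jedi's real classes):

from ast import literal_eval
from inspect import cleandoc


class RawToken(object):
    """Stand-in for a tokenize token; only the .string attribute matters here."""
    def __init__(self, string):
        self.string = string  # the literal source text, e.g. '"""..."""'


class DummyScope(object):
    """Stand-in for a parser scope that keeps its docstring as a raw token."""
    def __init__(self):
        self.docstr = None

    def add_docstr(self, token):
        # The token is stored as-is; no wrapper object is created.
        self.docstr = token

    @property
    def doc(self):
        # Cleaning happens only when the docstring is actually read.
        if self.docstr is None:
            return ''
        return cleandoc(literal_eval(self.docstr.string))


scope = DummyScope()
scope.add_docstr(RawToken('"""\n    Example docstring.\n    """'))
print(scope.doc)  # -> Example docstring.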

View File

@@ -21,7 +21,6 @@ from jedi._compatibility import next
 from jedi import debug
 from jedi import common
 from jedi.parser import representation as pr
-from jedi.parser import token as token_pr
 from jedi.parser import tokenize
@@ -358,9 +357,7 @@ class Parser(object):
                 and first_tok.type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
-                self._scope.add_docstr(
-                    token_pr.TokenDocstring(first_tok)
-                )
+                self._scope.add_docstr(first_tok)
                 return None, tok
             # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
@@ -368,9 +365,7 @@
             elif first_tok.type == tokenize.STRING:
                 with common.ignored(IndexError, AttributeError):
                     # ...then set it as a docstring
-                    self._scope.statements[-1].add_docstr(
-                        token_pr.TokenDocstring(first_tok)
-                    )
+                    self._scope.statements[-1].add_docstr(first_tok)
                     return None, tok
         stmt = stmt_class(self.module, tok_list, first_pos, tok.end_pos,

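The last hunk above handles PEP 224-style attribute docstrings (a bare string literal placed directly after an assignment, a convention Sphinx understands); the string token is attached to the preceding statement via statements[-1].add_docstr(). A small example of the source pattern that branch looks for (hypothetical module code, not from jedi):

class Config(object):
    timeout = 30
    """Seconds to wait before giving up (treated as the docstring of ``timeout``)."""

host = 'localhost'
"""Attribute docstring for the module-level ``host`` assignment."""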
View File

@@ -44,12 +44,16 @@ from jedi import common
 from jedi import debug
 from jedi import cache
 from jedi.parser import tokenize
-from jedi.parser import token as token_pr
 SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns']
+def docstring_content(token):
+    """Returns a literal cleaned version of the ``Token``."""
+    return unicode(cleandoc(literal_eval(token.string)))
 class GetCodeState(object):
     """A helper class for passing the state of get_code in a thread-safe
     manner"""
@@ -241,7 +245,7 @@ class Scope(Simple, IsScope):
         """
         string = ""
         if self.docstr:
-            string += '"""' + self.docstr.as_string() + '"""\n'
+            string += '"""' + docstring_content(self.docstr) + '"""\n'
         objs = self.subscopes + self.imports + self.statements + self.returns
         for obj in sorted(objs, key=lambda x: x.start_pos):
@@ -481,7 +485,7 @@ class Class(Scope):
         """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         for sub in self.subscopes:
             if sub.name.names[-1] == '__init__':
                 return '%s\n\n%s' % (
@@ -571,7 +575,7 @@ class Function(Scope):
         """ Return a document string including call signature. """
         docstr = ""
         if self.docstr:
-            docstr = self.docstr.as_string()
+            docstr = docstring_content(self.docstr)
         return '%s\n\n%s' % (
             self.get_call_signature(),
             docstr,
@@ -849,7 +853,7 @@ isinstance(c, tokenize.Token) else unicode(c)
         code = ''.join(assemble(*a) for a in self.assignment_details)
         code += assemble(self.expression_list())
         if self.docstr:
-            code += '\n"""%s"""' % self.docstr.as_string()
+            code += '\n"""%s"""' % docstring_content(self.docstr)
         if new_line:
             return code + '\n'

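The new docstring_content() helper used throughout this file is just literal_eval (turn the token's raw source text into the actual string object) followed by cleandoc (strip the common indentation), wrapped in a unicode() call from jedi's compatibility layer. A quick standalone check of that behaviour, using plain Python 3 str instead of the shim:

from ast import literal_eval
from inspect import cleandoc

# Raw source text of a docstring token, exactly as the tokenizer sees it.
raw = '"""First line.\n\n        Indented body line.\n        """'

value = literal_eval(raw)        # evaluate the string literal source -> its value
print(repr(cleandoc(value)))     # -> 'First line.\n\nIndented body line.'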
View File

@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-""" Efficient representation of tokens
-We want to have a token_list and start_position for everything the
-tokenizer returns. Therefore we need a memory efficient class. We
-found that a flat object with slots is the best.
-"""
-from inspect import cleandoc
-from ast import literal_eval
-from jedi._compatibility import unicode
-from jedi.parser.tokenize import Token
-class TokenDocstring(Token):
-    """A string token that is a docstring.
-    as_string() will clean the token representing the docstring.
-    """
-    __slots__ = ()
-    def __init__(self, token=None, state=None):
-        if token:
-            self.__setstate__(token.__getstate__())
-        else:
-            self.__setstate__(state)
-    @classmethod
-    def fake_docstring(cls, docstr):
-        # TODO: fixme when tests are up again
-        return TokenDocstring(state=(0, '"""\n%s\n"""' % docstr, 0, 0))
-    def as_string(self):
-        """Returns a literal cleaned version of the token"""
-        return unicode(cleandoc(literal_eval(self.string)))
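
The deleted module's docstring motivates its design: one object exists per token, so a flat class with __slots__ avoids a per-instance __dict__ and saves real memory. A generic illustration of that effect (not jedi's actual Token class):

import sys


class PlainToken(object):
    """Ordinary class: every instance carries its own attribute dict."""
    def __init__(self, string, start_pos):
        self.string = string
        self.start_pos = start_pos


class SlottedToken(object):
    """Flat object with slots, in the spirit of the removed token.py."""
    __slots__ = ('string', 'start_pos')

    def __init__(self, string, start_pos):
        self.string = string
        self.start_pos = start_pos


plain = PlainToken('"""doc"""', (1, 0))
slotted = SlottedToken('"""doc"""', (1, 0))
print(sys.getsizeof(plain.__dict__))   # per-instance dict overhead
print(hasattr(slotted, '__dict__'))    # -> False: attributes live in fixed slots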