forked from VimPlug/jedi
* made Token readonly like a tuple by using @property
* end_pos calculation didn't respect multiline tokens
* replaced all index access to Token
* wrapped all code that injects token tuples with Token.from_tuple()
* repr of Token is still its tuple form!?
* PEP8 where I read or wrote code
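For orientation before the diff: the core of the change is making Token a read-only record and funnelling raw tuples through Token.from_tuple(). A minimal standalone sketch of that pattern (illustrative names, not the committed jedi code):

    import tokenize


    class Token(object):
        """Read-only (token_type, token, start_pos) record -- a sketch."""
        __slots__ = ("_token_type", "_token", "_start_pos_line", "_start_pos_col")

        def __init__(self, token_type, token, start_pos_line, start_pos_col):
            self._token_type = token_type
            self._token = token
            self._start_pos_line = start_pos_line
            self._start_pos_col = start_pos_col

        @classmethod
        def from_tuple(cls, tup):
            # tup is (token_type, token, (line, col)), the tuple shape
            # that tokenize-style call sites still build
            return cls(tup[0], tup[1], tup[2][0], tup[2][1])

        @property
        def token_type(self):
            return self._token_type

        @property
        def token(self):
            return self._token

        @property
        def start_pos(self):
            return (self._start_pos_line, self._start_pos_col)


    t = Token.from_tuple((tokenize.OP, '=', (0, 0)))
    assert t.token == '=' and t.start_pos == (0, 0)

With only getters defined, `t.token = ':'` raises AttributeError; that is what forces the index- and attribute-assignment call sites in the diff below to change.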
@@ -6,6 +6,7 @@ import itertools
 import tokenize

 from jedi.parser import representation as pr
+from jedi.parser import token


 class ObjectImporter(object):
@@ -156,11 +157,13 @@ class ObjectImporter(object):
             names=[(rhs, (0, 0))],
             start_pos=(0, 0),
             end_pos=(None, None))
-        token_list = [lhsname, (tokenize.OP, '=', (0, 0)), rhsname]
+        token_list = [lhsname, token.Token.from_tuple(
+            (tokenize.OP, '=', (0, 0))
+        ), rhsname]
         if call:
             token_list.extend([
-                (tokenize.OP, '(', (0, 0)),
-                (tokenize.OP, ')', (0, 0)),
+                token.Token.from_tuple((tokenize.OP, '(', (0, 0))),
+                token.Token.from_tuple((tokenize.OP, ')', (0, 0))),
             ])
         return pr.Statement(
             module=submodule,
@@ -392,18 +392,18 @@ class Parser(object):
         first_tok = tok_list[0]
         # docstrings
         if len(tok_list) == 1 and not isinstance(first_tok, pr.Name) \
-                and first_tok[0] == tokenize.STRING:
+                and first_tok.token_type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
-                self._scope.add_docstr(first_tok[1])
+                self._scope.add_docstr(first_tok.token)
                 return None, tok

         # Attribute docstring (PEP 224) support (sphinx uses it, e.g.)
         # If string literal is being parsed...
-        elif first_tok[0] == tokenize.STRING:
+        elif first_tok.token_type == tokenize.STRING:
             with common.ignored(IndexError, AttributeError):
                 # ...then set it as a docstring
-                self._scope.statements[-1].add_docstr(first_tok[1])
+                self._scope.statements[-1].add_docstr(first_tok.token)
                 return None, tok

         stmt = stmt_class(self.module, tok_list, first_pos, self.end_pos,
@@ -898,7 +898,7 @@ class Statement(Simple):

                 c = token_iterator.current[1]
                 arr.end_pos = c.end_pos if isinstance(c, Simple) \
-                    else (c[2][0], c[2][1] + len(c[1]))
+                    else c.end_pos
             return arr, break_tok

         def parse_stmt(token_iterator, maybe_dict=False, added_breaks=(),
@@ -920,9 +920,10 @@ class Statement(Simple):
                     # it's not possible to set it earlier
                     tok.parent = self
                 else:
-                    token_type, tok, start_tok_pos = tok_temp
+                    tok = tok_temp.token
+                    start_tok_pos = tok_temp.start_pos
                     last_end_pos = end_pos
-                    end_pos = start_tok_pos[0], start_tok_pos[1] + len(tok)
+                    end_pos = tok_temp.end_pos
                 if first:
                     first = False
                     start_pos = start_tok_pos
@@ -932,8 +933,12 @@ class Statement(Simple):
                 if lambd is not None:
                     token_list.append(lambd)
             elif tok == 'for':
-                list_comp, tok = parse_list_comp(token_iterator,
-                                                 token_list, start_pos, last_end_pos)
+                list_comp, tok = parse_list_comp(
+                    token_iterator,
+                    token_list,
+                    start_pos,
+                    last_end_pos
+                )
                 if list_comp is not None:
                     token_list = [list_comp]
@@ -944,9 +949,12 @@ class Statement(Simple):

                 if level == 0 and tok in closing_brackets \
                         or tok in added_breaks \
-                        or level == 1 and (tok == ','
+                        or level == 1 and (
+                            tok == ','
                             or maybe_dict and tok == ':'
-                            or is_assignment(tok) and break_on_assignment):
+                            or is_assignment(tok)
+                            and break_on_assignment
+                        ):
                     end_pos = end_pos[0], end_pos[1] - 1
                     break
             token_list.append(tok_temp)
@@ -954,8 +962,14 @@ class Statement(Simple):
             if not token_list:
                 return None, tok

-            statement = stmt_class(self._sub_module, token_list,
-                                   start_pos, end_pos, self.parent, set_name_parents=False)
+            statement = stmt_class(
+                self._sub_module,
+                token_list,
+                start_pos,
+                end_pos,
+                self.parent,
+                set_name_parents=False
+            )
             return statement, tok

         def parse_lambda(token_iterator):
@@ -984,8 +998,9 @@ class Statement(Simple):
             return lambd, tok

         def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
-            def parse_stmt_or_arr(token_iterator, added_breaks=(),
-                                  names_are_set_vars=False):
+            def parse_stmt_or_arr(
+                token_iterator, added_breaks=(), names_are_set_vars=False
+            ):
                 stmt, tok = parse_stmt(token_iterator,
                                        added_breaks=added_breaks)
                 if not stmt:
@@ -1039,12 +1054,16 @@ class Statement(Simple):
                 start_pos = tok.start_pos
                 end_pos = tok.end_pos
             else:
-                token_type, tok, start_pos = tok_temp
-                end_pos = start_pos[0], start_pos[1] + len(tok)
+                token_type = tok_temp.token_type
+                tok = tok_temp.token
+                start_pos = tok_temp.start_pos
+                end_pos = tok_temp.end_pos
             if is_assignment(tok):
                 # This means, there is an assignment here.
                 # Add assignments, which can be more than one
-                self._assignment_details.append((result, tok))
+                self._assignment_details.append(
+                    (result, tok_temp.token)
+                )
                 result = []
                 is_chain = False
                 continue
@@ -1072,8 +1091,9 @@ class Statement(Simple):
                     result.append(call)
                 is_chain = False
             elif tok in brackets.keys():
-                arr, is_ass = parse_array(token_iterator, brackets[tok],
-                                          start_pos)
+                arr, is_ass = parse_array(
+                    token_iterator, brackets[tok], start_pos
+                )
                 if result and isinstance(result[-1], StatementElement):
                     result[-1].set_execution(arr)
                 else:
@@ -1098,8 +1118,14 @@ class Statement(Simple):
                 e = (t[2][0], t[2][1] + len(t[1])) \
                     if isinstance(t, tuple) else t.start_pos

-                stmt = Statement(self._sub_module, result,
-                                 start_pos, e, self.parent, set_name_parents=False)
+                stmt = Statement(
+                    self._sub_module,
+                    result,
+                    start_pos,
+                    e,
+                    self.parent,
+                    set_name_parents=False
+                )
                 stmt._commands = result
                 arr, break_tok = parse_array(token_iterator, Array.TUPLE,
                                              stmt.start_pos, stmt)
@@ -27,18 +27,10 @@ class Token(object):
     >>> a.__setstate__((1, 2, 3, 4))
     >>> a
     <Token: (1, 2, (3, 4))>
-    >>> a[2] = (2, 1)
-    >>> a
-    <Token: (1, 2, (2, 1))>
-    >>> a.start_pos
-    (2, 1)
-    >>> a.token
-    2
-    >>> a.start_pos = (3, 4)
-    >>> a
-    <Token: (1, 2, (3, 4))>
     >>> a.start_pos
     (3, 4)
+    >>> a.token
+    2
     >>> a.start_pos_col
     4
     >>> Token.from_tuple((6, 5, (4, 3)))
@@ -47,7 +39,7 @@ class Token(object):
     True
     """
     __slots__ = [
-        "token_type", "token", "start_pos_line", "start_pos_col"
+        "_token_type", "_token", "_start_pos_line", "_start_pos_col"
     ]

     @classmethod
@@ -57,10 +49,10 @@ class Token(object):
     def __init__(
         self, token_type, token, start_pos_line, start_pos_col
     ):
-        self.token_type = token_type
-        self.token = token
-        self.start_pos_line = start_pos_line
-        self.start_pos_col = start_pos_col
+        self._token_type = token_type
+        self._token = token
+        self._start_pos_line = start_pos_line
+        self._start_pos_col = start_pos_col

     def __repr__(self):
         return "<%s: %s>" % (type(self).__name__, tuple(self))
@@ -85,34 +77,40 @@ class Token(object):
         else:
             raise IndexError("list index out of range")

-    # Backward compatibility
-    def __setitem__(self, key, value):
-        # setitem analogous to getitem
-        if key == 0:
-            self.token_type = value
-        elif key == 1:
-            self.token = value
-        elif key == 2:
-            self.start_pos_line = value[0]
-            self.start_pos_col = value[1]
-        else:
-            raise IndexError("list index out of range")
+    @property
+    def token_type(self):
+        return self._token_type

-    # Backward compatibility
-    def __getattr__(self, attr):
-        # Expose the missing start_pos attribute
-        if attr == "start_pos":
-            return (self.start_pos_line, self.start_pos_col)
-        else:
-            return object.__getattr__(self, attr)
+    @property
+    def token(self):
+        return self._token
+
+    @property
+    def start_pos_line(self):
+        return self._start_pos_line
+
+    @property
+    def start_pos_col(self):
+        return self._start_pos_col
+
+    @property
+    def start_pos(self):
+        return (self.start_pos_line, self.start_pos_col)

-    def __setattr__(self, attr, value):
-        # setattr analogous to getattr for symmetry
-        if attr == "start_pos":
-            self.start_pos_line = value[0]
-            self.start_pos_col = value[1]
-        else:
-            object.__setattr__(self, attr, value)
+    @property
+    def end_pos(self):
+        """Returns end position respecting multiline tokens."""
+        end_pos_line = self.start_pos_line
+        lines = unicode(self).split('\n')
+        end_pos_line += len(lines) - 1
+        end_pos_col = self.start_pos_col
+        # Check for multiline token
+        if self.start_pos_line == end_pos_line:
+            end_pos_col += len(lines[-1])
+        else:
+            end_pos_col = len(lines[-1])
+        return (end_pos_line, end_pos_col)

     # Make cache footprint smaller for faster unpickling
     def __getstate__(self):
@@ -124,7 +122,7 @@ class Token(object):
         )

     def __setstate__(self, state):
-        self.token_type = state[0]
-        self.token = state[1]
-        self.start_pos_line = state[2]
-        self.start_pos_col = state[3]
+        self._token_type = state[0]
+        self._token = state[1]
+        self._start_pos_line = state[2]
+        self._start_pos_col = state[3]
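One consequence visible in the hunk above: because the public names are now read-only properties, __setstate__ has to write the underscore-prefixed slots directly; the old `self.token_type = state[0]` would raise. A quick check against the sketch near the top of this page:

    t = Token.from_tuple((1, 'x', (0, 0)))
    try:
        t.token = 'y'            # property without a setter
    except AttributeError:
        pass                     # expected: the public face is read-only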
test/test_token.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+import jedi.parser as parser
+
+try:
+    import unittest2 as unittest
+except ImportError:  # pragma: no cover
+    import unittest
+
+
+class TokenTest(unittest.TestCase):
+    def test_end_pos_one_line(self):
+        parsed = parser.Parser('''
+def testit():
+    a = "huhu"
+''')
+        tok = parsed.top_module.subscopes[0].statements[0].token_list[2]
+        self.assertEqual(tok.end_pos, (3, 14))
+
+    def test_end_pos_multi_line(self):
+        parsed = parser.Parser('''
+def testit():
+    a = """huhu
+asdfasdf""" + "h"
+''')
+        tok = parsed.top_module.subscopes[0].statements[0].token_list[2]
+        self.assertEqual(tok.end_pos, (4, 11))
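Where (4, 11) comes from, worked by hand (a sanity check mirroring the new end_pos property, not part of the commit): the third token of the statement is the triple-quoted string, which starts at line 3, column 8, spans two lines, and so ends at the length of its last line.

    string = '"""huhu\nasdfasdf"""'        # the string literal, quotes included
    lines = string.split('\n')             # ['"""huhu', 'asdfasdf"""']
    end_line = 3 + len(lines) - 1          # starts on line 3, spans 2 lines -> 4
    end_col = len(lines[-1])               # column restarts on the last line -> 11
    assert (end_line, end_col) == (4, 11)  # matches the assertion above

The one-line case works the same way: a single-line token just advances the start column, so `"huhu"` at (3, 8) ends at 8 + 6 = (3, 14).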