forked from VimPlug/jedi

more problems fixed that relate to Operator

Dave Halter
2014-02-26 22:08:51 +01:00
parent 2e12eb7861
commit 3330e29748
3 changed files with 43 additions and 35 deletions

View File

@@ -173,18 +173,17 @@ class Evaluator(object):
                         and isinstance(r.obj, (str, unicode))]:
                     # if it is an iterable, ignore * operations
                     next(calls_iterator)
-            elif not isinstance(call, (str, unicode)):
-                if isinstance(call, pr.Call) and str(call.name) == 'if':
-                    # Ternary operators.
-                    while True:
-                        try:
-                            call = next(calls_iterator)
-                        except StopIteration:
-                            break
-                        with common.ignored(AttributeError):
-                            if str(call.name) == 'else':
-                                break
-                    continue
+            elif call == 'if':
+                # Ternary operators.
+                for call in calls_iterator:
+                    try:
+                        if call == 'else':
+                            break
+                    except StopIteration:
+                        break
+                continue
+            elif not isinstance(call, (str, unicode)):
+                # TODO just else?
                 result += self.eval_call(call)
         return set(result)
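This rewrite leans on pr.Operator comparing equal to plain strings (see the __eq__/__ne__ hunk at the end of this commit), so `call == 'if'` matches an Operator token. A minimal sketch of the skipping idea, with plain strings standing in for parser calls:

# Minimal sketch, not jedi code: plain strings stand in for the
# Operator/Call objects the evaluator actually sees. For the token
# stream of `x if cond else y`, the condition part is drained from the
# shared iterator and only 'x' and 'y' are evaluated.
tokens = iter(['x', 'if', 'cond', 'else', 'y'])
result = []
for call in tokens:
    if call == 'if':
        # Ternary operator: skip everything up to the matching 'else'.
        for call in tokens:
            if call == 'else':
                break
        continue
    result.append(call)

print(result)  # ['x', 'y']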

View File

@@ -23,6 +23,11 @@ from jedi import common
 from jedi.parser import representation as pr
 from jedi.parser import tokenize

+OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
+# Not used yet. In the future I intend to add something like KeywordStatement
+STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
+    'return', 'yield', 'pass', 'continue', 'break'
+

 class Parser(object):
     """
@@ -317,10 +322,12 @@ class Parser(object):
                     or tok.string in breaks and level <= 0):
                 try:
                     # print 'parse_stmt', tok, tokenize.tok_name[token_type]
-                    if tok.type == tokenize.OP:
+                    is_kw = tok.string in OPERATOR_KEYWORDS
+                    if tok.type == tokenize.OP or is_kw:
                         tok_list.append(pr.Operator(tok.string, tok.start_pos))
                     else:
                         tok_list.append(tok)
                        if tok.string == 'as':
                            tok = next(self._gen)
                            if tok.type == tokenize.NAME:
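With OPERATOR_KEYWORDS, keyword operators now take the same pr.Operator path as punctuation like '+' or '='. A toy sketch of the classification step, where Token and the type constants are hypothetical stand-ins for jedi's own tokenize module:

from collections import namedtuple

# Hypothetical stand-ins; the real parser uses jedi.parser.tokenize.
Token = namedtuple('Token', 'type string start_pos')
OP, NAME = 1, 2

OPERATOR_KEYWORDS = ('and', 'for', 'if', 'else', 'in', 'is',
                     'lambda', 'not', 'or')

def classify(tok):
    # Keyword operators are wrapped exactly like punctuation operators,
    # so later stages can compare both against plain strings.
    is_kw = tok.string in OPERATOR_KEYWORDS
    if tok.type == OP or is_kw:
        return 'operator'
    return 'name'

print(classify(Token(NAME, 'if', (1, 0))))   # operator
print(classify(Token(OP, '+', (1, 3))))      # operator
print(classify(Token(NAME, 'foo', (1, 5))))  # name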
@@ -330,11 +337,9 @@ class Parser(object):
                                as_names.append(n)
                                tok_list.append(n)
                            continue
-                    elif tok.string in ['lambda', 'for', 'in']:
-                        # don't parse these keywords, parse later in stmt.
-                        if tok.string == 'lambda':
-                            breaks.discard(':')
-                    elif tok.type == tokenize.NAME:
+                    elif tok.string == 'lambda':
+                        breaks.discard(':')
+                    elif tok.type == tokenize.NAME and not is_kw:
                         n, tok = self._parse_dot_name(self._gen.current)
                         # removed last entry, because we add Name
                         tok_list.pop()
@@ -356,7 +361,7 @@ class Parser(object):
         first_tok = tok_list[0]
         # docstrings
-        if len(tok_list) == 1 and not isinstance(first_tok, pr.Name) \
+        if len(tok_list) == 1 and isinstance(first_tok, tokenize.Token) \
                 and first_tok.type == tokenize.STRING:
             # Normal docstring check
             if self.freshscope and not self.no_docstr:
@@ -592,7 +597,7 @@ class Parser(object):
                     self._scope.add_statement(stmt)
                 self.freshscope = False
             else:
-                if token_type not in [tokenize.COMMENT, tokenize.NEWLINE]:
+                if token_type not in [tokenize.COMMENT, tokenize.NEWLINE, tokenize.ENDMARKER]:
                     debug.warning('Token not used: %s %s %s', tok_str,
                                   tokenize.tok_name[token_type], first_pos)
             continue
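The added tokenize.ENDMARKER entry stops the parser from warning about the marker every tokenizer emits at end of input. A quick check with the stdlib tokenize (jedi bundles its own tokenizer, but the token names match):

import io
import tokenize

# Every token stream terminates with an ENDMARKER, so a "Token not
# used" warning for it is pure noise -- hence the extra list entry.
toks = list(tokenize.generate_tokens(io.StringIO('x = 1\n').readline))
print(tokenize.tok_name[toks[-1].type])  # ENDMARKER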

View File

@@ -958,9 +958,7 @@ isinstance(c, tokenize.Token) else unicode(c)
                 # always dictionaries and not sets.
                 arr.type = Array.DICT
-            c = token_iterator.current[1]
-            arr.end_pos = c.end_pos if isinstance(c, Simple) \
-                else c.end_pos
+            arr.end_pos = token_iterator.current[1].end_pos
             return arr, break_tok

         def parse_stmt(token_iterator, maybe_dict=False, added_breaks=(),
@@ -981,6 +979,19 @@ isinstance(c, tokenize.Token) else unicode(c)
                 if isinstance(tok, ListComprehension):
                     # it's not possible to set it earlier
                     tok.parent = self
+                elif tok == 'lambda':
+                    lambd, tok = parse_lambda(token_iterator)
+                    if lambd is not None:
+                        token_list.append(lambd)
+                elif tok == 'for':
+                    list_comp, tok = parse_list_comp(
+                        token_iterator,
+                        token_list,
+                        start_pos,
+                        tok.end_pos
+                    )
+                    if list_comp is not None:
+                        token_list = [list_comp]

                 if tok in closing_brackets:
                     level -= 1
@@ -1006,19 +1017,6 @@ isinstance(c, tokenize.Token) else unicode(c)
                     first = False
                     start_pos = start_tok_pos

-                if tok == 'lambda':
-                    lambd, tok = parse_lambda(token_iterator)
-                    if lambd is not None:
-                        token_list.append(lambd)
-                elif tok == 'for':
-                    list_comp, tok = parse_list_comp(
-                        token_iterator,
-                        token_list,
-                        start_pos,
-                        last_end_pos
-                    )
-                    if list_comp is not None:
-                        token_list = [list_comp]
                 token_list.append(tok_temp)

             if not token_list:
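This hunk and the one above move the lambda/for handling from the tail of the token loop up to the ListComprehension branch, where the end position can come from the current token (`tok.end_pos`) rather than the older `last_end_pos`. Both call sites use the same hand-off convention: a sub-parser consumes from the shared iterator and returns its result plus the first token it did not use. A hypothetical sketch of that convention, not jedi code:

# Hypothetical helper standing in for parse_lambda/parse_list_comp.
def parse_sum(token_iterator):
    total = 0
    for tok in token_iterator:
        if not isinstance(tok, int):
            return total, tok  # hand the unconsumed token back
        total += tok
    return total, None

tokens = iter([1, 2, 3, ')', 'rest'])
node, tok = parse_sum(tokens)
print(node, tok)     # 6 )
print(next(tokens))  # rest -- the caller resumes the same iterator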
@@ -1045,6 +1043,8 @@ isinstance(c, tokenize.Token) else unicode(c)
                 params.append(param)
                 if tok == ':':
                     break
+            # TODO uncomment and run `./run.py func 395 --debug` shouldn't parse all statements.
+            #print tok, tok.start_pos

             if tok != ':':
                 return None, tok
@@ -1052,6 +1052,7 @@ isinstance(c, tokenize.Token) else unicode(c)
             parent = self.get_parent_until(IsScope)
             lambd = Lambda(self._sub_module, params, start_pos, parent)
+
             ret, tok = parse_stmt(token_iterator)
             if ret is not None:
                 ret.parent = lambd
@@ -1060,9 +1061,8 @@ isinstance(c, tokenize.Token) else unicode(c)
             return lambd, tok

         def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
-            def parse_stmt_or_arr(
-                token_iterator, added_breaks=(), names_are_set_vars=False
-            ):
+            def parse_stmt_or_arr(token_iterator, added_breaks=(),
+                                  names_are_set_vars=False):
                 stmt, tok = parse_stmt(token_iterator,
                                        added_breaks=added_breaks)
                 if not stmt:
@@ -1091,7 +1091,7 @@ isinstance(c, tokenize.Token) else unicode(c)
middle, tok = parse_stmt_or_arr(token_iterator, ['in'], True) middle, tok = parse_stmt_or_arr(token_iterator, ['in'], True)
if tok != 'in' or middle is None: if tok != 'in' or middle is None:
debug.warning('list comprehension middle @%s', start_pos) debug.warning('list comprehension middle %s@%s', tok, start_pos)
return None, tok return None, tok
in_clause, tok = parse_stmt_or_arr(token_iterator) in_clause, tok = parse_stmt_or_arr(token_iterator)
@@ -1534,5 +1534,9 @@ class Operator(Base):
         """Make comparisons easy. Improves the readability of the parser."""
         return self.operator == other

+    def __ne__(self, other):
+        """Python 2 compatibility."""
+        return self.operator != other
+
     def __hash__(self):
         return hash(self.operator)
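A note on the new __ne__: in Python 2, defining __eq__ does not give a class a matching `!=`, so without __ne__ a comparison like `Operator('if') != 'if'` would fall back to the default (identity-based) behaviour and return True. A standalone reconstruction of the pattern (the real class subclasses Base and also carries position information):

class Operator(object):
    def __init__(self, operator):
        self.operator = operator

    def __eq__(self, other):
        """Make comparisons easy. Improves the readability of the parser."""
        return self.operator == other

    def __ne__(self, other):
        """Python 2 compatibility: there, __eq__ does not imply __ne__."""
        return self.operator != other

    def __hash__(self):
        # Hash like the wrapped string so Operators can live in the same
        # sets/dicts as plain strings (e.g. `tok in closing_brackets`).
        return hash(self.operator)

op = Operator('if')
print(op == 'if', op != 'if')  # True False
print(op in {'if', 'else'})    # True -- hash matches the string's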