1
0
forked from VimPlug/jedi

Fixed more problems related to `Operator`.

This commit is contained in:
Dave Halter
2014-02-26 22:08:51 +01:00
parent 2e12eb7861
commit 3330e29748
3 changed files with 43 additions and 35 deletions

View File

@@ -173,18 +173,17 @@ class Evaluator(object):
and isinstance(r.obj, (str, unicode))]:
# if it is an iterable, ignore * operations
next(calls_iterator)
elif not isinstance(call, (str, unicode)):
if isinstance(call, pr.Call) and str(call.name) == 'if':
elif call == 'if':
# Ternary operators.
while True:
for call in calls_iterator:
try:
call = next(calls_iterator)
if call == 'else':
break
except StopIteration:
break
with common.ignored(AttributeError):
if str(call.name) == 'else':
break
continue
elif not isinstance(call, (str, unicode)):
# TODO just else?
result += self.eval_call(call)
return set(result)

View File

@@ -23,6 +23,11 @@ from jedi import common
from jedi.parser import representation as pr
from jedi.parser import tokenize
OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
# Not used yet. In the future I intend to add something like KeywordStatement
STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
'return', 'yield', 'pass', 'continue', 'break'
class Parser(object):
"""
@@ -317,10 +322,12 @@ class Parser(object):
or tok.string in breaks and level <= 0):
try:
# print 'parse_stmt', tok, tokenize.tok_name[token_type]
if tok.type == tokenize.OP:
is_kw = tok.string in OPERATOR_KEYWORDS
if tok.type == tokenize.OP or is_kw:
tok_list.append(pr.Operator(tok.string, tok.start_pos))
else:
tok_list.append(tok)
if tok.string == 'as':
tok = next(self._gen)
if tok.type == tokenize.NAME:
@@ -330,11 +337,9 @@ class Parser(object):
as_names.append(n)
tok_list.append(n)
continue
elif tok.string in ['lambda', 'for', 'in']:
# don't parse these keywords, parse later in stmt.
if tok.string == 'lambda':
breaks.discard(':')
elif tok.type == tokenize.NAME:
elif tok.string == 'lambda':
breaks.discard(':')
elif tok.type == tokenize.NAME and not is_kw:
n, tok = self._parse_dot_name(self._gen.current)
# removed last entry, because we add Name
tok_list.pop()
@@ -356,7 +361,7 @@ class Parser(object):
first_tok = tok_list[0]
# docstrings
if len(tok_list) == 1 and not isinstance(first_tok, pr.Name) \
if len(tok_list) == 1 and isinstance(first_tok, tokenize.Token) \
and first_tok.type == tokenize.STRING:
# Normal docstring check
if self.freshscope and not self.no_docstr:
@@ -592,7 +597,7 @@ class Parser(object):
self._scope.add_statement(stmt)
self.freshscope = False
else:
if token_type not in [tokenize.COMMENT, tokenize.NEWLINE]:
if token_type not in [tokenize.COMMENT, tokenize.NEWLINE, tokenize.ENDMARKER]:
debug.warning('Token not used: %s %s %s', tok_str,
tokenize.tok_name[token_type], first_pos)
continue

View File

@@ -958,9 +958,7 @@ isinstance(c, tokenize.Token) else unicode(c)
# always dictionaries and not sets.
arr.type = Array.DICT
c = token_iterator.current[1]
arr.end_pos = c.end_pos if isinstance(c, Simple) \
else c.end_pos
arr.end_pos = token_iterator.current[1].end_pos
return arr, break_tok
def parse_stmt(token_iterator, maybe_dict=False, added_breaks=(),
@@ -981,6 +979,19 @@ isinstance(c, tokenize.Token) else unicode(c)
if isinstance(tok, ListComprehension):
# it's not possible to set it earlier
tok.parent = self
elif tok == 'lambda':
lambd, tok = parse_lambda(token_iterator)
if lambd is not None:
token_list.append(lambd)
elif tok == 'for':
list_comp, tok = parse_list_comp(
token_iterator,
token_list,
start_pos,
tok.end_pos
)
if list_comp is not None:
token_list = [list_comp]
if tok in closing_brackets:
level -= 1
@@ -1006,19 +1017,6 @@ isinstance(c, tokenize.Token) else unicode(c)
first = False
start_pos = start_tok_pos
if tok == 'lambda':
lambd, tok = parse_lambda(token_iterator)
if lambd is not None:
token_list.append(lambd)
elif tok == 'for':
list_comp, tok = parse_list_comp(
token_iterator,
token_list,
start_pos,
last_end_pos
)
if list_comp is not None:
token_list = [list_comp]
token_list.append(tok_temp)
if not token_list:
@@ -1045,6 +1043,8 @@ isinstance(c, tokenize.Token) else unicode(c)
params.append(param)
if tok == ':':
break
# TODO uncomment and run `./run.py func 395 --debug` shouldn't parse all statements.
#print tok, tok.start_pos
if tok != ':':
return None, tok
@@ -1052,6 +1052,7 @@ isinstance(c, tokenize.Token) else unicode(c)
parent = self.get_parent_until(IsScope)
lambd = Lambda(self._sub_module, params, start_pos, parent)
ret, tok = parse_stmt(token_iterator)
if ret is not None:
ret.parent = lambd
@@ -1060,9 +1061,8 @@ isinstance(c, tokenize.Token) else unicode(c)
return lambd, tok
def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
def parse_stmt_or_arr(
token_iterator, added_breaks=(), names_are_set_vars=False
):
def parse_stmt_or_arr(token_iterator, added_breaks=(),
names_are_set_vars=False):
stmt, tok = parse_stmt(token_iterator,
added_breaks=added_breaks)
if not stmt:
@@ -1091,7 +1091,7 @@ isinstance(c, tokenize.Token) else unicode(c)
middle, tok = parse_stmt_or_arr(token_iterator, ['in'], True)
if tok != 'in' or middle is None:
debug.warning('list comprehension middle @%s', start_pos)
debug.warning('list comprehension middle %s@%s', tok, start_pos)
return None, tok
in_clause, tok = parse_stmt_or_arr(token_iterator)
@@ -1534,5 +1534,9 @@ class Operator(Base):
"""Make comparisons easy. Improves the readability of the parser."""
return self.operator == other
def __ne__(self, other):
"""Python 2 compatibility: unlike Python 3, Python 2 does not derive
``!=`` from ``__eq__``, so inequality against the wrapped operator
string must be defined explicitly."""
return self.operator != other
def __hash__(self):
# Hash by the underlying operator string — presumably so an Operator
# instance hashes the same as the raw string it wraps (its __eq__
# also compares directly against `other`); verify against callers.
return hash(self.operator)