
more changes to simplify the statement parser

Dave Halter
2014-02-27 16:58:08 +01:00
parent 8688def619
commit 1eba63760e
3 changed files with 14 additions and 11 deletions
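
In short: the statement parser now leaves raw tokens in the expression list as tokenize.Token objects instead of reducing them to plain unicode strings, so every consumer switches its isinstance checks from unicode to Token. A minimal sketch of that dispatch, with stand-in classes (only the .type/.string attribute names come from the diff; everything else is illustrative):

    class Token(object):
        # Stand-in for jedi.parser.tokenize.Token: a raw token carrying
        # its tokenize type and the exact source string.
        def __init__(self, type, string):
            self.type = type
            self.string = string

    class Call(object):
        # Stand-in for a parsed node the evaluator knows how to follow.
        def __init__(self, name):
            self.name = name

    def eval_expression_list(expression_list):
        # Before this commit raw tokens were plain unicode strings and were
        # filtered with isinstance(call, unicode); now they stay Token
        # objects and are skipped by type instead.
        return [call for call in expression_list
                if not isinstance(call, Token)]

    expr = [Call('foo'), Token(51, '+'), Call('bar')]
    assert len(eval_expression_list(expr)) == 2  # the '+' token is skipped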

View File

@@ -73,6 +73,7 @@ import itertools
 from jedi._compatibility import next, hasattr, unicode
 from jedi import common
 from jedi.parser import representation as pr
+from jedi.parser.tokenize import Token
 from jedi import debug
 from jedi.evaluate import representation as er
 from jedi.evaluate import imports
@@ -182,7 +183,7 @@ class Evaluator(object):
                 except StopIteration:
                     break
                 continue
-            elif not isinstance(call, unicode):
+            elif not isinstance(call, Token):
                 result += self.eval_call(call)
         return set(result)
 

View File

@@ -13,6 +13,7 @@ import copy
 from jedi._compatibility import use_metaclass
 from jedi.parser import representation as pr
+from jedi.parser.tokenize import Token
 from jedi import debug
 from jedi import common
 from jedi.evaluate.cache import memoize_default, CachedMetaClass
@@ -218,7 +219,7 @@ class InstanceElement(use_metaclass(CachedMetaClass, pr.Base)):
     def expression_list(self):
         # Copy and modify the array.
         return [InstanceElement(self.instance._evaluator, self.instance, command, self.is_class_var)
-                if not isinstance(command, pr.Operator) else command
+                if not isinstance(command, (pr.Operator, Token)) else command
                 for command in self.var.expression_list()]
 
     def __iter__(self):
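
The same Token pass-through shows up in InstanceElement.expression_list: operators, and now raw tokens, carry no instance context and are returned as-is, while real nodes get wrapped. A hedged sketch of the pattern with stand-in classes (only the isinstance tuple mirrors the diff):

    class Operator(object):           # stand-in for pr.Operator
        pass

    class Token(object):              # stand-in for jedi.parser.tokenize.Token
        pass

    class InstanceWrapper(object):
        # Stand-in for InstanceElement: ties a parsed node to an instance.
        def __init__(self, node):
            self.node = node

    def wrap_expression_list(expression_list):
        # Wrap parsed nodes; operators and raw tokens pass through untouched.
        return [cmd if isinstance(cmd, (Operator, Token)) else InstanceWrapper(cmd)
                for cmd in expression_list]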

View File

@@ -1100,7 +1100,13 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
         token_iterator = common.PushBackIterator(enumerate(self.token_list))
         for i, tok in token_iterator:
-            if isinstance(tok, Base):
+            if isinstance(tok, tokenize.Token):
+                token_type = tok.type
+                tok_str = tok.string
+                if tok_str == 'as':  # just ignore as, because it sets values
+                    next(token_iterator, None)
+                    continue
+            else:
                 # the token is a Name, which has already been parsed
                 tok_str = tok
                 token_type = None
 
@@ -1112,12 +1118,6 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
                     result = []
                     is_chain = False
                 continue
-            else:
-                token_type = tok.type
-                tok_str = tok.string
-                if tok_str == 'as':  # just ignore as, because it sets values
-                    next(token_iterator, None)
-                    continue
 
             if tok_str == 'lambda':
                 lambd, tok_str = parse_lambda(token_iterator)
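
These two hunks are one move: the else branch that unpacked raw tokens is hoisted to the top of the loop, so token_type and tok_str are normalized once before any further dispatch. A condensed sketch of the new control flow (the 'as' skipping and the attribute names mirror the diff; the iterator plumbing is simplified):

    from collections import namedtuple

    Token = namedtuple('Token', ['type', 'string'])  # stand-in for tokenize.Token

    def normalized(token_iterator):
        # token_iterator yields (index, token) pairs, like
        # common.PushBackIterator(enumerate(self.token_list)) in the diff.
        for i, tok in token_iterator:
            if isinstance(tok, Token):
                token_type, tok_str = tok.type, tok.string
                if tok_str == 'as':              # 'as' only binds a name,
                    next(token_iterator, None)   # so skip it and what follows
                    continue
            else:
                token_type, tok_str = None, tok  # e.g. an already parsed Name
            yield i, token_type, tok_str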
@@ -1180,8 +1180,9 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
 
                     result = []
                     is_chain = False
             else:
-                if token_type != tokenize.COMMENT:
-                    result.append(tok_str)
+                # comments, strange tokens (like */**), error tokens to
+                # reproduce the string correctly.
+                result.append(tok)
 
         return result
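
The last hunk is the actual simplification: instead of filtering out comments and keeping bare strings, the fallback branch appends the whole Token, so the original source text, comments and error tokens included, can be reproduced later. The hunk's context line, a join over tokenize.Token/Operator, hints at that use; a toy round-trip under that assumption:

    class Token(object):
        def __init__(self, type, string):
            self.type, self.string = type, string

    def reproduce(result):
        # Because comments, strange tokens and error tokens now survive in
        # `result` as Token objects, the statement text can be rebuilt.
        return ' '.join(c.string if isinstance(c, Token) else str(c)
                        for c in result)

    tokens = [Token(1, 'x'), Token(53, '='), Token(2, '1'), Token(54, '# note')]
    assert reproduce(tokens) == 'x = 1 # note'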