forked from VimPlug/jedi
remove NoErrorTokenizer dependency for all but the fast parsers

@@ -381,7 +381,7 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
     ... '''))
     >>> scope = parser.module.subscopes[0]
     >>> scope
-    <Function: func@3-4>
+    <Function: func@3-5>

     `get_names_of_scope` is a generator. First it yields names from most inner
     scope.
@@ -389,13 +389,13 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
     >>> from jedi.evaluate import Evaluator
     >>> pairs = list(get_names_of_scope(Evaluator(), scope))
     >>> pairs[0]
-    (<Function: func@3-4>, [<Name: y@4,4>])
+    (<Function: func@3-5>, [<Name: y@4,4>])

     Then it yield the names from one level outer scope. For this example, this
     is the most outer scope.

     >>> pairs[1]
-    (<SubModule: None@1-4>, [<Name: x@2,0>, <Name: func@3,4>])
+    (<SubModule: None@1-5>, [<Name: x@2,0>, <Name: func@3,4>])

     Finally, it yields names from builtin, if `include_builtin` is
     true (default).
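The docstring's own doctest is the authoritative example; purely as a reading aid, the protocol it describes has roughly the following shape (a plain-Python stand-in, no jedi objects involved): a generator yielding (scope, names) pairs from the innermost scope outwards, with builtins last when include_builtin is true.

    # Stand-in only -- illustrates the iteration order described in the
    # docstring above, with plain strings/lists instead of jedi objects.
    def names_of_scope(scope_chain, builtin_names, include_builtin=True):
        for scope, names in scope_chain:      # innermost scope first
            yield scope, names
        if include_builtin:                   # builtins come last
            yield 'builtins', builtin_names

    chain = [('func', ['y']), ('module', ['x', 'func'])]
    for scope, names in names_of_scope(chain, ['len', 'print']):
        print(scope, names)
    # func ['y']
    # module ['x', 'func']
    # builtins ['len', 'print']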
@@ -47,8 +47,7 @@ class Parser(object):
         self._scope = self.module
         self._current = (None, None)

-        tokenizer = tokenizer or tokenize.NoErrorTokenizer(source)
-        tokenizer = tokenize.NoErrorTokenizer(source, offset, is_fast)
+        tokenizer = tokenizer or tokenize.source_tokens(source)
         self._gen = PushBackTokenizer(tokenizer)
         self._top_module = top_module or self.module
         try:
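This hunk only swaps which token source is built by default; the resulting stream is still wrapped in PushBackTokenizer. A minimal, self-contained sketch of that push-back idea (illustrative only; jedi's class, touched further down in this diff, tracks more state such as the previous token):

    # Illustrative push-back wrapper around any token iterator. jedi's
    # PushBackTokenizer (changed later in this diff) keeps more state;
    # this sketch only shows the core idea.
    class PushBackIterator(object):
        def __init__(self, iterator):
            self._iterator = iter(iterator)
            self._push_backs = []
            self.current = None

        def push_last_back(self):
            # Hand the most recent token out again on the next step.
            self._push_backs.append(self.current)

        def __iter__(self):
            return self

        def __next__(self):
            if self._push_backs:
                self.current = self._push_backs.pop(0)
            else:
                self.current = next(self._iterator)
            return self.current

        next = __next__  # Python 2 alias (the tests import u() for 2/3 support)

    tokens = PushBackIterator(iter(['def', 'func', '(', ')', ':']))
    assert next(tokens) == 'def'
    tokens.push_last_back()
    assert next(tokens) == 'def'  # the pushed-back token comes out again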
@@ -59,6 +58,7 @@ class Parser(object):
             # sometimes StopIteration isn't catched. Just ignore it.

         # on finish, set end_pos correctly
+            pass
         s = self._scope
         while s is not None:
             s.end_pos = self.end_pos
@@ -71,6 +71,8 @@ class Parser(object):
             d.parent = self.module

         if self._current[0] in (tokenize.NEWLINE,):
+            # This case is only relevant with the FastTokenizer, because
+            # otherwise there's always an EndMarker.
             # we added a newline before, so we need to "remove" it again.
             self.end_pos = self._gen.previous[2]

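`self._gen.previous[2]` indexes a token tuple. Judging by the old 5-element list in the PushBackTokenizer hunk below, jedi's tokens seem to mirror the stdlib layout (type, string, start, end, line), so index 2 would be the (row, col) start position. A quick stdlib check of that layout, under that assumption:

    # Demonstrates the tuple layout assumed above, using the stdlib tokenizer:
    # TokenInfo(type, string, start, end, line), so token[2] is the (row, col) start.
    import io
    import tokenize

    tokens = list(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
    newline_token = next(tok for tok in tokens if tok.type == tokenize.NEWLINE)
    print(newline_token[2])  # (1, 5) -- row 1, column 5, right after "x = 1"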
@@ -626,7 +628,7 @@ class PushBackTokenizer(object):
     def __init__(self, tokenizer):
         self._tokenizer = tokenizer
         self._push_backs = []
-        self.current = [None, None, (0, 0), (0, 0), '']
+        self.current = tokenize.TokenInfo(None, None, (0, 0), (0, 0))

     def push_last_back(self):
         self._push_backs.append(self.current)
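The old sentinel was a bare 5-element list; the new one is built through tokenize.TokenInfo with four fields. The diff only shows the call, not the class, so as an assumption a minimal stand-in for such a container could be:

    # Minimal stand-in for a 4-field token record matching the call above;
    # jedi's real tokenize.TokenInfo may carry extra fields or behaviour.
    from collections import namedtuple

    TokenInfo = namedtuple('TokenInfo', ['type', 'string', 'start_pos', 'end_pos'])

    # The "no token yet" sentinel used when the tokenizer wrapper starts up:
    current = TokenInfo(None, None, (0, 0), (0, 0))
    print(current.start_pos, current.end_pos)  # (0, 0) (0, 0)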
@@ -2,6 +2,7 @@ from jedi._compatibility import u
 from jedi.parser import Parser
 from jedi.parser.user_context import UserContextParser
 from jedi.parser import representation as pr
+from textwrap import dedent


 def test_user_statement_on_import():
@@ -95,3 +96,15 @@ def test_module():
     assert str(name) == ''
     assert name.start_pos == (0, 0)
     assert name.end_pos == (0, 0)
+
+
+def test_end_pos():
+    s = u(dedent('''
+    x = ['a', 'b', 'c']
+    def func():
+        y = None
+    '''))
+    parser = Parser(s)
+    scope = parser.module.subscopes[0]
+    assert scope.start_pos == (3, 0)
+    assert scope.end_pos == (5, 0)
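For readers verifying the new assertions: after dedent() the test source begins with an empty line, so `def func():` lands on line 3 and the source ends right before line 5, column 0, which is presumably what start_pos and end_pos record (1-based rows, 0-based columns).

    # Shows the line numbering behind the (3, 0) / (5, 0) expectations;
    # this only prints the dedented test source, it does not import jedi.
    from textwrap import dedent

    s = dedent('''
    x = ['a', 'b', 'c']
    def func():
        y = None
    ''')
    for row, line in enumerate(s.splitlines(), 1):
        print(row, repr(line))
    # 1 ''
    # 2 "x = ['a', 'b', 'c']"
    # 3 'def func():'
    # 4 '    y = None'
    # The trailing newline after "y = None" makes the next position (5, 0).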