Remove all usages of start_parsing=True in the fast parser.
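All the hunks below apply the same migration: call sites stop constructing FastParser / ParserWithRecovery by hand and extracting the root node, and instead go through the parse() helper in jedi.parser.python, which returns the module node directly. A minimal before/after sketch of that pattern, using only calls that appear in this diff (the source strings are illustrative):

    # Before: build a parser object, then pull the module node out of it.
    from jedi.parser import ParserWithRecovery
    from jedi.parser.python import load_grammar

    grammar = load_grammar()
    module = ParserWithRecovery(grammar, 'asdf').get_root_node()

    # After: parse() hides the parser object and returns the module node.
    from jedi.parser.python import parse

    module = parse('asdf')
    module = parse('asdf', path='example.py')   # optional module path
    module = parse('asdf', grammar=grammar)     # optional explicit grammar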
@@ -5,8 +5,7 @@ Used only for REPL Completion.
 import inspect
 import os
 
-from jedi import common
-from jedi.parser.python.diff import FastParser
+from jedi.parser.python import parse
 from jedi.evaluate import compiled
 from jedi.cache import underscore_memoization
 from jedi.evaluate import imports
@@ -115,15 +114,13 @@ class MixedObjectFilter(compiled.CompiledObjectFilter):
         #return MixedName(self._evaluator, self._compiled_object, name)
 
 
-def parse(grammar, path):
-    with open(path) as f:
-        source = f.read()
-    source = common.source_to_unicode(source)
-    return FastParser(grammar, source, path)
-
-
 def _load_module(evaluator, path, python_object):
-    module = parse(evaluator.grammar, path).get_root_node()
+    module = parse(
+        grammar=evaluator.grammar,
+        path=path,
+        cache=True,
+        diff_cache=True
+    ).get_root_node()
     python_module = inspect.getmodule(python_object)
 
     evaluator.modules[python_module.__name__] = module
@@ -51,6 +51,7 @@ class Parser(BaseParser):
             source += '\n'
             self._added_newline = True
 
+        self.new_code = source
        if start_parsing:
            if tokens is None:
                tokens = tokenize.source_tokens(source, use_exact_op_types=True)
@@ -9,7 +9,6 @@ from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
 from jedi.evaluate import Evaluator
 from jedi.evaluate.representation import ModuleContext
 from jedi.parser.python import parse, load_grammar
-from jedi.parser.python.parser import ParserWithRecovery
 
 from ..helpers import cwd_at
 
@@ -62,14 +61,14 @@ def test_path_from_invalid_sys_path_assignment():
 
 @cwd_at('test/test_evaluate/buildout_project/src/proj_name/')
 def test_sys_path_with_modifications():
-    code = dedent(u("""
+    code = dedent("""
         import os
-    """))
+    """)
 
     path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
     grammar = load_grammar()
-    p = ParserWithRecovery(grammar, code, module_path=path)
-    module_context = ModuleContext(Evaluator(grammar), p.get_root_node())
+    module_node = parse(code, path=path)
+    module_context = ModuleContext(Evaluator(grammar), module_node)
     paths = sys_path_with_modifications(module_context.evaluator, module_context)
     assert '/tmp/.buildout/eggs/important_package.egg' in paths
 
@@ -9,6 +9,7 @@ from jedi import cache
 from jedi.parser.python import load_grammar
 from jedi.parser.python.diff import DiffParser
 from jedi.parser import ParserWithRecovery
+from jedi.parser.tokenize import source_tokens
 
 
 def _check_error_leaves_nodes(node):
@@ -41,11 +42,12 @@ def _assert_valid_graph(node):
 
 
 class Differ(object):
-    def initialize(self, source):
+    def initialize(self, code):
         debug.dbg('differ: initialize', color='YELLOW')
         grammar = load_grammar()
-        self.parser = ParserWithRecovery(grammar, source)
-        return self.parser.get_root_node()
+        self.parser = ParserWithRecovery(grammar, code, start_parsing=False)
+        tokens = source_tokens(self.parser.new_code, use_exact_op_types=True)
+        return self.parser.parse(tokens)
 
     def parse(self, source, copies=0, parsers=0, expect_error_leaves=False):
         debug.dbg('differ: parse copies=%s parsers=%s', copies, parsers, color='YELLOW')
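The Differ.initialize() helper above is the one call site that still separates parser construction from parsing: it now passes start_parsing=False and feeds the tokens in itself, relying on the new_code attribute added to Parser.__init__ in the earlier hunk. A minimal sketch of that deferred-parsing flow, assuming the same API shown in this diff (the code string is illustrative):

    from jedi.parser import ParserWithRecovery
    from jedi.parser.python import load_grammar
    from jedi.parser.tokenize import source_tokens

    grammar = load_grammar()
    code = 'x = 1\n'

    # start_parsing=False builds the parser but skips tokenizing/parsing;
    # __init__ still normalizes the source and stores it as .new_code.
    parser = ParserWithRecovery(grammar, code, start_parsing=False)

    # Tokenize the stored source and hand the tokens back to the parser.
    tokens = source_tokens(parser.new_code, use_exact_op_types=True)
    module = parser.parse(tokens)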
@@ -10,9 +10,7 @@ from textwrap import dedent
 
 import jedi
 from jedi._compatibility import u
-from jedi.parser.python import load_grammar
-from jedi.parser.python.diff import FastParser
-from jedi.parser.utils import save_parser
+from jedi.parser.python import parse
 
 
 def test_carriage_return_splitting():
@@ -26,8 +24,7 @@ def test_carriage_return_splitting():
             pass
     '''))
     source = source.replace('\n', '\r\n')
-    p = FastParser(load_grammar(), source)
-    module = p.get_root_node()
+    module = parse(source)
     assert [n.value for lst in module.used_names.values() for n in lst] == ['Foo']
 
 
@@ -46,12 +43,10 @@ def check_p(src, number_parsers_used, number_of_splits=None, number_of_misses=0)
     if number_of_splits is None:
         number_of_splits = number_parsers_used
 
-    grammar = load_grammar()
-    p = FastParser(grammar, u(src))
-    save_parser(grammar, None, p, pickling=False)
+    module_node = parse(src)
 
-    assert src == p.get_root_node().get_code()
-    return p.get_root_node()
+    assert src == module_node.get_code()
+    return module_node
 
 
 def test_if():
@@ -281,13 +276,12 @@ def test_decorator_string_issue():
 
 
 def test_round_trip():
-    source = dedent('''
+    code = dedent('''
    def x():
        """hahaha"""
    func''')
 
-    f = FastParser(load_grammar(), u(source))
-    assert f.get_root_node().get_code() == source
+    assert parse(code).get_code() == code
 
 
 def test_parentheses_in_string():
@@ -4,7 +4,6 @@ from textwrap import dedent
 
 import jedi
 from jedi._compatibility import u, is_py3
-from jedi.parser import ParserWithRecovery
 from jedi.parser.python import parse, load_grammar
 from jedi.parser.python import tree
 
@@ -90,13 +89,13 @@ class TestImports():
 
 
 def test_module():
-    module = ParserWithRecovery(load_grammar(), u('asdf'), 'example.py').get_root_node()
+    module = parse('asdf', path='example.py')
     name = module.name
     assert str(name) == 'example'
     assert name.start_pos == (1, 0)
     assert name.end_pos == (1, 7)
 
-    module = ParserWithRecovery(load_grammar(), u('asdf')).get_root_node()
+    module = parse('asdf')
     name = module.name
     assert str(name) == ''
     assert name.start_pos == (1, 0)
@@ -190,7 +189,7 @@ def test_param_splitting():
     def check(src, result):
         # Python 2 tuple params should be ignored for now.
         grammar = load_grammar('%s.%s' % sys.version_info[:2])
-        m = ParserWithRecovery(grammar, u(src)).get_root_node()
+        m = parse(src, grammar=grammar)
         if is_py3:
             assert not m.subscopes
         else: