forked from VimPlug/jedi
Added a grammar param to the parser.
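In practice this changes the parser and evaluator entry points: both now take the
grammar as their first argument. A minimal sketch of the new calling convention
(names taken from the diff below; the source string here is made up):

    from jedi.parser import Parser, load_grammar
    from jedi.evaluate import Evaluator

    grammar = load_grammar('grammar3.4')    # parsed once, then served from a cache
    parser = Parser(grammar, u'x = 1\n')    # grammar is now the first parameter
    evaluator = Evaluator(grammar)          # the evaluator carries the grammar too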
@@ -15,7 +15,7 @@ import sys
 from itertools import chain

 from jedi._compatibility import next, unicode, builtins
-from jedi.parser import Parser
+from jedi.parser import Parser, load_grammar
 from jedi.parser.tokenize import source_tokens
 from jedi.parser import tree as pr
 from jedi.parser.user_context import UserContext, UserContextParser
@@ -100,9 +100,11 @@ class Script(object):

         cache.clear_time_caches()
         debug.reset_time()
+        self._grammar = load_grammar('grammar3.4')
         self._user_context = UserContext(self.source, self._pos)
-        self._parser = UserContextParser(self.source, path, self._pos, self._user_context)
-        self._evaluator = Evaluator()
+        self._parser = UserContextParser(self._grammar, self.source, path,
+                                         self._pos, self._user_context)
+        self._evaluator = Evaluator(self._grammar)
         debug.speed('init')

     @property
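Note that Script's public interface is unchanged by this hunk: the grammar is
loaded inside __init__, so callers keep using the usual jedi entry point
(standard jedi usage with illustrative values, not part of this diff):

    import jedi

    source = 'import json; json.l'
    script = jedi.Script(source, 1, len(source), 'example.py')
    completions = script.completions()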
@@ -277,7 +279,7 @@ class Script(object):

     def _get_under_cursor_stmt(self, cursor_txt):
         tokenizer = source_tokens(cursor_txt, line_offset=self._pos[0] - 1)
-        r = Parser(cursor_txt, no_docstr=True, tokenizer=tokenizer)
+        r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer)
         try:
             # Take the last statement available.
             stmt = r.module.statements[-1]
@@ -674,11 +676,10 @@ def defined_names(source, path=None, encoding='utf-8'):

     :rtype: list of classes.Definition
     """
-    parser = Parser(
-        common.source_to_unicode(source, encoding),
-        module_path=path,
-    )
-    return classes.defined_names(Evaluator(), parser.module)
+    grammar = load_grammar('grammar3.4')
+    parser = Parser(grammar, common.source_to_unicode(source, encoding),
+                    module_path=path)
+    return classes.defined_names(Evaluator(grammar), parser.module)


 def names(source=None, path=None, encoding='utf-8', all_scopes=False,
@@ -60,7 +60,7 @@ def usages(evaluator, definition_names, mods):
     compare_definitions = compare_array(definition_names)
     mods |= set([d.get_parent_until() for d in definition_names])
     definitions = []
-    for m in imports.get_modules_containing_name(mods, search_name):
+    for m in imports.get_modules_containing_name(evaluator, mods, search_name):
         try:
             check_names = m.used_names[search_name]
         except KeyError:
@@ -91,7 +91,8 @@ from jedi.evaluate.helpers import FakeStatement, deep_ast_copy, call_of_name


 class Evaluator(object):
-    def __init__(self):
+    def __init__(self, grammar):
+        self.grammar = grammar
         self.memoize_cache = {}  # for memoize decorators
         self.import_cache = {}  # like `sys.modules`.
         self.compiled_cache = {}  # see `compiled.create()`
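Storing the grammar on the Evaluator is what lets the later hunks drop their
module-level grammar lookups: anything that receives an evaluator can reach the
grammar as evaluator.grammar. A hedged sketch of the resulting pattern
(make_parser is a hypothetical helper, not part of this diff):

    def make_parser(evaluator, source):
        # any code holding an evaluator can now construct a Parser
        return Parser(evaluator.grammar, source)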
@@ -8,8 +8,7 @@ import os
 import inspect

 from jedi._compatibility import is_py3, builtins, unicode
-from jedi.parser import Parser
-from jedi.parser import tokenize
+from jedi.parser import Parser, load_grammar
 from jedi.parser.tree import Class
 from jedi.evaluate.helpers import FakeName
@@ -31,7 +30,8 @@ def _load_faked_module(module):
     except IOError:
         modules[module_name] = None
         return
-    module = Parser(unicode(source), module_name).module
+    grammar = load_grammar('grammar3.4')
+    module = Parser(grammar, unicode(source), module_name).module
     modules[module_name] = module

     if module_name == 'builtins' and not is_py3:
@@ -127,7 +127,7 @@ def _evaluate_for_statement_string(evaluator, string, module):
         # (e.g., 'threading' in 'threading.Thread').
         string = 'import %s\n' % element + string

-    p = Parser(code % indent_block(string), no_docstr=True)
+    p = Parser(evaluator.grammar, code % indent_block(string))
     pseudo_cls = p.module.subscopes[0]
     try:
         stmt = pseudo_cls.statements[-1]
@@ -174,7 +174,7 @@ def search_params(evaluator, param):
     try:
         result = []
         # This is like backtracking: Get the first possible result.
-        for mod in imports.get_modules_containing_name([current_module], func_name):
+        for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name):
             result = get_params_for_module(mod)
             if result:
                 break
@@ -492,8 +492,8 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
     the current scope is function:

     >>> from jedi._compatibility import u
-    >>> from jedi.parser import Parser
-    >>> parser = Parser(u('''
+    >>> from jedi.parser import Parser, load_grammar
+    >>> parser = Parser(load_grammar('grammar3.4'), u('''
     ... x = ['a', 'b', 'c']
     ... def func():
     ...     y = None
@@ -184,7 +184,7 @@ class ImportWrapper2(pr.Base):
                 rel_path = os.path.join(self._importer.get_relative_path(),
                                         '__init__.py')
                 if os.path.exists(rel_path):
-                    m = _load_module(rel_path)
+                    m = _load_module(self.evaluator, rel_path)
                     names += m.get_defined_names()
             else:
                 # flask
@@ -590,9 +590,9 @@ class _Importer(object):
             else:
                 source = current_namespace[0].read()
                 current_namespace[0].close()
-                return _load_module(path, source, sys_path=sys_path), rest
+                return _load_module(self.evaluator, path, source, sys_path=sys_path), rest
         else:
-            return _load_module(name=path, sys_path=sys_path), rest
+            return _load_module(self.evaluator, name=path, sys_path=sys_path), rest


 def follow_imports(evaluator, scopes):
@@ -633,7 +633,7 @@ def remove_star_imports(evaluator, scope, ignored_modules=()):
     return set(modules)


-def _load_module(path=None, source=None, name=None, sys_path=None):
+def _load_module(evaluator, path=None, source=None, name=None, sys_path=None):
     def load(source):
         dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
         if path is not None and path.endswith('.py') \
@@ -644,7 +644,7 @@ def _load_module(path=None, source=None, name=None, sys_path=None):
         else:
            return compiled.load_module(path, name)
        p = path or name
-       p = fast.FastParser(common.source_to_unicode(source), p)
+       p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p)
        cache.save_parser(path, name, p)
        return p.module
@@ -652,7 +652,7 @@ def _load_module(path=None, source=None, name=None, sys_path=None):
     return load(source) if cached is None else cached.module


-def get_modules_containing_name(mods, name):
+def get_modules_containing_name(evaluator, mods, name):
     """
     Search a name in the directories of modules.
     """
@@ -669,7 +669,7 @@ def get_modules_containing_name(mods, name):
         with open(path, 'rb') as f:
             source = source_to_unicode(f.read())
             if name in source:
-                return _load_module(path, source)
+                return _load_module(evaluator, path, source)

     # skip non python modules
     mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject))
@@ -227,7 +227,7 @@ def collections_namedtuple(evaluator, obj, params):
     )

     # Parse source
-    generated_class = Parser(unicode(source)).module.subscopes[0]
+    generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0]
     return [er.Class(evaluator, generated_class)]
@@ -140,7 +140,7 @@ def sys_path_with_modifications(evaluator, module):
         except IOError:
             pass
         else:
-            p = Parser(common.source_to_unicode(source), module_path)
+            p = Parser(evaluator.grammar, common.source_to_unicode(source), module_path)
             for path in _check_module(p.module):
                 if path not in buildout_paths:
                     buildout_paths.add(path)
@@ -15,16 +15,14 @@ within the statement. This lowers memory usage and cpu time and reduces the
 complexity of the ``Parser`` (there's another parser sitting inside
 ``Statement``, which produces ``Array`` and ``Call``).
 """
-import keyword
 import logging
+import os

-from jedi._compatibility import next, unicode
-from jedi import debug
+from jedi._compatibility import next
 from jedi import common
 from jedi.parser import tree as pt
 from jedi.parser import tokenize
-from jedi.parser import pytree
-from jedi.parser.pgen2 import Driver
+from jedi.parser import pgen2

 OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
@@ -33,20 +31,33 @@ STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
                      'return', 'yield', 'pass', 'continue', 'break'


+_loaded_grammars = {}
+
+
+def load_grammar(file):
+    global _loaded_grammars
+    path = os.path.join(os.path.dirname(__file__), file) + '.txt'
+    try:
+        return _loaded_grammars[path]
+    except KeyError:
+        return _loaded_grammars.setdefault(path, pgen2.load_grammar(path))
+
+
 class Parser(object):
     """
     This class is used to parse a Python file, it then divides them into a
     class structure of different scopes.

-    :param source: The codebase for the parser.
-    :type source: str
+    :param grammar: The grammar object of pgen2. Loaded by load_grammar.
+    :param source: The codebase for the parser. Must be unicode.
     :param module_path: The path of the module in the file system, may be None.
     :type module_path: str
-    :param no_docstr: If True, a string at the beginning is not a docstr.
-    :param top_module: Use this module as a parent instead of `self.module`.
     """
-    def __init__(self, source, module_path=None, no_docstr=False,
-                 tokenizer=None, top_module=None):
+    def __init__(self, grammar, source, module_path=None, tokenizer=None):
+        """
+        This is the way I imagine a parser describing the init function
+        """

         if not source.endswith('\n'):
             source += '\n'
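A note on the caching above: load_grammar resolves the grammar file relative to
the jedi.parser package, appends '.txt', and memoizes the loaded pgen2 grammar
per path, so repeated calls are cheap and return the same object. A sketch of
the observable behaviour (assuming grammar3.4.txt ships with the package):

    g1 = load_grammar('grammar3.4')
    g2 = load_grammar('grammar3.4')
    assert g1 is g2   # parsed from grammar3.4.txt only once, then reused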
@@ -90,8 +101,8 @@ class Parser(object):
         self.used_names = {}
         self.scope_names_stack = [{}]
         logger = logging.getLogger("Jedi-Parser")
-        d = Driver(pytree.python_grammar, self.convert_node, self.convert_leaf,
-                   self.error_recovery, logger=logger)
+        d = pgen2.Driver(grammar, self.convert_node,
+                         self.convert_leaf, self.error_recovery, logger=logger)
         self.module = d.parse_string(source).get_parent_until()

         self.module.used_names = self.used_names
@@ -56,13 +56,13 @@ class Module(pr.Module, pr.Simple):

 class CachedFastParser(type):
     """ This is a metaclass for caching `FastParser`. """
-    def __call__(self, source, module_path=None):
+    def __call__(self, grammar, source, module_path=None):
         if not settings.fast_parser:
-            return Parser(source, module_path)
+            return Parser(grammar, source, module_path)

         pi = cache.parser_cache.get(module_path, None)
         if pi is None or isinstance(pi.parser, Parser):
-            p = super(CachedFastParser, self).__call__(source, module_path)
+            p = super(CachedFastParser, self).__call__(grammar, source, module_path)
         else:
             p = pi.parser  # pi is a `cache.ParserCacheItem`
             p.update(source)
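Because FastParser uses CachedFastParser as its metaclass, every FastParser(...)
call is routed through the __call__ above: on a cache miss the grammar is
forwarded to the real constructor, on a cache hit the cached parser is only
updated with the new source. A sketch of the call site (mirroring the
UserContextParser hunk further down):

    p = FastParser(grammar, source, module_path)
    # if module_path is already in jedi's parser cache, the cached parser is
    # reused and merely updated with the new source instead of reparsed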
@@ -186,8 +186,9 @@ class FastParser(use_metaclass(CachedFastParser)):

     _keyword_re = re.compile('^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS))

-    def __init__(self, code, module_path=None):
+    def __init__(self, grammar, code, module_path=None):
         # set values like `pr.Module`.
+        self._grammar = grammar
         self.module_path = module_path

         self.current_node = None
@@ -18,6 +18,7 @@ from . import pgen2

 # The grammar file
 _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "grammar3.4.txt")
+python_grammar = pgen2.load_grammar(_GRAMMAR_FILE)


 class Symbols(object):
@@ -202,7 +202,8 @@ class UserContext(object):


 class UserContextParser(object):
-    def __init__(self, source, path, position, user_context):
+    def __init__(self, grammar, source, path, position, user_context):
+        self._grammar = grammar
         self._source = source
         self._path = path and os.path.abspath(path)
         self._position = position
@@ -211,7 +212,7 @@ class UserContextParser(object):
     @cache.underscore_memoization
     def _parser(self):
         cache.invalidate_star_import_cache(self._path)
-        parser = FastParser(self._source, self._path)
+        parser = FastParser(self._grammar, self._source, self._path)
         # Don't pickle that module, because the main module is changing quickly
         cache.save_parser(self._path, None, parser, pickling=False)
         return parser