forked from VimPlug/jedi
Compare commits
138 Commits
Commit SHAs in this comparison:

5427b02712, aa2dfa9446, 3cc97f4b73, 536ad4c5f1, 54242049d2, eb37f82411, 4b841370e4, fe5eaaf56c,
fb8ed61b87, 0117f83809, e660a5a703, 947d91f792, d41e036427, 632072000e, 47c1b8fa07, 9f1dda04c0,
7ecaf19b59, 3a6d815e9e, ed8370fa68, bd779655ae, d6d1a39bf2, 4cc467123c, 1624f6945e, 3e36238da3,
281d6a87a0, 1fd10d978d, a6829ca546, b708b7f07d, b15aa197fd, 1bb0c89f46, a687910368, d2d165267d,
7a51dbea08, f6f2765ab9, 36b2fce030, 7e45ee3096, 35fd1c70bd, db364bc44d, 54d69fb9f4, 8059c3c2c8,
932703f04a, ee47be0140, 1d0796ac07, 6a9c2f8795, bb9ea54402, 8a35a04439, b60ec024fa, 63cafeaa87,
3d27d06781, 5c54650216, aff0cbd68c, fb8ffde32e, 7874026ee5, ac0d0869c9, fb4cff8ef9, 5aa379945e,
eb9af19559, 3a851aac8c, 6fef385774, 26cce4d078, c41bee4253, 2cb565561d, 3a2811fbe8, 6f01264ed3,
ff90beca6b, d218acee6b, c6811675b6, 2d7fd30111, 9dedb9ff68, 53b4e78a9b, 689af9fc4e, 42e8861798,
b4af42ddb3, 3163f4d821, dad40597c5, 52d855118a, 0f66a3c7a8, d0b6d41e99, aaf6c61e69, 519fa9cfb5,
ce41119051, 8156a6b8a2, fd50146f92, 96c67cee26, 4573ab19f4, 448bfd0992, b136800cfc, 06702d2a40,
a83b43ccfd, 93f14157a6, 0effd348e8, c332fba488, 375749c5c3, 55c9fd3227, 9a851165ad, bb8fe0b24c,
68a7365a0a, 9efb3f0af2, 717bfeb574, 97fc3bc23c, 9b5e6d16da, 595ffc24d4, 922c480e2e, a635b6839a,
af9b0ba8d6, 82d165a723, a7b1e3fe70, 6e3b00802c, 818fb4f60c, ccef008376, c7a74e6d1c, 989e4bac89,
b814a91f29, 5c9769c5a3, ee98eab64c, 05e05252fa, a859add6d7, fc27ca1b6a, 784de85b36, 0fb386d7e2,
5513f72987, ef1b1f41e4, 68c6f8dd03, b72aa41019, adc08785b6, 8131f19751, b6e61133d8, 37d7b85ed1,
c6cd18802b, c809aad67f, 1d64a5caa1, 90fffd883e, 647aec11a6, c5071f9f49, 445bf6c419, b3cb7b5490,
6ccac94162, f2b41b1752
@@ -40,6 +40,8 @@ Guido van Rossum (@gvanrossum) <guido@python.org>
Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
Cristi Burcă (@scribu)
bstaint (@bstaint)
Mathias Rav (@Mortal) <rav@cs.au.dk>
Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>

Note: (@user) means a github user name.
@@ -3,6 +3,17 @@
Changelog
---------

0.10.2 (2017-04-05)
+++++++++++++++++++

- Python Packaging sucks. Some files were not included in 0.10.1.

0.10.1 (2017-04-05)
+++++++++++++++++++

- Fixed a few very annoying bugs.
- Prepared the parser to be factored out of Jedi.

0.10.0 (2017-02-03)
+++++++++++++++++++
@@ -8,7 +8,7 @@ include conftest.py
include pytest.ini
include tox.ini
include jedi/evaluate/compiled/fake/*.pym
include jedi/parser/grammar*.txt
include jedi/parser/python/grammar*.txt
recursive-include test *
recursive-include docs *
recursive-exclude * *.pyc
@@ -32,7 +32,7 @@ It's really easy.
Jedi can currently be used with the following editors/projects:

- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_)
- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
- TextMate_ (Not sure if it's actually working)
@@ -191,6 +191,7 @@ Acknowledgements
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _completor.vim: https://github.com/maralla/completor.vim
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _company-mode: https://github.com/syohex/emacs-company-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
deploy.sh (Executable file, 5 additions)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

python setup.py sdist bdist_wheel
# Maybe do a pip install twine before.
twine upload dist/*
@@ -36,7 +36,7 @@ As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""

__version__ = '0.10.0'
__version__ = '0.10.2'

from jedi.api import Script, Interpreter, NotFoundError, set_debug_function
from jedi.api import preload_module, defined_names, names
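The hunk above bumps `__version__` from 0.10.0 to 0.10.2 and keeps re-exporting the public entry points (`Script`, `Interpreter`, `names`, ...). As a hedged usage sketch, not part of this diff, the re-exported `Script` is typically driven like this; the keyword names follow the 0.10.x documentation:

```python
# Minimal sketch of the public Script API re-exported above (assumes jedi
# 0.10.x keyword arguments: source, line, column, path).
import jedi

source = '''import datetime
datetime.da'''
script = jedi.Script(source, line=2, column=len('datetime.da'), path='example.py')
print([c.name for c in script.completions()])  # e.g. ['date', 'datetime', ...]
```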
@@ -34,8 +34,29 @@ class DummyFile(object):
del self.loader


def find_module_py33(string, path=None):
loader = importlib.machinery.PathFinder.find_module(string, path)
def find_module_py34(string, path=None, fullname=None):
implicit_namespace_pkg = False
spec = None
loader = None

spec = importlib.machinery.PathFinder.find_spec(string, path)
if hasattr(spec, 'origin'):
origin = spec.origin
implicit_namespace_pkg = origin == 'namespace'

# We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs
if implicit_namespace_pkg:
fullname = string if not path else fullname
implicit_ns_info = ImplicitNSInfo(fullname, spec.submodule_search_locations._path)
return None, implicit_ns_info, False

# we have found the tail end of the dotted path
if hasattr(spec, 'loader'):
loader = spec.loader
return find_module_py33(string, path, loader)

def find_module_py33(string, path=None, loader=None, fullname=None):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)

if loader is None and path is None: # Fallback to find builtins
try:
@@ -81,7 +102,7 @@ def find_module_py33(string, path=None):
return module_file, module_path, is_package


def find_module_pre_py33(string, path=None):
def find_module_pre_py33(string, path=None, fullname=None):
try:
module_file, module_path, description = imp.find_module(string, path)
module_type = description[2]
@@ -121,6 +142,7 @@ def find_module_pre_py33(string, path=None):


find_module = find_module_py33 if is_py33 else find_module_pre_py33
find_module = find_module_py34 if is_py34 else find_module
find_module.__doc__ = """
Provides information about a module.

@@ -132,6 +154,12 @@ if the module is contained in a package.
"""


class ImplicitNSInfo(object):
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths

# unicode function
try:
unicode = unicode
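The new `find_module_py34` above recognizes implicit namespace packages by asking `importlib.machinery.PathFinder.find_spec` for a spec and checking whether its `origin` is the string `'namespace'`. A standalone sketch of that detection follows; it is not jedi code, `describe_module` is a made-up helper, and the note about newer Python versions (origin `None` plus populated `submodule_search_locations`) is an assumption about CPython behavior rather than something taken from this diff.

```python
# Sketch: classify a module name the way find_module_py34 above distinguishes
# implicit namespace packages from regular modules/packages.
import importlib.machinery


def describe_module(name, path=None):
    spec = importlib.machinery.PathFinder.find_spec(name, path)
    if spec is None:
        return 'not found on the given path'
    # Python 3.4-3.6 report origin == 'namespace' for implicit namespace
    # packages; later versions use origin None with search locations set.
    if spec.origin in ('namespace', None) and spec.submodule_search_locations:
        return 'namespace package: %s' % list(spec.submodule_search_locations)
    return 'regular module/package at %s' % spec.origin


print(describe_module('json'))
```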
@@ -13,10 +13,9 @@ import os
import warnings
import sys

from jedi.parser import load_grammar
from jedi.parser import tree
from jedi.parser.diff import FastParser
from jedi.parser.utils import save_parser
from jedi.parser.python import load_grammar
from jedi.parser.python import tree
from jedi.parser.python import parse
from jedi import debug
from jedi import settings
from jedi import common
@@ -133,15 +132,21 @@ class Script(object):

@cache.memoize_method
def _get_module_node(self):
cache.invalidate_star_import_cache(self._path)
parser = FastParser(self._grammar, self._source, self.path)
save_parser(self.path, parser, pickling=False)

return parser.module
return parse(
code=self._source,
path=self.path,
grammar=self._grammar,
cache=False, # No disk cache, because the current script often changes.
diff_cache=True,
)

@cache.memoize_method
def _get_module(self):
module = er.ModuleContext(self._evaluator, self._get_module_node())
module = er.ModuleContext(
self._evaluator,
self._get_module_node(),
self.path
)
imports.add_module(self._evaluator, module.name.string_name, module)
return module

@@ -343,7 +348,7 @@ class Script(object):
types = context.eval_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(self._evaluator, types, testlist)
unpack_tuple_to_dict(context, types, testlist)
else:
try_iter_content(self._evaluator.goto_definitions(context, node))
self._evaluator.reset_recursion_limitations()
@@ -397,7 +402,7 @@ class Interpreter(Script):
return interpreter.MixedModuleContext(
self._evaluator,
parser_module,
self.namespaces
self.namespaces,
path=self.path
)
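The `Interpreter` hunk above threads `path=self.path` into `MixedModuleContext`. For reference, a hedged sketch of how the public `Interpreter` entry point is used; the `data` namespace dict is invented for illustration:

```python
# Interpreter completes against live objects passed in as namespace dicts.
import jedi

namespace = {'data': {'users': [1, 2, 3]}}
completions = jedi.Interpreter('data.ke', [namespace]).completions()
print([c.name for c in completions])  # 'keys' should be among the results
```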
@@ -9,7 +9,7 @@ import re
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.parser.utils import load_parser
from jedi.parser.cache import parser_cache
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
@@ -391,12 +391,11 @@ class BaseDefinition(object):
return ''

path = self._name.get_root_context().py__file__()
parser = load_parser(path)
lines = common.splitlines(parser.source)
lines = parser_cache[path].lines

line_nr = self._name.start_pos[0]
start_line_nr = line_nr - before
return '\n'.join(lines[start_line_nr:line_nr + after + 1])
return ''.join(lines[start_line_nr:line_nr + after + 1])


class Completion(BaseDefinition):
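The switch from `'\n'.join(...)` to `''.join(...)` above suggests that `parser_cache[path].lines` keeps the original line endings, unlike the previously used `common.splitlines(parser.source)`. A small illustration of that difference using plain `str.splitlines`; the assumption about the cached format is mine, not stated in the diff:

```python
# With line endings kept, plain concatenation reproduces the source exactly.
text = 'def f():\n    return 1\n'
without_ends = text.splitlines()   # ['def f():', '    return 1']
with_ends = text.splitlines(True)  # ['def f():\n', '    return 1\n']
assert '\n'.join(without_ends) + '\n' == ''.join(with_ends) == text
```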
@@ -669,7 +668,7 @@ class CallSignature(Definition):
Use :attr:`.module_name` for the module name.
.. todo:: Remove!
"""
return self._executable.get_parent_until()
return self._executable.get_root_node()

def __repr__(self):
return '<%s: %s index %s>' % \
@@ -702,6 +701,12 @@ class _Help(object):

@memoize_method
def _get_node(self, fast):
if isinstance(self._name, (compiled.CompiledContextName, compiled.CompiledName)):
followed = self._name.infer()
if followed:
return next(iter(followed))
return None

if self._name.api_type == 'module' and not fast:
followed = self._name.infer()
if followed:
@@ -1,5 +1,5 @@
from jedi.parser import token
from jedi.parser import tree
from jedi.parser.python import tree
from jedi import debug
from jedi import settings
from jedi.api import classes
@@ -164,7 +164,7 @@ class Completion:
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return list(self._get_class_context_completions(is_function=True))
elif symbol_names[-1] == 'trailer' and nodes[-1] == '.':
elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
completion_names += self._trailer_completions(dot.get_previous_leaf())
else:
@@ -235,7 +235,7 @@ class Completion:
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = leaf.get_parent_until(tree.Class)
cls = tree.search_ancestor(leaf, 'classdef')
if isinstance(cls, (tree.Class, tree.Function)):
# Complete the methods that are defined in the super classes.
random_context = self._module_context.create_context(
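In the hunk above, `leaf.get_parent_until(tree.Class)` is replaced by `tree.search_ancestor(leaf, 'classdef')`, so the lookup now matches on node-type strings instead of node classes. A generic sketch of such an ancestor search over a toy node structure; `Node` is a stand-in for illustration, not the jedi/parso implementation:

```python
class Node(object):
    def __init__(self, type, parent=None):
        self.type = type
        self.parent = parent


def search_ancestor(node, *node_types):
    # Walk parent links until a node whose type matches one of node_types.
    node = node.parent
    while node is not None:
        if node.type in node_types:
            return node
        node = node.parent
    return None


module = Node('file_input')
classdef = Node('classdef', parent=module)
name_leaf = Node('name', parent=classdef)
assert search_ancestor(name_leaf, 'classdef') is classdef
assert search_ancestor(name_leaf, 'funcdef') is None
```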
@@ -6,8 +6,8 @@ from collections import namedtuple
from jedi._compatibility import u
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi import parser
from jedi.parser import tree
from jedi.parser.python.parser import Parser
from jedi.parser.python import tree
from jedi.parser import tokenize
from jedi.cache import time_cache
from jedi import common
@@ -74,16 +74,14 @@ def _get_code_for_stack(code_lines, module_node, position):
return u('')

# If we're not on a comment simply get the previous leaf and proceed.
try:
leaf = leaf.get_previous_leaf()
except IndexError:
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('') # At the beginning of the file.

is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
try:
leaf = leaf.get_previous_leaf()
except IndexError:
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('')

if leaf.type == 'error_leaf' or leaf.type == 'string':
@@ -134,9 +132,9 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + safeword

p = parser.ParserWithRecovery(grammar, code, start_parsing=False)
p = Parser(grammar, error_recovery=True)
try:
p.parse(tokenizer=tokenize_without_endmarker(code))
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return Stack(p.pgen_parser.stack)
raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
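The hunk above keeps the "safeword" trick: a sentinel name is appended to the code and parsing is aborted as soon as the tokenizer reaches it, which leaves the parser state exactly at the cursor. Below is a self-contained sketch of the same idea using only the stdlib tokenizer; `tokens_until_cursor` and the sample input are invented for illustration, and this is not how jedi itself tokenizes.

```python
import io
import tokenize

SAFEWORD = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'


class EndMarkerReached(Exception):
    pass


def tokens_until_cursor(code_before_cursor):
    # Append the sentinel, then stop the moment it shows up in the stream.
    code = code_before_cursor + SAFEWORD
    for tok in tokenize.generate_tokens(io.StringIO(code).readline):
        if tok.type == tokenize.NAME and tok.string == SAFEWORD:
            raise EndMarkerReached()
        yield tok


collected = []
try:
    for tok in tokens_until_cursor('foo = bar(1, '):
        collected.append(tok)
except EndMarkerReached:
    pass
except tokenize.TokenError:
    pass  # incomplete input may also end in a tokenizer error
print([t.string for t in collected])  # the tokens seen before the cursor
```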
@@ -244,6 +242,8 @@ def _get_call_signature_details_from_error_node(node, position):
# until the parentheses is enough.
children = node.children[index:]
name = element.get_previous_leaf()
if name is None:
continue
if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
return CallSignatureDetails(
element,
@@ -255,9 +255,8 @@ def get_call_signature_details(module, position):
leaf = module.get_leaf_for_position(position, include_prefixes=True)
if leaf.start_pos >= position:
# Whitespace / comments after the leaf count towards the previous leaf.
try:
leaf = leaf.get_previous_leaf()
except IndexError:
leaf = leaf.get_previous_leaf()
if leaf is None:
return None

if leaf == ')':
@@ -281,6 +280,8 @@ def get_call_signature_details(module, position):

if node.type == 'trailer' and node.children[0] == '(':
leaf = node.get_previous_leaf()
if leaf is None:
return None
return CallSignatureDetails(
node.children[0], *_get_index_and_key(node.children, position))

@@ -299,7 +300,7 @@ def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)

module_path = bracket_leaf.get_parent_until().path
module_path = context.get_root_context().py__file__()
if module_path is None:
yield None # Don't cache!
else:
@@ -12,12 +12,12 @@ class MixedModuleContext(Context):
resets_positions = True
type = 'mixed_module'

def __init__(self, evaluator, tree_module, namespaces):
def __init__(self, evaluator, tree_module, namespaces, path):
self.evaluator = evaluator
self._namespaces = namespaces

self._namespace_objects = [type('jedi_namespace', (), n) for n in namespaces]
self._module_context = ModuleContext(evaluator, tree_module)
self._module_context = ModuleContext(evaluator, tree_module, path=path)
self.tree_node = tree_module

def get_node(self):
@@ -33,7 +33,7 @@ class MixedModuleContext(Context):
self.evaluator,
parent_context=self,
compiled_object=compiled_object,
tree_name=self.tree_node.name
tree_context=self._module_context
)
for filter in mixed_object.get_filters(*args, **kwargs):
yield filter
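The `type('jedi_namespace', (), n)` line kept above turns each namespace dict into a throwaway class so that plain attribute access works on it. A tiny illustration with invented values:

```python
ns = {'answer': 42, 'greet': lambda: 'hi'}
namespace_obj = type('jedi_namespace', (), ns)
print(namespace_obj.answer)   # 42
print(namespace_obj.greet())  # 'hi'
```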
@@ -4,7 +4,8 @@ import keyword
from jedi._compatibility import is_py3, is_py35
from jedi import common
from jedi.evaluate.filters import AbstractNameDefinition
from jedi.parser.tree import Leaf
from jedi.parser.python.tree import Leaf

try:
from pydoc_data import topics as pydoc_topics
except ImportError:
@@ -90,9 +91,6 @@ class Keyword(object):
self.start_pos = pos
self.parent = evaluator.BUILTINS

def get_parent_until(self):
return self.parent

@property
def only_valid_as_leaf(self):
return self.name.value in keywords_only_valid_as_leaf
@@ -1,10 +1,14 @@
from jedi.api import classes
from jedi.parser import tree
from jedi.parser.python import tree
from jedi.evaluate import imports
from jedi.evaluate.filters import TreeNameDefinition
from jedi.evaluate.representation import ModuleContext


def compare_contexts(c1, c2):
return c1 == c2 or (c1[1] == c2[1] and c1[0].tree_node == c2[0].tree_node)


def usages(evaluator, definition_names, mods):
"""
:param definitions: list of Name
@@ -14,8 +18,9 @@ def usages(evaluator, definition_names, mods):
if name.api_type == 'module':
found = False
for context in name.infer():
found = True
yield context.name
if isinstance(context, ModuleContext):
found = True
yield context.name
if not found:
yield name
else:
@@ -39,7 +44,9 @@ def usages(evaluator, definition_names, mods):
for name_node in m.tree_node.used_names.get(search_name, []):
context = evaluator.create_context(m, name_node)
result = evaluator.goto(context, name_node)
if [c for c in compare_array(result) if c in compare_definitions]:
if any(compare_contexts(c1, c2)
for c1 in compare_array(result)
for c2 in compare_definitions):
name = TreeNameDefinition(context, name_node)
definition_names.add(name)
# Previous definitions might be imports, so include them
@@ -3,8 +3,6 @@ This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:

- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
@@ -14,14 +12,48 @@ there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import inspect

from jedi import settings
from jedi.parser.utils import parser_cache
from jedi.parser.utils import underscore_memoization
from jedi.parser.cache import parser_cache

_time_caches = {}


def underscore_memoization(func):
"""
Decorator for methods::

class A(object):
def x(self):
if self._x:
self._x = 10
return self._x

Becomes::

class A(object):
@underscore_memoization
def x(self):
return 10

A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__

def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result

return wrapper


def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
@@ -90,31 +122,3 @@ def memoize_method(method):
dct[key] = result
return result
return wrapper


def _invalidate_star_import_cache_module(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
t, modules = _time_caches['star_import_cache_validity'][module]
except KeyError:
pass
else:
del _time_caches['star_import_cache_validity'][module]

# This stuff was part of load_parser. However since we're most likely
# not going to use star import caching anymore, just ignore it.
#else:
# In case there is already a module cached and this module
# has to be reparsed, we also need to invalidate the import
# caches.
# _invalidate_star_import_cache_module(parser_cache_item.parser.module)


def invalidate_star_import_cache(path):
"""On success returns True."""
try:
parser_cache_item = parser_cache[path]
except KeyError:
pass
else:
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
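With `underscore_memoization` now living in this module, a typical use looks like the sketch below; the import path assumes the post-change location `jedi.cache`, and the `Expensive` class is made up for illustration.

```python
from jedi.cache import underscore_memoization  # location after this change


class Expensive(object):
    call_count = 0

    @underscore_memoization
    def answer(self):
        Expensive.call_count += 1
        return 42


e = Expensive()
assert e.answer() == e.answer() == 42
assert Expensive.call_count == 1   # the body ran only once
assert e._answer == 42             # cached attribute written by the decorator
```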
@@ -159,24 +159,29 @@ def splitlines(string, keepends=False):
also on form feeds.
"""
if keepends:
# If capturing parentheses are used in pattern, then the text of all
# groups in the pattern are also returned as part of the resulting
# list.
lst = re.split('(\n|\r\n)', string)
lst = string.splitlines(True)

# Need to merge the new lines with the actual lines.
odd = False
lines = []
for string in lst:
if odd:
line += string
lines.append(line)
else:
line = string
odd = not odd
if odd:
lines.append(line)
return lines
# We have to merge lines that were broken by form feed characters.
merge = []
for i, line in enumerate(lst):
if line.endswith('\f'):
merge.append(i)

for index in reversed(merge):
try:
lst[index] = lst[index] + lst[index + 1]
del lst[index + 1]
except IndexError:
# index + 1 can be empty and therefore there's no need to
# merge.
pass

# The stdlib's implementation of the end is inconsistent when calling
# it with/without keepends. One time there's an empty string in the
# end, one time there's none.
if string.endswith('\n') or string == '':
lst.append('')
return lst
else:
return re.split('\n|\r\n', string)
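The merging loop above exists because `str.splitlines` also treats the form feed character as a line boundary, while this function only wants to split on real newlines. A quick illustration of that stdlib behavior:

```python
text = 'first\fstill first\nsecond\n'
print(text.splitlines(True))  # ['first\x0c', 'still first\n', 'second\n']
print(text.split('\n'))       # ['first\x0cstill first', 'second', '']
```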
@@ -63,7 +63,7 @@ that are not used are just being ignored.
|
||||
import copy
|
||||
import sys
|
||||
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi import debug
|
||||
from jedi.common import unite
|
||||
from jedi.evaluate import representation as er
|
||||
@@ -80,6 +80,7 @@ from jedi.evaluate import helpers
|
||||
from jedi.evaluate import pep0484
|
||||
from jedi.evaluate.filters import TreeNameDefinition, ParamName
|
||||
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
|
||||
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
|
||||
|
||||
|
||||
class Evaluator(object):
|
||||
@@ -150,7 +151,8 @@ class Evaluator(object):
|
||||
types = self.eval_element(context, rhs)
|
||||
|
||||
if seek_name:
|
||||
types = finder.check_tuple_assignments(self, types, seek_name)
|
||||
c_node = ContextualizedName(context, seek_name)
|
||||
types = finder.check_tuple_assignments(self, c_node, types)
|
||||
|
||||
first_operation = stmt.first_operation()
|
||||
if first_operation not in ('=', None) and first_operation.type == 'operator':
|
||||
@@ -161,15 +163,15 @@ class Evaluator(object):
|
||||
left = context.py__getattribute__(
|
||||
name, position=stmt.start_pos, search_global=True)
|
||||
|
||||
for_stmt = stmt.get_parent_until(tree.ForStmt)
|
||||
if isinstance(for_stmt, tree.ForStmt) and types \
|
||||
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
|
||||
if for_stmt is not None and for_stmt.type == 'for_stmt' and types \
|
||||
and for_stmt.defines_one_name():
|
||||
# Iterate through result and add the values, that's possible
|
||||
# only in for loops without clutter, because they are
|
||||
# predictable. Also only do it, if the variable is not a tuple.
|
||||
node = for_stmt.get_input_node()
|
||||
for_iterables = self.eval_element(context, node)
|
||||
ordered = list(iterable.py__iter__(self, for_iterables, node))
|
||||
cn = ContextualizedNode(context, node)
|
||||
ordered = list(iterable.py__iter__(self, cn.infer(), cn))
|
||||
|
||||
for lazy_context in ordered:
|
||||
dct = {str(for_stmt.children[1]): lazy_context.infer()}
|
||||
@@ -269,18 +271,19 @@ class Evaluator(object):
|
||||
def _eval_element_not_cached(self, context, element):
|
||||
debug.dbg('eval_element %s@%s', element, element.start_pos)
|
||||
types = set()
|
||||
if isinstance(element, (tree.Name, tree.Literal)) or element.type == 'atom':
|
||||
typ = element.type
|
||||
if typ in ('name', 'number', 'string', 'atom'):
|
||||
types = self.eval_atom(context, element)
|
||||
elif isinstance(element, tree.Keyword):
|
||||
elif typ == 'keyword':
|
||||
# For False/True/None
|
||||
if element.value in ('False', 'True', 'None'):
|
||||
types.add(compiled.builtin_from_name(self, element.value))
|
||||
# else: print e.g. could be evaluated like this in Python 2.7
|
||||
elif isinstance(element, tree.Lambda):
|
||||
elif typ == 'lambda':
|
||||
types = set([er.FunctionContext(self, context, element)])
|
||||
elif element.type == 'expr_stmt':
|
||||
elif typ == 'expr_stmt':
|
||||
types = self.eval_statement(context, element)
|
||||
elif element.type in ('power', 'atom_expr'):
|
||||
elif typ in ('power', 'atom_expr'):
|
||||
first_child = element.children[0]
|
||||
if not (first_child.type == 'keyword' and first_child.value == 'await'):
|
||||
types = self.eval_atom(context, first_child)
|
||||
@@ -290,22 +293,22 @@ class Evaluator(object):
|
||||
types = set(precedence.calculate(self, context, types, trailer, right))
|
||||
break
|
||||
types = self.eval_trailer(context, types, trailer)
|
||||
elif element.type in ('testlist_star_expr', 'testlist',):
|
||||
elif typ in ('testlist_star_expr', 'testlist',):
|
||||
# The implicit tuple in statements.
|
||||
types = set([iterable.SequenceLiteralContext(self, context, element)])
|
||||
elif element.type in ('not_test', 'factor'):
|
||||
elif typ in ('not_test', 'factor'):
|
||||
types = self.eval_element(context, element.children[-1])
|
||||
for operator in element.children[:-1]:
|
||||
types = set(precedence.factor_calculate(self, types, operator))
|
||||
elif element.type == 'test':
|
||||
elif typ == 'test':
|
||||
# `x if foo else y` case.
|
||||
types = (self.eval_element(context, element.children[0]) |
|
||||
self.eval_element(context, element.children[-1]))
|
||||
elif element.type == 'operator':
|
||||
elif typ == 'operator':
|
||||
# Must be an ellipsis, other operators are not evaluated.
|
||||
assert element.value == '...'
|
||||
types = set([compiled.create(self, Ellipsis)])
|
||||
elif element.type == 'dotted_name':
|
||||
elif typ == 'dotted_name':
|
||||
types = self.eval_atom(context, element.children[0])
|
||||
for next_name in element.children[2::2]:
|
||||
# TODO add search_global=True?
|
||||
@@ -314,12 +317,10 @@ class Evaluator(object):
|
||||
for typ in types
|
||||
)
|
||||
types = types
|
||||
elif element.type == 'eval_input':
|
||||
elif typ == 'eval_input':
|
||||
types = self._eval_element_not_cached(context, element.children[0])
|
||||
elif element.type == 'annassign':
|
||||
print(element.children[1])
|
||||
elif typ == 'annassign':
|
||||
types = pep0484._evaluate_for_annotation(context, element.children[1])
|
||||
print('xxx')
|
||||
else:
|
||||
types = precedence.calculate_children(self, context, element.children)
|
||||
debug.dbg('eval_element result %s', types)
|
||||
@@ -331,12 +332,12 @@ class Evaluator(object):
|
||||
generate the node (because it has just one child). In that case an atom
|
||||
might be a name or a literal as well.
|
||||
"""
|
||||
if isinstance(atom, tree.Name):
|
||||
if atom.type == 'name':
|
||||
# This is the first global lookup.
|
||||
stmt = atom.get_definition()
|
||||
if isinstance(stmt, tree.CompFor):
|
||||
stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt))
|
||||
if stmt.type != 'expr_stmt':
|
||||
if stmt.type == 'comp_for':
|
||||
stmt = tree.search_ancestor(stmt, ('expr_stmt', 'lambda', 'funcdef', 'classdef'))
|
||||
if stmt is None or stmt.type != 'expr_stmt':
|
||||
# We only need to adjust the start_pos for statements, because
|
||||
# there the name cannot be used.
|
||||
stmt = atom
|
||||
@@ -451,8 +452,10 @@ class Evaluator(object):
|
||||
return self.eval_statement(context, def_, name)
|
||||
elif def_.type == 'for_stmt':
|
||||
container_types = self.eval_element(context, def_.children[3])
|
||||
for_types = iterable.py__iter__types(self, container_types, def_.children[3])
|
||||
return finder.check_tuple_assignments(self, for_types, name)
|
||||
cn = ContextualizedNode(context, def_.children[3])
|
||||
for_types = iterable.py__iter__types(self, container_types, cn)
|
||||
c_node = ContextualizedName(context, name)
|
||||
return finder.check_tuple_assignments(self, c_node, for_types)
|
||||
elif def_.type in ('import_from', 'import_name'):
|
||||
return imports.infer_import(context, name)
|
||||
|
||||
@@ -486,7 +489,7 @@ class Evaluator(object):
|
||||
if param_name.string_name == name.value:
|
||||
param_names.append(param_name)
|
||||
return param_names
|
||||
elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names():
|
||||
elif par.type == 'expr_stmt' and name in par.get_defined_names():
|
||||
# Only take the parent, because if it's more complicated than just
|
||||
# a name it's something you can "goto" again.
|
||||
return [TreeNameDefinition(context, name)]
|
||||
|
||||
@@ -2,11 +2,9 @@
|
||||
Module for statical analysis.
|
||||
"""
|
||||
from jedi import debug
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi.evaluate.compiled import CompiledObject
|
||||
|
||||
from jedi.common import unite
|
||||
|
||||
|
||||
CODES = {
|
||||
'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
|
||||
@@ -82,7 +80,9 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None):
|
||||
if _check_for_exception_catch(node_context, node, exception, payload):
|
||||
return
|
||||
|
||||
module_path = node.get_root_node().path
|
||||
# TODO this path is probably not right
|
||||
module_context = node_context.get_root_context()
|
||||
module_path = module_context.py__file__()
|
||||
instance = typ(error_name, module_path, node.start_pos, message)
|
||||
debug.warning(str(instance), format=False)
|
||||
node_context.evaluator.analysis.append(instance)
|
||||
@@ -203,10 +203,10 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None)
|
||||
while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
|
||||
if isinstance(obj, tree.Flow):
|
||||
# try/except catch check
|
||||
if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception):
|
||||
if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
|
||||
return True
|
||||
# hasattr check
|
||||
if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt):
|
||||
if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
|
||||
if check_hasattr(obj.children[1], obj.children[3]):
|
||||
return True
|
||||
obj = obj.parent
|
||||
|
||||
@@ -10,7 +10,7 @@ from functools import partial
|
||||
from jedi._compatibility import builtins as _builtins, unicode
|
||||
from jedi import debug
|
||||
from jedi.cache import underscore_memoization, memoize_method
|
||||
from jedi.parser.tree import Param, Operator
|
||||
from jedi.parser.python.tree import Param, Operator
|
||||
from jedi.evaluate.helpers import FakeName
|
||||
from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
|
||||
ContextNameMixin
|
||||
@@ -346,7 +346,7 @@ class CompiledObjectFilter(AbstractFilter):
|
||||
|
||||
is_instance = self._is_instance or fake.is_class_instance(obj)
|
||||
# ``dir`` doesn't include the type names.
|
||||
if not inspect.ismodule(obj) and obj != type and not is_instance:
|
||||
if not inspect.ismodule(obj) and (obj is not type) and not is_instance:
|
||||
for filter in create(self._evaluator, type).get_filters():
|
||||
names += filter.values()
|
||||
return names
|
||||
@@ -413,7 +413,7 @@ def load_module(evaluator, path=None, name=None):
|
||||
raise
|
||||
except ImportError:
|
||||
# If a module is "corrupt" or not really a Python module or whatever.
|
||||
debug.warning('Module %s not importable.', path)
|
||||
debug.warning('Module %s not importable in path %s.', dotted_path, path)
|
||||
return None
|
||||
finally:
|
||||
sys.path = temp
|
||||
|
||||
@@ -9,8 +9,8 @@ import inspect
|
||||
import types
|
||||
|
||||
from jedi._compatibility import is_py3, builtins, unicode, is_py34
|
||||
from jedi.parser import ParserWithRecovery, load_grammar
|
||||
from jedi.parser import tree as pt
|
||||
from jedi.parser.python import parse
|
||||
from jedi.parser.python import tree
|
||||
|
||||
modules = {}
|
||||
|
||||
@@ -61,18 +61,16 @@ def _load_faked_module(module):
|
||||
except IOError:
|
||||
modules[module_name] = None
|
||||
return
|
||||
grammar = load_grammar(version='3.4')
|
||||
module = ParserWithRecovery(grammar, unicode(source), module_name).module
|
||||
modules[module_name] = module
|
||||
modules[module_name] = m = parse(unicode(source))
|
||||
|
||||
if module_name == 'builtins' and not is_py3:
|
||||
# There are two implementations of `open` for either python 2/3.
|
||||
# -> Rename the python2 version (`look at fake/builtins.pym`).
|
||||
open_func = _search_scope(module, 'open')
|
||||
open_func = _search_scope(m, 'open')
|
||||
open_func.children[1].value = 'open_python3'
|
||||
open_func = _search_scope(module, 'open_python2')
|
||||
open_func = _search_scope(m, 'open_python2')
|
||||
open_func.children[1].value = 'open'
|
||||
return module
|
||||
return m
|
||||
|
||||
|
||||
def _search_scope(scope, obj_name):
|
||||
@@ -115,7 +113,7 @@ def _faked(module, obj, name):
|
||||
if faked_mod is None:
|
||||
return None, None
|
||||
|
||||
# Having the module as a `parser.tree.Module`, we need to scan
|
||||
# Having the module as a `parser.python.tree.Module`, we need to scan
|
||||
# for methods.
|
||||
if name is None:
|
||||
if inspect.isbuiltin(obj) or inspect.isclass(obj):
|
||||
@@ -132,7 +130,7 @@ def _faked(module, obj, name):
|
||||
return None, None
|
||||
return _search_scope(cls, obj.__name__), faked_mod
|
||||
else:
|
||||
if obj == module:
|
||||
if obj is module:
|
||||
return _search_scope(faked_mod, name), faked_mod
|
||||
else:
|
||||
try:
|
||||
@@ -156,7 +154,7 @@ def memoize_faked(obj):
|
||||
key = (obj, args, frozenset(kwargs.items()))
|
||||
try:
|
||||
result = cache[key]
|
||||
except TypeError:
|
||||
except (TypeError, ValueError):
|
||||
return obj(*args, **kwargs)
|
||||
except KeyError:
|
||||
result = obj(*args, **kwargs)
|
||||
@@ -182,9 +180,9 @@ def _get_faked(module, obj, name=None):
|
||||
assert result.type == 'funcdef'
|
||||
doc = '"""%s"""' % obj.__doc__ # TODO need escapes.
|
||||
suite = result.children[-1]
|
||||
string = pt.String(doc, (0, 0), '')
|
||||
new_line = pt.Newline('\n', (0, 0))
|
||||
docstr_node = pt.Node('simple_stmt', [string, new_line])
|
||||
string = tree.String(doc, (0, 0), '')
|
||||
new_line = tree.Newline('\n', (0, 0))
|
||||
docstr_node = tree.PythonNode('simple_stmt', [string, new_line])
|
||||
suite.children.insert(1, docstr_node)
|
||||
return result, fake_module
|
||||
|
||||
|
||||
@@ -5,19 +5,19 @@ Used only for REPL Completion.
|
||||
import inspect
|
||||
import os
|
||||
|
||||
from jedi import common
|
||||
from jedi.parser.diff import FastParser
|
||||
from jedi.parser.python import parse
|
||||
from jedi.evaluate import compiled
|
||||
from jedi.cache import underscore_memoization
|
||||
from jedi.evaluate import imports
|
||||
from jedi.evaluate.context import Context
|
||||
from jedi.evaluate.cache import memoize_default
|
||||
|
||||
|
||||
class MixedObject(object):
|
||||
"""
|
||||
A ``MixedObject`` is used in two ways:
|
||||
|
||||
1. It uses the default logic of ``parser.tree`` objects,
|
||||
1. It uses the default logic of ``parser.python.tree`` objects,
|
||||
2. except for getattr calls. The names dicts are generated in a fashion
|
||||
like ``CompiledObject``.
|
||||
|
||||
@@ -30,25 +30,12 @@ class MixedObject(object):
|
||||
fewer special cases, because we in Python you don't have the same freedoms
|
||||
to modify the runtime.
|
||||
"""
|
||||
def __init__(self, evaluator, parent_context, compiled_object, tree_name):
|
||||
def __init__(self, evaluator, parent_context, compiled_object, tree_context):
|
||||
self.evaluator = evaluator
|
||||
self.parent_context = parent_context
|
||||
self.compiled_object = compiled_object
|
||||
self._context = tree_context
|
||||
self.obj = compiled_object.obj
|
||||
self._tree_name = tree_name
|
||||
name_module = tree_name.get_root_node()
|
||||
if parent_context.tree_node.get_root_node() != name_module:
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
module_context = ModuleContext(evaluator, name_module)
|
||||
name = compiled_object.get_root_context().py__name__()
|
||||
imports.add_module(evaluator, name, module_context)
|
||||
else:
|
||||
module_context = parent_context.get_root_context()
|
||||
|
||||
self._context = module_context.create_context(
|
||||
tree_name.parent,
|
||||
node_is_context=True,
|
||||
node_is_object=True
|
||||
)
|
||||
|
||||
# We have to overwrite everything that has to do with trailers, name
|
||||
# lookups and filters to make it possible to route name lookups towards
|
||||
@@ -115,15 +102,14 @@ class MixedObjectFilter(compiled.CompiledObjectFilter):
|
||||
#return MixedName(self._evaluator, self._compiled_object, name)
|
||||
|
||||
|
||||
def parse(grammar, path):
|
||||
with open(path) as f:
|
||||
source = f.read()
|
||||
source = common.source_to_unicode(source)
|
||||
return FastParser(grammar, source, path)
|
||||
|
||||
|
||||
@memoize_default(evaluator_is_first_arg=True)
|
||||
def _load_module(evaluator, path, python_object):
|
||||
module = parse(evaluator.grammar, path).module
|
||||
module = parse(
|
||||
grammar=evaluator.grammar,
|
||||
path=path,
|
||||
cache=True,
|
||||
diff_cache=True
|
||||
).get_root_node()
|
||||
python_module = inspect.getmodule(python_object)
|
||||
|
||||
evaluator.modules[python_module.__name__] = module
|
||||
@@ -135,10 +121,10 @@ def find_syntax_node_name(evaluator, python_object):
|
||||
path = inspect.getsourcefile(python_object)
|
||||
except TypeError:
|
||||
# The type might not be known (e.g. class_with_dict.__weakref__)
|
||||
return None
|
||||
return None, None
|
||||
if path is None or not os.path.exists(path):
|
||||
# The path might not exist or be e.g. <stdin>.
|
||||
return None
|
||||
return None, None
|
||||
|
||||
module = _load_module(evaluator, path, python_object)
|
||||
|
||||
@@ -146,17 +132,22 @@ def find_syntax_node_name(evaluator, python_object):
|
||||
# We don't need to check names for modules, because there's not really
|
||||
# a way to write a module in a module in Python (and also __name__ can
|
||||
# be something like ``email.utils``).
|
||||
return module.name
|
||||
return module, path
|
||||
|
||||
try:
|
||||
name_str = python_object.__name__
|
||||
except AttributeError:
|
||||
# Stuff like python_function.__code__.
|
||||
return None, None
|
||||
|
||||
name_str = python_object.__name__
|
||||
if name_str == '<lambda>':
|
||||
return None # It's too hard to find lambdas.
|
||||
return None, None # It's too hard to find lambdas.
|
||||
|
||||
# Doesn't always work (e.g. os.stat_result)
|
||||
try:
|
||||
names = module.used_names[name_str]
|
||||
except KeyError:
|
||||
return None
|
||||
return None, None
|
||||
names = [n for n in names if n.is_definition()]
|
||||
|
||||
try:
|
||||
@@ -173,22 +164,44 @@ def find_syntax_node_name(evaluator, python_object):
|
||||
# There's a chance that the object is not available anymore, because
|
||||
# the code has changed in the background.
|
||||
if line_names:
|
||||
return line_names[-1]
|
||||
return line_names[-1].parent, path
|
||||
|
||||
# It's really hard to actually get the right definition, here as a last
|
||||
# resort we just return the last one. This chance might lead to odd
|
||||
# completions at some points but will lead to mostly correct type
|
||||
# inference, because people tend to define a public name in a module only
|
||||
# once.
|
||||
return names[-1]
|
||||
return names[-1].parent, path
|
||||
|
||||
|
||||
@compiled.compiled_objects_cache('mixed_cache')
|
||||
def create(evaluator, obj, parent_context=None, *args):
|
||||
tree_name = find_syntax_node_name(evaluator, obj)
|
||||
tree_node, path = find_syntax_node_name(evaluator, obj)
|
||||
|
||||
compiled_object = compiled.create(
|
||||
evaluator, obj, parent_context=parent_context.compiled_object)
|
||||
if tree_name is None:
|
||||
if tree_node is None:
|
||||
return compiled_object
|
||||
return MixedObject(evaluator, parent_context, compiled_object, tree_name)
|
||||
|
||||
module_node = tree_node.get_root_node()
|
||||
if parent_context.tree_node.get_root_node() == module_node:
|
||||
module_context = parent_context.get_root_context()
|
||||
else:
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
module_context = ModuleContext(evaluator, module_node, path=path)
|
||||
name = compiled_object.get_root_context().py__name__()
|
||||
imports.add_module(evaluator, name, module_context)
|
||||
|
||||
tree_context = module_context.create_context(
|
||||
tree_node,
|
||||
node_is_context=True,
|
||||
node_is_object=True
|
||||
)
|
||||
|
||||
return MixedObject(
|
||||
evaluator,
|
||||
parent_context,
|
||||
compiled_object,
|
||||
tree_context=tree_context
|
||||
)
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from jedi._compatibility import Python3Method
|
||||
from jedi.common import unite
|
||||
from jedi.parser.python.tree import ExprStmt, CompFor
|
||||
|
||||
|
||||
class Context(object):
|
||||
@@ -14,9 +15,6 @@ class Context(object):
|
||||
self.evaluator = evaluator
|
||||
self.parent_context = parent_context
|
||||
|
||||
def get_parent_flow_context(self):
|
||||
return self.parent_context
|
||||
|
||||
def get_root_context(self):
|
||||
context = self
|
||||
while True:
|
||||
@@ -76,12 +74,6 @@ class TreeContext(Context):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
|
||||
|
||||
|
||||
class FlowContext(TreeContext):
|
||||
def get_parent_flow_context(self):
|
||||
if 1:
|
||||
return self.parent_context
|
||||
|
||||
|
||||
class AbstractLazyContext(object):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
@@ -141,3 +133,51 @@ class MergedLazyContexts(AbstractLazyContext):
|
||||
"""data is a list of lazy contexts."""
|
||||
def infer(self):
|
||||
return unite(l.infer() for l in self.data)
|
||||
|
||||
|
||||
class ContextualizedNode(object):
|
||||
def __init__(self, context, node):
|
||||
self.context = context
|
||||
self._node = node
|
||||
|
||||
def get_root_context(self):
|
||||
return self.context.get_root_context()
|
||||
|
||||
def infer(self):
|
||||
return self.context.eval_node(self._node)
|
||||
|
||||
|
||||
class ContextualizedName(ContextualizedNode):
|
||||
# TODO merge with TreeNameDefinition?!
|
||||
@property
|
||||
def name(self):
|
||||
return self._node
|
||||
|
||||
def assignment_indexes(self):
|
||||
"""
|
||||
Returns an array of tuple(int, node) of the indexes that are used in
|
||||
tuple assignments.
|
||||
|
||||
For example if the name is ``y`` in the following code::
|
||||
|
||||
x, (y, z) = 2, ''
|
||||
|
||||
would result in ``[(1, xyz_node), (0, yz_node)]``.
|
||||
"""
|
||||
indexes = []
|
||||
node = self._node.parent
|
||||
compare = self._node
|
||||
while node is not None:
|
||||
if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
|
||||
for i, child in enumerate(node.children):
|
||||
if child == compare:
|
||||
indexes.insert(0, (int(i / 2), node))
|
||||
break
|
||||
else:
|
||||
raise LookupError("Couldn't find the assignment.")
|
||||
elif isinstance(node, (ExprStmt, CompFor)):
|
||||
break
|
||||
|
||||
compare = node
|
||||
node = node.parent
|
||||
return indexes
|
||||
|
||||
@@ -22,8 +22,8 @@ from jedi._compatibility import u
|
||||
from jedi.common import unite
|
||||
from jedi.evaluate import context
|
||||
from jedi.evaluate.cache import memoize_default
|
||||
from jedi.parser import ParserWithRecovery, load_grammar
|
||||
from jedi.parser.tree import search_ancestor
|
||||
from jedi.parser.python import parse
|
||||
from jedi.parser.python.tree import search_ancestor
|
||||
from jedi.common import indent_block
|
||||
from jedi.evaluate.iterable import SequenceLiteralContext, FakeSequence
|
||||
|
||||
@@ -133,9 +133,9 @@ def _evaluate_for_statement_string(module_context, string):
|
||||
# Take the default grammar here, if we load the Python 2.7 grammar here, it
|
||||
# will be impossible to use `...` (Ellipsis) as a token. Docstring types
|
||||
# don't need to conform with the current grammar.
|
||||
p = ParserWithRecovery(load_grammar(), code.format(indent_block(string)))
|
||||
module = parse(code.format(indent_block(string)))
|
||||
try:
|
||||
funcdef = p.module.subscopes[0]
|
||||
funcdef = module.subscopes[0]
|
||||
# First pick suite, then simple_stmt and then the node,
|
||||
# which is also not the last item, because there's a newline.
|
||||
stmt = funcdef.children[-1].children[-1].children[-2]
|
||||
|
||||
@@ -17,7 +17,7 @@ It works as follows:
|
||||
- execute these calls and check the input. This work with a ``ParamListener``.
|
||||
"""
|
||||
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi import settings
|
||||
from jedi import debug
|
||||
from jedi.evaluate.cache import memoize_default
|
||||
|
||||
@@ -4,7 +4,7 @@ are needed for name resolution.
|
||||
"""
|
||||
from abc import abstractmethod
|
||||
|
||||
from jedi.parser.tree import search_ancestor
|
||||
from jedi.parser.python.tree import search_ancestor
|
||||
from jedi.evaluate import flow_analysis
|
||||
from jedi.common import to_list, unite
|
||||
|
||||
@@ -73,9 +73,6 @@ class ContextName(ContextNameMixin, AbstractTreeName):
|
||||
|
||||
|
||||
class TreeNameDefinition(AbstractTreeName):
|
||||
def get_parent_flow_context(self):
|
||||
return self.parent_context
|
||||
|
||||
def infer(self):
|
||||
# Refactor this, should probably be here.
|
||||
from jedi.evaluate.finder import _name_to_types
|
||||
@@ -287,7 +284,7 @@ def get_global_filters(evaluator, context, until_position, origin_scope):
|
||||
First we get the names names from the function scope.
|
||||
|
||||
>>> no_unicode_pprint(filters[0])
|
||||
<ParserTreeFilter: <ModuleContext: <Module: None@2-5>>>
|
||||
<ParserTreeFilter: <ModuleContext: @2-5>>
|
||||
>>> sorted(str(n) for n in filters[0].values())
|
||||
['<TreeNameDefinition: func@(3, 4)>', '<TreeNameDefinition: x@(2, 0)>']
|
||||
>>> filters[0]._until_position
|
||||
|
||||
@@ -15,7 +15,7 @@ Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
|
||||
check for -> a is a string). There's big potential in these checks.
|
||||
"""
|
||||
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi import debug
|
||||
from jedi.common import unite
|
||||
from jedi import settings
|
||||
@@ -30,6 +30,7 @@ from jedi.evaluate import flow_analysis
|
||||
from jedi.evaluate import param
|
||||
from jedi.evaluate import helpers
|
||||
from jedi.evaluate.filters import get_global_filters
|
||||
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
|
||||
|
||||
|
||||
class NameFinder(object):
|
||||
@@ -78,7 +79,13 @@ class NameFinder(object):
|
||||
|
||||
def _get_origin_scope(self):
|
||||
if isinstance(self._name, tree.Name):
|
||||
return self._name.get_parent_until(tree.Scope, reverse=True)
|
||||
scope = self._name
|
||||
while scope.parent is not None:
|
||||
# TODO why if classes?
|
||||
if not isinstance(scope, tree.Scope):
|
||||
break
|
||||
scope = scope.parent
|
||||
return scope
|
||||
else:
|
||||
return None
|
||||
|
||||
@@ -165,30 +172,32 @@ class NameFinder(object):
|
||||
def _name_to_types(evaluator, context, tree_name):
|
||||
types = []
|
||||
node = tree_name.get_definition()
|
||||
if node.isinstance(tree.ForStmt):
|
||||
typ = node.type
|
||||
if typ == 'for_stmt':
|
||||
types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
|
||||
if types:
|
||||
return types
|
||||
if node.isinstance(tree.WithStmt):
|
||||
if typ == 'with_stmt':
|
||||
types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
|
||||
if types:
|
||||
return types
|
||||
if node.type in ('for_stmt', 'comp_for'):
|
||||
if typ in ('for_stmt', 'comp_for'):
|
||||
try:
|
||||
types = context.predefined_names[node][tree_name.value]
|
||||
except KeyError:
|
||||
container_types = context.eval_node(node.children[3])
|
||||
for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
|
||||
types = check_tuple_assignments(evaluator, for_types, tree_name)
|
||||
elif node.isinstance(tree.ExprStmt):
|
||||
cn = ContextualizedNode(context, node.children[3])
|
||||
for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
|
||||
c_node = ContextualizedName(context, tree_name)
|
||||
types = check_tuple_assignments(evaluator, c_node, for_types)
|
||||
elif typ == 'expr_stmt':
|
||||
types = _remove_statements(evaluator, context, node, tree_name)
|
||||
elif node.isinstance(tree.WithStmt):
|
||||
elif typ == 'with_stmt':
|
||||
types = context.eval_node(node.node_from_name(tree_name))
|
||||
elif isinstance(node, tree.Import):
|
||||
elif typ in ('import_from', 'import_name'):
|
||||
types = imports.infer_import(context, tree_name)
|
||||
elif node.type in ('funcdef', 'classdef'):
|
||||
elif typ in ('funcdef', 'classdef'):
|
||||
types = _apply_decorators(evaluator, context, node)
|
||||
elif node.type == 'global_stmt':
|
||||
elif typ == 'global_stmt':
|
||||
context = evaluator.create_context(context, tree_name)
|
||||
finder = NameFinder(evaluator, context, context, str(tree_name))
|
||||
filters = finder.get_filters(search_global=True)
|
||||
@@ -196,7 +205,7 @@ def _name_to_types(evaluator, context, tree_name):
|
||||
# which means the function itself.
|
||||
filters = [next(filters)]
|
||||
types += finder.find(filters, attribute_lookup=False)
|
||||
elif isinstance(node, tree.TryStmt):
|
||||
elif typ == 'try_stmt':
|
||||
# TODO an exception can also be a tuple. Check for those.
|
||||
# TODO check for types that are not classes and add it to
|
||||
# the static analysis report.
|
||||
@@ -234,7 +243,7 @@ def _apply_decorators(evaluator, context, node):
|
||||
trailer_nodes = dec.children[2:-1]
|
||||
if trailer_nodes:
|
||||
# Create a trailer and evaluate it.
|
||||
trailer = tree.Node('trailer', trailer_nodes)
|
||||
trailer = tree.PythonNode('trailer', trailer_nodes)
|
||||
trailer.parent = dec
|
||||
dec_values = evaluator.eval_trailer(context, dec_values, trailer)
|
||||
|
||||
@@ -271,8 +280,7 @@ def _remove_statements(evaluator, context, stmt, name):
|
||||
if check_instance is not None:
|
||||
# class renames
|
||||
types = set([er.get_instance_el(evaluator, check_instance, a, True)
|
||||
if isinstance(a, (er.Function, tree.Function))
|
||||
else a for a in types])
|
||||
if isinstance(a, er.Function) else a for a in types])
|
||||
return types
|
||||
|
||||
|
||||
@@ -308,7 +316,7 @@ def _check_flow_information(context, flow, search_name, pos):
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
if isinstance(flow, (tree.IfStmt, tree.WhileStmt)):
|
||||
if flow.type in ('if_stmt', 'while_stmt'):
|
||||
potential_ifs = [c for c in flow.children[1::4] if c != ':']
|
||||
for if_test in reversed(potential_ifs):
|
||||
if search_name.start_pos > if_test.end_pos:
|
||||
@@ -322,7 +330,7 @@ def _check_isinstance_type(context, element, search_name):
|
||||
# this might be removed if we analyze and, etc
|
||||
assert len(element.children) == 2
|
||||
first, trailer = element.children
|
||||
assert isinstance(first, tree.Name) and first.value == 'isinstance'
|
||||
assert first.type == 'name' and first.value == 'isinstance'
|
||||
assert trailer.type == 'trailer' and trailer.children[0] == '('
|
||||
assert len(trailer.children) == 3
|
||||
|
||||
@@ -354,13 +362,14 @@ def _check_isinstance_type(context, element, search_name):
|
||||
return result
|
||||
|
||||
|
||||
def check_tuple_assignments(evaluator, types, name):
|
||||
def check_tuple_assignments(evaluator, contextualized_name, types):
|
||||
"""
|
||||
Checks if tuples are assigned.
|
||||
"""
|
||||
lazy_context = None
|
||||
for index, node in name.assignment_indexes():
|
||||
iterated = iterable.py__iter__(evaluator, types, node)
|
||||
for index, node in contextualized_name.assignment_indexes():
|
||||
cn = ContextualizedNode(contextualized_name.context, node)
|
||||
iterated = iterable.py__iter__(evaluator, types, cn)
|
||||
for _ in range(index + 1):
|
||||
try:
|
||||
lazy_context = next(iterated)
|
||||
|
||||
@@ -2,7 +2,7 @@ import copy
|
||||
from itertools import chain
|
||||
from contextlib import contextmanager
|
||||
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
|
||||
|
||||
def deep_ast_copy(obj):
|
||||
|
||||
@@ -16,13 +16,13 @@ import os
|
||||
import pkgutil
|
||||
import sys
|
||||
|
||||
from jedi._compatibility import find_module, unicode
|
||||
from jedi._compatibility import find_module, unicode, ImplicitNSInfo
|
||||
from jedi import debug
|
||||
from jedi import settings
|
||||
from jedi.common import source_to_unicode, unite
|
||||
from jedi.parser.diff import FastParser
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.utils import save_parser, load_parser, parser_cache
|
||||
from jedi.parser.python import parse
|
||||
from jedi.parser.python import tree
|
||||
from jedi.parser.cache import parser_cache
|
||||
from jedi.evaluate import sys_path
|
||||
from jedi.evaluate import helpers
|
||||
from jedi.evaluate import compiled
|
||||
@@ -36,7 +36,7 @@ from jedi.evaluate.filters import AbstractNameDefinition
|
||||
@memoize_default(default=set())
|
||||
def infer_import(context, tree_name, is_goto=False):
|
||||
module_context = context.get_root_context()
|
||||
import_node = tree_name.get_parent_until(tree.Import)
|
||||
import_node = tree.search_ancestor(tree_name, ('import_name', 'import_from'))
|
||||
import_path = import_node.path_for_name(tree_name)
|
||||
from_import_name = None
|
||||
evaluator = context.evaluator
|
||||
@@ -201,14 +201,21 @@ class Importer(object):
|
||||
path = module_context.py__file__()
|
||||
if path is not None:
|
||||
import_path = list(import_path)
|
||||
p = path
|
||||
for i in range(level):
|
||||
path = os.path.dirname(path)
|
||||
dir_name = os.path.basename(path)
|
||||
p = os.path.dirname(p)
|
||||
dir_name = os.path.basename(p)
|
||||
# This is not the proper way to do relative imports. However, since
|
||||
# Jedi cannot be sure about the entry point, we just calculate an
|
||||
# absolute path here.
|
||||
if dir_name:
|
||||
import_path.insert(0, dir_name)
|
||||
# TODO those sys.modules modifications are getting
|
||||
# really stupid. this is the 3rd time that we're using
|
||||
# this. We should probably refactor.
|
||||
if path.endswith(os.path.sep + 'os.py'):
|
||||
import_path.insert(0, 'os')
|
||||
else:
|
||||
import_path.insert(0, dir_name)
|
||||
else:
|
||||
_add_error(module_context, import_path[-1])
|
||||
import_path = []
|
||||
@@ -297,7 +304,7 @@ class Importer(object):
|
||||
method = parent_module.py__path__
|
||||
except AttributeError:
|
||||
# The module is not a package.
|
||||
_add_error(parent_module, import_path[-1])
|
||||
_add_error(self.module_context, import_path[-1])
|
||||
return set()
|
||||
else:
|
||||
paths = method()
|
||||
@@ -306,13 +313,15 @@ class Importer(object):
|
||||
# At the moment we are only using one path. So this is
|
||||
# not important to be correct.
|
||||
try:
|
||||
if not isinstance(path, list):
|
||||
path = [path]
|
||||
module_file, module_path, is_pkg = \
|
||||
find_module(import_parts[-1], [path])
|
||||
find_module(import_parts[-1], path, fullname=module_name)
|
||||
break
|
||||
except ImportError:
|
||||
module_path = None
|
||||
if module_path is None:
|
||||
_add_error(parent_module, import_path[-1])
|
||||
_add_error(self.module_context, import_path[-1])
|
||||
return set()
|
||||
else:
|
||||
parent_module = None
|
||||
@@ -323,7 +332,7 @@ class Importer(object):
|
||||
sys.path, temp = sys_path, sys.path
|
||||
try:
|
||||
module_file, module_path, is_pkg = \
|
||||
find_module(import_parts[-1])
|
||||
find_module(import_parts[-1], fullname=module_name)
|
||||
finally:
|
||||
sys.path = temp
|
||||
except ImportError:
|
||||
@@ -331,22 +340,27 @@ class Importer(object):
|
||||
_add_error(self.module_context, import_path[-1])
|
||||
return set()
|
||||
|
||||
source = None
|
||||
code = None
|
||||
if is_pkg:
|
||||
# In this case, we don't have a file yet. Search for the
|
||||
# __init__ file.
|
||||
if module_path.endswith(('.zip', '.egg')):
|
||||
source = module_file.loader.get_source(module_name)
|
||||
code = module_file.loader.get_source(module_name)
|
||||
else:
|
||||
module_path = get_init_path(module_path)
|
||||
elif module_file:
|
||||
source = module_file.read()
|
||||
code = module_file.read()
|
||||
module_file.close()
|
||||
|
||||
if module_file is None and not module_path.endswith(('.py', '.zip', '.egg')):
|
||||
if isinstance(module_path, ImplicitNSInfo):
|
||||
from jedi.evaluate.representation import ImplicitNamespaceContext
|
||||
fullname, paths = module_path.name, module_path.paths
|
||||
module = ImplicitNamespaceContext(self._evaluator, fullname=fullname)
|
||||
module.paths = paths
|
||||
elif module_file is None and not module_path.endswith(('.py', '.zip', '.egg')):
|
||||
module = compiled.load_module(self._evaluator, module_path)
|
||||
else:
|
||||
module = _load_module(self._evaluator, module_path, source, sys_path, parent_module)
|
||||
module = _load_module(self._evaluator, module_path, code, sys_path, parent_module)
|
||||
|
||||
if module is None:
|
||||
# The file might raise an ImportError e.g. and therefore not be
|
||||
@@ -384,7 +398,7 @@ class Importer(object):
|
||||
:param only_modules: Indicates whether it's possible to import a
|
||||
definition that is not defined in a module.
|
||||
"""
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
from jedi.evaluate.representation import ModuleContext, ImplicitNamespaceContext
|
||||
names = []
|
||||
if self.import_path:
|
||||
# flask
|
||||
@@ -405,20 +419,23 @@ class Importer(object):
|
||||
# Non-modules are not completable.
|
||||
if context.api_type != 'module': # not a module
|
||||
continue
|
||||
|
||||
# namespace packages
|
||||
if isinstance(context, ModuleContext) and \
|
||||
context.py__file__().endswith('__init__.py'):
|
||||
if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'):
|
||||
paths = context.py__path__()
|
||||
names += self._get_module_names(paths, in_module=context)
|
||||
|
||||
# implicit namespace packages
|
||||
elif isinstance(context, ImplicitNamespaceContext):
|
||||
paths = context.paths
|
||||
names += self._get_module_names(paths)
|
||||
|
||||
if only_modules:
|
||||
# In the case of an import like `from x.` we don't need to
|
||||
# add all the variables.
|
||||
if ('os',) == self.str_import_path and not self.level:
|
||||
# os.path is a hardcoded exception, because it's a
|
||||
# ``sys.modules`` modification.
|
||||
names.append(self._generate_name('path'))
|
||||
names.append(self._generate_name('path', context))
|
||||
|
||||
continue
|
||||
|
||||
@@ -438,31 +455,20 @@ class Importer(object):
|
||||
return names
|
||||
|
||||
|
||||
def _load_module(evaluator, path=None, source=None, sys_path=None, parent_module=None):
|
||||
def load(source):
|
||||
dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
|
||||
if path is not None and path.endswith(('.py', '.zip', '.egg')) \
|
||||
and dotted_path not in settings.auto_import_modules:
|
||||
if source is None:
|
||||
with open(path, 'rb') as f:
|
||||
source = f.read()
|
||||
else:
|
||||
return compiled.load_module(evaluator, path)
|
||||
p = path
|
||||
p = FastParser(evaluator.grammar, source_to_unicode(source), p)
|
||||
save_parser(path, p)
|
||||
return p.module
|
||||
|
||||
def _load_module(evaluator, path=None, code=None, sys_path=None, parent_module=None):
|
||||
if sys_path is None:
|
||||
sys_path = evaluator.sys_path
|
||||
|
||||
cached = load_parser(path)
|
||||
module_node = load(source) if cached is None else cached.module
|
||||
if isinstance(module_node, compiled.CompiledObject):
|
||||
return module_node
|
||||
dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
|
||||
if path is not None and path.endswith(('.py', '.zip', '.egg')) \
|
||||
and dotted_path not in settings.auto_import_modules:
|
||||
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
return ModuleContext(evaluator, module_node)
|
||||
module_node = parse(code=code, path=path, cache=True, diff_cache=True)
|
||||
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
return ModuleContext(evaluator, module_node, path=path)
|
||||
else:
|
||||
return compiled.load_module(evaluator, path)
|
||||
|
||||
|
||||
def add_module(evaluator, module_name, module):
|
||||
@@ -482,21 +488,22 @@ def get_modules_containing_name(evaluator, modules, name):
|
||||
|
||||
def check_python_file(path):
|
||||
try:
|
||||
parser_cache_item = parser_cache[path]
|
||||
node_cache_item = parser_cache[path]
|
||||
except KeyError:
|
||||
try:
|
||||
return check_fs(path)
|
||||
except IOError:
|
||||
return None
|
||||
else:
|
||||
return er.ModuleContext(evaluator, parser_cache_item.parser.module)
|
||||
module_node = node_cache_item.node
|
||||
return er.ModuleContext(evaluator, module_node, path=path)
|
||||
|
||||
def check_fs(path):
|
||||
with open(path, 'rb') as f:
|
||||
source = source_to_unicode(f.read())
|
||||
if name in source:
|
||||
code = source_to_unicode(f.read())
|
||||
if name in code:
|
||||
module_name = os.path.basename(path)[:-3] # Remove `.py`.
|
||||
module = _load_module(evaluator, path, source)
|
||||
module = _load_module(evaluator, path, code)
|
||||
add_module(evaluator, module_name, module)
|
||||
return module
|
||||
|
||||
|
||||
@@ -240,23 +240,24 @@ class Comprehension(AbstractSequence):
|
||||
parent_context = parent_context or self._defining_context
|
||||
input_types = parent_context.eval_node(input_node)
|
||||
|
||||
iterated = py__iter__(evaluator, input_types, input_node)
|
||||
cn = context.ContextualizedNode(parent_context, input_node)
|
||||
iterated = py__iter__(evaluator, input_types, cn)
|
||||
exprlist = comp_for.children[1]
|
||||
for i, lazy_context in enumerate(iterated):
|
||||
types = lazy_context.infer()
|
||||
dct = unpack_tuple_to_dict(evaluator, types, exprlist)
|
||||
context = self._get_comp_for_context(
|
||||
dct = unpack_tuple_to_dict(parent_context, types, exprlist)
|
||||
context_ = self._get_comp_for_context(
|
||||
parent_context,
|
||||
comp_for,
|
||||
)
|
||||
with helpers.predefine_names(context, comp_for, dct):
|
||||
with helpers.predefine_names(context_, comp_for, dct):
|
||||
try:
|
||||
for result in self._nested(comp_fors[1:], context):
|
||||
for result in self._nested(comp_fors[1:], context_):
|
||||
yield result
|
||||
except IndexError:
|
||||
iterated = context.eval_node(self._eval_node())
|
||||
iterated = context_.eval_node(self._eval_node())
|
||||
if self.array_type == 'dict':
|
||||
yield iterated, context.eval_node(self._eval_node(2))
|
||||
yield iterated, context_.eval_node(self._eval_node(2))
|
||||
else:
|
||||
yield iterated
|
||||
|
||||
@@ -492,16 +493,6 @@ class _FakeArray(SequenceLiteralContext):
|
||||
# TODO is this class really needed?
|
||||
|
||||
|
||||
class ImplicitTuple(_FakeArray):
|
||||
def __init__(self, evaluator, testlist):
|
||||
super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple')
|
||||
raise NotImplementedError
|
||||
self._testlist = testlist
|
||||
|
||||
def _items(self):
|
||||
return self._testlist.children[::2]
|
||||
|
||||
|
||||
class FakeSequence(_FakeArray):
|
||||
def __init__(self, evaluator, array_type, lazy_context_list):
|
||||
"""
|
||||
@@ -571,33 +562,33 @@ class MergedArray(_FakeArray):
|
||||
return sum(len(a) for a in self._arrays)
|
||||
|
||||
|
||||
def unpack_tuple_to_dict(evaluator, types, exprlist):
|
||||
def unpack_tuple_to_dict(context, types, exprlist):
|
||||
"""
|
||||
Unpacking tuple assignments in for statements and expr_stmts.
|
||||
"""
|
||||
if exprlist.type == 'name':
|
||||
return {exprlist.value: types}
|
||||
elif exprlist.type == 'atom' and exprlist.children[0] in '([':
|
||||
return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
|
||||
return unpack_tuple_to_dict(context, types, exprlist.children[1])
|
||||
elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
|
||||
'testlist_star_expr'):
|
||||
dct = {}
|
||||
parts = iter(exprlist.children[::2])
|
||||
n = 0
|
||||
for lazy_context in py__iter__(evaluator, types, exprlist):
|
||||
for lazy_context in py__iter__(context.evaluator, types, exprlist):
|
||||
n += 1
|
||||
try:
|
||||
part = next(parts)
|
||||
except StopIteration:
|
||||
# TODO this context is probably not right.
|
||||
analysis.add(next(iter(types)), 'value-error-too-many-values', part,
|
||||
analysis.add(context, 'value-error-too-many-values', part,
|
||||
message="ValueError: too many values to unpack (expected %s)" % n)
|
||||
else:
|
||||
dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
|
||||
dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part))
|
||||
has_parts = next(parts, None)
|
||||
if types and has_parts is not None:
|
||||
# TODO this context is probably not right.
|
||||
analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
|
||||
analysis.add(context, 'value-error-too-few-values', has_parts,
|
||||
message="ValueError: need more than %s values to unpack" % n)
|
||||
return dct
|
||||
elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
|
||||
@@ -611,17 +602,19 @@ def unpack_tuple_to_dict(evaluator, types, exprlist):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def py__iter__(evaluator, types, node=None):
|
||||
def py__iter__(evaluator, types, contextualized_node=None):
|
||||
debug.dbg('py__iter__')
|
||||
type_iters = []
|
||||
for typ in types:
|
||||
try:
|
||||
iter_method = typ.py__iter__
|
||||
except AttributeError:
|
||||
if node is not None:
|
||||
# TODO this context is probably not right.
|
||||
analysis.add(typ, 'type-error-not-iterable', node,
|
||||
message="TypeError: '%s' object is not iterable" % typ)
|
||||
if contextualized_node is not None:
|
||||
analysis.add(
|
||||
contextualized_node.context,
|
||||
'type-error-not-iterable',
|
||||
contextualized_node._node,
|
||||
message="TypeError: '%s' object is not iterable" % typ)
|
||||
else:
|
||||
type_iters.append(iter_method())
|
||||
|
||||
@@ -631,12 +624,15 @@ def py__iter__(evaluator, types, node=None):
|
||||
)
|
||||
|
||||
|
||||
def py__iter__types(evaluator, types, node=None):
|
||||
def py__iter__types(evaluator, types, contextualized_node=None):
|
||||
"""
|
||||
Calls `py__iter__`, but ignores the ordering in the end and just returns
|
||||
all types that it contains.
|
||||
"""
|
||||
return unite(lazy_context.infer() for lazy_context in py__iter__(evaluator, types, node))
|
||||
return unite(
|
||||
lazy_context.infer()
|
||||
for lazy_context in py__iter__(evaluator, types, contextualized_node)
|
||||
)
|
||||
|
||||
|
||||
def py__getitem__(evaluator, context, types, trailer):
|
||||
|
||||
@@ -3,7 +3,7 @@ from collections import defaultdict
|
||||
from jedi._compatibility import zip_longest
|
||||
from jedi import debug
|
||||
from jedi import common
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi.evaluate import iterable
|
||||
from jedi.evaluate import analysis
|
||||
from jedi.evaluate import context
|
||||
|
||||
@@ -22,8 +22,8 @@ x support for type hint comments for functions, `# type: (int, str) -> int`.
|
||||
import itertools
|
||||
|
||||
import os
|
||||
from jedi.parser import \
|
||||
Parser, load_grammar, ParseError, ParserWithRecovery, tree
|
||||
from jedi.parser import ParserSyntaxError
|
||||
from jedi.parser.python import parse, tree
|
||||
from jedi.common import unite
|
||||
from jedi.evaluate.cache import memoize_default
|
||||
from jedi.evaluate import compiled
|
||||
@@ -62,14 +62,16 @@ def _fix_forward_reference(context, node):
|
||||
if isinstance(evaled_node, compiled.CompiledObject) and \
|
||||
isinstance(evaled_node.obj, str):
|
||||
try:
|
||||
p = Parser(load_grammar(), _compatibility.unicode(evaled_node.obj),
|
||||
start_symbol='eval_input')
|
||||
new_node = p.get_parsed_node()
|
||||
except ParseError:
|
||||
new_node = parse(
|
||||
_compatibility.unicode(evaled_node.obj),
|
||||
start_symbol='eval_input',
|
||||
error_recovery=False
|
||||
)
|
||||
except ParserSyntaxError:
|
||||
debug.warning('Annotation not parsed: %s' % evaled_node.obj)
|
||||
return node
|
||||
else:
|
||||
module = node.get_parent_until()
|
||||
module = node.get_root_node()
|
||||
new_node.move(module.end_pos[0])
|
||||
new_node.parent = context.tree_node
|
||||
return new_node
|
||||
@@ -116,8 +118,7 @@ def _get_typing_replacement_module():
|
||||
os.path.abspath(os.path.join(__file__, "../jedi_typing.py"))
|
||||
with open(typing_path) as f:
|
||||
code = _compatibility.unicode(f.read())
|
||||
p = ParserWithRecovery(load_grammar(), code)
|
||||
_typing_module = p.module
|
||||
_typing_module = parse(code)
|
||||
return _typing_module
|
||||
|
||||
|
||||
@@ -149,7 +150,11 @@ def py__getitem__(context, typ, node):
|
||||
return context.eval_node(nodes[0])
|
||||
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
typing = ModuleContext(context.evaluator, _get_typing_replacement_module())
|
||||
typing = ModuleContext(
|
||||
context.evaluator,
|
||||
module_node=_get_typing_replacement_module(),
|
||||
path=None
|
||||
)
|
||||
factories = typing.py__getattribute__("factory")
|
||||
assert len(factories) == 1
|
||||
factory = list(factories)[0]
|
||||
|
||||
@@ -4,7 +4,7 @@ Handles operator precedence.
|
||||
import operator as op
|
||||
|
||||
from jedi._compatibility import unicode
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi import debug
|
||||
from jedi.evaluate.compiled import CompiledObject, create, builtin_from_name
|
||||
from jedi.evaluate import analysis
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
"""
|
||||
Like described in the :mod:`jedi.parser.tree` module,
|
||||
Like described in the :mod:`jedi.parser.python.tree` module,
|
||||
there's a need for an ast like module to represent the states of parsed
|
||||
modules.
|
||||
|
||||
@@ -39,9 +39,10 @@ import os
|
||||
import pkgutil
|
||||
import imp
|
||||
import re
|
||||
from itertools import chain
|
||||
|
||||
from jedi._compatibility import use_metaclass
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.python import tree
|
||||
from jedi import debug
|
||||
from jedi import common
|
||||
from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT
|
||||
@@ -56,9 +57,11 @@ from jedi.evaluate import imports
|
||||
from jedi.evaluate import helpers
|
||||
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
|
||||
GlobalNameFilter, DictFilter, ContextName, AbstractNameDefinition, \
|
||||
ParamName, AnonymousInstanceParamName, TreeNameDefinition
|
||||
ParamName, AnonymousInstanceParamName, TreeNameDefinition, \
|
||||
ContextNameMixin
|
||||
from jedi.evaluate.dynamic import search_params
|
||||
from jedi.evaluate import context
|
||||
from jedi.evaluate.context import ContextualizedNode
|
||||
|
||||
|
||||
def apply_py__get__(context, base_context):
|
||||
@@ -72,8 +75,19 @@ def apply_py__get__(context, base_context):
|
||||
|
||||
|
||||
class ClassName(TreeNameDefinition):
|
||||
def __init__(self, parent_context, tree_name, name_context):
|
||||
super(ClassName, self).__init__(parent_context, tree_name)
|
||||
self._name_context = name_context
|
||||
|
||||
def infer(self):
|
||||
for result_context in super(ClassName, self).infer():
|
||||
# TODO this _name_to_types might get refactored and be a part of the
|
||||
# parent class. Once it is, we can probably just overwrite method to
|
||||
# achieve this.
|
||||
from jedi.evaluate.finder import _name_to_types
|
||||
inferred = _name_to_types(
|
||||
self.parent_context.evaluator, self._name_context, self.tree_name)
|
||||
|
||||
for result_context in inferred:
|
||||
for c in apply_py__get__(result_context, self.parent_context):
|
||||
yield c
|
||||
|
||||
@@ -81,6 +95,10 @@ class ClassName(TreeNameDefinition):
|
||||
class ClassFilter(ParserTreeFilter):
|
||||
name_class = ClassName
|
||||
|
||||
def _convert_names(self, names):
|
||||
return [self.name_class(self.context, name, self._node_context)
|
||||
for name in names]
|
||||
|
||||
|
||||
class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
"""
|
||||
@@ -159,13 +177,13 @@ class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
origin_scope=origin_scope
|
||||
)
|
||||
else:
|
||||
for scope in self.py__mro__():
|
||||
if isinstance(scope, compiled.CompiledObject):
|
||||
for filter in scope.get_filters(is_instance=is_instance):
|
||||
for cls in self.py__mro__():
|
||||
if isinstance(cls, compiled.CompiledObject):
|
||||
for filter in cls.get_filters(is_instance=is_instance):
|
||||
yield filter
|
||||
else:
|
||||
yield ClassFilter(
|
||||
self.evaluator, self, node_context=scope,
|
||||
self.evaluator, self, node_context=cls,
|
||||
origin_scope=origin_scope)
|
||||
|
||||
def is_class(self):
|
||||
@@ -313,8 +331,8 @@ class FunctionExecutionContext(context.TreeContext):
|
||||
def _eval_yield(self, yield_expr):
|
||||
node = yield_expr.children[1]
|
||||
if node.type == 'yield_arg': # It must be a yield from.
|
||||
yield_from_types = self.eval_node(node.children[1])
|
||||
for lazy_context in iterable.py__iter__(self.evaluator, yield_from_types, node):
|
||||
cn = ContextualizedNode(self, node.children[1])
|
||||
for lazy_context in iterable.py__iter__(self.evaluator, cn.infer(), cn):
|
||||
yield lazy_context
|
||||
else:
|
||||
yield context.LazyTreeContext(self, node)
|
||||
@@ -358,8 +376,8 @@ class FunctionExecutionContext(context.TreeContext):
|
||||
yield result
|
||||
else:
|
||||
input_node = for_stmt.get_input_node()
|
||||
for_types = self.eval_node(input_node)
|
||||
ordered = iterable.py__iter__(evaluator, for_types, input_node)
|
||||
cn = ContextualizedNode(self, input_node)
|
||||
ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
|
||||
ordered = list(ordered)
|
||||
for lazy_context in ordered:
|
||||
dct = {str(for_stmt.children[1]): lazy_context.infer()}
|
||||
@@ -405,13 +423,26 @@ class ModuleAttributeName(AbstractNameDefinition):
|
||||
)
|
||||
|
||||
|
||||
class ModuleName(ContextNameMixin, AbstractNameDefinition):
|
||||
start_pos = 1, 0
|
||||
|
||||
def __init__(self, context, name):
|
||||
self._context = context
|
||||
self._name = name
|
||||
|
||||
@property
|
||||
def string_name(self):
|
||||
return self._name
|
||||
|
||||
|
||||
class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
api_type = 'module'
|
||||
parent_context = None
|
||||
|
||||
def __init__(self, evaluator, module_node):
|
||||
def __init__(self, evaluator, module_node, path):
|
||||
super(ModuleContext, self).__init__(evaluator, parent_context=None)
|
||||
self.tree_node = module_node
|
||||
self._path = path
|
||||
|
||||
def get_filters(self, search_global, until_position=None, origin_scope=None):
|
||||
yield ParserTreeFilter(
|
||||
@@ -448,10 +479,21 @@ class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
# All the additional module attributes are strings.
|
||||
return dict((n, ModuleAttributeName(self, n)) for n in names)
|
||||
|
||||
@property
|
||||
def _string_name(self):
|
||||
""" This is used for the goto functions. """
|
||||
if self._path is None:
|
||||
return '' # no path -> empty name
|
||||
else:
|
||||
sep = (re.escape(os.path.sep),) * 2
|
||||
r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path)
|
||||
# Remove PEP 3149 names
|
||||
return re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
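A hedged standalone sketch of what the _string_name regexes above extract from a module path (Unix-style paths for illustration; the expressions mirror the ones in the property):

import os
import re

def string_name(path):
    sep = (re.escape(os.path.sep),) * 2
    r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, path)
    # Strip PEP 3149 ABI tags such as ".cpython-36m".
    return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))

print(string_name('/usr/lib/python3.6/json/__init__.py'))                 # -> 'json'
print(string_name('/usr/lib/python3.6/lib-dynload/_ssl.cpython-36m.so'))  # -> '_ssl'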
@property
|
||||
@memoize_default()
|
||||
def name(self):
|
||||
return ContextName(self, self.tree_node.name)
|
||||
return ModuleName(self, self._string_name)
|
||||
|
||||
def _get_init_directory(self):
|
||||
"""
|
||||
@@ -477,10 +519,10 @@ class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
"""
|
||||
In contrast to Python's ``__file__``, this can be None.
|
||||
"""
|
||||
if self.tree_node.path is None:
|
||||
if self._path is None:
|
||||
return None
|
||||
|
||||
return os.path.abspath(self.tree_node.path)
|
||||
return os.path.abspath(self._path)
|
||||
|
||||
def py__package__(self):
|
||||
if self._get_init_directory() is None:
|
||||
@@ -538,7 +580,7 @@ class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
Lists modules in the directory of this module (if this module is a
|
||||
package).
|
||||
"""
|
||||
path = self.tree_node.path
|
||||
path = self._path
|
||||
names = {}
|
||||
if path is not None and path.endswith(os.path.sep + '__init__.py'):
|
||||
mods = pkgutil.iter_modules([os.path.dirname(path)])
|
||||
@@ -557,3 +599,74 @@ class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
|
||||
def py__class__(self):
|
||||
return compiled.get_special_object(self.evaluator, 'MODULE_CLASS')
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s: %s@%s-%s>" % (
|
||||
self.__class__.__name__, self._string_name,
|
||||
self.tree_node.start_pos[0], self.tree_node.end_pos[0])
|
||||
|
||||
|
||||
class ImplicitNSName(AbstractNameDefinition):
|
||||
"""
|
||||
Accessing names for implicit namespace packages should infer to nothing.
|
||||
This object will prevent Jedi from raising exceptions.
|
||||
"""
|
||||
def __init__(self, implicit_ns_context, string_name):
|
||||
self.implicit_ns_context = implicit_ns_context
|
||||
self.string_name = string_name
|
||||
|
||||
def infer(self):
|
||||
return []
|
||||
|
||||
def get_root_context(self):
|
||||
return self.implicit_ns_context
|
||||
|
||||
|
||||
class ImplicitNamespaceContext(use_metaclass(CachedMetaClass, context.TreeContext)):
|
||||
"""
|
||||
Provides support for implicit namespace packages
|
||||
"""
|
||||
api_type = 'module'
|
||||
parent_context = None
|
||||
|
||||
def __init__(self, evaluator, fullname):
|
||||
super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None)
|
||||
self.evaluator = evaluator
|
||||
self.fullname = fullname
|
||||
|
||||
def get_filters(self, search_global, until_position=None, origin_scope=None):
|
||||
yield DictFilter(self._sub_modules_dict())
|
||||
|
||||
@property
|
||||
@memoize_default()
|
||||
def name(self):
|
||||
string_name = self.py__package__().rpartition('.')[-1]
|
||||
return ImplicitNSName(self, string_name)
|
||||
|
||||
def py__file__(self):
|
||||
return None
|
||||
|
||||
def py__package__(self):
|
||||
"""Return the fullname
|
||||
"""
|
||||
return self.fullname
|
||||
|
||||
@property
|
||||
def py__path__(self):
|
||||
return lambda: [self.paths]
|
||||
|
||||
@memoize_default()
|
||||
def _sub_modules_dict(self):
|
||||
names = {}
|
||||
|
||||
paths = self.paths
|
||||
file_names = chain.from_iterable(os.listdir(path) for path in paths)
|
||||
mods = [
|
||||
file_name.rpartition('.')[0] if '.' in file_name else file_name
|
||||
for file_name in file_names
|
||||
if file_name != '__pycache__'
|
||||
]
|
||||
|
||||
for name in mods:
|
||||
names[name] = imports.SubModuleName(self, name)
|
||||
return names
|
||||
|
||||
@@ -12,18 +12,18 @@ compiled module that returns the types for C-builtins.
|
||||
import collections
|
||||
import re
|
||||
|
||||
from jedi._compatibility import unicode
|
||||
from jedi.common import unite
|
||||
from jedi.evaluate import compiled
|
||||
from jedi.evaluate import representation as er
|
||||
from jedi.evaluate.instance import InstanceFunctionExecution, \
|
||||
AbstractInstanceContext, CompiledInstance, BoundMethod
|
||||
from jedi.evaluate import iterable
|
||||
from jedi.parser import ParserWithRecovery
|
||||
from jedi.parser.python import parse
|
||||
from jedi import debug
|
||||
from jedi.evaluate import precedence
|
||||
from jedi.evaluate import param
|
||||
from jedi.evaluate import analysis
|
||||
from jedi.evaluate.context import LazyTreeContext, ContextualizedNode
|
||||
|
||||
|
||||
class NotInStdLib(LookupError):
|
||||
@@ -176,7 +176,11 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
|
||||
# want static analysis to work well. Therefore we need to generated the
|
||||
# values again.
|
||||
key, lazy_context = next(arguments.unpack())
|
||||
ordered = list(iterable.py__iter__(evaluator, sequences, lazy_context.data))
|
||||
cn = None
|
||||
if isinstance(lazy_context, LazyTreeContext):
|
||||
# TODO access private
|
||||
cn = ContextualizedNode(lazy_context._context, lazy_context.data)
|
||||
ordered = list(iterable.py__iter__(evaluator, sequences, cn))
|
||||
|
||||
rev = list(reversed(ordered))
|
||||
# Repack iterator values and then run it the normal way. This is
|
||||
@@ -215,11 +219,12 @@ def builtins_isinstance(evaluator, objects, types, arguments):
|
||||
bool_results.add(any(cls in mro for cls in classes))
|
||||
else:
|
||||
_, lazy_context = list(arguments.unpack())[1]
|
||||
node = lazy_context.data
|
||||
message = 'TypeError: isinstance() arg 2 must be a ' \
|
||||
'class, type, or tuple of classes and types, ' \
|
||||
'not %s.' % cls_or_tup
|
||||
analysis.add(cls_or_tup, 'type-error-isinstance', node, message)
|
||||
if isinstance(lazy_context, LazyTreeContext):
|
||||
node = lazy_context.data
|
||||
message = 'TypeError: isinstance() arg 2 must be a ' \
|
||||
'class, type, or tuple of classes and types, ' \
|
||||
'not %s.' % cls_or_tup
|
||||
analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
|
||||
|
||||
return set(compiled.create(evaluator, x) for x in bool_results)
|
||||
|
||||
@@ -265,7 +270,7 @@ def collections_namedtuple(evaluator, obj, arguments):
|
||||
)
|
||||
|
||||
# Parse source
|
||||
generated_class = ParserWithRecovery(evaluator.grammar, unicode(source)).module.subscopes[0]
|
||||
generated_class = parse(source, grammar=evaluator.grammar).subscopes[0]
|
||||
return set([er.ClassContext(evaluator, generated_class, evaluator.BUILTINS)])
|
||||
|
||||
|
||||
|
||||
@@ -4,13 +4,13 @@ import sys
|
||||
from jedi.evaluate.site import addsitedir
|
||||
|
||||
from jedi._compatibility import exec_function, unicode
|
||||
from jedi.parser import tree
|
||||
from jedi.parser import ParserWithRecovery
|
||||
from jedi.parser.python import tree
|
||||
from jedi.parser.python import parse
|
||||
from jedi.evaluate.cache import memoize_default
|
||||
from jedi import debug
|
||||
from jedi import common
|
||||
from jedi.evaluate.compiled import CompiledObject
|
||||
from jedi.parser.utils import load_parser, save_parser
|
||||
from jedi.evaluate.context import ContextualizedNode
|
||||
|
||||
|
||||
def get_venv_path(venv):
|
||||
@@ -122,8 +122,8 @@ def _paths_from_assignment(module_context, expr_stmt):
|
||||
|
||||
from jedi.evaluate.iterable import py__iter__
|
||||
from jedi.evaluate.precedence import is_string
|
||||
types = module_context.create_context(expr_stmt).eval_node(expr_stmt)
|
||||
for lazy_context in py__iter__(module_context.evaluator, types, expr_stmt):
|
||||
cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
|
||||
for lazy_context in py__iter__(module_context.evaluator, cn.infer(), cn):
|
||||
for context in lazy_context.infer():
|
||||
if is_string(context):
|
||||
yield context.obj
|
||||
@@ -203,34 +203,27 @@ def sys_path_with_modifications(evaluator, module_context):
|
||||
|
||||
result = _check_module(module_context)
|
||||
result += _detect_django_path(path)
|
||||
for buildout_script in _get_buildout_scripts(path):
|
||||
for path in _get_paths_from_buildout_script(evaluator, buildout_script):
|
||||
for buildout_script_path in _get_buildout_script_paths(path):
|
||||
for path in _get_paths_from_buildout_script(evaluator, buildout_script_path):
|
||||
buildout_script_paths.add(path)
|
||||
# cleanup, back to old directory
|
||||
os.chdir(curdir)
|
||||
return list(result) + list(buildout_script_paths)
|
||||
|
||||
|
||||
def _get_paths_from_buildout_script(evaluator, buildout_script):
|
||||
def load(buildout_script):
|
||||
try:
|
||||
with open(buildout_script, 'rb') as f:
|
||||
source = common.source_to_unicode(f.read())
|
||||
except IOError:
|
||||
debug.dbg('Error trying to read buildout_script: %s', buildout_script)
|
||||
return
|
||||
|
||||
p = ParserWithRecovery(evaluator.grammar, source, buildout_script)
|
||||
save_parser(buildout_script, p)
|
||||
return p.module
|
||||
|
||||
cached = load_parser(buildout_script)
|
||||
module_node = cached and cached.module or load(buildout_script)
|
||||
if module_node is None:
|
||||
def _get_paths_from_buildout_script(evaluator, buildout_script_path):
|
||||
try:
|
||||
module_node = parse(
|
||||
path=buildout_script_path,
|
||||
grammar=evaluator.grammar,
|
||||
cache=True
|
||||
)
|
||||
except IOError:
|
||||
debug.warning('Error trying to read buildout_script: %s', buildout_script_path)
|
||||
return
|
||||
|
||||
from jedi.evaluate.representation import ModuleContext
|
||||
for path in _check_module(ModuleContext(evaluator, module_node)):
|
||||
for path in _check_module(ModuleContext(evaluator, module_node, buildout_script_path)):
|
||||
yield path
|
||||
|
||||
|
||||
@@ -262,7 +255,7 @@ def _detect_django_path(module_path):
|
||||
return result
|
||||
|
||||
|
||||
def _get_buildout_scripts(module_path):
|
||||
def _get_buildout_script_paths(module_path):
|
||||
"""
|
||||
if there is a 'buildout.cfg' file in one of the parent directories of the
|
||||
given module it will return a list of all files in the buildout bin
|
||||
|
||||
@@ -1,355 +1,8 @@
|
||||
"""
|
||||
The ``Parser`` tries to convert the available Python code in an easy to read
|
||||
format, something like an abstract syntax tree. The classes that represent this
tree are sitting in the :mod:`jedi.parser.tree` module.
|
||||
|
||||
The Python module ``tokenize`` is a very important part in the ``Parser``,
|
||||
because it splits the code into different words (tokens). Sometimes it looks a
|
||||
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this?" Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
|
||||
|
||||
There's one important optimization that needs to be known: Statements are not
|
||||
being parsed completely. ``Statement`` is just a representation of the tokens
|
||||
within the statement. This lowers memory usage and cpu time and reduces the
|
||||
complexity of the ``Parser`` (there's another parser sitting inside
|
||||
``Statement``, which produces ``Array`` and ``Call``).
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
|
||||
from jedi._compatibility import FileNotFoundError
|
||||
from jedi.parser import tree as pt
|
||||
from jedi.parser import tokenize
|
||||
from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
|
||||
STRING, tok_name)
|
||||
from jedi.parser.parser import ParserSyntaxError
|
||||
from jedi.parser.pgen2.pgen import generate_grammar
|
||||
from jedi.parser.pgen2.parse import PgenParser
|
||||
|
||||
OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
|
||||
# Not used yet. In the future I intend to add something like KeywordStatement
|
||||
STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
|
||||
'return', 'yield', 'pass', 'continue', 'break'
|
||||
from jedi.parser import python
|
||||
|
||||
|
||||
_loaded_grammars = {}
|
||||
|
||||
|
||||
class ParseError(Exception):
|
||||
"""
|
||||
Signals that the code you fed the Parser was not correct Python code.
|
||||
"""
|
||||
|
||||
|
||||
def load_grammar(version='3.6'):
|
||||
# For now we only support two different Python syntax versions: The latest
|
||||
# Python 3 and Python 2. This may change.
|
||||
if version in ('3.2', '3.3'):
|
||||
version = '3.4'
|
||||
elif version == '2.6':
|
||||
version = '2.7'
|
||||
|
||||
file = 'grammar' + version + '.txt'
|
||||
|
||||
global _loaded_grammars
|
||||
path = os.path.join(os.path.dirname(__file__), file)
|
||||
try:
|
||||
return _loaded_grammars[path]
|
||||
except KeyError:
|
||||
try:
|
||||
return _loaded_grammars.setdefault(path, generate_grammar(path))
|
||||
except FileNotFoundError:
|
||||
# Just load the default if the file does not exist.
|
||||
return load_grammar()
|
||||
|
||||
|
||||
class ParserSyntaxError(object):
|
||||
def __init__(self, message, position):
|
||||
self.message = message
|
||||
self.position = position
|
||||
|
||||
|
||||
class Parser(object):
|
||||
AST_MAPPING = {
|
||||
'expr_stmt': pt.ExprStmt,
|
||||
'classdef': pt.Class,
|
||||
'funcdef': pt.Function,
|
||||
'file_input': pt.Module,
|
||||
'import_name': pt.ImportName,
|
||||
'import_from': pt.ImportFrom,
|
||||
'break_stmt': pt.KeywordStatement,
|
||||
'continue_stmt': pt.KeywordStatement,
|
||||
'return_stmt': pt.ReturnStmt,
|
||||
'raise_stmt': pt.KeywordStatement,
|
||||
'yield_expr': pt.YieldExpr,
|
||||
'del_stmt': pt.KeywordStatement,
|
||||
'pass_stmt': pt.KeywordStatement,
|
||||
'global_stmt': pt.GlobalStmt,
|
||||
'nonlocal_stmt': pt.KeywordStatement,
|
||||
'print_stmt': pt.KeywordStatement,
|
||||
'assert_stmt': pt.AssertStmt,
|
||||
'if_stmt': pt.IfStmt,
|
||||
'with_stmt': pt.WithStmt,
|
||||
'for_stmt': pt.ForStmt,
|
||||
'while_stmt': pt.WhileStmt,
|
||||
'try_stmt': pt.TryStmt,
|
||||
'comp_for': pt.CompFor,
|
||||
'decorator': pt.Decorator,
|
||||
'lambdef': pt.Lambda,
|
||||
'old_lambdef': pt.Lambda,
|
||||
'lambdef_nocond': pt.Lambda,
|
||||
}
|
||||
|
||||
def __init__(self, grammar, source, start_symbol='file_input',
|
||||
tokenizer=None, start_parsing=True):
|
||||
# Todo Remove start_parsing (with False)
|
||||
|
||||
self._used_names = {}
|
||||
|
||||
self.source = source
|
||||
self._added_newline = False
|
||||
# The Python grammar needs a newline at the end of each statement.
|
||||
if not source.endswith('\n') and start_symbol == 'file_input':
|
||||
source += '\n'
|
||||
self._added_newline = True
|
||||
|
||||
self._start_symbol = start_symbol
|
||||
self._grammar = grammar
|
||||
|
||||
self._parsed = None
|
||||
|
||||
if start_parsing:
|
||||
if tokenizer is None:
|
||||
tokenizer = tokenize.source_tokens(source, use_exact_op_types=True)
|
||||
self.parse(tokenizer)
|
||||
|
||||
def parse(self, tokenizer):
|
||||
if self._parsed is not None:
|
||||
return self._parsed
|
||||
|
||||
start_number = self._grammar.symbol2number[self._start_symbol]
|
||||
self.pgen_parser = PgenParser(
|
||||
self._grammar, self.convert_node, self.convert_leaf,
|
||||
self.error_recovery, start_number
|
||||
)
|
||||
|
||||
self._parsed = self.pgen_parser.parse(tokenizer)
|
||||
|
||||
if self._start_symbol == 'file_input' != self._parsed.type:
|
||||
# If there's only one statement, we get back a non-module. That's
|
||||
# not what we want, we want a module, so we add it here:
|
||||
self._parsed = self.convert_node(self._grammar,
|
||||
self._grammar.symbol2number['file_input'],
|
||||
[self._parsed])
|
||||
|
||||
if self._added_newline:
|
||||
self.remove_last_newline()
|
||||
# The stack is empty now, we don't need it anymore.
|
||||
del self.pgen_parser
|
||||
return self._parsed
|
||||
|
||||
def get_parsed_node(self):
|
||||
# TODO remove in favor of get_root_node
|
||||
return self._parsed
|
||||
|
||||
def get_root_node(self):
|
||||
return self._parsed
|
||||
|
||||
def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,
|
||||
add_token_callback):
|
||||
raise ParseError
|
||||
|
||||
def convert_node(self, grammar, type, children):
|
||||
"""
|
||||
Convert raw node information to a Node instance.
|
||||
|
||||
This is passed to the parser driver which calls it whenever a reduction of a
|
||||
grammar rule produces a new complete node, so that the tree is built
|
||||
strictly bottom-up.
|
||||
"""
|
||||
symbol = grammar.number2symbol[type]
|
||||
try:
|
||||
return Parser.AST_MAPPING[symbol](children)
|
||||
except KeyError:
|
||||
if symbol == 'suite':
|
||||
# We don't want the INDENT/DEDENT in our parser tree. Those
|
||||
# leaves are just cancer. They are virtual leaves and not real
|
||||
# ones and therefore have pseudo start/end positions and no
|
||||
# prefixes. Just ignore them.
|
||||
children = [children[0]] + children[2:-1]
|
||||
return pt.Node(symbol, children)
|
||||
|
||||
def convert_leaf(self, grammar, type, value, prefix, start_pos):
|
||||
# print('leaf', repr(value), token.tok_name[type])
|
||||
if type == tokenize.NAME:
|
||||
if value in grammar.keywords:
|
||||
return pt.Keyword(value, start_pos, prefix)
|
||||
else:
|
||||
name = pt.Name(value, start_pos, prefix)
|
||||
# Keep a listing of all used names
|
||||
arr = self._used_names.setdefault(name.value, [])
|
||||
arr.append(name)
|
||||
return name
|
||||
elif type == STRING:
|
||||
return pt.String(value, start_pos, prefix)
|
||||
elif type == NUMBER:
|
||||
return pt.Number(value, start_pos, prefix)
|
||||
elif type == NEWLINE:
|
||||
return pt.Newline(value, start_pos, prefix)
|
||||
elif type == ENDMARKER:
|
||||
return pt.EndMarker(value, start_pos, prefix)
|
||||
else:
|
||||
return pt.Operator(value, start_pos, prefix)
|
||||
|
||||
def remove_last_newline(self):
|
||||
endmarker = self._parsed.children[-1]
|
||||
# The newline is either in the endmarker as a prefix or the previous
|
||||
# leaf as a newline token.
|
||||
prefix = endmarker.prefix
|
||||
if prefix.endswith('\n'):
|
||||
endmarker.prefix = prefix = prefix[:-1]
|
||||
last_end = 0
|
||||
if '\n' not in prefix:
|
||||
# Basically if the last line doesn't end with a newline. we
|
||||
# have to add the previous line's end_position.
|
||||
try:
|
||||
last_end = endmarker.get_previous_leaf().end_pos[1]
|
||||
except IndexError:
|
||||
pass
|
||||
last_line = re.sub('.*\n', '', prefix)
|
||||
endmarker.start_pos = endmarker.line - 1, last_end + len(last_line)
|
||||
else:
|
||||
try:
|
||||
newline = endmarker.get_previous_leaf()
|
||||
except IndexError:
|
||||
return # This means that the parser is empty.
|
||||
|
||||
assert newline.value.endswith('\n')
|
||||
newline.value = newline.value[:-1]
|
||||
endmarker.start_pos = \
|
||||
newline.start_pos[0], newline.start_pos[1] + len(newline.value)
|
||||
|
||||
|
||||
class ParserWithRecovery(Parser):
|
||||
"""
|
||||
This class is used to parse a Python file, it then divides them into a
|
||||
class structure of different scopes.
|
||||
|
||||
:param grammar: The grammar object of pgen2. Loaded by load_grammar.
|
||||
:param source: The codebase for the parser. Must be unicode.
|
||||
:param module_path: The path of the module in the file system, may be None.
|
||||
:type module_path: str
|
||||
"""
|
||||
def __init__(self, grammar, source, module_path=None, tokenizer=None,
|
||||
start_parsing=True):
|
||||
self.syntax_errors = []
|
||||
|
||||
self._omit_dedent_list = []
|
||||
self._indent_counter = 0
|
||||
self._module_path = module_path
|
||||
|
||||
# TODO do print absolute import detection here.
|
||||
# try:
|
||||
# del python_grammar_no_print_statement.keywords["print"]
|
||||
# except KeyError:
|
||||
# pass # Doesn't exist in the Python 3 grammar.
|
||||
|
||||
# if self.options["print_function"]:
|
||||
# python_grammar = pygram.python_grammar_no_print_statement
|
||||
# else:
|
||||
super(ParserWithRecovery, self).__init__(
|
||||
grammar, source,
|
||||
tokenizer=tokenizer,
|
||||
start_parsing=start_parsing
|
||||
)
|
||||
|
||||
def parse(self, tokenizer):
|
||||
root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokenizer))
|
||||
self.module = root_node
|
||||
self.module.used_names = self._used_names
|
||||
self.module.path = self._module_path
|
||||
return root_node
|
||||
|
||||
def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,
|
||||
add_token_callback):
|
||||
"""
|
||||
This parser is written in a dynamic way, meaning that this parser
|
||||
allows using different grammars (even non-Python). However, error
|
||||
recovery is purely written for Python.
|
||||
"""
|
||||
def current_suite(stack):
|
||||
# For now just discard everything that is not a suite or
|
||||
# file_input, if we detect an error.
|
||||
for index, (dfa, state, (type_, nodes)) in reversed(list(enumerate(stack))):
|
||||
# `suite` can sometimes be only simple_stmt, not stmt.
|
||||
symbol = grammar.number2symbol[type_]
|
||||
if symbol == 'file_input':
|
||||
break
|
||||
elif symbol == 'suite' and len(nodes) > 1:
|
||||
# suites without an indent in them get discarded.
|
||||
break
|
||||
elif symbol == 'simple_stmt' and len(nodes) > 1:
|
||||
# simple_stmt can just be turned into a Node, if there are
|
||||
# enough statements. Ignore the rest after that.
|
||||
break
|
||||
return index, symbol, nodes
|
||||
|
||||
index, symbol, nodes = current_suite(stack)
|
||||
if symbol == 'simple_stmt':
|
||||
index -= 2
|
||||
(_, _, (type_, suite_nodes)) = stack[index]
|
||||
symbol = grammar.number2symbol[type_]
|
||||
suite_nodes.append(pt.Node(symbol, list(nodes)))
|
||||
# Remove
|
||||
nodes[:] = []
|
||||
nodes = suite_nodes
|
||||
stack[index]
|
||||
|
||||
# print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index)
|
||||
if self._stack_removal(grammar, stack, arcs, index + 1, value, start_pos):
|
||||
add_token_callback(typ, value, start_pos, prefix)
|
||||
else:
|
||||
if typ == INDENT:
|
||||
# For every deleted INDENT we have to delete a DEDENT as well.
|
||||
# Otherwise the parser will get into trouble and DEDENT too early.
|
||||
self._omit_dedent_list.append(self._indent_counter)
|
||||
else:
|
||||
error_leaf = pt.ErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix)
|
||||
stack[-1][2][1].append(error_leaf)
|
||||
|
||||
def _stack_removal(self, grammar, stack, arcs, start_index, value, start_pos):
|
||||
failed_stack = []
|
||||
found = False
|
||||
all_nodes = []
|
||||
for dfa, state, (typ, nodes) in stack[start_index:]:
|
||||
if nodes:
|
||||
found = True
|
||||
if found:
|
||||
symbol = grammar.number2symbol[typ]
|
||||
failed_stack.append((symbol, nodes))
|
||||
all_nodes += nodes
|
||||
if failed_stack:
|
||||
stack[start_index - 1][2][1].append(pt.ErrorNode(all_nodes))
|
||||
|
||||
stack[start_index:] = []
|
||||
return failed_stack
|
||||
|
||||
def _tokenize(self, tokenizer):
|
||||
for typ, value, start_pos, prefix in tokenizer:
|
||||
# print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
|
||||
if typ == DEDENT:
|
||||
# We need to count indents, because if we just omit any DEDENT,
|
||||
# we might omit them in the wrong place.
|
||||
o = self._omit_dedent_list
|
||||
if o and o[-1] == self._indent_counter:
|
||||
o.pop()
|
||||
continue
|
||||
|
||||
self._indent_counter -= 1
|
||||
elif typ == INDENT:
|
||||
self._indent_counter += 1
|
||||
|
||||
yield typ, value, start_pos, prefix
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s: %s>" % (type(self).__name__, self.module)
|
||||
def parse(grammar, code):
|
||||
raise NotImplementedError
|
||||
Parser(grammar, code)
|
||||
|
||||
jedi/parser/cache.py (new file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import hashlib
|
||||
import gc
|
||||
import shutil
|
||||
import pickle
|
||||
import platform
|
||||
import errno
|
||||
|
||||
from jedi import settings
|
||||
from jedi import debug
|
||||
from jedi._compatibility import FileNotFoundError
|
||||
|
||||
|
||||
_PICKLE_VERSION = 30
|
||||
"""
|
||||
Version number (integer) for file system cache.
|
||||
|
||||
Increment this number when there are any incompatible changes in
|
||||
the parser tree classes. For example, the following changes
|
||||
are regarded as incompatible.
|
||||
|
||||
- A class name is changed.
|
||||
- A class is moved to another module.
|
||||
- A __slot__ of a class is changed.
|
||||
"""
|
||||
|
||||
_VERSION_TAG = '%s-%s%s-%s' % (
|
||||
platform.python_implementation(),
|
||||
sys.version_info[0],
|
||||
sys.version_info[1],
|
||||
_PICKLE_VERSION
|
||||
)
|
||||
"""
|
||||
Short name to distinguish Python implementations and versions.
|
||||
|
||||
It's like `sys.implementation.cache_tag` but for Python < 3.3
|
||||
we generate something similar. See:
|
||||
http://docs.python.org/3/library/sys.html#sys.implementation
|
||||
"""
|
||||
|
||||
# for fast_parser, should not be deleted
|
||||
parser_cache = {}
|
||||
|
||||
|
||||
|
||||
class _NodeCacheItem(object):
|
||||
def __init__(self, node, lines, change_time=None):
|
||||
self.node = node
|
||||
self.lines = lines
|
||||
if change_time is None:
|
||||
change_time = time.time()
|
||||
self.change_time = change_time
|
||||
|
||||
|
||||
def load_module(grammar, path):
|
||||
"""
|
||||
Returns a module or None, if it fails.
|
||||
"""
|
||||
try:
|
||||
p_time = os.path.getmtime(path)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
|
||||
try:
|
||||
# TODO Add grammar sha256
|
||||
module_cache_item = parser_cache[path]
|
||||
if p_time <= module_cache_item.change_time:
|
||||
return module_cache_item.node
|
||||
except KeyError:
|
||||
if not settings.use_filesystem_cache:
|
||||
return None
|
||||
|
||||
return _load_from_file_system(grammar, path, p_time)
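A hedged usage sketch of the lookup above; the import locations follow the new jedi.parser.python / jedi.parser.cache layout introduced in this change, and the file path is purely illustrative:

from jedi.parser.python import load_grammar
from jedi.parser.cache import load_module

grammar = load_grammar()                        # carries the sha256 used for cache paths
node = load_module(grammar, '/tmp/example.py')  # hypothetical path
if node is None:
    # Neither in the in-memory parser_cache nor available as a fresh pickle on disk.
    pass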
def _load_from_file_system(grammar, path, p_time):
|
||||
cache_path = _get_hashed_path(grammar, path)
|
||||
try:
|
||||
try:
|
||||
if p_time > os.path.getmtime(cache_path):
|
||||
# Cache is outdated
|
||||
return None
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
# In Python 2 instead of an IOError here we get an OSError.
|
||||
raise FileNotFoundError
|
||||
else:
|
||||
raise
|
||||
|
||||
with open(cache_path, 'rb') as f:
|
||||
gc.disable()
|
||||
try:
|
||||
module_cache_item = pickle.load(f)
|
||||
finally:
|
||||
gc.enable()
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
else:
|
||||
parser_cache[path] = module_cache_item
|
||||
debug.dbg('pickle loaded: %s', path)
|
||||
return module_cache_item.node
|
||||
|
||||
|
||||
def save_module(grammar, path, module, lines, pickling=True):
|
||||
try:
|
||||
p_time = None if path is None else os.path.getmtime(path)
|
||||
except OSError:
|
||||
p_time = None
|
||||
pickling = False
|
||||
|
||||
item = _NodeCacheItem(module, lines, p_time)
|
||||
parser_cache[path] = item
|
||||
if settings.use_filesystem_cache and pickling and path is not None:
|
||||
_save_to_file_system(grammar, path, item)
|
||||
|
||||
|
||||
def _save_to_file_system(grammar, path, item):
|
||||
with open(_get_hashed_path(grammar, path), 'wb') as f:
|
||||
pickle.dump(item, f, pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
|
||||
def remove_old_modules(self):
|
||||
"""
|
||||
# TODO Might want to use such a function to clean up the cache (if it's
|
||||
# too old). We could potentially also scan for old files in the
|
||||
# directory and delete those.
|
||||
"""
|
||||
|
||||
|
||||
def clear_cache(self):
|
||||
shutil.rmtree(settings.cache_directory)
|
||||
parser_cache.clear()
|
||||
|
||||
|
||||
def _get_hashed_path(grammar, path):
|
||||
file_hash = hashlib.sha256(path.encode("utf-8")).hexdigest()
|
||||
directory = _get_cache_directory_path()
|
||||
return os.path.join(directory, '%s-%s.pkl' % (grammar.sha256, file_hash))
|
||||
|
||||
|
||||
def _get_cache_directory_path():
|
||||
directory = os.path.join(settings.cache_directory, _VERSION_TAG)
|
||||
if not os.path.exists(directory):
|
||||
os.makedirs(directory)
|
||||
return directory
|
||||
jedi/parser/parser.py (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
The ``Parser`` tries to convert the available Python code in an easy to read
|
||||
format, something like an abstract syntax tree. The classes that represent this
tree are sitting in the :mod:`jedi.parser.tree` module.
|
||||
|
||||
The Python module ``tokenize`` is a very important part in the ``Parser``,
|
||||
because it splits the code into different words (tokens). Sometimes it looks a
|
||||
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this?" Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
|
||||
|
||||
There's one important optimization that needs to be known: Statements are not
|
||||
being parsed completely. ``Statement`` is just a representation of the tokens
|
||||
within the statement. This lowers memory usage and cpu time and reduces the
|
||||
complexity of the ``Parser`` (there's another parser sitting inside
|
||||
``Statement``, which produces ``Array`` and ``Call``).
|
||||
"""
|
||||
from jedi.parser import tree
|
||||
from jedi.parser.pgen2.parse import PgenParser
|
||||
|
||||
|
||||
class ParserSyntaxError(Exception):
|
||||
"""
|
||||
Contains error information about the parser tree.
|
||||
|
||||
May be raised as an exception.
|
||||
"""
|
||||
def __init__(self, message, position):
|
||||
self.message = message
|
||||
self.position = position
|
||||
|
||||
|
||||
class BaseParser(object):
|
||||
node_map = {}
|
||||
default_node = tree.Node
|
||||
|
||||
leaf_map = {
|
||||
}
|
||||
default_leaf = tree.Leaf
|
||||
|
||||
def __init__(self, grammar, start_symbol='file_input', error_recovery=False):
|
||||
self._grammar = grammar
|
||||
self._start_symbol = start_symbol
|
||||
self._error_recovery = error_recovery
|
||||
|
||||
def parse(self, tokens):
|
||||
start_number = self._grammar.symbol2number[self._start_symbol]
|
||||
self.pgen_parser = PgenParser(
|
||||
self._grammar, self.convert_node, self.convert_leaf,
|
||||
self.error_recovery, start_number
|
||||
)
|
||||
|
||||
node = self.pgen_parser.parse(tokens)
|
||||
# The stack is empty now, we don't need it anymore.
|
||||
del self.pgen_parser
|
||||
return node
|
||||
|
||||
def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,
|
||||
add_token_callback):
|
||||
if self._error_recovery:
|
||||
raise NotImplementedError("Error Recovery is not implemented")
|
||||
else:
|
||||
raise ParserSyntaxError('SyntaxError: invalid syntax', start_pos)
|
||||
|
||||
def convert_node(self, grammar, type_, children):
|
||||
# TODO REMOVE symbol, we don't want type here.
|
||||
symbol = grammar.number2symbol[type_]
|
||||
try:
|
||||
return self.node_map[symbol](children)
|
||||
except KeyError:
|
||||
return self.default_node(symbol, children)
|
||||
|
||||
def convert_leaf(self, grammar, type_, value, prefix, start_pos):
|
||||
try:
|
||||
return self.leaf_map[type_](value, start_pos, prefix)
|
||||
except KeyError:
|
||||
return self.default_leaf(value, start_pos, prefix)
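The node_map/leaf_map hooks above are meant to be filled in by language-specific subclasses; a hedged sketch of a minimal override (the subclass name is an illustrative assumption, and Function/Class are assumed to live in jedi.parser.python.tree after this reorganization):

from jedi.parser.parser import BaseParser
from jedi.parser.python import tree

class MiniPythonParser(BaseParser):
    # Route selected grammar symbols to richer tree classes; everything else
    # falls back to BaseParser.default_node / default_leaf.
    node_map = {
        'funcdef': tree.Function,
        'classdef': tree.Class,
    }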
@@ -16,8 +16,9 @@ fallback token code OP, but the parser needs the actual token code.
|
||||
|
||||
"""
|
||||
|
||||
# Python imports
|
||||
import pickle
|
||||
import hashlib
|
||||
|
||||
|
||||
|
||||
class Grammar(object):
|
||||
@@ -74,7 +75,7 @@ class Grammar(object):
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, bnf_text):
|
||||
self.symbol2number = {}
|
||||
self.number2symbol = {}
|
||||
self.states = []
|
||||
@@ -84,6 +85,7 @@ class Grammar(object):
|
||||
self.tokens = {}
|
||||
self.symbol2label = {}
|
||||
self.start = 256
|
||||
self.sha256 = hashlib.sha256(bnf_text.encode("utf-8")).hexdigest()
|
||||
|
||||
def dump(self, filename):
|
||||
"""Dump the grammar tables to a pickle file."""
|
||||
|
||||
@@ -118,8 +118,8 @@ class PgenParser(object):
|
||||
self.rootnode = None
|
||||
self.error_recovery = error_recovery
|
||||
|
||||
def parse(self, tokenizer):
|
||||
for type_, value, start_pos, prefix in tokenizer:
|
||||
def parse(self, tokens):
|
||||
for type_, value, start_pos, prefix in tokens:
|
||||
if self.addtoken(type_, value, start_pos, prefix):
|
||||
break
|
||||
else:
|
||||
@@ -135,22 +135,27 @@ class PgenParser(object):
|
||||
ilabel = token_to_ilabel(self.grammar, type_, value)
|
||||
|
||||
# Loop until the token is shifted; may raise exceptions
|
||||
_gram = self.grammar
|
||||
_labels = _gram.labels
|
||||
_push = self._push
|
||||
_pop = self._pop
|
||||
_shift = self._shift
|
||||
while True:
|
||||
dfa, state, node = self.stack[-1]
|
||||
states, first = dfa
|
||||
arcs = states[state]
|
||||
# Look for a state with this label
|
||||
for i, newstate in arcs:
|
||||
t, v = self.grammar.labels[i]
|
||||
t, v = _labels[i]
|
||||
if ilabel == i:
|
||||
# Look it up in the list of labels
|
||||
assert t < 256
|
||||
# Shift a token; we're done with it
|
||||
self.shift(type_, value, newstate, prefix, start_pos)
|
||||
_shift(type_, value, newstate, prefix, start_pos)
|
||||
# Pop while we are in an accept-only state
|
||||
state = newstate
|
||||
while states[state] == [(0, state)]:
|
||||
self.pop()
|
||||
_pop()
|
||||
if not self.stack:
|
||||
# Done parsing!
|
||||
return True
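The change above binds self.grammar.labels and the _push/_pop/_shift helpers to locals before the hot token loop, a common CPython micro-optimization (the matching method renames appear in a later hunk). A hedged, self-contained illustration of the pattern:

import timeit

class Counter(object):
    def __init__(self):
        self.total = 0

    def bump(self):
        self.total += 1

def slow(n=100000):
    c = Counter()
    for _ in range(n):
        c.bump()              # attribute lookup on every iteration

def fast(n=100000):
    c = Counter()
    bump = c.bump             # bind the bound method to a local once
    for _ in range(n):
        bump()

print(timeit.timeit(slow, number=10))
print(timeit.timeit(fast, number=10))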
@@ -160,16 +165,16 @@ class PgenParser(object):
|
||||
return False
|
||||
elif t >= 256:
|
||||
# See if it's a symbol and if we're in its first set
|
||||
itsdfa = self.grammar.dfas[t]
|
||||
itsdfa = _gram.dfas[t]
|
||||
itsstates, itsfirst = itsdfa
|
||||
if ilabel in itsfirst:
|
||||
# Push a symbol
|
||||
self.push(t, itsdfa, newstate)
|
||||
_push(t, itsdfa, newstate)
|
||||
break # To continue the outer while loop
|
||||
else:
|
||||
if (0, state) in arcs:
|
||||
# An accepting state, pop it and try something else
|
||||
self.pop()
|
||||
_pop()
|
||||
if not self.stack:
|
||||
# Done parsing, but another token is input
|
||||
raise InternalParseError("too much input", type_, value, start_pos)
|
||||
@@ -178,21 +183,21 @@ class PgenParser(object):
|
||||
value, start_pos, prefix, self.addtoken)
|
||||
break
|
||||
|
||||
def shift(self, type_, value, newstate, prefix, start_pos):
|
||||
def _shift(self, type_, value, newstate, prefix, start_pos):
|
||||
"""Shift a token. (Internal)"""
|
||||
dfa, state, node = self.stack[-1]
|
||||
newnode = self.convert_leaf(self.grammar, type_, value, prefix, start_pos)
|
||||
node[-1].append(newnode)
|
||||
self.stack[-1] = (dfa, newstate, node)
|
||||
|
||||
def push(self, type_, newdfa, newstate):
|
||||
def _push(self, type_, newdfa, newstate):
|
||||
"""Push a nonterminal. (Internal)"""
|
||||
dfa, state, node = self.stack[-1]
|
||||
newnode = (type_, [])
|
||||
self.stack[-1] = (dfa, newstate, node)
|
||||
self.stack.append((newdfa, 0, newnode))
|
||||
|
||||
def pop(self):
|
||||
def _pop(self):
|
||||
"""Pop a nonterminal. (Internal)"""
|
||||
popdfa, popstate, (type_, children) = self.stack.pop()
|
||||
# If there's exactly one child, return that child instead of creating a
|
||||
|
||||
@@ -5,30 +5,22 @@
|
||||
# Copyright 2014 David Halter. Integration into Jedi.
|
||||
# Modifications are dual-licensed: MIT and PSF.
|
||||
|
||||
# Pgen imports
|
||||
from . import grammar
|
||||
from jedi.parser import token
|
||||
from jedi.parser import tokenize
|
||||
|
||||
|
||||
class ParserGenerator(object):
|
||||
def __init__(self, filename, stream=None):
|
||||
close_stream = None
|
||||
if stream is None:
|
||||
stream = open(filename)
|
||||
close_stream = stream.close
|
||||
self.filename = filename
|
||||
self.stream = stream
|
||||
self.generator = tokenize.generate_tokens(stream.readline)
|
||||
def __init__(self, bnf_text):
|
||||
self._bnf_text = bnf_text
|
||||
self.generator = tokenize.source_tokens(bnf_text)
|
||||
self.gettoken() # Initialize lookahead
|
||||
self.dfas, self.startsymbol = self.parse()
|
||||
if close_stream is not None:
|
||||
close_stream()
|
||||
self.first = {} # map from symbol name to set of tokens
|
||||
self.addfirstsets()
|
||||
|
||||
def make_grammar(self):
|
||||
c = grammar.Grammar()
|
||||
c = grammar.Grammar(self._bnf_text)
|
||||
names = list(self.dfas.keys())
|
||||
names.sort()
|
||||
names.remove(self.startsymbol)
|
||||
@@ -389,6 +381,14 @@ class DFAState(object):
|
||||
__hash__ = None # For Py3 compatibility.
|
||||
|
||||
|
||||
def generate_grammar(filename="Grammar.txt"):
|
||||
p = ParserGenerator(filename)
|
||||
def generate_grammar(bnf_text):
|
||||
"""
|
||||
``bnf_text`` is a grammar in extended BNF (using * for repetition, + for
|
||||
at-least-once repetition, [] for optional parts, | for alternatives and ()
|
||||
for grouping).
|
||||
|
||||
It's not EBNF according to ISO/IEC 14977. It's a dialect Python uses in its
|
||||
own parser.
|
||||
"""
|
||||
p = ParserGenerator(bnf_text)
|
||||
return p.make_grammar()
|
||||
|
||||
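The hunks above replace the stream/filename based ParserGenerator with one that works directly on a grammar string, and Grammar now remembers a SHA-256 digest of that text. A minimal sketch of the new entry point, assuming a pgen-style grammar file such as grammar3.6.txt (the file load_grammar() reads further down) is available in the working directory:

import hashlib

from jedi.parser.pgen2.pgen import generate_grammar

# Read the extended-BNF grammar text ourselves, as load_grammar() does below.
with open('grammar3.6.txt') as f:
    bnf_text = f.read()

grammar = generate_grammar(bnf_text)

# The Grammar object now carries a checksum of the text it was built from.
assert grammar.sha256 == hashlib.sha256(bnf_text.encode('utf-8')).hexdigest()
print(sorted(grammar.symbol2number)[:5])  # a few of the grammar's nonterminals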
126
jedi/parser/python/__init__.py
Normal file
@@ -0,0 +1,126 @@
|
||||
"""
|
||||
Parsers for Python
|
||||
"""
|
||||
import os
|
||||
|
||||
from jedi import settings
|
||||
from jedi._compatibility import FileNotFoundError
|
||||
from jedi.parser.pgen2.pgen import generate_grammar
|
||||
from jedi.parser.python.parser import Parser, _remove_last_newline
|
||||
from jedi.parser.python.diff import DiffParser
|
||||
from jedi.parser.tokenize import generate_tokens
|
||||
from jedi.parser.cache import parser_cache, load_module, save_module
|
||||
from jedi.common import splitlines, source_to_unicode
|
||||
|
||||
|
||||
_loaded_grammars = {}
|
||||
|
||||
|
||||
def load_grammar(version=None):
|
||||
"""
|
||||
Loads a Python grammar. The default version is always the latest.
|
||||
|
||||
If you need support for a specific version, please use e.g.
|
||||
`version='3.3'`.
|
||||
"""
|
||||
if version is None:
|
||||
version = '3.6'
|
||||
|
||||
if version in ('3.2', '3.3'):
|
||||
version = '3.4'
|
||||
elif version == '2.6':
|
||||
version = '2.7'
|
||||
|
||||
file = 'grammar' + version + '.txt'
|
||||
|
||||
global _loaded_grammars
|
||||
path = os.path.join(os.path.dirname(__file__), file)
|
||||
try:
|
||||
return _loaded_grammars[path]
|
||||
except KeyError:
|
||||
try:
|
||||
with open(path) as f:
|
||||
bnf_text = f.read()
|
||||
grammar = generate_grammar(bnf_text)
|
||||
return _loaded_grammars.setdefault(path, grammar)
|
||||
except FileNotFoundError:
|
||||
# Just load the default if the file does not exist.
|
||||
return load_grammar()
|
||||
|
||||
|
||||
def parse(code=None, path=None, grammar=None, error_recovery=True,
|
||||
start_symbol='file_input', cache=False, diff_cache=False):
|
||||
"""
|
||||
If you want to parse a Python file you want to start here, most likely.
|
||||
|
||||
If you need finer grained control over the parsed instance, there will be
|
||||
other ways to access it.
|
||||
|
||||
:param code: A unicode string that contains Python code.
|
||||
:param path: The path to the file you want to open. Only needed for caching.
|
||||
:param grammar: A Python grammar file, created with load_grammar.
|
||||
:param error_recovery: If enabled, any code will be returned. If it is
|
||||
invalid, it will be returned as an error node. If disabled, you will
|
||||
get a ParseError when encountering syntax errors in your code.
|
||||
:param start_symbol: The grammar symbol that you want to parse. Only
|
||||
allowed to be used when error_recovery is disabled.
|
||||
|
||||
:return: A syntax tree node. Typically the module.
|
||||
"""
|
||||
if code is None and path is None:
|
||||
raise TypeError("Please provide either code or a path.")
|
||||
|
||||
if grammar is None:
|
||||
grammar = load_grammar()
|
||||
|
||||
if path is not None:
|
||||
path = os.path.expanduser(path)
|
||||
|
||||
if cache and not code and path is not None:
|
||||
# In this case we do actual caching. We just try to load it.
|
||||
module_node = load_module(grammar, path)
|
||||
if module_node is not None:
|
||||
return module_node
|
||||
|
||||
if code is None:
|
||||
with open(path, 'rb') as f:
|
||||
code = source_to_unicode(f.read())
|
||||
|
||||
if diff_cache and settings.fast_parser:
|
||||
try:
|
||||
module_cache_item = parser_cache[path]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
lines = splitlines(code, keepends=True)
|
||||
module_node = module_cache_item.node
|
||||
old_lines = module_cache_item.lines
|
||||
if old_lines == lines:
|
||||
save_module(grammar, path, module_node, lines, pickling=False)
|
||||
return module_node
|
||||
|
||||
new_node = DiffParser(grammar, module_node).update(
|
||||
old_lines=old_lines,
|
||||
new_lines=lines
|
||||
)
|
||||
save_module(grammar, path, new_node, lines, pickling=cache)
|
||||
return new_node
|
||||
|
||||
added_newline = not code.endswith('\n')
|
||||
lines = tokenize_lines = splitlines(code, keepends=True)
|
||||
if added_newline:
|
||||
code += '\n'
|
||||
tokenize_lines = list(tokenize_lines)
|
||||
tokenize_lines[-1] += '\n'
|
||||
tokenize_lines.append('')
|
||||
|
||||
tokens = generate_tokens(tokenize_lines, use_exact_op_types=True)
|
||||
|
||||
p = Parser(grammar, error_recovery=error_recovery, start_symbol=start_symbol)
|
||||
root_node = p.parse(tokens=tokens)
|
||||
if added_newline:
|
||||
_remove_last_newline(root_node)
|
||||
|
||||
if cache or diff_cache:
|
||||
save_module(grammar, path, root_node, lines, pickling=cache)
|
||||
return root_node
|
||||
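Taken together, the new jedi/parser/python/__init__.py above exposes a single high-level entry point. A minimal sketch of using it; the code strings are throwaway examples and the exception is caught generically because the concrete ParseError class is not part of this diff:

from jedi.parser.python import load_grammar, parse

# Parse a small snippet with the default (latest) grammar.
module = parse("def foo(a, b):\n    return a + b\n")
print(module.type)      # root node of the tree, normally 'file_input'
print(module.end_pos)   # position just past the parsed code

# A specific grammar version can be loaded explicitly and passed in.
grammar = load_grammar(version='3.4')
module = parse("x = 1\n", grammar=grammar)

# With error_recovery disabled, syntax errors raise instead of producing
# error nodes inside the tree.
try:
    parse("def broken(:\n", error_recovery=False)
except Exception as exc:   # ParseError, per the docstring above
    print(type(exc).__name__)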
@@ -9,42 +9,16 @@ import re
|
||||
import difflib
|
||||
from collections import namedtuple
|
||||
|
||||
from jedi._compatibility import use_metaclass
|
||||
from jedi import settings
|
||||
from jedi.common import splitlines
|
||||
from jedi.parser import ParserWithRecovery
|
||||
from jedi.parser.tree import EndMarker
|
||||
from jedi.parser.utils import parser_cache
|
||||
from jedi.parser.python.parser import Parser, _remove_last_newline
|
||||
from jedi.parser.python.tree import EndMarker
|
||||
from jedi import debug
|
||||
from jedi.parser.tokenize import (generate_tokens, NEWLINE, TokenInfo,
|
||||
ENDMARKER, INDENT, DEDENT)
|
||||
|
||||
|
||||
class CachedFastParser(type):
|
||||
""" This is a metaclass for caching `FastParser`. """
|
||||
def __call__(self, grammar, source, module_path=None):
|
||||
pi = parser_cache.get(module_path, None)
|
||||
if pi is None or not settings.fast_parser:
|
||||
return ParserWithRecovery(grammar, source, module_path)
|
||||
|
||||
parser = pi.parser
|
||||
d = DiffParser(parser)
|
||||
new_lines = splitlines(source, keepends=True)
|
||||
parser.module = parser._parsed = d.update(new_lines)
|
||||
return parser
|
||||
|
||||
|
||||
class FastParser(use_metaclass(CachedFastParser)):
|
||||
pass
|
||||
|
||||
|
||||
def _merge_used_names(base_dict, other_dict):
|
||||
for key, names in other_dict.items():
|
||||
base_dict.setdefault(key, []).extend(names)
|
||||
|
||||
|
||||
def _get_last_line(node_or_leaf):
|
||||
last_leaf = node_or_leaf.last_leaf()
|
||||
last_leaf = node_or_leaf.get_last_leaf()
|
||||
if _ends_with_newline(last_leaf):
|
||||
return last_leaf.start_pos[0]
|
||||
else:
|
||||
@@ -111,20 +85,21 @@ def _update_positions(nodes, line_offset, last_leaf):
|
||||
|
||||
|
||||
class DiffParser(object):
|
||||
def __init__(self, parser):
|
||||
self._parser = parser
|
||||
self._grammar = self._parser._grammar
|
||||
self._module = parser.get_root_node()
|
||||
"""
|
||||
An advanced form of parsing a file faster. Unfortunately comes with huge
|
||||
side effects. It changes the given module.
|
||||
"""
|
||||
def __init__(self, grammar, module):
|
||||
self._grammar = grammar
|
||||
self._module = module
|
||||
|
||||
def _reset(self):
|
||||
self._copy_count = 0
|
||||
self._parser_count = 0
|
||||
|
||||
self._copied_ranges = []
|
||||
self._new_used_names = {}
|
||||
self._nodes_stack = _NodesStack(self._module)
|
||||
|
||||
def update(self, lines_new):
|
||||
def update(self, old_lines, new_lines):
|
||||
'''
|
||||
The algorithm works as follows:
|
||||
|
||||
@@ -141,27 +116,26 @@ class DiffParser(object):
|
||||
Returns the new module node.
|
||||
'''
|
||||
debug.speed('diff parser start')
|
||||
self._parser_lines_new = lines_new
|
||||
# Reset the used names cache so they get regenerated.
|
||||
self._module._used_names = None
|
||||
|
||||
self._parser_lines_new = new_lines
|
||||
self._added_newline = False
|
||||
if lines_new[-1] != '':
|
||||
if new_lines[-1] != '':
|
||||
# The Python grammar needs a newline at the end of a file, but for
|
||||
# everything else we keep working with lines_new here.
|
||||
self._parser_lines_new = list(lines_new)
|
||||
# everything else we keep working with new_lines here.
|
||||
self._parser_lines_new = list(new_lines)
|
||||
self._parser_lines_new[-1] += '\n'
|
||||
self._parser_lines_new.append('')
|
||||
self._added_newline = True
|
||||
|
||||
self._reset()
|
||||
|
||||
line_length = len(lines_new)
|
||||
lines_old = splitlines(self._parser.source, keepends=True)
|
||||
sm = difflib.SequenceMatcher(None, lines_old, self._parser_lines_new)
|
||||
line_length = len(new_lines)
|
||||
sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new)
|
||||
opcodes = sm.get_opcodes()
|
||||
debug.speed('diff parser calculated')
|
||||
debug.dbg('diff: line_lengths old: %s, new: %s' % (len(lines_old), line_length))
|
||||
|
||||
if len(opcodes) == 1 and opcodes[0][0] == 'equal':
|
||||
self._copy_count = 1
|
||||
return self._module
|
||||
debug.dbg('diff: line_lengths old: %s, new: %s' % (len(old_lines), line_length))
|
||||
|
||||
for operation, i1, i2, j1, j2 in opcodes:
|
||||
debug.dbg('diff %s old[%s:%s] new[%s:%s]',
|
||||
@@ -185,19 +159,16 @@ class DiffParser(object):
|
||||
# changed module.
|
||||
self._nodes_stack.close()
|
||||
|
||||
self._cleanup()
|
||||
if self._added_newline:
|
||||
self._parser.remove_last_newline()
|
||||
|
||||
self._parser.source = ''.join(lines_new)
|
||||
_remove_last_newline(self._module)
|
||||
|
||||
# Good for debugging.
|
||||
if debug.debug_function:
|
||||
self._enable_debugging(lines_old, lines_new)
|
||||
self._enabled_debugging(old_lines, new_lines)
|
||||
last_pos = self._module.end_pos[0]
|
||||
if last_pos != line_length:
|
||||
current_lines = splitlines(self._module.get_code(), keepends=True)
|
||||
diff = difflib.unified_diff(current_lines, lines_new)
|
||||
diff = difflib.unified_diff(current_lines, new_lines)
|
||||
raise Exception(
|
||||
"There's an issue (%s != %s) with the diff parser. Please report:\n%s"
|
||||
% (last_pos, line_length, ''.join(diff))
|
||||
@@ -206,14 +177,15 @@ class DiffParser(object):
|
||||
debug.speed('diff parser end')
|
||||
return self._module
|
||||
|
||||
def _enable_debugging(self, lines_old, lines_new):
|
||||
def _enabled_debugging(self, old_lines, lines_new):
|
||||
if self._module.get_code() != ''.join(lines_new):
|
||||
debug.warning('parser issue:\n%s\n%s', repr(''.join(lines_old)),
|
||||
repr(''.join(lines_new)))
|
||||
debug.warning('parser issue:\n%s\n%s', ''.join(old_lines),
|
||||
''.join(lines_new))
|
||||
|
||||
def _copy_from_old_parser(self, line_offset, until_line_old, until_line_new):
|
||||
copied_nodes = [None]
|
||||
|
||||
last_until_line = -1
|
||||
while until_line_new > self._nodes_stack.parsed_until_line:
|
||||
parsed_until_line_old = self._nodes_stack.parsed_until_line - line_offset
|
||||
line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1)
|
||||
@@ -243,9 +215,13 @@ class DiffParser(object):
|
||||
|
||||
from_ = copied_nodes[0].get_start_pos_of_prefix()[0] + line_offset
|
||||
to = self._nodes_stack.parsed_until_line
|
||||
self._copied_ranges.append((from_, to))
|
||||
|
||||
debug.dbg('diff actually copy %s to %s', from_, to)
|
||||
# Since there are potential bugs that might loop here endlessly, we
|
||||
# just stop here.
|
||||
assert last_until_line != self._nodes_stack.parsed_until_line \
|
||||
or not copied_nodes, last_until_line
|
||||
last_until_line = self._nodes_stack.parsed_until_line
|
||||
|
||||
def _get_old_line_stmt(self, old_line):
|
||||
leaf = self._module.get_leaf_for_position((old_line, 0), include_prefixes=True)
|
||||
@@ -254,8 +230,6 @@ class DiffParser(object):
|
||||
leaf = leaf.get_next_leaf()
|
||||
if leaf.get_start_pos_of_prefix()[0] == old_line:
|
||||
node = leaf
|
||||
# TODO use leaf.get_definition one day when that one is working
|
||||
# well.
|
||||
while node.parent.type not in ('file_input', 'suite'):
|
||||
node = node.parent
|
||||
return node
|
||||
@@ -267,7 +241,7 @@ class DiffParser(object):
|
||||
return None
|
||||
|
||||
line = self._nodes_stack.parsed_until_line + 1
|
||||
node = self._new_module.last_leaf()
|
||||
node = self._new_module.get_last_leaf()
|
||||
while True:
|
||||
parent = node.parent
|
||||
if parent.type in ('suite', 'file_input'):
|
||||
@@ -281,6 +255,7 @@ class DiffParser(object):
|
||||
Parses at least until the given line, but might just parse more until a
|
||||
valid state is reached.
|
||||
"""
|
||||
last_until_line = 0
|
||||
while until_line > self._nodes_stack.parsed_until_line:
|
||||
node = self._try_parse_part(until_line)
|
||||
nodes = self._get_children_nodes(node)
|
||||
@@ -293,10 +268,11 @@ class DiffParser(object):
|
||||
self._nodes_stack.parsed_until_line,
|
||||
node.end_pos[0] - 1
|
||||
)
|
||||
_merge_used_names(
|
||||
self._new_used_names,
|
||||
node.used_names
|
||||
)
|
||||
# Since the tokenizer sometimes has bugs, we cannot be sure that
|
||||
# this loop terminates. Therefore assert that there's always a
|
||||
# change.
|
||||
assert last_until_line != self._nodes_stack.parsed_until_line, last_until_line
|
||||
last_until_line = self._nodes_stack.parsed_until_line
|
||||
|
||||
def _get_children_nodes(self, node):
|
||||
nodes = node.children
|
||||
@@ -320,37 +296,22 @@ class DiffParser(object):
|
||||
parsed_until_line = self._nodes_stack.parsed_until_line
|
||||
lines_after = self._parser_lines_new[parsed_until_line:]
|
||||
#print('parse_content', parsed_until_line, lines_after, until_line)
|
||||
tokenizer = self._diff_tokenize(
|
||||
tokens = self._diff_tokenize(
|
||||
lines_after,
|
||||
until_line,
|
||||
line_offset=parsed_until_line
|
||||
)
|
||||
self._active_parser = ParserWithRecovery(
|
||||
self._active_parser = Parser(
|
||||
self._grammar,
|
||||
source='\n',
|
||||
start_parsing=False
|
||||
error_recovery=True
|
||||
)
|
||||
return self._active_parser.parse(tokenizer=tokenizer)
|
||||
|
||||
def _cleanup(self):
|
||||
"""Add the used names from the old parser to the new one."""
|
||||
copied_line_numbers = set()
|
||||
for l1, l2 in self._copied_ranges:
|
||||
copied_line_numbers.update(range(l1, l2 + 1))
|
||||
|
||||
new_used_names = self._new_used_names
|
||||
for key, names in self._module.used_names.items():
|
||||
for name in names:
|
||||
if name.line in copied_line_numbers:
|
||||
new_used_names.setdefault(key, []).append(name)
|
||||
self._module.used_names = new_used_names
|
||||
return self._active_parser.parse(tokens=tokens)
|
||||
|
||||
def _diff_tokenize(self, lines, until_line, line_offset=0):
|
||||
is_first_token = True
|
||||
omitted_first_indent = False
|
||||
indents = []
|
||||
l = iter(lines)
|
||||
tokens = generate_tokens(lambda: next(l, ''), use_exact_op_types=True)
|
||||
tokens = generate_tokens(lines, use_exact_op_types=True)
|
||||
stack = self._active_parser.pgen_parser.stack
|
||||
for typ, string, start_pos, prefix in tokens:
|
||||
start_pos = start_pos[0] + line_offset, start_pos[1]
|
||||
@@ -422,20 +383,24 @@ class _NodesStackNode(object):
|
||||
self.children_groups.append(group)
|
||||
|
||||
def get_last_line(self, suffix):
|
||||
if not self.children_groups:
|
||||
assert not self.parent
|
||||
return 0
|
||||
line = 0
|
||||
if self.children_groups:
|
||||
children_group = self.children_groups[-1]
|
||||
last_leaf = children_group.children[-1].get_last_leaf()
|
||||
line = last_leaf.end_pos[0]
|
||||
|
||||
last_leaf = self.children_groups[-1].children[-1].last_leaf()
|
||||
line = last_leaf.end_pos[0]
|
||||
# Calculate the line offsets
|
||||
offset = children_group.line_offset
|
||||
if offset:
|
||||
# In case the line_offset is not applied to this specific leaf,
|
||||
# just ignore it.
|
||||
if last_leaf.line <= children_group.last_line_offset_leaf.line:
|
||||
line += children_group.line_offset
|
||||
|
||||
# Calculate the line offsets
|
||||
line += self.children_groups[-1].line_offset
|
||||
|
||||
# Newlines end on the next line, which means that they would cover
|
||||
# the next line. That line is not fully parsed at this point.
|
||||
if _ends_with_newline(last_leaf, suffix):
|
||||
line -= 1
|
||||
# Newlines end on the next line, which means that they would cover
|
||||
# the next line. That line is not fully parsed at this point.
|
||||
if _ends_with_newline(last_leaf, suffix):
|
||||
line -= 1
|
||||
line += suffix.count('\n')
|
||||
return line
|
||||
|
||||
@@ -454,7 +419,7 @@ class _NodesStack(object):
|
||||
return not self._base_node.children
|
||||
|
||||
@property
|
||||
def parsed_until_line(self, ):
|
||||
def parsed_until_line(self):
|
||||
return self._tos.get_last_line(self.prefix)
|
||||
|
||||
def _get_insertion_node(self, indentation_node):
|
||||
@@ -500,7 +465,7 @@ class _NodesStack(object):
|
||||
"""
|
||||
Helps cleaning up the tree nodes that get inserted.
|
||||
"""
|
||||
last_leaf = tree_nodes[-1].last_leaf()
|
||||
last_leaf = tree_nodes[-1].get_last_leaf()
|
||||
is_endmarker = last_leaf.type == self.endmarker_type
|
||||
self._last_prefix = ''
|
||||
if is_endmarker:
|
||||
@@ -515,7 +480,7 @@ class _NodesStack(object):
|
||||
last_leaf.prefix, self._last_prefix = \
|
||||
last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:]
|
||||
|
||||
first_leaf = tree_nodes[0].first_leaf()
|
||||
first_leaf = tree_nodes[0].get_first_leaf()
|
||||
first_leaf.prefix = self.prefix + first_leaf.prefix
|
||||
self.prefix = ''
|
||||
|
||||
@@ -592,13 +557,13 @@ class _NodesStack(object):
|
||||
new_nodes.pop()
|
||||
while new_nodes:
|
||||
last_node = new_nodes[-1]
|
||||
if last_node.last_leaf().type == 'newline':
|
||||
if last_node.get_last_leaf().type == 'newline':
|
||||
break
|
||||
new_nodes.pop()
|
||||
|
||||
if new_nodes:
|
||||
try:
|
||||
last_line_offset_leaf = new_nodes[line_offset_index].last_leaf()
|
||||
last_line_offset_leaf = new_nodes[line_offset_index].get_last_leaf()
|
||||
except IndexError:
|
||||
line_offset = 0
|
||||
# In this case we don't have to calculate an offset, because
|
||||
@@ -621,7 +586,7 @@ class _NodesStack(object):
|
||||
|
||||
# Add an endmarker.
|
||||
try:
|
||||
last_leaf = self._module.last_leaf()
|
||||
last_leaf = self._module.get_last_leaf()
|
||||
end_pos = list(last_leaf.end_pos)
|
||||
except IndexError:
|
||||
end_pos = [1, 0]
|
||||
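The reworked DiffParser above is now built from a grammar plus an already parsed module, and update() receives both the old and the new lines instead of digging the old source out of a parser object. A minimal sketch, keeping in mind the docstring's warning that the given module is changed in place:

from jedi.common import splitlines
from jedi.parser.python import load_grammar, parse
from jedi.parser.python.diff import DiffParser

old_code = "a = 1\n"
new_code = "a = 1\nb = 2\n"

grammar = load_grammar()
module = parse(old_code, grammar=grammar)

# update() mutates `module` and also returns it.
new_module = DiffParser(grammar, module).update(
    old_lines=splitlines(old_code, keepends=True),
    new_lines=splitlines(new_code, keepends=True),
)
print(new_module.get_code() == new_code)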
245
jedi/parser/python/parser.py
Normal file
@@ -0,0 +1,245 @@
|
||||
from jedi.parser.python import tree
|
||||
from jedi.parser import tokenize
|
||||
from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
|
||||
STRING, tok_name)
|
||||
from jedi.parser.parser import BaseParser
|
||||
from jedi.common import splitlines
|
||||
|
||||
|
||||
class Parser(BaseParser):
|
||||
"""
|
||||
This class is used to parse a Python file, it then divides them into a
|
||||
class structure of different scopes.
|
||||
|
||||
:param grammar: The grammar object of pgen2. Loaded by load_grammar.
|
||||
"""
|
||||
|
||||
node_map = {
|
||||
'expr_stmt': tree.ExprStmt,
|
||||
'classdef': tree.Class,
|
||||
'funcdef': tree.Function,
|
||||
'file_input': tree.Module,
|
||||
'import_name': tree.ImportName,
|
||||
'import_from': tree.ImportFrom,
|
||||
'break_stmt': tree.KeywordStatement,
|
||||
'continue_stmt': tree.KeywordStatement,
|
||||
'return_stmt': tree.ReturnStmt,
|
||||
'raise_stmt': tree.KeywordStatement,
|
||||
'yield_expr': tree.YieldExpr,
|
||||
'del_stmt': tree.KeywordStatement,
|
||||
'pass_stmt': tree.KeywordStatement,
|
||||
'global_stmt': tree.GlobalStmt,
|
||||
'nonlocal_stmt': tree.KeywordStatement,
|
||||
'print_stmt': tree.KeywordStatement,
|
||||
'assert_stmt': tree.AssertStmt,
|
||||
'if_stmt': tree.IfStmt,
|
||||
'with_stmt': tree.WithStmt,
|
||||
'for_stmt': tree.ForStmt,
|
||||
'while_stmt': tree.WhileStmt,
|
||||
'try_stmt': tree.TryStmt,
|
||||
'comp_for': tree.CompFor,
|
||||
'decorator': tree.Decorator,
|
||||
'lambdef': tree.Lambda,
|
||||
'old_lambdef': tree.Lambda,
|
||||
'lambdef_nocond': tree.Lambda,
|
||||
}
|
||||
default_node = tree.PythonNode
|
||||
|
||||
def __init__(self, grammar, error_recovery=True, start_symbol='file_input'):
|
||||
super(Parser, self).__init__(grammar, start_symbol, error_recovery=error_recovery)
|
||||
|
||||
self.syntax_errors = []
|
||||
self._omit_dedent_list = []
|
||||
self._indent_counter = 0
|
||||
|
||||
# TODO do print absolute import detection here.
|
||||
# try:
|
||||
# del python_grammar_no_print_statement.keywords["print"]
|
||||
# except KeyError:
|
||||
# pass # Doesn't exist in the Python 3 grammar.
|
||||
|
||||
# if self.options["print_function"]:
|
||||
# python_grammar = pygram.python_grammar_no_print_statement
|
||||
# else:
|
||||
|
||||
def parse(self, tokens):
|
||||
if self._error_recovery:
|
||||
if self._start_symbol != 'file_input':
|
||||
raise NotImplementedError
|
||||
|
||||
tokens = self._recovery_tokenize(tokens)
|
||||
|
||||
node = super(Parser, self).parse(tokens)
|
||||
|
||||
if self._start_symbol == 'file_input' != node.type:
|
||||
# If there's only one statement, we get back a non-module. That's
|
||||
# not what we want, we want a module, so we add it here:
|
||||
node = self.convert_node(
|
||||
self._grammar,
|
||||
self._grammar.symbol2number['file_input'],
|
||||
[node]
|
||||
)
|
||||
|
||||
return node
|
||||
|
||||
def convert_node(self, grammar, type, children):
|
||||
"""
|
||||
Convert raw node information to a PythonBaseNode instance.
|
||||
|
||||
This is passed to the parser driver which calls it whenever a reduction of a
|
||||
grammar rule produces a new complete node, so that the tree is build
|
||||
strictly bottom-up.
|
||||
"""
|
||||
# TODO REMOVE symbol, we don't want type here.
|
||||
symbol = grammar.number2symbol[type]
|
||||
try:
|
||||
return self.node_map[symbol](children)
|
||||
except KeyError:
|
||||
if symbol == 'suite':
|
||||
# We don't want the INDENT/DEDENT in our parser tree. Those
|
||||
# leaves are just cancer. They are virtual leaves and not real
|
||||
# ones and therefore have pseudo start/end positions and no
|
||||
# prefixes. Just ignore them.
|
||||
children = [children[0]] + children[2:-1]
|
||||
return self.default_node(symbol, children)
|
||||
|
||||
def convert_leaf(self, grammar, type, value, prefix, start_pos):
|
||||
# print('leaf', repr(value), token.tok_name[type])
|
||||
if type == tokenize.NAME:
|
||||
if value in grammar.keywords:
|
||||
return tree.Keyword(value, start_pos, prefix)
|
||||
else:
|
||||
return tree.Name(value, start_pos, prefix)
|
||||
elif type == STRING:
|
||||
return tree.String(value, start_pos, prefix)
|
||||
elif type == NUMBER:
|
||||
return tree.Number(value, start_pos, prefix)
|
||||
elif type == NEWLINE:
|
||||
return tree.Newline(value, start_pos, prefix)
|
||||
elif type == ENDMARKER:
|
||||
return tree.EndMarker(value, start_pos, prefix)
|
||||
else:
|
||||
return tree.Operator(value, start_pos, prefix)
|
||||
|
||||
def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,
|
||||
add_token_callback):
|
||||
"""
|
||||
This parser is written in a dynamic way, meaning that this parser
|
||||
allows using different grammars (even non-Python). However, error
|
||||
recovery is purely written for Python.
|
||||
"""
|
||||
if not self._error_recovery:
|
||||
return super(Parser, self).error_recovery(
|
||||
grammar, stack, arcs, typ, value, start_pos, prefix,
|
||||
add_token_callback)
|
||||
|
||||
def current_suite(stack):
|
||||
# For now just discard everything that is not a suite or
|
||||
# file_input, if we detect an error.
|
||||
for index, (dfa, state, (type_, nodes)) in reversed(list(enumerate(stack))):
|
||||
# `suite` can sometimes be only simple_stmt, not stmt.
|
||||
symbol = grammar.number2symbol[type_]
|
||||
if symbol == 'file_input':
|
||||
break
|
||||
elif symbol == 'suite' and len(nodes) > 1:
|
||||
# suites without an indent in them get discarded.
|
||||
break
|
||||
elif symbol == 'simple_stmt' and len(nodes) > 1:
|
||||
# simple_stmt can just be turned into a PythonNode, if
|
||||
# there are enough statements. Ignore the rest after that.
|
||||
break
|
||||
return index, symbol, nodes
|
||||
|
||||
index, symbol, nodes = current_suite(stack)
|
||||
if symbol == 'simple_stmt':
|
||||
index -= 2
|
||||
(_, _, (type_, suite_nodes)) = stack[index]
|
||||
symbol = grammar.number2symbol[type_]
|
||||
suite_nodes.append(tree.PythonNode(symbol, list(nodes)))
|
||||
# Remove
|
||||
nodes[:] = []
|
||||
nodes = suite_nodes
|
||||
stack[index]
|
||||
|
||||
# print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index)
|
||||
if self._stack_removal(grammar, stack, arcs, index + 1, value, start_pos):
|
||||
add_token_callback(typ, value, start_pos, prefix)
|
||||
else:
|
||||
if typ == INDENT:
|
||||
# For every deleted INDENT we have to delete a DEDENT as well.
|
||||
# Otherwise the parser will get into trouble and DEDENT too early.
|
||||
self._omit_dedent_list.append(self._indent_counter)
|
||||
else:
|
||||
error_leaf = tree.PythonErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix)
|
||||
stack[-1][2][1].append(error_leaf)
|
||||
|
||||
def _stack_removal(self, grammar, stack, arcs, start_index, value, start_pos):
|
||||
failed_stack = []
|
||||
found = False
|
||||
all_nodes = []
|
||||
for dfa, state, (typ, nodes) in stack[start_index:]:
|
||||
if nodes:
|
||||
found = True
|
||||
if found:
|
||||
symbol = grammar.number2symbol[typ]
|
||||
failed_stack.append((symbol, nodes))
|
||||
all_nodes += nodes
|
||||
if failed_stack:
|
||||
stack[start_index - 1][2][1].append(tree.PythonErrorNode(all_nodes))
|
||||
|
||||
stack[start_index:] = []
|
||||
return failed_stack
|
||||
|
||||
def _recovery_tokenize(self, tokens):
|
||||
for typ, value, start_pos, prefix in tokens:
|
||||
# print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
|
||||
if typ == DEDENT:
|
||||
# We need to count indents, because if we just omit any DEDENT,
|
||||
# we might omit them in the wrong place.
|
||||
o = self._omit_dedent_list
|
||||
if o and o[-1] == self._indent_counter:
|
||||
o.pop()
|
||||
continue
|
||||
|
||||
self._indent_counter -= 1
|
||||
elif typ == INDENT:
|
||||
self._indent_counter += 1
|
||||
|
||||
yield typ, value, start_pos, prefix
|
||||
|
||||
|
||||
def _remove_last_newline(node):
|
||||
endmarker = node.children[-1]
|
||||
# The newline is either in the endmarker as a prefix or the previous
|
||||
# leaf as a newline token.
|
||||
prefix = endmarker.prefix
|
||||
leaf = endmarker.get_previous_leaf()
|
||||
if prefix:
|
||||
text = prefix
|
||||
else:
|
||||
if leaf is None:
|
||||
raise ValueError("You're trying to remove a newline from an empty module.")
|
||||
|
||||
text = leaf.value
|
||||
|
||||
if not text.endswith('\n'):
|
||||
raise ValueError("There's no newline at the end, cannot remove it.")
|
||||
|
||||
text = text[:-1]
|
||||
if prefix:
|
||||
endmarker.prefix = text
|
||||
|
||||
if leaf is None:
|
||||
end_pos = (1, 0)
|
||||
else:
|
||||
end_pos = leaf.end_pos
|
||||
|
||||
lines = splitlines(text, keepends=True)
|
||||
if len(lines) == 1:
|
||||
end_pos = end_pos[0], end_pos[1] + len(lines[0])
|
||||
else:
|
||||
end_pos = end_pos[0] + len(lines) - 1, len(lines[-1])
|
||||
endmarker.start_pos = end_pos
|
||||
else:
|
||||
leaf.value = text
|
||||
endmarker.start_pos = leaf.end_pos
|
||||
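The new Python-specific Parser above no longer tokenizes by itself; parse() in jedi/parser/python/__init__.py hands it a token stream. A minimal sketch of driving it directly in the same way, with a throwaway code string:

from jedi.common import splitlines
from jedi.parser.python import load_grammar
from jedi.parser.python.parser import Parser
from jedi.parser.tokenize import generate_tokens

code = "x = 1\n"
lines = splitlines(code, keepends=True)   # keepends, trailing '' included

grammar = load_grammar()
tokens = generate_tokens(lines, use_exact_op_types=True)

parser = Parser(grammar, error_recovery=True, start_symbol='file_input')
module = parser.parse(tokens=tokens)
print(module.get_code() == code)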
1288
jedi/parser/python/tree.py
Normal file
File diff suppressed because it is too large
@@ -14,12 +14,11 @@ from __future__ import absolute_import
|
||||
import string
|
||||
import re
|
||||
from collections import namedtuple
|
||||
from io import StringIO
|
||||
import itertools as _itertools
|
||||
|
||||
from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
|
||||
NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
|
||||
from jedi._compatibility import is_py3, py_version
|
||||
from jedi._compatibility import is_py3, py_version, u
|
||||
from jedi.common import splitlines
|
||||
|
||||
|
||||
@@ -165,10 +164,10 @@ for _prefix in _all_string_prefixes():
|
||||
single_quoted = set()
|
||||
triple_quoted = set()
|
||||
for t in _all_string_prefixes():
|
||||
for u in (t + '"', t + "'"):
|
||||
single_quoted.add(u)
|
||||
for u in (t + '"""', t + "'''"):
|
||||
triple_quoted.add(u)
|
||||
for p in (t + '"', t + "'"):
|
||||
single_quoted.add(p)
|
||||
for p in (t + '"""', t + "'''"):
|
||||
triple_quoted.add(p)
|
||||
|
||||
|
||||
# TODO add with?
|
||||
@@ -179,9 +178,15 @@ pseudo_token_compiled = _compile(PseudoToken)
|
||||
|
||||
class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
|
||||
def __repr__(self):
|
||||
annotated_type = tok_name[self.type]
|
||||
return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' %
|
||||
self._replace(type=annotated_type))
|
||||
self._replace(type=self.get_type_name()))
|
||||
|
||||
def get_type_name(self, exact=True):
|
||||
if exact:
|
||||
typ = self.exact_type
|
||||
else:
|
||||
typ = self.type
|
||||
return tok_name[typ]
|
||||
|
||||
@property
|
||||
def exact_type(self):
|
||||
@@ -201,12 +206,11 @@ class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
|
||||
|
||||
def source_tokens(source, use_exact_op_types=False):
|
||||
"""Generate tokens from a the source code (string)."""
|
||||
source = source
|
||||
readline = StringIO(source).readline
|
||||
return generate_tokens(readline, use_exact_op_types)
|
||||
lines = splitlines(source, keepends=True)
|
||||
return generate_tokens(lines, use_exact_op_types)
|
||||
|
||||
|
||||
def generate_tokens(readline, use_exact_op_types=False):
|
||||
def generate_tokens(lines, use_exact_op_types=False):
|
||||
"""
|
||||
A heavily modified Python standard library tokenizer.
|
||||
|
||||
@@ -216,7 +220,6 @@ def generate_tokens(readline, use_exact_op_types=False):
|
||||
"""
|
||||
paren_level = 0 # count parentheses
|
||||
indents = [0]
|
||||
lnum = 0
|
||||
max = 0
|
||||
numchars = '0123456789'
|
||||
contstr = ''
|
||||
@@ -228,14 +231,7 @@ def generate_tokens(readline, use_exact_op_types=False):
|
||||
new_line = True
|
||||
prefix = '' # Should never be required, but here for safety
|
||||
additional_prefix = ''
|
||||
while True: # loop over lines in stream
|
||||
line = readline() # readline returns empty when finished. See StringIO
|
||||
if not line:
|
||||
if contstr:
|
||||
yield TokenInfo(ERRORTOKEN, contstr, contstr_start, prefix)
|
||||
break
|
||||
|
||||
lnum += 1
|
||||
for lnum, line in enumerate(lines, 1): # loop over lines in stream
|
||||
pos, max = 0, len(line)
|
||||
|
||||
if contstr: # continued string
|
||||
@@ -253,11 +249,7 @@ def generate_tokens(readline, use_exact_op_types=False):
|
||||
while pos < max:
|
||||
pseudomatch = pseudo_token_compiled.match(line, pos)
|
||||
if not pseudomatch: # scan for tokens
|
||||
txt = line[pos]
|
||||
if line[pos] in '"\'':
|
||||
# If a literal starts but doesn't end the whole rest of the
|
||||
# line is an error token.
|
||||
txt = line[pos:]
|
||||
txt = line[pos:]
|
||||
if txt.endswith('\n'):
|
||||
new_line = True
|
||||
yield TokenInfo(ERRORTOKEN, txt, (lnum, pos), prefix)
|
||||
@@ -267,7 +259,8 @@ def generate_tokens(readline, use_exact_op_types=False):
|
||||
additional_prefix = ''
|
||||
start, pos = pseudomatch.span(2)
|
||||
spos = (lnum, start)
|
||||
token, initial = line[start:pos], line[start]
|
||||
token = pseudomatch.group(2)
|
||||
initial = token[0]
|
||||
|
||||
if new_line and initial not in '\r\n#':
|
||||
new_line = False
|
||||
@@ -351,12 +344,26 @@ def generate_tokens(readline, use_exact_op_types=False):
|
||||
typ = OP
|
||||
yield TokenInfo(typ, token, spos, prefix)
|
||||
|
||||
if new_line or additional_prefix[-1:] == '\n':
|
||||
end_pos = lnum + 1, 0
|
||||
else:
|
||||
end_pos = lnum, max
|
||||
if contstr:
|
||||
yield TokenInfo(ERRORTOKEN, contstr, contstr_start, prefix)
|
||||
if contstr.endswith('\n'):
|
||||
new_line = True
|
||||
|
||||
end_pos = lnum, max
|
||||
# As the last position we just take the maximally possible position. We
|
||||
# remove -1 for the last new line.
|
||||
for indent in indents[1:]:
|
||||
yield TokenInfo(DEDENT, '', end_pos, '')
|
||||
yield TokenInfo(ENDMARKER, '', end_pos, additional_prefix)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
if len(sys.argv) >= 2:
|
||||
path = sys.argv[1]
|
||||
with open(path) as f:
|
||||
code = u(f.read())
|
||||
else:
|
||||
code = u(sys.stdin.read())
|
||||
for token in source_tokens(code, use_exact_op_types=True):
|
||||
print(token)
|
||||
|
||||
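With the tokenizer now iterating over a list of lines, source_tokens() simply splits the source itself instead of wrapping it in StringIO. A small sketch of looping over the resulting TokenInfo tuples (the input string is just an example):

from jedi.parser.tokenize import source_tokens

for token in source_tokens("x = 1\n", use_exact_op_types=True):
    # TokenInfo is a namedtuple of (type, string, start_pos, prefix).
    print(token.get_type_name(), repr(token.string), token.start_pos, repr(token.prefix))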
1376
jedi/parser/tree.py
File diff suppressed because it is too large
@@ -1,197 +0,0 @@
|
||||
import inspect
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import hashlib
|
||||
import gc
|
||||
import shutil
|
||||
import pickle
|
||||
|
||||
from jedi import settings
|
||||
from jedi import debug
|
||||
|
||||
|
||||
def underscore_memoization(func):
|
||||
"""
|
||||
Decorator for methods::
|
||||
|
||||
class A(object):
|
||||
def x(self):
|
||||
if self._x:
|
||||
self._x = 10
|
||||
return self._x
|
||||
|
||||
Becomes::
|
||||
|
||||
class A(object):
|
||||
@underscore_memoization
|
||||
def x(self):
|
||||
return 10
|
||||
|
||||
A now has an attribute ``_x`` written by this decorator.
|
||||
"""
|
||||
name = '_' + func.__name__
|
||||
|
||||
def wrapper(self):
|
||||
try:
|
||||
return getattr(self, name)
|
||||
except AttributeError:
|
||||
result = func(self)
|
||||
if inspect.isgenerator(result):
|
||||
result = list(result)
|
||||
setattr(self, name, result)
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
# for fast_parser, should not be deleted
|
||||
parser_cache = {}
|
||||
|
||||
|
||||
class ParserCacheItem(object):
|
||||
def __init__(self, parser, change_time=None):
|
||||
self.parser = parser
|
||||
if change_time is None:
|
||||
change_time = time.time()
|
||||
self.change_time = change_time
|
||||
|
||||
|
||||
def load_parser(path):
|
||||
"""
|
||||
Returns the module or None, if it fails.
|
||||
"""
|
||||
p_time = os.path.getmtime(path) if path else None
|
||||
try:
|
||||
parser_cache_item = parser_cache[path]
|
||||
if not path or p_time <= parser_cache_item.change_time:
|
||||
return parser_cache_item.parser
|
||||
except KeyError:
|
||||
if settings.use_filesystem_cache:
|
||||
return ParserPickling.load_parser(path, p_time)
|
||||
|
||||
|
||||
def save_parser(path, parser, pickling=True):
|
||||
try:
|
||||
p_time = None if path is None else os.path.getmtime(path)
|
||||
except OSError:
|
||||
p_time = None
|
||||
pickling = False
|
||||
|
||||
item = ParserCacheItem(parser, p_time)
|
||||
parser_cache[path] = item
|
||||
if settings.use_filesystem_cache and pickling:
|
||||
ParserPickling.save_parser(path, item)
|
||||
|
||||
|
||||
class ParserPickling(object):
|
||||
version = 27
|
||||
"""
|
||||
Version number (integer) for file system cache.
|
||||
|
||||
Increment this number when there are any incompatible changes in
|
||||
parser representation classes. For example, the following changes
|
||||
are regarded as incompatible.
|
||||
|
||||
- Class name is changed.
|
||||
- Class is moved to another module.
|
||||
- Defined slot of the class is changed.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.__index = None
|
||||
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
|
||||
"""
|
||||
Short name for distinguish Python implementations and versions.
|
||||
|
||||
It's like `sys.implementation.cache_tag` but for Python < 3.3
|
||||
we generate something similar. See:
|
||||
http://docs.python.org/3/library/sys.html#sys.implementation
|
||||
|
||||
.. todo:: Detect interpreter (e.g., PyPy).
|
||||
"""
|
||||
|
||||
def load_parser(self, path, original_changed_time):
|
||||
try:
|
||||
pickle_changed_time = self._index[path]
|
||||
except KeyError:
|
||||
return None
|
||||
if original_changed_time is not None \
|
||||
and pickle_changed_time < original_changed_time:
|
||||
# the pickle file is outdated
|
||||
return None
|
||||
|
||||
with open(self._get_hashed_path(path), 'rb') as f:
|
||||
try:
|
||||
gc.disable()
|
||||
parser_cache_item = pickle.load(f)
|
||||
finally:
|
||||
gc.enable()
|
||||
|
||||
debug.dbg('pickle loaded: %s', path)
|
||||
parser_cache[path] = parser_cache_item
|
||||
return parser_cache_item.parser
|
||||
|
||||
def save_parser(self, path, parser_cache_item):
|
||||
self.__index = None
|
||||
try:
|
||||
files = self._index
|
||||
except KeyError:
|
||||
files = {}
|
||||
self._index = files
|
||||
|
||||
with open(self._get_hashed_path(path), 'wb') as f:
|
||||
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
|
||||
files[path] = parser_cache_item.change_time
|
||||
|
||||
self._flush_index()
|
||||
|
||||
@property
|
||||
def _index(self):
|
||||
if self.__index is None:
|
||||
try:
|
||||
with open(self._get_path('index.json')) as f:
|
||||
data = json.load(f)
|
||||
except (IOError, ValueError):
|
||||
self.__index = {}
|
||||
else:
|
||||
# 0 means version is not defined (= always delete cache):
|
||||
if data.get('version', 0) != self.version:
|
||||
self.clear_cache()
|
||||
else:
|
||||
self.__index = data['index']
|
||||
return self.__index
|
||||
|
||||
def _remove_old_modules(self):
|
||||
# TODO use
|
||||
change = False
|
||||
if change:
|
||||
self._flush_index(self)
|
||||
self._index # reload index
|
||||
|
||||
def _flush_index(self):
|
||||
data = {'version': self.version, 'index': self._index}
|
||||
with open(self._get_path('index.json'), 'w') as f:
|
||||
json.dump(data, f)
|
||||
self.__index = None
|
||||
|
||||
def clear_cache(self):
|
||||
shutil.rmtree(self._cache_directory())
|
||||
self.__index = {}
|
||||
|
||||
def _get_hashed_path(self, path):
|
||||
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
|
||||
|
||||
def _get_path(self, file):
|
||||
dir = self._cache_directory()
|
||||
if not os.path.exists(dir):
|
||||
os.makedirs(dir)
|
||||
return os.path.join(dir, file)
|
||||
|
||||
def _cache_directory(self):
|
||||
return os.path.join(settings.cache_directory, self.py_tag)
|
||||
|
||||
|
||||
# is a singleton
|
||||
ParserPickling = ParserPickling()
|
||||
@@ -16,7 +16,7 @@ import difflib
|
||||
|
||||
from jedi import common
|
||||
from jedi.evaluate import helpers
|
||||
from jedi.parser import tree as pt
|
||||
from jedi.parser.python import tree as pt
|
||||
|
||||
|
||||
class Refactoring(object):
|
||||
|
||||
@@ -68,7 +68,6 @@ definitely worse in some cases. But a completion should also be fast.
|
||||
Caching
|
||||
~~~~~~~
|
||||
|
||||
.. autodata:: star_import_cache_validity
|
||||
.. autodata:: call_signatures_validity
|
||||
|
||||
|
||||
@@ -217,13 +216,6 @@ scale `max_executions` and `max_until_execution_unique`:
|
||||
# caching validity (time)
|
||||
# ----------------
|
||||
|
||||
star_import_cache_validity = 60.0
|
||||
"""
|
||||
In huge packages like numpy, checking all star imports on every completion
|
||||
might be slow, therefore we do a star import caching, that lasts a certain
|
||||
time span (in seconds).
|
||||
"""
|
||||
|
||||
call_signatures_validity = 3.0
|
||||
"""
|
||||
Finding function calls might be slow (0.1-0.5s). This is not acceptable for
|
||||
|
||||
@@ -9,15 +9,15 @@ Usage:
|
||||
Options:
|
||||
-h --help Show this screen.
|
||||
-d --debug Enable Jedi internal debugging.
|
||||
-s <sort> Sort the profile results, e.g. cum, name [default: time].
|
||||
-s <sort> Sort the profile results, e.g. cumtime, name [default: time].
|
||||
"""
|
||||
|
||||
import cProfile
|
||||
|
||||
from docopt import docopt
|
||||
from jedi.parser import load_grammar
|
||||
from jedi.parser.python import load_grammar
|
||||
from jedi.parser.diff import DiffParser
|
||||
from jedi.parser import ParserWithRecovery
|
||||
from jedi.parser.python import ParserWithRecovery
|
||||
from jedi._compatibility import u
|
||||
from jedi.common import splitlines
|
||||
import jedi
|
||||
@@ -26,14 +26,20 @@ import jedi
|
||||
def run(parser, lines):
|
||||
diff_parser = DiffParser(parser)
|
||||
diff_parser.update(lines)
|
||||
# Make sure used_names is loaded
|
||||
parser.module.used_names
|
||||
|
||||
|
||||
def main(args):
|
||||
jedi.set_debug_function(notices=args['--debug'])
|
||||
if args['--debug']:
|
||||
jedi.set_debug_function(notices=True)
|
||||
|
||||
with open(args['<file>']) as f:
|
||||
code = f.read()
|
||||
grammar = load_grammar()
|
||||
parser = ParserWithRecovery(grammar, u(code))
|
||||
# Make sure used_names is loaded
|
||||
parser.module.used_names
|
||||
|
||||
code = code + '\na\n' # Add something so the diff parser needs to run.
|
||||
lines = splitlines(code, keepends=True)
|
||||
|
||||
@@ -45,5 +45,5 @@ def main(args):
|
||||
if __name__ == '__main__':
|
||||
args = docopt(__doc__)
|
||||
if args['<code>'] is None:
|
||||
args['<code>'] = 'import numpy; numpy.array([0])'
|
||||
args['<code>'] = 'import numpy; numpy.array([0]).'
|
||||
main(args)
|
||||
3
setup.py
@@ -11,7 +11,7 @@ __AUTHOR__ = 'David Halter'
|
||||
__AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'
|
||||
|
||||
readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read()
|
||||
packages = ['jedi', 'jedi.parser', 'jedi.parser.pgen2',
|
||||
packages = ['jedi', 'jedi.parser', 'jedi.parser.pgen2', 'jedi.parser.python',
|
||||
'jedi.evaluate', 'jedi.evaluate.compiled', 'jedi.api']
|
||||
|
||||
import jedi
|
||||
@@ -21,6 +21,7 @@ setup(name='jedi',
|
||||
description='An autocompletion tool for Python that can be used for text editors.',
|
||||
author=__AUTHOR__,
|
||||
author_email=__AUTHOR_EMAIL__,
|
||||
include_package_data=True,
|
||||
maintainer=__AUTHOR__,
|
||||
maintainer_email=__AUTHOR_EMAIL__,
|
||||
url='https://github.com/davidhalter/jedi',
|
||||
|
||||
@@ -493,3 +493,19 @@ B().a
|
||||
B.b
|
||||
#? int()
|
||||
B().b
|
||||
|
||||
|
||||
# -----------------
|
||||
# With import
|
||||
# -----------------
|
||||
|
||||
from import_tree.classes import Config2, BaseClass
|
||||
|
||||
class Config(BaseClass):
|
||||
"""#884"""
|
||||
|
||||
#? Config2()
|
||||
Config.mode
|
||||
|
||||
#? int()
|
||||
Config.mode2
|
||||
|
||||
@@ -304,3 +304,15 @@ class A():
|
||||
|
||||
#? int()
|
||||
A().ret()
|
||||
|
||||
|
||||
# -----------------
|
||||
# On decorator completions
|
||||
# -----------------
|
||||
|
||||
import abc
|
||||
#? ['abc']
|
||||
@abc
|
||||
|
||||
#? ['abstractmethod']
|
||||
@abc.abstractmethod
|
||||
|
||||
10
test/completion/import_tree/classes.py
Normal file
@@ -0,0 +1,10 @@
|
||||
blub = 1
|
||||
|
||||
class Config2():
|
||||
pass
|
||||
|
||||
|
||||
class BaseClass():
|
||||
mode = Config2()
|
||||
if isinstance(whaat, int):
|
||||
mode2 = whaat
|
||||
@@ -69,7 +69,7 @@ from import_tree.pkg.mod1 import not_existant, # whitespace before
|
||||
from import_tree.pkg.mod1 import not_existant,
|
||||
#? 22 ['mod1']
|
||||
from import_tree.pkg. import mod1
|
||||
#? 17 ['mod1', 'mod2', 'random', 'pkg', 'rename1', 'rename2', 'recurse_class1', 'recurse_class2', 'invisible_pkg', 'flow_import']
|
||||
#? 17 ['mod1', 'mod2', 'random', 'pkg', 'rename1', 'rename2', 'classes', 'recurse_class1', 'recurse_class2', 'invisible_pkg', 'flow_import']
|
||||
from import_tree. import pkg
|
||||
|
||||
#? 18 ['pkg']
|
||||
|
||||
@@ -158,3 +158,10 @@ Y = int
|
||||
def just_because_we_can(x: "flo" + "at"):
|
||||
#? float()
|
||||
x
|
||||
|
||||
|
||||
def keyword_only(a: str, *, b: str):
|
||||
#? ['startswith']
|
||||
a.startswi
|
||||
#? ['startswith']
|
||||
b.startswi
|
||||
|
||||
@@ -83,17 +83,18 @@ import module_not_exists
|
||||
module_not_exists
|
||||
|
||||
|
||||
#< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (10,17), (13,17), ('imports', 72, 16)
|
||||
#< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (11,17), (14,17), ('imports', 72, 16)
|
||||
from import_tree import rename1
|
||||
|
||||
#< (0,8), ('rename1',3,0), ('rename2',4,20), ('rename2',6,0), (3,32), (7,32), (4,0)
|
||||
#< (0,8), ('rename1',3,0), ('rename2',4,20), ('rename2',6,0), (3,32), (8,32), (5,0)
|
||||
rename1.abc
|
||||
|
||||
#< (-3,8), ('rename1', 3,0), ('rename2', 4,20), ('rename2', 6,0), (0,32), (4,32), (1,0)
|
||||
#< (-3,8), ('rename1', 3,0), ('rename2', 4,20), ('rename2', 6,0), (0,32), (5,32), (2,0)
|
||||
from import_tree.rename1 import abc
|
||||
#< (-5,8), (-2,32), ('rename1', 3,0), ('rename2', 4,20), ('rename2', 6,0), (0,0), (3,32)
|
||||
abc
|
||||
|
||||
#< 20 ('rename1', 1,0), ('rename2', 4,5), (-10,24), (-7,0), (-4,17), (0,17), (3,17), ('imports', 72, 16)
|
||||
#< 20 ('rename1', 1,0), ('rename2', 4,5), (-11,24), (-8,0), (-5,17), (0,17), (3,17), ('imports', 72, 16)
|
||||
from import_tree.rename1 import abc
|
||||
|
||||
#< (0, 32),
|
||||
|
||||
@@ -131,3 +131,9 @@ def isolated_jedi_cache(monkeypatch, tmpdir):
|
||||
each test case (scope='function').
|
||||
"""
|
||||
monkeypatch.setattr(settings, 'cache_directory', str(tmpdir))
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def cwd_tmpdir(monkeypatch, tmpdir):
|
||||
with helpers.set_cwd(tmpdir.dirpath):
|
||||
yield tmpdir
|
||||
|
||||
@@ -4,6 +4,8 @@ A helper module for testing, improves compatibility for testing (as
|
||||
"""
|
||||
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
|
||||
if sys.hexversion < 0x02070000:
|
||||
import unittest2 as unittest
|
||||
else:
|
||||
@@ -29,12 +31,19 @@ def cwd_at(path):
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwds):
|
||||
try:
|
||||
oldcwd = os.getcwd()
|
||||
repo_root = os.path.dirname(test_dir)
|
||||
os.chdir(os.path.join(repo_root, path))
|
||||
with set_cwd(path):
|
||||
return func(*args, **kwds)
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
@contextmanager
|
||||
def set_cwd(path, absolute_path=False):
|
||||
repo_root = os.path.dirname(test_dir)
|
||||
|
||||
oldcwd = os.getcwd()
|
||||
os.chdir(os.path.join(repo_root, path))
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
|
||||
@@ -120,7 +120,7 @@ from functools import reduce
|
||||
import jedi
|
||||
from jedi import debug
|
||||
from jedi._compatibility import unicode, is_py3
|
||||
from jedi.parser import Parser, load_grammar
|
||||
from jedi.parser.python import parse
|
||||
from jedi.api.classes import Definition
|
||||
from jedi.api.completion import get_user_scope
|
||||
|
||||
@@ -187,9 +187,9 @@ class IntegrationTestCase(object):
|
||||
should_be = set()
|
||||
for match in re.finditer('(?:[^ ]+)', correct):
|
||||
string = match.group(0)
|
||||
parser = Parser(load_grammar(), string, start_symbol='eval_input')
|
||||
parser = parse(string, start_symbol='eval_input', error_recovery=False)
|
||||
parser.get_root_node().move(self.line_nr)
|
||||
element = parser.get_parsed_node()
|
||||
element = parser.get_root_node()
|
||||
module_context = script._get_module()
|
||||
# The context shouldn't matter for the test results.
|
||||
user_context = get_user_scope(module_context, (self.line_nr, 0))
|
||||
@@ -363,9 +363,6 @@ if __name__ == '__main__':
|
||||
|
||||
import time
|
||||
t_start = time.time()
|
||||
# Sorry I didn't use argparse here. It's because argparse is not in the
|
||||
# stdlib in 2.5.
|
||||
import sys
|
||||
|
||||
if arguments['--debug']:
|
||||
jedi.set_debug_function()
|
||||
|
||||
4
test/test_api/import_tree_for_usages/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
"""
|
||||
An import tree, for testing usages.
|
||||
"""
|
||||
|
||||
4
test/test_api/import_tree_for_usages/a.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from . import b
|
||||
|
||||
def foo():
|
||||
b.bar()
|
||||
2
test/test_api/import_tree_for_usages/b.py
Normal file
@@ -0,0 +1,2 @@
|
||||
def bar():
|
||||
pass
|
||||
@@ -7,7 +7,7 @@ from textwrap import dedent
|
||||
from jedi import api
|
||||
from jedi._compatibility import is_py3
|
||||
from pytest import raises
|
||||
from jedi.parser import utils
|
||||
from jedi.parser import cache
|
||||
|
||||
|
||||
def test_preload_modules():
|
||||
@@ -17,15 +17,15 @@ def test_preload_modules():
|
||||
for i in modules:
|
||||
assert [i in k for k in parser_cache.keys() if k is not None]
|
||||
|
||||
temp_cache, utils.parser_cache = utils.parser_cache, {}
|
||||
parser_cache = utils.parser_cache
|
||||
temp_cache, cache.parser_cache = cache.parser_cache, {}
|
||||
parser_cache = cache.parser_cache
|
||||
|
||||
api.preload_module('sys')
|
||||
check_loaded() # compiled (c_builtin) modules shouldn't be in the cache.
|
||||
api.preload_module('types', 'token')
|
||||
check_loaded('types', 'token')
|
||||
|
||||
utils.parser_cache = temp_cache
|
||||
cache.parser_cache = temp_cache
|
||||
|
||||
|
||||
def test_empty_script():
|
||||
@@ -166,7 +166,7 @@ def test_get_line_code():
|
||||
# With before/after
|
||||
line = ' foo'
|
||||
source = 'def foo():\n%s\nother_line' % line
|
||||
assert get_line_code(source, line=2) == line
|
||||
assert get_line_code(source, line=2) == line + '\n'
|
||||
assert get_line_code(source, line=2, after=1) == line + '\nother_line'
|
||||
assert get_line_code(source, line=2, after=1, before=1) == source
|
||||
|
||||
|
||||
@@ -338,7 +338,7 @@ class TestGotoAssignments(TestCase):
|
||||
n = nms[0].goto_assignments()[0]
|
||||
assert n.name == 'json'
|
||||
assert n.type == 'module'
|
||||
assert n._name.tree_name.get_definition().type == 'file_input'
|
||||
assert n._name._context.tree_node.type == 'file_input'
|
||||
|
||||
assert nms[1].name == 'foo'
|
||||
assert nms[1].type == 'module'
|
||||
@@ -347,7 +347,7 @@ class TestGotoAssignments(TestCase):
|
||||
assert len(ass) == 1
|
||||
assert ass[0].name == 'json'
|
||||
assert ass[0].type == 'module'
|
||||
assert ass[0]._name.tree_name.get_definition().type == 'file_input'
|
||||
assert ass[0]._name._context.tree_node.type == 'file_input'
|
||||
|
||||
|
||||
def test_added_equals_to_params():
|
||||
|
||||
@@ -89,3 +89,10 @@ def test_sub_module():
|
||||
assert [d.full_name for d in defs] == ['jedi.api.classes']
|
||||
defs = jedi.Script('import jedi.api; jedi.api').goto_definitions()
|
||||
assert [d.full_name for d in defs] == ['jedi.api']
|
||||
|
||||
|
||||
def test_os_path():
|
||||
d, = jedi.Script('from os.path import join').completions()
|
||||
assert d.full_name == 'os.path.join'
|
||||
d, = jedi.Script('import os.p').completions()
|
||||
assert d.full_name == 'os.path'
|
||||
|
||||
@@ -1,6 +1,48 @@
|
||||
import jedi
|
||||
|
||||
import os.path
|
||||
|
||||
def test_import_usage():
|
||||
s = jedi.Script("from .. import foo", line=1, column=18, path="foo.py")
|
||||
assert [usage.line for usage in s.usages()] == [1]
|
||||
|
||||
|
||||
def usages_with_additional_modules(script, additional_modules):
|
||||
"""
|
||||
Stripped down version of `jedi.api.Script.usages` that can take an
|
||||
explicit set of additional modules. For use with `test_cross_module_usages`.
|
||||
"""
|
||||
|
||||
definition_names = jedi.api.usages.resolve_potential_imports(script._evaluator,
|
||||
script._goto())
|
||||
modules = set([d.get_root_context() for d in definition_names])
|
||||
modules.add(script._get_module())
|
||||
for additional_module in additional_modules:
|
||||
modules.add(additional_module._name.get_root_context())
|
||||
return jedi.api.usages.usages(script._evaluator, definition_names, modules)
|
||||
|
||||
|
||||
def test_cross_module_usages():
|
||||
"""
|
||||
This tests finding of usages between different modules. In
|
||||
`jedi.api.usages.compare_contexts`, this exercises the case where
|
||||
`c1 != c2`. This tests whether `jedi` can find the usage of
|
||||
`import_tree_for_usages.b.bar` in `import_tree_for_usages.a`
|
||||
"""
|
||||
|
||||
def usages_script():
|
||||
source = 'import import_tree_for_usages.b; import_tree_for_usages.b.bar'
|
||||
return jedi.api.Script(source=source, line=1, column=len(source),
|
||||
sys_path=[os.path.dirname(os.path.abspath(__file__))])
|
||||
|
||||
def module_script():
|
||||
source = 'import import_tree_for_usages.a; import_tree_for_usages.a'
|
||||
return jedi.api.Script(source=source, line=1, column=len(source),
|
||||
sys_path=[os.path.dirname(os.path.abspath(__file__))])
|
||||
|
||||
module = module_script().goto_definitions()[0]
|
||||
module_definition = module._name.get_root_context()
|
||||
usages_list = usages_with_additional_modules(usages_script(), set([module]))
|
||||
|
||||
assert any([elt for elt in usages_list if elt.module_name == 'a']), (
|
||||
"Did not find cross-module usage of :func:`b.bar` in :mod:`a`. Usages list was: {}"
|
||||
.format(usages_list))
|
||||
|
||||
@@ -3,16 +3,16 @@ Test all things related to the ``jedi.cache`` module.
"""

import time
from os import unlink

import pytest

import jedi
from jedi import settings, cache
from jedi.parser.utils import ParserCacheItem, ParserPickling


ParserPicklingCls = type(ParserPickling)
ParserPickling = ParserPicklingCls()
from jedi.parser.cache import _NodeCacheItem, save_module, load_module, \
    _get_hashed_path, parser_cache, _load_from_file_system, \
    _save_to_file_system
from jedi.parser.python import load_grammar


def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
@@ -24,41 +24,79 @@ def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
    dir_1 = str(tmpdir.mkdir('first'))
    dir_2 = str(tmpdir.mkdir('second'))

    item_1 = ParserCacheItem('fake parser 1')
    item_2 = ParserCacheItem('fake parser 2')
    item_1 = _NodeCacheItem('bla', [])
    item_2 = _NodeCacheItem('bla', [])
    path_1 = 'fake path 1'
    path_2 = 'fake path 2'

    monkeypatch.setattr(settings, 'cache_directory', dir_1)
    ParserPickling.save_parser(path_1, item_1)
    cached = load_stored_item(ParserPickling, path_1, item_1)
    assert cached == item_1.parser
    grammar = load_grammar()
    _save_to_file_system(grammar, path_1, item_1)
    parser_cache.clear()
    cached = load_stored_item(grammar, path_1, item_1)
    assert cached == item_1.node

    monkeypatch.setattr(settings, 'cache_directory', dir_2)
    ParserPickling.save_parser(path_2, item_2)
    cached = load_stored_item(ParserPickling, path_1, item_1)
    _save_to_file_system(grammar, path_2, item_2)
    cached = load_stored_item(grammar, path_1, item_1)
    assert cached is None


def load_stored_item(cache, path, item):
def load_stored_item(grammar, path, item):
    """Load `item` stored at `path` in `cache`."""
    return cache.load_parser(path, item.change_time - 1)
    item = _load_from_file_system(grammar, path, item.change_time - 1)
    return item


@pytest.mark.skip("This is currently not something we have implemented.")
@pytest.mark.usefixtures("isolated_jedi_cache")
def test_modulepickling_delete_incompatible_cache():
    item = ParserCacheItem('fake parser')
    item = _NodeCacheItem('fake parser', [])
    path = 'fake path'

    cache1 = ParserPicklingCls()
    cache1.version = 1
    cache1.save_parser(path, item)
    cached1 = load_stored_item(cache1, path, item)
    assert cached1 == item.parser
    grammar = load_grammar()
    cache1.save_item(grammar, path, item)
    cached1 = load_stored_item(grammar, cache1, path, item)
    assert cached1 == item.node

    cache2 = ParserPicklingCls()
    cache2.version = 2
    cached2 = load_stored_item(cache2, path, item)
    cached2 = load_stored_item(grammar, cache2, path, item)
    assert cached2 is None


@pytest.mark.usefixtures("isolated_jedi_cache")
def test_modulepickling_simulate_deleted_cache(tmpdir):
    """
    Tests loading from a cache file after it is deleted.
    According to macOS `dev docs`__,

        Note that the system may delete the Caches/ directory to free up disk
        space, so your app must be able to re-create or download these files as
        needed.

    It is possible that other supported platforms treat cache files the same
    way.

    __ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
    """
    grammar = load_grammar()
    module = 'fake parser'

    # Create the file
    path = tmpdir.dirname + '/some_path'
    with open(path, 'w'):
        pass

    save_module(grammar, path, module, [])
    assert load_module(grammar, path) == module

    unlink(_get_hashed_path(grammar, path))
    parser_cache.clear()

    cached2 = load_module(grammar, path)
    assert cached2 is None

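The cache round trip that the updated test above drives can be summarized in a short sketch. This is illustrative only: it assumes the private helpers from jedi.parser.cache keep the signatures used in the test, and the path and payload strings are made up.

from jedi import settings
from jedi.parser.cache import (_NodeCacheItem, _save_to_file_system,
                               _load_from_file_system, parser_cache)
from jedi.parser.python import load_grammar

def cache_round_trip(cache_dir):
    # Illustrative sketch, not part of the test suite above.
    settings.cache_directory = cache_dir      # pickles are written below this directory
    grammar = load_grammar()
    item = _NodeCacheItem('fake node', [])    # payload plus source lines, as in the test
    _save_to_file_system(grammar, 'fake path', item)
    parser_cache.clear()                      # drop the in-memory cache so the pickle is read back
    # Passing an older change_time makes the stored item count as up to date.
    return _load_from_file_system(grammar, 'fake path', item.change_time - 1)
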
@@ -5,9 +5,13 @@ def test_splitlines_no_keepends():
    assert splitlines('asd\r\n') == ['asd', '']
    assert splitlines('asd\r\n\f') == ['asd', '\f']
    assert splitlines('\fasd\r\n') == ['\fasd', '']
    assert splitlines('') == ['']
    assert splitlines('\n') == ['', '']


def test_splitlines_keepends():
    assert splitlines('asd\r\n', keepends=True) == ['asd\r\n', '']
    assert splitlines('asd\r\n\f', keepends=True) == ['asd\r\n', '\f']
    assert splitlines('\fasd\r\n', keepends=True) == ['\fasd\r\n', '']
    assert splitlines('', keepends=True) == ['']
    assert splitlines('\n', keepends=True) == ['\n', '']

@@ -0,0 +1 @@
foo = 'ns1_file!'
@@ -0,0 +1 @@
foo = 'ns2_file!'
@@ -0,0 +1 @@
CONST = 1
@@ -3,8 +3,7 @@ Tests ``from __future__ import absolute_import`` (only important for
Python 2.X)
"""
import jedi
from jedi._compatibility import u
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.python import parse
from .. import helpers


@@ -12,16 +11,15 @@ def test_explicit_absolute_imports():
    """
    Detect modules with ``from __future__ import absolute_import``.
    """
    parser = ParserWithRecovery(load_grammar(), u("from __future__ import absolute_import"), "test.py")
    assert parser.module.has_explicit_absolute_import
    module = parse("from __future__ import absolute_import")
    assert module.has_explicit_absolute_import


def test_no_explicit_absolute_imports():
    """
    Detect modules without ``from __future__ import absolute_import``.
    """
    parser = ParserWithRecovery(load_grammar(), u("1"), "test.py")
    assert not parser.module.has_explicit_absolute_import
    assert not parse("1").has_explicit_absolute_import


def test_dont_break_imports_without_namespaces():
@@ -29,9 +27,8 @@ def test_dont_break_imports_without_namespaces():
    The code checking for ``from __future__ import absolute_import`` shouldn't
    assume that all imports have non-``None`` namespaces.
    """
    src = u("from __future__ import absolute_import\nimport xyzzy")
    parser = ParserWithRecovery(load_grammar(), src, "test.py")
    assert parser.module.has_explicit_absolute_import
    src = "from __future__ import absolute_import\nimport xyzzy"
    assert parse(src).has_explicit_absolute_import


@helpers.cwd_at("test/test_evaluate/absolute_import")

@@ -3,20 +3,19 @@ from textwrap import dedent

from jedi._compatibility import u
from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
                                    _get_buildout_scripts,
                                    _get_buildout_script_paths,
                                    sys_path_with_modifications,
                                    _check_module)
from jedi.evaluate import Evaluator
from jedi.evaluate.representation import ModuleContext
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.python import parse, load_grammar

from ..helpers import cwd_at


def check_module_test(code):
    grammar = load_grammar()
    p = ParserWithRecovery(grammar, code)
    module_context = ModuleContext(Evaluator(grammar), p.module)
    module_context = ModuleContext(Evaluator(grammar), parse(code), path=None)
    return _check_module(module_context)


@@ -30,7 +29,7 @@ def test_parent_dir_with_file():

@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_buildout_detection():
    scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
    scripts = _get_buildout_script_paths(os.path.abspath('./module_name.py'))
    assert len(scripts) == 1
    curdir = os.path.abspath(os.curdir)
    appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
@@ -62,14 +61,14 @@ def test_path_from_invalid_sys_path_assignment():

@cwd_at('test/test_evaluate/buildout_project/src/proj_name/')
def test_sys_path_with_modifications():
    code = dedent(u("""
    code = dedent("""
        import os
        """))
        """)

    path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
    grammar = load_grammar()
    p = ParserWithRecovery(grammar, code, module_path=path)
    module_context = ModuleContext(Evaluator(grammar), p.module)
    module_node = parse(code, path=path)
    module_context = ModuleContext(Evaluator(grammar), module_node, path=path)
    paths = sys_path_with_modifications(module_context.evaluator, module_context)
    assert '/tmp/.buildout/eggs/important_package.egg' in paths

@@ -1,7 +1,7 @@
from textwrap import dedent

from jedi._compatibility import builtins, is_py3
from jedi.parser import load_grammar
from jedi.parser.python import load_grammar
from jedi.evaluate import compiled, instance
from jedi.evaluate.representation import FunctionContext
from jedi.evaluate import Evaluator
@@ -83,3 +83,9 @@ def test_method_completion():
    else:
        result = ['__func__']
    assert [c.name for c in Script(code).completions()] == result


def test_time_docstring():
    import time
    comp, = Script('import time\ntime.sleep').completions()
    assert comp.docstring() == time.sleep.__doc__

58  test/test_evaluate/test_implicit_namespace_package.py  Normal file
@@ -0,0 +1,58 @@
from os.path import dirname, join

import jedi
import pytest


@pytest.mark.skipif('sys.version_info[:2] < (3,4)')
def test_implicit_namespace_package():
    sys_path = [join(dirname(__file__), d)
                for d in ['implicit_namespace_package/ns1', 'implicit_namespace_package/ns2']]

    def script_with_path(*args, **kwargs):
        return jedi.Script(sys_path=sys_path, *args, **kwargs)

    # goto definition
    assert script_with_path('from pkg import ns1_file').goto_definitions()
    assert script_with_path('from pkg import ns2_file').goto_definitions()
    assert not script_with_path('from pkg import ns3_file').goto_definitions()

    # goto assignment
    tests = {
        'from pkg.ns2_file import foo': 'ns2_file!',
        'from pkg.ns1_file import foo': 'ns1_file!',
    }
    for source, solution in tests.items():
        ass = script_with_path(source).goto_assignments()
        assert len(ass) == 1
        assert ass[0].description == "foo = '%s'" % solution

    # completion
    completions = script_with_path('from pkg import ').completions()
    names = [c.name for c in completions]
    compare = ['ns1_file', 'ns2_file']
    # must at least contain these items, other items are not important
    assert set(compare) == set(names)

    tests = {
        'from pkg import ns2_file as x': 'ns2_file!',
        'from pkg import ns1_file as x': 'ns1_file!'
    }
    for source, solution in tests.items():
        for c in script_with_path(source + '; x.').completions():
            if c.name == 'foo':
                completion = c
        solution = "foo = '%s'" % solution
        assert completion.description == solution

@pytest.mark.skipif('sys.version_info[:2] < (3,4)')
def test_implicit_nested_namespace_package():
    CODE = 'from implicit_nested_namespaces.namespace.pkg.module import CONST'

    sys_path = [dirname(__file__)]

    script = jedi.Script(sys_path=sys_path, source=CODE, line=1, column=61)

    result = script.goto_definitions()

    assert len(result) == 1
7  test/test_evaluate/test_mixed.py  Normal file
@@ -0,0 +1,7 @@
import jedi


def test_on_code():
    from functools import wraps
    i = jedi.Interpreter("wraps.__code__", [{'wraps':wraps}])
    assert i.goto_definitions()
@@ -1,11 +1,11 @@
from jedi._compatibility import u
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.python import parse


def test_basic_parsing():
    def compare(string):
        """Generates the AST object and then regenerates the code."""
        assert ParserWithRecovery(load_grammar(), string).module.get_code() == string
        assert parse(string).get_code() == string

    compare(u('\na #pass\n'))
    compare(u('wblabla* 1\t\n'))

@@ -4,12 +4,12 @@ import pytest

import jedi
from jedi import debug
from jedi._compatibility import u
from jedi.common import splitlines
from jedi import cache
from jedi.parser import load_grammar
from jedi.parser.diff import DiffParser
from jedi.parser import ParserWithRecovery
from jedi.parser.cache import parser_cache
from jedi.parser.python import load_grammar
from jedi.parser.python.diff import DiffParser
from jedi.parser.python import parse


def _check_error_leaves_nodes(node):
@@ -42,22 +42,24 @@ def _assert_valid_graph(node):


class Differ(object):
    def initialize(self, source):
        debug.dbg('differ: initialize', color='YELLOW')
        grammar = load_grammar()
        self.parser = ParserWithRecovery(grammar, u(source))
        return self.parser.module
    grammar = load_grammar()

    def parse(self, source, copies=0, parsers=0, expect_error_leaves=False):
    def initialize(self, code):
        debug.dbg('differ: initialize', color='YELLOW')
        self.lines = splitlines(code, keepends=True)
        parser_cache.pop(None, None)
        self.module = parse(code, diff_cache=True, cache=True)
        return self.module

    def parse(self, code, copies=0, parsers=0, expect_error_leaves=False):
        debug.dbg('differ: parse copies=%s parsers=%s', copies, parsers, color='YELLOW')
        lines = splitlines(source, keepends=True)
        diff_parser = DiffParser(self.parser)
        new_module = diff_parser.update(lines)
        assert source == new_module.get_code()
        lines = splitlines(code, keepends=True)
        diff_parser = DiffParser(self.grammar, self.module)
        new_module = diff_parser.update(self.lines, lines)
        self.lines = lines
        assert code == new_module.get_code()
        assert diff_parser._copy_count == copies
        assert diff_parser._parser_count == parsers
        self.parser.module = new_module
        self.parser._parsed = new_module

        assert expect_error_leaves == _check_error_leaves_nodes(new_module)
        _assert_valid_graph(new_module)
@@ -423,6 +425,21 @@ def test_whitespace_at_end(differ):
    differ.parse(code + '\n', parsers=1, copies=1)


def test_endless_while_loop(differ):
    """
    This was a bug in Jedi #878.
    """
    code = '#dead'
    differ.initialize(code)
    module = differ.parse(code, parsers=1)
    assert module.end_pos == (1, 5)

    code = '#dead\n'
    differ.initialize(code)
    module = differ.parse(code + '\n', parsers=1)
    assert module.end_pos == (3, 0)


def test_in_class_movements(differ):
    code1 = dedent("""\
        class PlaybookExecutor:
@@ -448,3 +465,31 @@ def test_in_class_movements(differ):

    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1)


def test_in_parentheses_newlines(differ):
    code1 = dedent("""
    x = str(
        True)

    a = 1

    def foo():
        pass

    b = 2""")

    code2 = dedent("""
    x = str(True)

    a = 1

    def foo():
        pass

    b = 2""")

    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1)
    differ.parse(code1, parsers=2, copies=1)

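Outside the Differ helper, the incremental-parsing flow introduced in the hunk above looks roughly like the following sketch; the call signatures follow the calls in the updated test, and the code strings are illustrative only.

from jedi.common import splitlines
from jedi.parser.cache import parser_cache
from jedi.parser.python import parse, load_grammar
from jedi.parser.python.diff import DiffParser

old_code = 'a = 1\n'
new_code = 'a = 1\nb = 2\n'

parser_cache.pop(None, None)                       # start from a clean cache entry
module = parse(old_code, diff_cache=True, cache=True)

old_lines = splitlines(old_code, keepends=True)
new_lines = splitlines(new_code, keepends=True)
new_module = DiffParser(load_grammar(), module).update(old_lines, new_lines)
assert new_module.get_code() == new_code           # the diff parser reproduces the new source
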
@@ -2,10 +2,9 @@ import difflib

import pytest

from jedi._compatibility import u
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.python import parse

code_basic_features = u('''
code_basic_features = '''
"""A mod docstring"""

def a_function(a_argument, a_default = "default"):
@@ -22,7 +21,7 @@ to""" + "huhu"
        return str(a_result)
    else
        return None
''')
'''


def diff_code_assert(a, b, n=4):
@@ -44,22 +43,22 @@ def diff_code_assert(a, b, n=4):
def test_basic_parsing():
    """Validate the parsing features"""

    prs = ParserWithRecovery(load_grammar(), code_basic_features)
    m = parse(code_basic_features)
    diff_code_assert(
        code_basic_features,
        prs.module.get_code()
        m.get_code()
    )


def test_operators():
    src = u('5 * 3')
    prs = ParserWithRecovery(load_grammar(), src)
    diff_code_assert(src, prs.module.get_code())
    src = '5 * 3'
    module = parse(src)
    diff_code_assert(src, module.get_code())


def test_get_code():
    """Use the same code that the parser also generates, to compare"""
    s = u('''"""a docstring"""
    s = '''"""a docstring"""
class SomeClass(object, mixin):
    def __init__(self):
        self.xy = 3.0
@@ -81,8 +80,8 @@ class WithDocstring:
    def method_with_docstring():
        """class docstr"""
        pass
''')
    assert ParserWithRecovery(load_grammar(), s).module.get_code() == s
'''
    assert parse(s).get_code() == s


def test_end_newlines():
@@ -92,7 +91,7 @@ def test_end_newlines():
    line the parser needs.
    """
    def test(source, end_pos):
        module = ParserWithRecovery(load_grammar(), u(source)).module
        module = parse(source)
        assert module.get_code() == source
        assert module.end_pos == end_pos

@@ -10,9 +10,7 @@ from textwrap import dedent

import jedi
from jedi._compatibility import u
from jedi.parser import load_grammar
from jedi.parser.diff import FastParser
from jedi.parser.utils import save_parser
from jedi.parser.python import parse


def test_carriage_return_splitting():
@@ -26,8 +24,8 @@ def test_carriage_return_splitting():
            pass
        '''))
    source = source.replace('\n', '\r\n')
    p = FastParser(load_grammar(), source)
    assert [n.value for lst in p.module.used_names.values() for n in lst] == ['Foo']
    module = parse(source)
    assert [n.value for lst in module.used_names.values() for n in lst] == ['Foo']


def test_class_in_docstr():
@@ -45,11 +43,10 @@ def check_p(src, number_parsers_used, number_of_splits=None, number_of_misses=0)
    if number_of_splits is None:
        number_of_splits = number_parsers_used

    p = FastParser(load_grammar(), u(src))
    save_parser(None, p, pickling=False)
    module_node = parse(src)

    assert src == p.module.get_code()
    return p.module
    assert src == module_node.get_code()
    return module_node


def test_if():
@@ -257,7 +254,7 @@ def test_string_literals():
        """)

    script = jedi.Script(dedent(source))
    script._get_module().tree_node.end_pos == (6, 0)
    assert script._get_module().tree_node.end_pos == (6, 0)
    assert script.completions()


@@ -279,13 +276,12 @@ def test_decorator_string_issue():


def test_round_trip():
    source = dedent('''
    code = dedent('''
        def x():
            """hahaha"""
        func''')

    f = FastParser(load_grammar(), u(source))
    assert f.get_parsed_node().get_code() == source
    assert parse(code).get_code() == code


def test_parentheses_in_string():

@@ -5,7 +5,7 @@ instead of simple parser objects.

from textwrap import dedent

from jedi.parser import Parser, load_grammar
from jedi.parser.python import parse


def assert_params(param_string, **wanted_dct):
@@ -14,12 +14,12 @@ def assert_params(param_string, **wanted_dct):
        pass
    ''') % param_string

    parser = Parser(load_grammar(), dedent(source))
    funcdef = parser.get_parsed_node().subscopes[0]
    module = parse(source)
    funcdef = module.subscopes[0]
    dct = dict((p.name.value, p.default and p.default.get_code())
               for p in funcdef.params)
    assert dct == wanted_dct
    assert parser.get_parsed_node().get_code() == source
    assert module.get_code() == source


def test_split_params_with_separation_star():

@@ -2,28 +2,31 @@
import sys
from textwrap import dedent

import pytest

import jedi
from jedi._compatibility import u, is_py3
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser import tree as pt
from jedi.parser.python import parse, load_grammar
from jedi.parser.python import tree
from jedi.common import splitlines


def test_user_statement_on_import():
    """github #285"""
    s = u("from datetime import (\n"
          "    time)")
    s = "from datetime import (\n" \
        "    time)"

    for pos in [(2, 1), (2, 4)]:
        p = ParserWithRecovery(load_grammar(), s)
        stmt = p.module.get_statement_for_position(pos)
        assert isinstance(stmt, pt.Import)
        p = parse(s)
        stmt = p.get_statement_for_position(pos)
        assert isinstance(stmt, tree.Import)
        assert [str(n) for n in stmt.get_defined_names()] == ['time']


class TestCallAndName():
    def get_call(self, source):
        # Get the simple_stmt and then the first one.
        simple_stmt = ParserWithRecovery(load_grammar(), u(source)).module.children[0]
        simple_stmt = parse(source).children[0]
        return simple_stmt.children[0]

    def test_name_and_call_positions(self):
@@ -40,25 +43,25 @@ class TestCallAndName():

    def test_call_type(self):
        call = self.get_call('hello')
        assert isinstance(call, pt.Name)
        assert isinstance(call, tree.Name)

    def test_literal_type(self):
        literal = self.get_call('1.0')
        assert isinstance(literal, pt.Literal)
        assert isinstance(literal, tree.Literal)
        assert type(literal.eval()) == float

        literal = self.get_call('1')
        assert isinstance(literal, pt.Literal)
        assert isinstance(literal, tree.Literal)
        assert type(literal.eval()) == int

        literal = self.get_call('"hello"')
        assert isinstance(literal, pt.Literal)
        assert isinstance(literal, tree.Literal)
        assert literal.eval() == 'hello'


class TestSubscopes():
    def get_sub(self, source):
        return ParserWithRecovery(load_grammar(), u(source)).module.subscopes[0]
        return parse(source).subscopes[0]

    def test_subscope_names(self):
        name = self.get_sub('class Foo: pass').name
@@ -74,7 +77,7 @@ class TestSubscopes():

class TestImports():
    def get_import(self, source):
        return ParserWithRecovery(load_grammar(), source).module.imports[0]
        return parse(source).imports[0]

    def test_import_names(self):
        imp = self.get_import(u('import math\n'))
@@ -88,40 +91,26 @@ class TestImports():
        assert imp.end_pos == (1, len('import math'))


def test_module():
    module = ParserWithRecovery(load_grammar(), u('asdf'), 'example.py').module
    name = module.name
    assert str(name) == 'example'
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 7)

    module = ParserWithRecovery(load_grammar(), u('asdf')).module
    name = module.name
    assert str(name) == ''
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 0)


def test_end_pos():
    s = u(dedent('''
    x = ['a', 'b', 'c']
    def func():
        y = None
    '''))
    parser = ParserWithRecovery(load_grammar(), s)
    scope = parser.module.subscopes[0]
    s = dedent('''
    x = ['a', 'b', 'c']
    def func():
        y = None
    ''')
    parser = parse(s)
    scope = parser.subscopes[0]
    assert scope.start_pos == (3, 0)
    assert scope.end_pos == (5, 0)


def test_carriage_return_statements():
    source = u(dedent('''
    source = dedent('''
        foo = 'ns1!'

        # this is a namespace package
        '''))
        ''')
    source = source.replace('\n', '\r\n')
    stmt = ParserWithRecovery(load_grammar(), source).module.statements[0]
    stmt = parse(source).statements[0]
    assert '#' not in stmt.get_code()


@@ -129,7 +118,7 @@ def test_incomplete_list_comprehension():
    """ Shouldn't raise an error, same bug as #418. """
    # With the old parser this actually returned a statement. With the new
    # parser only valid statements generate one.
    assert ParserWithRecovery(load_grammar(), u('(1 for def')).module.statements == []
    assert parse('(1 for def').statements == []


def test_hex_values_in_docstring():
@@ -141,7 +130,7 @@ def test_hex_values_in_docstring():
        return 1
    '''

    doc = ParserWithRecovery(load_grammar(), dedent(u(source))).module.subscopes[0].raw_doc
    doc = parse(source).subscopes[0].raw_doc
    if is_py3:
        assert doc == '\xff'
    else:
@@ -160,7 +149,7 @@ def test_error_correction_with():


def test_newline_positions():
    endmarker = ParserWithRecovery(load_grammar(), u('a\n')).module.children[-1]
    endmarker = parse('a\n').children[-1]
    assert endmarker.end_pos == (2, 0)
    new_line = endmarker.get_previous_leaf()
    assert new_line.start_pos == (1, 1)
@@ -173,8 +162,8 @@ def test_end_pos_error_correction():
    grammar needs it. However, they are removed again. We still want the right
    end_pos, even if something breaks in the parser (error correction).
    """
    s = u('def x():\n .')
    m = ParserWithRecovery(load_grammar(), s).module
    s = 'def x():\n .'
    m = parse(s)
    func = m.children[0]
    assert func.type == 'funcdef'
    assert func.end_pos == (2, 2)
@@ -189,7 +178,7 @@ def test_param_splitting():
    def check(src, result):
        # Python 2 tuple params should be ignored for now.
        grammar = load_grammar('%s.%s' % sys.version_info[:2])
        m = ParserWithRecovery(grammar, u(src)).module
        m = parse(src, grammar=grammar)
        if is_py3:
            assert not m.subscopes
        else:
@@ -203,24 +192,22 @@ def test_param_splitting():


def test_unicode_string():
    s = pt.String(None, u('bö'), (0, 0))
    s = tree.String(None, u('bö'), (0, 0))
    assert repr(s)  # Should not raise an Error!


def test_backslash_dos_style():
    grammar = load_grammar()
    m = ParserWithRecovery(grammar, u('\\\r\n')).module
    assert m
    assert parse('\\\r\n')


def test_started_lambda_stmt():
    p = ParserWithRecovery(load_grammar(), u'lambda a, b: a i')
    assert p.get_parsed_node().children[0].type == 'error_node'
    m = parse(u'lambda a, b: a i')
    assert m.children[0].type == 'error_node'


def test_python2_octal():
    parser = ParserWithRecovery(load_grammar(), u'0660')
    first = parser.get_parsed_node().children[0]
    module = parse('0660')
    first = module.children[0]
    if is_py3:
        assert first.type == 'error_node'
    else:
@@ -228,8 +215,7 @@ def test_python2_octal():


def test_python3_octal():
    parser = ParserWithRecovery(load_grammar(), u'0o660')
    module = parser.get_parsed_node()
    module = parse('0o660')
    if is_py3:
        assert module.children[0].children[0].type == 'number'
    else:
@@ -243,3 +229,15 @@ def test_load_newer_grammar():
    # The same is true for very old grammars (even though this is probably not
    # going to be an issue.
    load_grammar('1.5')


@pytest.mark.parametrize('code', ['foo "', 'foo """\n', 'foo """\nbar'])
def test_open_string_literal(code):
    """
    Testing mostly if removing the last newline works.
    """
    lines = splitlines(code, keepends=True)
    end_pos = (len(lines), len(lines[-1]))
    module = parse(code)
    assert module.get_code() == code
    assert module.end_pos == end_pos == module.children[1].end_pos

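The recurring substitution in these hunks is the parser entry point itself. As a hedged one-line translation (the sample source string is illustrative, not taken from the tests):

from jedi.parser.python import parse

# Replaces ParserWithRecovery(load_grammar(), u(source)).module from the old API.
module = parse('x = 1\n')
assert module.get_code() == 'x = 1\n'

# Error recovery is on by default; disabling it raises ParserSyntaxError on broken
# input, as exercised by the grammar tests below.
strict_module = parse('x = 1\n', error_recovery=False)
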
@@ -5,8 +5,8 @@ from textwrap import dedent
import pytest

from jedi._compatibility import u, unicode
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser import tree as pt
from jedi.parser.python import parse
from jedi.parser.python import tree


class TestsFunctionAndLambdaParsing(object):
@@ -27,21 +27,21 @@ class TestsFunctionAndLambdaParsing(object):

    @pytest.fixture(params=FIXTURES)
    def node(self, request):
        parsed = ParserWithRecovery(load_grammar(), dedent(u(request.param[0])))
        parsed = parse(dedent(request.param[0]))
        request.keywords['expected'] = request.param[1]
        return parsed.module.subscopes[0]
        return parsed.subscopes[0]

    @pytest.fixture()
    def expected(self, request, node):
        return request.keywords['expected']

    def test_name(self, node, expected):
        assert isinstance(node.name, pt.Name)
        assert isinstance(node.name, tree.Name)
        assert unicode(node.name) == u(expected['name'])

    def test_params(self, node, expected):
        assert isinstance(node.params, list)
        assert all(isinstance(x, pt.Param) for x in node.params)
        assert all(isinstance(x, tree.Param) for x in node.params)
        assert [unicode(x.name) for x in node.params] == [u(x) for x in expected['params']]

    def test_is_generator(self, node, expected):

@@ -8,9 +8,9 @@ test_grammar.py files from both Python 2 and Python 3.

from textwrap import dedent


from jedi._compatibility import unicode, is_py3
from jedi.parser import Parser, load_grammar, ParseError
from jedi._compatibility import is_py3
from jedi.parser.python import parse as _parse, load_grammar
from jedi.parser import ParserSyntaxError
import pytest

from test.helpers import TestCase
@@ -19,7 +19,7 @@ from test.helpers import TestCase
def parse(code, version='3.4'):
    code = dedent(code) + "\n\n"
    grammar = load_grammar(version=version)
    return Parser(grammar, unicode(code), 'file_input').get_parsed_node()
    return _parse(code, grammar=grammar, error_recovery=False)


class TestDriver(TestCase):
@@ -37,7 +37,7 @@ class GrammarTest(TestCase):
    def invalid_syntax(self, code, **kwargs):
        try:
            parse(code, **kwargs)
        except ParseError:
        except ParserSyntaxError:
            pass
        else:
            raise AssertionError("Syntax shouldn't have been valid")

@@ -1,42 +1,43 @@
# -*- coding: utf-8 # This file contains Unicode characters.

from io import StringIO
from textwrap import dedent

from jedi._compatibility import u, is_py3, py_version
from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT
from jedi.parser import ParserWithRecovery, load_grammar, tokenize
from jedi._compatibility import is_py3, py_version
from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
from jedi.parser import tokenize
from jedi.parser.python import parse
from jedi.common import splitlines
from jedi.parser.tokenize import TokenInfo


from ..helpers import unittest

def _get_token_list(string):
    io = StringIO(u(string))
    return list(tokenize.generate_tokens(io.readline))
    return list(tokenize.source_tokens(string))


class TokenTest(unittest.TestCase):
    def test_end_pos_one_line(self):
        parsed = ParserWithRecovery(load_grammar(), dedent(u('''
        parsed = parse(dedent('''
        def testit():
            a = "huhu"
        ''')))
        tok = parsed.module.subscopes[0].statements[0].children[2]
        '''))
        tok = parsed.subscopes[0].statements[0].children[2]
        assert tok.end_pos == (3, 14)

    def test_end_pos_multi_line(self):
        parsed = ParserWithRecovery(load_grammar(), dedent(u('''
        parsed = parse(dedent('''
        def testit():
            a = """huhu
        asdfasdf""" + "h"
        ''')))
        tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
        '''))
        tok = parsed.subscopes[0].statements[0].children[2].children[0]
        assert tok.end_pos == (4, 11)

    def test_simple_no_whitespace(self):
        # Test a simple one line string, no preceding whitespace
        simple_docstring = u('"""simple one line docstring"""')
        simple_docstring_io = StringIO(simple_docstring)
        tokens = tokenize.generate_tokens(simple_docstring_io.readline)
        simple_docstring = '"""simple one line docstring"""'
        tokens = tokenize.source_tokens(simple_docstring)
        token_list = list(tokens)
        _, value, _, prefix = token_list[0]
        assert prefix == ''
@@ -44,9 +45,8 @@ class TokenTest(unittest.TestCase):

    def test_simple_with_whitespace(self):
        # Test a simple one line string with preceding whitespace and newline
        simple_docstring = u(' """simple one line docstring""" \r\n')
        simple_docstring_io = StringIO(simple_docstring)
        tokens = tokenize.generate_tokens(simple_docstring_io.readline)
        simple_docstring = ' """simple one line docstring""" \r\n'
        tokens = tokenize.source_tokens(simple_docstring)
        token_list = list(tokens)
        assert token_list[0][0] == INDENT
        typ, value, start_pos, prefix = token_list[1]
@@ -59,14 +59,13 @@ class TokenTest(unittest.TestCase):

    def test_function_whitespace(self):
        # Test function definition whitespace identification
        fundef = dedent(u('''
        fundef = dedent('''
        def test_whitespace(*args, **kwargs):
            x = 1
            if x > 0:
                print(True)
        '''))
        fundef_io = StringIO(fundef)
        tokens = tokenize.generate_tokens(fundef_io.readline)
        ''')
        tokens = tokenize.source_tokens(fundef)
        token_list = list(tokens)
        for _, value, _, prefix in token_list:
            if value == 'test_whitespace':
@@ -82,13 +81,39 @@ class TokenTest(unittest.TestCase):
            if value == 'if':
                assert prefix == ' '

    def test_tokenize_multiline_I(self):
        # Make sure multiline string having newlines have the end marker on the
        # next line
        fundef = '''""""\n'''
        tokens = tokenize.source_tokens(fundef)
        token_list = list(tokens)
        assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
                              TokenInfo(ENDMARKER , '', (2, 0), '')]

    def test_tokenize_multiline_II(self):
        # Make sure multiline string having no newlines have the end marker on
        # same line
        fundef = '''""""'''
        tokens = tokenize.source_tokens(fundef)
        token_list = list(tokens)
        assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
                              TokenInfo(ENDMARKER, '', (1, 4), '')]

    def test_tokenize_multiline_III(self):
        # Make sure multiline string having newlines have the end marker on the
        # next line even if several newline
        fundef = '''""""\n\n'''
        tokens = tokenize.source_tokens(fundef)
        token_list = list(tokens)
        assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
                              TokenInfo(ENDMARKER, '', (3, 0), '')]

    def test_identifier_contains_unicode(self):
        fundef = dedent(u('''
        fundef = dedent('''
        def 我あφ():
            pass
        '''))
        fundef_io = StringIO(fundef)
        tokens = tokenize.generate_tokens(fundef_io.readline)
        ''')
        tokens = tokenize.source_tokens(fundef)
        token_list = list(tokens)
        unicode_token = token_list[1]
        if is_py3:
@@ -109,8 +134,8 @@ class TokenTest(unittest.TestCase):
        ]

        for s in string_tokens:
            parsed = ParserWithRecovery(load_grammar(), u('''a = %s\n''' % s))
            simple_stmt = parsed.module.children[0]
            module = parse('''a = %s\n''' % s)
            simple_stmt = module.children[0]
            expr_stmt = simple_stmt.children[0]
            assert len(expr_stmt.children) == 3
            string_tok = expr_stmt.children[2]
@@ -173,3 +198,17 @@ def test_error_literal():
    assert error_token.string == '"""'
    assert endmarker.type == tokenize.ENDMARKER
    assert endmarker.prefix == ''


def test_endmarker_end_pos():
    def check(code):
        tokens = _get_token_list(code)
        lines = splitlines(code)
        assert tokens[-1].end_pos == (len(lines), len(lines[-1]))

    check('#c')
    check('#c\n')
    check('a\n')
    check('a')
    check(r'a\\n')
    check('a\\')

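For the tokenizer, the same hunks replace generate_tokens(StringIO(...).readline) with source_tokens(). A minimal sketch with an illustrative input string, using only calls and attributes that appear in the tests above:

from jedi.parser import tokenize

token_list = list(tokenize.source_tokens('a = 1\n'))
for typ, value, start_pos, prefix in token_list:   # TokenInfo unpacks like this in the tests above
    print(typ, repr(value), start_pos, repr(prefix))
assert token_list[-1].type == tokenize.ENDMARKER   # the stream always ends with an ENDMARKER
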
@@ -7,16 +7,14 @@ import os
import sys
import textwrap

from .helpers import TestCase, cwd_at

import pytest
import jedi
from jedi._compatibility import u

from jedi import Script
from jedi import api
from jedi import common
from jedi.evaluate import imports
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.python import parse
from .helpers import TestCase, cwd_at

#jedi.set_debug_function()

@@ -102,9 +100,9 @@ class TestRegression(TestCase):

    def test_end_pos_line(self):
        # jedi issue #150
        s = u("x()\nx( )\nx( )\nx ( )")
        parser = ParserWithRecovery(load_grammar(), s)
        for i, s in enumerate(parser.module.statements):
        s = "x()\nx( )\nx( )\nx ( )"
        module = parse(s)
        for i, s in enumerate(module.statements):
            assert s.end_pos == (i + 1, i + 3)

    def check_definition_by_marker(self, source, after_cursor, names):
@@ -125,7 +123,6 @@ class TestRegression(TestCase):
                break
        column = len(line) - len(after_cursor)
        defs = Script(source, i + 1, column).goto_definitions()
        print(defs)
        assert [d.name for d in defs] == names

    def test_backslash_continuation(self):

@@ -24,8 +24,8 @@ class TestSpeed(TestCase):
            for i in range(number):
                func(self)
            single_time = (time.time() - first) / number
            print('\nspeed', func, single_time)
            assert single_time < time_per_run
            message = 'speed issue %s, %s' % (func, single_time)
            assert single_time < time_per_run, message
        return wrapper
    return decorated
