forked from VimPlug/jedi
David Halter
2013-02-06 13:00:23 +01:00
parent 8cf783f2c3
commit 69137a48f0
6 changed files with 26 additions and 19 deletions


@@ -205,7 +205,8 @@ class Completion(BaseDefinition):
     @property
     def word(self):
         """
-        Similar to :meth:`Completion.complete`, but return the whole word, e.g. ::
+        Similar to :meth:`Completion.complete`, but return the whole word, for
+        example::

            >>> isinstan

@@ -235,11 +236,11 @@ class Completion(BaseDefinition):
     def follow_definition(self):
         """
         Return the original definitions. I strongly recommend not using it for
-        your completions, because it might slow down |jedi|. If you want to read
-        only a few objects (<=20), it might be useful, especially to
-        get the original docstrings. The basic problem of this function is
-        that it follows all results. This means with 1000 completions (e.g.
-        numpy), it's just PITA-slow.
+        your completions, because it might slow down |jedi|. If you want to
+        read only a few objects (<=20), it might be useful, especially to get
+        the original docstrings. The basic problem of this function is that it
+        follows all results. This means with 1000 completions (e.g. numpy),
+        it's just PITA-slow.
         """
         if self._followed_definitions is None:
             if self.definition.isinstance(pr.Statement):
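For context, a minimal usage sketch of the API documented above, written
against the 2013-era interface (jedi.Script(source, line, column, path) and
script.complete() are assumptions based on that release, not a current API):

    import jedi

    source = "import json; json.lo"
    script = jedi.Script(source, 1, len(source), 'example.py')
    completions = script.complete()
    for c in completions:
        # `complete` is the missing suffix, `word` the whole identifier.
        print(c.word, c.complete)

    # Per the docstring above, follow_definition() follows *all* results,
    # so only call it for small result sets (<= 20 or so).
    if len(completions) <= 20:
        for c in completions:
            print(c.follow_definition())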


@@ -18,17 +18,18 @@ import evaluate_representation as er
 import parsing

 DOCSTRING_PARAM_PATTERNS = [
     r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
     r'\s*@type\s+%s:\s*([^\n]+)', # Epidoc
 ]

 DOCSTRING_RETURN_PATTERNS = [
     re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
     re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epidoc
 ]

 REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')

 #@cache.memoize_default() # TODO add
 def follow_param(param):
     func = param.parent_function

@@ -68,7 +69,8 @@ def search_param_in_docstr(docstr, param_str):
     """
     # look at #40 to see definitions of those params
-    patterns = [ re.compile(p % re.escape(param_str)) for p in DOCSTRING_PARAM_PATTERNS ]
+    patterns = [re.compile(p % re.escape(param_str))
+                for p in DOCSTRING_PARAM_PATTERNS]
     for pattern in patterns:
         match = pattern.search(docstr)
         if match:
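To see what the reflowed list comprehension does, here is a self-contained
re-creation of the lookup (the sample docstring is invented for illustration):

    import re

    DOCSTRING_PARAM_PATTERNS = [
        r'\s*:type\s+%s:\s*([^\n]+)',  # Sphinx
        r'\s*@type\s+%s:\s*([^\n]+)',  # Epidoc
    ]

    def search_param_in_docstr(docstr, param_str):
        # %s is filled with the escaped parameter name before compiling.
        patterns = [re.compile(p % re.escape(param_str))
                    for p in DOCSTRING_PARAM_PATTERNS]
        for pattern in patterns:
            match = pattern.search(docstr)
            if match:
                return match.group(1)

    docstr = "Frobnicate a node.\n\n:type node: parsing.Node\n"
    print(search_param_in_docstr(docstr, 'node'))  # -> parsing.Node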
@@ -114,6 +116,7 @@ def find_return_types(func):
         p.user_stmt.parent = func
         return list(evaluate.follow_statement(p.user_stmt))

+
     def search_return_in_docstr(code):
         for p in DOCSTRING_RETURN_PATTERNS:
             match = p.search(code)
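Likewise, the return-type patterns and REST_ROLE_PATTERN can be exercised on
their own (the sample :rtype: line is invented for illustration):

    import re

    DOCSTRING_RETURN_PATTERNS = [
        re.compile(r'\s*:rtype:\s*([^\n]+)', re.M),  # Sphinx
        re.compile(r'\s*@rtype:\s*([^\n]+)', re.M),  # Epidoc
    ]
    REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')

    def search_return_in_docstr(code):
        for p in DOCSTRING_RETURN_PATTERNS:
            match = p.search(code)
            if match:
                return match.group(1)

    hit = search_return_in_docstr(":rtype: :class:`list` of int\n")
    print(hit)                              # :class:`list` of int
    role = REST_ROLE_PATTERN.search(hit)
    print(role.group(1) if role else hit)   # list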


@@ -16,7 +16,7 @@ import cache
 class Module(pr.Simple, pr.Module):
     def __init__(self, parsers):
         self._end_pos = None, None
-        super(Module, self).__init__(self, (1,0))
+        super(Module, self).__init__(self, (1, 0))
         self.parsers = parsers
         self.reset_caches()
         self.line_offset = 0


@@ -373,13 +373,15 @@ def source_to_unicode(source, encoding=None):
     http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
     declarations
     """
-    byte_mark = '\xef\xbb\xbf' if is_py25 else literal_eval(r"b'\xef\xbb\xbf'")
+    byte_mark = '\xef\xbb\xbf' if is_py25 else \
+        literal_eval(r"b'\xef\xbb\xbf'")
     if source.startswith(byte_mark):
         # UTF-8 byte-order mark
         return 'utf-8'

     first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
-    possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines)
+    possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
+                                  first_two_lines)
     if possible_encoding:
         return possible_encoding.group(1)
     else:
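The detection logic is easy to lift out and test; this is a sketch adapted to
Python 3 byte strings (the is_py25/literal_eval dance in the diff just builds
the same three BOM bytes, and the final fallback encoding is assumed here
because the else branch is cut off above):

    import re

    def detect_encoding(source_bytes):
        if source_bytes.startswith(b'\xef\xbb\xbf'):
            # A UTF-8 byte-order mark decides immediately.
            return 'utf-8'
        # PEP 263: a coding declaration must be in the first two lines.
        first_two_lines = b'\n'.join(source_bytes.split(b'\n')[:2])
        match = re.search(br"coding[=:]\s*([-\w.]+)", first_two_lines)
        if match:
            return match.group(1).decode('ascii')
        return 'utf-8'  # assumed default

    print(detect_encoding(b"# -*- coding: latin-1 -*-\nx = 1\n"))  # latin-1
    print(detect_encoding(b'\xef\xbb\xbfprint(1)\n'))              # utf-8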


@@ -65,6 +65,7 @@ def _rename(names, replace_str):
     """ For both rename and inline. """
     order = sorted(names, key=lambda x: (x.module_path, x.start_pos),
                    reverse=True)
+
     def process(path, old_lines, new_lines):
         if new_lines is not None: # goto next file, save last
             dct[path] = path, old_lines, new_lines

@@ -130,7 +131,6 @@ def extract(script, new_name):
         end_line = new_lines[end_pos[0] - 1]
         text += '\n' + end_line[:end_pos[1]]

-
     # remove code from new lines
     t = text.lstrip()
     del_start = start_pos[1] + len(text) - len(t)

@@ -144,7 +144,7 @@ def extract(script, new_name):
         e = e - del_end
         start_line = start_line[:del_start] + new_name + start_line[e:]
     new_lines[start_pos[0] - 1] = start_line
-    new_lines[start_pos[0]:end_pos[0]-1] = []
+    new_lines[start_pos[0]:end_pos[0] - 1] = []

     # add parentheses in multiline case
     open_brackets = ['(', '[', '{']

@@ -194,7 +194,6 @@ def inline(script):
         if len(stmt.set_vars) == 1:
             line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]

     dct = _rename(inlines, replace_str)
-
     # remove the empty line
     new_lines = dct[script.source_path][2]
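The slice assignment that was reformatted in extract() is the core of the
line-splicing trick; a toy demonstration with jedi's 1-based line numbers
(the positions here are invented for illustration):

    lines = ['line 1', 'line 2', 'line 3', 'line 4', 'line 5']
    start_pos = (2, 0)   # the extracted span starts on line 2
    end_pos = (4, 0)     # and ends on line 4

    # The rewritten start line keeps the new name; the lines strictly
    # between start and end are consumed, so delete them in one slice.
    lines[start_pos[0]:end_pos[0] - 1] = []
    print(lines)  # ['line 1', 'line 2', 'line 4', 'line 5']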


@@ -124,11 +124,13 @@ Use filesystem cache to save once parsed files with pickle.
 """
 if platform.system().lower() == 'windows':
-    _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', 'Jedi')
+    _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
+                                    'Jedi')
 elif platform.system().lower() == 'darwin':
     _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
 else:
-    _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'jedi')
+    _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
+                                    'jedi')
 cache_directory = os.path.expanduser(_cache_directory)

 """
 The path where all the caches can be found.
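The platform switch runs standalone once the imports are added; a
self-contained re-creation for checking the resolved path locally:

    import os
    import platform

    if platform.system().lower() == 'windows':
        _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
                                        'Jedi')
    elif platform.system().lower() == 'darwin':
        _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
    else:
        _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME')
                                        or '~/.cache', 'jedi')
    cache_directory = os.path.expanduser(_cache_directory)
    print(cache_directory)  # e.g. /home/user/.cache/jedi on Linux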