Mirror of https://github.com/davidhalter/jedi.git (synced 2026-02-28 17:22:19 +08:00)
Merge branch 'pep484' into linter
@@ -322,13 +322,8 @@ class Script(object):
     @memoize_default()
     def _get_under_cursor_stmt(self, cursor_txt, start_pos=None):
-        tokenizer = source_tokens(cursor_txt)
-        r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer)
-        try:
-            # Take the last statement available that is not an endmarker.
-            # And because it's a simple_stmt, we need to get the first child.
-            stmt = r.module.children[-2].children[0]
-        except (AttributeError, IndexError):
+        stmt = Parser(self._grammar, cursor_txt, 'eval_input').get_parsed_node()
+        if stmt is None:
             return None

         user_stmt = self._parser.user_stmt()
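
For context: 'eval_input' is the expression-level entry point of the Python grammar, so the text under the cursor no longer needs to be parsed as a whole module with error recovery. The stdlib draws the same distinction with ast.parse modes; a minimal illustration of the idea (plain stdlib, not jedi's API):

    import ast

    # Module-level and expression-level parses use different grammar
    # entry points; Parser(..., 'eval_input') is jedi's counterpart of
    # the stdlib's mode='eval':
    print(type(ast.parse('x = 1', mode='exec').body[0]).__name__)  # Assign
    print(type(ast.parse('foo(bar)', mode='eval').body).__name__)  # Call
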
@@ -305,6 +305,8 @@ class Evaluator(object):
             types = set(chain.from_iterable(self.find_types(typ, next_name)
                                             for typ in types))
+        elif element.type == 'eval_input':
+            types = self._eval_element_not_cached(element.children[0])
         else:
             types = precedence.calculate_children(self, element.children)
         debug.dbg('eval_element result %s', types)
@@ -8,7 +8,7 @@ import os
 import inspect

 from jedi._compatibility import is_py3, builtins, unicode
-from jedi.parser import Parser, load_grammar
+from jedi.parser import ParserWithRecovery, load_grammar
 from jedi.parser import tree as pt
 from jedi.evaluate.helpers import FakeName
@@ -31,7 +31,7 @@ def _load_faked_module(module):
         modules[module_name] = None
         return
     grammar = load_grammar('grammar3.4')
-    module = Parser(grammar, unicode(source), module_name).module
+    module = ParserWithRecovery(grammar, unicode(source), module_name).module
     modules[module_name] = module

     if module_name == 'builtins' and not is_py3:
@@ -68,7 +68,11 @@ def get_module(obj):
         # Happens for example in `(_ for _ in []).send.__module__`.
         return builtins
     else:
-        return __import__(imp_plz)
+        try:
+            return __import__(imp_plz)
+        except ImportError:
+            # __module__ can be something arbitrary that doesn't exist.
+            return builtins


 def _faked(module, obj, name):
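
The new guard matters because __module__ is an ordinary writable attribute and can hold any string. A runnable demonstration of the failure mode (the module name is deliberately made up):

    def f():
        pass

    # __module__ may name a module that doesn't exist, so the bare
    # __import__ needs the ImportError fallback shown above:
    f.__module__ = 'module_that_does_not_exist'
    try:
        __import__(f.__module__)
    except ImportError:
        print('falling back to builtins')
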
@@ -20,7 +20,7 @@ from itertools import chain
 from textwrap import dedent

 from jedi.evaluate.cache import memoize_default
-from jedi.parser import Parser, load_grammar
+from jedi.parser import ParserWithRecovery, load_grammar
 from jedi.common import indent_block
 from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated
@@ -130,7 +130,7 @@ def _evaluate_for_statement_string(evaluator, string, module):
     # Take the default grammar here, if we load the Python 2.7 grammar here, it
     # will be impossible to use `...` (Ellipsis) as a token. Docstring types
     # don't need to conform with the current grammar.
-    p = Parser(load_grammar(), code % indent_block(string))
+    p = ParserWithRecovery(load_grammar(), code % indent_block(string))
     try:
         pseudo_cls = p.module.subscopes[0]
         # First pick suite, then simple_stmt (-2 for DEDENT) and then the node,
@@ -23,6 +23,7 @@ from jedi.evaluate import representation as er
 from jedi.evaluate import dynamic
 from jedi.evaluate import compiled
 from jedi.evaluate import docstrings
+from jedi.evaluate import pep0484
 from jedi.evaluate import iterable
 from jedi.evaluate import imports
 from jedi.evaluate import analysis
@@ -386,10 +387,11 @@ def _eval_param(evaluator, param, scope):
             and func.instance.is_generated and str(func.name) == '__init__':
         param = func.var.params[param.position_nr]

-    # Add docstring knowledge.
+    # Add pep0484 and docstring knowledge.
+    pep0484_hints = pep0484.follow_param(evaluator, param)
     doc_params = docstrings.follow_param(evaluator, param)
-    if doc_params:
-        return doc_params
+    if pep0484_hints or doc_params:
+        return list(set(pep0484_hints) | set(doc_params))

     if isinstance(param, ExecutedParam):
         return res_new | param.eval(evaluator)
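
The union semantics mean either hint source alone is now sufficient, and overlapping hints are deduplicated. A toy illustration with hypothetical hint values:

    # Hypothetical results for one parameter: previously docstring hints
    # won outright; now annotation and docstring hints are merged.
    pep0484_hints = {'int'}
    doc_params = {'int', 'float'}
    if pep0484_hints or doc_params:
        print(sorted(set(pep0484_hints) | set(doc_params)))  # ['float', 'int']
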
@@ -485,8 +487,8 @@ def global_names_dict_generator(evaluator, scope, position):
     the current scope is function:

     >>> from jedi._compatibility import u, no_unicode_pprint
-    >>> from jedi.parser import Parser, load_grammar
-    >>> parser = Parser(load_grammar(), u('''
+    >>> from jedi.parser import ParserWithRecovery, load_grammar
+    >>> parser = ParserWithRecovery(load_grammar(), u('''
     ... x = ['a', 'b', 'c']
     ... def func():
     ...     y = None
jedi/evaluate/pep0484.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+"""
+PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
+through function annotations. There is a strong suggestion in this document
+that only the type of type hinting defined in PEP0484 should be allowed
+as annotations in future python versions.
+
+The (initial / probably incomplete) implementation todo list for pep-0484:
+v Function parameter annotations with builtin/custom type classes
+v Function returntype annotations with builtin/custom type classes
+v Function parameter annotations with strings (forward reference)
+v Function return type annotations with strings (forward reference)
+x Local variable type hints
+v Assigned types: `Url = str\ndef get(url:Url) -> str:`
+x Type hints in `with` statements
+x Stub files support
+x support `@no_type_check` and `@no_type_check_decorator`
+x support for type hint comments `# type: (int, str) -> int`. See comment from
+Guido https://github.com/davidhalter/jedi/issues/662
+"""
+
+from itertools import chain
+
+from jedi.parser import Parser, load_grammar
+from jedi.evaluate.cache import memoize_default
+from jedi.evaluate.compiled import CompiledObject
+from jedi import debug
+
+
+def _evaluate_for_annotation(evaluator, annotation):
+    if annotation is not None:
+        definitions = set()
+        for definition in evaluator.eval_element(annotation):
+            if (isinstance(definition, CompiledObject) and
+                    isinstance(definition.obj, str)):
+                p = Parser(load_grammar(), definition.obj, start='eval_input')
+                element = p.get_parsed_node()
+                if element is None:
+                    debug.warning('Annotation not parsed: %s' % definition.obj)
+                else:
+                    element.parent = annotation.parent
+                    definitions |= evaluator.eval_element(element)
+            else:
+                definitions.add(definition)
+        return list(chain.from_iterable(
+            evaluator.execute(d) for d in definitions))
+    else:
+        return []
+
+
+@memoize_default(None, evaluator_is_first_arg=True)
+def follow_param(evaluator, param):
+    annotation = param.annotation()
+    return _evaluate_for_annotation(evaluator, annotation)
+
+
+@memoize_default(None, evaluator_is_first_arg=True)
+def find_return_types(evaluator, func):
+    annotation = func.py__annotations__().get("return", None)
+    return _evaluate_for_annotation(evaluator, annotation)
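
For a sense of what the checklist above covers: annotations are ordinary expressions evaluated at definition time, while forward references survive as plain strings, which is exactly why _evaluate_for_annotation() re-parses string annotations with start='eval_input'. A runnable sketch of the raw data the module consumes:

    Url = str

    def get(url: Url) -> str:        # assigned type, evaluated at def time
        return url

    def fetch(client: "Client") -> "Response":   # forward references stay strings
        pass

    print(get.__annotations__)    # {'url': <class 'str'>, 'return': <class 'str'>}
    print(fetch.__annotations__)  # {'client': 'Client', 'return': 'Response'}
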
@@ -49,6 +49,7 @@ from jedi.evaluate import compiled
 from jedi.evaluate import recursion
 from jedi.evaluate import iterable
 from jedi.evaluate import docstrings
+from jedi.evaluate import pep0484
 from jedi.evaluate import helpers
 from jedi.evaluate import param
 from jedi.evaluate import flow_analysis
@@ -583,6 +584,20 @@ class Function(use_metaclass(CachedMetaClass, Wrapper)):
         else:
             return FunctionExecution(self._evaluator, self, params).get_return_types()

+    @memoize_default()
+    def py__annotations__(self):
+        parser_func = self.base
+        return_annotation = parser_func.annotation()
+        if return_annotation:
+            dct = {'return': return_annotation}
+        else:
+            dct = {}
+        for function_param in parser_func.params:
+            param_annotation = function_param.annotation()
+            if param_annotation is not None:
+                dct[function_param.name.value] = param_annotation
+        return dct
+
     def py__class__(self):
         return compiled.get_special_object(self._evaluator, 'FUNCTION_CLASS')
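
A rough stdlib analogue of the dict this method builds, using inspect on a live function instead of jedi's parse tree:

    import inspect

    def py_annotations(func):
        # Same shape as Function.py__annotations__(): parameter name ->
        # annotation, plus an optional 'return' key.
        sig = inspect.signature(func)
        dct = {}
        if sig.return_annotation is not inspect.Signature.empty:
            dct['return'] = sig.return_annotation
        for name, param in sig.parameters.items():
            if param.annotation is not inspect.Parameter.empty:
                dct[name] = param.annotation
        return dct

    def greet(name: str, times: int = 1) -> str:
        return name * times

    print(py_annotations(greet))
    # {'return': <class 'str'>, 'name': <class 'str'>, 'times': <class 'int'>}
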
@@ -642,6 +657,7 @@ class FunctionExecution(Executed):
         else:
             returns = self.returns
             types = set(docstrings.find_return_types(self._evaluator, func))
+            types |= set(pep0484.find_return_types(self._evaluator, func))

             for r in returns:
                 check = flow_analysis.break_check(self._evaluator, self, r)
@@ -14,7 +14,7 @@ from jedi.common import unite
 from jedi.evaluate import compiled
 from jedi.evaluate import representation as er
 from jedi.evaluate import iterable
-from jedi.parser import Parser
+from jedi.parser import ParserWithRecovery
 from jedi.parser import tree
 from jedi import debug
 from jedi.evaluate import precedence
@@ -243,7 +243,7 @@ def collections_namedtuple(evaluator, obj, arguments):
     )

     # Parse source
-    generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0]
+    generated_class = ParserWithRecovery(evaluator.grammar, unicode(source)).module.subscopes[0]
     return set([er.Class(evaluator, generated_class)])
@@ -5,7 +5,7 @@ from jedi.evaluate.site import addsitedir

 from jedi._compatibility import exec_function, unicode
 from jedi.parser import tree
-from jedi.parser import Parser
+from jedi.parser import ParserWithRecovery
 from jedi.evaluate.cache import memoize_default
 from jedi import debug
 from jedi import common
@@ -209,7 +209,7 @@ def _get_paths_from_buildout_script(evaluator, buildout_script):
         debug.dbg('Error trying to read buildout_script: %s', buildout_script)
         return

-    p = Parser(evaluator.grammar, source, buildout_script)
+    p = ParserWithRecovery(evaluator.grammar, source, buildout_script)
     cache.save_parser(buildout_script, p)
     return p.module
@@ -81,92 +81,89 @@ class ParserSyntaxError(object):


 class Parser(object):
-    """
-    This class is used to parse a Python file, it then divides them into a
-    class structure of different scopes.
-
-    :param grammar: The grammar object of pgen2. Loaded by load_grammar.
-    :param source: The codebase for the parser. Must be unicode.
-    :param module_path: The path of the module in the file system, may be None.
-    :type module_path: str
-    :param top_module: Use this module as a parent instead of `self.module`.
-    """
-    def __init__(self, grammar, source, module_path=None, tokenizer=None):
-        self._ast_mapping = {
-            'expr_stmt': pt.ExprStmt,
-            'classdef': pt.Class,
-            'funcdef': pt.Function,
-            'file_input': pt.Module,
-            'import_name': pt.ImportName,
-            'import_from': pt.ImportFrom,
-            'break_stmt': pt.KeywordStatement,
-            'continue_stmt': pt.KeywordStatement,
-            'return_stmt': pt.ReturnStmt,
-            'raise_stmt': pt.KeywordStatement,
-            'yield_expr': pt.YieldExpr,
-            'del_stmt': pt.KeywordStatement,
-            'pass_stmt': pt.KeywordStatement,
-            'global_stmt': pt.GlobalStmt,
-            'nonlocal_stmt': pt.KeywordStatement,
-            'print_stmt': pt.KeywordStatement,
-            'assert_stmt': pt.AssertStmt,
-            'if_stmt': pt.IfStmt,
-            'with_stmt': pt.WithStmt,
-            'for_stmt': pt.ForStmt,
-            'while_stmt': pt.WhileStmt,
-            'try_stmt': pt.TryStmt,
-            'comp_for': pt.CompFor,
-            'decorator': pt.Decorator,
-            'lambdef': pt.Lambda,
-            'old_lambdef': pt.Lambda,
-            'lambdef_nocond': pt.Lambda,
-        }
-
-        self.syntax_errors = []
-
-        self._global_names = []
-        self._omit_dedent_list = []
-        self._indent_counter = 0
-        self._last_failed_start_pos = (0, 0)
-
-        # TODO do print absolute import detection here.
-        #try:
-        #    del python_grammar_no_print_statement.keywords["print"]
-        #except KeyError:
-        #    pass # Doesn't exist in the Python 3 grammar.
-
-        #if self.options["print_function"]:
-        #    python_grammar = pygram.python_grammar_no_print_statement
-        #else:
+    AST_MAPPING = {
+        'expr_stmt': pt.ExprStmt,
+        'classdef': pt.Class,
+        'funcdef': pt.Function,
+        'file_input': pt.Module,
+        'import_name': pt.ImportName,
+        'import_from': pt.ImportFrom,
+        'break_stmt': pt.KeywordStatement,
+        'continue_stmt': pt.KeywordStatement,
+        'return_stmt': pt.ReturnStmt,
+        'raise_stmt': pt.KeywordStatement,
+        'yield_expr': pt.YieldExpr,
+        'del_stmt': pt.KeywordStatement,
+        'pass_stmt': pt.KeywordStatement,
+        'global_stmt': pt.GlobalStmt,
+        'nonlocal_stmt': pt.KeywordStatement,
+        'print_stmt': pt.KeywordStatement,
+        'assert_stmt': pt.AssertStmt,
+        'if_stmt': pt.IfStmt,
+        'with_stmt': pt.WithStmt,
+        'for_stmt': pt.ForStmt,
+        'while_stmt': pt.WhileStmt,
+        'try_stmt': pt.TryStmt,
+        'comp_for': pt.CompFor,
+        'decorator': pt.Decorator,
+        'lambdef': pt.Lambda,
+        'old_lambdef': pt.Lambda,
+        'lambdef_nocond': pt.Lambda,
+    }
+
+    class ParserError(Exception):
+        pass
+
+    def __init__(self, grammar, source, start, tokenizer=None):
+        start_number = grammar.symbol2number[start]
+
         self._used_names = {}
         self._scope_names_stack = [{}]
         self._error_statement_stacks = []

-        added_newline = False
-        # The Python grammar needs a newline at the end of each statement.
-        if not source.endswith('\n'):
-            source += '\n'
-            added_newline = True
+        self._last_failed_start_pos = (0, 0)
+        self._global_names = []

         # For the fast parser.
         self.position_modifier = pt.PositionModifier()
-        p = PgenParser(grammar, self.convert_node, self.convert_leaf,
-                       self.error_recovery)
-        tokenizer = tokenizer or tokenize.source_tokens(source)
-        self.module = p.parse(self._tokenize(tokenizer))
-        if self.module.type != 'file_input':
-            # If there's only one statement, we get back a non-module. That's
-            # not what we want, we want a module, so we add it here:
-            self.module = self.convert_node(grammar,
-                                            grammar.symbol2number['file_input'],
-                                            [self.module])
-
-        if added_newline:
-            self.remove_last_newline()
-        self.module.used_names = self._used_names
-        self.module.path = module_path
-        self.module.global_names = self._global_names
-        self.module.error_statement_stacks = self._error_statement_stacks
+
+        added_newline = False
+        # The Python grammar needs a newline at the end of each statement.
+        if not source.endswith('\n') and start == 'file_input':
+            source += '\n'
+            added_newline = True
+
+        p = PgenParser(grammar, self.convert_node, self.convert_leaf,
+                       self.error_recovery, start_number)
+        if tokenizer is None:
+            tokenizer = tokenize.source_tokens(source)
+        try:
+            self._parsed = p.parse(self._tokenize(tokenizer))
+        except Parser.ParserError:
+            self._parsed = None
+        else:
+            if start == 'file_input' != self._parsed.type:
+                # If there's only one statement, we get back a non-module. That's
+                # not what we want, we want a module, so we add it here:
+                self._parsed = self.convert_node(grammar,
+                                                 grammar.symbol2number['file_input'],
+                                                 [self._parsed])
+
+            if added_newline:
+                self.remove_last_newline()
+
+    def get_parsed_node(self):
+        return self._parsed
+
+    def _tokenize(self, tokenizer):
+        for typ, value, start_pos, prefix in tokenizer:
+            if typ == ERRORTOKEN:
+                raise Parser.ParserError
+            elif typ == OP:
+                typ = token.opmap[value]
+            yield typ, value, prefix, start_pos
+
+    def error_recovery(self, *args, **kwargs):
+        raise Parser.ParserError

     def convert_node(self, grammar, type, children):
         """
@@ -178,7 +175,7 @@ class Parser(object):
         """
         symbol = grammar.number2symbol[type]
         try:
-            new_node = self._ast_mapping[symbol](children)
+            new_node = Parser.AST_MAPPING[symbol](children)
         except KeyError:
             new_node = pt.Node(symbol, children)
@@ -231,6 +228,83 @@ class Parser(object):
         else:
             return pt.Operator(self.position_modifier, value, start_pos, prefix)

+    def remove_last_newline(self):
+        """
+        In all of this we need to work with _start_pos, because if we worked
+        with start_pos, we would need to check the position_modifier as well
+        (which is accounted for in the start_pos property).
+        """
+        endmarker = self._parsed.children[-1]
+        # The newline is either in the endmarker as a prefix or the previous
+        # leaf as a newline token.
+        if endmarker.prefix.endswith('\n'):
+            endmarker.prefix = endmarker.prefix[:-1]
+            last_line = re.sub('.*\n', '', endmarker.prefix)
+            endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line)
+        else:
+            try:
+                newline = endmarker.get_previous()
+            except IndexError:
+                return  # This means that the parser is empty.
+            while True:
+                if newline.value == '':
+                    # Must be a DEDENT, just continue.
+                    try:
+                        newline = newline.get_previous()
+                    except IndexError:
+                        # If there's a statement that fails to be parsed, there
+                        # will be no previous leaf. So just ignore it.
+                        break
+                elif newline.value != '\n':
+                    # This may happen if error correction strikes and removes
+                    # a whole statement including '\n'.
+                    break
+                else:
+                    newline.value = ''
+                    if self._last_failed_start_pos > newline._start_pos:
+                        # It may be the case that there was a syntax error in a
+                        # function. In that case error correction removes the
+                        # right newline. So we use the previously assigned
+                        # _last_failed_start_pos variable to account for that.
+                        endmarker._start_pos = self._last_failed_start_pos
+                    else:
+                        endmarker._start_pos = newline._start_pos
+                    break
+
+
+class ParserWithRecovery(Parser):
+    """
+    This class is used to parse a Python file, it then divides them into a
+    class structure of different scopes.
+
+    :param grammar: The grammar object of pgen2. Loaded by load_grammar.
+    :param source: The codebase for the parser. Must be unicode.
+    :param module_path: The path of the module in the file system, may be None.
+    :type module_path: str
+    """
+    def __init__(self, grammar, source, module_path=None, tokenizer=None):
+        self.syntax_errors = []
+
+        self._omit_dedent_list = []
+        self._indent_counter = 0
+
+        # TODO do print absolute import detection here.
+        #try:
+        #    del python_grammar_no_print_statement.keywords["print"]
+        #except KeyError:
+        #    pass # Doesn't exist in the Python 3 grammar.
+
+        #if self.options["print_function"]:
+        #    python_grammar = pygram.python_grammar_no_print_statement
+        #else:
+        super(ParserWithRecovery, self).__init__(grammar, source, 'file_input', tokenizer)
+
+        self.module = self._parsed
+        self.module.used_names = self._used_names
+        self.module.path = module_path
+        self.module.global_names = self._global_names
+        self.module.error_statement_stacks = self._error_statement_stacks
+
     def error_recovery(self, grammar, stack, typ, value, start_pos, prefix,
                        add_token_callback):
         """
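
Taken together, the split yields two entry points: the strict Parser, which raises internally and returns None from get_parsed_node() on the first error, and ParserWithRecovery for whole modules. A usage sketch, assuming exactly the API introduced by this commit:

    from jedi._compatibility import u
    from jedi.parser import Parser, ParserWithRecovery, load_grammar

    grammar = load_grammar()

    # Strict, expression-level parsing via the new start symbol:
    p = Parser(grammar, u('foo(bar)'), start='eval_input')
    print(p.get_parsed_node())

    # Whole-module parsing keeps the old recovering behaviour:
    m = ParserWithRecovery(grammar, u('import os'), 'example.py')
    print(m.module)  # <Module: example.py@1-1>
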
@@ -349,46 +423,3 @@ class Parser(object):

     def __repr__(self):
         return "<%s: %s>" % (type(self).__name__, self.module)
-
-    def remove_last_newline(self):
-        """
-        In all of this we need to work with _start_pos, because if we worked
-        with start_pos, we would need to check the position_modifier as well
-        (which is accounted for in the start_pos property).
-        """
-        endmarker = self.module.children[-1]
-        # The newline is either in the endmarker as a prefix or the previous
-        # leaf as a newline token.
-        if endmarker.prefix.endswith('\n'):
-            endmarker.prefix = endmarker.prefix[:-1]
-            last_line = re.sub('.*\n', '', endmarker.prefix)
-            endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line)
-        else:
-            try:
-                newline = endmarker.get_previous()
-            except IndexError:
-                return  # This means that the parser is empty.
-            while True:
-                if newline.value == '':
-                    # Must be a DEDENT, just continue.
-                    try:
-                        newline = newline.get_previous()
-                    except IndexError:
-                        # If there's a statement that fails to be parsed, there
-                        # will be no previous leaf. So just ignore it.
-                        break
-                elif newline.value != '\n':
-                    # This may happen if error correction strikes and removes
-                    # a whole statement including '\n'.
-                    break
-                else:
-                    newline.value = ''
-                    if self._last_failed_start_pos > newline._start_pos:
-                        # It may be the case that there was a syntax error in a
-                        # function. In that case error correction removes the
-                        # right newline. So we use the previously assigned
-                        # _last_failed_start_pos variable to account for that.
-                        endmarker._start_pos = self._last_failed_start_pos
-                    else:
-                        endmarker._start_pos = newline._start_pos
-                    break
@@ -8,7 +8,7 @@ from itertools import chain

 from jedi._compatibility import use_metaclass
 from jedi import settings
-from jedi.parser import Parser
+from jedi.parser import ParserWithRecovery
 from jedi.parser import tree
 from jedi import cache
 from jedi import debug
@@ -52,8 +52,9 @@ class FastModule(tree.Module):
         return "<fast.%s: %s@%s-%s>" % (type(self).__name__, self.name,
                                         self.start_pos[0], self.end_pos[0])

-    # To avoid issues with with the `parser.Parser`, we need setters that do
-    # nothing, because if pickle comes along and sets those values.
+    # To avoid issues with with the `parser.ParserWithRecovery`, we need
+    # setters that do nothing, because if pickle comes along and sets those
+    # values.
     @global_names.setter
     def global_names(self, value):
         pass
@@ -99,10 +100,10 @@ class CachedFastParser(type):
     """ This is a metaclass for caching `FastParser`. """
     def __call__(self, grammar, source, module_path=None):
         if not settings.fast_parser:
-            return Parser(grammar, source, module_path)
+            return ParserWithRecovery(grammar, source, module_path)

         pi = cache.parser_cache.get(module_path, None)
-        if pi is None or isinstance(pi.parser, Parser):
+        if pi is None or isinstance(pi.parser, ParserWithRecovery):
             p = super(CachedFastParser, self).__call__(grammar, source, module_path)
         else:
             p = pi.parser  # pi is a `cache.ParserCacheItem`
@@ -432,7 +433,7 @@ class FastParser(use_metaclass(CachedFastParser)):
         else:
             tokenizer = FastTokenizer(parser_code)
         self.number_parsers_used += 1
-        p = Parser(self._grammar, parser_code, self.module_path, tokenizer=tokenizer)
+        p = ParserWithRecovery(self._grammar, parser_code, self.module_path, tokenizer=tokenizer)

         end = line_offset + p.module.end_pos[0]
         used_lines = self._lines[line_offset:end - 1]
@@ -60,7 +60,7 @@ class PgenParser(object):

     """

-    def __init__(self, grammar, convert_node, convert_leaf, error_recovery):
+    def __init__(self, grammar, convert_node, convert_leaf, error_recovery, start):
         """Constructor.

         The grammar argument is a grammar.Grammar instance; see the
@@ -90,8 +90,6 @@ class PgenParser(object):
         self.convert_node = convert_node
         self.convert_leaf = convert_leaf

-        # Prepare for parsing.
-        start = self.grammar.start
         # Each stack entry is a tuple: (dfa, state, node).
         # A node is a tuple: (type, children),
         # where children is a list of nodes or None
@@ -149,7 +149,7 @@ ALWAYS_BREAK_TOKENS = (';', 'import', 'from', 'class', 'def', 'try', 'except',

 def source_tokens(source):
     """Generate tokens from a the source code (string)."""
-    source = source + '\n'  # end with \n, because the parser needs it
+    source = source
     readline = StringIO(source).readline
     return generate_tokens(readline)
@@ -165,6 +165,7 @@ def generate_tokens(readline):
     paren_level = 0  # count parentheses
     indents = [0]
     lnum = 0
+    max = 0
     numchars = '0123456789'
     contstr = ''
     contline = None
@@ -282,9 +283,12 @@ def generate_tokens(readline):
             paren_level -= 1
         yield OP, token, spos, prefix

-    end_pos = (lnum, max - 1)
+    if new_line:
+        end_pos = lnum + 1, 0
+    else:
+        end_pos = lnum, max
     # As the last position we just take the maximally possible position. We
     # remove -1 for the last new line.
     for indent in indents[1:]:
         yield DEDENT, '', end_pos, ''
-    yield ENDMARKER, '', end_pos, prefix
+    yield ENDMARKER, '', end_pos, additional_prefix
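
The trailing DEDENT/ENDMARKER positions being adjusted here can be observed with the stdlib tokenizer as well (its exact positions differ in detail from jedi's):

    import io
    import tokenize

    tokens = list(tokenize.generate_tokens(io.StringIO('if x:\n    y\n').readline))
    # The stream ends with one DEDENT per open indent, then ENDMARKER:
    for tok in tokens[-3:]:
        print(tokenize.tok_name[tok.type], tok.start)
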
@@ -14,8 +14,8 @@ The easiest way to play with this module is to use :class:`parsing.Parser`.
 :attr:`parsing.Parser.module` holds an instance of :class:`Module`:

 >>> from jedi._compatibility import u
->>> from jedi.parser import Parser, load_grammar
->>> parser = Parser(load_grammar(), u('import os'), 'example.py')
+>>> from jedi.parser import ParserWithRecovery, load_grammar
+>>> parser = ParserWithRecovery(load_grammar(), u('import os'), 'example.py')
 >>> submodule = parser.module
 >>> submodule
 <Module: example.py@1-1>
@@ -873,7 +873,10 @@ class Function(ClassOrFunc):

     def annotation(self):
         try:
-            return self.children[6]  # 6th element: def foo(...) -> bar
+            if self.children[3] == "->":
+                return self.children[4]
+            assert self.children[3] == ":"
+            return None
         except IndexError:
             return None
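
The children[3]/children[4] indices follow from the funcdef layout ['def', name, params, '->', annotation, ':', suite]. The stdlib ast exposes the same node as the returns attribute:

    import ast

    funcdef = ast.parse('def foo(x) -> int:\n    pass\n').body[0]
    # jedi checks children[3] == '->' and takes children[4]; in the
    # stdlib the return annotation is simply:
    print(ast.dump(funcdef.returns))  # Name(id='int', ctx=Load())
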
@@ -952,6 +955,10 @@ class Lambda(Function):
     def is_generator(self):
         return False

+    def annotation(self):
+        # lambda functions do not support annotations
+        return None
+
     @property
     def yields(self):
         return []
@@ -1404,8 +1411,14 @@ class Param(BaseNode):
         return None

     def annotation(self):
-        # Generate from tfpdef.
-        raise NotImplementedError
+        tfpdef = self._tfpdef()
+        if is_node(tfpdef, 'tfpdef'):
+            assert tfpdef.children[1] == ":"
+            assert len(tfpdef.children) == 3
+            annotation = tfpdef.children[2]
+            return annotation
+        else:
+            return None

     def _tfpdef(self):
         """
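
The same idea one level down: a typed parameter ('tfpdef') has the children [name, ':', annotation], while a bare name carries no annotation, hence the is_node() check. Stdlib equivalent:

    import ast

    funcdef = ast.parse('def f(x: int, y): pass').body[0]
    params = funcdef.args.args
    print(ast.dump(params[0].annotation))  # Name(id='int', ctx=Load())
    print(params[1].annotation)            # None
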
@@ -4,7 +4,7 @@ import keyword

 from jedi import cache
 from jedi import common
-from jedi.parser import tokenize, Parser
+from jedi.parser import tokenize, ParserWithRecovery
 from jedi._compatibility import u
 from jedi.parser.fast import FastParser
 from jedi.parser import tree
@@ -284,7 +284,7 @@ class UserContextParser(object):
             # Don't pickle that module, because the main module is changing quickly
             cache.save_parser(self._path, parser, pickling=False)
         else:
-            parser = Parser(self._grammar, self._source, self._path)
+            parser = ParserWithRecovery(self._grammar, self._source, self._path)
             self._parser_done_callback(parser)
         return parser