
Don't use as in imports when not needed.

Dave Halter
2017-03-16 08:45:12 +01:00
parent 06702d2a40
commit b136800cfc

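In short, the change drops the pt alias and refers to the jedi.parser.tree module by its real name everywhere. A minimal before/after sketch of the pattern, using only names that appear in the diff below (symbol and children are local variables from the surrounding parser code):

    # Before: the tree module is bound to a short alias.
    from jedi.parser import tree as pt
    node = pt.Node(symbol, children)

    # After: import the module plainly and use its own name.
    from jedi.parser import tree
    node = tree.Node(symbol, children)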

@@ -1,6 +1,6 @@
 import re
 
-from jedi.parser import tree as pt
+from jedi.parser import tree
 from jedi.parser import tokenize
 from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
                                STRING, tok_name)
@@ -10,33 +10,33 @@ from jedi.parser.parser import ParserSyntaxError
 class Parser(object):
     AST_MAPPING = {
-        'expr_stmt': pt.ExprStmt,
-        'classdef': pt.Class,
-        'funcdef': pt.Function,
-        'file_input': pt.Module,
-        'import_name': pt.ImportName,
-        'import_from': pt.ImportFrom,
-        'break_stmt': pt.KeywordStatement,
-        'continue_stmt': pt.KeywordStatement,
-        'return_stmt': pt.ReturnStmt,
-        'raise_stmt': pt.KeywordStatement,
-        'yield_expr': pt.YieldExpr,
-        'del_stmt': pt.KeywordStatement,
-        'pass_stmt': pt.KeywordStatement,
-        'global_stmt': pt.GlobalStmt,
-        'nonlocal_stmt': pt.KeywordStatement,
-        'print_stmt': pt.KeywordStatement,
-        'assert_stmt': pt.AssertStmt,
-        'if_stmt': pt.IfStmt,
-        'with_stmt': pt.WithStmt,
-        'for_stmt': pt.ForStmt,
-        'while_stmt': pt.WhileStmt,
-        'try_stmt': pt.TryStmt,
-        'comp_for': pt.CompFor,
-        'decorator': pt.Decorator,
-        'lambdef': pt.Lambda,
-        'old_lambdef': pt.Lambda,
-        'lambdef_nocond': pt.Lambda,
+        'expr_stmt': tree.ExprStmt,
+        'classdef': tree.Class,
+        'funcdef': tree.Function,
+        'file_input': tree.Module,
+        'import_name': tree.ImportName,
+        'import_from': tree.ImportFrom,
+        'break_stmt': tree.KeywordStatement,
+        'continue_stmt': tree.KeywordStatement,
+        'return_stmt': tree.ReturnStmt,
+        'raise_stmt': tree.KeywordStatement,
+        'yield_expr': tree.YieldExpr,
+        'del_stmt': tree.KeywordStatement,
+        'pass_stmt': tree.KeywordStatement,
+        'global_stmt': tree.GlobalStmt,
+        'nonlocal_stmt': tree.KeywordStatement,
+        'print_stmt': tree.KeywordStatement,
+        'assert_stmt': tree.AssertStmt,
+        'if_stmt': tree.IfStmt,
+        'with_stmt': tree.WithStmt,
+        'for_stmt': tree.ForStmt,
+        'while_stmt': tree.WhileStmt,
+        'try_stmt': tree.TryStmt,
+        'comp_for': tree.CompFor,
+        'decorator': tree.Decorator,
+        'lambdef': tree.Lambda,
+        'old_lambdef': tree.Lambda,
+        'lambdef_nocond': tree.Lambda,
     }
 
     def __init__(self, grammar, source, start_symbol='file_input',
@@ -116,29 +116,29 @@ class Parser(object):
                 # ones and therefore have pseudo start/end positions and no
                 # prefixes. Just ignore them.
                 children = [children[0]] + children[2:-1]
-            return pt.Node(symbol, children)
+            return tree.Node(symbol, children)
 
     def convert_leaf(self, grammar, type, value, prefix, start_pos):
         # print('leaf', repr(value), token.tok_name[type])
         if type == tokenize.NAME:
             if value in grammar.keywords:
-                return pt.Keyword(value, start_pos, prefix)
+                return tree.Keyword(value, start_pos, prefix)
             else:
-                name = pt.Name(value, start_pos, prefix)
+                name = tree.Name(value, start_pos, prefix)
                 # Keep a listing of all used names
                 arr = self._used_names.setdefault(name.value, [])
                 arr.append(name)
                 return name
         elif type == STRING:
-            return pt.String(value, start_pos, prefix)
+            return tree.String(value, start_pos, prefix)
         elif type == NUMBER:
-            return pt.Number(value, start_pos, prefix)
+            return tree.Number(value, start_pos, prefix)
         elif type == NEWLINE:
-            return pt.Newline(value, start_pos, prefix)
+            return tree.Newline(value, start_pos, prefix)
         elif type == ENDMARKER:
-            return pt.EndMarker(value, start_pos, prefix)
+            return tree.EndMarker(value, start_pos, prefix)
         else:
-            return pt.Operator(value, start_pos, prefix)
+            return tree.Operator(value, start_pos, prefix)
 
     def remove_last_newline(self):
         endmarker = self._parsed.children[-1]
@@ -236,7 +236,7 @@ class ParserWithRecovery(Parser):
             index -= 2
             (_, _, (type_, suite_nodes)) = stack[index]
             symbol = grammar.number2symbol[type_]
-            suite_nodes.append(pt.Node(symbol, list(nodes)))
+            suite_nodes.append(tree.Node(symbol, list(nodes)))
             # Remove
             nodes[:] = []
             nodes = suite_nodes
@@ -251,7 +251,7 @@ class ParserWithRecovery(Parser):
                 # Otherwise the parser will get into trouble and DEDENT too early.
                 self._omit_dedent_list.append(self._indent_counter)
             else:
-                error_leaf = pt.ErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix)
+                error_leaf = tree.ErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix)
                 stack[-1][2][1].append(error_leaf)
 
     def _stack_removal(self, grammar, stack, arcs, start_index, value, start_pos):
@@ -266,7 +266,7 @@ class ParserWithRecovery(Parser):
                 failed_stack.append((symbol, nodes))
                 all_nodes += nodes
         if failed_stack:
-            stack[start_index - 1][2][1].append(pt.ErrorNode(all_nodes))
+            stack[start_index - 1][2][1].append(tree.ErrorNode(all_nodes))
 
         stack[start_index:] = []
         return failed_stack