forked from VimPlug/jedi

(Mostly) whitespace fixes

Laurens Van Houtven
2013-07-10 14:45:47 +02:00
parent 8dae2049c9
commit cdc41128b4
18 changed files with 165 additions and 162 deletions
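
Nearly every hunk below is the same cleanup: PEP 8 asks for a single space after the "#" that opens a comment (the convention pycodestyle reports as E265). A minimal before/after sketch, mirroring the kind of commented-out debug prints this commit touches:

    # Before: no space after the hash
    #print tok, tokenize.tok_name[token_type]

    # After: PEP 8 comment style, one space after the hash
    # print tok, tokenize.tok_name[token_type]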

View File

@@ -148,7 +148,7 @@ class NoErrorTokenizer(object):
                                   tokenize.NEWLINE, tokenize.DEDENT) \
                 and c[0] not in (tokenize.COMMENT, tokenize.INDENT,
                                  tokenize.NL, tokenize.NEWLINE, tokenize.DEDENT):
-            #print c, tokenize.tok_name[c[0]]
+            # print c, tokenize.tok_name[c[0]]
             tok = c[1]
             indent = c[2][1]

View File

@@ -62,4 +62,4 @@ def print_to_stdout(level, str_out):
     print(col + str_out + Fore.RESET)
-#debug_function = print_to_stdout
+# debug_function = print_to_stdout

View File

@@ -37,7 +37,7 @@ REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
 @cache.memoize_default()
 def follow_param(param):
     func = param.parent_function
-    #print func, param, param.parent_function
+    # print func, param, param.parent_function
     param_str = _search_param_in_docstr(func.docstr, str(param.get_name()))
     user_position = (1, 0)

View File

@@ -53,7 +53,9 @@ class Executable(pr.IsScope):
 class Instance(use_metaclass(cache.CachedMetaClass, Executable)):
     """ This class is used to evaluate instances. """
     def __init__(self, base, var_args=()):
         super(Instance, self).__init__(base, var_args)
         if str(base.name) in ['list', 'set'] \
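
The Instance hunk above builds its base class via use_metaclass(cache.CachedMetaClass, Executable). A minimal sketch of how that pattern plausibly fits together: the helper follows the shape of jedi's _compatibility.use_metaclass, while CachedMeta is an illustrative stand-in for cache.CachedMetaClass, not its actual implementation:

    def use_metaclass(meta, *bases):
        # Build a throwaway base class *through* the metaclass, so subclasses
        # pick it up on both Python 2 and Python 3 without the version-specific
        # __metaclass__ / metaclass= syntax.
        return meta('HackClass', bases, {})

    class CachedMeta(type):
        # Illustrative stand-in: reuse an existing instance when the class
        # is called again with the same arguments.
        _cache = {}

        def __call__(cls, *args):
            key = (cls, args)
            if key not in cls._cache:
                cls._cache[key] = super(CachedMeta, cls).__call__(*args)
            return cls._cache[key]

    class Executable(object):
        def __init__(self, base, var_args=()):
            self.base, self.var_args = base, var_args

    class Instance(use_metaclass(CachedMeta, Executable)):
        pass

    assert Instance('list') is Instance('list')  # same args -> cached instance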

View File

@@ -328,7 +328,7 @@ class FastParser(use_metaclass(CachedFastParser)):
                 nodes += self.current_node.old_children
             # check if code_part has already been parsed
-            #print '#'*45,line_offset, p and p.end_pos, '\n', code_part
+            # print '#'*45,line_offset, p and p.end_pos, '\n', code_part
             p, node = self._get_parser(code_part, code[start:],
                                        line_offset, nodes, not is_first)
@@ -365,7 +365,7 @@ class FastParser(use_metaclass(CachedFastParser)):
                 is_first = False
             else:
-                #print '#'*45, line_offset, p.end_pos, 'theheck\n', code_part
+                # print '#'*45, line_offset, p.end_pos, 'theheck\n', code_part
                 pass
             line_offset += lines
@@ -378,7 +378,7 @@ class FastParser(use_metaclass(CachedFastParser)):
         self.module.end_pos = self.parsers[-1].end_pos
-        #print(self.parsers[0].module.get_code())
+        # print(self.parsers[0].module.get_code())
         del code

     def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):

View File

@@ -163,7 +163,7 @@ class ModuleWithCursor(Module):
             last_type = None
             try:
                 for token_type, tok, start, end, line in gen:
-                    #print 'tok', token_type, tok, force_point
+                    # print 'tok', token_type, tok, force_point
                     if last_type == token_type == tokenize.NAME:
                         string += ' '

View File

@@ -348,7 +348,7 @@ class Parser(object):
                         or tok in not_first_break and not tok_list
                         or tok in breaks and level <= 0):
                 try:
-                    #print 'parse_stmt', tok, tokenize.tok_name[token_type]
+                    # print 'parse_stmt', tok, tokenize.tok_name[token_type]
                     tok_list.append(self.current + (self.start_pos,))
                     if tok == 'as':
                         token_type, tok = self.next()
@@ -387,7 +387,7 @@ class Parser(object):
         if not tok_list:
             return None, tok
-        #print 'new_stat', set_vars, used_vars
+        # print 'new_stat', set_vars, used_vars
         if self.freshscope and not self.no_docstr and len(tok_list) == 1 \
                 and self.last_token[0] == tokenize.STRING:
             self._scope.add_docstr(self.last_token[1])
@@ -469,7 +469,7 @@ class Parser(object):
         # This iterator stuff is not intentional. It grew historically.
         for token_type, tok in self.iterator:
             self.module.temp_used_names = []
-            #debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\
+            # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\
             #    % (tok, tokenize.tok_name[token_type], start_position[0]))
             while token_type == tokenize.DEDENT and self._scope != self.module:
@@ -561,7 +561,7 @@ class Parser(object):
                 self._check_user_stmt(i)
                 self._scope.add_import(i)
             self.freshscope = False
-        #loops
+        # loops
         elif tok == 'for':
             set_stmt, tok = self._parse_statement(added_breaks=['in'])
             if tok == 'in':

View File

@@ -24,7 +24,7 @@ class RecursionDecorator(object):
         self.reset()

     def __call__(self, stmt, *args, **kwargs):
-        #print stmt, len(self.node_statements())
+        # print stmt, len(self.node_statements())
         if self.push_stmt(stmt):
             return []
         else:

View File

@@ -208,7 +208,8 @@ def generate_tokens(readline):
                     yield TokenInfo(NL, line[nl_pos:],
                                     (lnum, nl_pos), (lnum, len(line)), line)
                 else:
-                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
-                                    (lnum, pos), (lnum, len(line)), line)
+                    yield TokenInfo(
+                        (NL, COMMENT)[line[pos] == '#'], line[pos:],
+                        (lnum, pos), (lnum, len(line)), line)
                 continue
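
The rewrapped call above also carries a tokenizer idiom worth unpacking: (NL, COMMENT)[line[pos] == '#'] indexes a two-tuple with a boolean. Because bool is a subclass of int, False selects index 0 and True selects index 1, the same choice a conditional expression spells out. A small self-contained sketch, using the real constants from the standard tokenize module:

    from tokenize import COMMENT, NL

    line, pos = '# a comment\n', 0
    # bool is an int: False -> index 0 (NL), True -> index 1 (COMMENT)
    tok_type = (NL, COMMENT)[line[pos] == '#']
    assert tok_type == COMMENT

    # The same selection as an explicit conditional expression:
    tok_type = COMMENT if line[pos] == '#' else NL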