mirror of https://github.com/davidhalter/parso.git
synced 2025-12-06 12:54:29 +08:00
flake8 changes
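Judging by the hunks below, this is a style-only cleanup: it removes dead imports and commented-out debug code, wraps long lines, renames ambiguous single-letter variables, normalizes comment and keyword-argument spacing, and adds `# noqa` markers where flake8's analysis falls short. A quick way to sanity-check such a commit is to run flake8 on the tree before and after; a minimal sketch, assuming flake8 is installed and the repository is checked out locally:

    import subprocess

    # Run flake8 over the package and the tests; for a style-only commit
    # the after-run should report strictly fewer issues than the before-run.
    result = subprocess.run(
        ['python', '-m', 'flake8', 'parso', 'test'],
        capture_output=True,
        text=True,
    )
    print(result.stdout or 'flake8: no issues found')
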
@@ -5,7 +5,6 @@ import hashlib
 import gc
 import shutil
 import platform
-import errno
 import logging
 import warnings

@@ -92,7 +91,8 @@ On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,

 _CACHE_CLEAR_THRESHOLD = 60 * 60 * 24

-def _get_cache_clear_lock(cache_path = None):
+
+def _get_cache_clear_lock(cache_path=None):
     """
     The path where the cache lock is stored.

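Two pycodestyle rules drive this hunk: E251 (no spaces around `=` in keyword arguments and parameter defaults) and E302 (two blank lines before a top-level definition). A minimal sketch with hypothetical names, not parso's actual code:

    _THRESHOLD = 60 * 60 * 24  # module-level constant, as in the hunk


    def get_lock_path(cache_path=None):  # E251: 'cache_path = None' is flagged
        # Hypothetical stand-in for the real helper.
        return cache_path or '/tmp/parso-cache.lock'


    print(get_lock_path())  # the two blank lines above the def satisfy E302
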
@@ -231,17 +231,17 @@ def clear_inactive_cache(
             ):
                 try:
                     os.remove(file.path)
                 except OSError:  # silently ignore all failures
                     continue
     else:
         return True


-def _remove_cache_and_update_lock(cache_path = None):
+def _remove_cache_and_update_lock(cache_path=None):
     lock = _get_cache_clear_lock(cache_path=cache_path)
     clear_lock_time = lock.get_last_modified()
     if (
         clear_lock_time is None  # first time
         or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
     ):
         if not lock._touch():
@@ -250,7 +250,8 @@ def _remove_cache_and_update_lock(cache_path = None):
             # not a big problem.
             return False

-    clear_inactive_cache(cache_path = cache_path)
+    clear_inactive_cache(cache_path=cache_path)
+

 def _get_hashed_path(hashed_grammar, path, cache_path=None):
     directory = _get_cache_directory_path(cache_path=cache_path)
@@ -23,8 +23,9 @@ class Grammar(object):
     :py:func:`parso.load_grammar` returns instances of this class.

     Creating custom none-python grammars by calling this is not supported, yet.
+
+    :param text: A BNF representation of your grammar.
     """
-    #:param text: A BNF representation of your grammar.
     _error_normalizer_config = None
     _token_namespace = None
     _default_normalizer_config = pep8.PEP8NormalizerConfig()
@@ -133,9 +134,9 @@ class Grammar(object):
                 new_lines=lines
             )
             try_to_save_module(self._hashed, file_io, new_node, lines,
                               # Never pickle in pypy, it's slow as hell.
                               pickling=cache and not is_pypy,
                               cache_path=cache_path)
             return new_node

         tokens = self._tokenizer(lines, start_pos=start_pos)
@@ -149,9 +150,9 @@ class Grammar(object):

         if cache or diff_cache:
             try_to_save_module(self._hashed, file_io, root_node, lines,
                               # Never pickle in pypy, it's slow as hell.
                               pickling=cache and not is_pypy,
                               cache_path=cache_path)
         return root_node

     def _get_token_namespace(self):
@@ -252,7 +253,9 @@ def load_grammar(**kwargs):
             grammar = PythonGrammar(version_info, bnf_text)
             return _loaded_grammars.setdefault(path, grammar)
         except FileNotFoundError:
-            message = "Python version %s.%s is currently not supported." % (version_info.major, version_info.minor)
+            message = "Python version %s.%s is currently not supported." % (
+                version_info.major, version_info.minor
+            )
             raise NotImplementedError(message)
     else:
         raise NotImplementedError("No support for language %s." % language)
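Wrapping the long `%`-format expression inside its existing parentheses is the standard fix for E501 (line too long): the operands move onto their own lines while the statement stays a single expression, with no backslash continuation. The same pattern in isolation (version numbers made up):

    version_major, version_minor = 3, 4

    # The '(' already present in the expression lets it span lines.
    message = "Python version %s.%s is currently not supported." % (
        version_major, version_minor
    )
    print(message)
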
@@ -149,7 +149,6 @@ def _simplify_dfas(dfas):
         for j in range(i + 1, len(dfas)):
             state_j = dfas[j]
             if state_i == state_j:
-                #print " unify", i, j
                 del dfas[j]
                 for state in dfas:
                     state.unifystate(state_j, state_i)
@@ -245,14 +244,14 @@ def generate_grammar(bnf_grammar, token_namespace):
     rule_to_dfas = {}
     start_nonterminal = None
     for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
-        #_dump_nfa(nfa_a, nfa_z)
+        # _dump_nfa(nfa_a, nfa_z)
         dfas = _make_dfas(nfa_a, nfa_z)
-        #_dump_dfas(dfas)
+        # _dump_dfas(dfas)
         # oldlen = len(dfas)
         _simplify_dfas(dfas)
         # newlen = len(dfas)
         rule_to_dfas[nfa_a.from_rule] = dfas
-        #print(nfa_a.from_rule, oldlen, newlen)
+        # print(nfa_a.from_rule, oldlen, newlen)

         if start_nonterminal is None:
             start_nonterminal = nfa_a.from_rule
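Several changes in this file only touch commented-out debug calls: E265 requires block comments to start with `# ` (hash plus space), so `#_dump_nfa(...)` becomes `# _dump_nfa(...)`; the `#print` line in the previous hunk was deleted outright, which silences the warning just as well. A tiny sketch:

    # E265-clean block comments start with '#' followed by a space:
    # _dump_state(42)
    # The spaceless variant, like '#_dump_state(42)', is flagged.

    total = 41 + 1
    print(total)
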
@@ -19,6 +19,7 @@ ALLOWED_FUTURES = (
 )
 _COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')

+
 def _get_rhs_name(node, version):
     type_ = node.type
     if type_ == "lambdef":
@@ -39,7 +40,7 @@ def _get_rhs_name(node, version):
         elif (
             first == "("
             and (second == ")"
                  or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
         ):
             return "tuple"
         elif first == "(":
@@ -79,8 +80,7 @@ def _get_rhs_name(node, version):
             elif trailer.children[0] == ".":
                 return "attribute"
     elif (
-        ("expr" in type_
-         and "star_expr" not in type_)  # is a substring
+        ("expr" in type_ and "star_expr" not in type_)  # is a substring
         or "_test" in type_
         or type_ in ("term", "factor")
     ):
@@ -91,7 +91,8 @@ def _get_rhs_name(node, version):
         return "tuple"
     elif type_ == "fstring":
         return "f-string expression"
     return type_  # shouldn't reach here

+
 def _iter_stmts(scope):
     """
@@ -420,7 +421,9 @@ class ErrorFinder(Normalizer):
             message = 'invalid syntax'
             if (
                 self.version >= (3, 9)
-                and leaf.value in _get_token_collection(self.version).always_break_tokens
+                and leaf.value in _get_token_collection(
+                    self.version
+                ).always_break_tokens
             ):
                 message = "f-string: " + message
             else:
@@ -1145,6 +1148,7 @@ class _CompForRule(_CheckAssignmentRule):
 class _ExprStmtRule(_CheckAssignmentRule):
     message = "illegal expression for augmented assignment"
     extended_message = "'{target}' is an " + message
+
     def is_issue(self, node):
         augassign = node.children[1]
         is_aug_assign = augassign != '=' and augassign.type != 'annassign'
@@ -1174,6 +1178,7 @@ class _ExprStmtRule(_CheckAssignmentRule):
             ),
         )

+
 @ErrorFinder.register_rule(type='with_item')
 class _WithItemRule(_CheckAssignmentRule):
     def is_issue(self, with_item):
@@ -71,7 +71,6 @@ class BracketNode(IndentationNode):
                 n = n.parent
             parent_indentation = n.indentation

-
         next_leaf = leaf.get_next_leaf()
         if '\n' in next_leaf.prefix:
             # This implies code like:
@@ -93,7 +92,7 @@ class BracketNode(IndentationNode):
             if '\t' in config.indentation:
                 self.indentation = None
             else:
                 self.indentation = ' ' * expected_end_indent
             self.bracket_indentation = self.indentation
             self.type = IndentationTypes.VERTICAL_BRACKET

@@ -137,7 +136,7 @@ class BackslashNode(IndentationNode):
                 self.indentation = parent_indentation + config.indentation
             else:
                 # +1 because there is a space.
                 self.indentation = ' ' * (equals.end_pos[1] + 1)
         else:
             self.indentation = parent_indentation + config.indentation
         self.bracket_indentation = self.indentation
@@ -190,7 +189,8 @@ class PEP8Normalizer(ErrorFinder):
                 expr_stmt = node.parent
                 # Check if it's simply defining a single name, not something like
                 # foo.bar or x[1], where using a lambda could make more sense.
-                if expr_stmt.type == 'expr_stmt' and any(n.type == 'name' for n in expr_stmt.children[:-2:2]):
+                if expr_stmt.type == 'expr_stmt' and any(n.type == 'name'
+                                                         for n in expr_stmt.children[:-2:2]):
                     self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
             elif typ == 'try_stmt':
                 for child in node.children:
@@ -221,7 +221,6 @@ class PEP8Normalizer(ErrorFinder):
         if typ in _IMPORT_TYPES:
             simple_stmt = node.parent
             module = simple_stmt.parent
-            #if module.type == 'simple_stmt':
             if module.type == 'file_input':
                 index = module.children.index(simple_stmt)
                 for child in module.children[:index]:
@@ -406,7 +405,6 @@ class PEP8Normalizer(ErrorFinder):
                     and leaf.parent.parent.type == 'decorated':
                 self.add_issue(part, 304, "Blank lines found after function decorator")

-
         self._newline_count += 1

         if type_ == 'backslash':
@@ -461,33 +459,62 @@ class PEP8Normalizer(ErrorFinder):
                 else:
                     should_be_indentation = node.indentation
                 if self._in_suite_introducer and indentation == \
                         node.get_latest_suite_node().indentation \
                         + self._config.indentation:
                     self.add_issue(part, 129, "Line with same indent as next logical block")
                 elif indentation != should_be_indentation:
                     if not self._check_tabs_spaces(spacing) and part.value != '\n':
                         if value in '])}':
                             if node.type == IndentationTypes.VERTICAL_BRACKET:
-                                self.add_issue(part, 124, "Closing bracket does not match visual indentation")
+                                self.add_issue(
+                                    part,
+                                    124,
+                                    "Closing bracket does not match visual indentation"
+                                )
                             else:
-                                self.add_issue(part, 123, "Losing bracket does not match indentation of opening bracket's line")
+                                self.add_issue(
+                                    part,
+                                    123,
+                                    "Losing bracket does not match "
+                                    "indentation of opening bracket's line"
+                                )
                         else:
                             if len(indentation) < len(should_be_indentation):
                                 if node.type == IndentationTypes.VERTICAL_BRACKET:
-                                    self.add_issue(part, 128, 'Continuation line under-indented for visual indent')
+                                    self.add_issue(
+                                        part,
+                                        128,
+                                        'Continuation line under-indented for visual indent'
+                                    )
                                 elif node.type == IndentationTypes.BACKSLASH:
-                                    self.add_issue(part, 122, 'Continuation line missing indentation or outdented')
+                                    self.add_issue(
+                                        part,
+                                        122,
+                                        'Continuation line missing indentation or outdented'
+                                    )
                                 elif node.type == IndentationTypes.IMPLICIT:
                                     self.add_issue(part, 135, 'xxx')
                                 else:
-                                    self.add_issue(part, 121, 'Continuation line under-indented for hanging indent')
+                                    self.add_issue(
+                                        part,
+                                        121,
+                                        'Continuation line under-indented for hanging indent'
+                                    )
                             else:
                                 if node.type == IndentationTypes.VERTICAL_BRACKET:
-                                    self.add_issue(part, 127, 'Continuation line over-indented for visual indent')
+                                    self.add_issue(
+                                        part,
+                                        127,
+                                        'Continuation line over-indented for visual indent'
+                                    )
                                 elif node.type == IndentationTypes.IMPLICIT:
                                     self.add_issue(part, 136, 'xxx')
                                 else:
-                                    self.add_issue(part, 126, 'Continuation line over-indented for hanging indent')
+                                    self.add_issue(
+                                        part,
+                                        126,
+                                        'Continuation line over-indented for hanging indent'
+                                    )
             else:
                 self._check_spacing(part, spacing)

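The long `self.add_issue(...)` calls are reflowed one argument per line; for code 123, even the message alone is over the limit, so it is split into two adjacent string literals, which Python concatenates at compile time. Both patterns in a self-contained sketch (`add_issue` here is a hypothetical stand-in, not parso's method):

    def add_issue(node, code, message):
        # Hypothetical stand-in for PEP8Normalizer.add_issue.
        print(code, message)


    add_issue(
        'part',
        123,
        "Losing bracket does not match "         # adjacent literals are
        "indentation of opening bracket's line"  # joined at compile time
    )
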
@@ -524,7 +551,7 @@ class PEP8Normalizer(ErrorFinder):
             else:
                 last_column = part.end_pos[1]
             if last_column > self._config.max_characters \
-                    and spacing.start_pos[1] <= self._config.max_characters :
+                    and spacing.start_pos[1] <= self._config.max_characters:
                 # Special case for long URLs in multi-line docstrings or comments,
                 # but still report the error when the 72 first chars are whitespaces.
                 report = True
@@ -538,7 +565,7 @@ class PEP8Normalizer(ErrorFinder):
                         part,
                         501,
                         'Line too long (%s > %s characters)' %
                         (last_column, self._config.max_characters),
                     )

     def _check_spacing(self, part, spacing):
@@ -573,11 +600,11 @@ class PEP8Normalizer(ErrorFinder):
                 message = "Whitespace before '%s'" % part.value
                 add_if_spaces(spacing, 202, message)
             elif part in (',', ';') or part == ':' \
                     and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
                 message = "Whitespace before '%s'" % part.value
                 add_if_spaces(spacing, 203, message)
             elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
                 pass  # TODO
             elif prev in (',', ';', ':'):
                 add_not_spaces(spacing, 231, "missing whitespace after '%s'")
             elif part == ':':  # Is a subscript
@@ -602,9 +629,17 @@ class PEP8Normalizer(ErrorFinder):
                 if param.type == 'param' and param.annotation:
                     add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
                 else:
-                    add_if_spaces(spacing, 251, 'Unexpected spaces around keyword / parameter equals')
+                    add_if_spaces(
+                        spacing,
+                        251,
+                        'Unexpected spaces around keyword / parameter equals'
+                    )
             elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
-                add_not_spaces(spacing, 227, 'Missing whitespace around bitwise or shift operator')
+                add_not_spaces(
+                    spacing,
+                    227,
+                    'Missing whitespace around bitwise or shift operator'
+                )
             elif part == '%' or prev == '%':
                 add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
             else:
@@ -621,8 +656,7 @@ class PEP8Normalizer(ErrorFinder):

         if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
             message_225 = 'Missing whitespace between tokens'
-            #print('xy', spacing)
-            #self.add_issue(spacing, 225, message_225)
+            # self.add_issue(spacing, 225, message_225)
             # TODO why only brackets?
             if part in _OPENING_BRACKETS:
                 message = "Whitespace before '%s'" % part.value
@@ -664,7 +698,8 @@ class PEP8Normalizer(ErrorFinder):
                     self.add_issue(leaf, 711, message)
                     break
                 elif node.value in ('True', 'False'):
-                    message = "comparison to False/True should be 'if cond is True:' or 'if cond:'"
+                    message = "comparison to False/True should be " \
+                              "'if cond is True:' or 'if cond:'"
                     self.add_issue(leaf, 712, message)
                     break
         elif leaf.value in ('in', 'is'):
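Outside of brackets, a long assignment can only continue with an explicit `\`; combined with implicit string concatenation it keeps the message a single value. The same pattern stand-alone:

    message = "comparison to False/True should be " \
              "'if cond is True:' or 'if cond:'"
    assert message.endswith("'if cond:'")
    print(message)
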
@@ -679,6 +714,7 @@ class PEP8Normalizer(ErrorFinder):
             for i, line in enumerate(leaf.value.splitlines()[1:]):
                 indentation = re.match(r'[ \t]*', line).group(0)
                 start_pos = leaf.line + i, len(indentation)
+                print(start_pos)
                 # TODO check multiline indentation.
         elif typ == 'endmarker':
             if self._newline_count >= 2:
@@ -718,7 +754,7 @@ class PEP8NormalizerConfig(ErrorFinderConfig):


 # TODO this is not yet ready.
-#@PEP8Normalizer.register_rule(type='endmarker')
+# @PEP8Normalizer.register_rule(type='endmarker')
 class BlankLineAtEnd(Rule):
     code = 392
     message = 'Blank line at end of file'
@@ -71,7 +71,7 @@ def split_prefix(leaf, start_pos):
     value = spacing = ''
     bom = False
     while start != len(leaf.prefix):
-        match =_regex.match(leaf.prefix, start)
+        match = _regex.match(leaf.prefix, start)
         spacing = match.group(1)
         value = match.group(2)
         if not value:
@@ -395,12 +395,12 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first
         is_first_token = False

         if contstr:  # continued string
-            endmatch = endprog.match(line)
+            endmatch = endprog.match(line)  # noqa: F821
             if endmatch:
                 pos = endmatch.end(0)
                 yield PythonToken(
                     STRING, contstr + line[:pos],
-                    contstr_start, prefix)
+                    contstr_start, prefix)  # noqa: F821
                 contstr = ''
                 contline = None
             else:
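The `# noqa: F821` markers silence pyflakes' undefined-name warning: `endprog`, `contstr_start`, and `prefix` are only bound on an earlier pass of the tokenizer loop, and the `if contstr:` guard that makes this safe is invisible to flake8's flow analysis. A reduced sketch of the same false positive, with hypothetical names:

    import re

    carried = ''
    for line in ['"""body', 'tail"""rest\n']:
        if carried:
            # 'pattern' was bound on the previous iteration; without the
            # guard-aware noqa, flake8 reports F821 (undefined name) here.
            match = pattern.match(line)  # noqa: F821
            print(carried + line[:match.end()])
            carried = ''
        elif line.startswith('"""'):
            pattern = re.compile(r'.*?"""')
            carried = line
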
@@ -135,11 +135,11 @@ class FileModification:
                 # We cannot delete every line, that doesn't make sense to
                 # fuzz and it would be annoying to rewrite everything here.
                 continue
-            l = LineDeletion(random_line())
+            ld = LineDeletion(random_line())
         elif rand == 2:
             # Copy / Insertion
             # Make it possible to insert into the first and the last line
-            l = LineCopy(random_line(), random_line(include_end=True))
+            ld = LineCopy(random_line(), random_line(include_end=True))
         elif rand in (3, 4):
             # Modify a line in some weird random ways.
             line_nr = random_line()
@@ -166,9 +166,9 @@ class FileModification:
                 # we really replace the line with something that has
                 # indentation.
                 line = ' ' * random.randint(0, 12) + random_string + '\n'
-            l = LineReplacement(line_nr, line)
-        l.apply(lines)
-        yield l
+            ld = LineReplacement(line_nr, line)
+        ld.apply(lines)
+        yield ld

     def __init__(self, modification_list, check_original):
         self.modification_list = modification_list
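Renaming `l` to `ld` here (and to `ln` in a later test hunk) addresses E741, which flags single-letter names that are ambiguous in many fonts (`l`, `I`, `O`). Any slightly longer name passes:

    lines = ['a\n', 'b\n', 'c\n']

    # E741 flags 'l = len(lines)'; a two-letter name is already fine.
    ln = len(lines)
    print('parsed %s lines' % ln)
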
@@ -81,7 +81,7 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
     way.

     __ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
-    """
+    """  # noqa
     grammar = load_grammar()
     module = 'fake parser'

@@ -1340,7 +1340,7 @@ def test_backslash_issue(differ):
         pre = (
             '')
         \\if
-        ''')
+        ''')  # noqa
     differ.initialize(code1)
     differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
     differ.parse(code1, parsers=1, copies=1)
@@ -1420,7 +1420,7 @@ def test_with_formfeed(differ):
         \x0cimport
         return
         return ''
-        ''')
+        ''')  # noqa
     differ.initialize(code1)
     differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)

@@ -42,9 +42,9 @@ def collect_errors(code):
             column = int(add_indent or len(match.group(1)))

             code, _, add_line = code.partition('+')
-            l = line_nr + 1 + int(add_line or 0)
+            ln = line_nr + 1 + int(add_line or 0)

-            yield WantedIssue(code[1:], l, column)
+            yield WantedIssue(code[1:], ln, column)


 def test_normalizer_issue(normalizer_issue_case):
@@ -135,7 +135,7 @@ def test_wrong_indentation():
         b
             a
         """)
-    #check_p(src, 1)
+    check_p(src, 1)

     src = dedent("""\
     def complex():
@@ -193,10 +193,12 @@ def test_no_error_nodes(each_version):
 def test_named_expression(works_ge_py38):
     works_ge_py38.parse("(a := 1, a + 1)")

+
 def test_extended_rhs_annassign(works_ge_py38):
     works_ge_py38.parse("x: y = z,")
     works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w")

+
 @pytest.mark.parametrize(
     'param_code', [
         'a=1, /',
@@ -211,6 +213,7 @@ def test_extended_rhs_annassign(works_ge_py38):
 def test_positional_only_arguments(works_ge_py38, param_code):
     works_ge_py38.parse("def x(%s): pass" % param_code)

+
 @pytest.mark.parametrize(
     'expression', [
         'a + a',
@@ -163,13 +163,13 @@ def top_function_three():
     raise Exception
 """

-    r = get_raise_stmts(code, 0) # Lists in a simple Function
+    r = get_raise_stmts(code, 0)  # Lists in a simple Function
     assert len(list(r)) == 1

-    r = get_raise_stmts(code, 1) # Doesn't Exceptions list in closures
+    r = get_raise_stmts(code, 1)  # Doesn't Exceptions list in closures
     assert len(list(r)) == 1

-    r = get_raise_stmts(code, 2) # Lists inside try-catch
+    r = get_raise_stmts(code, 2)  # Lists inside try-catch
    assert len(list(r)) == 2


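The changes in this hunk are comment spacing: E261 requires at least two spaces between a statement and an inline `#` comment. Sketch:

    r = list(range(3))  # two spaces before '#' satisfy E261
    assert len(r) == 3
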
@@ -33,6 +33,7 @@ def test_eof_blankline():
     assert_issue('# foobar\n\n')
     assert_issue('\n\n')

+
 def test_shebang():
     assert not issues('#!\n')
     assert not issues('#!/foo\n')
@@ -44,7 +44,7 @@ def test_simple_prefix_splitting(string, tokens):
         else:
             end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing)

-        #assert start_pos == pt.start_pos
+        # assert start_pos == pt.start_pos
         assert end_pos == pt.end_pos
         start_pos = end_pos

@@ -273,10 +273,12 @@ def test_too_many_levels_of_indentation():
     assert not _get_error_list(build_nested('pass', 49, base=base))
     assert _get_error_list(build_nested('pass', 50, base=base))

+
 def test_paren_kwarg():
     assert _get_error_list("print((sep)=seperator)", version="3.8")
     assert not _get_error_list("print((sep)=seperator)", version="3.7")

+
 @pytest.mark.parametrize(
     'code', [
         "f'{*args,}'",
@@ -330,6 +332,7 @@ def test_trailing_comma(code):
     errors = _get_error_list(code)
     assert not errors

+
 def test_continue_in_finally():
     code = dedent('''\
         for a in [1]:
@@ -341,7 +344,7 @@ def test_continue_in_finally():
     assert not _get_error_list(code, version="3.8")
     assert _get_error_list(code, version="3.7")


 @pytest.mark.parametrize(
     'template', [
         "a, b, {target}, c = d",
@@ -392,6 +395,7 @@ def test_repeated_kwarg():
 def test_unparenthesized_genexp(source, no_errors):
     assert bool(_get_error_list(source)) ^ no_errors

+
 @pytest.mark.parametrize(
     ('source', 'no_errors'), [
         ('*x = 2', False),
@@ -108,7 +108,7 @@ def test_tokenize_multiline_I():
     fundef = '''""""\n'''
     token_list = _get_token_list(fundef)
     assert token_list == [PythonToken(ERRORTOKEN, '""""\n', (1, 0), ''),
-                          PythonToken(ENDMARKER , '', (2, 0), '')]
+                          PythonToken(ENDMARKER, '', (2, 0), '')]


 def test_tokenize_multiline_II():
@@ -117,7 +117,7 @@ def test_tokenize_multiline_II():
     fundef = '''""""'''
     token_list = _get_token_list(fundef)
     assert token_list == [PythonToken(ERRORTOKEN, '""""', (1, 0), ''),
                           PythonToken(ENDMARKER, '', (1, 4), '')]


 def test_tokenize_multiline_III():
@@ -126,7 +126,7 @@ def test_tokenize_multiline_III():
     fundef = '''""""\n\n'''
     token_list = _get_token_list(fundef)
     assert token_list == [PythonToken(ERRORTOKEN, '""""\n\n', (1, 0), ''),
                           PythonToken(ENDMARKER, '', (3, 0), '')]


 def test_identifier_contains_unicode():
@@ -83,6 +83,7 @@ def test_bytes_to_unicode_failing_encoding(code, errors):
     else:
         python_bytes_to_unicode(code, errors=errors)

+
 @pytest.mark.parametrize(
     ('version_str', 'version'), [
         ('3', (3,)),