flake8 changes

Dave Halter
2020-07-24 16:04:15 +02:00
parent 2962517be0
commit a0662b3b3b
19 changed files with 124 additions and 71 deletions
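The hunks below are mechanical flake8 fixes: an unused import dropped (F401), spacing around '=' in keyword defaults (E251), comment formatting (E261/E265), continuation-line indentation (E12x), long lines (E501), and ambiguous single-letter names (E741). To reproduce the check programmatically, flake8 ships a small public API; a minimal sketch (the paths and ignore list are illustrative, not parso's actual configuration):

from flake8.api import legacy as flake8

# Hypothetical invocation; parso's real settings live in its own config.
style_guide = flake8.get_style_guide(ignore=['W503'])
report = style_guide.check_files(['parso', 'test'])
print(report.total_errors)  # number of remaining violations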

View File

@@ -5,7 +5,6 @@ import hashlib
import gc
import shutil
import platform
-import errno
import logging
import warnings
@@ -92,7 +91,8 @@ On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
-def _get_cache_clear_lock(cache_path = None):
+def _get_cache_clear_lock(cache_path=None):
"""
The path where the cache lock is stored.
@@ -231,17 +231,17 @@ def clear_inactive_cache(
):
try:
os.remove(file.path)
-except OSError: # silently ignore all failures
+except OSError:  # silently ignore all failures
continue
else:
return True
-def _remove_cache_and_update_lock(cache_path = None):
+def _remove_cache_and_update_lock(cache_path=None):
lock = _get_cache_clear_lock(cache_path=cache_path)
clear_lock_time = lock.get_last_modified()
if (
-clear_lock_time is None # first time
+clear_lock_time is None  # first time
or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
):
if not lock._touch():
@@ -250,7 +250,8 @@ def _remove_cache_and_update_lock(cache_path = None):
# not a big problem.
return False
-clear_inactive_cache(cache_path = cache_path)
+clear_inactive_cache(cache_path=cache_path)
def _get_hashed_path(hashed_grammar, path, cache_path=None):
directory = _get_cache_directory_path(cache_path=cache_path)
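_remove_cache_and_update_lock above is a small rate limiter: the lock file's mtime records when the cache was last cleared, and clearing only reruns once _CACHE_CLEAR_THRESHOLD seconds have passed. A stripped-down sketch of the same pattern, with hypothetical names rather than parso's actual API:

import os
import time

CLEAR_THRESHOLD = 60 * 60 * 24  # one day, mirroring _CACHE_CLEAR_THRESHOLD

def maybe_clear_cache(lock_path, clear_callback):
    # Run clear_callback at most once per CLEAR_THRESHOLD seconds, using
    # the lock file's mtime as the last-run timestamp.
    try:
        last_run = os.path.getmtime(lock_path)
    except OSError:
        last_run = None  # first time
    if last_run is not None and last_run + CLEAR_THRESHOLD > time.time():
        return False
    with open(lock_path, 'a'):
        os.utime(lock_path, None)  # touch, like lock._touch()
    clear_callback()
    return True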

View File

@@ -23,8 +23,9 @@ class Grammar(object):
:py:func:`parso.load_grammar` returns instances of this class.
Creating custom non-Python grammars by calling this is not supported, yet.
-:param text: A BNF representation of your grammar.
"""
+#:param text: A BNF representation of your grammar.
_error_normalizer_config = None
_token_namespace = None
_default_normalizer_config = pep8.PEP8NormalizerConfig()
@@ -133,9 +134,9 @@ class Grammar(object):
new_lines=lines
)
try_to_save_module(self._hashed, file_io, new_node, lines,
-# Never pickle in pypy, it's slow as hell.
-pickling=cache and not is_pypy,
-cache_path=cache_path)
+                   # Never pickle in pypy, it's slow as hell.
+                   pickling=cache and not is_pypy,
+                   cache_path=cache_path)
return new_node
tokens = self._tokenizer(lines, start_pos=start_pos)
@@ -149,9 +150,9 @@ class Grammar(object):
if cache or diff_cache:
try_to_save_module(self._hashed, file_io, root_node, lines,
-# Never pickle in pypy, it's slow as hell.
-pickling=cache and not is_pypy,
-cache_path=cache_path)
+                   # Never pickle in pypy, it's slow as hell.
+                   pickling=cache and not is_pypy,
+                   cache_path=cache_path)
return root_node
def _get_token_namespace(self):
@@ -252,7 +253,9 @@ def load_grammar(**kwargs):
grammar = PythonGrammar(version_info, bnf_text)
return _loaded_grammars.setdefault(path, grammar)
except FileNotFoundError:
message = "Python version %s.%s is currently not supported." % (version_info.major, version_info.minor)
message = "Python version %s.%s is currently not supported." % (
version_info.major, version_info.minor
)
raise NotImplementedError(message)
else:
raise NotImplementedError("No support for language %s." % language)

View File

@@ -149,7 +149,6 @@ def _simplify_dfas(dfas):
for j in range(i + 1, len(dfas)):
state_j = dfas[j]
if state_i == state_j:
#print " unify", i, j
del dfas[j]
for state in dfas:
state.unifystate(state_j, state_i)
@@ -245,14 +244,14 @@ def generate_grammar(bnf_grammar, token_namespace):
rule_to_dfas = {}
start_nonterminal = None
for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
-#_dump_nfa(nfa_a, nfa_z)
+# _dump_nfa(nfa_a, nfa_z)
dfas = _make_dfas(nfa_a, nfa_z)
-#_dump_dfas(dfas)
+# _dump_dfas(dfas)
# oldlen = len(dfas)
_simplify_dfas(dfas)
# newlen = len(dfas)
rule_to_dfas[nfa_a.from_rule] = dfas
-#print(nfa_a.from_rule, oldlen, newlen)
+# print(nfa_a.from_rule, oldlen, newlen)
if start_nonterminal is None:
start_nonterminal = nfa_a.from_rule
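_simplify_dfas, touched above, repeatedly merges duplicate DFA states and redirects transitions into the surviving state. A simplified, self-contained analogue of that loop, using plain dicts in place of parso's DFAState objects:

def _same_transitions(a, b):
    # Duplicate states map the same labels to the same successor objects.
    return a.keys() == b.keys() and all(a[k] is b[k] for k in a)

def simplify_states(states):
    # states: a list of dicts of the form {label: successor_state_dict}.
    changed = True
    while changed:  # repeat until a full pass finds nothing to merge
        changed = False
        for i, state_i in enumerate(states):
            for j in range(len(states) - 1, i, -1):
                state_j = states[j]
                if _same_transitions(state_i, state_j):
                    del states[j]
                    for state in states:  # redirect edges to the survivor
                        for label, nxt in state.items():
                            if nxt is state_j:
                                state[label] = state_i
                    changed = True
    return states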

View File

@@ -19,6 +19,7 @@ ALLOWED_FUTURES = (
)
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
def _get_rhs_name(node, version):
type_ = node.type
if type_ == "lambdef":
@@ -39,7 +40,7 @@ def _get_rhs_name(node, version):
elif (
first == "("
and (second == ")"
-or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
+     or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
):
return "tuple"
elif first == "(":
@@ -79,8 +80,7 @@ def _get_rhs_name(node, version):
elif trailer.children[0] == ".":
return "attribute"
elif (
("expr" in type_
and "star_expr" not in type_) # is a substring
("expr" in type_ and "star_expr" not in type_) # is a substring
or "_test" in type_
or type_ in ("term", "factor")
):
@@ -91,7 +91,8 @@ def _get_rhs_name(node, version):
return "tuple"
elif type_ == "fstring":
return "f-string expression"
-return type_ # shouldn't reach here
+return type_  # shouldn't reach here
def _iter_stmts(scope):
"""
@@ -420,7 +421,9 @@ class ErrorFinder(Normalizer):
message = 'invalid syntax'
if (
self.version >= (3, 9)
-and leaf.value in _get_token_collection(self.version).always_break_tokens
+and leaf.value in _get_token_collection(
+    self.version
+).always_break_tokens
):
message = "f-string: " + message
else:
@@ -1145,6 +1148,7 @@ class _CompForRule(_CheckAssignmentRule):
class _ExprStmtRule(_CheckAssignmentRule):
message = "illegal expression for augmented assignment"
extended_message = "'{target}' is an " + message
def is_issue(self, node):
augassign = node.children[1]
is_aug_assign = augassign != '=' and augassign.type != 'annassign'
@@ -1174,6 +1178,7 @@ class _ExprStmtRule(_CheckAssignmentRule):
),
)
@ErrorFinder.register_rule(type='with_item')
class _WithItemRule(_CheckAssignmentRule):
def is_issue(self, with_item):
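These rules plug into parso's error finder, which is reachable from the public API; a hedged sketch (the snippet is illustrative and exact message text varies by version):

import parso

grammar = parso.load_grammar()
module = grammar.parse('1 = 1\n')
for issue in grammar.iter_errors(module):
    print(issue.code, issue.message)  # e.g. 901 SyntaxError: cannot assign to literal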

View File

@@ -71,7 +71,6 @@ class BracketNode(IndentationNode):
n = n.parent
parent_indentation = n.indentation
next_leaf = leaf.get_next_leaf()
if '\n' in next_leaf.prefix:
# This implies code like:
@@ -93,7 +92,7 @@ class BracketNode(IndentationNode):
if '\t' in config.indentation:
self.indentation = None
else:
-self.indentation = ' ' * expected_end_indent
+self.indentation = ' ' * expected_end_indent
self.bracket_indentation = self.indentation
self.type = IndentationTypes.VERTICAL_BRACKET
@@ -137,7 +136,7 @@ class BackslashNode(IndentationNode):
self.indentation = parent_indentation + config.indentation
else:
# +1 because there is a space.
-self.indentation = ' ' * (equals.end_pos[1] + 1)
+self.indentation = ' ' * (equals.end_pos[1] + 1)
else:
self.indentation = parent_indentation + config.indentation
self.bracket_indentation = self.indentation
@@ -190,7 +189,8 @@ class PEP8Normalizer(ErrorFinder):
expr_stmt = node.parent
# Check if it's simply defining a single name, not something like
# foo.bar or x[1], where using a lambda could make more sense.
-if expr_stmt.type == 'expr_stmt' and any(n.type == 'name' for n in expr_stmt.children[:-2:2]):
+if expr_stmt.type == 'expr_stmt' and any(n.type == 'name'
+                                         for n in expr_stmt.children[:-2:2]):
self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
elif typ == 'try_stmt':
for child in node.children:
@@ -221,7 +221,6 @@ class PEP8Normalizer(ErrorFinder):
if typ in _IMPORT_TYPES:
simple_stmt = node.parent
module = simple_stmt.parent
-#if module.type == 'simple_stmt':
if module.type == 'file_input':
index = module.children.index(simple_stmt)
for child in module.children[:index]:
@@ -406,7 +405,6 @@ class PEP8Normalizer(ErrorFinder):
and leaf.parent.parent.type == 'decorated':
self.add_issue(part, 304, "Blank lines found after function decorator")
self._newline_count += 1
if type_ == 'backslash':
@@ -461,33 +459,62 @@ class PEP8Normalizer(ErrorFinder):
else:
should_be_indentation = node.indentation
if self._in_suite_introducer and indentation == \
-node.get_latest_suite_node().indentation \
-+ self._config.indentation:
-self.add_issue(part, 129, "Line with same indent as next logical block")
+        node.get_latest_suite_node().indentation \
+        + self._config.indentation:
+    self.add_issue(part, 129, "Line with same indent as next logical block")
elif indentation != should_be_indentation:
if not self._check_tabs_spaces(spacing) and part.value != '\n':
if value in '])}':
if node.type == IndentationTypes.VERTICAL_BRACKET:
-self.add_issue(part, 124, "Closing bracket does not match visual indentation")
+self.add_issue(
+    part,
+    124,
+    "Closing bracket does not match visual indentation"
+)
else:
-self.add_issue(part, 123, "Closing bracket does not match indentation of opening bracket's line")
+self.add_issue(
+    part,
+    123,
+    "Closing bracket does not match "
+    "indentation of opening bracket's line"
+)
else:
if len(indentation) < len(should_be_indentation):
if node.type == IndentationTypes.VERTICAL_BRACKET:
-self.add_issue(part, 128, 'Continuation line under-indented for visual indent')
+self.add_issue(
+    part,
+    128,
+    'Continuation line under-indented for visual indent'
+)
elif node.type == IndentationTypes.BACKSLASH:
-self.add_issue(part, 122, 'Continuation line missing indentation or outdented')
+self.add_issue(
+    part,
+    122,
+    'Continuation line missing indentation or outdented'
+)
elif node.type == IndentationTypes.IMPLICIT:
self.add_issue(part, 135, 'xxx')
else:
-self.add_issue(part, 121, 'Continuation line under-indented for hanging indent')
+self.add_issue(
+    part,
+    121,
+    'Continuation line under-indented for hanging indent'
+)
else:
if node.type == IndentationTypes.VERTICAL_BRACKET:
-self.add_issue(part, 127, 'Continuation line over-indented for visual indent')
+self.add_issue(
+    part,
+    127,
+    'Continuation line over-indented for visual indent'
+)
elif node.type == IndentationTypes.IMPLICIT:
self.add_issue(part, 136, 'xxx')
else:
-self.add_issue(part, 126, 'Continuation line over-indented for hanging indent')
+self.add_issue(
+    part,
+    126,
+    'Continuation line over-indented for hanging indent'
+)
else:
self._check_spacing(part, spacing)
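The codes above mirror pycodestyle's continuation-line rules. A few hypothetical snippets that would trigger them, assuming standard pycodestyle semantics:

# 128: continuation line under-indented for visual indent
print("one",
  "two")

# 127: continuation line over-indented for visual indent
print("one",
          "two")

# 121: continuation line under-indented for hanging indent
print(
   "one")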
@@ -524,7 +551,7 @@ class PEP8Normalizer(ErrorFinder):
else:
last_column = part.end_pos[1]
if last_column > self._config.max_characters \
-and spacing.start_pos[1] <= self._config.max_characters :
+and spacing.start_pos[1] <= self._config.max_characters:
# Special case for long URLs in multi-line docstrings or comments,
# but still report the error when the 72 first chars are whitespaces.
report = True
@@ -538,7 +565,7 @@ class PEP8Normalizer(ErrorFinder):
part,
501,
'Line too long (%s > %s characters)' %
-(last_column, self._config.max_characters),
+(last_column, self._config.max_characters),
)
def _check_spacing(self, part, spacing):
@@ -573,11 +600,11 @@ class PEP8Normalizer(ErrorFinder):
message = "Whitespace before '%s'" % part.value
add_if_spaces(spacing, 202, message)
elif part in (',', ';') or part == ':' \
-and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
+and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
message = "Whitespace before '%s'" % part.value
add_if_spaces(spacing, 203, message)
elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
-pass # TODO
+pass  # TODO
elif prev in (',', ';', ':'):
add_not_spaces(spacing, 231, "missing whitespace after '%s'")
elif part == ':': # Is a subscript
@@ -602,9 +629,17 @@ class PEP8Normalizer(ErrorFinder):
if param.type == 'param' and param.annotation:
add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
else:
-add_if_spaces(spacing, 251, 'Unexpected spaces around keyword / parameter equals')
+add_if_spaces(
+    spacing,
+    251,
+    'Unexpected spaces around keyword / parameter equals'
+)
elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
-add_not_spaces(spacing, 227, 'Missing whitespace around bitwise or shift operator')
+add_not_spaces(
+    spacing,
+    227,
+    'Missing whitespace around bitwise or shift operator'
+)
elif part == '%' or prev == '%':
add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
else:
@@ -621,8 +656,7 @@ class PEP8Normalizer(ErrorFinder):
if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
message_225 = 'Missing whitespace between tokens'
-#print('xy', spacing)
-#self.add_issue(spacing, 225, message_225)
+# self.add_issue(spacing, 225, message_225)
# TODO why only brackets?
if part in _OPENING_BRACKETS:
message = "Whitespace before '%s'" % part.value
@@ -664,7 +698,8 @@ class PEP8Normalizer(ErrorFinder):
self.add_issue(leaf, 711, message)
break
elif node.value in ('True', 'False'):
message = "comparison to False/True should be 'if cond is True:' or 'if cond:'"
message = "comparison to False/True should be " \
"'if cond is True:' or 'if cond:'"
self.add_issue(leaf, 712, message)
break
elif leaf.value in ('in', 'is'):
@@ -679,6 +714,7 @@ class PEP8Normalizer(ErrorFinder):
for i, line in enumerate(leaf.value.splitlines()[1:]):
indentation = re.match(r'[ \t]*', line).group(0)
start_pos = leaf.line + i, len(indentation)
+print(start_pos)
# TODO check multiline indentation.
elif typ == 'endmarker':
if self._newline_count >= 2:
@@ -718,7 +754,7 @@ class PEP8NormalizerConfig(ErrorFinderConfig):
# TODO this is not yet ready.
-#@PEP8Normalizer.register_rule(type='endmarker')
+# @PEP8Normalizer.register_rule(type='endmarker')
class BlankLineAtEnd(Rule):
code = 392
message = 'Blank line at end of file'

View File

@@ -71,7 +71,7 @@ def split_prefix(leaf, start_pos):
value = spacing = ''
bom = False
while start != len(leaf.prefix):
-match =_regex.match(leaf.prefix, start)
+match = _regex.match(leaf.prefix, start)
spacing = match.group(1)
value = match.group(2)
if not value:

View File

@@ -395,12 +395,12 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first
is_first_token = False
if contstr: # continued string
-endmatch = endprog.match(line)
+endmatch = endprog.match(line) # noqa: F821
if endmatch:
pos = endmatch.end(0)
yield PythonToken(
STRING, contstr + line[:pos],
-contstr_start, prefix)
+contstr_start, prefix) # noqa: F821
contstr = ''
contline = None
else:

View File

@@ -135,11 +135,11 @@ class FileModification:
# We cannot delete every line, that doesn't make sense to
# fuzz and it would be annoying to rewrite everything here.
continue
-l = LineDeletion(random_line())
+ld = LineDeletion(random_line())
elif rand == 2:
# Copy / Insertion
# Make it possible to insert into the first and the last line
-l = LineCopy(random_line(), random_line(include_end=True))
+ld = LineCopy(random_line(), random_line(include_end=True))
elif rand in (3, 4):
# Modify a line in some weird random ways.
line_nr = random_line()
@@ -166,9 +166,9 @@ class FileModification:
# we really replace the line with something that has
# indentation.
line = ' ' * random.randint(0, 12) + random_string + '\n'
-l = LineReplacement(line_nr, line)
-l.apply(lines)
-yield l
+ld = LineReplacement(line_nr, line)
+ld.apply(lines)
+yield ld
def __init__(self, modification_list, check_original):
self.modification_list = modification_list
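The rename from l to ld is driven by flake8's E741, which flags single-letter names that read like digits; an illustrative example:

l = 1  # E741: ambiguous variable name 'l' (looks like the digit 1)
I = 5  # E741: 'I' is ambiguous as well
O = 0  # E741: so is 'O', which reads like zero
line_deletion = 1  # a descriptive name avoids the ambiguity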

View File

@@ -81,7 +81,7 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
way.
__ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
"""
""" # noqa
grammar = load_grammar()
module = 'fake parser'
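The noqa on the closing quotes keeps flake8 from flagging the long Apple URL (E501) without breaking the link. A bare noqa silences every code on its line; the qualified form, used with F821 in the tokenizer hunk above, silences only the named code:

# Hypothetical lines illustrating the two suppression forms:
url = 'https://example.com/a/very/long/path'  # noqa
url = 'https://example.com/a/very/long/path'  # noqa: E501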

View File

@@ -1340,7 +1340,7 @@ def test_backslash_issue(differ):
pre = (
'')
\\if
-''')
+''') # noqa
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
@@ -1420,7 +1420,7 @@ def test_with_formfeed(differ):
\x0cimport
return
return ''
-''')
+''') # noqa
differ.initialize(code1)
differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)

View File

@@ -42,9 +42,9 @@ def collect_errors(code):
column = int(add_indent or len(match.group(1)))
code, _, add_line = code.partition('+')
-l = line_nr + 1 + int(add_line or 0)
+ln = line_nr + 1 + int(add_line or 0)
-yield WantedIssue(code[1:], l, column)
+yield WantedIssue(code[1:], ln, column)
def test_normalizer_issue(normalizer_issue_case):

View File

@@ -135,7 +135,7 @@ def test_wrong_indentation():
b
a
""")
-#check_p(src, 1)
+check_p(src, 1)
src = dedent("""\
def complex():

View File

@@ -193,10 +193,12 @@ def test_no_error_nodes(each_version):
def test_named_expression(works_ge_py38):
works_ge_py38.parse("(a := 1, a + 1)")
def test_extended_rhs_annassign(works_ge_py38):
works_ge_py38.parse("x: y = z,")
works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w")
@pytest.mark.parametrize(
'param_code', [
'a=1, /',
@@ -211,6 +213,7 @@ def test_extended_rhs_annassign(works_ge_py38):
def test_positional_only_arguments(works_ge_py38, param_code):
works_ge_py38.parse("def x(%s): pass" % param_code)
@pytest.mark.parametrize(
'expression', [
'a + a',

View File

@@ -163,13 +163,13 @@ def top_function_three():
raise Exception
"""
-r = get_raise_stmts(code, 0) # Lists in a simple Function
+r = get_raise_stmts(code, 0)  # Lists in a simple Function
assert len(list(r)) == 1
-r = get_raise_stmts(code, 1) # Doesn't list exceptions in closures
+r = get_raise_stmts(code, 1)  # Doesn't list exceptions in closures
assert len(list(r)) == 1
-r = get_raise_stmts(code, 2) # Lists inside try-catch
+r = get_raise_stmts(code, 2)  # Lists inside try-catch
assert len(list(r)) == 2

View File

@@ -33,6 +33,7 @@ def test_eof_blankline():
assert_issue('# foobar\n\n')
assert_issue('\n\n')
def test_shebang():
assert not issues('#!\n')
assert not issues('#!/foo\n')

View File

@@ -44,7 +44,7 @@ def test_simple_prefix_splitting(string, tokens):
else:
end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing)
-#assert start_pos == pt.start_pos
+# assert start_pos == pt.start_pos
assert end_pos == pt.end_pos
start_pos = end_pos

View File

@@ -273,10 +273,12 @@ def test_too_many_levels_of_indentation():
assert not _get_error_list(build_nested('pass', 49, base=base))
assert _get_error_list(build_nested('pass', 50, base=base))
def test_paren_kwarg():
assert _get_error_list("print((sep)=seperator)", version="3.8")
assert not _get_error_list("print((sep)=seperator)", version="3.7")
@pytest.mark.parametrize(
'code', [
"f'{*args,}'",
@@ -330,6 +332,7 @@ def test_trailing_comma(code):
errors = _get_error_list(code)
assert not errors
def test_continue_in_finally():
code = dedent('''\
for a in [1]:
@@ -341,7 +344,7 @@ def test_continue_in_finally():
assert not _get_error_list(code, version="3.8")
assert _get_error_list(code, version="3.7")
@pytest.mark.parametrize(
'template', [
"a, b, {target}, c = d",
@@ -392,6 +395,7 @@ def test_repeated_kwarg():
def test_unparenthesized_genexp(source, no_errors):
assert bool(_get_error_list(source)) ^ no_errors
@pytest.mark.parametrize(
('source', 'no_errors'), [
('*x = 2', False),

View File

@@ -108,7 +108,7 @@ def test_tokenize_multiline_I():
fundef = '''""""\n'''
token_list = _get_token_list(fundef)
assert token_list == [PythonToken(ERRORTOKEN, '""""\n', (1, 0), ''),
-PythonToken(ENDMARKER , '', (2, 0), '')]
+PythonToken(ENDMARKER, '', (2, 0), '')]
def test_tokenize_multiline_II():
@@ -117,7 +117,7 @@ def test_tokenize_multiline_II():
fundef = '''""""'''
token_list = _get_token_list(fundef)
assert token_list == [PythonToken(ERRORTOKEN, '""""', (1, 0), ''),
-PythonToken(ENDMARKER, '', (1, 4), '')]
+                      PythonToken(ENDMARKER, '', (1, 4), '')]
def test_tokenize_multiline_III():
@@ -126,7 +126,7 @@ def test_tokenize_multiline_III():
fundef = '''""""\n\n'''
token_list = _get_token_list(fundef)
assert token_list == [PythonToken(ERRORTOKEN, '""""\n\n', (1, 0), ''),
-PythonToken(ENDMARKER, '', (3, 0), '')]
+                      PythonToken(ENDMARKER, '', (3, 0), '')]
def test_identifier_contains_unicode():

View File

@@ -83,6 +83,7 @@ def test_bytes_to_unicode_failing_encoding(code, errors):
else:
python_bytes_to_unicode(code, errors=errors)
@pytest.mark.parametrize(
('version_str', 'version'), [
('3', (3,)),