flake8 changes

Dave Halter
2020-07-24 16:04:15 +02:00
parent 2962517be0
commit a0662b3b3b
19 changed files with 124 additions and 71 deletions

View File

@@ -5,7 +5,6 @@ import hashlib
 import gc
 import shutil
 import platform
-import errno
 import logging
 import warnings
@@ -92,6 +91,7 @@ On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
 _CACHE_CLEAR_THRESHOLD = 60 * 60 * 24

+
 def _get_cache_clear_lock(cache_path=None):
     """
     The path where the cache lock is stored.
@@ -252,6 +252,7 @@ def _remove_cache_and_update_lock(cache_path = None):
     clear_inactive_cache(cache_path=cache_path)

+
 def _get_hashed_path(hashed_grammar, path, cache_path=None):
     directory = _get_cache_directory_path(cache_path=cache_path)

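The first hunk above removes `import errno`, an import flake8 flags as F401 once nothing in the module references it; the other two hunks add the second blank line that E302 wants before a top-level def. A minimal sketch of the F401 case (the names here are hypothetical, not from parso):

    import errno  # flake8: F401 'errno' imported but unused

    def read_text(path):
        # nothing below touches errno, so the import above is dead
        with open(path) as f:
            return f.read()
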
View File

@@ -23,8 +23,9 @@ class Grammar(object):
     :py:func:`parso.load_grammar` returns instances of this class.

     Creating custom none-python grammars by calling this is not supported, yet.
-    :param text: A BNF representation of your grammar.
     """
+    #:param text: A BNF representation of your grammar.
+
     _error_normalizer_config = None
     _token_namespace = None
     _default_normalizer_config = pep8.PEP8NormalizerConfig()
@@ -252,7 +253,9 @@ def load_grammar(**kwargs):
             grammar = PythonGrammar(version_info, bnf_text)
             return _loaded_grammars.setdefault(path, grammar)
         except FileNotFoundError:
-            message = "Python version %s.%s is currently not supported." % (version_info.major, version_info.minor)
+            message = "Python version %s.%s is currently not supported." % (
+                version_info.major, version_info.minor
+            )
             raise NotImplementedError(message)
     else:
         raise NotImplementedError("No support for language %s." % language)

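The load_grammar hunk is an E501 (line too long) fix: breaking the %-format call inside its own parentheses relies on Python's implicit line continuation, so no backslash is needed. The same pattern in isolation, with hypothetical variables:

    major, minor = 3, 3
    # one line would exceed the length limit:
    message = "Python version %s.%s is currently not supported." % (
        major, minor
    )
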
View File

@@ -149,7 +149,6 @@ def _simplify_dfas(dfas):
         for j in range(i + 1, len(dfas)):
             state_j = dfas[j]
             if state_i == state_j:
-                #print " unify", i, j
                 del dfas[j]
                 for state in dfas:
                     state.unifystate(state_j, state_i)

View File

@@ -19,6 +19,7 @@ ALLOWED_FUTURES = (
 )
 _COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')

+
 def _get_rhs_name(node, version):
     type_ = node.type
     if type_ == "lambdef":
@@ -79,8 +80,7 @@ def _get_rhs_name(node, version):
         elif trailer.children[0] == ".":
             return "attribute"
     elif (
-        ("expr" in type_
-         and "star_expr" not in type_)  # is a substring
+        ("expr" in type_ and "star_expr" not in type_)  # is a substring
         or "_test" in type_
         or type_ in ("term", "factor")
     ):
@@ -93,6 +93,7 @@ def _get_rhs_name(node, version):
         return "f-string expression"
     return type_  # shouldn't reach here

+
 def _iter_stmts(scope):
     """
     Iterates over all statements and splits up simple_stmt.
@@ -420,7 +421,9 @@ class ErrorFinder(Normalizer):
             message = 'invalid syntax'
             if (
                 self.version >= (3, 9)
-                and leaf.value in _get_token_collection(self.version).always_break_tokens
+                and leaf.value in _get_token_collection(
+                    self.version
+                ).always_break_tokens
             ):
                 message = "f-string: " + message
             else:
@@ -1145,6 +1148,7 @@ class _CompForRule(_CheckAssignmentRule):
 class _ExprStmtRule(_CheckAssignmentRule):
     message = "illegal expression for augmented assignment"
     extended_message = "'{target}' is an " + message
+
     def is_issue(self, node):
         augassign = node.children[1]
         is_aug_assign = augassign != '=' and augassign.type != 'annassign'
@@ -1174,6 +1178,7 @@ class _ExprStmtRule(_CheckAssignmentRule):
             ),
         )

+
 @ErrorFinder.register_rule(type='with_item')
 class _WithItemRule(_CheckAssignmentRule):
     def is_issue(self, with_item):

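Several hunks in this file change nothing but blank lines: E302 asks for two blank lines before a top-level def or class, and E301 asks for one blank line before a method. A minimal sketch of both rules:

    CONSTANT = 1


    def top_level():  # two blank lines above satisfy E302
        pass


    class Rule:
        message = 'x'

        def is_issue(self, node):  # one blank line above satisfies E301
            return False
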
View File

@@ -71,7 +71,6 @@ class BracketNode(IndentationNode):
                 n = n.parent
             parent_indentation = n.indentation

         next_leaf = leaf.get_next_leaf()
-
         if '\n' in next_leaf.prefix:
             # This implies code like:
@@ -190,7 +189,8 @@ class PEP8Normalizer(ErrorFinder):
                 expr_stmt = node.parent
                 # Check if it's simply defining a single name, not something like
                 # foo.bar or x[1], where using a lambda could make more sense.
-                if expr_stmt.type == 'expr_stmt' and any(n.type == 'name' for n in expr_stmt.children[:-2:2]):
+                if expr_stmt.type == 'expr_stmt' and any(n.type == 'name'
+                                                         for n in expr_stmt.children[:-2:2]):
                     self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
             elif typ == 'try_stmt':
                 for child in node.children:
@@ -221,7 +221,6 @@ class PEP8Normalizer(ErrorFinder):
         if typ in _IMPORT_TYPES:
             simple_stmt = node.parent
             module = simple_stmt.parent
-            #if module.type == 'simple_stmt':
             if module.type == 'file_input':
                 index = module.children.index(simple_stmt)
                 for child in module.children[:index]:
@@ -406,7 +405,6 @@ class PEP8Normalizer(ErrorFinder):
                     and leaf.parent.parent.type == 'decorated':
                 self.add_issue(part, 304, "Blank lines found after function decorator")

         self._newline_count += 1
-
         if type_ == 'backslash':
@@ -468,26 +466,55 @@ class PEP8Normalizer(ErrorFinder):
         if not self._check_tabs_spaces(spacing) and part.value != '\n':
             if value in '])}':
                 if node.type == IndentationTypes.VERTICAL_BRACKET:
-                    self.add_issue(part, 124, "Closing bracket does not match visual indentation")
+                    self.add_issue(
+                        part,
+                        124,
+                        "Closing bracket does not match visual indentation"
+                    )
                 else:
-                    self.add_issue(part, 123, "Losing bracket does not match indentation of opening bracket's line")
+                    self.add_issue(
+                        part,
+                        123,
+                        "Losing bracket does not match "
+                        "indentation of opening bracket's line"
+                    )
             else:
                 if len(indentation) < len(should_be_indentation):
                     if node.type == IndentationTypes.VERTICAL_BRACKET:
-                        self.add_issue(part, 128, 'Continuation line under-indented for visual indent')
+                        self.add_issue(
+                            part,
+                            128,
+                            'Continuation line under-indented for visual indent'
+                        )
                     elif node.type == IndentationTypes.BACKSLASH:
-                        self.add_issue(part, 122, 'Continuation line missing indentation or outdented')
+                        self.add_issue(
+                            part,
+                            122,
+                            'Continuation line missing indentation or outdented'
+                        )
                     elif node.type == IndentationTypes.IMPLICIT:
                         self.add_issue(part, 135, 'xxx')
                     else:
-                        self.add_issue(part, 121, 'Continuation line under-indented for hanging indent')
+                        self.add_issue(
+                            part,
+                            121,
+                            'Continuation line under-indented for hanging indent'
+                        )
                 else:
                     if node.type == IndentationTypes.VERTICAL_BRACKET:
-                        self.add_issue(part, 127, 'Continuation line over-indented for visual indent')
+                        self.add_issue(
+                            part,
+                            127,
+                            'Continuation line over-indented for visual indent'
+                        )
                     elif node.type == IndentationTypes.IMPLICIT:
                         self.add_issue(part, 136, 'xxx')
                     else:
-                        self.add_issue(part, 126, 'Continuation line over-indented for hanging indent')
+                        self.add_issue(
+                            part,
+                            126,
+                            'Continuation line over-indented for hanging indent'
+                        )
         else:
             self._check_spacing(part, spacing)
@@ -602,9 +629,17 @@ class PEP8Normalizer(ErrorFinder):
                     if param.type == 'param' and param.annotation:
                         add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
                     else:
-                        add_if_spaces(spacing, 251, 'Unexpected spaces around keyword / parameter equals')
+                        add_if_spaces(
+                            spacing,
+                            251,
+                            'Unexpected spaces around keyword / parameter equals'
+                        )
             elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
-                add_not_spaces(spacing, 227, 'Missing whitespace around bitwise or shift operator')
+                add_not_spaces(
+                    spacing,
+                    227,
+                    'Missing whitespace around bitwise or shift operator'
+                )
             elif part == '%' or prev == '%':
                 add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
             else:
@@ -621,7 +656,6 @@ class PEP8Normalizer(ErrorFinder):
             if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
                 message_225 = 'Missing whitespace between tokens'
-                #print('xy', spacing)
                 # self.add_issue(spacing, 225, message_225)
             # TODO why only brackets?
             if part in _OPENING_BRACKETS:
@@ -664,7 +698,8 @@ class PEP8Normalizer(ErrorFinder):
                     self.add_issue(leaf, 711, message)
                     break
                 elif node.value in ('True', 'False'):
-                    message = "comparison to False/True should be 'if cond is True:' or 'if cond:'"
+                    message = "comparison to False/True should be " \
+                              "'if cond is True:' or 'if cond:'"
                     self.add_issue(leaf, 712, message)
                     break
             elif leaf.value in ('in', 'is'):
@@ -679,6 +714,7 @@ class PEP8Normalizer(ErrorFinder):
             for i, line in enumerate(leaf.value.splitlines()[1:]):
                 indentation = re.match(r'[ \t]*', line).group(0)
                 start_pos = leaf.line + i, len(indentation)
+                print(start_pos)
                 # TODO check multiline indentation.
         elif typ == 'endmarker':
             if self._newline_count >= 2:

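Most hunks in this file re-wrap long add_issue calls for E501, one argument per line inside the call's parentheses, and split long messages via adjacent string-literal concatenation (as in the code 123 message). The code 712 message instead uses a backslash continuation; a parenthesized form, sketched below, is the more commonly recommended equivalent:

    message = (
        "comparison to False/True should be "
        "'if cond is True:' or 'if cond:'"
    )

Note also that the final hunk introduces a bare print(start_pos), which reads like a debugging leftover rather than a flake8 fix.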
View File

@@ -395,12 +395,12 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first
         is_first_token = False

         if contstr:  # continued string
-            endmatch = endprog.match(line)
+            endmatch = endprog.match(line)  # noqa: F821
             if endmatch:
                 pos = endmatch.end(0)
                 yield PythonToken(
                     STRING, contstr + line[:pos],
-                    contstr_start, prefix)
+                    contstr_start, prefix)  # noqa: F821
                 contstr = ''
                 contline = None
             else:

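The two "# noqa: F821" comments suppress flake8's undefined-name check: names like endprog and contstr_start are only bound on an earlier loop iteration, in the branch that starts a continued string, so the use appears textually before any assignment. A reduced sketch of the same shape, with hypothetical names:

    import re

    def scan(lines):
        continuing = False
        for line in lines:
            if continuing:
                # flake8 reports F821 for `pattern` here without the noqa,
                # even though this branch only runs after it was assigned
                if pattern.match(line):  # noqa: F821
                    continuing = False
            elif line.startswith('"""'):
                pattern = re.compile(r'"""')
                continuing = True
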
View File

@@ -135,11 +135,11 @@ class FileModification:
                 # We cannot delete every line, that doesn't make sense to
                 # fuzz and it would be annoying to rewrite everything here.
                 continue
-            l = LineDeletion(random_line())
+            ld = LineDeletion(random_line())
         elif rand == 2:
             # Copy / Insertion
             # Make it possible to insert into the first and the last line
-            l = LineCopy(random_line(), random_line(include_end=True))
+            ld = LineCopy(random_line(), random_line(include_end=True))
         elif rand in (3, 4):
             # Modify a line in some weird random ways.
             line_nr = random_line()
@@ -166,9 +166,9 @@ class FileModification:
                 # we really replace the line with something that has
                 # indentation.
                 line = ' ' * random.randint(0, 12) + random_string + '\n'
-            l = LineReplacement(line_nr, line)
-        l.apply(lines)
-        yield l
+            ld = LineReplacement(line_nr, line)
+        ld.apply(lines)
+        yield ld

     def __init__(self, modification_list, check_original):
         self.modification_list = modification_list

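The l-to-ld renames address E741 (ambiguous variable name): a bare l is easily mistaken for 1 or I, so flake8 rejects it as an identifier. For example:

    l = list(range(3))   # E741: ambiguous variable name 'l'
    ld = list(range(3))  # any clearer name passes

The same fix appears further down, where collect_errors renames l to ln.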
View File

@@ -81,7 +81,7 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
     way.

    __ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html
-    """
+    """  # noqa
     grammar = load_grammar()
     module = 'fake parser'

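A bare "# noqa" with no error code suppresses every flake8 warning on the line; here it presumably silences E501 for the long documentation URL, which cannot sensibly be wrapped. Scoping the comment to one code is generally safer, e.g.:

    DOCS_URL = "https://developer.apple.com/library/content/documentation/FileManagement/"  # noqa: E501
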
View File

@@ -1340,7 +1340,7 @@ def test_backslash_issue(differ):
         pre = (
             '')
         \\if
-        ''')
+        ''')  # noqa
     differ.initialize(code1)
     differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
     differ.parse(code1, parsers=1, copies=1)
@@ -1420,7 +1420,7 @@ def test_with_formfeed(differ):
         \x0cimport
             return
         return ''
-        ''')
+        ''')  # noqa
     differ.initialize(code1)
     differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)

View File

@@ -42,9 +42,9 @@ def collect_errors(code):
             column = int(add_indent or len(match.group(1)))

             code, _, add_line = code.partition('+')
-            l = line_nr + 1 + int(add_line or 0)
+            ln = line_nr + 1 + int(add_line or 0)

-            yield WantedIssue(code[1:], l, column)
+            yield WantedIssue(code[1:], ln, column)


 def test_normalizer_issue(normalizer_issue_case):

View File

@@ -135,7 +135,7 @@ def test_wrong_indentation():
            b
            a
        """)
-    #check_p(src, 1)
+    check_p(src, 1)

    src = dedent("""\
        def complex():
View File

@@ -193,10 +193,12 @@ def test_no_error_nodes(each_version):
 def test_named_expression(works_ge_py38):
     works_ge_py38.parse("(a := 1, a + 1)")

+
 def test_extended_rhs_annassign(works_ge_py38):
     works_ge_py38.parse("x: y = z,")
     works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w")

+
 @pytest.mark.parametrize(
     'param_code', [
         'a=1, /',
@@ -211,6 +213,7 @@ def test_extended_rhs_annassign(works_ge_py38):
 def test_positional_only_arguments(works_ge_py38, param_code):
     works_ge_py38.parse("def x(%s): pass" % param_code)

+
 @pytest.mark.parametrize(
     'expression', [
         'a + a',

View File

@@ -33,6 +33,7 @@ def test_eof_blankline():
     assert_issue('# foobar\n\n')
     assert_issue('\n\n')

+
 def test_shebang():
     assert not issues('#!\n')
     assert not issues('#!/foo\n')

View File

@@ -273,10 +273,12 @@ def test_too_many_levels_of_indentation():
     assert not _get_error_list(build_nested('pass', 49, base=base))
     assert _get_error_list(build_nested('pass', 50, base=base))

+
 def test_paren_kwarg():
     assert _get_error_list("print((sep)=seperator)", version="3.8")
     assert not _get_error_list("print((sep)=seperator)", version="3.7")

+
 @pytest.mark.parametrize(
     'code', [
         "f'{*args,}'",
@@ -330,6 +332,7 @@ def test_trailing_comma(code):
     errors = _get_error_list(code)
     assert not errors

+
 def test_continue_in_finally():
     code = dedent('''\
         for a in [1]:
@@ -392,6 +395,7 @@ def test_repeated_kwarg():
 def test_unparenthesized_genexp(source, no_errors):
     assert bool(_get_error_list(source)) ^ no_errors

+
 @pytest.mark.parametrize(
     ('source', 'no_errors'), [
         ('*x = 2', False),

View File

@@ -83,6 +83,7 @@ def test_bytes_to_unicode_failing_encoding(code, errors):
     else:
         python_bytes_to_unicode(code, errors=errors)

+
 @pytest.mark.parametrize(
     ('version_str', 'version'), [
         ('3', (3,)),