Fix line-ending support at various locations

This commit is contained in:
gousaiyang
2021-05-29 17:56:50 -07:00
parent 86f3f1096b
commit 7000dd24d7
7 changed files with 22 additions and 13 deletions

View File

@@ -74,7 +74,7 @@ class BracketNode(IndentationNode):
parent_indentation = n.indentation parent_indentation = n.indentation
next_leaf = leaf.get_next_leaf() next_leaf = leaf.get_next_leaf()
if '\n' in next_leaf.prefix: if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix:
# This implies code like: # This implies code like:
# foobarbaz( # foobarbaz(
# a, # a,
@@ -116,7 +116,7 @@ class ImplicitNode(BracketNode):
self.type = IndentationTypes.IMPLICIT self.type = IndentationTypes.IMPLICIT
next_leaf = leaf.get_next_leaf() next_leaf = leaf.get_next_leaf()
if leaf == ':' and '\n' not in next_leaf.prefix: if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in next_leaf.prefix:
self.indentation += ' ' self.indentation += ' '
@@ -216,8 +216,8 @@ class PEP8Normalizer(ErrorFinder):
endmarker = node.children[-1] endmarker = node.children[-1]
prev = endmarker.get_previous_leaf() prev = endmarker.get_previous_leaf()
prefix = endmarker.prefix prefix = endmarker.prefix
if (not prefix.endswith('\n') and ( if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
prefix or prev is None or prev.value != '\n')): prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})):
self.add_issue(endmarker, 292, "No newline at end of file") self.add_issue(endmarker, 292, "No newline at end of file")
if typ in _IMPORT_TYPES: if typ in _IMPORT_TYPES:
@@ -465,7 +465,8 @@ class PEP8Normalizer(ErrorFinder):
+ self._config.indentation: + self._config.indentation:
self.add_issue(part, 129, "Line with same indent as next logical block") self.add_issue(part, 129, "Line with same indent as next logical block")
elif indentation != should_be_indentation: elif indentation != should_be_indentation:
if not self._check_tabs_spaces(spacing) and part.value != '\n': if not self._check_tabs_spaces(spacing) and part.value not in \
{'\n', '\r\n', '\r'}:
if value in '])}': if value in '])}':
if node.type == IndentationTypes.VERTICAL_BRACKET: if node.type == IndentationTypes.VERTICAL_BRACKET:
self.add_issue( self.add_issue(
@@ -652,7 +653,8 @@ class PEP8Normalizer(ErrorFinder):
else: else:
prev_spacing = self._previous_spacing prev_spacing = self._previous_spacing
if prev in _ALLOW_SPACE and spaces != prev_spacing.value \ if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
and '\n' not in self._previous_leaf.prefix: and '\n' not in self._previous_leaf.prefix \
and '\r' not in self._previous_leaf.prefix:
message = "Whitespace before operator doesn't match with whitespace after" message = "Whitespace before operator doesn't match with whitespace after"
self.add_issue(spacing, 229, message) self.add_issue(spacing, 229, message)

View File

@@ -18,7 +18,7 @@ class PrefixPart:
@property @property
def end_pos(self) -> Tuple[int, int]: def end_pos(self) -> Tuple[int, int]:
if self.value.endswith('\n'): if self.value.endswith('\n') or self.value.endswith('\r'):
return self.start_pos[0] + 1, 0 return self.start_pos[0] + 1, 0
if self.value == unicode_bom: if self.value == unicode_bom:
# The bom doesn't have a length at the start of a Python file. # The bom doesn't have a length at the start of a Python file.
@@ -50,8 +50,8 @@ class PrefixPart:
_comment = r'#[^\n\r\f]*' _comment = r'#[^\n\r\f]*'
_backslash = r'\\\r?\n' _backslash = r'\\\r?\n|\\\r'
_newline = r'\r?\n' _newline = r'\r?\n|\r'
_form_feed = r'\f' _form_feed = r'\f'
_only_spacing = '$' _only_spacing = '$'
_spacing = r'[ \t]*' _spacing = r'[ \t]*'
@@ -94,7 +94,7 @@ def split_prefix(leaf, start_pos):
bom = True bom = True
start = match.end(0) start = match.end(0)
if value.endswith('\n'): if value.endswith('\n') or value.endswith('\r'):
line += 1 line += 1
column = -start column = -start

View File

@@ -548,7 +548,7 @@ def tokenize_lines(
additional_prefix = prefix + token additional_prefix = prefix + token
new_line = True new_line = True
elif initial == '#': # Comments elif initial == '#': # Comments
assert not token.endswith("\n") assert not token.endswith("\n") and not token.endswith("\r")
if fstring_stack and fstring_stack[-1].is_in_expr(): if fstring_stack and fstring_stack[-1].is_in_expr():
# `#` is not allowed in f-string expressions # `#` is not allowed in f-string expressions
yield PythonToken(ERRORTOKEN, initial, spos, prefix) yield PythonToken(ERRORTOKEN, initial, spos, prefix)

View File

@@ -92,7 +92,7 @@ def python_bytes_to_unicode(
# UTF-8 byte-order mark # UTF-8 byte-order mark
return 'utf-8' return 'utf-8'
first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0) first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', source).group(0)
possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
first_two_lines) first_two_lines)
if possible_encoding: if possible_encoding:

View File

@@ -15,6 +15,8 @@ def test_eof_newline():
assert issue.code == 292 assert issue.code == 292
assert not issues('asdf = 1\n') assert not issues('asdf = 1\n')
assert not issues('asdf = 1\r\n')
assert not issues('asdf = 1\r')
assert_issue('asdf = 1') assert_issue('asdf = 1')
assert_issue('asdf = 1\n# foo') assert_issue('asdf = 1\n# foo')
assert_issue('# foobar') assert_issue('# foobar')

View File

@@ -19,6 +19,7 @@ unicode_bom = BOM_UTF8.decode('utf-8')
(' \f ', ['\f', ' ']), (' \f ', ['\f', ' ']),
(' \f ', ['\f', ' ']), (' \f ', ['\f', ' ']),
(' \r\n', ['\r\n', '']), (' \r\n', ['\r\n', '']),
(' \r', ['\r', '']),
('\\\n', ['\\\n', '']), ('\\\n', ['\\\n', '']),
('\\\r\n', ['\\\r\n', '']), ('\\\r\n', ['\\\r\n', '']),
('\t\t\n\t', ['\n', '\t']), ('\t\t\n\t', ['\n', '\t']),
@@ -34,7 +35,7 @@ def test_simple_prefix_splitting(string, tokens):
assert pt.value == expected assert pt.value == expected
# Calculate the estimated end_pos # Calculate the estimated end_pos
if expected.endswith('\n'): if expected.endswith('\n') or expected.endswith('\r'):
end_pos = start_pos[0] + 1, 0 end_pos = start_pos[0] + 1, 0
else: else:
end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing) end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing)

View File

@@ -74,6 +74,10 @@ def test_utf8_bom():
('code', 'errors'), [ ('code', 'errors'), [
(b'# coding: wtf-12\nfoo', 'strict'), (b'# coding: wtf-12\nfoo', 'strict'),
(b'# coding: wtf-12\nfoo', 'replace'), (b'# coding: wtf-12\nfoo', 'replace'),
(b'# coding: wtf-12\r\nfoo', 'strict'),
(b'# coding: wtf-12\r\nfoo', 'replace'),
(b'# coding: wtf-12\rfoo', 'strict'),
(b'# coding: wtf-12\rfoo', 'replace'),
] ]
) )
def test_bytes_to_unicode_failing_encoding(code, errors): def test_bytes_to_unicode_failing_encoding(code, errors):