Mirror of https://github.com/davidhalter/parso.git (synced 2025-12-06 12:54:29 +08:00)
Refactor splitlines -> split_lines.
@@ -3,7 +3,7 @@ import os
 
 from parso._compatibility import FileNotFoundError, is_pypy
 from parso.pgen2.pgen import generate_grammar
-from parso.utils import splitlines, source_to_unicode, parse_version_string
+from parso.utils import split_lines, source_to_unicode, parse_version_string
 from parso.python.diff import DiffParser
 from parso.python.tokenize import tokenize_lines, tokenize
 from parso.cache import parser_cache, load_module, save_module
@@ -88,7 +88,7 @@ class Grammar(object):
 
         code = source_to_unicode(code)
 
-        lines = splitlines(code, keepends=True)
+        lines = split_lines(code, keepends=True)
         if diff_cache:
             if self._diff_parser is None:
                 raise TypeError("You have to define a diff parser to be able "
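
Note: the hunk above is apparently Grammar.parse (parso/grammar.py). It splits the source with keepends=True so line endings survive and the parsed module can reproduce the code exactly. A usage sketch mirroring the call made by the Differ test helper later in this commit; the code string is invented, and it assumes the top-level parso.parse forwards diff_cache/cache to the grammar as it does here:

import parso

# diff_cache needs a grammar that defines a diff parser (the Python grammar does).
module = parso.parse("x = 1\n", diff_cache=True, cache=True)
print(module.get_code())  # round-trips the exact source, including line endings
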
@@ -10,7 +10,7 @@ import difflib
 from collections import namedtuple
 import logging
 
-from parso.utils import splitlines
+from parso.utils import split_lines
 from parso.python.parser import Parser
 from parso.python.tree import EndMarker
 from parso.python.tokenize import (NEWLINE, TokenInfo, ERROR_DEDENT,
@@ -154,7 +154,7 @@ class DiffParser(object):
 
         last_pos = self._module.end_pos[0]
         if last_pos != line_length:
-            current_lines = splitlines(self._module.get_code(), keepends=True)
+            current_lines = split_lines(self._module.get_code(), keepends=True)
             diff = difflib.unified_diff(current_lines, new_lines)
             raise Exception(
                 "There's an issue (%s != %s) with the diff parser. Please report:\n%s"
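
Note: this DiffParser hunk (presumably parso/python/diff.py) is a sanity check: if the re-parsed module does not end on the expected line, it embeds a unified diff of the two keepends=True line lists in the exception message. A standalone illustration of what difflib.unified_diff produces from such lists; the inputs are invented for the example:

import difflib

current_lines = ['def f():\n', '    return 1\n']
new_lines = ['def f():\n', '    return 2\n']
print(''.join(difflib.unified_diff(current_lines, new_lines)))
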
@@ -572,7 +572,7 @@ class _NodesStack(object):
             end_pos = list(last_leaf.end_pos)
         except IndexError:
             end_pos = [1, 0]
-        lines = splitlines(self.prefix)
+        lines = split_lines(self.prefix)
         assert len(lines) > 0
         if len(lines) == 1:
             end_pos[1] += len(lines[0])
@@ -22,7 +22,7 @@ from parso.python.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, o
                                 NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT,
                                 ERROR_DEDENT)
 from parso._compatibility import py_version
-from parso.utils import splitlines
+from parso.utils import split_lines
 
 
 TokenCollection = namedtuple(
@@ -224,7 +224,7 @@ class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
 
     @property
     def end_pos(self):
-        lines = splitlines(self.string)
+        lines = split_lines(self.string)
         if len(lines) > 1:
             return self.start_pos[0] + len(lines) - 1, 0
         else:
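
Note: TokenInfo.end_pos (presumably parso/python/tokenize.py) derives the end position from how many lines the token's string spans. A worked example of the multi-line branch shown above, with an assumed start position and a parso checkout that includes this commit:

from parso.utils import split_lines

start_pos = (1, 5)          # assumed position of a NEWLINE token whose string is '\n'
lines = split_lines('\n')   # ['', ''] per the tests at the end of this commit
end_pos = (start_pos[0] + len(lines) - 1, 0)
assert end_pos == (2, 0)    # the token ends at column 0 of the next line
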
@@ -233,7 +233,7 @@ class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
 
 def tokenize(code, version_info):
     """Generate tokens from a the source code (string)."""
-    lines = splitlines(code, keepends=True)
+    lines = split_lines(code, keepends=True)
     return tokenize_lines(lines, version_info)
 
 
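Note: tokenize() is a thin wrapper that splits the code with keepends=True and hands the lines to tokenize_lines. A usage sketch based only on the names and signatures visible in this diff; the code snippet and version string are invented, and it assumes parse_version_string accepts a version like "3.6":

from parso.python.tokenize import tokenize
from parso.utils import parse_version_string

for token in tokenize("x = 1\n", parse_version_string("3.6")):
    print(token.type, repr(token.string), token.start_pos)
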
@@ -9,9 +9,9 @@ from parso._compatibility import unicode, total_ordering
 Version = namedtuple('Version', 'major, minor, micro')
 
 
-def splitlines(string, keepends=False):
+def split_lines(string, keepends=False):
     r"""
-    A splitlines for Python code. In contrast to Python's ``str.splitlines``,
+    A str.splitlines for Python code. In contrast to Python's ``str.splitlines``,
     looks at form feeds and other special characters as normal text. Just
     splits ``\n`` and ``\r\n``.
     Also different: Returns ``['']`` for an empty string input.
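
Note: this is the rename itself in parso/utils.py. For readers skimming the diff, here is a minimal sketch that mirrors the documented behavior (split only on '\n' and '\r\n', treat form feeds as ordinary text, return [''] for empty input). The helper name is invented and this is not the actual parso implementation:

import re

def split_lines_sketch(string, keepends=False):
    if not keepends:
        return re.split(r'\r\n|\n', string)
    # Keep the separators: re.split with a capturing group alternates text
    # and separator, so glue each separator back onto the line before it.
    parts = re.split(r'(\r\n|\n)', string)
    return [parts[i] + (parts[i + 1] if i + 1 < len(parts) else '')
            for i in range(0, len(parts), 2)]

The test_utils.py hunk at the end of this commit exercises exactly these cases.
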
@@ -3,7 +3,7 @@ import logging
 
 import pytest
 
-from parso.utils import splitlines
+from parso.utils import split_lines
 from parso import cache
 from parso import load_grammar
 from parso.python.diff import DiffParser
@@ -58,13 +58,13 @@ class Differ(object):
         except KeyError:
             pass
 
-        self.lines = splitlines(code, keepends=True)
+        self.lines = split_lines(code, keepends=True)
         self.module = parse(code, diff_cache=True, cache=True)
         return self.module
 
     def parse(self, code, copies=0, parsers=0, expect_error_leaves=False):
         logging.debug('differ: parse copies=%s parsers=%s', copies, parsers)
-        lines = splitlines(code, keepends=True)
+        lines = split_lines(code, keepends=True)
         diff_parser = DiffParser(
             self.grammar._pgen_grammar,
             self.grammar._tokenizer,
@@ -6,7 +6,7 @@ import pytest
 from parso._compatibility import u
 from parso import parse
 from parso.python import tree
-from parso.utils import splitlines
+from parso.utils import split_lines
 
 
 def test_basic_parsing(each_version):
@@ -153,7 +153,7 @@ def test_open_string_literal(each_version, code):
     """
     Testing mostly if removing the last newline works.
     """
-    lines = splitlines(code, keepends=True)
+    lines = split_lines(code, keepends=True)
     end_pos = (len(lines), len(lines[-1]))
     module = parse(code, version=each_version)
     assert module.get_code() == code
@@ -5,7 +5,7 @@ from textwrap import dedent
 import pytest
 
 from parso._compatibility import py_version
-from parso.utils import splitlines, parse_version_string
+from parso.utils import split_lines, parse_version_string
 from parso.python.token import (
     NAME, NEWLINE, STRING, INDENT, DEDENT, ERRORTOKEN, ENDMARKER, ERROR_DEDENT)
 from parso.python import tokenize
@@ -201,7 +201,7 @@ def test_error_literal():
 def test_endmarker_end_pos():
     def check(code):
         tokens = _get_token_list(code)
-        lines = splitlines(code)
+        lines = split_lines(code)
         assert tokens[-1].end_pos == (len(lines), len(lines[-1]))
 
     check('#c')
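
Note: test_endmarker_end_pos (presumably test/test_tokenize.py) asserts that the final ENDMARKER token ends at (number of lines, length of the last line). For the first case the test feeds in, the expected value works out as follows (standalone arithmetic, not the test itself):

from parso.utils import split_lines

code = '#c'                 # a comment with no trailing newline
lines = split_lines(code)   # ['#c']
assert (len(lines), len(lines[-1])) == (1, 2)
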
@@ -1,23 +1,23 @@
 from codecs import BOM_UTF8
 
-from parso.utils import splitlines, source_to_unicode
+from parso.utils import split_lines, source_to_unicode
 import parso
 
 
-def test_splitlines_no_keepends():
-    assert splitlines('asd\r\n') == ['asd', '']
-    assert splitlines('asd\r\n\f') == ['asd', '\f']
-    assert splitlines('\fasd\r\n') == ['\fasd', '']
-    assert splitlines('') == ['']
-    assert splitlines('\n') == ['', '']
+def test_split_lines_no_keepends():
+    assert split_lines('asd\r\n') == ['asd', '']
+    assert split_lines('asd\r\n\f') == ['asd', '\f']
+    assert split_lines('\fasd\r\n') == ['\fasd', '']
+    assert split_lines('') == ['']
+    assert split_lines('\n') == ['', '']
 
 
-def test_splitlines_keepends():
-    assert splitlines('asd\r\n', keepends=True) == ['asd\r\n', '']
-    assert splitlines('asd\r\n\f', keepends=True) == ['asd\r\n', '\f']
-    assert splitlines('\fasd\r\n', keepends=True) == ['\fasd\r\n', '']
-    assert splitlines('', keepends=True) == ['']
-    assert splitlines('\n', keepends=True) == ['\n', '']
+def test_split_lines_keepends():
+    assert split_lines('asd\r\n', keepends=True) == ['asd\r\n', '']
+    assert split_lines('asd\r\n\f', keepends=True) == ['asd\r\n', '\f']
+    assert split_lines('\fasd\r\n', keepends=True) == ['\fasd\r\n', '']
+    assert split_lines('', keepends=True) == ['']
+    assert split_lines('\n', keepends=True) == ['\n', '']
 
 
 def test_source_to_unicode_unicode_text():