From 51f2de28c6dea96099c8378c8cb164f60ff1486b Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Tue, 15 Aug 2017 20:07:24 +0200
Subject: [PATCH] source_to_unicode -> python_bytes_to_unicode.

---
 parso/grammar.py                     | 4 ++--
 parso/python/tokenize.py             | 4 ++--
 parso/utils.py                       | 2 +-
 test/test_normalizer_issues_files.py | 4 ++--
 test/test_utils.py                   | 6 +++---
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/parso/grammar.py b/parso/grammar.py
index 1996e79..34989db 100644
--- a/parso/grammar.py
+++ b/parso/grammar.py
@@ -3,7 +3,7 @@ import os
 
 from parso._compatibility import FileNotFoundError, is_pypy
 from parso.pgen2.pgen import generate_grammar
-from parso.utils import split_lines, source_to_unicode, parse_version_string
+from parso.utils import split_lines, python_bytes_to_unicode, parse_version_string
 from parso.python.diff import DiffParser
 from parso.python.tokenize import tokenize_lines, tokenize
 from parso.cache import parser_cache, load_module, save_module
@@ -86,7 +86,7 @@ class Grammar(object):
             with open(path, 'rb') as f:
                 code = f.read()
 
-        code = source_to_unicode(code)
+        code = python_bytes_to_unicode(code)
         lines = split_lines(code, keepends=True)
         if diff_cache:
diff --git a/parso/python/tokenize.py b/parso/python/tokenize.py
index c6b7a5a..02ceabf 100644
--- a/parso/python/tokenize.py
+++ b/parso/python/tokenize.py
@@ -406,10 +406,10 @@ if __name__ == "__main__":
     else:
         code = sys.stdin.read()
 
-    from parso.utils import source_to_unicode, parse_version_string
+    from parso.utils import python_bytes_to_unicode, parse_version_string
 
     if isinstance(code, bytes):
-        code = source_to_unicode(code)
+        code = python_bytes_to_unicode(code)
 
     for token in tokenize(code, parse_version_string()):
         print(token)
diff --git a/parso/utils.py b/parso/utils.py
index 70980ff..7b5de71 100644
--- a/parso/utils.py
+++ b/parso/utils.py
@@ -48,7 +48,7 @@ def split_lines(string, keepends=False):
     return re.split('\n|\r\n', string)
 
 
-def source_to_unicode(source, default_encoding='utf-8', errors='strict'):
+def python_bytes_to_unicode(source, default_encoding='utf-8', errors='strict'):
     """
     `errors` can be 'strict', 'replace' or 'ignore'.
     """
diff --git a/test/test_normalizer_issues_files.py b/test/test_normalizer_issues_files.py
index eb65bd3..7f692d1 100644
--- a/test/test_normalizer_issues_files.py
+++ b/test/test_normalizer_issues_files.py
@@ -8,7 +8,7 @@ import re
 
 import parso
 from parso._compatibility import total_ordering
-from parso.utils import source_to_unicode
+from parso.utils import python_bytes_to_unicode
 
 
 @total_ordering
@@ -54,7 +54,7 @@ def test_normalizer_issue(normalizer_issue_case):
               for i in issues]
 
     with open(normalizer_issue_case.path, 'rb') as f:
-        code = source_to_unicode(f.read())
+        code = python_bytes_to_unicode(f.read())
 
     desired = sort(collect_errors(code))
diff --git a/test/test_utils.py b/test/test_utils.py
index b93e592..4425bd2 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -1,6 +1,6 @@
 from codecs import BOM_UTF8
 
-from parso.utils import split_lines, source_to_unicode
+from parso.utils import split_lines, python_bytes_to_unicode
 import parso
 
 
@@ -20,12 +20,12 @@ def test_split_lines_keepends():
     assert split_lines('\n', keepends=True) == ['\n', '']
 
 
-def test_source_to_unicode_unicode_text():
+def test_python_bytes_to_unicode_unicode_text():
     source = (
         b"# vim: fileencoding=utf-8\n"
         b"# \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\n"
     )
-    actual = source_to_unicode(source)
+    actual = python_bytes_to_unicode(source)
     expected = source.decode('utf-8')
     assert actual == expected