From 206dfd113b99a489dc9b0bfb8ec00149db4cc446 Mon Sep 17 00:00:00 2001 From: Dave Halter Date: Mon, 15 May 2017 13:53:30 -0400 Subject: [PATCH] Use jedi less in the tests. --- conftest.py | 2 +- test/test_diff_parser.py | 2 +- test/test_old_fast_parser.py | 2 +- test/test_parser.py | 87 +++--------------------------------- test/test_pgen2.py | 4 +- test/test_tokenize.py | 12 ++--- 6 files changed, 16 insertions(+), 93 deletions(-) diff --git a/conftest.py b/conftest.py index 2894ba4..60b08ce 100644 --- a/conftest.py +++ b/conftest.py @@ -6,7 +6,7 @@ import pytest from parso import cache -#collect_ignore = ["setup.py"] +collect_ignore = ["setup.py"] # The following hooks (pytest_configure, pytest_unconfigure) are used diff --git a/test/test_diff_parser.py b/test/test_diff_parser.py index 323c9f2..efa1992 100644 --- a/test/test_diff_parser.py +++ b/test/test_diff_parser.py @@ -4,7 +4,7 @@ import pytest import jedi from jedi import debug -from jedi.common import splitlines +from parso.utils import splitlines from jedi import cache from parso.cache import parser_cache from parso.python import load_grammar diff --git a/test/test_old_fast_parser.py b/test/test_old_fast_parser.py index b3ff217..17f98c7 100644 --- a/test/test_old_fast_parser.py +++ b/test/test_old_fast_parser.py @@ -9,7 +9,7 @@ However the tests might still be relevant for the parser. 
from textwrap import dedent import jedi -from jedi._compatibility import u +from parso._compatibility import u from parso.python import parse diff --git a/test/test_parser.py b/test/test_parser.py index 7a14277..051387a 100644 --- a/test/test_parser.py +++ b/test/test_parser.py @@ -4,13 +4,10 @@ from textwrap import dedent import pytest -import jedi -from jedi._compatibility import u, is_py3 +from parso._compatibility import u, py_version from parso.python import parse, load_grammar from parso.python import tree -from jedi.common import splitlines -from jedi.parser_utils import get_statement_of_position, \ - clean_scope_docstring, safe_literal_eval +from parso.utils import splitlines def test_basic_parsing(): @@ -23,53 +20,6 @@ def test_basic_parsing(): compare('def x(a, b:3): pass\n') compare('assert foo\n') -def test_user_statement_on_import(): - """github #285""" - s = "from datetime import (\n" \ - " time)" - - for pos in [(2, 1), (2, 4)]: - p = parse(s) - stmt = get_statement_of_position(p, pos) - assert isinstance(stmt, tree.Import) - assert [n.value for n in stmt.get_defined_names()] == ['time'] - - -class TestCallAndName(): - def get_call(self, source): - # Get the simple_stmt and then the first one. 
- simple_stmt = parse(source).children[0] - return simple_stmt.children[0] - - def test_name_and_call_positions(self): - name = self.get_call('name\nsomething_else') - assert name.value == 'name' - assert name.start_pos == (1, 0) - assert name.end_pos == (1, 4) - - leaf = self.get_call('1.0\n') - assert leaf.value == '1.0' - assert safe_literal_eval(leaf.value) == 1.0 - assert leaf.start_pos == (1, 0) - assert leaf.end_pos == (1, 3) - - def test_call_type(self): - call = self.get_call('hello') - assert isinstance(call, tree.Name) - - def test_literal_type(self): - literal = self.get_call('1.0') - assert isinstance(literal, tree.Literal) - assert type(safe_literal_eval(literal.value)) == float - - literal = self.get_call('1') - assert isinstance(literal, tree.Literal) - assert type(safe_literal_eval(literal.value)) == int - - literal = self.get_call('"hello"') - assert isinstance(literal, tree.Literal) - assert safe_literal_eval(literal.value) == 'hello' - class TestSubscopes(): def get_sub(self, source): @@ -135,33 +85,6 @@ def test_incomplete_list_comprehension(): ['error_node', 'error_node', 'newline', 'endmarker'] -def test_hex_values_in_docstring(): - source = r''' - def foo(object): - """ - \xff - """ - return 1 - ''' - - doc = clean_scope_docstring(next(parse(source).iter_funcdefs())) - if is_py3: - assert doc == '\xff' - else: - assert doc == u('ÿ') - - -def test_error_correction_with(): - source = """ - with open() as f: - try: - f.""" - comps = jedi.Script(source).completions() - assert len(comps) > 30 - # `open` completions have a closed attribute. - assert [1 for c in comps if c.name == 'closed'] - - def test_newline_positions(): endmarker = parse('a\n').children[-1] assert endmarker.end_pos == (2, 0) @@ -193,7 +116,7 @@ def test_param_splitting(): # Python 2 tuple params should be ignored for now. 
grammar = load_grammar('%s.%s' % sys.version_info[:2]) m = parse(src, grammar=grammar) - if is_py3: + if py_version >= 30: assert not list(m.iter_funcdefs()) else: # We don't want b and c to be a part of the param enumeration. Just @@ -222,7 +145,7 @@ def test_started_lambda_stmt(): def test_python2_octal(): module = parse('0660') first = module.children[0] - if is_py3: + if py_version >= 30: assert first.type == 'error_node' else: assert first.children[0].type == 'number' @@ -230,7 +153,7 @@ def test_python2_octal(): def test_python3_octal(): module = parse('0o660') - if is_py3: + if py_version >= 30: assert module.children[0].children[0].type == 'number' else: assert module.children[0].type == 'error_node' diff --git a/test/test_pgen2.py b/test/test_pgen2.py index 4d965e5..e266d64 100644 --- a/test/test_pgen2.py +++ b/test/test_pgen2.py @@ -8,7 +8,7 @@ test_grammar.py files from both Python 2 and Python 3. from textwrap import dedent -from jedi._compatibility import is_py3 +from parso._compatibility import py_version from parso.python import parse as _parse, load_grammar from parso import ParserSyntaxError import pytest @@ -217,7 +217,7 @@ class TestSetLiteral(GrammarTest): class TestNumericLiterals(GrammarTest): def test_new_octal_notation(self): code = """0o7777777777777""" - if is_py3: + if py_version >= 30: parse(code) else: self.invalid_syntax(code) diff --git a/test/test_tokenize.py b/test/test_tokenize.py index b60f1a1..2beb72a 100644 --- a/test/test_tokenize.py +++ b/test/test_tokenize.py @@ -2,8 +2,8 @@ from textwrap import dedent -from jedi._compatibility import is_py3, py_version -from jedi.common import splitlines +from parso._compatibility import py_version +from parso.utils import splitlines from parso.token import NAME, OP, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER from parso import tokenize from parso.python import parse @@ -117,7 +117,7 @@ class TokenTest(unittest.TestCase): tokens = tokenize.source_tokens(fundef) token_list = list(tokens) 
unicode_token = token_list[1] - if is_py3: + if py_version >= 30: assert unicode_token[0] == NAME else: # Unicode tokens in Python 2 seem to be identified as operators. @@ -170,9 +170,9 @@ def test_ur_literals(): assert typ == NAME check('u""') - check('ur""', is_literal=not is_py3) - check('Ur""', is_literal=not is_py3) - check('UR""', is_literal=not is_py3) + check('ur""', is_literal=not py_version >= 30) + check('Ur""', is_literal=not py_version >= 30) + check('UR""', is_literal=not py_version >= 30) check('bR""') # Starting with Python 3.3 this ordering is also possible, but we just # enable it for all versions. It doesn't hurt.