Use jedi less in the tests.

Dave Halter
2017-05-15 13:53:30 -04:00
parent f784e28eec
commit 206dfd113b
6 changed files with 16 additions and 93 deletions
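The change repeated across the files below is the same in each test module: helpers previously imported from jedi (splitlines, u, the is_py3 flag) are swapped for their parso equivalents, and is_py3 becomes a comparison against parso._compatibility.py_version. A minimal sketch of that pattern, assuming py_version is the two-digit integer that parso derives from sys.version_info (the exact definition lives in parso, not in this commit):

    import sys

    # Assumed equivalent of parso._compatibility.py_version: major and minor
    # version digits joined into one integer, e.g. 2.7 -> 27, 3.6 -> 36.
    py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))

    # The old jedi-style boolean and the new parso-style comparison agree.
    is_py3 = sys.version_info[0] >= 3
    assert is_py3 == (py_version >= 30)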

View File

@@ -6,7 +6,7 @@ import pytest
 from parso import cache
-#collect_ignore = ["setup.py"]
+collect_ignore = ["setup.py"]
 # The following hooks (pytest_configure, pytest_unconfigure) are used

View File

@@ -4,7 +4,7 @@ import pytest
 import jedi
 from jedi import debug
-from jedi.common import splitlines
+from parso.utils import splitlines
 from jedi import cache
 from parso.cache import parser_cache
 from parso.python import load_grammar

View File

@@ -9,7 +9,7 @@ However the tests might still be relevant for the parser.
 from textwrap import dedent
 import jedi
-from jedi._compatibility import u
+from parso._compatibility import u
 from parso.python import parse

View File

@@ -4,13 +4,10 @@ from textwrap import dedent
 import pytest
-import jedi
-from jedi._compatibility import u, is_py3
+from parso._compatibility import u, py_version
 from parso.python import parse, load_grammar
 from parso.python import tree
-from jedi.common import splitlines
-from jedi.parser_utils import get_statement_of_position, \
-    clean_scope_docstring, safe_literal_eval
+from parso.utils import splitlines
 def test_basic_parsing():
@@ -23,53 +20,6 @@ def test_basic_parsing():
     compare('def x(a, b:3): pass\n')
     compare('assert foo\n')


-def test_user_statement_on_import():
-    """github #285"""
-    s = "from datetime import (\n" \
-        "    time)"
-
-    for pos in [(2, 1), (2, 4)]:
-        p = parse(s)
-        stmt = get_statement_of_position(p, pos)
-        assert isinstance(stmt, tree.Import)
-        assert [n.value for n in stmt.get_defined_names()] == ['time']
-
-
-class TestCallAndName():
-    def get_call(self, source):
-        # Get the simple_stmt and then the first one.
-        simple_stmt = parse(source).children[0]
-        return simple_stmt.children[0]
-
-    def test_name_and_call_positions(self):
-        name = self.get_call('name\nsomething_else')
-        assert name.value == 'name'
-        assert name.start_pos == (1, 0)
-        assert name.end_pos == (1, 4)
-
-        leaf = self.get_call('1.0\n')
-        assert leaf.value == '1.0'
-        assert safe_literal_eval(leaf.value) == 1.0
-        assert leaf.start_pos == (1, 0)
-        assert leaf.end_pos == (1, 3)
-
-    def test_call_type(self):
-        call = self.get_call('hello')
-        assert isinstance(call, tree.Name)
-
-    def test_literal_type(self):
-        literal = self.get_call('1.0')
-        assert isinstance(literal, tree.Literal)
-        assert type(safe_literal_eval(literal.value)) == float
-
-        literal = self.get_call('1')
-        assert isinstance(literal, tree.Literal)
-        assert type(safe_literal_eval(literal.value)) == int
-
-        literal = self.get_call('"hello"')
-        assert isinstance(literal, tree.Literal)
-        assert safe_literal_eval(literal.value) == 'hello'
-
-
 class TestSubscopes():
     def get_sub(self, source):
@@ -135,33 +85,6 @@ def test_incomplete_list_comprehension():
         ['error_node', 'error_node', 'newline', 'endmarker']


-def test_hex_values_in_docstring():
-    source = r'''
-        def foo(object):
-            """
-             \xff
-            """
-            return 1
-        '''
-
-    doc = clean_scope_docstring(next(parse(source).iter_funcdefs()))
-    if is_py3:
-        assert doc == '\xff'
-    else:
-        assert doc == u('�')
-
-
-def test_error_correction_with():
-    source = """
-    with open() as f:
-        try:
-            f."""
-    comps = jedi.Script(source).completions()
-    assert len(comps) > 30
-    # `open` completions have a closed attribute.
-    assert [1 for c in comps if c.name == 'closed']
-
-
 def test_newline_positions():
     endmarker = parse('a\n').children[-1]
     assert endmarker.end_pos == (2, 0)
@@ -193,7 +116,7 @@ def test_param_splitting():
         # Python 2 tuple params should be ignored for now.
         grammar = load_grammar('%s.%s' % sys.version_info[:2])
         m = parse(src, grammar=grammar)
-        if is_py3:
+        if py_version >= 30:
             assert not list(m.iter_funcdefs())
         else:
             # We don't want b and c to be a part of the param enumeration. Just
@@ -222,7 +145,7 @@ def test_started_lambda_stmt():
 def test_python2_octal():
     module = parse('0660')
     first = module.children[0]
-    if is_py3:
+    if py_version >= 30:
         assert first.type == 'error_node'
     else:
         assert first.children[0].type == 'number'
@@ -230,7 +153,7 @@ def test_python2_octal():
 def test_python3_octal():
     module = parse('0o660')
-    if is_py3:
+    if py_version >= 30:
         assert module.children[0].children[0].type == 'number'
     else:
         assert module.children[0].type == 'error_node'

View File

@@ -8,7 +8,7 @@ test_grammar.py files from both Python 2 and Python 3.
 from textwrap import dedent
-from jedi._compatibility import is_py3
+from parso._compatibility import py_version
 from parso.python import parse as _parse, load_grammar
 from parso import ParserSyntaxError
 import pytest
@@ -217,7 +217,7 @@ class TestSetLiteral(GrammarTest):
 class TestNumericLiterals(GrammarTest):
     def test_new_octal_notation(self):
         code = """0o7777777777777"""
-        if is_py3:
+        if py_version >= 30:
             parse(code)
         else:
             self.invalid_syntax(code)

View File

@@ -2,8 +2,8 @@
 from textwrap import dedent
-from jedi._compatibility import is_py3, py_version
-from jedi.common import splitlines
+from parso._compatibility import py_version
+from parso.utils import splitlines
 from parso.token import NAME, OP, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
 from parso import tokenize
 from parso.python import parse
@@ -117,7 +117,7 @@ class TokenTest(unittest.TestCase):
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         unicode_token = token_list[1]
-        if is_py3:
+        if py_version >= 30:
             assert unicode_token[0] == NAME
         else:
             # Unicode tokens in Python 2 seem to be identified as operators.
@@ -170,9 +170,9 @@ def test_ur_literals():
         assert typ == NAME

     check('u""')
-    check('ur""', is_literal=not is_py3)
-    check('Ur""', is_literal=not is_py3)
-    check('UR""', is_literal=not is_py3)
+    check('ur""', is_literal=not py_version >= 30)
+    check('Ur""', is_literal=not py_version >= 30)
+    check('UR""', is_literal=not py_version >= 30)
     check('bR""')
     # Starting with Python 3.3 this ordering is also possible, but we just
     # enable it for all versions. It doesn't hurt.