Remove the tests that have been moved to parso.

This commit is contained in:
Dave Halter
2017-05-19 10:23:56 -04:00
parent 3c57f781dd
commit fb1c208985
10 changed files with 0 additions and 1567 deletions

View File

@@ -1,29 +0,0 @@
"""
Tests ``from __future__ import absolute_import`` (only important for
Python 2.X)
"""
from jedi.parser.python import parse
def test_explicit_absolute_imports():
    """
    Detect modules with ``from __future__ import absolute_import``.
    """
    module = parse("from __future__ import absolute_import")
    # The module node exposes the future-import as a boolean flag.
    assert module.has_explicit_absolute_import()
def test_no_explicit_absolute_imports():
    """
    Detect modules without ``from __future__ import absolute_import``.
    """
    # A trivial module must not report the flag.
    assert not parse("1").has_explicit_absolute_import()
def test_dont_break_imports_without_namespaces():
    """
    The code checking for ``from __future__ import absolute_import`` shouldn't
    assume that all imports have non-``None`` namespaces.
    """
    # ``import xyzzy`` has no from-namespace; the check must not crash on it.
    src = "from __future__ import absolute_import\nimport xyzzy"
    assert parse(src).has_explicit_absolute_import()

View File

@@ -1,462 +0,0 @@
from textwrap import dedent
import pytest
from jedi import debug
from jedi.common import splitlines
from jedi import cache
from jedi.parser.cache import parser_cache
from jedi.parser.python import load_grammar
from jedi.parser.python.diff import DiffParser
from jedi.parser.python import parse
def _check_error_leaves_nodes(node):
if node.type in ('error_leaf', 'error_node'):
return True
try:
children = node.children
except AttributeError:
pass
else:
for child in children:
if _check_error_leaves_nodes(child):
return True
return False
def _assert_valid_graph(node):
"""
Checks if the parent/children relationship is correct.
"""
try:
children = node.children
except AttributeError:
return
for child in children:
assert child.parent == node
_assert_valid_graph(child)
class Differ(object):
    """Test driver around ``DiffParser`` that verifies its bookkeeping."""
    grammar = load_grammar()

    def initialize(self, code):
        """Parse *code* from scratch and remember it as the diff baseline."""
        debug.dbg('differ: initialize', color='YELLOW')
        self.lines = splitlines(code, keepends=True)
        # Drop any cached module for path None so caching can't interfere.
        parser_cache.pop(None, None)
        self.module = parse(code, diff_cache=True, cache=True)
        return self.module

    def parse(self, code, copies=0, parsers=0, expect_error_leaves=False):
        """Diff-parse *code* against the baseline and check the counters.

        ``copies``/``parsers`` pin how many nodes the diff parser is expected
        to copy from the old tree vs. reparse from scratch.
        """
        debug.dbg('differ: parse copies=%s parsers=%s', copies, parsers, color='YELLOW')
        lines = splitlines(code, keepends=True)
        diff_parser = DiffParser(self.grammar, self.module)
        new_module = diff_parser.update(self.lines, lines)
        self.lines = lines
        # The updated tree must reproduce the new code exactly.
        assert code == new_module.get_code()
        assert diff_parser._copy_count == copies
        assert diff_parser._parser_count == parsers
        assert expect_error_leaves == _check_error_leaves_nodes(new_module)
        _assert_valid_graph(new_module)
        return new_module
@pytest.fixture()
def differ():
    # Fresh driver per test so diff state never leaks between tests.
    return Differ()
def test_change_and_undo(differ):
    """Changing a line and reverting it should stay cheap for the differ."""
    # Empty the parser cache for the path None.
    cache.parser_cache.pop(None, None)
    func_before = 'def func():\n pass\n'
    # Parse the function and a.
    differ.initialize(func_before + 'a')
    # Parse just b.
    differ.parse(func_before + 'b', copies=1, parsers=1)
    # b has changed to a again, so parse that.
    differ.parse(func_before + 'a', copies=1, parsers=1)
    # Same as before parsers should be used at the end, because it doesn't end
    # with newlines and that leads to complications.
    differ.parse(func_before + 'a', copies=1, parsers=1)
    # Now that we have a newline at the end, everything is easier in Python
    # syntax, we can parse once and then get a copy.
    differ.parse(func_before + 'a\n', copies=1, parsers=1)
    differ.parse(func_before + 'a\n', copies=1)
    # Getting rid of an old parser: Still no parsers used.
    differ.parse('a\n', copies=1)
    # Now the file has completely changed and we need to parse.
    differ.parse('b\n', parsers=1)
    # And again.
    differ.parse('a\n', parsers=1)
def test_positions(differ):
    """``start_pos``/``end_pos`` of the module must track every edit."""
    # Empty the parser cache for the path None.
    cache.parser_cache.pop(None, None)
    func_before = 'class A:\n pass\n'
    m = differ.initialize(func_before + 'a')
    assert m.start_pos == (1, 0)
    assert m.end_pos == (3, 1)

    m = differ.parse('a', parsers=1)
    assert m.start_pos == (1, 0)
    assert m.end_pos == (1, 1)

    m = differ.parse('a\n\n', parsers=1)
    assert m.end_pos == (3, 0)
    m = differ.parse('a\n\n ', copies=1, parsers=1)
    assert m.end_pos == (3, 1)
    m = differ.parse('a ', parsers=1)
    assert m.end_pos == (1, 2)
def test_if_simple(differ):
    """Adding/removing an ``else`` branch next to an ``if``."""
    src = dedent('''\
    if 1:
        a = 3
    ''')
    else_ = "else:\n a = ''\n"
    differ.initialize(src + 'a')
    differ.parse(src + else_ + "a", copies=0, parsers=1)

    # A lone ``else`` without its ``if`` is invalid -> error leaves.
    differ.parse(else_, parsers=1, expect_error_leaves=True)
    differ.parse(src + else_, parsers=1)
def test_func_with_for_and_comment(differ):
# The first newline is important, leave it. It should not trigger another
# parser split.
src = dedent("""\
def func():
pass
for a in [1]:
# COMMENT
a""")
differ.initialize(src)
differ.parse('a\n' + src, copies=1, parsers=2)
def test_one_statement_func(differ):
src = dedent("""\
first
def func(): a
""")
differ.initialize(src + 'second')
differ.parse(src + 'def second():\n a', parsers=1, copies=1)
def test_for_on_one_line(differ):
src = dedent("""\
foo = 1
for x in foo: pass
def hi():
pass
""")
differ.initialize(src)
src = dedent("""\
def hi():
for x in foo: pass
pass
pass
""")
differ.parse(src, parsers=2)
src = dedent("""\
def hi():
for x in foo: pass
pass
def nested():
pass
""")
# The second parser is for parsing the `def nested()` which is an `equal`
# operation in the SequenceMatcher.
differ.parse(src, parsers=1, copies=1)
def test_open_parentheses(differ):
func = 'def func():\n a\n'
code = 'isinstance(\n\n' + func
new_code = 'isinstance(\n' + func
differ.initialize(code)
differ.parse(new_code, parsers=1, expect_error_leaves=True)
new_code = 'a = 1\n' + new_code
differ.parse(new_code, copies=1, parsers=1, expect_error_leaves=True)
func += 'def other_func():\n pass\n'
differ.initialize('isinstance(\n' + func)
# Cannot copy all, because the prefix of the function is once a newline and
# once not.
differ.parse('isinstance()\n' + func, parsers=2, copies=1)
def test_open_parentheses_at_end(differ):
    """An unclosed bracket/string at EOF must survive a reparse as errors."""
    code = "a['"
    differ.initialize(code)
    differ.parse(code, parsers=1, expect_error_leaves=True)
def test_backslash(differ):
src = dedent(r"""
a = 1\
if 1 else 2
def x():
pass
""")
differ.initialize(src)
src = dedent(r"""
def x():
a = 1\
if 1 else 2
def y():
pass
""")
differ.parse(src, parsers=2)
src = dedent(r"""
def first():
if foo \
and bar \
or baz:
pass
def second():
pass
""")
differ.parse(src, parsers=1)
def test_full_copy(differ):
    """Reparsing identical code should mostly copy the old tree."""
    code = 'def foo(bar, baz):\n pass\n bar'
    differ.initialize(code)
    differ.parse(code, copies=1, parsers=1)
def test_wrong_whitespace(differ):
code = '''
hello
'''
differ.initialize(code)
differ.parse(code + 'bar\n ', parsers=1, copies=1)
code += """abc(\npass\n """
differ.parse(code, parsers=1, copies=1, expect_error_leaves=True)
def test_issues_with_error_leaves(differ):
code = dedent('''
def ints():
str..
str
''')
code2 = dedent('''
def ints():
str.
str
''')
differ.initialize(code)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
def test_unfinished_nodes(differ):
code = dedent('''
class a():
def __init__(self, a):
self.a = a
def p(self):
a(1)
''')
code2 = dedent('''
class a():
def __init__(self, a):
self.a = a
def p(self):
self
a(1)
''')
differ.initialize(code)
differ.parse(code2, parsers=1, copies=2)
def test_nested_if_and_scopes(differ):
code = dedent('''
class a():
if 1:
def b():
2
''')
code2 = code + ' else:\n 3'
differ.initialize(code)
differ.parse(code2, parsers=1, copies=0)
def test_word_before_def(differ):
    """A stray word before ``def`` is invalid syntax -> error leaves."""
    code1 = 'blub def x():\n'
    code2 = code1 + ' s'
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True)
def test_classes_with_error_leaves(differ):
code1 = dedent('''
class X():
def x(self):
blablabla
assert 3
self.
class Y():
pass
''')
code2 = dedent('''
class X():
def x(self):
blablabla
assert 3
str(
class Y():
pass
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_totally_wrong_whitespace(differ):
code1 = '''
class X():
raise n
class Y():
pass
'''
code2 = '''
class X():
raise n
str(
class Y():
pass
'''
differ.initialize(code1)
differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
def test_node_insertion(differ):
code1 = dedent('''
class X():
def y(self):
a = 1
b = 2
c = 3
d = 4
''')
code2 = dedent('''
class X():
def y(self):
a = 1
b = 2
str
c = 3
d = 4
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=2)
def test_whitespace_at_end(differ):
    """Appending only a trailing newline must still reuse the old tree."""
    code = dedent('str\n\n')
    differ.initialize(code)
    differ.parse(code + '\n', parsers=1, copies=1)
def test_endless_while_loop(differ):
    """
    This was a bug in Jedi #878.
    """
    # A comment-only file used to send the diff parser into an endless loop.
    code = '#dead'
    differ.initialize(code)
    module = differ.parse(code, parsers=1)
    assert module.end_pos == (1, 5)

    code = '#dead\n'
    differ.initialize(code)
    module = differ.parse(code + '\n', parsers=1)
    assert module.end_pos == (3, 0)
def test_in_class_movements(differ):
code1 = dedent("""\
class PlaybookExecutor:
p
b
def run(self):
1
try:
x
except:
pass
""")
code2 = dedent("""\
class PlaybookExecutor:
b
def run(self):
1
try:
x
except:
pass
""")
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1)
def test_in_parentheses_newlines(differ):
code1 = dedent("""
x = str(
True)
a = 1
def foo():
pass
b = 2""")
code2 = dedent("""
x = str(True)
a = 1
def foo():
pass
b = 2""")
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1)
differ.parse(code1, parsers=2, copies=1)

View File

@@ -1,106 +0,0 @@
import difflib
import pytest
from jedi.parser.python import parse
code_basic_features = '''
"""A mod docstring"""
def a_function(a_argument, a_default = "default"):
"""A func docstring"""
a_result = 3 * a_argument
print(a_result) # a comment
b = """
from
to""" + "huhu"
if a_default == "default":
return str(a_result)
else
return None
'''
def diff_code_assert(a, b, n=4):
    """Assert that the code strings *a* and *b* are equal.

    On mismatch, fail with a unified diff so the difference is readable.
    ``n`` is the number of context lines passed through to difflib.
    """
    if a != b:
        diff = "\n".join(difflib.unified_diff(
            a.splitlines(),
            b.splitlines(),
            n=n,
            lineterm=""
        ))
        # Removed a stray trailing ``pass`` that followed this branch.
        assert False, "Code does not match:\n%s\n\ncreated code:\n%s" % (
            diff,
            b
        )
@pytest.mark.skipif('True', reason='Refactor a few parser things first.')
def test_basic_parsing():
"""Validate the parsing features"""
m = parse(code_basic_features)
diff_code_assert(
code_basic_features,
m.get_code()
)
def test_operators():
    """A simple binary operation must round-trip through the parser."""
    source = '5 * 3'
    diff_code_assert(source, parse(source).get_code())
def test_get_code():
"""Use the same code that the parser also generates, to compare"""
s = '''"""a docstring"""
class SomeClass(object, mixin):
def __init__(self):
self.xy = 3.0
"""statement docstr"""
def some_method(self):
return 1
def yield_method(self):
while hasattr(self, 'xy'):
yield True
for x in [1, 2]:
yield x
def empty(self):
pass
class Empty:
pass
class WithDocstring:
"""class docstr"""
pass
def method_with_docstring():
"""class docstr"""
pass
'''
assert parse(s).get_code() == s
def test_end_newlines():
    """
    The Python grammar explicitly needs a newline at the end. Jedi though still
    wants to be able, to return the exact same code without the additional new
    line the parser needs.
    """
    def test(source, end_pos):
        module = parse(source)
        assert module.get_code() == source
        assert module.end_pos == end_pos

    test('a', (1, 1))
    test('a\n', (2, 0))
    test('a\nb', (2, 1))
    test('a\n#comment\n', (3, 0))
    test('a\n#comment', (2, 8))
    test('a#comment', (1, 9))
    test('def a():\n pass', (2, 5))
    # Even an unfinished def keeps an exact end position.
    test('def a(', (1, 6))

View File

@@ -1,205 +0,0 @@
"""
These tests test the cases that the old fast parser tested with the normal
parser.
The old fast parser doesn't exist anymore and was replaced with a diff parser.
However the tests might still be relevant for the parser.
"""
from textwrap import dedent
from jedi.parser.python import parse
def test_carriage_return_splitting():
source = dedent('''
"string"
class Foo():
pass
''')
source = source.replace('\n', '\r\n')
module = parse(source)
assert [n.value for lst in module.get_used_names().values() for n in lst] == ['Foo']
def check_p(src):
    """Parse *src*, assert it round-trips exactly, and return the module."""
    module_node = parse(src)
    assert module_node.get_code() == src
    return module_node
def test_for():
    """Two consecutive for loops must round-trip."""
    src = dedent("""\
    for a in [1,2]:
        a
    for a1 in 1,"":
        a1
    """)
    check_p(src)
def test_class_with_class_var():
src = dedent("""\
class SuperClass:
class_super = 3
def __init__(self):
self.foo = 4
pass
""")
check_p(src)
def test_func_with_if():
src = dedent("""\
def recursion(a):
if foo:
return recursion(a)
else:
if bar:
return inexistent
else:
return a
""")
check_p(src)
def test_decorator():
    """A decorated method inside a class must round-trip."""
    src = dedent("""\
    class Decorator():
        @memoize
        def dec(self, a):
            return a
    """)
    check_p(src)
def test_nested_funcs():
    """Nested function definitions must round-trip."""
    src = dedent("""\
    def memoize(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    """)
    check_p(src)
def test_multi_line_params():
src = dedent("""\
def x(a,
b):
pass
foo = 1
""")
check_p(src)
def test_class_func_if():
src = dedent("""\
class Class:
def func(self):
if 1:
a
else:
b
pass
""")
check_p(src)
def test_multi_line_for():
src = dedent("""\
for x in [1,
2]:
pass
pass
""")
check_p(src)
def test_wrong_indentation():
src = dedent("""\
def func():
a
b
a
""")
check_p(src)
src = dedent("""\
def complex():
def nested():
a
b
a
def other():
pass
""")
check_p(src)
def test_strange_parentheses():
src = dedent("""
class X():
a = (1
if 1 else 2)
def x():
pass
""")
check_p(src)
def test_fake_parentheses():
"""
The fast parser splitting counts parentheses, but not as correct tokens.
Therefore parentheses in string tokens are included as well. This needs to
be accounted for.
"""
src = dedent(r"""
def x():
a = (')'
if 1 else 2)
def y():
pass
def z():
pass
""")
check_p(src)
def test_additional_indent():
source = dedent('''\
int(
def x():
pass
''')
check_p(source)
def test_round_trip():
    # Code ending without a final newline must still round-trip exactly.
    code = dedent('''
    def x():
        """hahaha"""
    func''')
    assert parse(code).get_code() == code
def test_parentheses_in_string():
code = dedent('''
def x():
'('
import abc
abc.''')
check_p(code)

View File

@@ -1,34 +0,0 @@
'''
To make the life of any analysis easier, we are generating Param objects
instead of simple parser objects.
'''
from textwrap import dedent
from jedi.parser.python import parse
def assert_params(param_string, **wanted_dct):
    """Parse a funcdef with *param_string* and compare params and defaults.

    ``wanted_dct`` maps each expected param name to the source code of its
    default value (or None when there is no default).
    """
    source = dedent('''
    def x(%s):
        pass
    ''') % param_string
    module = parse(source)
    funcdef = next(module.iter_funcdefs())
    dct = dict((p.name.value, p.default and p.default.get_code())
               for p in funcdef.params)
    assert dct == wanted_dct
    # The generated Param objects must not break code round-tripping.
    assert module.get_code() == source
def test_split_params_with_separation_star():
    # A bare ``*`` marks keyword-only params but is not a param itself.
    assert_params(u'x, y=1, *, z=3', x=None, y='1', z='3')
    assert_params(u'*, x', x=None)
    assert_params(u'*')
def test_split_params_with_stars():
    # ``*args`` / ``**kwargs`` become params without default values.
    assert_params(u'x, *args', x=None, args=None)
    assert_params(u'**kwargs', kwargs=None)
    assert_params(u'*args, **kwargs', args=None, kwargs=None)

View File

@@ -1,180 +0,0 @@
# -*- coding: utf-8 -*-
import sys
from textwrap import dedent
import pytest
from jedi._compatibility import u, is_py3
from jedi.parser.python import parse, load_grammar
from jedi.parser.python import tree
from jedi.common import splitlines
def test_basic_parsing():
    """Odd but representative snippets must round-trip through the parser."""
    def compare(string):
        """Generates the AST object and then regenerates the code."""
        assert parse(string).get_code() == string

    compare('\na #pass\n')
    compare('wblabla* 1\t\n')
    compare('def x(a, b:3): pass\n')
    compare('assert foo\n')
class TestSubscopes():
def get_sub(self, source):
return parse(source).children[0]
def test_subscope_names(self):
name = self.get_sub('class Foo: pass').name
assert name.start_pos == (1, len('class '))
assert name.end_pos == (1, len('class Foo'))
assert name.value == 'Foo'
name = self.get_sub('def foo(): pass').name
assert name.start_pos == (1, len('def '))
assert name.end_pos == (1, len('def foo'))
assert name.value == 'foo'
class TestImports():
def get_import(self, source):
return next(parse(source).iter_imports())
def test_import_names(self):
imp = self.get_import(u('import math\n'))
names = imp.get_defined_names()
assert len(names) == 1
assert names[0].value == 'math'
assert names[0].start_pos == (1, len('import '))
assert names[0].end_pos == (1, len('import math'))
assert imp.start_pos == (1, 0)
assert imp.end_pos == (1, len('import math'))
def test_end_pos():
s = dedent('''
x = ['a', 'b', 'c']
def func():
y = None
''')
parser = parse(s)
scope = next(parser.iter_funcdefs())
assert scope.start_pos == (3, 0)
assert scope.end_pos == (5, 0)
def test_carriage_return_statements():
source = dedent('''
foo = 'ns1!'
# this is a namespace package
''')
source = source.replace('\n', '\r\n')
stmt = parse(source).children[0]
assert '#' not in stmt.get_code()
def test_incomplete_list_comprehension():
""" Shouldn't raise an error, same bug as #418. """
# With the old parser this actually returned a statement. With the new
# parser only valid statements generate one.
children = parse('(1 for def').children
assert [c.type for c in children] == \
['error_node', 'error_node', 'newline', 'endmarker']
def test_newline_positions():
endmarker = parse('a\n').children[-1]
assert endmarker.end_pos == (2, 0)
new_line = endmarker.get_previous_leaf()
assert new_line.start_pos == (1, 1)
assert new_line.end_pos == (2, 0)
def test_end_pos_error_correction():
"""
Source code without ending newline are given one, because the Python
grammar needs it. However, they are removed again. We still want the right
end_pos, even if something breaks in the parser (error correction).
"""
s = 'def x():\n .'
m = parse(s)
func = m.children[0]
assert func.type == 'funcdef'
assert func.end_pos == (2, 2)
assert m.end_pos == (2, 2)
def test_param_splitting():
"""
Jedi splits parameters into params, this is not what the grammar does,
but Jedi does this to simplify argument parsing.
"""
def check(src, result):
# Python 2 tuple params should be ignored for now.
grammar = load_grammar('%s.%s' % sys.version_info[:2])
m = parse(src, grammar=grammar)
if is_py3:
assert not list(m.iter_funcdefs())
else:
# We don't want b and c to be a part of the param enumeration. Just
# ignore them, because it's not what we want to support in the
# future.
assert [param.name.value for param in next(m.iter_funcdefs()).params] == result
check('def x(a, (b, c)):\n pass', ['a'])
check('def x((b, c)):\n pass', [])
def test_unicode_string():
s = tree.String(None, u(''), (0, 0))
assert repr(s) # Should not raise an Error!
def test_backslash_dos_style():
assert parse('\\\r\n')
def test_started_lambda_stmt():
m = parse(u'lambda a, b: a i')
assert m.children[0].type == 'error_node'
def test_python2_octal():
module = parse('0660')
first = module.children[0]
if is_py3:
assert first.type == 'error_node'
else:
assert first.children[0].type == 'number'
def test_python3_octal():
module = parse('0o660')
if is_py3:
assert module.children[0].children[0].type == 'number'
else:
assert module.children[0].type == 'error_node'
def test_load_newer_grammar():
# This version shouldn't be out for a while, but if we somehow get this it
# should just take the latest Python grammar.
load_grammar('15.8')
# The same is true for very old grammars (even though this is probably not
# going to be an issue.
load_grammar('1.5')
@pytest.mark.parametrize('code', ['foo "', 'foo """\n', 'foo """\nbar'])
def test_open_string_literal(code):
"""
Testing mostly if removing the last newline works.
"""
lines = splitlines(code, keepends=True)
end_pos = (len(lines), len(lines[-1]))
module = parse(code)
assert module.get_code() == code
assert module.end_pos == end_pos == module.children[1].end_pos

View File

@@ -1,61 +0,0 @@
# -*- coding: utf-8 # This file contains Unicode characters.
from textwrap import dedent
import pytest
from jedi.parser.python import parse
from jedi.parser.python import tree
class TestsFunctionAndLambdaParsing(object):
    """Funcdefs and lambdas should expose the same node API."""

    # (source, expectations) pairs driving the parametrized ``node`` fixture.
    FIXTURES = [
        ('def my_function(x, y, z) -> str:\n    return x + y * z\n', {
            'name': 'my_function',
            'params': ['x', 'y', 'z'],
            'annotation': "str",
        }),
        ('lambda x, y, z: x + y * z\n', {
            'name': '<lambda>',
            'params': ['x', 'y', 'z'],
        }),
    ]

    @pytest.fixture(params=FIXTURES)
    def node(self, request):
        parsed = parse(dedent(request.param[0]))
        # Stash the expectations so the ``expected`` fixture can read them.
        request.keywords['expected'] = request.param[1]
        child = parsed.children[0]
        if child.type == 'simple_stmt':
            child = child.children[0]
        return child

    @pytest.fixture()
    def expected(self, request, node):
        # Depends on ``node`` so the keywords entry is populated first.
        return request.keywords['expected']

    def test_name(self, node, expected):
        # Lambdas are anonymous; only funcdefs carry a Name node.
        if node.type != 'lambdef':
            assert isinstance(node.name, tree.Name)
            assert node.name.value == expected['name']

    def test_params(self, node, expected):
        assert isinstance(node.params, list)
        assert all(isinstance(x, tree.Param) for x in node.params)
        assert [str(x.name.value) for x in node.params] == [x for x in expected['params']]

    def test_is_generator(self, node, expected):
        assert node.is_generator() is expected.get('is_generator', False)

    def test_yields(self, node, expected):
        # TODO: There's a comment in the code noting that the current
        # implementation is incorrect.
        assert node.is_generator() == expected.get('yields', False)

    def test_annotation(self, node, expected):
        expected_annotation = expected.get('annotation', None)
        if expected_annotation is None:
            assert node.annotation is None
        else:
            assert node.annotation.value == expected_annotation

View File

@@ -1,279 +0,0 @@
"""Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
from textwrap import dedent
from jedi._compatibility import is_py3
from jedi.parser.python import parse as _parse, load_grammar
from jedi.parser import ParserSyntaxError
import pytest
from test.helpers import TestCase
def parse(code, version='3.4'):
    """Parse *code* without error recovery, so bad syntax raises."""
    # The grammar needs a trailing newline; add two to be safe.
    code = dedent(code) + "\n\n"
    grammar = load_grammar(version=version)
    return _parse(code, grammar=grammar, error_recovery=False)
class TestDriver(TestCase):
def test_formfeed(self):
s = """print 1\n\x0Cprint 2\n"""
t = parse(s, '2.7')
self.assertEqual(t.children[0].children[0].type, 'print_stmt')
self.assertEqual(t.children[1].children[0].type, 'print_stmt')
s = """1\n\x0C\x0C2\n"""
t = parse(s, '2.7')
class GrammarTest(TestCase):
    def invalid_syntax(self, code, **kwargs):
        """Assert that parsing *code* raises ``ParserSyntaxError``."""
        try:
            parse(code, **kwargs)
        except ParserSyntaxError:
            return
        raise AssertionError("Syntax shouldn't have been valid")
class TestMatrixMultiplication(GrammarTest):
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
def test_matrix_multiplication_operator(self):
parse("a @ b", "3.5")
parse("a @= b", "3.5")
class TestYieldFrom(GrammarTest):
def test_yield_from(self):
parse("yield from x")
parse("(yield from x) + y")
self.invalid_syntax("yield from")
class TestAsyncAwait(GrammarTest):
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
def test_await_expr(self):
parse("""async def foo():
await x
""", "3.5")
parse("""async def foo():
def foo(): pass
def foo(): pass
await x
""", "3.5")
parse("""async def foo(): return await a""", "3.5")
parse("""def foo():
def foo(): pass
async def foo(): await x
""", "3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
@pytest.mark.xfail(reason="acting like python 3.7")
def test_await_expr_invalid(self):
self.invalid_syntax("await x", version="3.5")
self.invalid_syntax("""def foo():
await x""", version="3.5")
self.invalid_syntax("""def foo():
def foo(): pass
async def foo(): pass
await x
""", version="3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
@pytest.mark.xfail(reason="acting like python 3.7")
def test_async_var(self):
parse("""async = 1""", "3.5")
parse("""await = 1""", "3.5")
parse("""def async(): pass""", "3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
def test_async_for(self):
parse("""async def foo():
async for a in b: pass""", "3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
@pytest.mark.xfail(reason="acting like python 3.7")
def test_async_for_invalid(self):
self.invalid_syntax("""def foo():
async for a in b: pass""", version="3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
def test_async_with(self):
parse("""async def foo():
async with a: pass""", "3.5")
@pytest.mark.skipif('sys.version_info[:2] < (3, 5)')
@pytest.mark.xfail(reason="acting like python 3.7")
def test_async_with_invalid(self):
self.invalid_syntax("""def foo():
async with a: pass""", version="3.5")
class TestRaiseChanges(GrammarTest):
    """Python 2 allowed ``raise E, V, T``; Python 3 uses ``raise E from C``."""

    def test_2x_style_1(self):
        parse("raise")

    def test_2x_style_2(self):
        parse("raise E, V", version='2.7')

    def test_2x_style_3(self):
        parse("raise E, V, T", version='2.7')

    def test_2x_style_invalid_1(self):
        self.invalid_syntax("raise E, V, T, Z", version='2.7')

    def test_3x_style(self):
        parse("raise E1 from E2")

    def test_3x_style_invalid_1(self):
        self.invalid_syntax("raise E, V from E1")

    def test_3x_style_invalid_2(self):
        self.invalid_syntax("raise E from E1, E2")

    def test_3x_style_invalid_3(self):
        self.invalid_syntax("raise from E1, E2")

    def test_3x_style_invalid_4(self):
        self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
parse("""def f(x) -> list: pass""")
def test_2(self):
parse("""def f(x:int): pass""")
def test_3(self):
parse("""def f(*x:str): pass""")
def test_4(self):
parse("""def f(**x:float): pass""")
def test_5(self):
parse("""def f(x, y:1+2): pass""")
def test_6(self):
self.invalid_syntax("""def f(a, (b:1, c:2, d)): pass""")
def test_7(self):
self.invalid_syntax("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
def test_8(self):
s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
self.invalid_syntax(s)
class TestExcept(GrammarTest):
def test_new(self):
s = """
try:
x
except E as N:
y"""
parse(s)
def test_old(self):
s = """
try:
x
except E, N:
y"""
parse(s, version='2.7')
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
def test_1(self):
parse("""x = {'one'}""")
def test_2(self):
parse("""x = {'one', 1,}""")
def test_3(self):
parse("""x = {'one', 'two', 'three'}""")
def test_4(self):
parse("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
def test_new_octal_notation(self):
code = """0o7777777777777"""
if is_py3:
parse(code)
else:
self.invalid_syntax(code)
self.invalid_syntax("""0o7324528887""")
def test_new_binary_notation(self):
parse("""0b101010""")
self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
def test_new_syntax(self):
parse("class B(t=7): pass")
parse("class B(t, *args): pass")
parse("class B(t, **kwargs): pass")
parse("class B(t, *args, **kwargs): pass")
parse("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(TestCase):
"""A cut-down version of pytree_idempotency.py."""
def test_extended_unpacking(self):
parse("a, *b, c = x\n")
parse("[*a, b] = x\n")
parse("(z, *y, w) = m\n")
parse("for *z, m in d: pass\n")
class TestLiterals(GrammarTest):
# It's not possible to get the same result when using \xaa in Python 2/3,
# because it's treated differently.
@pytest.mark.skipif('sys.version_info[0] < 3')
def test_multiline_bytes_literals(self):
s = """
md5test(b"\xaa" * 80,
(b"Test Using Larger Than Block-Size Key "
b"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
parse(s)
def test_multiline_bytes_tripquote_literals(self):
s = '''
b"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
"""
'''
parse(s)
@pytest.mark.skipif('sys.version_info[0] < 3')
def test_multiline_str_literals(self):
s = """
md5test("\xaa" * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
parse(s)

View File

@@ -1,211 +0,0 @@
# -*- coding: utf-8 # This file contains Unicode characters.
from textwrap import dedent
from jedi._compatibility import is_py3, py_version
from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
from jedi.parser import tokenize
from jedi.parser.python import parse
from jedi.common import splitlines
from jedi.parser.tokenize import TokenInfo
from jedi.parser_utils import safe_literal_eval
from ..helpers import unittest
def _get_token_list(string):
    # Materialize the token generator so tests can index and unpack it.
    return list(tokenize.source_tokens(string))
class TokenTest(unittest.TestCase):
def test_end_pos_one_line(self):
parsed = parse(dedent('''
def testit():
a = "huhu"
'''))
simple_stmt = next(parsed.iter_funcdefs()).get_suite().children[-1]
string = simple_stmt.children[0].get_rhs()
assert string.end_pos == (3, 14)
def test_end_pos_multi_line(self):
parsed = parse(dedent('''
def testit():
a = """huhu
asdfasdf""" + "h"
'''))
expr_stmt = next(parsed.iter_funcdefs()).get_suite().children[1].children[0]
string_leaf = expr_stmt.get_rhs().children[0]
assert string_leaf.end_pos == (4, 11)
def test_simple_no_whitespace(self):
# Test a simple one line string, no preceding whitespace
simple_docstring = '"""simple one line docstring"""'
tokens = tokenize.source_tokens(simple_docstring)
token_list = list(tokens)
_, value, _, prefix = token_list[0]
assert prefix == ''
assert value == '"""simple one line docstring"""'
def test_simple_with_whitespace(self):
# Test a simple one line string with preceding whitespace and newline
simple_docstring = ' """simple one line docstring""" \r\n'
tokens = tokenize.source_tokens(simple_docstring)
token_list = list(tokens)
assert token_list[0][0] == INDENT
typ, value, start_pos, prefix = token_list[1]
assert prefix == ' '
assert value == '"""simple one line docstring"""'
assert typ == STRING
typ, value, start_pos, prefix = token_list[2]
assert prefix == ' '
assert typ == NEWLINE
def test_function_whitespace(self):
# Test function definition whitespace identification
fundef = dedent('''
def test_whitespace(*args, **kwargs):
x = 1
if x > 0:
print(True)
''')
tokens = tokenize.source_tokens(fundef)
token_list = list(tokens)
for _, value, _, prefix in token_list:
if value == 'test_whitespace':
assert prefix == ' '
if value == '(':
assert prefix == ''
if value == '*':
assert prefix == ''
if value == '**':
assert prefix == ' '
if value == 'print':
assert prefix == ' '
if value == 'if':
assert prefix == ' '
def test_tokenize_multiline_I(self):
# Make sure multiline string having newlines have the end marker on the
# next line
fundef = '''""""\n'''
tokens = tokenize.source_tokens(fundef)
token_list = list(tokens)
assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
TokenInfo(ENDMARKER , '', (2, 0), '')]
def test_tokenize_multiline_II(self):
# Make sure multiline string having no newlines have the end marker on
# same line
fundef = '''""""'''
tokens = tokenize.source_tokens(fundef)
token_list = list(tokens)
assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
TokenInfo(ENDMARKER, '', (1, 4), '')]
def test_tokenize_multiline_III(self):
# Make sure multiline string having newlines have the end marker on the
# next line even if several newline
fundef = '''""""\n\n'''
tokens = tokenize.source_tokens(fundef)
token_list = list(tokens)
assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
TokenInfo(ENDMARKER, '', (3, 0), '')]
def test_identifier_contains_unicode(self):
fundef = dedent('''
def 我あφ():
pass
''')
tokens = tokenize.source_tokens(fundef)
token_list = list(tokens)
unicode_token = token_list[1]
if is_py3:
assert unicode_token[0] == NAME
else:
# Unicode tokens in Python 2 seem to be identified as operators.
# They will be ignored in the parser, that's ok.
assert unicode_token[0] == OP
def test_quoted_strings(self):
string_tokens = [
'u"test"',
'u"""test"""',
'U"""test"""',
"u'''test'''",
"U'''test'''",
]
for s in string_tokens:
module = parse('''a = %s\n''' % s)
simple_stmt = module.children[0]
expr_stmt = simple_stmt.children[0]
assert len(expr_stmt.children) == 3
string_tok = expr_stmt.children[2]
assert string_tok.type == 'string'
assert string_tok.value == s
assert safe_literal_eval(string_tok.value) == 'test'
def test_ur_literals():
    """
    Decided to parse ``u''``/``ur''`` literals regardless of Python version.
    This probably makes sense:

    - Python 3+ doesn't support ``ur''``, but it doesn't hurt to parse it
      anyway. While this is incorrect, it's just incorrect for one "old" and
      in the future not very important version.
    - All the other Python versions work very well with it.
    """
    def check(literal, is_literal=True):
        token_list = _get_token_list(literal)
        typ, result_literal, _, _ = token_list[0]
        if is_literal:
            assert typ == STRING
            assert result_literal == literal
        else:
            # Not a string literal on this version -> tokenized as a NAME.
            assert typ == NAME

    check('u""')
    check('ur""', is_literal=not is_py3)
    check('Ur""', is_literal=not is_py3)
    check('UR""', is_literal=not is_py3)
    check('bR""')
    # Starting with Python 3.3 this ordering is also possible, but we just
    # enable it for all versions. It doesn't hurt.
    check('Rb""')
    # Starting with Python 3.6 format strings were introduced.
    check('fr""', is_literal=py_version >= 36)
    check('rF""', is_literal=py_version >= 36)
    check('f""', is_literal=py_version >= 36)
    check('F""', is_literal=py_version >= 36)
def test_error_literal():
    """Unterminated strings must tokenize as error tokens, not crash."""
    error_token, endmarker = _get_token_list('"\n')
    assert error_token.type == tokenize.ERRORTOKEN
    assert endmarker.prefix == ''
    assert error_token.string == '"\n'
    assert endmarker.type == tokenize.ENDMARKER
    assert endmarker.prefix == ''

    bracket, error_token, endmarker = _get_token_list('( """')
    assert error_token.type == tokenize.ERRORTOKEN
    # The space before the broken string ends up in its prefix.
    assert error_token.prefix == ' '
    assert error_token.string == '"""'
    assert endmarker.type == tokenize.ENDMARKER
    assert endmarker.prefix == ''
def test_endmarker_end_pos():
    """The endmarker must sit exactly at the end of the source."""
    def check(code):
        tokens = _get_token_list(code)
        lines = splitlines(code)
        assert tokens[-1].end_pos == (len(lines), len(lines[-1]))

    check('#c')
    check('#c\n')
    check('a\n')
    check('a')
    check(r'a\\n')
    # A trailing backslash (line continuation at EOF) must also work.
    check('a\\')