forked from VimPlug/jedi
Python 2.7 compatibility.
@@ -197,3 +197,15 @@ try:
     from itertools import zip_longest
 except ImportError:
     from itertools import izip_longest as zip_longest  # Python 2
+
+
+def no_unicode_pprint(dct):
+    """
+    Python 2/3 dict __repr__ may be different because of unicode differences
+    (with or without a `u` prefix). Normally in doctests we could use `pprint`
+    to sort dicts and check for equality, but here we have to write a separate
+    function to do that.
+    """
+    import pprint
+    s = pprint.pformat(dct)
+    print(re.sub("u'", "'", s))
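A minimal usage sketch of the helper added above (not part of the commit; it assumes no_unicode_pprint is exported by jedi._compatibility and that re is already imported in that module):

from jedi._compatibility import no_unicode_pprint

# pprint.pformat renders unicode keys as u'...' on Python 2, e.g. {u'func': []};
# stripping that prefix makes the same doctest output line match on Python 2 and 3.
no_unicode_pprint({u'func': [], u'y': [1, 2]})
# prints: {'func': [], 'y': [1, 2]}  on both interpreters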
@@ -533,8 +533,6 @@ class Script(object):
             for n in imp_names:
                 iw = imports.ImportWrapper(self._evaluator, n).follow()
                 i = n.get_definition()
-                if i.is_nested() and any(not isinstance(i, pr.Module) for i in iw):
-                    analysis.add(self._evaluator, 'import-error', i.namespace_names[-1])
         for node in sorted(nodes, key=lambda obj: obj.start_pos):
             #if not (isinstance(stmt.parent, pr.ForFlow) and stmt.parent.set_stmt == stmt):
             if node.type == 'expr_stmt':
@@ -439,7 +439,7 @@ def global_names_dict_generator(evaluator, scope, position):
     This function is used to include names from outer scopes. For example, when
     the current scope is a function:
 
-    >>> from jedi._compatibility import u
+    >>> from jedi._compatibility import u, no_unicode_pprint
     >>> from jedi.parser import Parser, load_grammar
     >>> parser = Parser(load_grammar(), u('''
     ... x = ['a', 'b', 'c']
@@ -453,12 +453,11 @@ def global_names_dict_generator(evaluator, scope, position):
     `global_names_dict_generator` is a generator. First it yields names from
     the innermost scope.
 
-    >>> from pprint import pprint
     >>> from jedi.evaluate import Evaluator
     >>> evaluator = Evaluator(load_grammar())
     >>> scope = er.wrap(evaluator, scope)
     >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0)))
-    >>> pprint(pairs[0])
+    >>> no_unicode_pprint(pairs[0])
     ({'func': [], 'y': [<Name: y@4,4>]}, (4, 0))
 
     Then it yields the names from one level "lower". In this example, this
@@ -466,7 +465,7 @@ def global_names_dict_generator(evaluator, scope, position):
     None, because typically the whole module is loaded before the function is
     called.
 
-    >>> pprint(pairs[1])
+    >>> no_unicode_pprint(pairs[1])
     ({'func': [<Name: func@3,4>], 'x': [<Name: x@2,0>]}, None)
 
     After that we have a few underscore names that are part of the module.
@@ -71,7 +71,8 @@ yield_stmt: yield_expr
 raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
 import_stmt: import_name | import_from
 import_name: 'import' dotted_as_names
-import_from: ('from' ('.'* dotted_name | '.'+)
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
 import_as_name: NAME ['as' NAME]
 dotted_as_name: dotted_name ['as' NAME]
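The tokenizer behaviour the grammar comment refers to can be seen with the standard-library tokenizer; an illustrative sketch, not part of the commit (CPython's tokenize on Python 3 is shown; jedi's own tokenizer behaves analogously per the comment above):

import io
import tokenize

# Three consecutive dots are emitted as a single ELLIPSIS token rather than
# three DOT tokens, so a rule written only as '.'+ would never match them.
source = "from ... import x\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))
# the output includes:  ELLIPSIS '...'   (one token for all three dots)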
@@ -27,8 +27,8 @@ class MixinTestFullName(object):
     def check(self, source, desired):
         script = jedi.Script(textwrap.dedent(source))
         definitions = getattr(script, type(self).operation)()
-        assert len(definitions) == 1
-        self.assertEqual(definitions[0].full_name, desired)
+        for d in definitions:
+            self.assertEqual(d.full_name, desired)
 
     def test_os_path_join(self):
         self.check('import os; os.path.join', 'os.path.join')
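For orientation, a hedged sketch of what check() drives, assuming the subclass's operation attribute is goto_definitions (other subclasses may plug in a different Script method):

import jedi

# Every definition returned by the chosen operation exposes .full_name,
# which is what the loop above compares against the expected value.
for definition in jedi.Script("import os; os.path.join").goto_definitions():
    print(definition.full_name)  # expected: os.path.join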
@@ -1,8 +1,8 @@
 """
 Test of keywords and ``jedi.keywords``
 """
-import jedi
-from jedi import Script, common
+from jedi._compatibility import is_py3
+from jedi import Script
 
 
 def test_goto_assignments_keyword():
@@ -17,7 +17,10 @@ def test_goto_assignments_keyword():
 def test_keyword():
     """ github jedi-vim issue #44 """
     defs = Script("print").goto_definitions()
-    assert [d.doc for d in defs]
+    if is_py3:
+        assert [d.doc for d in defs]
+    else:
+        assert defs == []
 
     assert Script("import").goto_assignments() == []
 
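A short sketch of the compatibility flag the new branch relies on; the real definition lives in jedi._compatibility and is not part of this diff, so the body below is an assumption about the usual pattern:

import sys

# Assumed shape of jedi._compatibility.is_py3.
is_py3 = sys.version_info[0] >= 3

# Rationale for the branch: on Python 2, `print` is a keyword statement rather
# than a name, so Script("print").goto_definitions() resolves to nothing and the
# test expects []; on Python 3, `print` is a built-in function with a docstring.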
@@ -1,3 +1,4 @@
+from jedi._compatibility import u
 from jedi.parser import Parser, load_grammar
 
 
@@ -6,7 +7,7 @@ def test_basic_parsing():
         """Generates the AST object and then regenerates the code."""
         assert Parser(load_grammar(), string).module.get_code() == string
 
-    compare('\na #pass\n')
-    compare('wblabla* 1\t\n')
-    compare('def x(a, b:3): pass\n')
-    compare('assert foo\n')
+    compare(u('\na #pass\n'))
+    compare(u('wblabla* 1\t\n'))
+    compare(u('def x(a, b:3): pass\n'))
+    compare(u('assert foo\n'))
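The u() wrapper applied in the hunks above and below comes from jedi._compatibility; its exact body is not in this diff, so the following is only a sketch of the usual pattern such a helper follows (the u'' literal prefix was removed in Python 3.0 and only returned in 3.3, hence a function instead of a prefix):

import sys

def u(string):
    """Sketch: return text, i.e. unicode on Python 2 and str on Python 3."""
    if sys.version_info[0] >= 3:
        return str(string)
    if isinstance(string, str):        # Python 2 byte string -> unicode
        return string.decode('UTF-8')
    return string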
@@ -27,7 +27,7 @@ asdfasdf""" + "h"
 
     def test_simple_no_whitespace(self):
         # Test a simple one line string, no preceding whitespace
-        simple_docstring = u'"""simple one line docstring"""'
+        simple_docstring = u('"""simple one line docstring"""')
         simple_docstring_io = StringIO(simple_docstring)
         tokens = parser.tokenize.generate_tokens(simple_docstring_io.readline)
         token_list = list(tokens)
@@ -37,7 +37,7 @@ asdfasdf""" + "h"
 
     def test_simple_with_whitespace(self):
         # Test a simple one line string with preceding whitespace and newline
-        simple_docstring = ' """simple one line docstring""" \r\n'
+        simple_docstring = u(' """simple one line docstring""" \r\n')
         simple_docstring_io = StringIO(simple_docstring)
         tokens = parser.tokenize.generate_tokens(simple_docstring_io.readline)
         token_list = list(tokens)
@@ -51,11 +51,11 @@ asdfasdf""" + "h"
 
     def test_function_whitespace(self):
         # Test function definition whitespace identification
-        fundef = '''def test_whitespace(*args, **kwargs):
+        fundef = u('''def test_whitespace(*args, **kwargs):
     x = 1
     if x > 0:
         print(True)
-'''
+''')
         fundef_io = StringIO(fundef)
         tokens = parser.tokenize.generate_tokens(fundef_io.readline)
         token_list = list(tokens)
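Why the tokenizer tests need unicode input at all: generate_tokens() reads from a StringIO readline, and if that StringIO is io.StringIO (an assumption; the import is outside the hunks shown) it only accepts text on Python 2. A tiny sketch:

import io

# io.StringIO accepts only text: a u'...' value works on Python 2 and 3 alike,
# while a Python 2 byte string would raise TypeError("unicode argument expected").
io.StringIO(u'"""simple one line docstring"""')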