From cc1098b93c7a3b620fe0f49f4e1005f10087e5a0 Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Wed, 26 Nov 2014 16:09:28 +0100
Subject: [PATCH] Fix a few tokenize tests and merge them back together.

---
 jedi/parser/__init__.py            |  2 +-
 jedi/refactoring.py                |  2 +-
 test/test_parser/test_tokenize.py  | 29 -----------------------------
 test/test_parser/test_tokenizer.py | 14 ++++++++++----
 4 files changed, 12 insertions(+), 35 deletions(-)
 delete mode 100644 test/test_parser/test_tokenize.py

diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index ce054318..4ac7aa82 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -33,7 +33,7 @@ STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
 _loaded_grammars = {}
 
 
-def load_grammar(file):
+def load_grammar(file='grammar3.4'):
     global _loaded_grammars
     path = os.path.join(os.path.dirname(__file__), file) + '.txt'
     try:
diff --git a/jedi/refactoring.py b/jedi/refactoring.py
index fbd9bde6..a342f08e 100644
--- a/jedi/refactoring.py
+++ b/jedi/refactoring.py
@@ -16,7 +16,7 @@ import difflib
 
 from jedi import common
 from jedi.evaluate import helpers
-from jedi.parser import representation as pr
+from jedi.parser import tree as pt
 
 
 class Refactoring(object):
diff --git a/test/test_parser/test_tokenize.py b/test/test_parser/test_tokenize.py
deleted file mode 100644
index ab57d4f1..00000000
--- a/test/test_parser/test_tokenize.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from jedi._compatibility import u
-from jedi import parser
-
-from ..helpers import unittest
-
-
-class TokenTest(unittest.TestCase):
-    def test_end_pos_one_line(self):
-        parsed = parser.Parser(u('''
-def testit():
-    a = "huhu"
-'''))
-        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
-        self.assertEqual(tok.end_pos, (3, 14))
-
-    def test_end_pos_multi_line(self):
-        parsed = parser.Parser(u('''
-def testit():
-    a = """huhu
-asdfasdf""" + "h"
-'''))
-        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
-        self.assertEqual(tok.end_pos, (4, 11))
-
-
-def test_tokenizer_with_string_literal_backslash():
-    import jedi
-    c = jedi.Script("statement = u'foo\\\n'; statement").goto_definitions()
-    assert c[0]._name.parent.obj == 'foo'
diff --git a/test/test_parser/test_tokenizer.py b/test/test_parser/test_tokenizer.py
index 35ec6abe..25f0ff62 100644
--- a/test/test_parser/test_tokenizer.py
+++ b/test/test_parser/test_tokenizer.py
@@ -9,20 +9,20 @@ from ..helpers import unittest
 
 class TokenTest(unittest.TestCase):
     def test_end_pos_one_line(self):
-        parsed = parser.Parser(u('''
+        parsed = parser.Parser(parser.load_grammar(), u('''
 def testit():
     a = "huhu"
 '''))
-        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
+        tok = parsed.module.subscopes[0].statements[0].children[2]
         self.assertEqual(tok.end_pos, (3, 14))
 
     def test_end_pos_multi_line(self):
-        parsed = parser.Parser(u('''
+        parsed = parser.Parser(parser.load_grammar(), u('''
 def testit():
     a = """huhu
 asdfasdf""" + "h"
 '''))
-        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
+        tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
         self.assertEqual(tok.end_pos, (4, 11))
 
     def test_simple_no_whitespace(self):
@@ -73,3 +73,9 @@
                 self.assertEqual(t._preceding_whitespace, ' ')
             if t.string == 'if':
                 self.assertEqual(t._preceding_whitespace, ' ')
+
+
+def test_tokenizer_with_string_literal_backslash():
+    import jedi
+    c = jedi.Script("statement = u'foo\\\n'; statement").goto_definitions()
+    assert c[0]._name.parent.obj == 'foo'