From 2d8d3cc1573bae647bc2dda989de53d37b1ad4f4 Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Wed, 17 May 2017 14:16:36 -0400
Subject: [PATCH] Whitespace.

---
 test/test_tokenize.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/test/test_tokenize.py b/test/test_tokenize.py
index e71b6c3..009dc10 100644
--- a/test/test_tokenize.py
+++ b/test/test_tokenize.py
@@ -22,6 +22,7 @@ def test_end_pos_one_line():
     string = simple_stmt.children[0].get_rhs()
     assert string.end_pos == (3, 14)
 
+
 def test_end_pos_multi_line():
     parsed = parse(dedent('''
     def testit():
@@ -32,6 +33,7 @@ def test_end_pos_multi_line():
     string_leaf = expr_stmt.get_rhs().children[0]
     assert string_leaf.end_pos == (4, 11)
 
+
 def test_simple_no_whitespace():
     # Test a simple one line string, no preceding whitespace
     simple_docstring = '"""simple one line docstring"""'
@@ -41,6 +43,7 @@ def test_simple_no_whitespace():
     assert prefix == ''
     assert value == '"""simple one line docstring"""'
 
+
 def test_simple_with_whitespace():
     # Test a simple one line string with preceding whitespace and newline
     simple_docstring = ' """simple one line docstring""" \r\n'
@@ -55,6 +58,7 @@ def test_simple_with_whitespace():
     assert prefix == ' '
     assert typ == NEWLINE
 
+
 def test_function_whitespace():
     # Test function definition whitespace identification
     fundef = dedent('''
@@ -79,6 +83,7 @@ def test_function_whitespace():
         if value == 'if':
             assert prefix == ' '
 
+
 def test_tokenize_multiline_I():
     # Make sure multiline string having newlines have the end marker on the
     # next line
@@ -88,6 +93,7 @@ def test_tokenize_multiline_I():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
                           TokenInfo(ENDMARKER , '', (2, 0), '')]
 
+
 def test_tokenize_multiline_II():
     # Make sure multiline string having no newlines have the end marker on
     # same line
@@ -97,6 +103,7 @@ def test_tokenize_multiline_II():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
                           TokenInfo(ENDMARKER, '', (1, 4), '')]
 
+
 def test_tokenize_multiline_III():
     # Make sure multiline string having newlines have the end marker on the
     # next line even if several newline
@@ -106,6 +113,7 @@ def test_tokenize_multiline_III():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
                           TokenInfo(ENDMARKER, '', (3, 0), '')]
 
+
 def test_identifier_contains_unicode():
     fundef = dedent('''
     def 我あφ():
@@ -121,6 +129,7 @@ def test_identifier_contains_unicode():
         # They will be ignored in the parser, that's ok.
         assert unicode_token[0] == OP
 
+
 def test_quoted_strings():
     string_tokens = [
         'u"test"',