diff --git a/parso/grammar.py b/parso/grammar.py
index 10689f0..3094850 100644
--- a/parso/grammar.py
+++ b/parso/grammar.py
@@ -7,7 +7,7 @@ from parso._compatibility import FileNotFoundError, unicode
 from parso.pgen2.pgen import generate_grammar
 from parso.utils import splitlines, source_to_unicode
 from parso.python.diff import DiffParser
-from parso.tokenize import tokenize_lines
+from parso.python.tokenize import tokenize_lines
 from parso.cache import parser_cache, load_module, save_module
 from parso.parser import BaseParser
 from parso.python.parser import Parser as PythonParser
diff --git a/parso/pgen2/parse.py b/parso/pgen2/parse.py
index 365ff07..ac1ade3 100644
--- a/parso/pgen2/parse.py
+++ b/parso/pgen2/parse.py
@@ -14,7 +14,7 @@ See Parser/parser.c in the Python distribution for additional info on
 how this parsing engine works.
 """
 
-from parso import tokenize
+from parso.python import tokenize
 
 
 class InternalParseError(Exception):
diff --git a/parso/pgen2/pgen.py b/parso/pgen2/pgen.py
index c429c3a..60e4838 100644
--- a/parso/pgen2/pgen.py
+++ b/parso/pgen2/pgen.py
@@ -6,8 +6,8 @@
 # Modifications are dual-licensed: MIT and PSF.
 
 from parso.pgen2 import grammar
-from parso import token
-from parso import tokenize
+from parso.python import token
+from parso.python import tokenize
 
 
 class ParserGenerator(object):
diff --git a/parso/python/diff.py b/parso/python/diff.py
index c3bda1d..2fbf1f7 100644
--- a/parso/python/diff.py
+++ b/parso/python/diff.py
@@ -13,8 +13,8 @@ import logging
 from parso.utils import splitlines
 from parso.python.parser import Parser
 from parso.python.tree import EndMarker
-from parso.tokenize import (tokenize_lines, NEWLINE, TokenInfo,
-                            ENDMARKER, INDENT, DEDENT)
+from parso.python.tokenize import (tokenize_lines, NEWLINE, TokenInfo,
+                                   ENDMARKER, INDENT, DEDENT)
 
 
 def _get_last_line(node_or_leaf):
diff --git a/parso/python/parser.py b/parso/python/parser.py
index 717fc48..b05944f 100644
--- a/parso/python/parser.py
+++ b/parso/python/parser.py
@@ -1,7 +1,7 @@
 from parso.python import tree
-from parso import tokenize
-from parso.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
-                         STRING, tok_name)
+from parso.python import tokenize
+from parso.python.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
+                                STRING, tok_name)
 from parso.parser import BaseParser
 from parso.pgen2.parse import token_to_ilabel
diff --git a/parso/python/prefix.py b/parso/python/prefix.py
index 18d06e4..38c2c54 100644
--- a/parso/python/prefix.py
+++ b/parso/python/prefix.py
@@ -1,6 +1,6 @@
 import re
 
-from parso.tokenize import group
+from parso.python.tokenize import group
 
 
 class PrefixPart(object):
diff --git a/parso/token.py b/parso/python/token.py
similarity index 100%
rename from parso/token.py
rename to parso/python/token.py
diff --git a/parso/tokenize.py b/parso/python/tokenize.py
similarity index 98%
rename from parso/tokenize.py
rename to parso/python/tokenize.py
index 6c5bbc9..84e05d5 100644
--- a/parso/tokenize.py
+++ b/parso/python/tokenize.py
@@ -16,8 +16,8 @@ import re
 from collections import namedtuple
 import itertools as _itertools
 
-from parso.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
-                         NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
+from parso.python.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
+                                NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
 from parso._compatibility import py_version, u
 from parso.utils import splitlines
diff --git a/test/test_tokenize.py b/test/test_tokenize.py
index 7b3c1a7..c5f4bb3 100644
--- a/test/test_tokenize.py
+++ b/test/test_tokenize.py
@@ -4,10 +4,10 @@ from textwrap import dedent
 from parso._compatibility import py_version
 from parso.utils import splitlines
-from parso.token import NAME, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
-from parso import tokenize
+from parso.python.token import NAME, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
+from parso.python import tokenize
 from parso import parse
-from parso.tokenize import TokenInfo
+from parso.python.tokenize import TokenInfo
 
 
 def _get_token_list(string):
     return list(tokenize.tokenize(string))
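
Note for downstream consumers: this patch is a pure relocation — parso/token.py and parso/tokenize.py move under parso/python/ (similarity 100% and 98%), and no compatibility shim is added at the old paths, so any code importing the old module names breaks. A minimal sketch of the required import migration, using only names that appear in this diff:

    # Old imports (these module paths no longer exist after the rename):
    #   from parso import tokenize
    #   from parso.token import NAME, NEWLINE

    # New imports:
    from parso.python import tokenize
    from parso.python.token import NAME, NEWLINE

    # The tokenizer API itself is unchanged; e.g., mirroring the helper
    # in test/test_tokenize.py above:
    tokens = list(tokenize.tokenize('x = 1\n'))

The public entry point from parso import parse is untouched by this change, as the test file's imports show.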