Move tokenize.py and token.py to the python folder.

Dave Halter
2017-07-09 00:06:14 +02:00
parent ee1184e7c4
commit c28ec5f182
9 changed files with 15 additions and 15 deletions
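For downstream code, the move is a one-line import update. A minimal before/after sketch, assuming parso at this revision is importable; the sample source string is illustrative, while tokenize.tokenize() and the NAME/OP constants appear in the diffs below:

# Before this commit:
#   from parso import tokenize
#   from parso.token import NAME, OP
# After it, both modules live under parso.python:
from parso.python import tokenize
from parso.python.token import NAME, OP

# tokenize.tokenize() yields TokenInfo tuples for a source string
# (the same call the updated test helper uses at the bottom of this page).
for token_info in tokenize.tokenize('x = 1\n'):
    print(token_info)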

View File

@@ -7,7 +7,7 @@ from parso._compatibility import FileNotFoundError, unicode
 from parso.pgen2.pgen import generate_grammar
 from parso.utils import splitlines, source_to_unicode
 from parso.python.diff import DiffParser
-from parso.tokenize import tokenize_lines
+from parso.python.tokenize import tokenize_lines
 from parso.cache import parser_cache, load_module, save_module
 from parso.parser import BaseParser
 from parso.python.parser import Parser as PythonParser

View File

@@ -14,7 +14,7 @@ See Parser/parser.c in the Python distribution for additional info on
 how this parsing engine works.
 """

-from parso import tokenize
+from parso.python import tokenize


 class InternalParseError(Exception):

View File

@@ -6,8 +6,8 @@
 # Modifications are dual-licensed: MIT and PSF.

 from parso.pgen2 import grammar
-from parso import token
-from parso import tokenize
+from parso.python import token
+from parso.python import tokenize


 class ParserGenerator(object):

View File

@@ -13,8 +13,8 @@ import logging
 from parso.utils import splitlines
 from parso.python.parser import Parser
 from parso.python.tree import EndMarker
-from parso.tokenize import (tokenize_lines, NEWLINE, TokenInfo,
-                            ENDMARKER, INDENT, DEDENT)
+from parso.python.tokenize import (tokenize_lines, NEWLINE, TokenInfo,
+                                   ENDMARKER, INDENT, DEDENT)


 def _get_last_line(node_or_leaf):

View File

@@ -1,7 +1,7 @@
 from parso.python import tree
-from parso import tokenize
-from parso.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
-                         STRING, tok_name)
+from parso.python import tokenize
+from parso.python.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
+                                STRING, tok_name)
 from parso.parser import BaseParser
 from parso.pgen2.parse import token_to_ilabel

View File

@@ -1,6 +1,6 @@
 import re

-from parso.tokenize import group
+from parso.python.tokenize import group


 class PrefixPart(object):

View File

@@ -16,8 +16,8 @@ import re
 from collections import namedtuple
 import itertools as _itertools

-from parso.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
-                         NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
+from parso.python.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
+                                NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
 from parso._compatibility import py_version, u
 from parso.utils import splitlines

View File

@@ -4,10 +4,10 @@ from textwrap import dedent
 from parso._compatibility import py_version
 from parso.utils import splitlines
-from parso.token import NAME, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
-from parso import tokenize
+from parso.python.token import NAME, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
+from parso.python import tokenize
 from parso import parse
-from parso.tokenize import TokenInfo
+from parso.python.tokenize import TokenInfo


 def _get_token_list(string):
     return list(tokenize.tokenize(string))
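A standalone version of that helper's round trip, again only a sketch against this revision; TokenInfo and the module path come straight from the imports above, and the asserted invariant is an assumption about the tokenizer's output type:

from parso.python.tokenize import tokenize, TokenInfo

# Every item produced by the relocated tokenizer should be a TokenInfo tuple.
tokens = list(tokenize('x = 1\n'))
assert all(isinstance(t, TokenInfo) for t in tokens)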