forked from VimPlug/jedi

TokenInfo -> Token

Dave Halter
2014-02-25 16:44:48 +01:00
parent 6439d6c848
commit 18e985a961
5 changed files with 27 additions and 27 deletions

View File

@@ -614,7 +614,7 @@ class PushBackTokenizer(object):
     def __init__(self, tokenizer):
         self._tokenizer = tokenizer
         self._push_backs = []
-        self.current = tokenize.TokenInfo(None, '', (0, 0))
+        self.current = tokenize.Token(None, '', (0, 0))
     def push_last_back(self):
         self._push_backs.append(self.current)
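The hunk above only touches the placeholder token that PushBackTokenizer starts out with; the iteration logic of the class is not part of this diff. As a minimal sketch of the push-back pattern (assumed for illustration, not copied from jedi's source), a wrapper like this returns pushed-back items before pulling new ones from the underlying tokenizer:

    import collections

    class PushBackIterator(object):
        # Assumed illustration of the push-back pattern seen above.
        def __init__(self, iterator):
            self._iterator = iter(iterator)
            self._push_backs = collections.deque()
            self.current = None

        def push_last_back(self):
            # Re-queue the most recently returned item.
            self._push_backs.append(self.current)

        def __iter__(self):
            return self

        def __next__(self):
            if self._push_backs:
                self.current = self._push_backs.pop()
            else:
                self.current = next(self._iterator)
            return self.current

        next = __next__  # Python 2 spelling, matching the code base of that era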

View File

@@ -12,7 +12,7 @@ from jedi.parser import Parser
 from jedi.parser import representation as pr
 from jedi.parser import tokenize
 from jedi import cache
-from jedi.parser.tokenize import (source_tokens, TokenInfo, FLOWS, NEWLINE,
+from jedi.parser.tokenize import (source_tokens, Token, FLOWS, NEWLINE,
                                   COMMENT, ENDMARKER)
@@ -398,7 +398,7 @@ class FastTokenizer(object):
         self.closed = False
         # fast parser options
-        self.current = self.previous = TokenInfo(None, '', (0, 0))
+        self.current = self.previous = Token(None, '', (0, 0))
         self.in_flow = False
         self.new_indent = False
         self.parser_indent = self.old_parser_indent = 0

View File

@@ -840,7 +840,7 @@ class Statement(Simple):
     def get_code(self, new_line=True):
         def assemble(command_list, assignment=None):
             pieces = [c.get_code() if isinstance(c, Simple) else c.string if
-                      isinstance(c, tokenize.TokenInfo) else unicode(c)
+                      isinstance(c, tokenize.Token) else unicode(c)
                       for c in command_list]
             if assignment is None:
                 return ''.join(pieces)

View File

@@ -9,10 +9,10 @@ from inspect import cleandoc
 from ast import literal_eval
 from jedi._compatibility import unicode
-from jedi.parser.tokenize import TokenInfo
+from jedi.parser.tokenize import Token
-class TokenNoCompat(TokenInfo):
+class TokenNoCompat(Token):
     __slots__ = ()
     def __getitem__(self, key):

View File

@@ -35,30 +35,30 @@ ENCODING = N_TOKENS + 1
 tok_name[ENCODING] = 'ENCODING'
-class TokenInfo(object):
+class Token(object):
     """
     The token object is an efficient representation of the structure
     (type, token, (start_pos_line, start_pos_col)). It has indexer
     methods that maintain compatibility to existing code that expects the above
     structure.
-    >>> tuple(TokenInfo(1, 'foo' ,(3,4)))
+    >>> tuple(Token(1, 'foo' ,(3,4)))
     (1, 'foo', (3, 4), (3, 7))
-    >>> repr(TokenInfo(1, "test", (1, 1)))
-    "<TokenInfo: (1, 'test', (1, 1))>"
-    >>> TokenInfo(1, 'bar', (3, 4)).__getstate__()
+    >>> repr(Token(1, "test", (1, 1)))
+    "<Token: (1, 'test', (1, 1))>"
+    >>> Token(1, 'bar', (3, 4)).__getstate__()
     (1, 'bar', 3, 4)
-    >>> a = TokenInfo(0, 'baz', (0, 0))
+    >>> a = Token(0, 'baz', (0, 0))
     >>> a.__setstate__((1, 'foo', 3, 4))
     >>> a
-    <TokenInfo: (1, 'foo', (3, 4))>
+    <Token: (1, 'foo', (3, 4))>
     >>> a.start_pos
     (3, 4)
     >>> a.string
     'foo'
     >>> a._start_pos_col
     4
-    >>> TokenInfo(1, u("😷"), (1 ,1)).string + "p" == u("😷p")
+    >>> Token(1, u("😷"), (1 ,1)).string + "p" == u("😷p")
     True
     """
     __slots__ = ("type", "string", "_start_pos_line", "_start_pos_col")
@@ -180,7 +180,7 @@ Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
 Funny = group(Operator, Bracket, Special)
 PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
+token = Ignore + PlainToken
 # First (or only) line of ' or " string.
 ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
@@ -196,7 +196,7 @@ def _compile(expr):
 tokenprog, pseudoprog, single3prog, double3prog = map(
-    _compile, (Token, PseudoToken, Single3, Double3))
+    _compile, (token, PseudoToken, Single3, Double3))
 endprogs = {"'": _compile(Single), '"': _compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
@@ -249,7 +249,7 @@ def generate_tokens(readline, line_offset=0):
         line = readline()  # readline returns empty if it's finished. See StringIO
         if not line:
             if contstr:
-                yield TokenInfo(ERRORTOKEN, contstr, contstr_start)
+                yield Token(ERRORTOKEN, contstr, contstr_start)
             break
         lnum += 1
@@ -259,7 +259,7 @@ def generate_tokens(readline, line_offset=0):
             endmatch = endprog.match(line)
             if endmatch:
                 pos = endmatch.end(0)
-                yield TokenInfo(STRING, contstr + line[:pos], contstr_start)
+                yield Token(STRING, contstr + line[:pos], contstr_start)
                 contstr = ''
                 contline = None
             else:
@@ -270,7 +270,7 @@ def generate_tokens(readline, line_offset=0):
         while pos < max:
             pseudomatch = pseudoprog.match(line, pos)
             if not pseudomatch:  # scan for tokens
-                yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos))
+                yield Token(ERRORTOKEN, line[pos], (lnum, pos))
                 pos += 1
                 continue
@@ -280,19 +280,19 @@ def generate_tokens(readline, line_offset=0):
             if (initial in numchars or  # ordinary number
                     (initial == '.' and token != '.' and token != '...')):
-                yield TokenInfo(NUMBER, token, spos)
+                yield Token(NUMBER, token, spos)
             elif initial in '\r\n':
-                yield TokenInfo(NEWLINE, token, spos)
+                yield Token(NEWLINE, token, spos)
             elif initial == '#':
                 assert not token.endswith("\n")
-                yield TokenInfo(COMMENT, token, spos)
+                yield Token(COMMENT, token, spos)
             elif token in triple_quoted:
                 endprog = endprogs[token]
                 endmatch = endprog.match(line, pos)
                 if endmatch:  # all on one line
                     pos = endmatch.end(0)
                     token = line[start:pos]
-                    yield TokenInfo(STRING, token, spos)
+                    yield Token(STRING, token, spos)
                 else:
                     contstr_start = (lnum, start)  # multiple lines
                     contstr = line[start:]
@@ -309,12 +309,12 @@ def generate_tokens(readline, line_offset=0):
                     contline = line
                     break
                 else:  # ordinary string
-                    yield TokenInfo(STRING, token, spos)
+                    yield Token(STRING, token, spos)
             elif initial in namechars:  # ordinary name
-                yield TokenInfo(NAME, token, spos)
+                yield Token(NAME, token, spos)
             elif initial == '\\' and line[start:] == '\\\n':  # continued stmt
                 continue
             else:
-                yield TokenInfo(OP, token, spos)
+                yield Token(OP, token, spos)
-    yield TokenInfo(ENDMARKER, '', (lnum, 0))
+    yield Token(ENDMARKER, '', (lnum, 0))
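Taken together, the hunk headers give the generator's signature, generate_tokens(readline, line_offset=0), and the class docstring gives the Token fields, so usage at this point in the tree presumably looked roughly like the sketch below (an assumption for illustration, not documented API of current jedi):

    import io
    from jedi.parser.tokenize import generate_tokens

    source = u"answer = 42\n"
    for tok in generate_tokens(io.StringIO(source).readline):
        # Each yielded Token carries .type, .string and .start_pos,
        # as described in the class docstring above.
        print(tok.type, repr(tok.string), tok.start_pos)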