From a28bc7195f4f690de4e60be27bf6c364df4642fb Mon Sep 17 00:00:00 2001 From: David Halter Date: Fri, 19 Apr 2013 23:50:28 +0430 Subject: [PATCH 01/10] added tokenizer of the standard library to jedi, because in the future it shall replace tokenize --- jedi/tokenizer.py | 556 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 556 insertions(+) create mode 100644 jedi/tokenizer.py diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py new file mode 100644 index 00000000..f575e9bc --- /dev/null +++ b/jedi/tokenizer.py @@ -0,0 +1,556 @@ +"""Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). It generates 5-tuples with these +members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators. Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. +""" + +__author__ = 'Ka-Ping Yee ' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' + 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' + 'Michael Foord') +import builtins +import re +import sys +from token import * +from codecs import lookup, BOM_UTF8 +import collections +from io import TextIOWrapper +cookie_re = re.compile("coding[:=]\s*([-\w.]+)") + +import token +__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", + "NL", "untokenize", "ENCODING", "TokenInfo"] +del token + +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +ENCODING = N_TOKENS + 2 +tok_name[ENCODING] = 'ENCODING' +N_TOKENS += 3 + +class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): + def __repr__(self): + annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) + return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % + self._replace(type=annotated_type)) + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +# Note: we use unicode matching for names ("\w") but ascii matching for +# number literals. +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'\w+' + +Hexnumber = r'0[xX][0-9a-fA-F]+' +Binnumber = r'0[bB][01]+' +Octnumber = r'0[oO][0-7]+' +Decnumber = r'(?:0+|[1-9][0-9]*)' +Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?[0-9]+' +Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) +Expfloat = r'[0-9]+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. 
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", + r"//=?", r"->", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. +ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +def _compile(expr): + return re.compile(expr, re.UNICODE) + +tokenprog, pseudoprog, single3prog, double3prog = map( + _compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": _compile(Single), '"': _compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "b'''", 'b"""', "B'''", 'B"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""'): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "b'", 'b"', "B'", 'B"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"' ): + single_quoted[t] = t + +del _compile + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + self.encoding = None + + def add_whitespace(self, start): + row, col = start + assert row <= self.prev_row + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable): + for t in iterable: + if len(t) == 2: + self.compat(t, iterable) + break + tok_type, token, start, end, line = t + if tok_type == ENCODING: + self.encoding = token + continue + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + startline = False + indents = [] + toks_append = self.tokens.append + toknum, tokval = token + + if toknum in (NAME, NUMBER): + tokval += ' ' + if toknum in (NEWLINE, NL): + startline = True + prevstring = False + for tok in iterable: + toknum, tokval = tok[:2] + if toknum == ENCODING: + self.encoding = tokval + continue + + if toknum in (NAME, NUMBER): + tokval += ' ' + + # Insert a space between two consecutive strings + if toknum == STRING: + if prevstring: + tokval = ' ' + tokval + prevstring = True + else: + prevstring = False + + if toknum == INDENT: + 
indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + + +def untokenize(iterable): + """Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output bytes will tokenize the back to the input + t1 = [tok[:2] for tok in tokenize(f.readline)] + newcode = untokenize(t1) + readline = BytesIO(newcode).readline + t2 = [tok[:2] for tok in tokenize(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + out = ut.untokenize(iterable) + if ut.encoding is not None: + out = out.encode(ut.encoding) + return out + + +def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + +def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argment, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. 
+ """ + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + line_string = line.decode('ascii') + except UnicodeDecodeError: + return None + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + raise SyntaxError("unknown encoding: " + encoding) + + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + raise SyntaxError('encoding problem: utf-8') + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + + +def open(filename): + """Open a file in read only mode using the encoding detected by + detect_encoding(). + """ + buffer = builtins.open(filename, 'rb') + encoding, lines = detect_encoding(buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True) + text.mode = 'r' + return text + + +def tokenize(readline): + """ + The tokenize() generator requires one argment, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + """ + # This import is here to avoid problems when the itertools module is not + # built yet and tokenize is imported. + from itertools import chain, repeat + encoding, consumed = detect_encoding(readline) + rl_gen = iter(readline, b"") + empty = repeat(b"") + return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) + + +def _tokenize(readline, encoding): + lnum = parenlev = continued = 0 + numchars = '0123456789' + contstr, needcont = '', 0 + contline = None + indents = [0] + + if encoding is not None: + if encoding == "utf-8-sig": + # BOM will already have been stripped. 
+ encoding = "utf-8" + yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') + while True: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = b'' + + if encoding is not None: + line = line.decode(encoding) + lnum += 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield TokenInfo(STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield TokenInfo(ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': + column += 1 + elif line[pos] == '\t': + column = (column//tabsize + 1)*tabsize + elif line[pos] == '\f': + column = 0 + else: + break + pos += 1 + if pos == max: + break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + len(comment_token) + yield TokenInfo(COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield TokenInfo(NL, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + token, initial = line[start:end], line[start] + + if (initial in numchars or # ordinary number + (initial == '.' and token != '.' 
and token != '...')): + yield TokenInfo(NUMBER, token, spos, epos, line) + elif initial in '\r\n': + yield TokenInfo(NL if parenlev > 0 else NEWLINE, + token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield TokenInfo(COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield TokenInfo(STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = (endprogs[initial] or endprogs[token[1]] or + endprogs[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield TokenInfo(STRING, token, spos, epos, line) + elif initial.isidentifier(): # ordinary name + yield TokenInfo(NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + continued = 1 + else: + if initial in '([{': + parenlev += 1 + elif initial in ')]}': + parenlev -= 1 + yield TokenInfo(OP, token, spos, epos, line) + else: + yield TokenInfo(ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos += 1 + + for indent in indents[1:]: # pop remaining indent levels + yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') + yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') + + +# An undocumented, backwards compatible, API for all the places in the standard +# library that expect to be able to use tokenize with strings +def generate_tokens(readline): + return _tokenize(readline, None) + +if __name__ == "__main__": + # Quick sanity check + s = b'''def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + ''' + for tok in tokenize(iter(s.splitlines()).__next__): + print(tok) From 3b0f4b87cfe40e112177ea8a39a18ef102cac828 Mon Sep 17 00:00:00 2001 From: David Halter Date: Fri, 19 Apr 2013 23:59:33 +0430 Subject: [PATCH 02/10] removed unnecessary stuff for the jedi tokenizer --- jedi/tokenizer.py | 164 +++------------------------------------------- 1 file changed, 8 insertions(+), 156 deletions(-) diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py index f575e9bc..90972c85 100644 --- a/jedi/tokenizer.py +++ b/jedi/tokenizer.py @@ -1,4 +1,11 @@ -"""Tokenization help for Python programs. +""" +This tokenizer has been copied from the ``tokenize.py`` standard library +tokenizer. The reason was simple: The standanrd library tokenizer fails +if the indentation is not right. The fast parser of jedi however requires +"wrong" indentation. + +Tokenization help for Python programs. +++++++++++++++++++++++++++++++++++++++ tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for @@ -20,23 +27,12 @@ operators. 
Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ -__author__ = 'Ka-Ping Yee ' -__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' - 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' - 'Michael Foord') -import builtins import re -import sys from token import * from codecs import lookup, BOM_UTF8 import collections -from io import TextIOWrapper cookie_re = re.compile("coding[:=]\s*([-\w.]+)") -import token -__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", - "NL", "untokenize", "ENCODING", "TokenInfo"] -del token COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' @@ -152,107 +148,6 @@ class TokenError(Exception): pass class StopTokenizing(Exception): pass -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - self.encoding = None - - def add_whitespace(self, start): - row, col = start - assert row <= self.prev_row - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def untokenize(self, iterable): - for t in iterable: - if len(t) == 2: - self.compat(t, iterable) - break - tok_type, token, start, end, line = t - if tok_type == ENCODING: - self.encoding = token - continue - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - startline = False - indents = [] - toks_append = self.tokens.append - toknum, tokval = token - - if toknum in (NAME, NUMBER): - tokval += ' ' - if toknum in (NEWLINE, NL): - startline = True - prevstring = False - for tok in iterable: - toknum, tokval = tok[:2] - if toknum == ENCODING: - self.encoding = tokval - continue - - if toknum in (NAME, NUMBER): - tokval += ' ' - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = ' ' + tokval - prevstring = True - else: - prevstring = False - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - - -def untokenize(iterable): - """Transform tokens back into Python source code. - It returns a bytes object, encoded using the ENCODING - token, which is the first token sequence output by tokenize. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - Round-trip invariant for full input: - Untokenized source will match input source exactly - - Round-trip invariant for limited intput: - # Output bytes will tokenize the back to the input - t1 = [tok[:2] for tok in tokenize(f.readline)] - newcode = untokenize(t1) - readline = BytesIO(newcode).readline - t2 = [tok[:2] for tok in tokenize(readline)] - assert t1 == t2 - """ - ut = Untokenizer() - out = ut.untokenize(iterable) - if ut.encoding is not None: - out = out.encode(ut.encoding) - return out - - def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. @@ -336,18 +231,6 @@ def detect_encoding(readline): return default, [first, second] -def open(filename): - """Open a file in read only mode using the encoding detected by - detect_encoding(). 
- """ - buffer = builtins.open(filename, 'rb') - encoding, lines = detect_encoding(buffer.readline) - buffer.seek(0) - text = TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = 'r' - return text - - def tokenize(readline): """ The tokenize() generator requires one argment, readline, which @@ -523,34 +406,3 @@ def _tokenize(readline, encoding): for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') - - -# An undocumented, backwards compatible, API for all the places in the standard -# library that expect to be able to use tokenize with strings -def generate_tokens(readline): - return _tokenize(readline, None) - -if __name__ == "__main__": - # Quick sanity check - s = b'''def parseline(self, line): - """Parse the line into a command name and a string containing - the arguments. Returns a tuple containing (command, args, line). - 'command' and 'args' may be None if the line couldn't be parsed. - """ - line = line.strip() - if not line: - return None, None, line - elif line[0] == '?': - line = 'help ' + line[1:] - elif line[0] == '!': - if hasattr(self, 'do_shell'): - line = 'shell ' + line[1:] - else: - return None, None, line - i, n = 0, len(line) - while i < n and line[i] in self.identchars: i = i+1 - cmd, arg = line[:i], line[i:].strip() - return cmd, arg, line - ''' - for tok in tokenize(iter(s.splitlines()).__next__): - print(tok) From 06da7ddeecb068384e4e21f0e4e095e42724cf51 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 09:21:22 +0430 Subject: [PATCH 03/10] tokenizer py27 support --- jedi/tokenizer.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py index 90972c85..989678a6 100644 --- a/jedi/tokenizer.py +++ b/jedi/tokenizer.py @@ -27,12 +27,15 @@ operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. 
""" +import string import re from token import * from codecs import lookup, BOM_UTF8 import collections cookie_re = re.compile("coding[:=]\s*([-\w.]+)") +namechars = string.ascii_letters + '_' + COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' @@ -388,7 +391,7 @@ def _tokenize(readline, encoding): break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) - elif initial.isidentifier(): # ordinary name + elif initial in namechars: # ordinary name yield TokenInfo(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 @@ -400,9 +403,15 @@ def _tokenize(readline, encoding): yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], - (lnum, pos), (lnum, pos+1), line) + (lnum, pos), (lnum, pos + 1), line) pos += 1 for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') + + +# An undocumented, backwards compatible, API for all the places in the standard +# library that expect to be able to use tokenize with strings +def generate_tokens(readline): + return _tokenize(readline, None) From 6b2619a844c56cd56c927d5be503ec2efd04c6b7 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 09:23:27 +0430 Subject: [PATCH 04/10] tokenizer removed unused functions --- jedi/tokenizer.py | 113 ---------------------------------------------- 1 file changed, 113 deletions(-) diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py index 989678a6..c5ff5e2f 100644 --- a/jedi/tokenizer.py +++ b/jedi/tokenizer.py @@ -148,119 +148,6 @@ tabsize = 8 class TokenError(Exception): pass -class StopTokenizing(Exception): pass - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argment, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. 
- """ - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - line_string = line.decode('ascii') - except UnicodeDecodeError: - return None - - matches = cookie_re.findall(line_string) - if not matches: - return None - encoding = _get_normal_name(matches[0]) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - raise SyntaxError("unknown encoding: " + encoding) - - if bom_found: - if codec.name != 'utf-8': - # This behaviour mimics the Python interpreter - raise SyntaxError('encoding problem: utf-8') - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def tokenize(readline): - """ - The tokenize() generator requires one argment, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as bytes. Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile, 'rb').__next__ # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. - - The first token sequence will always be an ENCODING token - which tells you which encoding was used to decode the bytes stream. - """ - # This import is here to avoid problems when the itertools module is not - # built yet and tokenize is imported. - from itertools import chain, repeat - encoding, consumed = detect_encoding(readline) - rl_gen = iter(readline, b"") - empty = repeat(b"") - return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) - def _tokenize(readline, encoding): lnum = parenlev = continued = 0 From 0d385563a5d1bb1db3e20e8fb6bec6ce72099d19 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 09:28:32 +0430 Subject: [PATCH 05/10] tokenizer: removed encoding --- jedi/tokenizer.py | 38 +++----------------------------------- 1 file changed, 3 insertions(+), 35 deletions(-) diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py index c5ff5e2f..e48cf30b 100644 --- a/jedi/tokenizer.py +++ b/jedi/tokenizer.py @@ -4,27 +4,8 @@ tokenizer. The reason was simple: The standanrd library tokenizer fails if the indentation is not right. The fast parser of jedi however requires "wrong" indentation. -Tokenization help for Python programs. -++++++++++++++++++++++++++++++++++++++ - -tokenize(readline) is a generator that breaks a stream of bytes into -Python tokens. It decodes the bytes according to PEP-0263 for -determining source file encoding. - -It accepts a readline-like method which is called repeatedly to get the -next line of input (or b"" for EOF). 
It generates 5-tuples with these -members: - - the token type (see token.py) - the token (a string) - the starting (row, column) indices of the token (a 2-tuple of ints) - the ending (row, column) indices of the token (a 2-tuple of ints) - the original line (string) - -It is designed to match the working of the Python tokenizer exactly, except -that it produces COMMENT tokens for comments and gives type OP for all -operators. Additionally, all token lists start with an ENCODING token -which tells you which encoding was used to decode the bytes stream. +Basically this is a stripped down version of the standard library module, so +you can read the documentation there. """ import string @@ -149,26 +130,19 @@ tabsize = 8 class TokenError(Exception): pass -def _tokenize(readline, encoding): +def generate_tokens(readline): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] - if encoding is not None: - if encoding == "utf-8-sig": - # BOM will already have been stripped. - encoding = "utf-8" - yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') while True: # loop over lines in stream try: line = readline() except StopIteration: line = b'' - if encoding is not None: - line = line.decode(encoding) lnum += 1 pos, max = 0, len(line) @@ -296,9 +270,3 @@ def _tokenize(readline, encoding): for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') - - -# An undocumented, backwards compatible, API for all the places in the standard -# library that expect to be able to use tokenize with strings -def generate_tokens(readline): - return _tokenize(readline, None) From e2a6562384b356d59690f0e1be7d765b284a4a78 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 09:51:52 +0430 Subject: [PATCH 06/10] tokenizer: removed raised errors --- jedi/tokenizer.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/jedi/tokenizer.py b/jedi/tokenizer.py index e48cf30b..e4b5c0d6 100644 --- a/jedi/tokenizer.py +++ b/jedi/tokenizer.py @@ -148,7 +148,8 @@ def generate_tokens(readline): if contstr: # continued string if not line: - raise TokenError("EOF in multi-line string", strstart) + # multiline string has not been finished + break endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) @@ -200,16 +201,13 @@ def generate_tokens(readline): indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line)) indents = indents[:-1] yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: - raise TokenError("EOF in multi-line statement", (lnum, 0)) + # basically a statement has not been finished here. 
+ break continued = 0 while pos < max: From d4428c497ac4f3c4500d5db6f19051c099200681 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 09:52:54 +0430 Subject: [PATCH 07/10] use tokenizer instead of tokenize in all modules --- jedi/common.py | 2 +- jedi/modules.py | 2 +- jedi/parsing.py | 2 +- jedi/parsing_representation.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/jedi/common.py b/jedi/common.py index ebd1f25d..04d17a5d 100644 --- a/jedi/common.py +++ b/jedi/common.py @@ -2,7 +2,7 @@ import sys import contextlib import functools -import tokenize +import tokenizer as tokenize from jedi._compatibility import next, reraise from jedi import debug diff --git a/jedi/modules.py b/jedi/modules.py index e67cc2c3..3301eba7 100644 --- a/jedi/modules.py +++ b/jedi/modules.py @@ -16,7 +16,7 @@ Apart from those classes there's a ``sys.path`` fetching function, as well as from __future__ import with_statement import re -import tokenize +import tokenizer as tokenize import sys import os diff --git a/jedi/parsing.py b/jedi/parsing.py index e5279f71..2241b12d 100644 --- a/jedi/parsing.py +++ b/jedi/parsing.py @@ -17,7 +17,7 @@ complexity of the ``Parser`` (there's another parser sitting inside """ from __future__ import with_statement -import tokenize +import tokenizer as tokenize import keyword from jedi._compatibility import next, StringIO diff --git a/jedi/parsing_representation.py b/jedi/parsing_representation.py index d4b8380c..a7dc9545 100644 --- a/jedi/parsing_representation.py +++ b/jedi/parsing_representation.py @@ -38,7 +38,7 @@ from __future__ import with_statement import os import re -import tokenize +import tokenizer as tokenize from jedi._compatibility import next, literal_eval, cleandoc, Python3Method, \ encoding, property, unicode, is_py3k From a7308fa51d2a1442fb9011a50e207a8307b591ab Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 14:11:46 +0430 Subject: [PATCH 08/10] removed more unused code --- jedi/common.py | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/jedi/common.py b/jedi/common.py index 04d17a5d..960d71d3 100644 --- a/jedi/common.py +++ b/jedi/common.py @@ -5,7 +5,6 @@ import functools import tokenizer as tokenize from jedi._compatibility import next, reraise -from jedi import debug from jedi import settings @@ -101,28 +100,7 @@ class NoErrorTokenizer(object): def __next__(self): if self.closed: raise MultiLevelStopIteration() - try: - self.current = next(self.gen) - except tokenize.TokenError: - # We just ignore this error, I try to handle it earlier - as - # good as possible - debug.warning('parentheses not closed error') - return self.__next__() - except IndentationError: - # This is an error, that tokenize may produce, because the code - # is not indented as it should. Here it just ignores this line - # and restarts the parser. - # (This is a rather unlikely error message, for normal code, - # tokenize seems to be pretty tolerant) - debug.warning('indentation error on line %s, ignoring it' % - self.current[2][0]) - # add the starting line of the last position - self.offset = (self.offset[0] + self.current[2][0], - self.current[2][1]) - self.gen = PushBackIterator(tokenize.generate_tokens( - self.readline)) - return self.__next__() - + self.current = next(self.gen) c = list(self.current) # stop if a new class or definition is started at position zero. 
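Why the try/except removed in the hunk above is safe to drop: after patches 05 and 06 the copied tokenizer simply breaks out of its main loop on unfinished strings or statements and pops mismatched dedents without raising, so NoErrorTokenizer can consume it directly. A minimal sketch of the behavioural difference (assuming the module is importable as jedi.tokenizer; the sample source below is made up for illustration and is not part of the patches):

    import tokenize
    from io import StringIO

    from jedi import tokenizer

    # A dedent to a level that was never opened: the standard library raises
    # IndentationError, while the stripped-down copy emits a DEDENT token and
    # keeps going.
    source = "if x:\n        a = 1\n    b = 2\n"

    try:
        list(tokenize.generate_tokens(StringIO(source).readline))
    except IndentationError as exc:
        print("stdlib tokenize:", exc)

    # The jedi copy tokenizes the same input without raising.
    for tok in tokenizer.generate_tokens(StringIO(source).readline):
        print(tok)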
From 8c56fba1e9946cc92d1accf8aeaba783e6582315 Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 14:12:15 +0430 Subject: [PATCH 09/10] fix a problem with statements without parents --- jedi/parsing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/jedi/parsing.py b/jedi/parsing.py index 2241b12d..e1785c01 100644 --- a/jedi/parsing.py +++ b/jedi/parsing.py @@ -394,6 +394,7 @@ class Parser(object): stmt = stmt_class(self.module, set_vars, used_vars, tok_list, first_pos, self.end_pos) + stmt.parent = self.top_module self._check_user_stmt(stmt) # Attribute docstring (PEP 257) support From d27dc0d58efccefa2440eb76e2c6b6ea19a7b52a Mon Sep 17 00:00:00 2001 From: David Halter Date: Sat, 20 Apr 2013 14:37:18 +0430 Subject: [PATCH 10/10] remove py2.5 from travis - this also removes python 2.5 support --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 16d38fab..29c03f1a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,5 @@ language: python env: - - TOXENV=py25 PIP_INSECURE=t - TOXENV=py26 - TOXENV=py27 - TOXENV=py32
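Taken together, the series leaves jedi/tokenizer.py with generate_tokens(readline) as its main entry point: it operates on already-decoded lines of text, yields TokenInfo 5-tuples, and unlike the standard library module it does not abort on incomplete or oddly indented sources. A short usage sketch (the jedi.tokenizer import path and the sample snippet are assumptions for illustration, not part of the patches):

    from io import StringIO

    from jedi import tokenizer

    source = (
        "def parseline(line):\n"
        "    line = line.strip()\n"
        "    return line\n"
    )

    # Each TokenInfo is (type, string, (start_row, start_col),
    # (end_row, end_col), line).
    for tok in tokenizer.generate_tokens(StringIO(source).readline):
        print(tok)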