diff --git a/jedi/evaluate_representation.py b/jedi/evaluate_representation.py
index c8865ca9..2244d1f6 100644
--- a/jedi/evaluate_representation.py
+++ b/jedi/evaluate_representation.py
@@ -508,7 +508,7 @@ class Execution(Executable):
             arr.values = values
             key_stmts = []
             for key in keys:
-                stmt = pr.Statement(self._sub_module, 'XXX code', [], [], [], [],
+                stmt = pr.Statement(self._sub_module, [], [], [], [],
                                     start_pos, None)
                 stmt._commands = [key]
                 key_stmts.append(stmt)
@@ -624,7 +624,7 @@ class Execution(Executable):
             old = stmt
             # generate a statement if it's not already one.
             module = builtin.Builtin.scope
-            stmt = pr.Statement(module, 'XXX code', [], [], [], [], (0, 0), None)
+            stmt = pr.Statement(module, [], [], [], [], (0, 0), None)
             stmt._commands = [old]

         # *args
diff --git a/jedi/parsing.py b/jedi/parsing.py
index b25cdfd5..4349afa8 100644
--- a/jedi/parsing.py
+++ b/jedi/parsing.py
@@ -14,12 +14,10 @@ being parsed completely.
 ``Statement`` is just a representation of the tokens within the statement.
 This lowers memory usage and cpu time and reduces the complexity of the
 ``Parser`` (there's another parser sitting inside ``Statement``, which
 produces ``Array`` and ``Call``).
-
 """
-from _compatibility import next, StringIO, unicode
+from _compatibility import next, StringIO
 import tokenize
-import re
 import keyword

 import debug
@@ -313,8 +311,6 @@ class Parser(object):
         :return: Statement + last parsed token.
         :rtype: (Statement, str)
         """
-
-        string = unicode('')
         set_vars = []
         used_funcs = []
         used_vars = []
@@ -350,18 +346,15 @@ class Parser(object):
                     or tok in not_first_break and not tok_list
                     or tok in breaks and level <= 0):
             try:
-                set_string = None
                 #print 'parse_stmt', tok, tokenize.tok_name[token_type]
                 tok_list.append(self.current + (self.start_pos,))
                 if tok == 'as':
-                    string += " %s " % tok
                     token_type, tok = self.next()
                     if token_type == tokenize.NAME:
                         n, token_type, tok = self._parse_dot_name(self.current)
                         if n:
                             set_vars.append(n)
                             tok_list.append(n)
-                        string += ".".join(n.names)
                     continue
                 elif tok == 'lambda':
                     params = []
@@ -428,19 +421,12 @@ class Parser(object):
                             i = 0
                         tok_list, toks = tok_list[:-i], tok_list[-i:-1]

-                        src = ''
-                        for t in toks:
-                            src += t[1] if isinstance(t, tuple) \
-                                        else t.get_code()
-                        st = pr.Statement(self.module, src, [], [], [],
+                        st = pr.Statement(self.module, [], [], [],
                                           toks, first_pos, self.end_pos)

                         tok = pr.ListComprehension(st, middle, in_clause, self.scope)
                         tok_list.append(tok)

-                        if list_comp:
-                            string = ''
-                        string += tok.get_code()
                         continue
                     else:
                         n, token_type, tok = self._parse_dot_name(self.current)
@@ -453,9 +439,6 @@ class Parser(object):
                                 used_funcs.append(n)
                             else:
                                 used_vars.append(n)
-                        if string and re.match(r'[\w\d\'"]', string[-1]):
-                            string += ' '
-                        string += ".".join(n.names)
                         continue
                 elif tok.endswith('=') and tok not in ['>=', '<=', '==', '!=']:
                     # there has been an assignement -> change vars
@@ -467,21 +450,20 @@ class Parser(object):
                 elif tok in closing_brackets:
                     level -= 1

-                string = set_string if set_string is not None else string + tok
                 token_type, tok = self.next()
             except (StopIteration, common.MultiLevelStopIteration):
                 # comes from tokenizer
                 break

-        if not string:
+        if not tok_list:
             return None, tok

-        #print 'new_stat', string, set_vars, used_funcs, used_vars
+        #print 'new_stat', set_vars, used_funcs, used_vars
         if self.freshscope and not self.no_docstr and len(tok_list) == 1 \
                 and self.last_token[0] == tokenize.STRING:
             self.scope.add_docstr(self.last_token[1])
             return None, tok
         else:
-            stmt = stmt_class(self.module, string, set_vars, used_funcs,
+            stmt = stmt_class(self.module, set_vars, used_funcs,
                               used_vars, tok_list, first_pos, self.end_pos)
             self._check_user_stmt(stmt)
@@ -667,7 +649,6 @@ class Parser(object):
                     n, token_type, tok = self._parse_dot_name()
                     if n:
                         statement.set_vars.append(n)
-                        statement.code += ',' + n.get_code()
                 if statement:
                     inputs.append(statement)
                 first = False
diff --git a/jedi/parsing_representation.py b/jedi/parsing_representation.py
index 688def92..5ab61580 100644
--- a/jedi/parsing_representation.py
+++ b/jedi/parsing_representation.py
@@ -649,9 +649,6 @@ class Statement(Simple):
     stores pretty much all the Python code, except functions, classes,
     imports, and flow functions like if, for, etc.

-    :param code: The full code of a statement. This is import, if one wants \
-        to execute the code at some level.
-    :param code: str
     :param set_vars: The variables which are defined by the statement.
     :param set_vars: str
     :param used_funcs: The functions which are used by the statement.
@@ -663,14 +660,12 @@ class Statement(Simple):
     :param start_pos: Position (line, column) of the Statement.
     :type start_pos: tuple(int, int)
     """
-    __slots__ = ('used_funcs', 'code', 'token_list', 'used_vars',
+    __slots__ = ('used_funcs', 'token_list', 'used_vars',
                  'set_vars', '_commands', '_assignment_details')

-    def __init__(self, module, code, set_vars, used_funcs, used_vars,
+    def __init__(self, module, set_vars, used_funcs, used_vars,
                  token_list, start_pos, end_pos, parent=None):
         super(Statement, self).__init__(module, start_pos, end_pos)
-        # TODO remove code -> much cleaner
-        self.code = code
         self.used_funcs = used_funcs
         self.used_vars = used_vars
         self.token_list = token_list
@@ -847,8 +842,8 @@ class Statement(Simple):
         if not token_list:
             return None, tok

-        statement = Statement(self._sub_module, "XXX" + self.code, [], [], [],
-                              token_list, start_pos, end_pos)
+        statement = Statement(self._sub_module, [], [], [],
+                              token_list, start_pos, end_pos)
         statement.parent = self.parent
         return statement, tok

@@ -932,9 +927,9 @@ class Param(Statement):
     __slots__ = ('position_nr', 'is_generated', 'annotation_stmt',
                  'parent_function')

-    def __init__(self, module, code, set_vars, used_funcs, used_vars,
+    def __init__(self, module, set_vars, used_funcs, used_vars,
                  token_list, start_pos, end_pos):
-        super(Param, self).__init__(module, code, set_vars, used_funcs,
+        super(Param, self).__init__(module, set_vars, used_funcs,
                                     used_vars, token_list, start_pos, end_pos)

         # this is defined by the parser later on, not at the initialization
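
The idea behind the change, sketched below as standalone Python: instead of building and storing a `code` string for every statement while parsing, keep only the token list and rebuild source text on demand. The class, names, and join-based reconstruction here are a minimal hypothetical illustration of that trade-off, not jedi's actual `parsing_representation.Statement`.

class Statement(object):
    """Minimal sketch: keep the tokens, not a duplicate source string."""

    # No 'code' slot: mirroring the patched __slots__, the source text is
    # no longer stored on every statement object.
    __slots__ = ('set_vars', 'used_funcs', 'used_vars', 'token_list')

    def __init__(self, set_vars, used_funcs, used_vars, token_list):
        self.set_vars = set_vars
        self.used_funcs = used_funcs
        self.used_vars = used_vars
        self.token_list = token_list

    def get_code(self):
        # Rebuild the code lazily from the tokens instead of concatenating
        # a string during parsing, as the removed `string` bookkeeping did.
        return ' '.join(str(tok) for tok in self.token_list)


# Hypothetical usage: tokens for ``x = foo(1)`` kept as plain strings.
stmt = Statement(set_vars=['x'], used_funcs=['foo'], used_vars=[],
                 token_list=['x', '=', 'foo', '(', '1', ')'])
assert stmt.get_code() == 'x = foo ( 1 )'

In the patched code the token list entries carry positions and Name objects rather than bare strings, so the real reconstruction is more involved; the sketch only shows why the eager `string` accumulation in the parser and the `code` slot on `Statement` could be dropped.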