diff --git a/jedi/api/__init__.py b/jedi/api/__init__.py
index 01ed0a34..5811cc8f 100644
--- a/jedi/api/__init__.py
+++ b/jedi/api/__init__.py
@@ -288,7 +288,7 @@ class Script(object):
 
     @memoize_default()
     def _get_under_cursor_stmt(self, cursor_txt):
-        tokenizer = source_tokens(cursor_txt, line_offset=self._pos[0] - 1)
+        tokenizer = source_tokens(cursor_txt)
         r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer)
         try:
             # Take the last statement available.
@@ -304,7 +304,7 @@ class Script(object):
         else:
             pos = user_stmt.start_pos
 
-        stmt.move(0, pos[1])
+        stmt.move(pos[0] - 1, pos[1])  # Moving the offset.
         stmt.parent = self._parser.user_scope()
         return stmt
 
diff --git a/jedi/parser/fast.py b/jedi/parser/fast.py
index 9f48b50b..582f19ce 100644
--- a/jedi/parser/fast.py
+++ b/jedi/parser/fast.py
@@ -391,7 +391,7 @@ class FastParser(use_metaclass(CachedFastParser)):
                         nodes.remove(node)
                         break
             else:
-                tokenizer = FastTokenizer(parser_code, 0)
+                tokenizer = FastTokenizer(parser_code)
                 self.number_parsers_used += 1
                 #print('CODE', repr(source))
                 p = Parser(self._grammar, parser_code, self.module_path, tokenizer=tokenizer)
@@ -410,10 +410,9 @@ class FastTokenizer(object):
     """
    Breaks when certain conditions are met, i.e. a new function or class opens.
     """
-    def __init__(self, source, line_offset=0):
-        # TODO remove the whole line_offset stuff, it's not used anymore.
+    def __init__(self, source):
         self.source = source
-        self._gen = source_tokens(source, line_offset)
+        self._gen = source_tokens(source)
         self._closed = False
 
         # fast parser options
diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index 651f32b5..d70c2784 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -139,14 +139,14 @@
 ALWAYS_BREAK_TOKEN = (';', 'import', 'from', 'class', 'def', 'try', 'except',
                       'finally', 'while', 'return')
 
 
-def source_tokens(source, line_offset=0):
+def source_tokens(source):
     """Generate tokens from a the source code (string)."""
     source = source + '\n'  # end with \n, because the parser needs it
     readline = StringIO(source).readline
-    return generate_tokens(readline, line_offset)
+    return generate_tokens(readline)
 
 
-def generate_tokens(readline, line_offset=0):
+def generate_tokens(readline):
     """
     A heavily modified Python standard library tokenizer.
@@ -156,7 +156,7 @@
 
     paren_level = 0  # count parentheses
     indents = [0]
-    lnum = line_offset
+    lnum = 0
     numchars = '0123456789'
     contstr = ''
     contline = None
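
The net effect of this patch: tokenization always starts at line offset 0, and the statement parsed from the text under the cursor is shifted to its absolute position afterwards via stmt.move(pos[0] - 1, pos[1]), instead of pre-shifting every token inside the tokenizer. A minimal sketch of that arithmetic, using a toy Node class rather than jedi's actual parser nodes (the class, its move() signature, and the example positions are illustrative assumptions):

    class Node(object):
        """Toy stand-in for a parsed statement with an absolute position."""
        def __init__(self, line, column):
            self.start_pos = (line, column)

        def move(self, line_offset, column_offset):
            # Shift the node in place, mirroring what stmt.move() does
            # in the diff above.
            line, column = self.start_pos
            self.start_pos = (line + line_offset, column + column_offset)

    # A snippet tokenized in isolation starts at line 1 (lines are 1-based,
    # so an lnum counter starting at 0 yields 1 for the first line) ...
    stmt = Node(1, 0)
    # ... and is then moved to where it actually sits in the file, e.g. if
    # the user statement begins at line 42, column 4:
    pos = (42, 4)
    stmt.move(pos[0] - 1, pos[1])
    assert stmt.start_pos == (42, 4)

Subtracting 1 from pos[0] converts the statement's 1-based local line into the offset needed to reach its absolute line, which is why both FastTokenizer and source_tokens can drop their line_offset parameters entirely.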