Reverse the use of tokenize to get the command under the cursor

This commit is contained in:
David Halter
2012-04-06 04:02:34 +02:00
parent 1724a64e8d
commit 09fcff0f91
2 changed files with 54 additions and 109 deletions

View File

@@ -1,4 +1,5 @@
import re import re
import tokenize
import parsing import parsing
import evaluate import evaluate
@@ -36,113 +37,65 @@ class FileWithCursor(modules.File):
def get_row_path(self, column): def get_row_path(self, column):
""" Get the path under the cursor. """ """ Get the path under the cursor. """
def fetch_line(with_column=False): self._is_first = True
def fetch_line():
line = self.get_line(self._row_temp) line = self.get_line(self._row_temp)
if with_column: if self._is_first:
self._relevant_temp = line[:column - 1] self._is_first = False
line = line[:column - 1]
else: else:
self._relevant_temp += line + ' ' + self._relevant_temp line = line + '\n'
# add lines with a backslash at the end
while self._row_temp > 1: while self._row_temp > 1:
self._row_temp -= 1 self._row_temp -= 1
last_line = self.get_line(self._row_temp) last_line = self.get_line(self._row_temp)
if last_line and last_line[-1] == '\\': if last_line and last_line[-1] == '\\':
self._relevant_temp = last_line[:-1] + self._relevant_temp line = last_line[:-1] + ' ' + line
else: else:
break break
return line[::-1]
def fetch_name(is_first):
"""
:param is_first: This means, that there can be a point \
(which is a name separator) directly. There is no need for a name.
:type is_first: str
:return: The list of names and an is_finished param.
:rtype: (list, bool)
:raises: ParserError
"""
def get_char():
self._relevant_temp, char = self._relevant_temp[:-1], \
self._relevant_temp[-1]
return char
whitespace = [' ', '\n', '\r', '\\']
open_brackets = ['(', '[', '{']
close_brackets = [')', ']', '}']
strings = ['"', '"""', "'", "'''"]
is_word = lambda char: re.search('\w', char)
name = ''
force_point = False
force_no_brackets = False
is_finished = False
while True:
try:
char = get_char()
except IndexError:
is_finished = True
break
if force_point:
if char in whitespace:
continue
elif char != '.':
is_finished = True
break
if char == '.':
if not is_first and not name:
raise ParserError('No name after point (@%s): %s'
% (self._row_temp,
self._relevant_temp + char))
break
elif char in whitespace:
if is_word(name[0]):
force_point = True
elif char in close_brackets:
# TODO strings are not looked at here, they are dangerous!
# handle them!
# TODO handle comments
if force_no_brackets:
is_finished = True
break
level = 1
name = char + name
while True:
try:
char = get_char()
except IndexError:
while not self._relevant_temp:
# TODO can raise an exception, when there are
# no more lines
fetch_line()
char = get_char()
if char in close_brackets:
level += 1
elif char in open_brackets:
level -= 1
name = char + name
if level == 0:
break
elif is_word(char):
# TODO handle strings -> "asdf".join([1,2])
name = char + name
force_no_brackets = True
else:
is_finished = True
break
return name, is_finished
self._row_temp = self.row self._row_temp = self.row
self._relevant_temp = ''
fetch_line(True)
names = [] force_point = False
is_finished = False open_brackets = ['(', '[', '{']
while not is_finished: close_brackets = [')', ']', '}']
# do this not with tokenize, because it might fail
# due to single line processing gen = tokenize.generate_tokens(fetch_line)
name, is_finished = fetch_name(not bool(names)) string = ''
names.insert(0, name) level = 0
return names for token_type, tok, start, end, line in gen:
#print token_type, tok, line
if level > 0:
if tok in close_brackets:
level += 1
if tok in open_brackets:
level -= 1
elif tok == '.':
force_point = False
elif force_point:
if tok != '.':
break
elif tok in close_brackets:
level += 1
elif token_type in [tokenize.NAME, tokenize.STRING,
tokenize.NUMBER]:
force_point = True
else:
break
string += tok
return string[::-1]
def get_line(self, line):
    """Return the 1-indexed *line* of the cached source text.

    The source is split into lines lazily on first access and cached on
    ``self._line_cache``. Indexing follows Python semantics, so ``line=0``
    or a negative value wraps around to the end of the source.

    :param line: 1-indexed line number to fetch.
    :raises StopIteration: when *line* is past the last line — this lets
        the method double as a readline-style callback for ``tokenize``.
    """
    cache = self._line_cache
    if not cache:
        cache = self._line_cache = self.source.split('\n')
    try:
        return cache[line - 1]
    except IndexError:
        raise StopIteration()
def complete(source, row, column, file_callback=None): def complete(source, row, column, file_callback=None):
@@ -176,6 +129,7 @@ def complete(source, row, column, file_callback=None):
try: try:
path = f.get_row_path(column) path = f.get_row_path(column)
debug.dbg('completion_path', path)
except ParserError as e: except ParserError as e:
path = [] path = []
debug.dbg(e) debug.dbg(e)
@@ -183,7 +137,7 @@ def complete(source, row, column, file_callback=None):
result = [] result = []
if path and path[0]: if path and path[0]:
# just parse one statement # just parse one statement
r = parsing.PyFuzzyParser(".".join(path)) r = parsing.PyFuzzyParser(path)
#print 'p', r.top.get_code().replace('\n', r'\n'), r.top.statements[0] #print 'p', r.top.get_code().replace('\n', r'\n'), r.top.statements[0]
scopes = evaluate.follow_statement(r.top.statements[0], scope) scopes = evaluate.follow_statement(r.top.statements[0], scope)

View File

@@ -35,22 +35,13 @@ class File(object):
if not self.module_name and not self.source: if not self.module_name and not self.source:
raise AttributeError("Submit a module name or the source code") raise AttributeError("Submit a module name or the source code")
elif self.module_name: elif self.module_name:
return self.load_module() return self._load_module()
def load_module(self): def _load_module(self):
self._parser = parsing.PyFuzzyParser(self.source) self._parser = parsing.PyFuzzyParser(self.source)
del self.source # efficiency
return self._parser return self._parser
def get_line(self, line):
if not self._line_cache:
self._line_cache = self.source.split('\n')
if 1 <= line <= len(self._line_cache):
return self._line_cache[line - 1]
else:
return None
def find_module(point_path): def find_module(point_path):
""" """
Find a module with a path (of the module, like usb.backend.libusb10). Find a module with a path (of the module, like usb.backend.libusb10).