1
0
forked from VimPlug/jedi

Don't start parsing in our own API.

This commit is contained in:
Dave Halter
2017-03-20 08:44:52 +01:00
parent 53b4e78a9b
commit 9dedb9ff68
4 changed files with 10 additions and 9 deletions

View File

@@ -134,7 +134,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
     p = parser.ParserWithRecovery(grammar, code, start_parsing=False)
     try:
-        p.parse(tokenizer=tokenize_without_endmarker(code))
+        p.parse(tokens=tokenize_without_endmarker(code))
     except EndMarkerReached:
         return Stack(p.pgen_parser.stack)
     raise SystemError("This really shouldn't happen. There's a bug in Jedi.")

View File

@@ -323,7 +323,7 @@ class DiffParser(object):
         parsed_until_line = self._nodes_stack.parsed_until_line
         lines_after = self._parser_lines_new[parsed_until_line:]
         #print('parse_content', parsed_until_line, lines_after, until_line)
-        tokenizer = self._diff_tokenize(
+        tokens = self._diff_tokenize(
             lines_after,
             until_line,
             line_offset=parsed_until_line
@@ -333,7 +333,7 @@ class DiffParser(object):
             source='\n',
             start_parsing=False
         )
-        return self._active_parser.parse(tokenizer=tokenizer)
+        return self._active_parser.parse(tokens=tokens)

     def _diff_tokenize(self, lines, until_line, line_offset=0):
         is_first_token = True

View File

@@ -79,8 +79,9 @@ def parse(code, grammar=None, error_recovery=True, start_symbol='file_input'):
     else:
         kwargs = dict(start_symbol=start_symbol)
         parser = Parser
-    p = parser(grammar, code, tokens=tokens, **kwargs)
+    p = parser(grammar, code, start_parsing=False, **kwargs)
+    module = p.parse(tokens=tokens)
     if added_newline:
         p._remove_last_newline()
-    return p.get_root_node()
+    return module

View File

@@ -173,8 +173,8 @@ class ParserWithRecovery(Parser):
             start_parsing=start_parsing
         )

-    def parse(self, tokenizer):
-        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokenizer))
+    def parse(self, tokens):
+        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokens))
         root_node.path = self._module_path
         return root_node
@@ -242,8 +242,8 @@ class ParserWithRecovery(Parser):
             stack[start_index:] = []
             return failed_stack

-    def _tokenize(self, tokenizer):
-        for typ, value, start_pos, prefix in tokenizer:
+    def _tokenize(self, tokens):
+        for typ, value, start_pos, prefix in tokens:
             # print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
             if typ == DEDENT:
                 # We need to count indents, because if we just omit any DEDENT,