1
0
forked from VimPlug/jedi

The fast parser now works faster in the case of `for` flows followed by a simple_stmt.

This commit is contained in:
Dave Halter
2015-02-14 18:57:04 +01:00
parent a3b32729a7
commit db31e0e37d
2 changed files with 11 additions and 7 deletions

View File

@@ -346,6 +346,9 @@ class FastParser(use_metaclass(CachedFastParser)):
self.current_node = self._get_node(code_part, source[start:], self.current_node = self._get_node(code_part, source[start:],
line_offset, nodes, not is_first) line_offset, nodes, not is_first)
is_first = False is_first = False
else:
debug.dbg('While parsing %s, line %s slowed down the fast parser',
self.module_path, line_offset)
line_offset += code_part.count('\n') line_offset += code_part.count('\n')
start += len(code_part) start += len(code_part)
@@ -439,9 +442,11 @@ class FastTokenizer(object):
self._expect_indent = False self._expect_indent = False
elif typ == DEDENT: elif typ == DEDENT:
self._indent_counter -= 1 self._indent_counter -= 1
if self._in_flow and self._indent_counter == self._flow_indent_counter: if self._in_flow:
self._in_flow = False # TODO add <= for flows without INDENT in classes.
elif not self._in_flow: if self._indent_counter == self._flow_indent_counter:
self._in_flow = False
else:
self._closed = True self._closed = True
return current return current
@@ -450,11 +455,10 @@ class FastTokenizer(object):
if self.previous[0] in (NEWLINE, INDENT, DEDENT) \ if self.previous[0] in (NEWLINE, INDENT, DEDENT) \
and not self._parentheses_level and typ != INDENT: and not self._parentheses_level and typ != INDENT:
# Check for NEWLINE, which symbolizes the indent. # Check for NEWLINE, which symbolizes the indent.
# print('X', repr(value), tokenize.tok_name[typ])
if not self._in_flow: if not self._in_flow:
self._in_flow = value in FLOWS if value in FLOWS:
if self._in_flow:
self._flow_indent_counter = self._indent_counter self._flow_indent_counter = self._indent_counter
self._first_stmt = False
elif value in ('def', 'class', '@'): elif value in ('def', 'class', '@'):
# The values here are exactly the same check as in # The values here are exactly the same check as in
# _split_parts, but this time with tokenize and therefore # _split_parts, but this time with tokenize and therefore

View File

@@ -34,7 +34,7 @@ from inspect import cleandoc
from itertools import chain from itertools import chain
import textwrap import textwrap
from jedi._compatibility import (next, Python3Method, encoding, is_py3, from jedi._compatibility import (Python3Method, encoding, is_py3,
literal_eval, use_metaclass, unicode) literal_eval, use_metaclass, unicode)
from jedi import cache from jedi import cache