Fixed completions of global vars and tensorflow slowness, fixes #1228, #1116

Dave Halter
2018-10-02 15:28:28 +02:00
parent 075577d50c
commit 23b3327b1d
2 changed files with 16 additions and 7 deletions


@@ -178,17 +178,22 @@ class Script(object):
         )
         completions = completion.completions()

-        import_completions_count = len([
-            c for c in completions
-            if not c._name.tree_name
-            or c._name.tree_name.get_definition().type in ('import_name', 'import_from')
-        ])
-        if import_completions_count > 10:
+        def iter_import_completions():
+            for c in completions:
+                tree_name = c._name.tree_name
+                if tree_name is None:
+                    continue
+                definition = tree_name.get_definition()
+                if definition is not None \
+                        and definition.type in ('import_name', 'import_from'):
+                    yield c
+
+        if len(list(iter_import_completions())) > 10:
             # For now disable completions if there's a lot of imports that
             # might potentially be resolved. This is the case for tensorflow
             # and has been fixed for it. This is obviously temporary until we
             # have a better solution.
-            self._evaluator.infer_enabled = True
+            self._evaluator.infer_enabled = False

         debug.speed('completions end')
         return completions
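
The removed list comprehension dereferenced `c._name.tree_name.get_definition().type` unconditionally, but parso's `get_definition()` returns None for a name that is only referenced rather than defined, which appears to be what completions of global variables hit in #1228; the new generator skips such names and also checks the definition before reading its type. A minimal sketch of that parso behaviour (the two-line sample module is illustrative and not part of this commit):

import parso

# 'foo' is defined on line 1 and only referenced on line 2.
module = parso.parse('foo = 1\nfoo\n')
for name in module.get_used_names()['foo']:
    definition = name.get_definition()
    # get_definition() is None for the bare reference, so calling .type
    # on it unconditionally raised AttributeError in the old code path.
    print(name.start_pos, None if definition is None else definition.type)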


@@ -154,6 +154,9 @@ def global_define():
 #? int()
 global_var_in_func

+#? ['global_var_in_func']
+global_var_in_f
+

 def funct1():
     # From issue #610
@@ -175,6 +178,7 @@ def init_global_var_predefined():
 #? int() None
 global_var_predefined

 # -----------------
 # within docstrs
 # -----------------
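
The new test case has a direct end-user counterpart: completing a partially typed module-level global that is only created through a `global` statement inside a function. Below is a hedged sketch against the 2018-era jedi API (`jedi.Script(source, line, column).completions()`); the expected name follows from the `#? ['global_var_in_func']` annotation above rather than from output verified here.

import jedi

source = '''\
def global_define():
    global global_var_in_func
    global_var_in_func = 3

global_var_in_f'''

# Complete at the end of the partial name on line 5.
script = jedi.Script(source, 5, len('global_var_in_f'))
print([c.name for c in script.completions()])
# With this fix the call should no longer raise, and the list should
# include 'global_var_in_func'.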