232 Commits

Author SHA1 Message Date
Dave Halter
a01e4c6b37 Merge remote-tracking branch 'origin/master' into dev 2014-07-23 00:47:30 +02:00
Dave Halter
5e9d9573d5 Don't warn on addition of an int literal plus an unknown int number. 2014-07-22 16:44:10 +02:00
Dave Halter
f7c8c43fbc check not only - but also + for numbers. 2014-07-22 16:18:57 +02:00
Dave Halter
96ca596cc2 fix the operation test where static analysis reports additions of ints with non-ints. 2014-07-22 16:05:46 +02:00
Dave Halter
852cdad754 Operator fixes. Subclass of Simple, now. 2014-07-22 16:02:34 +02:00
Dave Halter
15f42d93d7 it should be possible to find the origin of an operation, if it's a faulty one (static analysis) 2014-07-22 14:52:58 +02:00
Dave Halter
50ceef6e09 1 + '1' TypeErrors are now detected, but not shown in the right place 2014-07-22 10:44:56 +02:00
Dave Halter
52bbedd4a8 renamed company-jedi to anaconda-mode, cc @proofit404 2014-07-22 10:33:49 +02:00
Dave Halter
037d5fa02a static analysis tests for the 1 + '1' test 2014-07-22 01:40:36 +02:00
Dave Halter
d350c1fa30 Merge branch 'linter' of github.com:davidhalter/jedi into linter 2014-07-21 17:40:25 +02:00
Dave Halter
b0d5d96b20 Merge pull request #435 from hattya/windows
fixes for Windows
2014-07-21 16:38:53 +02:00
Dave Halter
ec690b9ec5 change the default linter command from 'force' to 'linter' 2014-07-21 16:15:42 +02:00
Akinori Hattori
712e5653d8 use _ctypes for extension tests 2014-07-19 14:39:14 +09:00
Akinori Hattori
10b7ed967d organize imports 2014-07-19 10:41:19 +09:00
Akinori Hattori
00b8263859 fix buildout test for Windows 2014-07-19 10:39:45 +09:00
Akinori Hattori
11bc105207 skip readline test on Windows 2014-07-19 10:37:46 +09:00
Akinori Hattori
68150f2814 fix module name of integration tests for Windows 2014-07-19 10:36:31 +09:00
Akinori Hattori
5b15c0ba84 fix package detection for Windows 2014-07-19 10:34:15 +09:00
Akinori Hattori
2696d95d70 fix dotted_from_fs_path for Windows 2014-07-19 10:33:08 +09:00
Dave Halter
2616143d10 unicode issues with docstrings should be gone, fixes #420 2014-07-18 17:43:25 +02:00
Dave Halter
e07f51387f added a test for hex value issues in docstrings, see #427 2014-07-18 17:09:44 +02:00
Dave Halter
ee1c5041ed use the new implementation of splitlines all over the code, fixes #424 2014-07-18 16:59:22 +02:00
Dave Halter
ffaacbefbc new splitlines implementation to get rid of the ugly splitlines we now have as well as (partially) the issue with form feeds 2014-07-18 16:52:55 +02:00
Dave Halter
0f665bf436 test for #424, issues with form feeds 2014-07-18 16:52:25 +02:00
Dave Halter
81f3b940e8 dicts should not be used to check against in get_defined_names, because they cannot contain a defined name (lists and tuples can) so just ignore them., fixes #417 2014-07-18 15:37:10 +02:00
Dave Halter
4626a8b6df test for #417 2014-07-18 15:23:47 +02:00
Dave Halter
21341283ca another parser issue, very much related to the last one. sometimes a None element was used as a token. Fixes #418 2014-07-18 15:10:10 +02:00
Dave Halter
0dea47b260 def/class keywords after an opening parentheses led to empty arrays without closing brackets, tests & fixes #416 2014-07-18 12:53:06 +02:00
Dave Halter
3be5220bf1 parentheses checks in the fast parser (tokenizer) have been improved to really cover all cases. real fix for davidhalter/jedi-vim#288 2014-07-18 10:59:28 +02:00
Dave Halter
03226783dd parentheses should be ignored when calculating the indent of a new block in the fast parser, fixes davidhalter/jedi-vim#288 2014-07-18 09:11:06 +02:00
Dave Halter
08f7a439c0 merged dev and linter 2014-07-17 23:17:49 +02:00
Dave Halter
a1bc644bfb fix a small issue in the alternative test runner 2014-07-17 22:47:54 +02:00
Dave Halter
9eec2b2794 Merge pull request #434 from hattya/last-part
keep newline at end of code
2014-07-17 20:21:21 +04:30
Akinori Hattori
e14b144199 keep newline at end of code 2014-07-17 20:37:25 +09:00
Dave Halter
4852c7840f Merge pull request #433 from hattya/pytest
update pytest to latest stable
2014-07-17 14:28:31 +04:30
Dave Halter
ef58f0e8e0 Merge pull request #430 from hattya/dev
improve fast parser for incremental parsing
2014-07-17 14:24:17 +04:30
Akinori Hattori
1eeb5677fa update pytest to latest stable 2014-07-17 18:35:31 +09:00
Akinori Hattori
3f75ea5cc7 skip newline at end of code 2014-07-17 18:29:00 +09:00
Dave Halter
53a32d8304 test for parser issues with parentheses indentation, see davidhalter/jedi-vim#288 2014-07-15 17:05:16 +02:00
Akinori Hattori
feae67484c CRLF should be also treated as blank line 2014-07-08 20:21:45 +09:00
Akinori Hattori
da89b66594 use generator to reduce memory usage 2014-07-06 11:53:22 +09:00
Akinori Hattori
1650f65507 reduce loops for finding sub parser 2014-07-06 11:29:24 +09:00
Akinori Hattori
aab4891c4e reduce regex searches and compile pattern 2014-07-06 11:11:23 +09:00
Akinori Hattori
0610ef16ae use del instead of assigning empty iterator 2014-07-06 11:07:19 +09:00
Dave Halter
22e5574a91 Remove some get_defined_names methods, that are not needed anymore. 2014-07-03 12:12:50 +02:00
Dave Halter
1fd7acef7a finally able to remove _get_defined_names_for_position 2014-07-03 11:53:51 +02:00
Dave Halter
c1ed3bf38a cleanup NameFinder.scopes, fully use scope_names_generator. 2014-07-03 11:48:26 +02:00
Dave Halter
ea370a083d more consequent usage of scope_names_generator 2014-07-03 11:41:33 +02:00
Dave Halter
a7e4d81692 also add scope_names_generator to the iterable module classes, as well as cleaning up some old scope_names_generator stuff 2014-07-02 18:58:31 +02:00
Dave Halter
3264a1815e Merge pull request #407 from hattya/dev
fix virtualenv support
2014-07-02 21:16:51 +04:30
Dave Halter
13ada3154b replace get_defined_names with scope_names_generator in a lot of places (the cleanup still needs to be done, though). 2014-07-02 16:12:49 +02:00
Dave Halter
749d0121fc change test results, because of inserts in another test file 2014-07-02 13:15:36 +02:00
Dave Halter
23008d8a19 use scope_names_generator in completion api as well. hopefully the last __file__ related issue :) 2014-07-02 13:12:37 +02:00
Dave Halter
ea72b46fe8 Also update the changelog temporarily. 2014-07-02 12:49:36 +02:00
Dave Halter
bb50c285f1 Bump release to 0.8.1-final0, because we're doing a release soon. 2014-07-02 12:47:44 +02:00
Dave Halter
7d8c1e8810 pre-alpha notice in static analysis API 2014-07-02 12:46:57 +02:00
Dave Halter
8d395a2ef1 scope_names_generator now works with modules perfectly well. 2014-07-02 12:41:16 +02:00
Dave Halter
789d48b7e3 fix issues introduced with defined_names. 2014-07-02 11:01:42 +02:00
Dave Halter
3865c1a844 Fixed __file__ issues by always applying a ModuleWrapper in the global scope lookup. 2014-07-01 15:35:21 +02:00
Dave Halter
8d63e6f6e7 somehow temporary solution to enable completion of __file__ 2014-07-01 15:10:32 +02:00
Dave Halter
f7a384bf18 fix a multi line issue of var_args with an error token or newline in the beginning. 2014-07-01 12:45:34 +02:00
Dave Halter
4ba1c95317 multiple files should be scannable in the main analysis API. 2014-07-01 02:17:28 +02:00
Dave Halter
ede685c717 string prefixes are now recognized by the backwards tokenizer 2014-07-01 01:19:07 +02:00
Dave Halter
5099c44593 exceptions were ignored in jedis static analysis. They shouldn't be. 2014-06-30 15:57:24 +02:00
Dave Halter
58d7dac92f fix dict issue with **kwargs use 2014-06-30 15:54:49 +02:00
Dave Halter
0b99473886 ExecutedParams should never be additionally faked, even if they are the first params. They have been legitimately created by a caller. 2014-06-30 15:22:53 +02:00
Dave Halter
0d3ea4dfb4 fix a ModuleWrapper with StarImports. Now all modules are wrapped. 2014-06-29 21:04:52 +02:00
Dave Halter
5b7c869323 types also add to completions, also for compiled objects. removed a few lines of code that complicated the process as well. 2014-06-28 12:09:43 +02:00
Dave Halter
7d73e571bb json.load[s] shouldn't return any results. fixed by overwriting the method 2014-06-27 11:56:40 +02:00
Dave Halter
cf1fd691da custom copy.copy and copy.deepcopy implementations to not confuse autocompletion (just return the first param) 2014-06-27 11:49:26 +02:00
Dave Halter
8cd7f9a288 std -> stdlib 2014-06-27 11:24:23 +02:00
Dave Halter
aba4a16ae3 fix indexing issues for multiple index options 2014-06-27 11:23:46 +02:00
Dave Halter
4c849f5969 make it possible to access pdb with 'python -m jedi force --pdb' 2014-06-26 16:14:39 +02:00
Dave Halter
d444ef9e15 setdefault fix 2014-06-26 15:23:20 +02:00
Dave Halter
1c9058ce6b Also issue warnings if setattr is used in a class instead of an error 2014-06-26 13:40:15 +02:00
Dave Halter
4238538df4 Add __getattr__ checks with proper inheritance. 2014-06-26 12:56:01 +02:00
Dave Halter
a936cea987 jedi issues now warnings instead of errors for AttributeErrors that happen in instances with __getattr__/__getattribute__ methods 2014-06-26 12:21:19 +02:00
Dave Halter
7e0edc4776 preparation for warnings in static analysis 2014-06-26 11:57:44 +02:00
Dave Halter
bdcbac160b fix string/array (sequence) multiplications with integer. 2014-06-26 00:49:56 +02:00
Dave Halter
47205dd7f3 change the implementation of compiled.load_module and always use the sys.modules cache after an import, because it's easier. Doesn't require any logic. 2014-06-25 18:57:07 +02:00
Dave Halter
e5efd6e5c8 add a setting auto_import_modules to fix autocompletion for modules that use setattr and companions a lot. fixes #151 2014-06-25 17:14:31 +02:00
Dave Halter
01869e4100 make a translation from file system paths to dotted paths possible 2014-06-25 16:33:25 +02:00
Dave Halter
718df569ea memoize sys_path modifications 2014-06-25 15:04:48 +02:00
Dave Halter
6e82fa31e1 submodules need relative imports not absolute 2014-06-25 11:08:29 +02:00
Dave Halter
44238a9f92 submodules are automatically indexed without actually importing them. fixes #413.
However, this is not a 100% correct Python behavior. Python behavior would be to follow ALL imports in all modules (recursively) and check if the module was imported. However, that's a lot of work, that would slow down autocompletion. For now it's better to have no false positives in flaking and to ignore a few attribute errors.
2014-06-25 01:39:43 +02:00
Dave Halter
034a818863 repr improvements 2014-06-24 13:42:40 +02:00
Dave Halter
393833059a fixed inheritance for exception checks 2014-06-23 13:01:12 +02:00
Dave Halter
c1181a0459 fix except: usage in analysis 2014-06-23 12:13:10 +02:00
Dave Halter
9348d4bb6c jedi should be able to detect exceptions even in except X: statements 2014-06-23 12:07:51 +02:00
Dave Halter
e106e4ffc8 fixed for loop in exception issue combined with usage of an exception variable 2014-06-22 23:32:07 +02:00
Dave Halter
401914e91c exception elements are always instances 2014-06-22 16:25:42 +02:00
Dave Halter
3e0f719915 changed the way how the dynamic param function searches its parent scope, to enable smooth list comprehension following 2014-06-22 13:48:30 +02:00
Dave Halter
b7bf8d515c support for completions on return statements. 2014-06-22 12:05:22 +02:00
Dave Halter
d752907290 temporary solution for completions in asserts 2014-06-22 11:27:28 +02:00
Dave Halter
ddca14980e introduce an is_scope function to make it easier to work with scopes 2014-06-20 17:47:42 +02:00
Dave Halter
3ee3a04bcb fix list comprehension issues in nested parentheses. 2014-06-20 17:29:30 +02:00
Dave Halter
2fc404f99d fix issue with list comprehensions in function calls 2014-06-20 16:28:31 +02:00
Dave Halter
8c924afdb8 manifest was missing the fake paths, which makes the last release a bad one. This means basically that in the last (few?) release(s) the stdlib was not correctly supported. 2014-06-20 16:25:05 +02:00
Dave Halter
ea271c8047 replace _sre fake module literals with undefined types. 2014-06-20 11:18:54 +02:00
Dave Halter
be3ac0b1c0 fix issue with reordering var_args 2014-06-20 00:35:25 +02:00
Dave Halter
bbc5ad748d Merge pull request #423 from syohex/fix-package_data
Fix package_data '*.pym' paths
2014-06-19 15:07:08 +04:30
Dave Halter
36fbb6cd3e reorder var_args if named arguments are in front of *args. 2014-06-19 12:18:24 +02:00
Syohei YOSHIDA
605ab9c6f5 Fix package_data '*.pym' paths 2014-06-19 19:10:58 +09:00
Dave Halter
6edff1d952 Merge pull request #403 from mfussenegger/dev_buildout
detect buildout and add buildout eggs to sys.path
2014-06-19 02:05:31 +04:30
Mathias Fussenegger
bf43fcf1c6 detect buildout and add buildout eggs to sys.path 2014-06-18 18:30:11 +02:00
Dave Halter
a373818965 fix function execution mutable list issue 2014-06-16 17:12:27 +02:00
Dave Halter
eb1f299444 function repr should only include the decorated function if it actually is one. 2014-06-16 13:06:53 +02:00
Dave Halter
8aeac478a5 message improvement for param failure 2014-06-13 12:20:08 +02:00
Dave Halter
acfa40afa7 *args without self but still an implicit self from a method decorator 2014-06-12 22:42:15 +02:00
Dave Halter
371ec888e9 further test for list comprehensions 2014-06-12 11:20:46 +02:00
Dave Halter
d5758adb2b fix list comprehensions. they were not implemented in a good way 2014-06-12 11:10:10 +02:00
Dave Halter
f5f8d99233 Merge pull request #422 from fbergroth/fix-completion-params
Fix completion params
2014-06-12 13:10:40 +04:30
Dave Halter
f8b79b3dd0 work in progress refactoring to make ListComprehension a subclass of ForFlow 2014-06-12 10:13:49 +02:00
Fredrik Bergroth
d3ac1e902e Always cast pr to er in _follow_statements_imports 2014-06-11 23:49:36 +02:00
Dave Halter
43e54b6173 list comprehensions should be able to serve as an input for dynamic params as well. 2014-06-11 21:54:18 +02:00
Dave Halter
63868feb5d hasattr checks working now, #408 2014-06-10 16:08:53 +02:00
Dave Halter
bba120d906 hasattr test for static analysis, it's a common idiom. 2014-06-10 11:15:59 +02:00
Dave Halter
c6aea92753 fix issues with tokens in expression list 2014-06-10 11:03:36 +02:00
Dave Halter
e6331f8ac8 fix issues with the previous commits (broken tests) 2014-06-10 01:13:37 +02:00
Dave Halter
081fa79d9b fix issues with generator comprehensions used directly with a send() call or something similar 2014-06-10 00:56:51 +02:00
Dave Halter
9cffbef608 tests for generator to tuple assignment as well as generator comprehensions 2014-06-10 00:40:38 +02:00
Dave Halter
af801ef9b4 make generator comprehensions work 2014-06-10 00:36:36 +02:00
Dave Halter
cd5b8aebfd fix issues with equal names before and after listcomprehension 2014-06-09 20:28:24 +02:00
Dave Halter
0b926ca454 get rid of is_list_comp boolean in favor of a direct check of ListComprehensionFlow 2014-06-09 20:19:31 +02:00
Dave Halter
dd8e4341db create a ListComprehensionFlow, to make the distinction between ForFlow and list comprehensions clearer 2014-06-09 20:09:53 +02:00
Dave Halter
c48146093e test: list comprehension name resolve should not include its own definitions 2014-06-09 12:53:17 +02:00
Dave Halter
496671966b instance issue with param static analysis 2014-06-09 01:59:54 +02:00
Dave Halter
920eb3b06a fix a default argument issue 2014-06-08 14:19:22 +02:00
Dave Halter
c8b7d79b54 erroneous star arguments warning 2014-06-07 13:10:19 +02:00
Dave Halter
62db176e5e cleanup 2014-06-07 12:36:16 +02:00
Dave Halter
f061de0f74 Wrong var_args with a star star function. 2014-06-06 16:49:53 +02:00
Dave Halter
cb430c4c36 add a merged array class to account for array additions 2014-06-06 02:51:48 +02:00
Dave Halter
8798f5b1d7 add comments to the *args/**kwargs merge code 2014-06-05 12:08:08 +02:00
Dave Halter
cf7b5b6b2b disable two failing multiple value tests. These are things jedi is not able to detect at the moment. It's not a huge problem, but it would be very nice if we could detect these as well. But there would be a need of restructuring var_args unpacking. 2014-06-05 11:54:46 +02:00
Dave Halter
6f83eb65ce raise multiple key errors also if they are an input to kwargs 2014-06-05 10:35:44 +02:00
Dave Halter
acb4959a6a temporary very unfinished solution for the *args/**kwargs combination problem, if they are used in common with dynamic params. This doesn't solve the issue entirely, but it's at least a start and will probably solve all autocompletion issues. However, static analysis needs a little bit more than that. 2014-06-04 17:18:09 +02:00
Dave Halter
945888a535 fix for kwargs params 2014-06-01 13:52:21 +02:00
Dave Halter
933e231d74 small update on multiple value named argument type error 2014-06-01 11:34:20 +02:00
Dave Halter
b8525c7e1e get dicts partially working 2014-06-01 11:24:24 +02:00
Dave Halter
248cca2e5e fix issues with empty *args as inputs 2014-05-31 11:03:37 +02:00
Dave Halter
f4a508ac53 handle *args arguments the right way. 2014-05-29 20:53:51 +02:00
Dave Halter
b24178b275 multiple values refactoring in params 2014-05-29 16:59:56 +02:00
Dave Halter
1899f16a4a if there's a func listener, stop the execution of a function. This solves the issue of nested *args that were reported as having too many params in static analysis. 2014-05-29 12:15:07 +02:00
Dave Halter
4f66591227 nested functions with *args should only raise an error if there's well defined input. 2014-05-28 14:35:48 +02:00
Dave Halter
a695166585 add a new static_analysis file to test star arguments separately 2014-05-28 11:08:44 +02:00
Dave Halter
11e867d2c1 fix calling_var_args with tuples 2014-05-28 11:00:24 +02:00
Dave Halter
23edfd27ad detect origin of a call in case of missing params. This is important, because the user doesn't care about decorators in between. 2014-05-28 02:30:35 +02:00
Dave Halter
b7aaec50e3 add ExecutedParam instead of using copy.copy 2014-05-28 00:50:14 +02:00
Dave Halter
40c2d64bac use the internal api to get a param name instead of doing crazy stuff 2014-05-27 15:04:22 +02:00
Dave Halter
f3e986a285 add multiple values for keyword type error detection 2014-05-26 18:40:02 +02:00
Dave Halter
720907531b small corrections in too few argument errors 2014-05-26 17:38:14 +02:00
Dave Halter
425f7a8b64 better error reporting for static analysis 2014-05-26 17:31:50 +02:00
Dave Halter
3a946ab549 fix for the newly created keyword/default tests 2014-05-26 12:37:47 +02:00
Dave Halter
f71e2d5b8f keyword/default param tests 2014-05-26 11:21:16 +02:00
Dave Halter
e46979c354 improved static analysis found mistakes in its own test suite 2014-05-26 10:51:19 +02:00
Dave Halter
ad120f529d param.py doesn't seem to be needing some default value checks 2014-05-26 10:50:41 +02:00
Dave Halter
0d0d123393 Merge pull request #415 from pombredanne/patch-1
Fixed comment typo
2014-05-25 18:29:42 +04:30
Philippe Ombredanne
6f69d7d17f Fixed comment typo 2014-05-25 15:38:57 +02:00
Dave Halter
a621662440 some param refactorings. 2014-05-23 19:54:27 +02:00
Dave Halter
d9f17beea5 unexpected keyword arguments detection 2014-05-23 15:34:16 +02:00
Dave Halter
acd836f30d new named arguments tests for static analysis 2014-05-23 15:18:55 +02:00
Dave Halter
9214e0b358 move a closure out of get_params 2014-05-22 10:58:38 +02:00
Dave Halter
9d4dc546ca first version of too few params detection 2014-05-21 13:01:12 +02:00
Dave Halter
4ecc150d85 first version of too many arguments detection 2014-05-21 12:30:51 +02:00
Dave Halter
a252d825f2 remove an unimportant test case that showed strange unreproducible behavior on travis. 2014-05-20 16:47:23 +02:00
Dave Halter
f57b53bbe2 check for 'if foo is not None' checks in the NameFinder. Solves the issues with the subprocess library. 2014-05-20 16:23:46 +02:00
Dave Halter
79556a7935 finder docstring and naming improvements 2014-05-20 15:17:07 +02:00
Dave Halter
ad762f674e renaming of an unclear variable 2014-05-20 14:01:43 +02:00
Dave Halter
dd50001ed1 update pickling version, because we have changed some things about the KeywordStatement a while ago 2014-05-19 13:49:52 +02:00
Dave Halter
77baabb93b % operation returned both left and right side, but only the left side is really important. 2014-05-19 13:26:12 +02:00
Dave Halter
a717981679 more extensive __file__ tests, #408 2014-05-19 01:03:40 +02:00
Dave Halter
08b48807e9 a (temporary) solution for the __file__ access issues in imported modules, see #408 2014-05-19 01:01:56 +02:00
Dave Halter
709c53a679 empty reversed issue, fixes traceback of #408 2014-05-17 12:32:18 +02:00
Dave Halter
41f32f21ea Merge pull request #411 from jorgenschaefer/fix-deprecation-docstring-typo
Fix deprecation docstring typo
2014-05-17 13:04:45 +04:30
Jorgen Schaefer
1cbbc00089 Fix names in docstrings and DeprecationWarnings.
Update a number of docstrings and DeprecationWarnings to refer to
the correct methods or attributes.
2014-05-17 10:21:44 +02:00
Dave Halter
06bae0e835 fix test issue with python 2.7 2014-05-16 18:03:36 +02:00
Dave Halter
ecf9043d97 Improved error messages for AttributeErrors, however not a final version #408 2014-05-16 18:02:33 +02:00
Dave Halter
ca2cc65686 improved import positioning errors in static analysis 2014-05-16 17:20:45 +02:00
Dave Halter
9bf50e6022 better knowledge in the importer where the import names originate from 2014-05-16 17:05:43 +02:00
Dave Halter
87704ec16a custom message capability for analysis 2014-05-16 15:46:08 +02:00
Dave Halter
11b7f9f7f6 decorators should also be included in the static analysis 2014-05-16 15:33:21 +02:00
Dave Halter
0f7a17090c static analysis start positions are now tested 2014-05-16 15:03:59 +02:00
Dave Halter
552502a2e9 list comprehension static analysis test 2014-05-16 14:31:53 +02:00
Dave Halter
4e596060b9 test for is_nested failure 2014-05-16 13:00:13 +02:00
Dave Halter
8e27ed556e fix returns/flow command edge cases 2014-05-16 12:33:00 +02:00
Dave Halter
d59e21f43c new way of gathering statements to evaluate for static analysis 2014-05-16 12:23:09 +02:00
Dave Halter
857a9b7621 don't raise attribute NameErrors in all for loop name definitions. 2014-05-15 12:39:00 +02:00
Dave Halter
bcab821df9 linter output changes - #408 2014-05-15 00:45:50 +02:00
Dave Halter
b54d46374c recursive file paths for the temporary linter api - #408 2014-05-15 00:39:42 +02:00
Dave Halter
5e2bb0ef9b Using python -m jedi force <path> you can use the linter for now. 2014-05-13 16:44:46 +02:00
Dave Halter
99340dd2a1 few NameError tests 2014-05-13 16:17:25 +02:00
Dave Halter
9bcc4f8fd2 static analysis is now able to tell the difference between NameError/AttributeError 2014-05-13 16:14:32 +02:00
Dave Halter
7632a7d120 sorting the statements for analysis. we need to be able to reproduce results. 2014-05-13 15:56:41 +02:00
Dave Halter
876942d2b8 Small adjustment in Interpreter completion to be compatible with static analysis 2014-05-13 01:39:33 +02:00
Dave Halter
a2b483b4f5 None issue fix for static analysis 2014-05-13 01:21:32 +02:00
Dave Halter
00e43d4585 except can also catch multiple exceptions in one statement 2014-05-12 18:46:17 +02:00
Dave Halter
7096a570bf try/except test for static analysis (duck typing should not cause jedi to report mistakes) 2014-05-12 18:34:38 +02:00
Dave Halter
96386b4578 if something catches an exception, ignore that report 2014-05-12 18:10:17 +02:00
Dave Halter
64af9524b7 simple generator static analysis subscript check 2014-05-12 16:06:28 +02:00
Dave Halter
b6ec589997 refactor general array lookup method get_index_types 2014-05-12 15:23:48 +02:00
Dave Halter
70d85d1b3a strip_imports -> follow_imports 2014-05-12 11:18:47 +02:00
Dave Halter
e5fe726862 imports cleanup & documentation 2014-05-12 11:15:17 +02:00
Dave Halter
02d1e1aa42 fix static analysis for nested imports 2014-05-12 11:03:27 +02:00
Dave Halter
13949ec145 move is_nested check from evaluate.imports to the parser 2014-05-12 11:02:57 +02:00
Dave Halter
04855e9452 pytest -> ignore static_analysis folder 2014-05-12 01:59:00 +02:00
Dave Halter
63155808df interpreter cleanup, use proper parents 2014-05-12 01:55:48 +02:00
Dave Halter
3f2e737702 static analysis shouldn't report the exact same error twice 2014-05-11 16:45:22 +02:00
Dave Halter
7abdc375c2 add tests for attribute errors 2014-05-11 16:44:58 +02:00
Dave Halter
284a64a79a more import-error detection tests 2014-05-11 15:33:53 +02:00
Dave Halter
7b525285bd static analysis import tests 2014-05-11 15:18:48 +02:00
Dave Halter
c92113a7b1 improved static analysis test base 2014-05-11 13:30:29 +02:00
Dave Halter
40a54961cd testing structure for static analysis. 2014-05-11 12:09:42 +02:00
Dave Halter
c59a8dce28 ImportError detection 2014-05-11 01:54:25 +02:00
Dave Halter
586ac9d013 removed a few debug things. 2014-05-10 20:31:33 +02:00
Dave Halter
11a445ab42 Merge branch 'dev' into linter 2014-05-10 16:53:41 +02:00
Dave Halter
60971245d6 star imports now have their own class, which will be important for AttributeError detection 2014-05-10 16:53:27 +02:00
Dave Halter
358472b21f improved star import support preparation 2014-05-10 14:12:36 +02:00
Dave Halter
ecfb3a0423 Merge branch 'dev' into linter 2014-05-09 11:52:39 +02:00
Dave Halter
d0b2a2ce4f fixed NestedImportModule 2014-05-09 11:52:10 +02:00
Dave Halter
2d48c72340 introduce a NestedImportModule class 2014-05-08 13:22:41 +02:00
Dave Halter
6098ba5e84 small fixes to the analysis.Error class. 2014-05-08 12:04:40 +02:00
Akinori Hattori
1ecb173b33 update AUTHORS 2014-05-08 18:45:51 +09:00
Dave Halter
7c965e544f dive further into following imports, etc 2014-05-08 11:26:08 +02:00
Akinori Hattori
9f3542903d fix virtualenv support 2014-05-08 18:09:05 +09:00
Dave Halter
9eb75f9c61 static analysis prototype description 2014-05-07 12:39:18 +02:00
Dave Halter
c0064e17bc Merge pull request #406 from davidhalter/repl_info
Print the Jedi version when REPL completion is used
2014-05-06 12:58:33 +04:30
Danilo Bargen
ae8b0d5eab Print the Jedi version when REPL completion is used
This also makes debugging easier, because people see which completion
they're actually using.
2014-05-06 08:17:09 +02:00
Danilo Bargen
e66f2d8f4b Docs / changelog formatting 2014-05-06 07:47:59 +02:00
Danilo Bargen
db9ee1f5a0 Updated AUTHORS 2014-05-06 07:39:31 +02:00
Dave Halter
895db8d6ff changelog mistake again, thanks for noticing it @aebersold 2014-05-05 23:50:45 +02:00
Dave Halter
c587d12510 removed call signature caching unintentionally (a few commits ago). 2014-05-05 12:45:19 +02:00
Dave Halter
7db1eb2f62 change small changelog mistake 2014-05-05 00:41:34 +02:00
92 changed files with 2848 additions and 798 deletions

View File

@@ -7,7 +7,7 @@ Takafumi Arakaki (@tkf) <aka.tkf@gmail.com>
Code Contributors
=================
Danilo Bargen (@dbrgn) <gezuru@gmail.com>
Danilo Bargen (@dbrgn) <mail@dbrgn.ch>
Laurens Van Houtven (@lvh) <_@lvh.cc>
Aldo Stracquadanio (@Astrac) <aldo.strac@gmail.com>
Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
@@ -18,10 +18,13 @@ andviro (@andviro)
Mike Gilbert (@floppym) <floppym@gentoo.org>
Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
Lubos Trilety <ltrilety@redhat.com>
Akinori Hattori (@hattya)
Akinori Hattori (@hattya) <hattya@gmail.com>
srusskih (@srusskih)
Steven Silvester (@blink1073)
Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
Fredrik Bergroth (@fbergroth)
Mathias Fußenegger (@mfussenegger)
Syohei Yoshida (@syohex) <syohex@gmail.com>
Note: (@user) means a github user name.

View File

@@ -3,32 +3,39 @@
Changelog
---------
+0.8.0 (2013-04-01)
0.8.1 (2014-07-15)
+++++++++++++++++++
* Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced
drastically. Loading times are down as well (it takes basically as long as
an import).
* REPL completion is starting to become usable.
* Various small API changes. Generally this released focuses on stability and
refactoring of internal APIs.
* Introducing operator precedence, which makes calculating correct Array
indices and ``__getattr__`` strings possible.
* Bugfix release, the last release forgot to include files that improve
autocompletion for builtin libraries. Fixed.
0.8.0 (2014-05-05)
+++++++++++++++++++
- Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced
drastically. Loading times are down as well (it takes basically as long as an
import).
- REPL completion is starting to become usable.
- Various small API changes. Generally this released focuses on stability and
refactoring of internal APIs.
- Introducing operator precedence, which makes calculating correct Array indices
and ``__getattr__`` strings possible.
0.7.0 (2013-08-09)
++++++++++++++++++
* switched from LGPL to MIT license
* added an Interpreter class to the API to make autocompletion in REPL possible.
* added autocompletion support for namespace packages
* add sith.py, a new random testing method
- Switched from LGPL to MIT license
- Added an Interpreter class to the API to make autocompletion in REPL possible.
- Added autocompletion support for namespace packages
- Add sith.py, a new random testing method
0.6.0 (2013-05-14)
++++++++++++++++++
* much faster parser with builtin part caching
* a test suite, thanks @tkf
- Much faster parser with builtin part caching
- A test suite, thanks @tkf
0.5 versions (2012)
+++++++++++++++++++
* Initial development
- Initial development

View File

@@ -7,7 +7,7 @@ include sith.py
include conftest.py
include pytest.ini
include tox.ini
include jedi/mixin/*.pym
include jedi/evaluate/compiled/fake/*.pym
recursive-include test *
recursive-include docs *
recursive-exclude * *.pyc

View File

@@ -34,7 +34,7 @@ me.
Jedi can be used with the following editors:
- Vim (jedi-vim_, YouCompleteMe_)
- Emacs (Jedi.el_, elpy_, company-jedi_)
- Emacs (Jedi.el_, elpy_, anaconda-mode_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
- SynWrite_
- TextMate_ (Not sure if it's actually working)
@@ -144,7 +144,7 @@ For more detailed information visit the `testing documentation
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _company-jedi: https://github.com/proofit404/company-jedi
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _SynWrite: http://uvviewsoft.com/synjedi/

View File

@@ -26,7 +26,7 @@ Emacs:
- Jedi.el_
- elpy_
- company-jedi_
- anaconda-mode_
Sublime Text 2/3:
@@ -43,8 +43,9 @@ TextMate:
Kate:
- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof
<https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
- Kate_ version 4.13+ `supports it natively
<https://projects.kde.org/projects/kde/applications/kate/repository/entry/addons/kate/pate/src/plugins/python_autocomplete_jedi.py?rev=KDE%2F4.13>`__,
you have to enable it, though.
.. _other-software:
@@ -78,9 +79,10 @@ Using a custom ``$HOME/.pythonrc.py``
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _company-jedi: https://github.com/proofit404/company-jedi
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _SynJedi: http://uvviewsoft.com/synjedi/
.. _wdb: https://github.com/Kozea/wdb
.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
.. _kate: http://kate-editor.org/

View File

@@ -34,7 +34,7 @@ As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.8.0-final0'
__version__ = '0.8.1-final0'
from jedi.api import Script, Interpreter, NotFoundError, set_debug_function
from jedi.api import preload_module, defined_names

View File

@@ -1,8 +1,43 @@
from sys import argv
from os.path import join, dirname, abspath
from os.path import join, dirname, abspath, isdir
if len(argv) == 2 and argv[1] == 'repl':
# don't want to use __main__ only for repl yet, maybe we want to use it for
# something else. So just use the keyword ``repl`` for now.
print(join(dirname(abspath(__file__)), 'api', 'replstartup.py'))
elif len(argv) > 1 and argv[1] == 'linter':
"""
This is a pre-alpha API. You're not supposed to use it at all, except for
testing. It will very likely change.
"""
import jedi
import sys
if '--debug' in sys.argv:
jedi.set_debug_function()
for path in sys.argv[2:]:
if path.startswith('--'):
continue
if isdir(path):
import fnmatch
import os
paths = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.py'):
paths.append(os.path.join(root, filename))
else:
paths = [path]
try:
for path in paths:
for error in jedi.Script(path=path)._analysis():
print(error)
except Exception:
if '--pdb' in sys.argv:
import pdb
pdb.post_mortem()
else:
raise

View File

@@ -191,3 +191,9 @@ def literal_eval(string):
if re.match('[uU][\'"]', string):
string = string[1:]
return ast.literal_eval(string)
# Python 3 renamed itertools.izip_longest to zip_longest; alias it so the
# rest of the code base can use the Python 3 name unconditionally.
try:
    from itertools import zip_longest
except ImportError:
    from itertools import izip_longest as zip_longest  # Python 2

View File

@@ -35,6 +35,7 @@ from jedi.evaluate import imports
from jedi.evaluate.helpers import FakeName
from jedi.evaluate.finder import get_names_of_scope
from jedi.evaluate.helpers import search_call_signatures
from jedi.evaluate import analysis
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
@@ -85,9 +86,8 @@ class Script(object):
with open(path) as f:
source = f.read()
lines = source.splitlines() or ['']
if source and source[-1] == '\n':
lines.append('')
self.source = common.source_to_unicode(source, encoding)
lines = common.splitlines(self.source)
line = max(len(lines), 1) if line is None else line
if not (0 < line <= len(lines)):
raise ValueError('`line` parameter is not in a valid range.')
@@ -100,7 +100,6 @@ class Script(object):
cache.clear_caches()
debug.reset_time()
self.source = common.source_to_unicode(source, encoding)
self._user_context = UserContext(self.source, self._pos)
self._parser = UserContextParser(self.source, path, self._pos, self._user_context)
self._evaluator = Evaluator()
@@ -204,11 +203,11 @@ class Script(object):
scopes = list(self._prepare_goto(path, True))
except NotFoundError:
scopes = []
scope_generator = get_names_of_scope(self._evaluator,
self._parser.user_scope(),
self._pos)
scope_names_generator = get_names_of_scope(self._evaluator,
self._parser.user_scope(),
self._pos)
completions = []
for scope, name_list in scope_generator:
for scope, name_list in scope_names_generator:
for c in name_list:
completions.append((c, scope))
else:
@@ -217,19 +216,20 @@ class Script(object):
for s in scopes:
if s.isinstance(er.Function):
names = s.get_magic_function_names()
else:
if isinstance(s, imports.ImportWrapper):
under = like + self._user_context.get_path_after_cursor()
if under == 'import':
current_line = self._user_context.get_position_line()
if not current_line.endswith('import import'):
continue
a = s.import_stmt.alias
if a and a.start_pos <= self._pos <= a.end_pos:
elif isinstance(s, imports.ImportWrapper):
under = like + self._user_context.get_path_after_cursor()
if under == 'import':
current_line = self._user_context.get_position_line()
if not current_line.endswith('import import'):
continue
names = s.get_defined_names(on_import_stmt=True)
else:
names = s.get_defined_names()
a = s.import_stmt.alias
if a and a.start_pos <= self._pos <= a.end_pos:
continue
names = s.get_defined_names(on_import_stmt=True)
else:
names = []
for _, new_names in s.scope_names_generator():
names += new_names
for c in names:
completions.append((c, s))
@@ -259,7 +259,7 @@ class Script(object):
if not is_completion:
# goto_definition returns definitions of its statements if the
# cursor is on the assignee. By changing the start_pos of our
# "pseud" statement, the Jedi evaluator can find the assignees.
# "pseudo" statement, the Jedi evaluator can find the assignees.
if user_stmt is not None:
eval_stmt.start_pos = user_stmt.end_pos
scopes = self._evaluator.eval_statement(eval_stmt)
@@ -273,6 +273,8 @@ class Script(object):
stmt = r.module.statements[-1]
except IndexError:
raise NotFoundError()
if isinstance(stmt, pr.KeywordStatement):
stmt = stmt.stmt
if not isinstance(stmt, pr.Statement):
raise NotFoundError()
@@ -345,7 +347,7 @@ class Script(object):
Use :attr:`.call_signatures` instead.
.. todo:: Remove!
"""
warnings.warn("Use line instead.", DeprecationWarning)
warnings.warn("Use call_signatures instead.", DeprecationWarning)
sig = self.call_signatures()
return sig[0] if sig else None
@@ -570,7 +572,6 @@ class Script(object):
_callable = lambda: self._evaluator.eval_call(stmt_el)
origins = cache.cache_call_signatures(_callable, self.source,
self._pos, user_stmt)
origins = self._evaluator.eval_call(stmt_el)
debug.speed('func_call followed')
key_name = None
@@ -586,6 +587,23 @@ class Script(object):
return [classes.CallSignature(self._evaluator, o, call, index, key_name)
for o in origins if o.is_callable()]
def _analysis(self):
#statements = set(chain(*self._parser.module().used_names.values()))
stmts, imps = analysis.get_module_statements(self._parser.module())
# Sort the statements so that the results are reproducible.
for i in imps:
iw = imports.ImportWrapper(self._evaluator, i,
nested_resolve=True).follow()
if i.is_nested() and any(not isinstance(i, pr.Module) for i in iw):
analysis.add(self._evaluator, 'import-error', i.namespace.names[-1])
for stmt in sorted(stmts, key=lambda obj: obj.start_pos):
if not (isinstance(stmt.parent, pr.ForFlow)
and stmt.parent.set_stmt == stmt):
self._evaluator.eval_statement(stmt)
ana = [a for a in self._evaluator.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
class Interpreter(Script):
"""
@@ -627,7 +645,7 @@ class Interpreter(Script):
user_stmt = self._parser.user_stmt_with_whitespace()
is_simple_path = not path or re.search('^[\w][\w\d.]*$', path)
if isinstance(user_stmt, pr.Import) or not is_simple_path:
return super(type(self), self)._simple_complete(path, like)
return super(Interpreter, self)._simple_complete(path, like)
else:
class NamespaceModule(object):
def __getattr__(_, name):
@@ -639,8 +657,8 @@ class Interpreter(Script):
raise AttributeError()
def __dir__(_):
return list(set(chain.from_iterable(n.keys()
for n in self.namespaces)))
gen = (n.keys() for n in self.namespaces)
return list(set(chain.from_iterable(gen)))
paths = path.split('.') if path else []

View File

@@ -27,8 +27,14 @@ def defined_names(evaluator, scope):
:type scope: Scope
:rtype: list of Definition
"""
pair = next(get_names_of_scope(evaluator, scope, star_search=False,
include_builtin=False), None)
# Calling get_names_of_scope doesn't make sense always. It might include
# star imports or inherited stuff. Wanted?
# TODO discuss!
if isinstance(scope, pr.Module):
pair = scope, scope.get_defined_names()
else:
pair = next(get_names_of_scope(evaluator, scope, star_search=False,
include_builtin=False), None)
names = pair[1] if pair else []
names = [n for n in names if isinstance(n, pr.Import) or (len(n) == 1)]
return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)]
@@ -142,7 +148,7 @@ class BaseDefinition(object):
stripped = stripped.parent
if isinstance(stripped, pr.Name):
stripped = stripped.parent
return type(stripped).__name__.lower()
return type(stripped).__name__.lower().replace('wrapper', '')
def _path(self):
"""The module path."""
@@ -245,7 +251,7 @@ class BaseDefinition(object):
Use :meth:`.docstring` instead.
.. todo:: Remove!
"""
warnings.warn("Use documentation() instead.", DeprecationWarning)
warnings.warn("Use docstring() instead.", DeprecationWarning)
return self.docstring()
@property
@@ -255,7 +261,7 @@ class BaseDefinition(object):
Use :meth:`.docstring` instead.
.. todo:: Remove!
"""
warnings.warn("Use documentation() instead.", DeprecationWarning)
warnings.warn("Use docstring() instead.", DeprecationWarning)
return self.docstring(raw=True)
@property
@@ -307,16 +313,17 @@ class BaseDefinition(object):
stripped = self._definition
if isinstance(stripped, pr.Name):
stripped = stripped.parent
# We should probably work in `Finder._names_to_types` here.
if isinstance(stripped, pr.Function):
stripped = er.Function(self._evaluator, stripped)
elif isinstance(stripped, pr.Class):
stripped = er.Class(self._evaluator, stripped)
# We should probably work in `Finder._names_to_types` here.
if isinstance(stripped, pr.Function):
stripped = er.Function(self._evaluator, stripped)
elif isinstance(stripped, pr.Class):
stripped = er.Class(self._evaluator, stripped)
if stripped.isinstance(pr.Statement):
return self._evaluator.eval_statement(stripped)
elif stripped.isinstance(pr.Import):
return imports.strip_imports(self._evaluator, [stripped])
return imports.follow_imports(self._evaluator, [stripped])
else:
return [stripped]
@@ -463,7 +470,7 @@ class Completion(BaseDefinition):
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing `import a` is slow if you use
the ``foo.documentation(fast=False)`` on every object, because it
the ``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
definition = self._definition
@@ -565,18 +572,13 @@ class Definition(use_metaclass(CachedMetaClass, BaseDefinition)):
name = d.get_defined_names()[0].names[-1]
except (AttributeError, IndexError):
return None
elif isinstance(d, pr.Param):
name = d.get_name()
elif isinstance(d, pr.Statement):
try:
expression_list = d.assignment_details[0][0]
name = expression_list[0].name.names[-1]
except IndexError:
if isinstance(d, pr.Param):
try:
return unicode(d.expression_list()[0].name)
except (IndexError, AttributeError):
# IndexError for syntax error params
# AttributeError for *args/**kwargs
pass
return None
elif isinstance(d, iterable.Generator):
return None

View File

@@ -41,6 +41,6 @@ def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False)
just_from = next(context) == 'from'
i = imports.ImportWrapper(evaluator, user_stmt, is_like_search,
kill_count=kill_count, direct_resolve=True,
kill_count=kill_count, nested_resolve=True,
is_just_from=just_from)
return i, cur_name_part

View File

@@ -18,12 +18,16 @@ class InterpreterNamespace(pr.Module):
self.parser_module = parser_module
self._evaluator = evaluator
@underscore_memoization
def get_defined_names(self):
for name in self.parser_module.get_defined_names():
yield name
for key, value in self.namespace.items():
yield LazyName(self._evaluator, key, value)
def scope_names_generator(self, position=None):
yield self, list(self.get_defined_names())
def __getattr__(self, name):
return getattr(self.parser_module, name)
@@ -38,26 +42,33 @@ class LazyName(helpers.FakeName):
@property
@underscore_memoization
def parent(self):
parser_path = []
obj = self._value
parser_path = []
if inspect.ismodule(obj):
module = obj
else:
class FakeParent(pr.Base):
parent = None # To avoid having no parent for NamePart.
path = None
names = []
try:
o = obj.__objclass__
parser_path.append(pr.NamePart(obj.__name__, None, (None, None)))
names.append(obj.__name__)
obj = o
except AttributeError:
pass
try:
module_name = obj.__module__
parser_path.insert(0, pr.NamePart(obj.__name__, None, (None, None)))
names.insert(0, obj.__name__)
except AttributeError:
# Unfortunately in some cases like `int` there's no __module__
module = builtins
else:
module = __import__(module_name)
fake_name = helpers.FakeName(names, FakeParent())
parser_path = fake_name.names
raw_module = get_module(self._value)
try:

View File

@@ -15,9 +15,13 @@ Then you will be able to use Jedi completer in your Python interpreter::
os.path.join().split().index os.path.join().split().insert
"""
import jedi.utils
from jedi import __version__ as __jedi_version__
print('REPL completion using Jedi %s' % __jedi_version__)
jedi.utils.setup_readline()
del jedi
# Note: try not to do many things here, as it will contaminate global
# namespace of the interpreter.

View File

@@ -76,7 +76,7 @@ def usages(evaluator, definitions, search_name, mods):
for used_count, name_part in imps:
i = imports.ImportWrapper(evaluator, stmt, kill_count=count - used_count,
direct_resolve=True)
nested_resolve=True)
f = i.follow(is_goto=True)
if set(f) & set(definitions):
names.append(classes.Definition(evaluator, name_part))
@@ -91,7 +91,7 @@ def usages_add_import_modules(evaluator, definitions, search_name):
new = set()
for d in definitions:
if isinstance(d.parent, pr.Import):
s = imports.ImportWrapper(evaluator, d.parent, direct_resolve=True)
s = imports.ImportWrapper(evaluator, d.parent, nested_resolve=True)
with common.ignored(IndexError):
new.add(s.follow(is_goto=True)[0])
return set(definitions) | new

View File

@@ -100,9 +100,7 @@ def time_cache(time_add_setting):
def cache_call_signatures(source, user_pos, stmt):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = source.splitlines() or ['']
if source and source[-1] == '\n':
lines.append('')
lines = common.splitlines(source)
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[stmt.start_pos[0]:index]
@@ -245,7 +243,7 @@ def save_parser(path, name, parser, pickling=True):
class ParserPickling(object):
version = 10
version = 13
"""
Version number (integer) for file system cache.

View File

@@ -146,3 +146,13 @@ def source_to_unicode(source, encoding=None):
# cast to unicode by default
return unicode(source, detect_encoding(), 'replace')
def splitlines(string):
    """Split Python source into lines, breaking only on ``\\n`` and ``\\r\\n``.

    In contrast to ``str.splitlines`` this keeps form feeds and the other
    special characters as ordinary text.  Also different: an empty input
    yields ``['']`` instead of ``[]``.
    """
    return re.split('\r\n|\n', string)

View File

@@ -73,6 +73,7 @@ import itertools
from jedi._compatibility import next, hasattr, unicode
from jedi.parser import representation as pr
from jedi.parser.tokenize import Token
from jedi.parser import fast
from jedi import debug
from jedi.evaluate import representation as er
from jedi.evaluate import imports
@@ -83,6 +84,7 @@ from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate.helpers import FakeStatement
class Evaluator(object):
@@ -92,6 +94,7 @@ class Evaluator(object):
self.compiled_cache = {} # see `compiled.create()`
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector()
self.analysis = []
def find_types(self, scope, name_str, position=None, search_global=False,
is_goto=False, resolve_decorator=True):
@@ -107,7 +110,7 @@ class Evaluator(object):
scopes = f.scopes(search_global)
if is_goto:
return f.filter_name(scopes)
return f.find(scopes, resolve_decorator)
return f.find(scopes, resolve_decorator, search_global)
@memoize_default(default=[], evaluator_is_first_arg=True)
@recursion.recursion_decorator
@@ -123,6 +126,8 @@ class Evaluator(object):
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
expression_list = stmt.expression_list()
if isinstance(stmt, FakeStatement):
return expression_list # Already contains the results.
result = self.eval_expression_list(expression_list)
@@ -132,7 +137,10 @@ class Evaluator(object):
# `=` is always the last character in aug assignments -> -1
operator = operator[:-1]
name = str(expr_list[0].name)
left = self.find_types(stmt.parent, name, stmt.start_pos)
parent = stmt.parent
if isinstance(parent, (pr.SubModule, fast.Module)):
parent = er.ModuleWrapper(self, parent)
left = self.find_types(parent, name, stmt.start_pos)
if isinstance(stmt.parent, pr.ForFlow):
# iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
@@ -169,25 +177,28 @@ class Evaluator(object):
return self._eval_precedence(el)
else:
# normal element, no operators
return self._eval_statement_element(el)
return self.eval_statement_element(el)
def _eval_precedence(self, _precedence):
left = self.process_precedence_element(_precedence.left)
right = self.process_precedence_element(_precedence.right)
return precedence.calculate(self, left, _precedence.operator, right)
def _eval_statement_element(self, element):
def eval_statement_element(self, element):
if pr.Array.is_type(element, pr.Array.NOARRAY):
r = list(itertools.chain.from_iterable(self.eval_statement(s)
for s in element))
try:
lst_cmp = element[0].expression_list()[0]
if not isinstance(lst_cmp, pr.ListComprehension):
raise IndexError
except IndexError:
r = list(itertools.chain.from_iterable(self.eval_statement(s)
for s in element))
else:
r = [iterable.GeneratorComprehension(self, lst_cmp)]
call_path = element.generate_call_path()
next(call_path, None) # the first one has been used already
return self.follow_path(call_path, r, element.parent)
elif isinstance(element, pr.ListComprehension):
loop = _evaluate_list_comprehension(element)
# Caveat: parents are being changed, but this doesn't matter,
# because nothing else uses it.
element.stmt.parent = loop
return self.eval_statement(element.stmt)
elif isinstance(element, pr.Lambda):
return [er.Function(self, element)]
@@ -198,10 +209,10 @@ class Evaluator(object):
# The string tokens are just operations (+, -, etc.)
elif isinstance(element, compiled.CompiledObject):
return [element]
elif not isinstance(element, Token):
return self.eval_call(element)
else:
elif isinstance(element, Token):
return []
else:
return self.eval_call(element)
def eval_call(self, call):
"""Follow a call is following a function, variable, string, etc."""
@@ -211,7 +222,8 @@ class Evaluator(object):
s = call
while not s.parent.isinstance(pr.IsScope):
s = s.parent
return self.eval_call_path(path, s.parent, s.start_pos)
par = s.parent
return self.eval_call_path(path, par, s.start_pos)
def eval_call_path(self, path, scope, position):
"""
@@ -229,7 +241,7 @@ class Evaluator(object):
else:
# for pr.Literal
types = [compiled.create(self, current.value)]
types = imports.strip_imports(self, types)
types = imports.follow_imports(self, types)
return self.follow_path(path, types, scope)
@@ -274,8 +286,11 @@ class Evaluator(object):
# This must be an execution, either () or [].
if current.type == pr.Array.LIST:
if hasattr(typ, 'get_index_types'):
slc = iterable.create_indexes_or_slices(self, current)
result = typ.get_index_types(slc)
if isinstance(typ, compiled.CompiledObject):
# CompiledObject doesn't contain an evaluator instance.
result = typ.get_index_types(self, current)
else:
result = typ.get_index_types(current)
elif current.type not in [pr.Array.DICT]:
# Scope must be a class or func - make an instance or execution.
result = self.execute(typ, current)
@@ -291,7 +306,7 @@ class Evaluator(object):
if filter_private_variable(typ, scope, current):
return []
types = self.find_types(typ, current)
result = imports.strip_imports(self, types)
result = imports.follow_imports(self, types)
return self.follow_path(path, result, scope)
@debug.increase_indent
@@ -329,7 +344,7 @@ class Evaluator(object):
debug.warning("no execution possible %s", obj)
debug.dbg('execute result: %s in %s', stmts, obj)
return imports.strip_imports(self, stmts)
return imports.follow_imports(self, stmts)
def goto(self, stmt, call_path):
scope = stmt.get_parent_until(pr.IsScope)
@@ -365,20 +380,3 @@ def filter_private_variable(scope, call_scope, var_name):
if s != scope.base.base:
return True
return False
def _evaluate_list_comprehension(lc, parent=None):
input = lc.input
nested_lc = input.expression_list()[0]
if isinstance(nested_lc, pr.ListComprehension):
# is nested LC
input = nested_lc.stmt
module = input.get_parent_until()
# create a for loop, which does the same as list comprehensions
loop = pr.ForFlow(module, [input], lc.stmt.start_pos, lc.middle, True)
loop.parent = parent or lc.get_parent_until(pr.IsScope)
if isinstance(nested_lc, pr.ListComprehension):
loop = _evaluate_list_comprehension(nested_lc, loop)
return loop

235
jedi/evaluate/analysis.py Normal file
View File

@@ -0,0 +1,235 @@
"""
Module for static analysis.
"""
from jedi import debug
from jedi.parser import representation as pr
from jedi.evaluate.compiled import CompiledObject
# Mapping of error name -> (numeric code, exception class, default message).
# A ``None`` message means the caller always supplies the message itself.
CODES = {
    'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
    'name-error': (2, NameError, 'Potential NameError.'),
    'import-error': (3, ImportError, 'Potential ImportError.'),
    'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."),
    'type-error-too-many-arguments': (5, TypeError, None),
    'type-error-too-few-arguments': (6, TypeError, None),
    'type-error-keyword-argument': (7, TypeError, None),
    'type-error-multiple-values': (8, TypeError, None),
    'type-error-star-star': (9, TypeError, None),
    'type-error-star': (10, TypeError, None),
    'type-error-operation': (11, TypeError, None),
}


class Error(object):
    """A static-analysis issue found at a position in a module.

    Equality and hashing deliberately ignore ``message`` so that duplicate
    reports of the same issue at the same position collapse in a set.
    """
    def __init__(self, name, module_path, start_pos, message=None):
        self.path = module_path
        self._start_pos = start_pos
        self.name = name
        if message is None:
            # Fall back to the default message registered in CODES.
            message = CODES[self.name][2]
        self.message = message

    @property
    def line(self):
        return self._start_pos[0]

    @property
    def column(self):
        return self._start_pos[1]

    @property
    def code(self):
        # The first letter of the class name ('E' for Error, 'W' for
        # Warning) plus the numeric code, e.g. 'E2'.
        first = self.__class__.__name__[0]
        return first + str(CODES[self.name][0])

    def __unicode__(self):
        return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
                                    self.code, self.message)

    def __str__(self):
        return self.__unicode__()

    def __eq__(self, other):
        return (self.path == other.path and self.name == other.name
                and self._start_pos == other._start_pos)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.path, self._start_pos, self.name))

    def __repr__(self):
        # Bug fix: the original format string was missing the closing '>'.
        return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
                                      self.name, self.path,
                                      self._start_pos[0], self._start_pos[1])


class Warning(Error):
    """Like :class:`Error`, but reported with a ``W`` code prefix."""
    pass
def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
    """Record a static-analysis issue on ``evaluator.analysis``.

    Nothing is recorded when ``jedi_obj`` sits inside a ``try``/``except``
    (or ``hasattr`` guard) that already catches the exception class
    registered for ``name`` in ``CODES``.
    """
    exception_cls = CODES[name][1]
    if _check_for_exception_catch(evaluator, jedi_obj, exception_cls, payload):
        # The surrounding code handles this exception -> not an issue.
        return
    issue = typ(name, jedi_obj.get_parent_until().path,
                jedi_obj.start_pos, message)
    debug.warning(str(issue))
    evaluator.analysis.append(issue)
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
module = instance.get_parent_until()
try:
stmts = module.used_names['setattr']
except KeyError:
return False
return any(instance.start_pos < stmt.start_pos < instance.end_pos
for stmt in stmts)
def add_attribute_error(evaluator, scope, name_part):
    """Report a potential ``AttributeError``: ``scope`` has no ``name_part``.

    Downgraded to a :class:`Warning` when the attribute may exist
    dynamically: the instance's class defines ``__getattr__`` or
    ``__getattribute__``, or uses ``setattr`` within its body.
    """
    message = ('AttributeError: %s has no attribute %s.' % (scope, name_part))
    # Imported locally, presumably to avoid a circular import -- TODO confirm.
    from jedi.evaluate.representation import Instance
    # Check for __getattr__/__getattribute__ existence and issue a warning
    # instead of an error, if that happens.
    if isinstance(scope, Instance):
        typ = Warning
        try:
            scope.get_subscope_by_name('__getattr__')
        except KeyError:
            try:
                scope.get_subscope_by_name('__getattribute__')
            except KeyError:
                if not _check_for_setattr(scope):
                    # No dynamic attribute machinery found -> real error.
                    typ = Error
    else:
        typ = Error
    # The payload lets _check_for_exception_catch recognize a matching
    # ``hasattr(scope, 'name')`` guard around the access.
    payload = scope, name_part
    add(evaluator, 'attribute-error', name_part, message, typ, payload)
def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
    """
    Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
    doesn't count as an error (if equal to `exception`).

    Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
    it.

    Returns True if the exception was caught.
    """
    def check_match(cls):
        # True when `cls` evaluates to a compiled (real) class that would
        # catch `exception`, i.e. `exception` is a subclass of it.
        try:
            return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
        except TypeError:
            # issubclass raises TypeError when cls.obj is not a class.
            return False

    def check_try_for_except(obj):
        # Walk the `except` clauses chained after the `try` flow.
        while obj.next is not None:
            obj = obj.next
            if not obj.inputs:
                # No inputs implies a bare `except:` catch, which catches
                # everything.
                return True
            for i in obj.inputs:
                except_classes = evaluator.eval_statement(i)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.Array) and cls.type == 'tuple':
                        # multiple exceptions, e.g. `except (A, B):`
                        for c in cls.values():
                            if check_match(c):
                                return True
                    else:
                        if check_match(cls):
                            return True
        return False

    def check_hasattr(stmt):
        # True when `stmt` is exactly `hasattr(obj, 'name')` guarding the
        # attribute access described by `payload` = (obj, name_part).
        expression_list = stmt.expression_list()
        try:
            assert len(expression_list) == 1
            call = expression_list[0]
            assert isinstance(call, pr.Call) and str(call.name) == 'hasattr'
            execution = call.execution
            assert execution and len(execution) == 2
            # check if the names match
            names = evaluator.eval_statement(execution[1])
            assert len(names) == 1 and isinstance(names[0], CompiledObject)
            assert names[0].obj == str(payload[1])
            objects = evaluator.eval_statement(execution[0])
            return payload[0] in objects
        except AssertionError:
            pass
        return False

    # Walk upwards from the object to the enclosing function/class, looking
    # for a `try` flow or an `if`/`while hasattr(...)` guard on the way.
    obj = jedi_obj
    while obj is not None and not obj.isinstance(pr.Function, pr.Class):
        if obj.isinstance(pr.Flow):
            # try/except catch check
            if obj.command == 'try' and check_try_for_except(obj):
                return True
            # hasattr check
            if exception == AttributeError and obj.command in ('if', 'while'):
                if obj.inputs and check_hasattr(obj.inputs[0]):
                    return True
        obj = obj.parent
    return False
def get_module_statements(module):
    """
    Returns the statements used in a module. All these statements should be
    evaluated to check for potential exceptions.

    :return: tuple of (set of statements, set of imports)
    """
    def add_stmts(stmts):
        # Recursively collect statements, descending into flows
        # (if/for/try/...), keyword statements (return/...) and array or
        # call literals nested inside expressions.
        new = set()
        for stmt in stmts:
            if isinstance(stmt, pr.Flow):
                # A flow chain (e.g. if/elif/else): collect the input
                # statements of every link in the chain.
                while stmt is not None:
                    new |= add_stmts(stmt.inputs)
                    stmt = stmt.next
                continue
            if isinstance(stmt, pr.KeywordStatement):
                # e.g. `return x` -> analyze the inner statement `x`.
                stmt = stmt.stmt
                if stmt is None:
                    continue
            for expression in stmt.expression_list():
                if isinstance(expression, pr.Array):
                    new |= add_stmts(expression.values)
                if isinstance(expression, pr.StatementElement):
                    # Arrays can also sit inside call paths, e.g. foo([x]).
                    for element in expression.generate_call_path():
                        if isinstance(element, pr.Array):
                            new |= add_stmts(element.values)
            new.add(stmt)
        return new

    stmts = set()
    imports = set()
    for scope in module.walk():
        imports |= set(scope.imports)
        stmts |= add_stmts(scope.statements)
        stmts |= add_stmts(r for r in scope.returns if r is not None)
        try:
            decorators = scope.decorators
        except AttributeError:
            # Not every scope type (e.g. a module) has decorators.
            pass
        else:
            stmts |= add_stmts(decorators)
    return stmts, imports

View File

@@ -15,6 +15,13 @@ from jedi.evaluate.helpers import FakeName
from . import fake
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
class CompiledObject(Base):
# comply with the parser
start_pos = 0, 0
@@ -41,7 +48,7 @@ class CompiledObject(Base):
for p in tokens:
parts = [FakeName(part) for part in p.strip().split('=')]
if len(parts) >= 2:
parts.insert(1, Operator('=', (0, 0)))
parts.insert(1, Operator(module, '=', module, (0, 0)))
params.append(Param(module, parts, start_pos,
end_pos, builtin))
return params
@@ -82,24 +89,30 @@ class CompiledObject(Base):
return CompiledObject(c, self.parent)
return self
@underscore_memoization
def get_defined_names(self):
if inspect.ismodule(self.obj):
return self.instance_names()
else:
return type_names + self.instance_names()
def scope_names_generator(self, position=None):
yield self, self.get_defined_names()
@underscore_memoization
def instance_names(self):
names = []
cls = self._cls()
for name in dir(cls.obj):
names.append(CompiledName(cls, name))
return names
def instance_names(self):
return self.get_defined_names()
def get_subscope_by_name(self, name):
if name in dir(self._cls().obj):
return CompiledName(self._cls(), name).parent
else:
raise KeyError("CompiledObject doesn't have an attribute '%s'." % name)
def get_index_types(self, index_types):
def get_index_types(self, evaluator, index_array):
# If the object doesn't have `__getitem__`, just raise the
# AttributeError.
if not hasattr(self.obj, '__getitem__'):
@@ -110,7 +123,8 @@ class CompiledObject(Base):
return []
result = []
for typ in index_types:
from jedi.evaluate.iterable import create_indexes_or_slices
for typ in create_indexes_or_slices(evaluator, index_array):
index = None
try:
index = typ.obj
@@ -168,6 +182,9 @@ class CompiledObject(Base):
faked_subscopes.append(f)
return faked_subscopes
def is_scope(self):
return True
def get_self_attributes(self):
return [] # Instance compatibility
@@ -187,7 +204,11 @@ class CompiledName(FakeName):
self.start_pos = 0, 0 # an illegal start_pos, to make sorting easy.
def __repr__(self):
return '<%s: (%s).%s>' % (type(self).__name__, self._obj.name, self.name)
try:
name = self._obj.name # __name__ is not defined all the time
except AttributeError:
name = None
return '<%s: (%s).%s>' % (type(self).__name__, name, self.name)
@property
@underscore_memoization
@@ -200,7 +221,37 @@ class CompiledName(FakeName):
pass # Just ignore this, FakeName tries to overwrite the parent attribute.
def dotted_from_fs_path(fs_path, sys_path=None):
    """
    Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`.  I.e.
    compares the path with sys.path and then returns the dotted_path.

    NOTE(review): when ``fs_path`` matches no ``sys.path`` entry the whole
    path is converted (the prefix stripped is empty) -- a ``None`` return
    does not actually happen here.
    """
    if sys_path is None:
        sys_path = get_sys_path()
    # prefer
    #   - UNIX
    #     /path/to/pythonX.Y/lib-dynload
    #     /path/to/pythonX.Y/site-packages
    #   - Windows
    #     C:\path\to\DLLs
    #     C:\path\to\Lib\site-packages
    # over
    #   - UNIX
    #     /path/to/pythonX.Y
    #   - Windows
    #     C:\path\to\Lib
    # i.e. keep the LONGEST sys.path entry that prefixes fs_path.
    path = ''
    for s in sys_path:
        if (fs_path.startswith(s) and
                len(path) < len(s)):
            path = s
    # Strip the matched prefix, drop the file suffix or a trailing
    # `__init__.py`, then turn the path separators into dots.
    return _path_re.sub('', fs_path[len(path):].lstrip(os.path.sep)).replace(os.path.sep, '.')
def load_module(path, name):
"""
if not name:
name = os.path.basename(path)
name = name.rpartition('.')[0] # cut file type (normally .so)
@@ -222,17 +273,22 @@ def load_module(path, name):
else:
path = os.path.dirname(path)
"""
if path is not None:
dotted_path = dotted_from_fs_path(path)
else:
dotted_path = name
sys_path = get_sys_path()
if path:
sys_path.insert(0, path)
if dotted_path is None:
p, _, dotted_path = path.partition(os.path.sep)
sys_path.insert(0, p)
temp, sys.path = sys.path, sys_path
try:
module = __import__(name, {}, {}, dot_path[:-1])
except AttributeError:
# use sys.modules, because you cannot access some modules
# directly. -> github issue #59
module = sys.modules[name]
__import__(dotted_path)
# Just access the cache after import, because of #59 as well as the very
# complicated import structure of Python.
module = sys.modules[dotted_path]
sys.path = temp
return CompiledObject(module)
@@ -307,12 +363,6 @@ def _parse_function_doc(doc):
class Builtin(CompiledObject, IsScope):
@memoize
def get_defined_names(self):
# Filter None, because it's really just a keyword, nobody wants to
# access it.
return [d for d in super(Builtin, self).get_defined_names() if d.name != 'None']
@memoize
def get_by_name(self, name):
item = [n for n in self.get_defined_names() if n.get_code() == name][0]
@@ -324,10 +374,6 @@ def _a_generator(foo):
yield 42
yield foo
builtin = Builtin(_builtins)
magic_function_class = CompiledObject(type(load_module), parent=builtin)
generator_obj = CompiledObject(_a_generator(1.0))
def _create_from_name(module, parent, name):
faked = fake.get_faked(module.obj, parent.obj, name)
@@ -346,6 +392,13 @@ def _create_from_name(module, parent, name):
return CompiledObject(obj, parent)
builtin = Builtin(_builtins)
magic_function_class = CompiledObject(type(load_module), parent=builtin)
generator_obj = CompiledObject(_a_generator(1.0))
type_names = [] # Need this, because it's return in get_defined_names.
type_names = builtin.get_by_name('type').get_defined_names()
def compiled_objects_cache(func):
def wrapper(evaluator, obj, parent=builtin, module=None):
# Do a very cheap form of caching here.

View File

@@ -5,5 +5,5 @@ class partial():
self.__keywords = keywords
def __call__(self, *args, **kwargs):
# I know this doesn't work in Python, but Jedi can this ;-)
# I know this doesn't work in Python, but in Jedi it does ;-)
return self.__func(*self.__args, *args, **self.keywords, **kwargs)

View File

@@ -1,48 +1,48 @@
def compile():
class SRE_Match():
endpos = 1
lastgroup = 0
lastindex = 1
pos = 0
string = 'a'
regs = ((0, 1),)
endpos = int()
lastgroup = int()
lastindex = int()
pos = int()
string = str()
regs = ((int(), int()),)
def __init__(self, pattern):
self.re = pattern
def start(self):
return 0
return int()
def end(self):
return 1
return int()
def span(self):
return 0, 1
return int(), int()
def expand(self):
return ''
return str()
def group(self, nr):
return ''
return str()
def groupdict(self):
return {'a', 'a'}
return {str(): str()}
def groups(self):
return ('a',)
return (str(),)
class SRE_Pattern():
flags = 0
flags = int()
groupindex = {}
groups = 0
pattern = 'a'
groups = int()
pattern = str()
def findall(self, string, pos=None, endpos=None):
"""
findall(string[, pos[, endpos]]) --> list.
Return a list of all non-overlapping matches of pattern in string.
"""
return ['a']
return [str()]
def finditer(self, string, pos=None, endpos=None):
"""
@@ -77,7 +77,7 @@ def compile():
split(string[, maxsplit = 0]) --> list.
Split string by the occurrences of pattern.
"""
return ['a']
return [str()]
def sub(self, repl, string, count=0):
"""
@@ -85,7 +85,7 @@ def compile():
Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl.
"""
return ''
return str()
def subn(self, repl, string, count=0):
"""
@@ -94,6 +94,6 @@ def compile():
the leftmost non-overlapping occurrences of pattern with the
replacement repl.
"""
return ('', 1)
return (str(), int())
return SRE_Pattern()

View File

@@ -199,6 +199,10 @@ class dict():
except KeyError:
return d
def setdefault(self, k, d):
# TODO maybe also return the content
return d
class reversed():
def __init__(self, sequence):

View File

@@ -95,7 +95,7 @@ def search_params(evaluator, param):
continue
scopes = [scope]
if first:
scopes = evaluator.eval_call_path(iter(first), scope, pos)
scopes = evaluator.eval_call_path(iter(first), c.parent, pos)
pos = None
from jedi.evaluate import representation as er
for scope in scopes:
@@ -110,7 +110,6 @@ def search_params(evaluator, param):
# only if we have the correct function we execute
# it, otherwise just ignore it.
evaluator.follow_path(iter(last), s, scope)
return listener.param_possibilities
result = []

View File

@@ -11,10 +11,11 @@ would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
import sys
from itertools import chain
from jedi._compatibility import hasattr, unicode, u, reraise
from jedi._compatibility import hasattr, unicode, u
from jedi.parser import representation as pr, tokenize
from jedi.parser import fast
from jedi import debug
from jedi import common
from jedi import settings
@@ -24,6 +25,8 @@ from jedi.evaluate import compiled
from jedi.evaluate import docstrings
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import precedence
class NameFinder(object):
@@ -33,69 +36,76 @@ class NameFinder(object):
self.name_str = name_str
self.position = position
def find(self, scopes, resolve_decorator=True):
@debug.increase_indent
def find(self, scopes, resolve_decorator=True, search_global=False):
if unicode(self.name_str) == 'None':
# Filter None, because it's really just a keyword, nobody wants to
# access it.
return []
names = self.filter_name(scopes)
types = self._names_to_types(names, resolve_decorator)
debug.dbg('finder._names_to_types: %s, old: %s', names, types)
if not names and not types \
and not (isinstance(self.name_str, pr.NamePart)
and isinstance(self.name_str.parent.parent, pr.Param)):
if not isinstance(self.name_str, (str, unicode)): # TODO Remove?
if search_global:
message = ("NameError: name '%s' is not defined."
% self.name_str)
analysis.add(self._evaluator, 'name-error', self.name_str,
message)
else:
analysis.add_attribute_error(self._evaluator,
self.scope, self.name_str)
debug.dbg('finder._names_to_types: %s -> %s', names, types)
return self._resolve_descriptors(types)
def scopes(self, search_global=False):
if search_global:
return get_names_of_scope(self._evaluator, self.scope, self.position)
else:
if isinstance(self.scope, er.Instance):
return self.scope.scope_generator()
else:
if isinstance(self.scope, er.Class):
# classes are only available directly via chaining?
# strange stuff...
names = self.scope.get_defined_names()
else:
names = _get_defined_names_for_position(self.scope, self.position)
return iter([(self.scope, names)])
return self.scope.scope_names_generator(self.position)
def filter_name(self, scope_generator):
def filter_name(self, scope_names_generator):
"""
Filters all variables of a scope (which are defined in the
`scope_generator`), until the name fits.
`scope_names_generator`), until the name fits.
"""
result = []
for nscope, name_list in scope_generator:
for name_list_scope, name_list in scope_names_generator:
break_scopes = []
if not isinstance(nscope, compiled.CompiledObject):
if not isinstance(name_list_scope, compiled.CompiledObject):
# Here is the position stuff happening (sorting of variables).
# Compiled objects don't need that, because there's only one
# reference.
name_list = sorted(name_list, key=lambda n: n.start_pos, reverse=True)
for name in name_list:
if unicode(self.name_str) != name.get_code():
continue
parpar = name.parent.parent
if name.parent.parent in break_scopes:
scope = name.parent.parent
if scope in break_scopes:
continue
# Exclude `arr[1] =` from the result set.
if not self._name_is_array_assignment(name):
result.append(name) # `arr[1] =` is not the definition
# for comparison we need the raw class
# this means that a definition was found and is not e.g.
# in if/else.
if result and self._name_is_break_scope(name):
#print result, name.parent, parpar, s
if isinstance(parpar, pr.Flow) \
or isinstance(parpar, pr.KeywordStatement) \
and parpar.name == 'global':
s = nscope.base if isinstance(nscope, er.Class) else nscope
if parpar == s:
break
else:
result.append(name)
if result and self._is_name_break_scope(name):
if self._does_scope_break_immediately(scope, name_list_scope):
break
break_scopes.append(parpar)
else:
break_scopes.append(scope)
if result:
break
debug.dbg('finder.filter_name "%s" in (%s-%s): %s@%s', self.name_str,
self.scope, nscope, u(result), self.position)
scope_txt = (self.scope if self.scope == name_list_scope
else '%s-%s' % (self.scope, name_list_scope))
debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str,
scope_txt, u(result), self.position)
return result
def _check_getattr(self, inst):
@@ -114,20 +124,50 @@ class NameFinder(object):
result = inst.execute_subscope_by_name('__getattribute__', [name])
return result
def _name_is_break_scope(self, name):
def _is_name_break_scope(self, name):
"""
Returns the parent of a name, which means the element which stands
behind a name.
Returns True except for nested imports and instance variables.
"""
par = name.parent
if par.isinstance(pr.Statement):
if isinstance(name, er.InstanceElement) and not name.is_class_var:
return False
elif isinstance(par, pr.Import) and len(par.namespace) > 1:
# TODO multi-level import non-breakable
elif isinstance(par, pr.Import) and par.is_nested():
return False
return True
def _does_scope_break_immediately(self, scope, name_list_scope):
"""
In comparison to everthing else, if/while/etc doesn't break directly,
because there are multiple different places in which a variable can be
defined.
"""
if isinstance(scope, pr.Flow) \
or isinstance(scope, pr.KeywordStatement) and scope.name == 'global':
# Check for `if foo is not None`, because Jedi is not interested in
# None values, so this is the only branch we actually care about.
# ATM it carries the same issue as the isinstance checks. It
# doesn't work with instance variables (self.foo).
if isinstance(scope, pr.Flow) and scope.command in ('if', 'while'):
try:
expression_list = scope.inputs[0].expression_list()
except IndexError:
pass
else:
p = precedence.create_precedence(expression_list)
if (isinstance(p, precedence.Precedence)
and p.operator.string == 'is not'
and p.right.get_code() == 'None'
and p.left.get_code() == unicode(self.name_str)):
return True
if isinstance(name_list_scope, er.Class):
name_list_scope = name_list_scope.base
return scope == name_list_scope
else:
return True
def _name_is_array_assignment(self, name):
if name.parent.isinstance(pr.Statement):
def is_execution(calls):
@@ -179,12 +219,15 @@ class NameFinder(object):
# global keyword handling.
types += evaluator.find_types(typ.parent.parent, str(name))
else:
types += self._remove_statements(typ)
types += self._remove_statements(typ, name)
else:
if isinstance(typ, pr.Class):
typ = er.Class(evaluator, typ)
elif isinstance(typ, pr.Function):
typ = er.Function(evaluator, typ)
elif isinstance(typ, pr.Module):
typ = er.ModuleWrapper(evaluator, typ)
if typ.isinstance(er.Function) and resolve_decorator:
typ = typ.get_decorated_func()
types.append(typ)
@@ -195,7 +238,7 @@ class NameFinder(object):
return types
def _remove_statements(self, stmt):
def _remove_statements(self, stmt, name):
"""
This is the part where statements are being stripped.
@@ -216,6 +259,16 @@ class NameFinder(object):
types += evaluator.eval_statement(stmt, seek_name=unicode(self.name_str))
# check for `except X as y` usages, because y needs to be instantiated.
p = stmt.parent
# TODO this looks really hacky, improve parser representation!
if isinstance(p, pr.Flow) and p.command == 'except' \
and p.inputs and p.inputs[0].as_names == [name]:
# TODO check for types that are not classes and add it to the
# static analysis report.
types = list(chain.from_iterable(
evaluator.execute(t) for t in types))
if check_instance is not None:
# class renames
types = [er.InstanceElement(evaluator, check_instance, a, True)
@@ -230,7 +283,9 @@ class NameFinder(object):
cls = func.parent.get_parent_until((pr.Class, pr.Function))
if isinstance(cls, pr.Class) and param.position_nr == 0:
from jedi.evaluate.param import ExecutedParam
if isinstance(cls, pr.Class) and param.position_nr == 0 \
and not isinstance(param, ExecutedParam):
# This is where we add self - if it has never been
# instantiated.
if isinstance(self.scope, er.InstanceElement):
@@ -267,8 +322,7 @@ class NameFinder(object):
return res_new + evaluator.eval_statement(param, seek_name=unicode(self.name_str))
def _handle_for_loops(self, loop):
# Take the first statement (for has always only
# one, remember `in`). And follow it.
# Take the first statement (for has always only one`in`).
if not loop.inputs:
return []
result = iterable.get_iterator_types(self._evaluator.eval_statement(loop.inputs[0]))
@@ -344,40 +398,11 @@ def _check_isinstance_type(evaluator, stmt, search_name_part):
result = []
for c in evaluator.eval_call(classes[0]):
for typ in (c.get_index_types() if isinstance(c, iterable.Array) else [c]):
for typ in (c.values() if isinstance(c, iterable.Array) else [c]):
result += evaluator.execute(typ)
return result
def _get_defined_names_for_position(scope, position=None, start_scope=None):
"""
Return filtered version of ``scope.get_defined_names()``.
This function basically does what :meth:`scope.get_defined_names
<parsing_representation.Scope.get_defined_names>` does.
- If `position` is given, delete all names defined after `position`.
- For special objects like instances, `position` is ignored and all
names are returned.
:type scope: :class:`parsing_representation.IsScope`
:param scope: Scope in which names are searched.
:param position: The position as a line/column tuple, default is infinity.
"""
names = scope.get_defined_names()
# Instances have special rules, always return all the possible completions,
# because class variables are always valid and the `self.` variables, too.
if not position or isinstance(scope, (iterable.Array, er.Instance, compiled.CompiledObject)) \
or start_scope != scope \
and isinstance(start_scope, (pr.Function, er.FunctionExecution)):
return names
names_new = []
for n in names:
if n.start_pos[0] is not None and n.start_pos < position:
names_new.append(n)
return names_new
def get_names_of_scope(evaluator, scope, position=None, star_search=True, include_builtin=True):
"""
Get all completions (names) possible for the current scope. The star search
@@ -410,22 +435,32 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
is the most outer scope.
>>> pairs[1]
(<SubModule: None@1-5>, [<Name: x@2,0>, <Name: func@3,4>])
(<ModuleWrapper: <SubModule: None@1-5>>, [<Name: x@2,0>, <Name: func@3,4>])
After that we have a few underscore names that have been defined
>>> pairs[2]
(<ModuleWrapper: <SubModule: None@1-5>>, [<FakeName: __file__@0,0>, ...])
Finally, it yields names from builtin, if `include_builtin` is
true (default).
>>> pairs[2] #doctest: +ELLIPSIS
>>> pairs[3] #doctest: +ELLIPSIS
(<Builtin: ...builtin...>, [<CompiledName: ...>, ...])
:rtype: [(pr.Scope, [pr.Name])]
:return: Return an generator that yields a pair of scope and names.
"""
if isinstance(scope, pr.ListComprehension):
position = scope.parent.start_pos
in_func_scope = scope
non_flow = scope.get_parent_until(pr.Flow, reverse=True)
while scope:
if isinstance(scope, pr.SubModule) and scope.parent:
# we don't want submodules to report if we have modules.
# We don't want submodules to report if we have modules.
# As well as some non-scopes, which are parents of list comprehensions.
if isinstance(scope, pr.SubModule) and scope.parent or not scope.is_scope():
scope = scope.parent
continue
# `pr.Class` is used, because the parent is never `Class`.
@@ -437,15 +472,13 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
and non_flow.isinstance(er.Function)
or isinstance(scope, compiled.CompiledObject)
and scope.type() == 'class' and in_func_scope != scope):
try:
if isinstance(scope, er.Instance):
for g in scope.scope_generator():
yield g
else:
yield scope, _get_defined_names_for_position(scope, position, in_func_scope)
except StopIteration:
reraise(common.MultiLevelStopIteration, sys.exc_info()[2])
if scope.isinstance(pr.ForFlow) and scope.is_list_comp:
if isinstance(scope, (pr.SubModule, fast.Module)):
scope = er.ModuleWrapper(evaluator, scope)
for g in scope.scope_names_generator(position):
yield g
if scope.isinstance(pr.ListComprehension):
# is a list comprehension
yield scope, scope.get_defined_names(is_internal_call=True)
@@ -454,6 +487,9 @@ def get_names_of_scope(evaluator, scope, position=None, star_search=True, includ
# results.
if scope and scope.isinstance(er.Function, pr.Function, er.FunctionExecution):
in_func_scope = scope
if in_func_scope != scope \
and isinstance(in_func_scope, (pr.Function, er.FunctionExecution)):
position = None
# Add star imports.
if star_search:

View File

@@ -161,6 +161,9 @@ def scan_statement_for_calls(stmt, search_name, assignment_details=False):
if s_new.execution is not None:
result += scan_array(s_new.execution, search_name)
s_new = s_new.next
elif isinstance(c, pr.ListComprehension):
for s in c.stmt, c.middle, c.input:
result += scan_statement_for_calls(s, search_name)
return result
@@ -170,20 +173,44 @@ class FakeSubModule():
class FakeArray(pr.Array):
def __init__(self, values, parent, arr_type=pr.Array.LIST):
def __init__(self, values, parent=None, arr_type=pr.Array.LIST):
p = (0, 0)
super(FakeArray, self).__init__(FakeSubModule, p, arr_type, parent)
self.values = values
class FakeStatement(pr.Statement):
def __init__(self, expression_list, start_pos=(0, 0)):
def __init__(self, expression_list, start_pos=(0, 0), parent=None):
p = start_pos
super(FakeStatement, self).__init__(FakeSubModule, expression_list, p, p)
self.set_expression_list(expression_list)
self.parent = parent
class FakeImport(pr.Import):
def __init__(self, name, parent, level=0):
p = 0, 0
super(FakeImport, self).__init__(FakeSubModule, p, p, name,
relative_count=level)
self.parent = parent
class FakeName(pr.Name):
def __init__(self, name, parent=None):
def __init__(self, name_or_names, parent=None):
p = 0, 0
super(FakeName, self).__init__(FakeSubModule, [(name, p)], p, p, parent)
if isinstance(name_or_names, list):
names = [(n, p) for n in name_or_names]
else:
names = [(name_or_names, p)]
super(FakeName, self).__init__(FakeSubModule, names, p, p, parent)
def stmts_to_stmt(statements):
"""
Sometimes we want to have something like a result_set and unite some
statements in one.
"""
if len(statements) == 1:
return statements[0]
array = FakeArray(statements, arr_type=pr.Array.NOARRAY)
return FakeStatement([array])

View File

@@ -22,16 +22,19 @@ from jedi import debug
from jedi import cache
from jedi.parser import fast
from jedi.parser import representation as pr
from jedi.evaluate import sys_path
from jedi.evaluate.sys_path import get_sys_path, sys_path_with_modifications
from jedi.evaluate import helpers
from jedi import settings
from jedi.common import source_to_unicode
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.cache import memoize_default, NO_DEFAULT
class ModuleNotFound(Exception):
pass
def __init__(self, name_part):
super(ModuleNotFound, self).__init__()
self.name_part = name_part
class ImportWrapper(pr.Base):
@@ -45,11 +48,18 @@ class ImportWrapper(pr.Base):
GlobalNamespace = GlobalNamespace()
def __init__(self, evaluator, import_stmt, is_like_search=False, kill_count=0,
direct_resolve=False, is_just_from=False):
nested_resolve=False, is_just_from=False):
"""
:param is_like_search: If the wrapper is used for autocompletion.
:param kill_count: Placement of the import, sometimes we only want to
resole a part of the import.
:param nested_resolve: Resolves nested imports fully.
:param is_just_from: Bool if the second part is missing.
"""
self._evaluator = evaluator
self.import_stmt = import_stmt
self.is_like_search = is_like_search
self.direct_resolve = direct_resolve
self.nested_resolve = nested_resolve
self.is_just_from = is_just_from
self.is_partial_import = bool(max(0, kill_count))
@@ -59,11 +69,10 @@ class ImportWrapper(pr.Base):
if import_stmt.from_ns:
import_path += import_stmt.from_ns.names
if import_stmt.namespace:
if self._is_nested_import() and not direct_resolve:
if self.import_stmt.is_nested() and not nested_resolve:
import_path.append(import_stmt.namespace.names[0])
else:
import_path += import_stmt.namespace.names
import_path = [str(name_part) for name_part in import_path]
for i in range(kill_count + int(is_like_search)):
if import_path:
@@ -78,7 +87,7 @@ class ImportWrapper(pr.Base):
@property
def import_path(self):
return self._importer.import_path
return self._importer.str_import_path()
def get_defined_names(self, on_import_stmt=False):
names = []
@@ -94,15 +103,17 @@ class ImportWrapper(pr.Base):
names += self._get_module_names([path])
if self._is_relative_import():
rel_path = self._importer.get_relative_path() + '/__init__.py'
rel_path = os.path.join(self._importer.get_relative_path(),
'__init__.py')
if os.path.exists(rel_path):
m = load_module(rel_path)
m = _load_module(rel_path)
names += m.get_defined_names()
else:
if on_import_stmt and isinstance(scope, pr.Module) \
and scope.path.endswith('__init__.py'):
pkg_path = os.path.dirname(scope.path)
paths = self._importer.namespace_packages(pkg_path, self.import_path)
paths = self._importer.namespace_packages(pkg_path,
self.import_path)
names += self._get_module_names([pkg_path] + paths)
if self.is_just_from:
# In the case of an import like `from x.` we don't need to
@@ -147,34 +158,6 @@ class ImportWrapper(pr.Base):
names.append(self._generate_name(name))
return names
def _is_nested_import(self):
"""
This checks for the special case of nested imports, without aliases and
from statement::
import foo.bar
"""
return not self.import_stmt.alias and not self.import_stmt.from_ns \
and len(self.import_stmt.namespace.names) > 1 \
and not self.direct_resolve
def _get_nested_import(self, parent):
"""
See documentation of `self._is_nested_import`.
Generates an Import statement, that can be used to fake nested imports.
"""
i = self.import_stmt
# This is not an existing Import statement. Therefore, set position to
# 0 (0 is not a valid line number).
zero = (0, 0)
names = [(unicode(name_part), name_part.start_pos)
for name_part in i.namespace.names[1:]]
n = pr.Name(i._sub_module, names, zero, zero, self.import_stmt)
new = pr.Import(i._sub_module, zero, zero, n)
new.parent = parent
debug.dbg('Generated a nested import: %s', new)
return new
def _is_relative_import(self):
return bool(self.import_stmt.relative_count)
@@ -185,13 +168,19 @@ class ImportWrapper(pr.Base):
if self.import_path:
try:
scope, rest = self._importer.follow_file_system()
except ModuleNotFound:
debug.warning('Module not found: %s', self.import_stmt)
module, rest = self._importer.follow_file_system()
except ModuleNotFound as e:
analysis.add(self._evaluator, 'import-error', e.name_part)
return []
scopes = [scope]
scopes += remove_star_imports(self._evaluator, scope)
if self.import_stmt.is_nested() and not self.nested_resolve:
scopes = [NestedImportModule(module, self.import_stmt)]
else:
scopes = [module]
star_imports = remove_star_imports(self._evaluator, module)
if star_imports:
scopes = [StarImportModule(scopes[0], star_imports)]
# follow the rest of the import (not FS -> classes, functions)
if len(rest) > 1 or rest and self.is_like_search:
@@ -202,7 +191,7 @@ class ImportWrapper(pr.Base):
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
scopes = self._evaluator.follow_path(iter(rest), [scope], scope)
scopes = self._evaluator.follow_path(iter(rest), [module], module)
elif rest:
if is_goto:
scopes = list(chain.from_iterable(
@@ -212,16 +201,74 @@ class ImportWrapper(pr.Base):
scopes = list(chain.from_iterable(
self._evaluator.follow_path(iter(rest), [s], s)
for s in scopes))
if self._is_nested_import():
scopes.append(self._get_nested_import(scope))
else:
scopes = [ImportWrapper.GlobalNamespace]
debug.dbg('after import: %s', scopes)
if not scopes:
analysis.add(self._evaluator, 'import-error',
self._importer.import_path[-1])
self._evaluator.recursion_detector.pop_stmt()
return scopes
class NestedImportModule(pr.Module):
def __init__(self, module, nested_import):
self._module = module
self._nested_import = nested_import
def _get_nested_import_name(self):
"""
Generates an Import statement, that can be used to fake nested imports.
"""
i = self._nested_import
# This is not an existing Import statement. Therefore, set position to
# 0 (0 is not a valid line number).
zero = (0, 0)
names = [unicode(name_part) for name_part in i.namespace.names[1:]]
name = helpers.FakeName(names, self._nested_import)
new = pr.Import(i._sub_module, zero, zero, name)
new.parent = self._module
debug.dbg('Generated a nested import: %s', new)
return helpers.FakeName(str(i.namespace.names[1]), new)
def _get_defined_names(self):
"""
NesteImportModule don't seem to be actively used, right now.
However, they might in the future. If we do more sophisticated static
analysis checks.
"""
nested = self._get_nested_import_name()
return self._module.get_defined_names() + [nested]
def __getattr__(self, name):
return getattr(self._module, name)
def __repr__(self):
return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
self._nested_import)
class StarImportModule(pr.Module):
"""
Used if a module contains star imports.
"""
def __init__(self, module, star_import_modules):
self._module = module
self.star_import_modules = star_import_modules
def scope_names_generator(self, position=None):
for module, names in self._module.scope_names_generator(position):
yield module, names
for s in self.star_import_modules:
yield s, s.get_defined_names()
def __getattr__(self, name):
return getattr(self._module, name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._module)
def get_importer(evaluator, import_path, module, level=0):
"""
Checks the evaluator caches first, which resembles the ``sys.modules``
@@ -262,6 +309,10 @@ class _Importer(object):
# TODO abspath
self.file_path = os.path.dirname(path) if path is not None else None
def str_import_path(self):
"""Returns the import path as pure strings instead of NameParts."""
return tuple(str(name_part) for name_part in self.import_path)
def get_relative_path(self):
path = self.file_path
for i in range(self.level - 1):
@@ -277,11 +328,11 @@ class _Importer(object):
if self.import_path:
parts = self.file_path.split(os.path.sep)
for i, p in enumerate(parts):
if p == self.import_path[0]:
if p == unicode(self.import_path[0]):
new = os.path.sep.join(parts[:i])
in_path.append(new)
return in_path + sys_path.sys_path_with_modifications(self.module)
return in_path + sys_path_with_modifications(self._evaluator, self.module)
def follow(self, evaluator):
scope, rest = self.follow_file_system()
@@ -308,9 +359,13 @@ class _Importer(object):
sys_path_mod.append(temp_path)
old_path, temp_path = temp_path, os.path.dirname(temp_path)
else:
sys_path_mod = list(sys_path.get_sys_path())
sys_path_mod = list(get_sys_path())
return self._follow_sys_path(sys_path_mod)
from jedi.evaluate.representation import ModuleWrapper
module, rest = self._follow_sys_path(sys_path_mod)
if isinstance(module, pr.Module):
return ModuleWrapper(self._evaluator, module), rest
return module, rest
def namespace_packages(self, found_path, import_path):
"""
@@ -372,7 +427,7 @@ class _Importer(object):
rest = []
for i, s in enumerate(self.import_path):
try:
current_namespace = follow_str(current_namespace[1], s)
current_namespace = follow_str(current_namespace[1], unicode(s))
except ImportError:
_continue = False
if self.level >= 1 and len(self.import_path) == 1:
@@ -381,10 +436,10 @@ class _Importer(object):
with common.ignored(ImportError):
current_namespace = follow_str(rel_path, '__init__')
elif current_namespace[2]: # is a package
for n in self.namespace_packages(current_namespace[1],
self.import_path[:i]):
path = self.str_import_path()[:i]
for n in self.namespace_packages(current_namespace[1], path):
try:
current_namespace = follow_str(n, s)
current_namespace = follow_str(n, unicode(s))
if current_namespace[1]:
_continue = True
break
@@ -393,10 +448,10 @@ class _Importer(object):
if not _continue:
if current_namespace[1]:
rest = self.import_path[i:]
rest = self.str_import_path()[i:]
break
else:
raise ModuleNotFound('The module you searched has not been found')
raise ModuleNotFound(s)
path = current_namespace[1]
is_package_directory = current_namespace[2]
@@ -405,18 +460,18 @@ class _Importer(object):
if is_package_directory or current_namespace[0]:
# is a directory module
if is_package_directory:
path += '/__init__.py'
path = os.path.join(path, '__init__.py')
with open(path, 'rb') as f:
source = f.read()
else:
source = current_namespace[0].read()
current_namespace[0].close()
return load_module(path, source), rest
return _load_module(path, source, sys_path=sys_path), rest
else:
return load_module(name=path), rest
return _load_module(name=path, sys_path=sys_path), rest
def strip_imports(evaluator, scopes):
def follow_imports(evaluator, scopes):
"""
Here we strip the imports - they don't get resolved necessarily.
Really used anymore? Merge with remove_star_imports?
@@ -424,7 +479,8 @@ def strip_imports(evaluator, scopes):
result = []
for s in scopes:
if isinstance(s, pr.Import):
result += ImportWrapper(evaluator, s).follow()
for r in ImportWrapper(evaluator, s).follow():
result.append(r)
else:
result.append(s)
return result
@@ -439,7 +495,9 @@ def remove_star_imports(evaluator, scope, ignored_modules=()):
and follow these modules.
"""
modules = strip_imports(evaluator, (i for i in scope.get_imports() if i.star))
if isinstance(scope, StarImportModule):
return scope.star_import_modules
modules = follow_imports(evaluator, (i for i in scope.get_imports() if i.star))
new = []
for m in modules:
if m not in ignored_modules:
@@ -450,9 +508,11 @@ def remove_star_imports(evaluator, scope, ignored_modules=()):
return set(modules)
def load_module(path=None, source=None, name=None):
def _load_module(path=None, source=None, name=None, sys_path=None):
def load(source):
if path is not None and path.endswith('.py'):
dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
if path is not None and path.endswith('.py') \
and not dotted_path in settings.auto_import_modules:
if source is None:
with open(path, 'rb') as f:
source = f.read()
@@ -484,7 +544,7 @@ def get_modules_containing_name(mods, name):
with open(path, 'rb') as f:
source = source_to_unicode(f.read())
if name in source:
return load_module(path, source)
return _load_module(path, source)
# skip non python modules
mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject))

View File

@@ -32,6 +32,7 @@ from jedi.evaluate import helpers
from jedi.evaluate import precedence
from jedi.evaluate.cache import CachedMetaClass, memoize_default, NO_DEFAULT
from jedi.cache import underscore_memoization
from jedi.evaluate import analysis
class Generator(use_metaclass(CachedMetaClass, pr.Base)):
@@ -43,7 +44,7 @@ class Generator(use_metaclass(CachedMetaClass, pr.Base)):
self.var_args = var_args
@underscore_memoization
def get_defined_names(self):
def _get_defined_names(self):
"""
Returns a list of names that define a generator, which can return the
content of a generator.
@@ -56,14 +57,25 @@ class Generator(use_metaclass(CachedMetaClass, pr.Base)):
else:
yield name
def scope_names_generator(self, position=None):
yield self, self._get_defined_names()
def iter_content(self):
""" returns the content of __iter__ """
return self._evaluator.execute(self.func, self.var_args, True)
def get_index_types(self, index=None):
debug.warning('Tried to get array access on a generator: %s', self)
def get_index_types(self, index_array):
#debug.warning('Tried to get array access on a generator: %s', self)
analysis.add(self._evaluator, 'type-error-generator', index_array)
return []
def get_exact_index_types(self, index):
"""
Exact lookups are used for tuple lookups, which are perfectly fine if
used with generators.
"""
return [self.iter_content()[index]]
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'parent', 'get_imports',
'asserts', 'doc', 'docstr', 'get_parent_until',
@@ -89,6 +101,15 @@ class GeneratorMethod(object):
return getattr(self._builtin_func, name)
class GeneratorComprehension(Generator):
def __init__(self, evaluator, comprehension):
super(GeneratorComprehension, self).__init__(evaluator, comprehension, None)
self.comprehension = comprehension
def iter_content(self):
return self._evaluator.eval_statement_element(self.comprehension)
class Array(use_metaclass(CachedMetaClass, pr.Base)):
"""
Used as a mirror to pr.Array, if needed. It defines some getter
@@ -99,25 +120,29 @@ class Array(use_metaclass(CachedMetaClass, pr.Base)):
self._array = array
@memoize_default(NO_DEFAULT)
def get_index_types(self, indexes=()):
def get_index_types(self, index_array=()):
"""
Get the types of a specific index or all, if not given.
:param indexes: The index input types.
"""
result = []
indexes = create_indexes_or_slices(self._evaluator, index_array)
if [index for index in indexes if isinstance(index, Slice)]:
return [self]
if len(indexes) == 1:
# This is indexing only one element, with a fixed index number,
# otherwise it just ignores the index (e.g. [1+1]).
index = indexes[0]
lookup_done = False
types = []
for index in indexes:
if isinstance(index, compiled.CompiledObject) \
and isinstance(index.obj, (int, str, unicode)):
with common.ignored(KeyError, IndexError, TypeError):
return self.get_exact_index_types(index.obj)
types += self.get_exact_index_types(index.obj)
lookup_done = True
return types if lookup_done else self.values()
@memoize_default(NO_DEFAULT)
def values(self):
    """
    Return the types of all values in this array, including types that
    are added later (found by ``check_array_additions``, e.g. ``append``).
    """
    result = list(_follow_values(self._evaluator, self._array.values))
    result += check_array_additions(self._evaluator, self)
    return result
@@ -150,7 +175,7 @@ class Array(use_metaclass(CachedMetaClass, pr.Base)):
values = [self._array.values[index]]
return _follow_values(self._evaluator, values)
def get_defined_names(self):
def scope_names_generator(self, position=None):
"""
This method generates all `ArrayMethod` for one pr.Array.
It returns e.g. for a list: append, pop, ...
@@ -158,8 +183,8 @@ class Array(use_metaclass(CachedMetaClass, pr.Base)):
# `array.type` is a string with the type, e.g. 'list'.
scope = self._evaluator.find_types(compiled.builtin, self._array.type)[0]
scope = self._evaluator.execute(scope)[0] # builtins only have one class
names = scope.get_defined_names()
return [ArrayMethod(n) for n in names]
for _, names in scope.scope_names_generator():
yield self, [ArrayMethod(n) for n in names]
@common.safe_property
def parent(self):
@@ -174,14 +199,11 @@ class Array(use_metaclass(CachedMetaClass, pr.Base)):
raise AttributeError('Strange access on %s: %s.' % (self, name))
return getattr(self._array, name)
def __getitem__(self):
return self._array.__getitem__()
def __iter__(self):
return self._array.__iter__()
return iter(self._array)
def __len__(self):
return self._array.__len__()
return len(self._array)
def __repr__(self):
return "<e%s of %s>" % (type(self).__name__, self._array)
@@ -209,6 +231,26 @@ class ArrayMethod(object):
return "<%s of %s>" % (type(self).__name__, self.name)
class MergedArray(Array):
    """
    Combines several ``Array`` objects into one (e.g. the result of adding
    two lists), behaving like a single array to the rest of the evaluator.
    """

    def __init__(self, evaluator, arrays):
        # Delegate base-class bookkeeping to the last merged array.
        super(MergedArray, self).__init__(evaluator, arrays[-1]._array)
        self._arrays = arrays

    def get_index_types(self, mixed_index):
        # A precise index cannot be resolved across merged arrays, so
        # collect the value types of every part.
        result = []
        for arr in self._arrays:
            result += arr.values()
        return result

    def get_exact_index_types(self, mixed_index):
        # Exact lookups are impossible on a merged array.
        raise IndexError

    def __iter__(self):
        for arr in self._arrays:
            for entry in arr:
                yield entry

    def __len__(self):
        total = 0
        for arr in self._arrays:
            total += len(arr)
        return total
def get_iterator_types(inputs):
"""Returns the types of any iterator (arrays, yields, __iter__, etc)."""
iterators = []
@@ -228,22 +270,22 @@ def get_iterator_types(inputs):
result = []
from jedi.evaluate.representation import Instance
for gen in iterators:
if isinstance(gen, Array):
for it in iterators:
if isinstance(it, Array):
# Array is a little bit special, since this is an internal
# array, but there's also the list builtin, which is
# another thing.
result += gen.get_index_types()
elif isinstance(gen, Instance):
result += it.values()
elif isinstance(it, Instance):
# __iter__ returned an instance.
name = '__next__' if is_py3 else 'next'
try:
result += gen.execute_subscope_by_name(name)
result += it.execute_subscope_by_name(name)
except KeyError:
debug.warning('Instance has no __next__ function in %s.', gen)
debug.warning('Instance has no __next__ function in %s.', it)
else:
# is a generator
result += gen.iter_content()
# Is a generator.
result += it.iter_content()
return result

View File

@@ -1,175 +1,375 @@
import copy
from jedi._compatibility import unicode, zip_longest
from jedi.parser import representation as pr
from jedi.evaluate import iterable
from jedi import common
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate.compiled import CompiledObject
class ExecutedParam(pr.Param):
    """A ``pr.Param`` copy that remembers the ``var_args`` it came from."""

    def __init__(self):
        """Don't use this method, it's just here to overwrite the old one."""
        pass

    @classmethod
    def from_param(cls, param, parent, var_args):
        """
        Build an ``ExecutedParam`` as a shallow attribute copy of *param*.

        Walks the MRO of *param* and copies every ``__slots__`` attribute,
        skipping classes whose ``__slots__`` merely repeats the previous
        one, then records where the param originated.
        """
        instance = cls()
        before = ()
        # The loop variable previously shadowed the classmethod's `cls`
        # parameter; renamed to `klass` (behavior unchanged, since `cls`
        # was not used after the loop).
        for klass in param.__class__.__mro__:
            with common.ignored(AttributeError):
                # Some classes share the identical __slots__ tuple; don't
                # copy those attributes twice.
                if before == klass.__slots__:
                    continue
                before = klass.__slots__
                for name in before:
                    setattr(instance, name, getattr(param, name))
        instance.original_param = param
        instance.is_generated = True
        instance.parent = parent
        instance.var_args = var_args
        return instance
def _get_calling_var_args(evaluator, var_args):
    """
    Follow ``*args``/``**kwargs`` forwarding backwards to locate the
    ``var_args`` of the original call site.

    Returns ``None`` when the chain ends in a plain ``pr.Param`` without
    any input, i.e. there is no actual call to report against.
    """
    old_var_args = None
    while var_args != old_var_args:
        old_var_args = var_args
        for argument in reversed(var_args):
            if not isinstance(argument, pr.Statement):
                continue
            exp_list = argument.expression_list()
            # Only a lone `*x` / `**x` argument can forward var_args.
            if len(exp_list) != 2 or exp_list[0] not in ('*', '**'):
                continue
            names, _ = evaluator.goto(argument, [exp_list[1].get_code()])
            if len(names) != 1:
                break
            param = names[0].parent
            if not isinstance(param, ExecutedParam):
                if isinstance(param, pr.Param):
                    # There is no calling var_args in this case - there's just
                    # a param without any input.
                    return None
                break
            # We never want var_args to be a tuple. This should be enough for
            # now, we can change it later, if we need to.
            if isinstance(param.var_args, pr.Array):
                var_args = param.var_args
    return var_args
def get_params(evaluator, func, var_args):
def gen_param_name_copy(param, keys=(), values=(), array_type=None):
"""
Create a param with the original scope (of varargs) as parent.
"""
if isinstance(var_args, pr.Array):
parent = var_args.parent
start_pos = var_args.start_pos
else:
parent = func
start_pos = 0, 0
new_param = copy.copy(param)
new_param.is_generated = True
if parent is not None:
new_param.parent = parent
# create an Array (-> needed for *args/**kwargs tuples/dicts)
arr = pr.Array(helpers.FakeSubModule, start_pos, array_type, parent)
arr.values = values
key_stmts = []
for key in keys:
key_stmts.append(helpers.FakeStatement([key], start_pos))
arr.keys = key_stmts
arr.type = array_type
new_param.set_expression_list([arr])
name = copy.copy(param.get_name())
name.parent = new_param
return name
result = []
start_offset = 0
from jedi.evaluate.representation import InstanceElement
if isinstance(func, InstanceElement):
# Care for self -> just exclude it and add the instance
start_offset = 1
self_name = copy.copy(func.params[0].get_name())
self_name.parent = func.instance
result.append(self_name)
param_dict = {}
for param in func.params:
param_dict[str(param.get_name())] = param
# There may be calls, which don't fit all the params, this just ignores it.
var_arg_iterator = common.PushBackIterator(_var_args_iterator(evaluator, var_args))
unpacked_va = _unpack_var_args(evaluator, var_args, func)
var_arg_iterator = common.PushBackIterator(iter(unpacked_va))
non_matching_keys = []
keys_used = set()
keys_only = False
for param in func.params[start_offset:]:
va_values = None
had_multiple_value_error = False
for param in func.params:
# The value and key can both be null. There, the defaults apply.
# args / kwargs will just be empty arrays / dicts, respectively.
# Wrong value count is just ignored. If you try to test cases that are
# not allowed in Python, Jedi will maybe not show any completions.
key, value = next(var_arg_iterator, (None, None))
key, va_values = next(var_arg_iterator, (None, []))
while key:
keys_only = True
k = unicode(key)
try:
key_param = param_dict[str(key)]
key_param = param_dict[unicode(key)]
except KeyError:
non_matching_keys.append((key, value))
non_matching_keys.append((key, va_values))
else:
keys_used.add(str(key))
result.append(gen_param_name_copy(key_param, values=[value]))
key, value = next(var_arg_iterator, (None, None))
result.append(_gen_param_name_copy(func, var_args, key_param,
values=va_values))
if k in keys_used:
had_multiple_value_error = True
m = ("TypeError: %s() got multiple values for keyword argument '%s'."
% (func.name, k))
calling_va = _get_calling_var_args(evaluator, var_args)
if calling_va is not None:
analysis.add(evaluator, 'type-error-multiple-values',
calling_va, message=m)
else:
keys_used.add(k)
key, va_values = next(var_arg_iterator, (None, []))
expression_list = param.expression_list()
keys = []
values = []
array_type = None
ignore_creation = False
has_default_value = False
if param.stars == 1:
# *args param
array_type = pr.Array.TUPLE
if value:
values.append(value)
for key, value in var_arg_iterator:
lst_values = [va_values]
for key, va_values in var_arg_iterator:
# Iterate until a key argument is found.
if key:
var_arg_iterator.push_back((key, value))
var_arg_iterator.push_back((key, va_values))
break
values.append(value)
lst_values.append(va_values)
if lst_values[0]:
values = [helpers.stmts_to_stmt(v) for v in lst_values]
elif param.stars == 2:
# **kwargs param
array_type = pr.Array.DICT
if non_matching_keys:
keys, values = zip(*non_matching_keys)
elif not keys_only:
values = [helpers.stmts_to_stmt(list(v)) for v in values]
non_matching_keys = []
else:
# normal param
if value is not None:
values = [value]
if va_values:
values = va_values
else:
if param.assignment_details:
# No value: return the default values.
ignore_creation = True
# No value: Return the default values.
has_default_value = True
result.append(param.get_name())
# TODO is this allowed? it changes it long time.
param.is_generated = True
else:
# If there is no assignment detail, that means there is no
# assignment, just the result. Therefore nothing has to be
# returned.
# No value: Return an empty container
values = []
if not keys_only and isinstance(var_args, pr.Array):
calling_va = _get_calling_var_args(evaluator, var_args)
if calling_va is not None:
m = _error_argument_count(func, len(unpacked_va))
analysis.add(evaluator, 'type-error-too-few-arguments',
calling_va, message=m)
# Just ignore all the params that are without a key, after one keyword
# argument was set.
if not ignore_creation and (not keys_only or expression_list[0] == '**'):
keys_used.add(str(key))
result.append(gen_param_name_copy(param, keys=keys, values=values,
array_type=array_type))
# Now add to result if it's not one of the previously covered cases.
if not has_default_value and (not keys_only or param.stars == 2):
keys_used.add(unicode(param.get_name()))
result.append(_gen_param_name_copy(func, var_args, param,
keys=keys, values=values,
array_type=array_type))
if keys_only:
# sometimes param arguments are not completely written (which would
# create an Exception, but we have to handle that).
# All arguments should be handed over to the next function. It's not
# about the values inside, it's about the names. Jedi needs to now that
# there's nothing to find for certain names.
for k in set(param_dict) - keys_used:
result.append(gen_param_name_copy(param_dict[k]))
param = param_dict[k]
result.append(_gen_param_name_copy(func, var_args, param))
if not (non_matching_keys or had_multiple_value_error
or param.stars or param.assignment_details):
# add a warning only if there's not another one.
calling_va = _get_calling_var_args(evaluator, var_args)
if calling_va is not None:
m = _error_argument_count(func, len(unpacked_va))
analysis.add(evaluator, 'type-error-too-few-arguments',
calling_va, message=m)
for key, va_values in non_matching_keys:
m = "TypeError: %s() got an unexpected keyword argument '%s'." \
% (func.name, key)
for value in va_values:
analysis.add(evaluator, 'type-error-keyword-argument', value, message=m)
remaining_params = list(var_arg_iterator)
if remaining_params:
m = _error_argument_count(func, len(unpacked_va))
for p in remaining_params[0][1]:
analysis.add(evaluator, 'type-error-too-many-arguments',
p, message=m)
return result
def _var_args_iterator(evaluator, var_args):
def _unpack_var_args(evaluator, var_args, func):
"""
Yields a key/value pair, the key is None, if its not a named arg.
"""
argument_list = []
from jedi.evaluate.representation import InstanceElement
if isinstance(func, InstanceElement):
# Include self at this place.
argument_list.append((None, [helpers.FakeStatement([func.instance])]))
# `var_args` is typically an Array, and not a list.
for stmt in var_args:
for stmt in _reorder_var_args(var_args):
if not isinstance(stmt, pr.Statement):
if stmt is None:
yield None, None
argument_list.append((None, []))
# TODO generate warning?
continue
old = stmt
# generate a statement if it's not already one.
stmt = helpers.FakeStatement([old])
# *args
expression_list = stmt.expression_list()
if not len(expression_list):
continue
# *args
if expression_list[0] == '*':
# *args must be some sort of an array, otherwise -> ignore
for array in evaluator.eval_expression_list(expression_list[1:]):
if isinstance(array, iterable.Array):
for field_stmt in array: # yield from plz!
yield None, field_stmt
elif isinstance(array, iterable.Generator):
for field_stmt in array.iter_content():
yield None, helpers.FakeStatement([field_stmt])
arrays = evaluator.eval_expression_list(expression_list[1:])
iterators = [_iterate_star_args(evaluator, a, expression_list[1:], func)
for a in arrays]
for values in list(zip_longest(*iterators)):
argument_list.append((None, [v for v in values if v is not None]))
# **kwargs
elif expression_list[0] == '**':
dct = {}
for array in evaluator.eval_expression_list(expression_list[1:]):
if isinstance(array, iterable.Array):
for key_stmt, value_stmt in array.items():
# first index, is the key if syntactically correct
call = key_stmt.expression_list()[0]
if isinstance(call, pr.Name):
yield call, value_stmt
elif isinstance(call, pr.Call):
yield call.name, value_stmt
# Merge multiple kwargs dictionaries, if used with dynamic
# parameters.
s = _star_star_dict(evaluator, array, expression_list[1:], func)
for name, (key, value) in s.items():
try:
dct[name][1].add(value)
except KeyError:
dct[name] = key, set([value])
for key, values in dct.values():
# merge **kwargs/*args also for dynamic parameters
for i, p in enumerate(func.params):
if str(p.get_name()) == str(key) and not p.stars:
try:
k, vs = argument_list[i]
except IndexError:
pass
else:
if k is None: # k would imply a named argument
# Don't merge if they orginate at the same
# place. -> type-error-multiple-values
if [v.parent for v in values] != [v.parent for v in vs]:
vs.extend(values)
break
else:
# default is to merge
argument_list.append((key, values))
# Normal arguments (including key arguments).
else:
if stmt.assignment_details:
key_arr, op = stmt.assignment_details[0]
# Filter error tokens
key_arr = [x for x in key_arr if isinstance(x, pr.Call)]
# named parameter
if key_arr and isinstance(key_arr[0], pr.Call):
yield key_arr[0].name, stmt
argument_list.append((key_arr[0].name, [stmt]))
else:
yield None, stmt
argument_list.append((None, [stmt]))
return argument_list
def _reorder_var_args(var_args):
    """
    Reordering var_args is necessary, because star args sometimes appear after
    named argument, but in the actual order it's prepended.

    Builds and returns a new list; *var_args* itself is not modified.
    """
    named_index = None
    new_args = []
    for i, stmt in enumerate(var_args):
        if isinstance(stmt, pr.Statement):
            if named_index is None and stmt.assignment_details:
                # Remember where the first keyword argument appeared.
                named_index = i
            if named_index is not None:
                expression_list = stmt.expression_list()
                if expression_list and expression_list[0] == '*':
                    # Move this *args entry in front of the keyword args.
                    new_args.insert(named_index, stmt)
                    named_index += 1
                    continue
        new_args.append(stmt)
    return new_args
def _iterate_star_args(evaluator, array, expression_list, func):
    """
    Yield the statements produced by unpacking *array* as ``*args``.

    Adds a ``type-error-star`` analysis entry when *array* is not an
    unpackable sequence.
    """
    from jedi.evaluate.representation import Instance
    if isinstance(array, iterable.Array):
        for field_stmt in array:  # yield from plz!
            yield field_stmt
    elif isinstance(array, iterable.Generator):
        for field_stmt in array.iter_content():
            # Generators yield raw types; wrap them so callers always
            # receive statements.
            yield helpers.FakeStatement([field_stmt])
    elif isinstance(array, Instance) and array.name == 'tuple':
        # Tuple instances are opaque here; nothing to unpack.
        pass
    else:
        if expression_list:
            m = "TypeError: %s() argument after * must be a sequence, not %s" \
                % (func.name, array)
            analysis.add(evaluator, 'type-error-star',
                         expression_list[0], message=m)
def _star_star_dict(evaluator, array, expression_list, func):
    """
    Unpack *array* as ``**kwargs``.

    Returns a mapping ``str(key) -> (key, value_statement)``.  Adds a
    ``type-error-star-star`` analysis entry when *array* is not a mapping.
    """
    dct = {}
    from jedi.evaluate.representation import Instance
    if isinstance(array, Instance) and array.name == 'dict':
        # For now ignore this case. In the future add proper iterators and just
        # make one call without crazy isinstance checks.
        return {}
    if isinstance(array, iterable.Array) and array.type == pr.Array.DICT:
        for key_stmt, value_stmt in array.items():
            # first index, is the key if syntactically correct
            call = key_stmt.expression_list()[0]
            if isinstance(call, pr.Name):
                key = call
            elif isinstance(call, pr.Call):
                key = call.name
            else:
                continue  # We ignore complicated statements here, for now.
            # If the string is a duplicate, we don't care it's illegal Python
            # anyway.
            dct[str(key)] = key, value_stmt
    else:
        if expression_list:
            m = "TypeError: %s argument after ** must be a mapping, not %s" \
                % (func.name, array)
            analysis.add(evaluator, 'type-error-star-star',
                         expression_list[0], message=m)
    return dct
def _gen_param_name_copy(func, var_args, param, keys=(), values=(), array_type=None):
    """
    Create a param with the original scope (of varargs) as parent.

    :param keys: keyword names, used for ``**kwargs`` dicts.
    :param values: the value statements assigned to the param.
    :param array_type: ``pr.Array.TUPLE``/``pr.Array.DICT`` for star
        params, otherwise ``None``.
    :return: the ``Name`` of the newly created ``ExecutedParam``.
    """
    if isinstance(var_args, pr.Array):
        parent = var_args.parent
        start_pos = var_args.start_pos
    else:
        # No real call array available; fall back to the function itself.
        parent = func
        start_pos = 0, 0
    new_param = ExecutedParam.from_param(param, parent, var_args)
    # create an Array (-> needed for *args/**kwargs tuples/dicts)
    arr = pr.Array(helpers.FakeSubModule, start_pos, array_type, parent)
    arr.values = list(values)  # Arrays only work with list.
    key_stmts = []
    for key in keys:
        key_stmts.append(helpers.FakeStatement([key], start_pos))
    arr.keys = key_stmts
    arr.type = array_type
    new_param.set_expression_list([arr])
    # The returned name points at the generated param as its parent.
    name = copy.copy(param.get_name())
    name.parent = new_param
    return name
def _error_argument_count(func, actual_count):
default_arguments = sum(1 for p in func.params if p.assignment_details or p.stars)
if default_arguments == 0:
before = 'exactly '
else:
before = 'from %s to ' % (len(func.params) - default_arguments)
return ('TypeError: %s() takes %s%s arguments (%s given).'
% (func.name, before, len(func.params), actual_count))

View File

@@ -7,6 +7,7 @@ from jedi.parser import representation as pr
from jedi import debug
from jedi.common import PushBackIterator
from jedi.evaluate.compiled import CompiledObject, create, builtin
from jedi.evaluate import analysis
class PythonGrammar(object):
@@ -70,7 +71,7 @@ class Precedence(object):
which = which.value
return which
return (process(self.left), self.operator, process(self.right))
return (process(self.left), self.operator.string, process(self.right))
def __repr__(self):
return '(%s %s %s)' % (self.left, self.operator, self.right)
@@ -107,10 +108,27 @@ def _get_number(iterator, priority=PythonGrammar.LOWEST_PRIORITY):
_syntax_error(el)
return _get_number(iterator, priority)
return Precedence(None, el, right)
elif isinstance(el, pr.tokenize.Token):
return _get_number(iterator, priority)
else:
return el
class MergedOperator(pr.Operator):
    """
    A way to merge the two operators `is not` and `not in`, which are two
    words instead of one.

    Maybe there's a better way (directly in the tokenizer/parser? but for now
    this is fine.)
    """
    def __init__(self, first, second):
        # Merged textual representation, e.g. "is not".
        string = first.string + ' ' + second.string
        super(MergedOperator, self).__init__(first._sub_module, string,
                                             first.parent, first.start_pos)
        # Keep the original operator tokens around for later inspection.
        self.first = first
        self.second = second
def _check_operator(iterator, priority=PythonGrammar.LOWEST_PRIORITY):
try:
left = _get_number(iterator, priority)
@@ -137,14 +155,14 @@ def _check_operator(iterator, priority=PythonGrammar.LOWEST_PRIORITY):
match = check[match_index]
if isinstance(match, PythonGrammar.MultiPart):
next_tok = next(iterator)
if next_tok != match.second:
if next_tok == match.second:
el = MergedOperator(el, next_tok)
else:
iterator.push_back(next_tok)
if el == 'is': # `is not` special case
match = 'is'
else:
if el == 'not':
continue
operator = match
operator = el
break
if operator is None:
@@ -168,9 +186,9 @@ def _check_operator(iterator, priority=PythonGrammar.LOWEST_PRIORITY):
_syntax_error(iterator.current, 'SyntaxError operand missing')
else:
if operator in PythonGrammar.TERNARY:
left = TernaryPrecedence(left, str(operator), right, middle)
left = TernaryPrecedence(left, operator, right, middle)
else:
left = Precedence(left, str(operator), right)
left = Precedence(left, operator, right)
return left
@@ -232,16 +250,48 @@ def is_literal(obj):
return _is_number(obj) or _is_string(obj)
def _is_tuple(obj):
from jedi.evaluate import iterable
return isinstance(obj, iterable.Array) and obj.type == pr.Array.TUPLE
def _is_list(obj):
from jedi.evaluate import iterable
return isinstance(obj, iterable.Array) and obj.type == pr.Array.LIST
def _element_calculate(evaluator, left, operator, right):
from jedi.evaluate import iterable, representation as er
l_is_num = _is_number(left)
r_is_num = _is_number(right)
if operator == '*':
# for iterables, ignore * operations
from jedi.evaluate import iterable
if isinstance(left, iterable.Array) or _is_string(left):
return [left]
elif isinstance(right, iterable.Array) or _is_string(right):
return [right]
elif operator == '+':
if _is_number(left) and _is_number(right) or _is_string(left) and _is_string(right):
if l_is_num and r_is_num or _is_string(left) and _is_string(right):
return [create(evaluator, left.obj + right.obj)]
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return [iterable.MergedArray(evaluator, (left, right))]
elif operator == '-':
if _is_number(left) and _is_number(right):
if l_is_num and r_is_num:
return [create(evaluator, left.obj - right.obj)]
elif operator == '%':
# With strings and numbers the left type typically remains. Except for
# `int() % float()`.
return [left]
def check(obj):
"""Checks if a Jedi object is either a float or an int."""
return isinstance(obj, er.Instance) and obj.name in ('int', 'float')
# Static analysis, one is a number, the other one is not.
if operator in ('+', '-') and l_is_num != r_is_num \
and not (check(left) or check(right)):
message = "TypeError: unsupported operand type(s) for +: %s and %s"
analysis.add(evaluator, 'type-error-operation', operator,
message % (left, right))
return [left, right]

View File

@@ -39,7 +39,7 @@ class RecursionDetector(object):
def push_stmt(self, stmt):
self.current = _RecursionNode(stmt, self.current)
check = self._check_recursion()
if check: # TODO remove False!!!!
if check:
debug.warning('catched stmt recursion: %s against %s @%s', stmt,
check.stmt, stmt.start_pos)
self.pop_stmt()
@@ -88,7 +88,10 @@ class _RecursionNode(object):
if not other:
return None
is_list_comp = lambda x: isinstance(x, pr.ForFlow) and x.is_list_comp
# List Comprehensions start on the same line as its statement.
# Therefore we have the unfortunate situation of the same start_pos for
# two statements.
is_list_comp = lambda x: isinstance(x, pr.ListComprehension)
return self.script == other.script \
and self.position == other.position \
and not is_list_comp(self.stmt.parent) \

View File

@@ -11,6 +11,8 @@ So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
"""
import copy
import os
import pkgutil
from jedi._compatibility import use_metaclass, unicode
from jedi.parser import representation as pr
@@ -137,19 +139,7 @@ class Instance(use_metaclass(CachedMetaClass, Executable)):
args = [obj, obj.base] if isinstance(obj, Instance) else [None, obj]
return self.execute_subscope_by_name('__get__', args)
@memoize_default([])
def get_defined_names(self):
"""
Get the instance vars of a class. This includes the vars of all
classes
"""
names = self.get_self_attributes()
for var in self.base.instance_names():
names.append(InstanceElement(self._evaluator, self, var, True))
return names
def scope_generator(self):
def scope_names_generator(self, position=None):
"""
An Instance has two scopes: The scope with self names and the class
scope. Instance variables have priority over the class scope.
@@ -168,15 +158,18 @@ class Instance(use_metaclass(CachedMetaClass, Executable)):
except KeyError:
return False
def get_index_types(self, indexes=[]):
def get_index_types(self, index_array):
indexes = iterable.create_indexes_or_slices(self._evaluator, index_array)
if any([isinstance(i, iterable.Slice) for i in indexes]):
# Slice support in Jedi is very marginal, at the moment, so just
# ignore them in case of __getitem__.
# TODO support slices in a more general way.
indexes = []
index = helpers.FakeStatement(indexes, parent=compiled.builtin)
try:
return self.execute_subscope_by_name('__getitem__', indexes)
return self.execute_subscope_by_name('__getitem__', [index])
except KeyError:
debug.warning('No __getitem__, cannot access the array.')
return []
@@ -231,13 +224,18 @@ class InstanceElement(use_metaclass(CachedMetaClass, pr.Base)):
def expression_list(self):
# Copy and modify the array.
return [InstanceElement(self.instance._evaluator, self.instance, command, self.is_class_var)
return [InstanceElement(self._evaluator, self.instance, command, self.is_class_var)
if not isinstance(command, (pr.Operator, Token)) else command
for command in self.var.expression_list()]
def __iter__(self):
for el in self.var.__iter__():
yield InstanceElement(self.instance._evaluator, self.instance, el, self.is_class_var)
yield InstanceElement(self.instance._evaluator, self.instance, el,
self.is_class_var)
def __getitem__(self, index):
return InstanceElement(self._evaluator, self.instance, self.var[index],
self.is_class_var)
def __getattr__(self, name):
return getattr(self.var, name)
@@ -293,25 +291,21 @@ class Class(use_metaclass(CachedMetaClass, pr.IsScope)):
# TODO mro!
for cls in self.get_super_classes():
# Get the inherited names.
if isinstance(cls, compiled.CompiledObject):
super_result += cls.get_defined_names()
else:
for i in cls.instance_names():
if not in_iterable(i, result):
super_result.append(i)
for i in cls.instance_names():
if not in_iterable(i, result):
super_result.append(i)
result += super_result
return result
@memoize_default(default=())
def get_defined_names(self):
result = self.instance_names()
type_cls = self._evaluator.find_types(compiled.builtin, 'type')[0]
return result + list(type_cls.get_defined_names())
def scope_names_generator(self, position=None):
yield self, self.instance_names()
yield self, compiled.type_names
def get_subscope_by_name(self, name):
for sub in reversed(self.subscopes):
if sub.name.get_code() == name:
return sub
for s in [self] + self.get_super_classes():
for sub in reversed(s.subscopes):
if sub.name.get_code() == name:
return sub
raise KeyError("Couldn't find subscope.")
def is_callable(self):
@@ -405,10 +399,10 @@ class Function(use_metaclass(CachedMetaClass, pr.IsScope)):
return getattr(self.base_func, name)
def __repr__(self):
decorated_func = self._decorated_func()
dec_func = self._decorated_func()
dec = ''
if decorated_func is not None and decorated_func != self:
dec = " is " + repr(decorated_func)
if not self.is_decorated and self.base_func.decorators:
dec = " is " + repr(dec_func)
return "<e%s of %s%s>" % (type(self).__name__, self.base_func, dec)
@@ -428,10 +422,16 @@ class FunctionExecution(Executable):
# Feed the listeners, with the params.
for listener in func.listeners:
listener.execute(self._get_params())
if func.listeners:
# If we do have listeners, that means that there's not a regular
# execution ongoing. In this case Jedi is interested in the
# inserted params, not in the actual execution of the function.
return []
if func.is_generator and not evaluate_generator:
return [iterable.Generator(self._evaluator, func, self.var_args)]
else:
stmts = docstrings.find_return_types(self._evaluator, func)
stmts = list(docstrings.find_return_types(self._evaluator, func))
for r in self.returns:
if r is not None:
stmts += self._evaluator.eval_statement(r)
@@ -454,6 +454,10 @@ class FunctionExecution(Executable):
"""
return self._get_params() + pr.Scope.get_defined_names(self)
def scope_names_generator(self, position=None):
names = pr.filter_after_position(pr.Scope.get_defined_names(self), position)
yield self, self._get_params() + names
def _copy_properties(self, prop):
"""
Literally copies a property of a Function. Copying is very expensive,
@@ -519,3 +523,47 @@ class FunctionExecution(Executable):
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self.base)
class ModuleWrapper(use_metaclass(CachedMetaClass, pr.Module)):
    """
    Wraps a parser module, enriching name lookups with implicit module
    attributes (``__file__`` etc.) and the sub modules of a package.
    """

    def __init__(self, evaluator, module):
        self._evaluator = evaluator
        self._module = module

    def scope_names_generator(self, position=None):
        """Yield ``(scope, names)`` pairs for name lookup in this module."""
        yield self, pr.filter_after_position(self._module.get_defined_names(), position)
        yield self, self._module_attributes()
        sub_modules = self._sub_modules()
        if sub_modules:
            # Reuse the already-computed (memoized) list instead of
            # calling _sub_modules() a second time.
            yield self, sub_modules

    @memoize_default()
    def _module_attributes(self):
        """Names every module implicitly defines; all of them are strings."""
        names = ['__file__', '__package__', '__doc__', '__name__', '__version__']
        # All the additional module attributes are strings.
        parent = Instance(self._evaluator, compiled.create(self._evaluator, str))
        return [helpers.FakeName(n, parent) for n in names]

    @memoize_default()
    def _sub_modules(self):
        """
        Lists modules in the directory of this module (if this module is a
        package).
        """
        path = self._module.path
        names = []
        if path is not None and path.endswith(os.path.sep + '__init__.py'):
            mods = pkgutil.iter_modules([os.path.dirname(path)])
            for module_loader, name, is_pkg in mods:
                name = helpers.FakeName(name)
                # It's obviously a relative import to the current module.
                imp = helpers.FakeImport(name, self, level=1)
                name.parent = imp
                names.append(name)
        return names

    def __getattr__(self, name):
        # Everything not defined here is forwarded to the wrapped module.
        return getattr(self._module, name)

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self._module)

View File

@@ -22,11 +22,17 @@ def execute(evaluator, obj, params):
pass
else:
if obj.parent == compiled.builtin:
# for now we just support builtin functions.
try:
return _implemented['builtins'][obj_name](evaluator, obj, params)
except KeyError:
pass
module_name = 'builtins'
elif isinstance(obj.parent, pr.Module):
module_name = str(obj.parent.name)
else:
module_name = ''
# for now we just support builtin functions.
try:
return _implemented[module_name][obj_name](evaluator, obj, params)
except KeyError:
pass
raise NotInStdLib()
@@ -86,7 +92,7 @@ def builtins_super(evaluator, obj, params):
def builtins_reversed(evaluator, obj, params):
objects = _follow_param(evaluator, params, 0)
objects = tuple(_follow_param(evaluator, params, 0))
if objects:
# unpack the iterator values
objects = tuple(iterable.get_iterator_types(objects))
@@ -97,15 +103,29 @@ def builtins_reversed(evaluator, obj, params):
# would fail in certain cases like `reversed(x).__iter__` if we
# just returned the result directly.
stmts = [FakeStatement([r]) for r in rev]
objects = (FakeArray(stmts, objects[0].parent),)
objects = (iterable.Array(evaluator, FakeArray(stmts, objects[0].parent)),)
return [er.Instance(evaluator, obj, objects)]
def _return_first_param(evaluator, obj, params):
if len(params) == 1:
return _follow_param(evaluator, params, 0)
return []
_implemented = {
'builtins': {
'getattr': builtins_getattr,
'type': builtins_type,
'super': builtins_super,
'reversed': builtins_reversed,
}
},
'copy': {
'copy': _return_first_param,
'deepcopy': _return_first_param,
},
'json': {
'load': lambda *args: [],
'loads': lambda *args: [],
},
}

View File

@@ -3,6 +3,8 @@ import sys
from jedi._compatibility import exec_function, unicode
from jedi.parser import representation as pr
from jedi.parser import Parser
from jedi.evaluate.cache import memoize_default
from jedi import debug
from jedi import common
@@ -14,77 +16,109 @@ def get_sys_path():
if not venv:
return
venv = os.path.abspath(venv)
p = os.path.join(
venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
sys_path.insert(0, p)
if os.name == 'nt':
p = os.path.join(venv, 'lib', 'site-packages')
else:
p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2],
'site-packages')
if p not in sys_path:
sys_path.insert(0, p)
check_virtual_env(sys.path)
return [p for p in sys.path if p != ""]
#@cache.memoize_default([]) TODO add some sort of cache again.
def sys_path_with_modifications(module):
def execute_code(code):
c = "import os; from os.path import *; result=%s"
variables = {'__file__': module.path}
try:
exec_function(c % code, variables)
except Exception:
debug.warning('sys.path manipulation detected, but failed to evaluate.')
return None
try:
res = variables['result']
if isinstance(res, str):
return os.path.abspath(res)
else:
return None
except KeyError:
def _execute_code(module_path, code):
    """Execute a small ``sys.path`` manipulation expression and return a path.

    The expression is evaluated in a throwaway namespace with ``__file__``
    bound to *module_path*.  Returns the absolute path when the expression
    produced a string, otherwise ``None`` (including on any execution error).
    """
    c = "import os; from os.path import *; result=%s"
    variables = {'__file__': module_path}
    try:
        exec_function(c % code, variables)
    except Exception:
        debug.warning('sys.path manipulation detected, but failed to evaluate.')
        return None

    # Only string results are meaningful as path entries.
    res = variables.get('result')
    if isinstance(res, str):
        return os.path.abspath(res)
    return None
def check_module(module):
try:
possible_stmts = module.used_names['path']
except KeyError:
return get_sys_path()
sys_path = list(get_sys_path()) # copy
for p in possible_stmts:
if not isinstance(p, pr.Statement):
continue
expression_list = p.expression_list()
# sys.path command is just one thing.
if len(expression_list) != 1 or not isinstance(expression_list[0], pr.Call):
continue
call = expression_list[0]
n = call.name
if not isinstance(n, pr.Name) or len(n.names) != 3:
continue
if [unicode(x) for x in n.names[:2]] != ['sys', 'path']:
continue
array_cmd = unicode(n.names[2])
if call.execution is None:
continue
exe = call.execution
if not (array_cmd == 'insert' and len(exe) == 2
or array_cmd == 'append' and len(exe) == 1):
continue
def _paths_from_assignment(statement):
"""
extracts the assigned strings from an assignment that looks as follows::
if array_cmd == 'insert':
exe_type, exe.type = exe.type, pr.Array.NOARRAY
exe_pop = exe.values.pop(0)
res = execute_code(exe.get_code())
if res is not None:
sys_path.insert(0, res)
debug.dbg('sys path inserted: %s', res)
exe.type = exe_type
exe.values.insert(0, exe_pop)
elif array_cmd == 'append':
res = execute_code(exe.get_code())
if res is not None:
sys_path.append(res)
debug.dbg('sys path added: %s', res)
return sys_path
>>> sys.path[0:0] = ['module/path', 'another/module/path']
"""
names = statement.get_defined_names()
if len(names) != 1:
return []
if [unicode(x) for x in names[0].names] != ['sys', 'path']:
return []
expressions = statement.expression_list()
if len(expressions) != 1 or not isinstance(expressions[0], pr.Array):
return
stmts = (s for s in expressions[0].values if isinstance(s, pr.Statement))
expression_lists = (s.expression_list() for s in stmts)
return [e.value for exprs in expression_lists for e in exprs
if isinstance(e, pr.Literal) and e.value]
def _paths_from_insert(module_path, exe):
    """
    Extract the inserted module path from a ``sys.path.insert`` statement.

    The insertion index (first argument) is temporarily popped off and the
    array type neutralized so that ``exe.get_code()`` yields only the path
    expression; both are restored afterwards.
    """
    exe_type, exe.type = exe.type, pr.Array.NOARRAY
    exe_pop = exe.values.pop(0)
    try:
        # Evaluate just the path expression (second argument of insert()).
        return _execute_code(module_path, exe.get_code())
    finally:
        # Restore the parser tree even if evaluation raised unexpectedly,
        # so a shared/cached parser node is never left mutated.
        exe.type = exe_type
        exe.values.insert(0, exe_pop)
def _paths_from_call_expression(module_path, call):
""" extract the path from either "sys.path.append" or "sys.path.insert" """
if call.execution is None:
return
n = call.name
if not isinstance(n, pr.Name) or len(n.names) != 3:
return
names = [unicode(x) for x in n.names]
if names[:2] != ['sys', 'path']:
return
cmd = names[2]
exe = call.execution
if cmd == 'insert' and len(exe) == 2:
path = _paths_from_insert(module_path, exe)
elif cmd == 'append' and len(exe) == 1:
path = _execute_code(module_path, exe.get_code())
return path and [path] or []
def _check_module(module):
    """Return the sys.path, extended by modifications detected in *module*.

    Scans every statement that mentions the name ``path`` for either a
    ``sys.path.append``/``sys.path.insert`` call or a slice assignment and
    collects the paths they would add.
    """
    try:
        possible_stmts = module.used_names['path']
    except KeyError:
        # Module never touches a name called `path` -> plain sys.path.
        return get_sys_path()

    sys_path = list(get_sys_path())  # copy
    for stmt in possible_stmts:
        if not isinstance(stmt, pr.Statement):
            continue
        expressions = stmt.expression_list()
        if len(expressions) == 1 and isinstance(expressions[0], pr.Call):
            added = _paths_from_call_expression(module.path, expressions[0])
            sys_path.extend(added or [])
        elif (hasattr(stmt, 'assignment_details')
                and len(stmt.assignment_details) == 1):
            sys_path.extend(_paths_from_assignment(stmt) or [])
    return sys_path
@memoize_default(evaluator_is_first_arg=True)
def sys_path_with_modifications(evaluator, module):
if module.path is None:
# Support for modules without a path is bad, therefore return the
# normal path.
@@ -94,27 +128,82 @@ def sys_path_with_modifications(module):
with common.ignored(OSError):
os.chdir(os.path.dirname(module.path))
result = check_module(module)
result = _check_module(module)
result += _detect_django_path(module.path)
# buildout scripts often contain the same sys.path modifications
# the set here is used to avoid duplicate sys.path entries
buildout_paths = set()
for module_path in _get_buildout_scripts(module.path):
try:
with open(module_path, 'rb') as f:
source = f.read()
except IOError:
pass
else:
p = Parser(common.source_to_unicode(source), module_path)
for path in _check_module(p.module):
if path not in buildout_paths:
buildout_paths.add(path)
result.append(path)
# cleanup, back to old directory
os.chdir(curdir)
return result
return list(result)
def _traverse_parents(path):
while True:
new = os.path.dirname(path)
if new == path:
return
path = new
yield path
def _get_parent_dir_with_file(path, filename):
    """Return the closest ancestor of *path* that contains *filename*.

    Returns ``None`` when no ancestor up to the root has such a file.
    """
    return next(
        (parent for parent in _traverse_parents(path)
         if os.path.isfile(os.path.join(parent, filename))),
        None,
    )
def _detect_django_path(module_path):
    """ Detects the path of the very well known Django library (if used) """
    result = []

    for parent in _traverse_parents(module_path):
        with common.ignored(IOError):
            # A `manage.py` in an ancestor directory marks a Django project
            # root; add that directory so project imports resolve.
            with open(parent + os.path.sep + 'manage.py'):
                debug.dbg('Found django path: %s', module_path)
                result.append(parent)
    return result
def _get_buildout_scripts(module_path):
    """
    if there is a 'buildout.cfg' file in one of the parent directories of the
    given module it will return a list of all files in the buildout bin
    directory that look like python files.

    :param module_path: absolute path to the module.
    :type module_path: str
    """
    project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg')
    if not project_root:
        return []
    bin_path = os.path.join(project_root, 'bin')
    if not os.path.exists(bin_path):
        return []

    extra_module_paths = []
    for filename in os.listdir(bin_path):
        filepath = os.path.join(bin_path, filename)
        try:
            with open(filepath, 'r') as f:
                firstline = f.readline()
        except IOError as e:
            # either permission error or race cond. because file got deleted
            # ignore
            debug.warning(unicode(e))
            continue
        # Shebang check: only scripts executed by a python interpreter count.
        if firstline.startswith('#!') and 'python' in firstline:
            extra_module_paths.append(filepath)
    return extra_module_paths

View File

@@ -76,11 +76,16 @@ class Parser(object):
d.parent = self.module
self.module.end_pos = self._gen.current.end_pos
if self._gen.current.type in (tokenize.NEWLINE,):
if self._gen.current.type == tokenize.NEWLINE:
# This case is only relevant with the FastTokenizer, because
# otherwise there's always an EndMarker.
# otherwise there's always an ENDMARKER.
# we added a newline before, so we need to "remove" it again.
self.module.end_pos = self._gen.tokenizer_previous.end_pos
#
# NOTE: It should be keep end_pos as-is if the last token of
# a source is a NEWLINE, otherwise the newline at the end of
# a source is not included in a ParserNode.code.
if self._gen.previous.type != tokenize.NEWLINE:
self.module.end_pos = self._gen.previous.end_pos
del self._gen
@@ -320,7 +325,9 @@ class Parser(object):
# print 'parse_stmt', tok, tokenize.tok_name[token_type]
is_kw = tok.string in OPERATOR_KEYWORDS
if tok.type == tokenize.OP or is_kw:
tok_list.append(pr.Operator(tok.string, tok.start_pos))
tok_list.append(
pr.Operator(self.module, tok.string, self._scope, tok.start_pos)
)
else:
tok_list.append(tok)
@@ -557,6 +564,8 @@ class Parser(object):
if stmt is not None:
stmt.parent = use_as_parent_scope
try:
func.statements.append(pr.KeywordStatement(tok_str, s,
use_as_parent_scope, stmt))
func.returns.append(stmt)
# start_pos is the one of the return statement
stmt.start_pos = s
@@ -566,6 +575,7 @@ class Parser(object):
stmt, tok = self._parse_statement()
if stmt is not None:
stmt.parent = use_as_parent_scope
self._scope.statements.append(stmt)
self._scope.asserts.append(stmt)
elif tok_str in STATEMENT_KEYWORDS:
stmt, _ = self._parse_statement()
@@ -620,17 +630,10 @@ class PushBackTokenizer(object):
if self._push_backs:
return self._push_backs.pop(0)
self.previous = self.current
previous = self.current
self.current = next(self._tokenizer)
self.previous = previous
return self.current
def __iter__(self):
return self
@property
def tokenizer_previous(self):
"""
Temporary hack, basically returns the last previous if the fast parser
sees an EndMarker. The fast parser positions have to be changed anyway.
"""
return self._tokenizer.previous

View File

@@ -16,7 +16,7 @@ from jedi.parser.tokenize import (source_tokens, Token, FLOWS, NEWLINE,
COMMENT, ENDMARKER)
class Module(pr.Simple, pr.Module):
class Module(pr.Module, pr.Simple):
def __init__(self, parsers):
super(Module, self).__init__(self, (1, 0))
self.parsers = parsers
@@ -183,6 +183,9 @@ class ParserNode(object):
class FastParser(use_metaclass(CachedFastParser)):
_keyword_re = re.compile('^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS))
def __init__(self, code, module_path=None):
# set values like `pr.Module`.
self.module_path = module_path
@@ -196,7 +199,7 @@ class FastParser(use_metaclass(CachedFastParser)):
self._parse(code)
except:
# FastParser is cached, be careful with exceptions
self.parsers[:] = []
del self.parsers[:]
raise
def update(self, code):
@@ -206,7 +209,7 @@ class FastParser(use_metaclass(CachedFastParser)):
self._parse(code)
except:
# FastParser is cached, be careful with exceptions
self.parsers[:] = []
del self.parsers[:]
raise
def _split_parts(self, code):
@@ -215,34 +218,26 @@ class FastParser(use_metaclass(CachedFastParser)):
each part separately and therefore cache parts of the file and not
everything.
"""
def add_part():
txt = '\n'.join(current_lines)
if txt:
if add_to_last and parts:
parts[-1] += '\n' + txt
else:
parts.append(txt)
current_lines[:] = []
r_keyword = '^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS)
def gen_part():
text = '\n'.join(current_lines)
del current_lines[:]
return text
# Split only new lines. Distinction between \r\n is the tokenizer's
# job.
self._lines = code.split('\n')
current_lines = []
parts = []
is_decorator = False
current_indent = 0
old_indent = 0
new_indent = False
in_flow = False
add_to_last = False
# All things within flows are simply being ignored.
for i, l in enumerate(self._lines):
for l in self._lines:
# check for dedents
m = re.match('^([\t ]*)(.?)', l)
indent = len(m.group(1))
if m.group(2) in ['', '#']:
s = l.lstrip('\t ')
indent = len(l) - len(s)
if not s or s[0] in ('#', '\r'):
current_lines.append(l) # just ignore comments and blank lines
continue
@@ -250,8 +245,8 @@ class FastParser(use_metaclass(CachedFastParser)):
current_indent = indent
new_indent = False
if not in_flow or indent < old_indent:
add_part()
add_to_last = False
if current_lines:
yield gen_part()
in_flow = False
elif new_indent:
current_indent = indent
@@ -259,12 +254,12 @@ class FastParser(use_metaclass(CachedFastParser)):
# Check lines for functions/classes and split the code there.
if not in_flow:
m = re.match(r_keyword, l)
m = self._keyword_re.match(l)
if m:
in_flow = m.group(1) in tokenize.FLOWS
if not is_decorator and not in_flow:
add_part()
add_to_last = False
if current_lines:
yield gen_part()
is_decorator = '@' == m.group(1)
if not is_decorator:
old_indent = current_indent
@@ -272,12 +267,10 @@ class FastParser(use_metaclass(CachedFastParser)):
new_indent = True
elif is_decorator:
is_decorator = False
add_to_last = True
current_lines.append(l)
add_part()
return parts
if current_lines:
yield gen_part()
def _parse(self, code):
""" :type code: str """
@@ -285,24 +278,20 @@ class FastParser(use_metaclass(CachedFastParser)):
new, temp = self._get_parser(unicode(''), unicode(''), 0, [], False)
return new
parts = self._split_parts(code)
self.parsers[:] = []
del self.parsers[:]
line_offset = 0
start = 0
p = None
is_first = True
for code_part in parts:
lines = code_part.count('\n') + 1
for code_part in self._split_parts(code):
if is_first or line_offset >= p.module.end_pos[0]:
indent = len(re.match(r'[ \t]*', code_part).group(0))
indent = len(code_part) - len(code_part.lstrip('\t '))
if is_first and self.current_node is not None:
nodes = [self.current_node]
else:
nodes = []
if self.current_node is not None:
self.current_node = \
self.current_node.parent_until_indent(indent)
nodes += self.current_node.old_children
@@ -347,7 +336,7 @@ class FastParser(use_metaclass(CachedFastParser)):
#else:
#print '#'*45, line_offset, p.module.end_pos, 'theheck\n', repr(code_part)
line_offset += lines
line_offset += code_part.count('\n') + 1
start += len(code_part) + 1 # +1 for newline
if self.parsers:
@@ -358,29 +347,26 @@ class FastParser(use_metaclass(CachedFastParser)):
self.module.end_pos = self.parsers[-1].module.end_pos
# print(self.parsers[0].module.get_code())
del code
def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):
h = hash(code)
hashes = [n.hash for n in nodes]
node = None
try:
index = hashes.index(h)
if nodes[index].code != code:
raise ValueError()
except ValueError:
for index, node in enumerate(nodes):
if node.hash != h or node.code != code:
continue
if node != self.current_node:
offset = int(nodes[0] == self.current_node)
self.current_node.old_children.pop(index - offset)
p = node.parser
m = p.module
m.line_offset += line_offset + 1 - m.start_pos[0]
break
else:
tokenizer = FastTokenizer(parser_code, line_offset)
p = Parser(parser_code, self.module_path, tokenizer=tokenizer,
top_module=self.module, no_docstr=no_docstr)
p.module.parent = self.module
else:
if nodes[index] != self.current_node:
offset = int(nodes[0] == self.current_node)
self.current_node.old_children.pop(index - offset)
node = nodes.pop(index)
p = node.parser
m = p.module
m.line_offset += line_offset + 1 - m.start_pos[0]
node = None
return p, node
@@ -406,6 +392,7 @@ class FastTokenizer(object):
self.parser_indent = self.old_parser_indent = 0
self.is_decorator = False
self.first_stmt = True
self.parentheses_level = 0
def next(self):
""" Python 2 Compatibility """
@@ -433,15 +420,20 @@ class FastTokenizer(object):
self.closed = True
raise common.MultiLevelStopIteration()
# ignore comments/ newlines
if self.previous.type in (None, NEWLINE) and tok_type not in (COMMENT, NEWLINE):
# Ignore comments/newlines, irrelevant for indentation.
if self.previous.type in (None, NEWLINE) \
and tok_type not in (COMMENT, NEWLINE):
# print c, tok_name[c[0]]
indent = current.start_pos[1]
if indent < self.parser_indent: # -> dedent
if self.parentheses_level:
# parentheses ignore the indentation rules.
pass
elif indent < self.parser_indent: # -> dedent
self.parser_indent = indent
self.new_indent = False
if not self.in_flow or indent < self.old_parser_indent:
close()
self.in_flow = False
elif self.new_indent:
self.parser_indent = indent
@@ -452,6 +444,7 @@ class FastTokenizer(object):
self.in_flow = tok_str in FLOWS
if not self.is_decorator and not self.in_flow:
close()
self.is_decorator = '@' == tok_str
if not self.is_decorator:
self.old_parser_indent = self.parser_indent
@@ -462,4 +455,12 @@ class FastTokenizer(object):
if self.first_stmt and not self.new_indent:
self.parser_indent = indent
self.first_stmt = False
# Ignore closing parentheses, because they are all
# irrelevant for the indentation.
if tok_str in '([{':
self.parentheses_level += 1
elif tok_str in ')]}':
self.parentheses_level = max(self.parentheses_level - 1, 0)
return current

View File

@@ -49,6 +49,21 @@ from jedi.parser import tokenize
SCOPE_CONTENTS = 'asserts', 'subscopes', 'imports', 'statements', 'returns'
def filter_after_position(names, position):
    """
    Removes all names after a certain position. If position is None, just
    returns the names list.
    """
    if position is None:
        return names
    # Names without a line number (start_pos[0] is None) are dropped as well.
    return [n for n in names
            if n.start_pos[0] is not None and n.start_pos < position]
class GetCodeState(object):
"""A helper class for passing the state of get_code in a thread-safe
manner."""
@@ -70,7 +85,13 @@ class DocstringMixin(object):
""" Returns a cleaned version of the docstring token. """
try:
# Returns a literal cleaned version of the ``Token``.
return unicode(cleandoc(literal_eval(self._doc_token.string)))
cleaned = cleandoc(literal_eval(self._doc_token.string))
# Since we want the docstr output to be always unicode, just force
# it.
if is_py3 or isinstance(cleaned, unicode):
return cleaned
else:
return unicode(cleaned, 'UTF-8', 'replace')
except AttributeError:
return u('')
@@ -191,12 +212,18 @@ class Simple(Base):
return "<%s: %s@%s,%s>" % \
(type(self).__name__, code, self.start_pos[0], self.start_pos[1])
def is_scope(self):
return False
class IsScope(Base):
__slots__ = ()
def is_scope(self):
return True
class Scope(Simple, IsScope, DocstringMixin):
class Scope(IsScope, Simple, DocstringMixin):
"""
Super class for the parser tree, which represents the state of a python
text file.
@@ -351,6 +378,18 @@ class Scope(Simple, IsScope, DocstringMixin):
return "<%s: %s@%s-%s>" % (type(self).__name__, name,
self.start_pos[0], self.end_pos[0])
def walk(self):
yield self
for s in self.subscopes:
for scope in s.walk():
yield scope
for r in self.statements:
while isinstance(r, Flow):
for scope in r.walk():
yield scope
r = r.next
class Module(IsScope):
"""
@@ -481,6 +520,9 @@ class Class(Scope):
sub.get_call_signature(funcname=self.name.names[-1]), docstr)
return docstr
def scope_names_generator(self, position=None):
yield self, filter_after_position(self.get_defined_names(), position)
class Function(Scope):
"""
@@ -527,6 +569,9 @@ class Function(Scope):
debug.warning("multiple names in param %s", n)
return n
def scope_names_generator(self, position=None):
yield self, filter_after_position(self.get_defined_names(), position)
def get_call_signature(self, width=72, funcname=None):
"""
Generate call signature of this function.
@@ -670,16 +715,19 @@ class Flow(Scope):
self.next.parent = self.parent
return next
def scope_names_generator(self, position=None):
# For `with` and `for`.
yield self, filter_after_position(self.get_defined_names(), position)
class ForFlow(Flow):
"""
Used for the for loop, because there are two statement parts.
"""
def __init__(self, module, inputs, start_pos, set_stmt, is_list_comp=False):
def __init__(self, module, inputs, start_pos, set_stmt):
super(ForFlow, self).__init__(module, 'for', inputs, start_pos)
self.set_stmt = set_stmt
self.is_list_comp = is_list_comp
if set_stmt is not None:
set_stmt.parent = self.use_as_parent
@@ -776,28 +824,44 @@ class Import(Simple):
n.append(self.alias)
return n
def is_nested(self):
"""
This checks for the special case of nested imports, without aliases and
from statement::
import foo.bar
"""
return not self.alias and not self.from_ns and self.namespace is not None \
and len(self.namespace.names) > 1
class KeywordStatement(Base):
"""
For the following statements: `assert`, `del`, `global`, `nonlocal`,
`raise`, `return`, `yield`, `pass`, `continue`, `break`.
"""
__slots__ = ('name', 'start_pos', '_stmt', 'parent')
__slots__ = ('name', 'start_pos', 'stmt', 'parent')
def __init__(self, name, start_pos, parent, stmt=None):
self.name = name
self.start_pos = start_pos
self._stmt = stmt
self.stmt = stmt
self.parent = parent
if stmt is not None:
stmt.parent = self
def is_scope(self):
return False
def __repr__(self):
return "<%s(%s): %s>" % (type(self).__name__, self.name, self.stmt)
def get_code(self):
if self._stmt is None:
if self.stmt is None:
return "%s\n" % self.name
else:
return '%s %s\n' % (self.name, self._stmt)
return '%s %s\n' % (self.name, self.stmt)
def get_defined_names(self):
return []
@@ -805,7 +869,7 @@ class KeywordStatement(Base):
@property
def end_pos(self):
try:
return self._stmt.end_pos
return self.stmt.end_pos
except AttributeError:
return self.start_pos[0], self.start_pos[1] + len(self.name)
@@ -854,7 +918,7 @@ class Statement(Simple, DocstringMixin):
def get_code(self, new_line=True):
def assemble(command_list, assignment=None):
pieces = [c.get_code() if isinstance(c, Simple) else c.string if
isinstance(c, (tokenize.Token, Operator)) else unicode(c)
isinstance(c, tokenize.Token) else unicode(c)
for c in command_list]
if assignment is None:
return ''.join(pieces)
@@ -876,7 +940,7 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
def search_calls(calls):
for call in calls:
if isinstance(call, Array):
if isinstance(call, Array) and call.type != Array.DICT:
for stmt in call:
search_calls(stmt.expression_list())
elif isinstance(call, Call):
@@ -965,7 +1029,11 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
# always dictionaries and not sets.
arr.type = Array.DICT
arr.end_pos = (break_tok or stmt or old_stmt).end_pos
try:
arr.end_pos = (break_tok or stmt or old_stmt).end_pos
except UnboundLocalError:
# In case of something like `(def`
arr.end_pos = start_pos[0], start_pos[1] + 1
return arr, break_tok
def parse_stmt(token_iterator, maybe_dict=False, added_breaks=(),
@@ -984,18 +1052,19 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
if isinstance(tok, Base):
# the token is a Name, which has already been parsed
if isinstance(tok, ListComprehension):
# it's not possible to set it earlier
tok.parent = self
elif tok == 'lambda':
lambd, tok = parse_lambda(token_iterator)
if lambd is not None:
token_list.append(lambd)
elif tok == 'for':
list_comp, tok = parse_list_comp(token_iterator, token_list,
start_pos, tok.end_pos)
if list_comp is not None:
token_list = [list_comp]
if not level:
if isinstance(tok, ListComprehension):
# it's not possible to set it earlier
tok.parent = self
elif tok == 'lambda':
lambd, tok = parse_lambda(token_iterator)
if lambd is not None:
token_list.append(lambd)
elif tok == 'for':
list_comp, tok = parse_list_comp(token_iterator, token_list,
start_pos, tok.end_pos)
if list_comp is not None:
token_list = [list_comp]
if tok in closing_brackets:
level -= 1
@@ -1010,7 +1079,8 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
end_pos = end_pos[0], end_pos[1] - 1
break
token_list.append(tok)
if tok is not None: # Can be None, because of lambda/for.
token_list.append(tok)
if not token_list:
return None, tok
@@ -1070,7 +1140,7 @@ isinstance(c, (tokenize.Token, Operator)) else unicode(c)
debug.warning('list comprehension in @%s', start_pos)
return None, tok
return ListComprehension(st, middle, in_clause, self), tok
return ListComprehension(self._sub_module, st, middle, in_clause, self), tok
# initializations
result = []
@@ -1403,6 +1473,9 @@ class NamePart(object):
def get_parent_until(self, *args, **kwargs):
return self.parent.get_parent_until(*args, **kwargs)
def isinstance(self, *cls):
return isinstance(self, cls)
@property
def start_pos(self):
offset = self.parent._sub_module.line_offset
@@ -1453,22 +1526,31 @@ class Name(Simple):
return len(self.names)
class ListComprehension(Base):
class ListComprehension(ForFlow):
""" Helper class for list comprehensions """
def __init__(self, stmt, middle, input, parent):
def __init__(self, module, stmt, middle, input, parent):
self.input = input
nested_lc = input.expression_list()[0]
if isinstance(nested_lc, ListComprehension):
# is nested LC
input = nested_lc.stmt
nested_lc.parent = self
super(ListComprehension, self).__init__(module, [input],
stmt.start_pos, middle)
self.parent = parent
self.stmt = stmt
self.middle = middle
self.input = input
for s in stmt, middle, input:
for s in middle, input:
s.parent = self
self.parent = parent
# The stmt always refers to the most inner list comprehension.
stmt.parent = self._get_most_inner_lc()
def get_parent_until(self, *args, **kwargs):
return Simple.get_parent_until(self, *args, **kwargs)
@property
def start_pos(self):
return self.stmt.start_pos
def _get_most_inner_lc(self):
nested_lc = self.input.expression_list()[0]
if isinstance(nested_lc, ListComprehension):
return nested_lc._get_most_inner_lc()
return self
@property
def end_pos(self):
@@ -1483,26 +1565,21 @@ class ListComprehension(Base):
return "%s for %s in %s" % tuple(code)
class Operator(Base):
__slots__ = ('string', '_line', '_column')
class Operator(Simple):
__slots__ = ('string',)
def __init__(self, string, start_pos):
# TODO needs module param
def __init__(self, module, string, parent, start_pos):
end_pos = start_pos[0], start_pos[1] + len(string)
super(Operator, self).__init__(module, start_pos, end_pos)
self.string = string
self._line = start_pos[0]
self._column = start_pos[1]
self.parent = parent
def get_code(self):
return self.string
def __repr__(self):
return "<%s: `%s`>" % (type(self).__name__, self.string)
@property
def start_pos(self):
return self._line, self._column
@property
def end_pos(self):
return self._line, self._column + len(self.string)
def __eq__(self, other):
"""Make comparisons easy. Improves the readability of the parser."""
return self.string == other

View File

@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer. The reason was simple: The standanrd library tokenizer fails
tokenizer. The reason was simple: The standard library tokenizer fails
if the indentation is not right. The fast parser of jedi however requires
"wrong" indentation.
Basically this is a stripped down version of the standard library module, so
you can read the documentation there. Additionally we included some speed and
memory optimizations, here.
memory optimizations here.
"""
from __future__ import absolute_import

View File

@@ -2,6 +2,7 @@ import re
import os
from jedi import cache
from jedi import common
from jedi.parser import tokenize
from jedi._compatibility import u
from jedi.parser.fast import FastParser
@@ -91,9 +92,12 @@ class UserContext(object):
elif tok_str == '.':
force_point = False
elif force_point:
# it is reversed, therefore a number is getting recognized
# as a floating point number
if tok_type == tokenize.NUMBER and tok_str[0] == '.':
# Reversed tokenizing, therefore a number is recognized as a
# floating point number.
# The same is true for string prefixes -> represented as a
# combination of string and name.
if tok_type == tokenize.NUMBER and tok_str[0] == '.' \
or tok_type == tokenize.NAME and last_type == tokenize.STRING:
force_point = False
else:
break
@@ -175,12 +179,7 @@ class UserContext(object):
def get_line(self, line_nr):
if not self._line_cache:
self._line_cache = self.source.splitlines()
if self.source:
if self.source[-1] == '\n':
self._line_cache.append(u(''))
else: # ''.splitlines() == []
self._line_cache = [u('')]
self._line_cache = common.splitlines(self.source)
if line_nr == 0:
# This is a fix for the zeroth line. We need a newline there, for

View File

@@ -83,7 +83,7 @@ def _rename(names, replace_str):
with open(current_path) as f:
source = f.read()
new_lines = common.source_to_unicode(source).splitlines()
new_lines = common.splitlines(common.source_to_unicode(source))
old_lines = new_lines[:]
nr, indent = name.line, name.column
@@ -101,7 +101,7 @@ def extract(script, new_name):
:type source: str
:return: list of changed lines/changed files
"""
new_lines = common.source_to_unicode(script.source).splitlines()
new_lines = common.splitlines(common.source_to_unicode(script.source))
old_lines = new_lines[:]
user_stmt = script._parser.user_stmt()
@@ -160,7 +160,7 @@ def inline(script):
"""
:type script: api.Script
"""
new_lines = common.source_to_unicode(script.source).splitlines()
new_lines = common.splitlines(common.source_to_unicode(script.source))
dct = {}

View File

@@ -42,6 +42,7 @@ Dynamic stuff
.. autodata:: dynamic_params
.. autodata:: dynamic_params_for_other_modules
.. autodata:: additional_dynamic_modules
.. autodata:: auto_import_modules
.. _settings-recursion:
@@ -179,6 +180,15 @@ dynamic_flow_information = True
Check for `isinstance` and other information to infer a type.
"""
auto_import_modules = [
'hashlib', # setattr
]
"""
Modules that are not analyzed but imported, although they contain Python code.
This improves autocompletion for libraries that use ``setattr`` or
``globals()`` modifications a lot.
"""
# ----------------
# recursions
# ----------------

View File

@@ -2,7 +2,7 @@
addopts = --doctest-modules
# Ignore broken files in blackbox test directories
norecursedirs = .* docs completion refactor absolute_import namespace_package scripts extensions speed
norecursedirs = .* docs completion refactor absolute_import namespace_package scripts extensions speed static_analysis
# Activate `clean_jedi_cache` fixture for all tests. This should be
# fine as long as we are using `clean_jedi_cache` as a session scoped

View File

@@ -26,7 +26,7 @@ setup(name='jedi',
keywords='python completion refactoring vim',
long_description=readme,
packages=['jedi', 'jedi.parser', 'jedi.evaluate', 'jedi.evaluate.compiled', 'jedi.api'],
package_data={'jedi': ['evlaluate/evaluate/compiled/fake/*.pym']},
package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
platforms=['any'],
classifiers=[
'Development Status :: 4 - Beta',

View File

@@ -56,14 +56,6 @@ a = ['']*2
#? list()
a
a = 2*2
#? int()
a
a = "a"*3
#? str()
a
# -----------------
# tuple assignments
# -----------------
@@ -284,6 +276,17 @@ class GetItem():
#? str()
GetItem("")[1]
class GetItemWithList():
def __getitem__(self, index):
return [1, 1.0, 's'][index]
#? float()
GetItemWithList()[1]
for i in 0, 2:
#? int() str()
GetItemWithList()[i]
# -----------------
# conversions
# -----------------

View File

@@ -43,6 +43,18 @@ def func():
#? int() str()
func()
# -----------------
# keywords
# -----------------
#? list()
assert []
def focus_return():
#? list
return []
# -----------------
# for loops
# -----------------
@@ -131,6 +143,12 @@ a = [a if 1.0 else '' for a in [1] if [1.0]]
#? int() str()
a[0]
# name resolve should be correct
left, right = 'a', 'b'
left, right = [x for x in (left, right)]
#? str()
left
# with a dict literal
#? str()
[a for a in {1:'x'}][0]
@@ -138,6 +156,17 @@ a[0]
##? str()
{a-1:b for a,b in {1:'a', 3:1.0}.items()}[0]
# list comprehensions should also work in combination with functions
def listen(arg):
for x in arg:
#? str()
x
listen(['' for x in [1]])
#? str()
([str for x in []])[0]
# -----------------
# nested list comprehensions
# -----------------
@@ -162,6 +191,36 @@ a[0]
#? int()
a[0][0]
# -----------------
# generator comprehensions
# -----------------
left, right = (i for i in (1, ''))
#? int()
left
gen = (i for i in (1,))
#? int()
next(gen)
#?
gen[0]
gen = (a for arr in [[1.0]] for a in arr)
#? float()
next(gen)
#? int()
(i for i in (1,)).send()
# issues with different formats
left, right = (i for i in
('1', '2'))
#? str()
left
# -----------------
# ternary operator
# -----------------
@@ -273,6 +332,21 @@ except ImportError, i_b:
#? ImportError()
i_b
class MyException(Exception):
def __init__(self, my_attr):
self.my_attr = my_attr
try:
raise MyException(1)
except MyException as e:
#? ['my_attr']
e.my_attr
#? 22 ['my_attr']
for x in e.my_attr:
pass
# -----------------
# continuations
# -----------------
@@ -281,3 +355,23 @@ foo = \
1
#? int()
foo
# -----------------
# if `is not` checks
# -----------------
foo = ['a']
if foo is not None:
foo = ''.join(foo)
#? str()
foo
# -----------------
# module attributes
# -----------------
# Don't move this to imports.py, because there's a star import.
#? str()
__file__
#? ['__file__']
__file__

View File

@@ -394,3 +394,14 @@ def third():
return list(b)
#?
third()[0]
# -----------------
# list comprehensions
# -----------------
def from_comprehension(foo):
#? int() float()
return foo
[from_comprehension(1.0) for n in (1,)]
[from_comprehension(n) for n in (1,)]

View File

@@ -46,6 +46,15 @@ def multi_line_func(a, # comment blabla
#? str()
multi_line_func(1,'')
def multi_line_call(b):
return b
multi_line_call(
#? int()
b=1)
# nothing after comma
def asdf(a):
return a
@@ -138,6 +147,36 @@ func(1.0)[0]
#? str()
func(1.0)[1]
#? float()
func(a=1.0)[0]
#? str()
func(a=1.0)[1]
#? int()
func(b=1.0)[0]
#? float()
func(b=1.0)[1]
#? list
func(a=list, b=set)[0]
#? set
func(a=list, b=set)[1]
def func_default(a, b=1):
return a, b
def nested_default(**kwargs):
return func_default(**kwargs)
#? float()
nested_default(a=1.0)[0]
#? int()
nested_default(a=1.0)[1]
#? str()
nested_default(a=1.0, b='')[1]
# -----------------
# closures
# -----------------
@@ -201,6 +240,33 @@ exe[1]
#? list()
exe[1][1]
# In a dynamic search, both inputs should be given.
def simple(a):
#? int() str()
return a
def xargs(*args):
return simple(*args)
xargs(1)
xargs('')
# *args without a self symbol
def memoize(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class Something():
@memoize
def x(self, a, b=1):
return a
Something().x(1)
# -----------------
# ** kwargs
# -----------------

View File

@@ -110,6 +110,18 @@ a
#? str() float()
b
def simple():
yield 1
yield ''
a, b = simple()
#? int()
a
#? str()
b
# -----------------
# More complicated access
# -----------------

View File

@@ -19,8 +19,6 @@ def scope_basic():
#? str()
import_tree.a
#? []
import_tree.mod1
def scope_pkg():
import import_tree.mod1
@@ -46,8 +44,8 @@ def scope_nested():
#? ['sqrt']
import_tree.pkg.sqrt
#? ['a', 'pkg']
import_tree.
#? ['pkg']
import_tree.p
#? float()
import_tree.pkg.mod1.a
@@ -64,7 +62,11 @@ def scope_nested2():
import_tree.mod1
#? ['pkg']
import_tree.pkg
#? []
# With the latest changes this completion also works, because submodules
# are always included (some nested import structures lead to this,
# typically).
#? ['rename1']
import_tree.rename1
def from_names():
@@ -317,3 +319,18 @@ else:
a = not_existing_import
#?
a
# -----------------
# module underscore descriptors
# -----------------
def underscore():
import keyword
#? ['__file__']
keyword.__file__
#? str()
keyword.__file__
# Does that also work for the our own module?
#? ['__file__']
__file__

36
test/completion/parser.py Normal file
View File

@@ -0,0 +1,36 @@
"""
Issues with the parser not the completion engine should be here.
"""
class IndentIssues():
"""
issue jedi-vim#288
Which is really a fast parser issue. It used to start a new block at the
parentheses, because it had problems with the indentation.
"""
def one_param(
self,
):
return 1
def with_param(
self,
y):
return y
#? int()
IndentIssues().one_param()
#? str()
IndentIssues().with_param('')
"""
Just because there's a def keyword, doesn't mean it should not be able to
complete to definition.
"""
definition = 0
#? ['definition', 'def']
str(def

View File

@@ -41,6 +41,14 @@ x = 'upp' + 'e'
#? str.upper
getattr(str, x + 'r')
a = "a"*3
#? str()
a
a = 3 * "a"
#? str()
a
# -----------------
# assignments
# -----------------

View File

@@ -12,6 +12,7 @@ sorted(arr)[0]
#? str()
next(reversed(arr))
next(reversed(arr))
# should not fail if there's no return value.
def yielder():
@@ -20,9 +21,20 @@ def yielder():
#?
next(reversed(yielder()))
# empty reversed should not raise an error
#?
next(reversed())
#? str()
next(open(''))
#? int()
{'a':2}.setdefault('a', 3)
# Compiled classes should have the meta class attributes.
#? ['__itemsize__']
tuple.__itemsize__
# -----------------
# re
# -----------------
@@ -110,3 +122,34 @@ def huhu(db):
"""
#? sqlite3.Connection()
db
# -----------------
# hashlib
# -----------------
import hashlib
#? ['md5']
hashlib.md5
# -----------------
# copy
# -----------------
import copy
#? int()
copy.deepcopy(1)
#?
copy.copy()
# -----------------
# json
# -----------------
# We don't want any results for json, because it depends on IO.
import json
#?
json.load('asdf')
#?
json.loads('[1]')

View File

@@ -80,7 +80,7 @@ import module_not_exists
module_not_exists
#< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (10,17), (13,17)
#< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (10,17), (13,17), ('imports', 70, 16)
from import_tree import rename1
#< (0,8), ('rename1',3,0), ('rename2',4,20), ('rename2',6,0), (3,32), (7,32), (4,0)
@@ -90,7 +90,7 @@ rename1.abc
from import_tree.rename1 import abc
abc
#< 20 ('rename1', 1,0), ('rename2', 4,5), (-10,24), (-7,0), (-4,17), (0,17), (3,17)
#< 20 ('rename1', 1,0), ('rename2', 4,5), (-10,24), (-7,0), (-4,17), (0,17), (3,17), ('imports', 70, 16)
from import_tree.rename1 import abc
#< (0, 32),

View File

@@ -1,5 +1,6 @@
import os
import shutil
import re
import tempfile
import pytest
@@ -7,6 +8,8 @@ import pytest
from . import helpers
from . import run
from . import refactor
import jedi
from jedi.evaluate.analysis import Warning
def pytest_addoption(parser):
@@ -59,12 +62,58 @@ def pytest_generate_tests(metafunc):
os.path.join(base_dir, 'thirdparty'), test_files, True))
ids = ["%s:%s" % (c.module_name, c.line_nr_test) for c in cases]
metafunc.parametrize('case', cases, ids=ids)
if 'refactor_case' in metafunc.fixturenames:
base_dir = metafunc.config.option.refactor_case_dir
metafunc.parametrize(
'refactor_case',
refactor.collect_dir_tests(base_dir, test_files))
if 'static_analysis_case' in metafunc.fixturenames:
base_dir = os.path.join(os.path.dirname(__file__), 'static_analysis')
metafunc.parametrize(
'static_analysis_case',
collect_static_analysis_tests(base_dir, test_files))
def collect_static_analysis_tests(base_dir, test_files):
for f_name in os.listdir(base_dir):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
if f_name.endswith(".py") and (not test_files or files_to_execute):
path = os.path.join(base_dir, f_name)
yield StaticAnalysisCase(path)
class StaticAnalysisCase(object):
"""
Static Analysis cases lie in the static_analysis folder.
The tests also start with `#!`, like the goto_definition tests.
"""
def __init__(self, path):
self.skip = False
self._path = path
with open(path) as f:
self._source = f.read()
def collect_comparison(self):
cases = []
for line_nr, line in enumerate(self._source.splitlines(), 1):
match = re.match(r'(\s*)#! (\d+ )?(.*)$', line)
if match is not None:
column = int(match.group(2) or 0) + len(match.group(1))
cases.append((line_nr + 1, column, match.group(3)))
return cases
def run(self, compare_cb):
analysis = jedi.Script(self._source, path=self._path)._analysis()
typ_str = lambda inst: 'warning ' if isinstance(inst, Warning) else ''
analysis = [(r.line, r.column, typ_str(r) + r.name)
for r in analysis]
compare_cb(self, analysis, self.collect_comparison())
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, os.path.basename(self._path))
@pytest.fixture()
def isolated_jedi_cache(monkeypatch, tmpdir):

View File

@@ -139,7 +139,7 @@ class IntegrationTestCase(object):
@property
def module_name(self):
return re.sub('.*/|\.py$', '', self.path)
return os.path.splitext(os.path.basename(self.path))[0]
@property
def line_nr_test(self):
@@ -365,6 +365,11 @@ if __name__ == '__main__':
current = cases[0].path if cases else None
count = fails = 0
for c in cases:
if current != c.path:
file_change(current, count, fails)
current = c.path
count = fails = 0
try:
if c.run(report):
tests_fail += 1
@@ -380,10 +385,6 @@ if __name__ == '__main__':
count += 1
if current != c.path:
file_change(current, count, fails)
current = c.path
count = fails = 0
file_change(current, count, fails)
print('\nSummary: (%s fails of %s tests) in %.3fs' % (tests_fail,

View File

@@ -0,0 +1,73 @@
# -----------------
# normal arguments (no keywords)
# -----------------
def simple(a):
return a
simple(1)
#! 6 type-error-too-few-arguments
simple()
#! 10 type-error-too-many-arguments
simple(1, 2)
#! 10 type-error-too-many-arguments
simple(1, 2, 3)
# -----------------
# keyword arguments
# -----------------
simple(a=1)
#! 7 type-error-keyword-argument
simple(b=1)
#! 10 type-error-too-many-arguments
simple(1, a=1)
def two_params(x, y):
return y
two_params(y=2, x=1)
two_params(1, y=2)
#! 10 type-error-multiple-values
two_params(1, x=2)
#! 17 type-error-too-many-arguments
two_params(1, 2, y=3)
# -----------------
# default arguments
# -----------------
def default(x, y=1, z=2):
return x
#! 7 type-error-too-few-arguments
default()
default(1)
default(1, 2)
default(1, 2, 3)
#! 17 type-error-too-many-arguments
default(1, 2, 3, 4)
default(x=1)
# -----------------
# class arguments
# -----------------
class Instance():
def __init__(self, foo):
self.foo = foo
Instance(1).foo
Instance(foo=1).foo
#! 12 type-error-too-many-arguments
Instance(1, 2).foo
#! 8 type-error-too-few-arguments
Instance().foo

View File

@@ -0,0 +1,113 @@
class Cls():
class_attr = ''
def __init__(self, input):
self.instance_attr = 3
self.input = input
def f(self):
#! 12 attribute-error
return self.not_existing
def undefined_object(self, obj):
"""
Uses an arbitrary object and performs an operation on it, shouldn't
be a problem.
"""
obj.arbitrary_lookup
def defined_lookup(self, obj):
"""
`obj` is defined by a call into this function.
"""
obj.upper
#! 4 attribute-error
obj.arbitrary_lookup
#! 13 name-error
class_attr = a
Cls(1).defined_lookup('')
c = Cls(1)
c.class_attr
Cls.class_attr
#! 4 attribute-error
Cls.class_attr_error
c.instance_attr
#! 2 attribute-error
c.instance_attr_error
c.something = None
#! 12 name-error
something = a
something
# -----------------
# Unused array variables should still raise attribute errors.
# -----------------
# should not raise anything.
for loop_variable in [1, 2]:
#! 4 name-error
x = undefined
loop_variable
#! 28 name-error
for loop_variable in [1, 2, undefined]:
pass
#! 7 attribute-error
[1, ''.undefined_attr]
def return_one(something):
return 1
#! 14 attribute-error
return_one(''.undefined_attribute)
#! 12 name-error
[r for r in undefined]
#! 1 name-error
[undefined for r in [1, 2]]
[r for r in [1, 2]]
# some random error that showed up
class NotCalled():
def match_something(self, param):
seems_to_need_an_assignment = param
return [value.match_something() for value in []]
# -----------------
# decorators
# -----------------
#! 1 name-error
@undefined_decorator
def func():
return 1
# -----------------
# operators
# -----------------
string = '%s %s' % (1, 2)
# Shouldn't raise an error, because `string` is really just a string, not an
# array or something.
string.upper
# -----------------
# imports
# -----------------
# Star imports and the like in modules should not cause attribute errors in
# this module.
import import_tree
import_tree.a
import_tree.b

View File

@@ -0,0 +1,46 @@
"""
Jedi issues warnings for possible errors if ``__getattr__``,
``__getattribute__`` or ``setattr`` are used.
"""
# -----------------
# __getattr*__
# -----------------
class Cls():
def __getattr__(self, name):
return getattr(str, name)
Cls().upper
#! 6 warning attribute-error
Cls().undefined
class Inherited(Cls):
pass
Inherited().upper
#! 12 warning attribute-error
Inherited().undefined
# -----------------
# setattr
# -----------------
class SetattrCls():
def __init__(self, dct):
# Jedi doesn't even try to understand such code
for k, v in dct:
setattr(self, k, v)
self.defined = 3
c = SetattrCls({'a': 'b'})
c.defined
#! 2 warning attribute-error
c.undefined

View File

@@ -0,0 +1,13 @@
# classmethod
class TarFile():
@classmethod
def open(cls, name, **kwargs):
return cls.taropen(name, **kwargs)
@classmethod
def taropen(cls, name, **kwargs):
return name
# should just work
TarFile.open('hallo')

View File

@@ -0,0 +1,7 @@
def generator():
yield 1
#! 11 type-error-generator
generator()[0]
list(generator())[0]

View File

@@ -0,0 +1,5 @@
"""
Another import tree, this time not for completion, but static analysis.
"""
from .a import *

View File

@@ -0,0 +1 @@
from . import b

View File

@@ -0,0 +1,25 @@
#! 7 import-error
import not_existing
import os
from os.path import abspath
#! 20 import-error
from os.path import not_existing
from datetime import date
date.today
#! 5 attribute-error
date.not_existing_attribute
#! 26 import-error
from datetime.date import today
#! 16 import-error
import datetime.date
#! 7 import-error
import not_existing_nested.date
import os.path

View File

@@ -0,0 +1,11 @@
-1 + 1
1 + 1.0
#! 2 type-error-operation
1 + '1'
#! 2 type-error-operation
1 - '1'
-1 - - 1
-1 - int()
int() - float()
float() - 3.0

View File

@@ -0,0 +1,113 @@
# -----------------
# *args
# -----------------
def simple(a):
return a
def nested(*args):
return simple(*args)
nested(1)
#! 6 type-error-too-few-arguments
nested()
def nested_no_call_to_function(*args):
return simple(1, *args)
def simple2(a, b, c):
return b
def nested(*args):
return simple2(1, *args)
def nested_twice(*args1):
return nested(*args1)
nested_twice(2, 3)
#! 12 type-error-too-few-arguments
nested_twice(2)
#! 19 type-error-too-many-arguments
nested_twice(2, 3, 4)
# A named argument can be located before *args.
def star_args_with_named(*args):
return simple2(c='', *args)
star_args_with_named(1, 2)
# -----------------
# **kwargs
# -----------------
def kwargs_test(**kwargs):
return simple2(1, **kwargs)
kwargs_test(c=3, b=2)
#! 11 type-error-too-few-arguments
kwargs_test(c=3)
#! 11 type-error-too-few-arguments
kwargs_test(b=2)
#! 22 type-error-keyword-argument
kwargs_test(b=2, c=3, d=4)
##! 11 type-error-multiple-values
kwargs_test(b=2, c=3, a=4)
def kwargs_nested(**kwargs):
return kwargs_test(b=2, **kwargs)
kwargs_nested(c=3)
#! 13 type-error-too-few-arguments
kwargs_nested()
#! 19 type-error-keyword-argument
kwargs_nested(c=2, d=4)
##! 13 type-error-multiple-values
kwargs_nested(c=2, a=4)
#! 13 type-error-multiple-values
kwargs_nested(b=3, c=2)
# -----------------
# mixed *args/**kwargs
# -----------------
def simple_mixed(a, b, c):
return b
def mixed(*args, **kwargs):
return simple_mixed(1, *args, **kwargs)
mixed(1, 2)
mixed(1, c=2)
mixed(b=2, c=3)
mixed(c=4, b='')
# need separate functions, otherwise these might swallow the errors
def mixed2(*args, **kwargs):
return simple_mixed(1, *args, **kwargs)
#! 6 type-error-too-few-arguments
mixed2(c=2)
#! 6 type-error-too-few-arguments
mixed2(3)
#! 13 type-error-too-many-arguments
mixed2(3, 4, 5)
#! 13 type-error-too-many-arguments
mixed2(3, 4, c=5)
#! 6 type-error-multiple-values
mixed2(3, b=5)
# -----------------
# plain wrong arguments
# -----------------
#! 12 type-error-star-star
simple(1, **[])
#! 11 type-error-star
simple(1, *1)

View File

@@ -0,0 +1,89 @@
try:
#! 4 attribute-error
str.not_existing
except TypeError:
pass
try:
str.not_existing
except AttributeError:
#! 4 attribute-error
str.not_existing
pass
try:
import not_existing_import
except ImportError:
pass
try:
#! 7 import-error
import not_existing_import
except AttributeError:
pass
# -----------------
# multi except
# -----------------
try:
str.not_existing
except (TypeError, AttributeError): pass
try:
str.not_existing
except ImportError:
pass
except (NotImplementedError, AttributeError): pass
try:
#! 4 attribute-error
str.not_existing
except (TypeError, NotImplementedError): pass
# -----------------
# detailed except
# -----------------
try:
str.not_existing
except ((AttributeError)): pass
try:
#! 4 attribute-error
str.not_existing
except [AttributeError]: pass
# Should be able to detect errors in except statement as well.
try:
pass
#! 7 name-error
except Undefined:
pass
# -----------------
# inheritance
# -----------------
try:
undefined
except Exception:
pass
# should catch everything
try:
undefined
except:
pass
# -----------------
# kind of similar: hasattr
# -----------------
if hasattr(str, 'undefined'):
str.undefined
str.upper
#! 4 attribute-error
str.undefined2
#! 4 attribute-error
int.undefined
else:
str.upper
#! 4 attribute-error
str.undefined

View File

@@ -127,6 +127,11 @@ def test_completion_docstring():
assert c.docstring(raw=True, fast=False) == cleandoc(Script.__doc__)
def test_completion_params():
c = Script('import string; string.capwords').completions()[0]
assert [p.name for p in c.params] == ['s', 'sep']
def test_signature_params():
def check(defs):
params = defs[0].params

View File

@@ -227,6 +227,23 @@ def test_no_signature():
assert len(Script(s, column=2).call_signatures()) == 1
def test_dict_literal_in_incomplete_call():
source = """\
import json
def foo():
json.loads(
json.load.return_value = {'foo': [],
'bar': True}
c = Foo()
"""
script = Script(dedent(source), line=4, column=15)
assert script.call_signatures()
def test_completion_interference():
"""Seems to cause problems, see also #396."""
cache.parser_cache.pop(None, None)

View File

@@ -75,9 +75,8 @@ def test_star_import_cache_duration():
# reset values
jedi.settings.star_import_cache_validity = old
length = len(cache._star_import_cache)
assert len(cache._star_import_cache) == 1
cache._star_import_cache = {}
assert length == 1
def test_cache_call_signatures():

View File

@@ -0,0 +1,12 @@
#!/usr/bin/python
import sys
sys.path[0:0] = [
'/usr/lib/python3.4/site-packages',
'/tmp/.buildout/eggs/important_package.egg'
]
import important_package
if __name__ == '__main__':
sys.exit(important_package.main())

View File

@@ -1,10 +0,0 @@
This directory contains pre-compiled extensions modules used to test completions
for compiled modules on Travis-CI (Ubuntu 12.04 64bit).
To build the extensions modules, run::
python setup.py build_ext -i
Then move the compiled modules to their testing package ( ./**compiledXX**, where XX is the
python version used to run setup.py).

View File

@@ -1,6 +0,0 @@
cdef class Foo:
pass
cdef class Bar:
pass

View File

@@ -1,6 +0,0 @@
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules=cythonize("compiled.pyx")
)

View File

@@ -0,0 +1,71 @@
import os
from jedi._compatibility import u
from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
_get_buildout_scripts,
_check_module)
from jedi.parser import Parser
from ..helpers import cwd_at
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_parent_dir_with_file():
parent = _get_parent_dir_with_file(
os.path.abspath(os.curdir), 'buildout.cfg')
assert parent is not None
assert parent.endswith(os.path.join('test', 'test_evaluate', 'buildout_project'))
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_buildout_detection():
scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
assert len(scripts) == 1
curdir = os.path.abspath(os.curdir)
appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
assert scripts[0] == appdir_path
def test_append_on_non_sys_path():
SRC = u("""
class Dummy(object):
path = []
d = Dummy()
d.path.append('foo')""")
p = Parser(SRC)
paths = _check_module(p.module)
assert len(paths) > 0
assert 'foo' not in paths
def test_path_from_invalid_sys_path_assignment():
SRC = u("""
import sys
sys.path = 'invalid'""")
p = Parser(SRC)
paths = _check_module(p.module)
assert len(paths) > 0
assert 'invalid' not in paths
def test_path_from_sys_path_assignment():
SRC = u("""
#!/usr/bin/python
import sys
sys.path[0:0] = [
'/usr/lib/python3.4/site-packages',
'/home/test/.buildout/eggs/important_package.egg'
]
path[0:0] = [1]
import important_package
if __name__ == '__main__':
sys.exit(important_package.main())""")
p = Parser(SRC)
paths = _check_module(p.module)
assert 1 not in paths
assert '/home/test/.buildout/eggs/important_package.egg' in paths

View File

@@ -1,7 +1,8 @@
from jedi._compatibility import builtins
from jedi._compatibility import builtins, is_py3
from jedi.parser.representation import Function
from jedi.evaluate import compiled
from jedi.evaluate import Evaluator
from jedi import Script
def test_simple():
@@ -47,3 +48,19 @@ def test_doc():
"""
obj = compiled.CompiledObject(''.__getnewargs__)
assert obj.doc == ''
def test_string_literals():
def typ(string):
d = Script(string).goto_definitions()[0]
return d.name
assert typ('""') == 'str'
assert typ('r""') == 'str'
if is_py3:
assert typ('br""') == 'bytes'
assert typ('b""') == 'bytes'
assert typ('u""') == 'str'
else:
assert typ('b""') == 'str'
assert typ('u""') == 'unicode'

View File

@@ -2,41 +2,30 @@
Test compiled module
"""
import os
import platform
import sys
import jedi
from ..helpers import cwd_at
@cwd_at('test/test_evaluate/extensions')
def test_completions():
if platform.architecture()[0] == '64bit':
package_name = "compiled%s%s" % sys.version_info[:2]
sys.path.insert(0, os.getcwd())
if os.path.exists(package_name):
s = jedi.Script("from %s import compiled; compiled." % package_name)
assert len(s.completions()) >= 2
s = jedi.Script('import _ctypes; _ctypes.')
assert len(s.completions()) >= 15
@cwd_at('test/test_evaluate/extensions')
def test_call_signatures_extension():
# with a cython extension
if platform.architecture()[0] == '64bit':
package_name = "compiled%s%s" % sys.version_info[:2]
sys.path.insert(0, os.getcwd())
if os.path.exists(package_name):
s = jedi.Script("from %s import compiled; compiled.Foo(" %
package_name)
defs = s.call_signatures()
for call_def in defs:
for param in call_def.params:
pass
if os.name == 'nt':
func = 'LoadLibrary'
params = 1
else:
func = 'dlopen'
params = 2
s = jedi.Script('import _ctypes; _ctypes.%s(' % (func,))
sigs = s.call_signatures()
assert len(sigs) == 1
assert len(sigs[0].params) == params
def test_call_signatures_stdlib():
code = "import math; math.cos("
s = jedi.Script(code)
defs = s.call_signatures()
for call_def in defs:
assert len(call_def.params) == 1
s = jedi.Script('import math; math.cos(')
sigs = s.call_signatures()
assert len(sigs) == 1
assert len(sigs[0].params) == 1

View File

@@ -0,0 +1,20 @@
from jedi import Script
def test_function_execution():
"""
We've been having an issue of a mutable list that was changed inside the
function execution. Test if an execution always returns the same result.
"""
s = """
def x():
return str()
x"""
d = Script(s).goto_definitions()[0]
# Now just use the internals of the result (easiest way to get a fully
# usable function).
func, evaluator = d._definition, d._evaluator
# Should return the same result both times.
assert len(evaluator.execute(func)) == 1
assert len(evaluator.execute(func)) == 1

View File

@@ -15,12 +15,23 @@ def assert_case_equal(case, actual, desired):
"""
assert actual == desired, """
Test %r failed.
actual = %s
actual = %s
desired = %s
""" % (case, actual, desired)
def test_integration(case, monkeypatch, pytestconfig):
def assert_static_analysis(case, actual, desired):
"""A nicer formatting for static analysis tests."""
a = set(actual)
d = set(desired)
assert actual == desired, """
Test %r failed.
not raised = %s
unspecified = %s
""" % (case, sorted(d - a), sorted(a - d))
def test_completion(case, monkeypatch):
if case.skip is not None:
pytest.skip(case.skip)
repo_root = helpers.root_dir
@@ -28,6 +39,10 @@ def test_integration(case, monkeypatch, pytestconfig):
case.run(assert_case_equal)
def test_static_analysis(static_analysis_case):
static_analysis_case.run(assert_static_analysis)
def test_refactor(refactor_case):
"""
Run refactoring test case.

View File

@@ -58,6 +58,7 @@ def test_operators():
diff_code_assert(src, prs.module.get_code())
@pytest.mark.skipif('True', reason='Broke get_code support for yield/return statements.')
def test_get_code():
"""Use the same code that the parser also generates, to compare"""
s = u('''"""a docstring"""

View File

@@ -1,4 +1,6 @@
from jedi._compatibility import u
# -*- coding: utf-8 -*-
from jedi._compatibility import u, is_py3
from jedi.parser import Parser
from jedi.parser.user_context import UserContextParser
from jedi.parser import representation as pr
@@ -119,3 +121,25 @@ def test_carriage_return_statements():
source = source.replace('\n', '\r\n')
stmt = Parser(source).module.statements[0]
assert '#' not in stmt.get_code()
def test_incomplete_list_comprehension():
""" Shouldn't raise an error, same bug as #418. """
s = Parser(u('(1 for def')).module.statements[0]
assert s.expression_list()
def test_hex_values_in_docstring():
source = r'''
def foo(object):
"""
\xff
"""
return 1
'''
doc = Parser(dedent(u(source))).module.subscopes[0].raw_doc
if is_py3:
assert doc == '\xff'
else:
assert doc == u('<EFBFBD>')

View File

@@ -0,0 +1,8 @@
from jedi.parser import Parser
from jedi._compatibility import u
def test_import_is_nested():
imp = Parser(u('import ')).module.imports[0]
# should not raise an error, even if it's not a complete import
assert not imp.is_nested()

View File

@@ -1,10 +1,7 @@
from jedi import parser
from jedi._compatibility import u
from jedi import parser
try:
import unittest2 as unittest
except ImportError: # pragma: no cover
import unittest
from ..helpers import unittest
class TokenTest(unittest.TestCase):

View File

@@ -0,0 +1,6 @@
import jedi
def test_form_feed_characters():
s = "\f\nclass Test(object):\n pass"
jedi.Script(s, line=2, column=18).call_signatures()

View File

@@ -57,7 +57,7 @@ class TestRegression(TestCase):
self.assertRaises(jedi.NotFoundError, get_def, cls)
@pytest.mark.skip('Skip for now, test case is not really supported.')
@pytest.mark.skipif('True', reason='Skip for now, test case is not really supported.')
@cwd_at('jedi')
def test_add_dynamic_mods(self):
fname = '__main__.py'

View File

@@ -34,7 +34,7 @@ class TestSpeed(TestCase):
s = "from posixpath import join; join('', '')."
assert len(jedi.Script(s).completions()) > 10 # is a str completion
@_check_speed(0.1)
@_check_speed(0.15)
def test_scipy_speed(self):
s = 'import scipy.weave; scipy.weave.inline('
script = jedi.Script(s, 1, len(s), '')

View File

@@ -1,10 +1,15 @@
import readline
try:
import readline
except ImportError:
readline = False
from jedi import utils
from .helpers import TestCase, cwd_at
from .helpers import unittest, cwd_at
class TestSetupReadline(TestCase):
@unittest.skipIf(not readline, "readline not found")
class TestSetupReadline(unittest.TestCase):
class NameSpace(object):
pass

View File

@@ -2,7 +2,7 @@
envlist = py26, py27, py32, py33, py34
[testenv]
deps =
https://bitbucket.org/hpk42/pytest/get/c4f58165e0d4.zip
pytest>=2.3.5
# docopt for sith doctests
docopt
# coloroma for colored debug output