forked from VimPlug/jedi
Compare commits
780 Commits
(commit list omitted: the compare table's author, message, and date columns were empty in this capture; only bare SHA1 hashes survived)
.gitmodules (3 changes, vendored, new file)
@@ -0,0 +1,3 @@
[submodule "jedi/third_party/typeshed"]
    path = jedi/third_party/typeshed
    url = https://github.com/davidhalter/typeshed.git

.travis.yml (54 changes)
@@ -1,50 +1,64 @@
dist: xenial
language: python
sudo: true
python:
  - 2.7
  - 3.4
  - 3.5
  - 3.6
  - 3.7

env:
  - JEDI_TEST_ENVIRONMENT=27
  - JEDI_TEST_ENVIRONMENT=34
  - JEDI_TEST_ENVIRONMENT=35
  - JEDI_TEST_ENVIRONMENT=36

addons:
  apt:
    packages:
      # Required to properly create a virtual environment with system Python 3.4.
      - python3.4-venv
  - JEDI_TEST_ENVIRONMENT=37

matrix:
  allow_failures:
    - python: pypy
    - env: TOXENV=sith
    - python: 3.7-dev
    - python: 3.8-dev
  include:
    - python: 3.6
      env:
        - TOXENV=cov
        - JEDI_TEST_ENVIRONMENT=36
    - python: 3.6
      env: TOXENV=sith
  # For now ignore pypy, there are so many issues that we don't really need
  # to run it.
  #- python: pypy
  - python: "3.7-dev"
before_install:
  - ./travis_install.sh
  # Need to add the path to the Python versions in the end. This might add
  # something twice, but it doesn't really matter, because they are appended.
  - export PATH=$PATH:/opt/python/3.5/bin
  # 3.6 was not installed manually, but already is on the system. However
  # it's not on path (unless 3.6 is selected).
  - export PATH=$PATH:/opt/python/3.6/bin
    - python: 3.8-dev
      env:
        - JEDI_TEST_ENVIRONMENT=38
install:
  - pip install --quiet tox-travis
script:
  - |
    # Setup/install Python for $JEDI_TEST_ENVIRONMENT.
    set -ex
    test_env_version=${JEDI_TEST_ENVIRONMENT:0:1}.${JEDI_TEST_ENVIRONMENT:1:1}
    if [ "$TRAVIS_PYTHON_VERSION" != "$test_env_version" ]; then
      python_bin=python$test_env_version
      python_path="$(which $python_bin || true)"
      if [ -z "$python_path" ]; then
        # Only required for JEDI_TEST_ENVIRONMENT=34.
        download_name=python-$test_env_version
        wget https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/16.04/x86_64/$download_name.tar.bz2
        sudo tar xjf $download_name.tar.bz2 --directory / opt/python
        ln -s "/opt/python/${test_env_version}/bin/python" /home/travis/bin/$python_bin
      elif [ "${python_path#/opt/pyenv/shims}" != "$python_path" ]; then
        # Activate pyenv version (required with JEDI_TEST_ENVIRONMENT=36).
        pyenv_bin="$(pyenv whence --path "$python_bin" | head -n1)"
        ln -s "$pyenv_bin" /home/travis/bin/$python_bin
      fi
      $python_bin --version
      python_ver=$($python_bin -c 'import sys; print("%d%d" % sys.version_info[0:2])')
      if [ "$JEDI_TEST_ENVIRONMENT" != "$python_ver" ]; then
        echo "Unexpected Python version for $JEDI_TEST_ENVIRONMENT: $python_ver"
        set +ex
        exit 2
      fi
    fi
    set +ex
  - tox
after_script:
  - |

AUTHORS.txt
@@ -51,5 +51,6 @@ Maksim Novikov (@m-novikov) <mnovikov.work@gmail.com>
Tobias Rzepka (@TobiasRzepka)
micbou (@micbou)
Dima Gerasimov (@karlicoss) <karlicoss@gmail.com>
Max Woerner Chase (@mwchase) <max.chase@gmail.com>

Note: (@user) means a github user name.

CHANGELOG.rst
@@ -3,6 +3,25 @@
Changelog
---------

0.14.1 (2019-07-13)
+++++++++++++++++++

- CallSignature.index should now be working a lot better
- A couple of smaller bugfixes

0.14.0 (2019-06-20)
+++++++++++++++++++

- Added ``goto_*(prefer_stubs=True)`` as well as ``goto_*(only_stubs=True)``
- Stubs are used now for type inference
- Typeshed is used for better type inference
- Reworked Definition.full_name, should have more correct return values

0.13.3 (2019-02-24)
+++++++++++++++++++

- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870

0.13.2 (2018-12-15)
+++++++++++++++++++

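The stub-related keywords above can be exercised directly; a minimal sketch, assuming a 0.14.x install (the snippet contents and file name are illustrative, the keyword arguments come from the API changes shown further below):

```python
import jedi

source = '''import json
json.loads'''
script = jedi.Script(source, 2, len('json.loads'), 'example.py')

# prefer_stubs=True resolves definitions to typeshed stubs when one exists.
for definition in script.goto_assignments(prefer_stubs=True):
    print(definition.module_path, definition.line)
```
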
MANIFEST.in
@@ -8,8 +8,10 @@ include conftest.py
include pytest.ini
include tox.ini
include requirements.txt
include jedi/evaluate/compiled/fake/*.pym
include jedi/parser/python/grammar*.txt
recursive-include jedi/third_party *.pyi
include jedi/third_party/typeshed/LICENSE
include jedi/third_party/typeshed/README
recursive-include test *
recursive-include docs *
recursive-exclude * *.pyc

README.rst (27 changes)
@@ -27,20 +27,18 @@ Jedi - an awesome autocompletion/static analysis library for Python
<https://stackoverflow.com/questions/tagged/python-jedi>`_ *with the label* ``python-jedi``.

Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but does static analysis for now as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi is a static analysis tool for Python that can be used in IDEs/editors.
Jedi has a focus on autocompletion and goto functionality. Jedi is fast and is
very well tested. It understands Python and stubs on a deep level.

Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi has support for different goto functions. It's possible to search for
usages and list names in a Python file to get information about them.

Jedi uses a very simple API to connect with IDEs. There's a reference
Jedi uses a very simple API to connect with IDE's. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
Autocompletion in your REPL is also possible, IPython uses it natively and for
the CPython REPL you have to install it.

Jedi can currently be used with the following editors/projects:

@@ -148,17 +146,12 @@ This means that in Python you can enable tab completion in a `REPL
<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.

Static Analysis / Linter
Static Analysis
------------------------

To do all forms of static analysis, please try to use ``jedi.names``. It will
return a list of names that you can use to infer types and so on.

Linting is another thing that is going to be part of Jedi. For now you can try
an alpha version ``python -m jedi linter``. The API might change though and
it's still buggy. It's Jedi's goal to be smarter than classic linter and
understand ``AttributeError`` and other code issues.

Refactoring
-----------
@@ -210,7 +203,7 @@ Acknowledgements

.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: https://valloric.github.io/YouCompleteMe/
.. _youcompleteme: https://github.com/ycm-core/YouCompleteMe
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _completor.vim: https://github.com/maralla/completor.vim
.. _Jedi.el: https://github.com/tkf/emacs-jedi

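Since the README section above points at ``jedi.names`` without showing it, here is a minimal sketch of that call (the analyzed source is made up):

```python
import jedi

source = '''
import json

def dump(obj):
    return json.dumps(obj)
'''
# jedi.names returns Definition objects for the names in the module;
# all_scopes=True also includes names nested inside functions/classes.
for definition in jedi.names(source, path='example.py', all_scopes=True):
    print(definition.name, definition.type, definition.line)
```
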
appveyor.yml (29 changes)
@@ -12,6 +12,9 @@ environment:
  - TOXENV: py27
    PYTHON_PATH: C:\Python27
    JEDI_TEST_ENVIRONMENT: 36
  - TOXENV: py27
    PYTHON_PATH: C:\Python27
    JEDI_TEST_ENVIRONMENT: 37

  - TOXENV: py34
    PYTHON_PATH: C:\Python34
@@ -25,6 +28,9 @@ environment:
  - TOXENV: py34
    PYTHON_PATH: C:\Python34
    JEDI_TEST_ENVIRONMENT: 36
  - TOXENV: py34
    PYTHON_PATH: C:\Python34
    JEDI_TEST_ENVIRONMENT: 37

  - TOXENV: py35
    PYTHON_PATH: C:\Python35
@@ -38,6 +44,9 @@ environment:
  - TOXENV: py35
    PYTHON_PATH: C:\Python35
    JEDI_TEST_ENVIRONMENT: 36
  - TOXENV: py35
    PYTHON_PATH: C:\Python35
    JEDI_TEST_ENVIRONMENT: 37

  - TOXENV: py36
    PYTHON_PATH: C:\Python36
@@ -51,7 +60,27 @@ environment:
  - TOXENV: py36
    PYTHON_PATH: C:\Python36
    JEDI_TEST_ENVIRONMENT: 36
  - TOXENV: py36
    PYTHON_PATH: C:\Python36
    JEDI_TEST_ENVIRONMENT: 37

  - TOXENV: py37
    PYTHON_PATH: C:\Python37
    JEDI_TEST_ENVIRONMENT: 27
  - TOXENV: py37
    PYTHON_PATH: C:\Python37
    JEDI_TEST_ENVIRONMENT: 34
  - TOXENV: py37
    PYTHON_PATH: C:\Python37
    JEDI_TEST_ENVIRONMENT: 35
  - TOXENV: py37
    PYTHON_PATH: C:\Python37
    JEDI_TEST_ENVIRONMENT: 36
  - TOXENV: py37
    PYTHON_PATH: C:\Python37
    JEDI_TEST_ENVIRONMENT: 37
install:
  - git submodule update --init --recursive
  - set PATH=%PYTHON_PATH%;%PYTHON_PATH%\Scripts;%PATH%
  - pip install tox
build_script:

conftest.py (23 changes)
@@ -22,7 +22,7 @@ collect_ignore = [
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests uses
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in py.test.
# workaround once the problem is fixed in pytest.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
@@ -105,6 +105,11 @@ def Script(environment):
    return partial(jedi.Script, environment=environment)


@pytest.fixture(scope='session')
def names(environment):
    return partial(jedi.names, environment=environment)


@pytest.fixture(scope='session')
def has_typing(environment):
    if environment.version_info >= (3, 5, 0):
@@ -119,3 +124,19 @@ def has_typing(environment):
@pytest.fixture(scope='session')
def jedi_path():
    return os.path.dirname(__file__)


@pytest.fixture()
def skip_python2(environment):
    if environment.version_info.major == 2:
        # This if is just needed to avoid that tests ever skip way more than
        # they should for all Python versions.
        pytest.skip()


@pytest.fixture()
def skip_pre_python38(environment):
    if environment.version_info < (3, 8):
        # This if is just needed to avoid that tests ever skip way more than
        # they should for all Python versions.
        pytest.skip()

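The new skip fixtures are consumed by simply requesting them; a hypothetical test using the fixtures defined above (the test body is illustrative):

```python
# `Script` and `skip_pre_python38` are the conftest.py fixtures above.
def test_only_on_recent_python(Script, skip_pre_python38):
    # skip_pre_python38 has already called pytest.skip() on interpreters
    # older than 3.8, so this body only runs on 3.8+.
    script = Script('import json; json.lo', 1, len('import json; json.lo'))
    assert {c.name for c in script.completions()} == {'load', 'loads'}
```
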
@@ -21,6 +21,7 @@ rm -rf $PROJECT_NAME
git clone .. $PROJECT_NAME
cd $PROJECT_NAME
git checkout $BRANCH
git submodule update --init

# Test first.
tox

docs/docs/development.rst
@@ -8,6 +8,8 @@ Jedi Development
.. note:: This documentation is for Jedi developers who want to improve Jedi
   itself, but have no idea how Jedi works. If you want to use Jedi for
   your IDE, look at the `plugin api <api.html>`_.
   It is also important to note that it's a pretty old version and some things
   might not apply anymore.


Introduction

docs/docs/features.rst
@@ -8,9 +8,6 @@ Jedi obviously supports autocompletion. It's also possible to get it working in

Static analysis is also possible by using the command ``jedi.names``.

The Jedi Linter is currently in an alpha version and can be tested by calling
``python -m jedi linter``.

Jedi would in theory support refactoring, but we have never publicized it,
because it's not production ready. If you're interested in helping out here,
let me know. With the latest parser changes, it should be very easy to actually
@@ -26,6 +23,7 @@ General Features
- Great Virtualenv support
- Can infer function arguments from sphinx, epydoc and basic numpydoc docstrings,
  and PEP0484-style type hints (:ref:`type hinting <type-hinting>`)
- Stub files


Supported Python Features

docs/docs/installation.rst
@@ -11,8 +11,16 @@ jedi-vim_ does by default), or you can install it systemwide.
editor, refer to the corresponding documentation.


The preferred way
-----------------
The normal way
--------------

Most people use Jedi with a :ref:`editor plugins<editor-plugins>`. Typically
you install Jedi by installing an editor plugin. No necessary steps are needed.
Just take a look at the instructions for the plugin.


With pip
--------

On any system you can install |jedi| directly from the Python package index
using pip::
@@ -57,19 +65,15 @@ Others
We are in the discussion of adding |jedi| to the Fedora repositories.


Manual installation from a downloaded package
Manual installation from GitHub
---------------------------------------------

If you prefer not to use an automated package installer, you can `download
<https://github.com/davidhalter/jedi/archive/master.zip>`__ a current copy of
|jedi| and install it manually.

To install it, navigate to the directory containing `setup.py` on your console
and type::
If you prefer not to use an automated package installer, you can clone the source from GitHub and install it manually. To install it, run these commands::

    git clone --recurse-submodules https://github.com/davidhalter/jedi
    cd jedi
    sudo python setup.py install


Inclusion as a submodule
------------------------

jedi/__init__.py
@@ -1,42 +1,39 @@
"""
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but does static analysis for now as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi is a static analysis tool for Python that can be used in IDEs/editors.
Jedi has a focus on autocompletion and goto functionality. Jedi is fast and is
very well tested. It understands Python and stubs on a deep level.

Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi has support for different goto functions. It's possible to search for
usages and list names in a Python file to get information about them.

Jedi uses a very simple API to connect with IDE's. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
Autocompletion in your REPL is also possible, IPython uses it natively and for
the CPython REPL you have to install it.

To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
Here's a simple example of the autocompletion feature:

>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
... import json
... json.lo'''
>>> script = jedi.Script(source, 3, len('json.lo'), 'example.py')
>>> script
<Script: 'example.py' ...>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> completions
[<Completion: load>, <Completion: loads>]
>>> print(completions[0].complete)
te
ad
>>> print(completions[0].name)
date
load

As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""

__version__ = '0.13.2'
__version__ = '0.14.1'

from jedi.api import Script, Interpreter, set_debug_function, \
    preload_module, names

jedi/_compatibility.py
@@ -2,7 +2,10 @@
To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been
created. Clearly there is huge need to use conforming syntax.
"""
from __future__ import print_function
import atexit
import errno
import functools
import sys
import os
import re
@@ -10,10 +13,14 @@ import pkgutil
import warnings
import inspect
import subprocess
import weakref
try:
    import importlib
except ImportError:
    pass
from zipimport import zipimporter

from jedi.file_io import KnownContentFileIO, ZipFileIO

is_py3 = sys.version_info[0] >= 3
is_py35 = is_py3 and sys.version_info[1] >= 5
@@ -55,7 +62,7 @@ def find_module_py34(string, path=None, full_name=None, is_global_search=True):
            # This is a namespace package.
            full_name = string if not path else full_name
            implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
            return None, implicit_ns_info, False
            return implicit_ns_info, True
        break

    return find_module_py33(string, path, loader)
@@ -81,47 +88,81 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s
    if loader is None:
        raise ImportError("Couldn't find a loader for {}".format(string))

    return _from_loader(loader, string)


def _from_loader(loader, string):
    is_package = loader.is_package(string)
    try:
        is_package = loader.is_package(string)
        if is_package:
            if hasattr(loader, 'path'):
                module_path = os.path.dirname(loader.path)
            else:
                # At least zipimporter does not have path attribute
                module_path = os.path.dirname(loader.get_filename(string))
            if hasattr(loader, 'archive'):
                module_file = DummyFile(loader, string)
            else:
                module_file = None
        else:
            module_path = loader.get_filename(string)
            module_file = DummyFile(loader, string)
        get_filename = loader.get_filename
    except AttributeError:
        # ExtensionLoader has not attribute get_filename, instead it has a
        # path attribute that we can use to retrieve the module path
        try:
            module_path = loader.path
            module_file = DummyFile(loader, string)
        except AttributeError:
            module_path = string
            module_file = None
        finally:
            is_package = False
        return None, is_package
    else:
        module_path = cast_path(get_filename(string))

    if hasattr(loader, 'archive'):
        module_path = loader.archive
    # To avoid unicode and read bytes, "overwrite" loader.get_source if
    # possible.
    f = type(loader).get_source
    if is_py3 and f is not importlib.machinery.SourceFileLoader.get_source:
        # Unfortunately we are reading unicode here, not bytes.
        # It seems hard to get bytes, because the zip importer
        # logic just unpacks the zip file and returns a file descriptor
        # that we cannot as easily access. Therefore we just read it as
        # a string in the cases where get_source was overwritten.
        code = loader.get_source(string)
    else:
        code = _get_source(loader, string)

    return module_file, module_path, is_package
    if code is None:
        return None, is_package
    if isinstance(loader, zipimporter):
        return ZipFileIO(module_path, code, cast_path(loader.archive)), is_package

    return KnownContentFileIO(module_path, code), is_package


def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True):
def _get_source(loader, fullname):
    """
    This method is here as a replacement for SourceLoader.get_source. That
    method returns unicode, but we prefer bytes.
    """
    path = loader.get_filename(fullname)
    try:
        return loader.get_data(path)
    except OSError:
        raise ImportError('source not available through get_data()',
                          name=fullname)


def find_module_pre_py3(string, path=None, full_name=None, is_global_search=True):
    # This import is here, because in other places it will raise a
    # DeprecationWarning.
    import imp
    try:
        module_file, module_path, description = imp.find_module(string, path)
        module_type = description[2]
        return module_file, module_path, module_type is imp.PKG_DIRECTORY
        is_package = module_type is imp.PKG_DIRECTORY
        if is_package:
            # In Python 2 directory package imports are returned as folder
            # paths, not __init__.py paths.
            p = os.path.join(module_path, '__init__.py')
            try:
                module_file = open(p)
                module_path = p
            except FileNotFoundError:
                pass
        elif module_type != imp.PY_SOURCE:
            if module_file is not None:
                module_file.close()
            module_file = None

        if module_file is None:
            code = None
            return None, is_package

        with module_file:
            code = module_file.read()
        return KnownContentFileIO(cast_path(module_path), code), is_package
    except ImportError:
        pass
@@ -130,26 +171,13 @@ def find_module_pre_py34(string, path=None, full_name=None, is_global_search=Tru
    for item in path:
        loader = pkgutil.get_importer(item)
        if loader:
            try:
                loader = loader.find_module(string)
                if loader:
                    is_package = loader.is_package(string)
                    is_archive = hasattr(loader, 'archive')
                    module_path = loader.get_filename(string)
                    if is_package:
                        module_path = os.path.dirname(module_path)
                    if is_archive:
                        module_path = loader.archive
                    file = None
                    if not is_package or is_archive:
                        file = DummyFile(loader, string)
                    return file, module_path, is_package
            except ImportError:
                pass
            loader = loader.find_module(string)
            if loader is not None:
                return _from_loader(loader, string)
    raise ImportError("No module named {}".format(string))


find_module = find_module_py34 if is_py3 else find_module_pre_py34
find_module = find_module_py34 if is_py3 else find_module_pre_py3
find_module.__doc__ = """
Provides information about a module.
@@ -263,17 +291,6 @@ Usage::
"""


class Python3Method(object):
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype):
        if obj is None:
            return lambda *args, **kwargs: self.func(*args, **kwargs)
        else:
            return lambda *args, **kwargs: self.func(obj, *args, **kwargs)


def use_metaclass(meta, *bases):
    """ Create a class with a metaclass. """
    if not bases:
@@ -344,9 +361,14 @@
    FileNotFoundError = IOError

try:
    NotADirectoryError = NotADirectoryError
    IsADirectoryError = IsADirectoryError
except NameError:
    NotADirectoryError = IOError
    IsADirectoryError = IOError

try:
    PermissionError = PermissionError
except NameError:
    PermissionError = IOError


def no_unicode_pprint(dct):
@@ -361,14 +383,6 @@ def no_unicode_pprint(dct):
    print(re.sub("u'", "'", s))


def print_to_stderr(*args):
    if is_py3:
        eval("print(*args, file=sys.stderr)")
    else:
        print >> sys.stderr, args
    sys.stderr.flush()


def utf8_repr(func):
    """
    ``__repr__`` methods in Python 2 don't allow unicode objects to be
@@ -471,8 +485,24 @@ def pickle_load(file):
        raise


def _python2_dct_keys_to_unicode(data):
    """
    Python 2 stores object __dict__ entries as bytes, not unicode, correct it
    here. Python 2 can deal with both, Python 3 expects unicode.
    """
    if isinstance(data, tuple):
        return tuple(_python2_dct_keys_to_unicode(x) for x in data)
    elif isinstance(data, list):
        return list(_python2_dct_keys_to_unicode(x) for x in data)
    elif hasattr(data, '__dict__') and type(data.__dict__) == dict:
        data.__dict__ = {unicode(k): v for k, v in data.__dict__.items()}
    return data


def pickle_dump(data, file, protocol):
    try:
        if not is_py3:
            data = _python2_dct_keys_to_unicode(data)
        pickle.dump(data, file, protocol)
        # On Python 3.3 flush throws sometimes an error even though the writing
        # operation should be completed.
@@ -587,3 +617,49 @@ def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        if _access_check(name, mode):
            return name
    return None


if not is_py3:
    # Simplified backport of Python 3 weakref.finalize:
    # https://github.com/python/cpython/blob/ded4737989316653469763230036b04513cb62b3/Lib/weakref.py#L502-L662
    class finalize(object):
        """Class for finalization of weakrefable objects.

        finalize(obj, func, *args, **kwargs) returns a callable finalizer
        object which will be called when obj is garbage collected. The
        first time the finalizer is called it evaluates func(*arg, **kwargs)
        and returns the result. After this the finalizer is dead, and
        calling it just returns None.

        When the program exits any remaining finalizers will be run.
        """

        # Finalizer objects don't have any state of their own.
        # This ensures that they cannot be part of a ref-cycle.
        __slots__ = ()
        _registry = {}

        def __init__(self, obj, func, *args, **kwargs):
            info = functools.partial(func, *args, **kwargs)
            info.weakref = weakref.ref(obj, self)
            self._registry[self] = info

        def __call__(self):
            """Return func(*args, **kwargs) if alive."""
            info = self._registry.pop(self, None)
            if info:
                return info()

        @classmethod
        def _exitfunc(cls):
            if not cls._registry:
                return
            for finalizer in list(cls._registry):
                try:
                    finalizer()
                except Exception:
                    sys.excepthook(*sys.exc_info())
                assert finalizer not in cls._registry

    atexit.register(finalize._exitfunc)
    weakref.finalize = finalize

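The backport mirrors the contract of the standard-library ``weakref.finalize``; a small sketch of the behavior the code above relies on (the class and function names are illustrative):

```python
import weakref


class Resource(object):
    pass


def close(name):
    print('closing %s' % name)


resource = Resource()
finalizer = weakref.finalize(resource, close, 'resource-1')
# close('resource-1') runs exactly once: when `resource` is garbage
# collected, when the finalizer is called explicitly, or at exit.
del resource
```
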
jedi/api/__init__.py
@@ -16,11 +16,12 @@ import warnings
import parso
from parso.python import tree

from jedi._compatibility import force_unicode, is_py3
from jedi._compatibility import force_unicode, cast_path, is_py3
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
@@ -32,11 +33,14 @@ from jedi.evaluate import imports
from jedi.evaluate import usages
from jedi.evaluate.arguments import try_iter_content
from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
from jedi.evaluate.sys_path import dotted_path_in_sys_path
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.sys_path import transform_path_to_dotted
from jedi.evaluate.names import TreeNameDefinition, ParamName
from jedi.evaluate.syntax_tree import tree_name_to_contexts
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.base_context import ContextSet
from jedi.evaluate.context.iterable import unpack_tuple_to_dict
from jedi.evaluate.gradual.conversion import convert_names, convert_contexts
from jedi.evaluate.gradual.utils import load_proper_stub_module

# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
@@ -78,10 +82,11 @@ class Script(object):
    :param sys_path: ``sys.path`` to use during analysis of the script
    :type sys_path: list
    :param environment: TODO
    :type sys_path: Environment
    :type environment: Environment
    """
    def __init__(self, source=None, line=None, column=None, path=None,
                 encoding='utf-8', sys_path=None, environment=None):
                 encoding='utf-8', sys_path=None, environment=None,
                 _project=None):
        self._orig_path = path
        # An empty path (also empty string) should always result in no path.
        self.path = os.path.abspath(path) if path else None
@@ -97,22 +102,24 @@ class Script(object):
        if sys_path is not None and not is_py3:
            sys_path = list(map(force_unicode, sys_path))

        # Load the Python grammar of the current interpreter.
        project = get_default_project(
            os.path.dirname(self.path) if path else os.getcwd()
        )
        project = _project
        if project is None:
            # Load the Python grammar of the current interpreter.
            project = get_default_project(
                os.path.dirname(self.path) if path else os.getcwd()
            )
        # TODO deprecate and remove sys_path from the Script API.
        if sys_path is not None:
            project._sys_path = sys_path
        self._evaluator = Evaluator(
            project, environment=environment, script_path=self.path
        )
        self._project = project
        debug.speed('init')
        self._module_node, source = self._evaluator.parse_and_get_code(
            code=source,
            path=self.path,
            encoding=encoding,
            use_latest_grammar=path and path.endswith('.pyi'),
            cache=False,  # No disk cache, because the current script often changes.
            diff_cache=settings.fast_parser,
            cache_path=settings.cache_directory,
@@ -142,18 +149,48 @@ class Script(object):
        cache.clear_time_caches()
        debug.reset_time()

    # Cache the module, this is mostly useful for testing, since this shouldn't
    # be called multiple times.
    @cache.memoize_method
    def _get_module(self):
        name = '__main__'
        names = None
        is_package = False
        if self.path is not None:
            import_names = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path)
            import_names, is_p = transform_path_to_dotted(
                self._evaluator.get_sys_path(add_parent_paths=False),
                self.path
            )
            if import_names is not None:
                name = '.'.join(import_names)
                names = import_names
                is_package = is_p

        if self.path is None:
            file_io = None
        else:
            file_io = KnownContentFileIO(cast_path(self.path), self._code)
        if self.path is not None and self.path.endswith('.pyi'):
            # We are in a stub file. Try to load the stub properly.
            stub_module = load_proper_stub_module(
                self._evaluator,
                file_io,
                names,
                self._module_node
            )
            if stub_module is not None:
                return stub_module

        if names is None:
            names = ('__main__',)

        module = ModuleContext(
            self._evaluator, self._module_node, self.path,
            code_lines=self._code_lines
            self._evaluator, self._module_node, file_io,
            string_names=names,
            code_lines=self._code_lines,
            is_package=is_package,
        )
        imports.add_module_to_cache(self._evaluator, name, module)
        if names[0] not in ('builtins', '__builtin__', 'typing'):
            # These modules are essential for Jedi, so don't overwrite them.
            self._evaluator.module_cache.add(names, ContextSet([module]))
        return module

    def __repr__(self):
@@ -171,34 +208,14 @@ class Script(object):
        :return: Completion objects, sorted by name and __ comes last.
        :rtype: list of :class:`classes.Completion`
        """
        debug.speed('completions start')
        completion = Completion(
            self._evaluator, self._get_module(), self._code_lines,
            self._pos, self.call_signatures
        )
        completions = completion.completions()
        with debug.increase_indent_cm('completions'):
            completion = Completion(
                self._evaluator, self._get_module(), self._code_lines,
                self._pos, self.call_signatures
            )
            return completion.completions()

        def iter_import_completions():
            for c in completions:
                tree_name = c._name.tree_name
                if tree_name is None:
                    continue
                definition = tree_name.get_definition()
                if definition is not None \
                        and definition.type in ('import_name', 'import_from'):
                    yield c

        if len(list(iter_import_completions())) > 10:
            # For now disable completions if there's a lot of imports that
            # might potentially be resolved. This is the case for tensorflow
            # and has been fixed for it. This is obviously temporary until we
            # have a better solution.
            self._evaluator.infer_enabled = False

        debug.speed('completions end')
        return completions

    def goto_definitions(self):
    def goto_definitions(self, **kwargs):
        """
        Return the definitions of a the path under the cursor. goto function!
        This follows complicated paths and returns the end, not the first
@@ -208,8 +225,15 @@ class Script(object):
        because Python itself is a dynamic language, which means depending on
        an option you can have two different versions of a function.

        :param only_stubs: Only return stubs for this goto call.
        :param prefer_stubs: Prefer stubs to Python objects for this type
            inference call.
        :rtype: list of :class:`classes.Definition`
        """
        with debug.increase_indent_cm('goto_definitions'):
            return self._goto_definitions(**kwargs)

    def _goto_definitions(self, only_stubs=False, prefer_stubs=False):
        leaf = self._module_node.get_name_of_position(self._pos)
        if leaf is None:
            leaf = self._module_node.get_leaf_for_position(self._pos)
@@ -217,27 +241,42 @@ class Script(object):
            return []

        context = self._evaluator.create_context(self._get_module(), leaf)
        definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)

        names = [s.name for s in definitions]
        defs = [classes.Definition(self._evaluator, name) for name in names]
        contexts = helpers.evaluate_goto_definition(self._evaluator, context, leaf)
        contexts = convert_contexts(
            contexts,
            only_stubs=only_stubs,
            prefer_stubs=prefer_stubs,
        )

        defs = [classes.Definition(self._evaluator, c.name) for c in contexts]
        # The additional set here allows the definitions to become unique in an
        # API sense. In the internals we want to separate more things than in
        # the API.
        return helpers.sorted_definitions(set(defs))

    def goto_assignments(self, follow_imports=False, follow_builtin_imports=False):
    def goto_assignments(self, follow_imports=False, follow_builtin_imports=False, **kwargs):
        """
        Return the first definition found, while optionally following imports.
        Multiple objects may be returned, because Python itself is a
        dynamic language, which means depending on an option you can have two
        different versions of a function.

        .. note:: It is deprecated to use follow_imports and follow_builtin_imports as
            positional arguments. Will be a keyword argument in 0.16.0.

        :param follow_imports: The goto call will follow imports.
        :param follow_builtin_imports: If follow_imports is True will decide if
            it follow builtin imports.
        :param only_stubs: Only return stubs for this goto call.
        :param prefer_stubs: Prefer stubs to Python objects for this goto call.
        :rtype: list of :class:`classes.Definition`
        """
        with debug.increase_indent_cm('goto_assignments'):
            return self._goto_assignments(follow_imports, follow_builtin_imports, **kwargs)

    def _goto_assignments(self, follow_imports, follow_builtin_imports,
                          only_stubs=False, prefer_stubs=False):
        def filter_follow_imports(names, check):
            for name in names:
                if check(name):
@@ -248,7 +287,7 @@ class Script(object):
                    if new_name.start_pos is None:
                        found_builtin = True

                if found_builtin and not isinstance(name, imports.SubModuleName):
                if found_builtin:
                    yield name
                else:
                    for new_name in new_names:
@@ -258,18 +297,19 @@ class Script(object):
        tree_name = self._module_node.get_name_of_position(self._pos)
        if tree_name is None:
            return []
            # Without a name we really just want to jump to the result e.g.
            # executed by `foo()`, if we the cursor is after `)`.
            return self.goto_definitions(only_stubs=only_stubs, prefer_stubs=prefer_stubs)
        context = self._evaluator.create_context(self._get_module(), tree_name)
        names = list(self._evaluator.goto(context, tree_name))

        if follow_imports:
            def check(name):
                return name.is_import()
        else:
            def check(name):
                return isinstance(name, imports.SubModuleName)

        names = filter_follow_imports(names, check)
        names = filter_follow_imports(names, lambda name: name.is_import())
        names = convert_names(
            names,
            only_stubs=only_stubs,
            prefer_stubs=prefer_stubs,
        )

        defs = [classes.Definition(self._evaluator, d) for d in set(names)]
        return helpers.sorted_definitions(defs)

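From the outside, the public/private split above is invisible; callers keep using the public methods and pass the new keywords through (the snippet contents are illustrative):

```python
import jedi

source = 'from collections import namedtuple\nnamedtuple'
script = jedi.Script(source, 2, len('namedtuple'), 'demo.py')

# Keyword arguments are forwarded to _goto_definitions/_goto_assignments.
definitions = script.goto_definitions(only_stubs=True)
assignments = script.goto_assignments(follow_imports=True, prefer_stubs=True)
for d in definitions + assignments:
    print(d.full_name, d.module_path)
```
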
@@ -325,29 +365,27 @@ class Script(object):
        :rtype: list of :class:`classes.CallSignature`
        """
        call_signature_details = \
            helpers.get_call_signature_details(self._module_node, self._pos)
        if call_signature_details is None:
        call_details = helpers.get_call_signature_details(self._module_node, self._pos)
        if call_details is None:
            return []

        context = self._evaluator.create_context(
            self._get_module(),
            call_signature_details.bracket_leaf
            call_details.bracket_leaf
        )
        definitions = helpers.cache_call_signatures(
            self._evaluator,
            context,
            call_signature_details.bracket_leaf,
            call_details.bracket_leaf,
            self._code_lines,
            self._pos
        )
        debug.speed('func_call followed')

        return [classes.CallSignature(self._evaluator, d.name,
                                      call_signature_details.bracket_leaf.start_pos,
                                      call_signature_details.call_index,
                                      call_signature_details.keyword_name_str)
                for d in definitions if hasattr(d, 'py__call__')]
        # TODO here we use stubs instead of the actual contexts. We should use
        # the signatures from stubs, but the actual contexts, probably?!
        return [classes.CallSignature(self._evaluator, signature, call_details)
                for signature in definitions.get_signatures()]

    def _analysis(self):
        self._evaluator.is_analysis = True
@@ -434,7 +472,7 @@ class Interpreter(Script):
            self._evaluator,
            self._module_node,
            self.namespaces,
            path=self.path,
            file_io=KnownContentFileIO(self.path, self._code),
            code_lines=self._code_lines,
        )
@@ -464,9 +502,8 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
            cls = ParamName
        else:
            cls = TreeNameDefinition
        is_module = name.parent.type == 'file_input'
        return cls(
            module_context.create_context(name if is_module else name.parent),
            module_context.create_context(name),
            name
        )

@@ -4,17 +4,21 @@ These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
"""
import re
+import warnings

from parso.python.tree import search_ancestor

from jedi import settings
-from jedi.evaluate.utils import ignored, unite
+from jedi import debug
+from jedi.evaluate.utils import unite
from jedi.cache import memoize_method
from jedi.evaluate import imports
from jedi.evaluate import compiled
from jedi.evaluate.imports import ImportName
from jedi.evaluate.context import instance
-from jedi.evaluate.context import ClassContext, FunctionExecutionContext
+from jedi.evaluate.context import FunctionExecutionContext
+from jedi.evaluate.gradual.typeshed import StubModuleContext
+from jedi.evaluate.gradual.conversion import convert_names, convert_contexts
+from jedi.evaluate.base_context import ContextSet
from jedi.api.keywords import KeywordName

@@ -45,9 +49,10 @@ class BaseDefinition(object):
        'posix': 'os',
        '_io': 'io',
        '_functools': 'functools',
        '_collections': 'collections',
        '_socket': 'socket',
        '_sqlite3': 'sqlite3',
-       '__builtin__': '',
+       'builtins': '',
+       '__builtin__': 'builtins',
    }

    _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {

@@ -58,17 +63,27 @@ class BaseDefinition(object):
        self._evaluator = evaluator
        self._name = name
        """
-       An instance of :class:`parso.reprsentation.Name` subclass.
+       An instance of :class:`parso.python.tree.Name` subclass.
        """
        self.is_keyword = isinstance(self._name, KeywordName)

-       # generate a path to the definition
-       self._module = name.get_root_context()
-       if self.in_builtin_module():
-           self.module_path = None
-       else:
-           self.module_path = self._module.py__file__()
-           """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
+   @memoize_method
+   def _get_module(self):
+       # This can take a while to complete, because in the worst case of
+       # imports (consider `import a` completions), we need to load all
+       # modules starting with a first.
+       return self._name.get_root_context()
+
+   @property
+   def module_path(self):
+       """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
+       module = self._get_module()
+       if module.is_stub() or not module.is_compiled():
+           # Compiled modules should not return a module path even if they
+           # have one.
+           return self._get_module().py__file__()
+
+       return None

    @property
    def name(self):
@@ -91,6 +106,7 @@ class BaseDefinition(object):
        to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
        definition for ``sys``, ``f``, ``C`` and ``x``.

+       >>> from jedi._compatibility import no_unicode_pprint
        >>> from jedi import Script
        >>> source = '''
        ... import keyword

@@ -116,9 +132,11 @@ class BaseDefinition(object):
        so that it is easy to relate the result to the source code.

        >>> defs = sorted(defs, key=lambda d: d.line)
-       >>> defs  # doctest: +NORMALIZE_WHITESPACE
-       [<Definition module keyword>, <Definition class C>,
-        <Definition instance D>, <Definition def f>]
+       >>> no_unicode_pprint(defs)  # doctest: +NORMALIZE_WHITESPACE
+       [<Definition full_name='keyword', description='module keyword'>,
+        <Definition full_name='__main__.C', description='class C'>,
+        <Definition full_name='__main__.D', description='instance D'>,
+        <Definition full_name='__main__.f', description='def f'>]

        Finally, here is what you can get from :attr:`type`:

@@ -147,46 +165,6 @@ class BaseDefinition(object):
                return context.api_type
        return self._name.api_type

-   def _path(self):
-       """The path to a module/class/function definition."""
-       def to_reverse():
-           name = self._name
-           if name.api_type == 'module':
-               try:
-                   name = list(name.infer())[0].name
-               except IndexError:
-                   pass
-
-           if name.api_type in 'module':
-               module_contexts = name.infer()
-               if module_contexts:
-                   module_context, = module_contexts
-                   for n in reversed(module_context.py__name__().split('.')):
-                       yield n
-               else:
-                   # We don't really know anything about the path here. This
-                   # module is just an import that would lead in an
-                   # ImportError. So simply return the name.
-                   yield name.string_name
-               return
-           else:
-               yield name.string_name
-
-           parent_context = name.parent_context
-           while parent_context is not None:
-               try:
-                   method = parent_context.py__name__
-               except AttributeError:
-                   try:
-                       yield parent_context.name.string_name
-                   except AttributeError:
-                       pass
-               else:
-                   for name in reversed(method().split('.')):
-                       yield name
-               parent_context = parent_context.parent_context
-       return reversed(list(to_reverse()))

    @property
    def module_name(self):
        """
@@ -196,14 +174,17 @@ class BaseDefinition(object):
        >>> source = 'import json'
        >>> script = Script(source, path='example.py')
        >>> d = script.goto_definitions()[0]
-       >>> print(d.module_name)                       # doctest: +ELLIPSIS
+       >>> print(d.module_name)  # doctest: +ELLIPSIS
        json
        """
-       return self._module.name.string_name
+       return self._get_module().name.string_name

    def in_builtin_module(self):
        """Whether this is a builtin module."""
-       return isinstance(self._module, compiled.CompiledObject)
+       if isinstance(self._get_module(), StubModuleContext):
+           return any(isinstance(context, compiled.CompiledObject)
+                      for context in self._get_module().non_stub_context_set)
+       return isinstance(self._get_module(), compiled.CompiledObject)

    @property
    def line(self):
@@ -283,71 +264,91 @@ class BaseDefinition(object):
        be ``<module 'posixpath' ...>``. However most users find the latter
        more practical.
        """
-       path = list(self._path())
-       # TODO add further checks, the mapping should only occur on stdlib.
-       if not path:
-           return None  # for keywords the path is empty
+       if not self._name.is_context_name:
+           return None

-       with ignored(KeyError):
-           path[0] = self._mapping[path[0]]
-       for key, repl in self._tuple_mapping.items():
-           if tuple(path[:len(key)]) == key:
-               path = [repl] + path[len(key):]
+       names = self._name.get_qualified_names(include_module_names=True)
+       if names is None:
+           return names

-       return '.'.join(path if path[0] else path[1:])
+       names = list(names)
+       try:
+           names[0] = self._mapping[names[0]]
+       except KeyError:
+           pass
+
+       return '.'.join(names)
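The effect of the `_mapping` rewrite on `full_name`, as a usage sketch (assumes jedi 0.14+; the printed value is the typical result, since `os.path` resolves to `posixpath`/`ntpath` internally and the first segment gets rewritten to the public name):

```python
import jedi

d = jedi.Script('import os.path\nos.path.join').goto_definitions()[0]
print(d.full_name)  # typically 'os.path.join', not 'posixpath.join'
```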

-   def goto_assignments(self):
-       if self._name.tree_name is None:
-           return self
-
-       names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
-       return [Definition(self._evaluator, n) for n in names]
-
-   def _goto_definitions(self):
-       # TODO make this function public.
-       return [Definition(self._evaluator, d.name) for d in self._name.infer()]
+   def is_stub(self):
+       if not self._name.is_context_name:
+           return False
+
+       return self._name.get_root_context().is_stub()
+
+   def goto_assignments(self, **kwargs):  # Python 2...
+       with debug.increase_indent_cm('goto for %s' % self._name):
+           return self._goto_assignments(**kwargs)
+
+   def _goto_assignments(self, only_stubs=False, prefer_stubs=False):
+       assert not (only_stubs and prefer_stubs)
+
+       if not self._name.is_context_name:
+           return []
+
+       names = convert_names(
+           self._name.goto(),
+           only_stubs=only_stubs,
+           prefer_stubs=prefer_stubs,
+       )
+       return [self if n == self._name else Definition(self._evaluator, n)
+               for n in names]
+
+   def infer(self, **kwargs):  # Python 2...
+       with debug.increase_indent_cm('infer for %s' % self._name):
+           return self._infer(**kwargs)
+
+   def _infer(self, only_stubs=False, prefer_stubs=False):
+       assert not (only_stubs and prefer_stubs)
+
+       if not self._name.is_context_name:
+           return []
+
+       # First we need to make sure that we have stub names (if possible) that
+       # we can follow. If we don't do that, we can end up with the inferred
+       # results of Python objects instead of stubs.
+       names = convert_names([self._name], prefer_stubs=True)
+       contexts = convert_contexts(
+           ContextSet.from_sets(n.infer() for n in names),
+           only_stubs=only_stubs,
+           prefer_stubs=prefer_stubs,
+       )
+       resulting_names = [c.name for c in contexts]
+       return [self if n == self._name else Definition(self._evaluator, n)
+               for n in resulting_names]
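A usage sketch of the stub-aware goto/infer pair introduced here (assumes jedi 0.14+; illustrative only):

```python
import jedi

d = jedi.Script('import json\njson.loads', line=2, column=7).goto_assignments()[0]
print(d.is_stub())          # whether the name lives in a typeshed stub
for inferred in d.infer():  # stub-first inference added by this change
    print(inferred.full_name)
```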

    @property
    @memoize_method
    def params(self):
        """
-       Raises an ``AttributeError``if the definition is not callable.
+       Raises an ``AttributeError`` if the definition is not callable.
        Otherwise returns a list of `Definition` that represents the params.
        """
-       def get_param_names(context):
-           param_names = []
-           if context.api_type == 'function':
-               param_names = list(context.get_param_names())
-               if isinstance(context, instance.BoundMethod):
-                   param_names = param_names[1:]
-           elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)):
-               if isinstance(context, ClassContext):
-                   search = u'__init__'
-               else:
-                   search = u'__call__'
-               names = context.get_function_slot_names(search)
-               if not names:
-                   return []
+       # Only return the first one. There might be multiple ones, especially
+       # with overloading.
+       for context in self._name.infer():
+           for signature in context.get_signatures():
+               return [Definition(self._evaluator, n) for n in signature.get_param_names()]

-               # Just take the first one here, not optimal, but currently
-               # there's no better solution.
-               inferred = names[0].infer()
-               param_names = get_param_names(next(iter(inferred)))
-               if isinstance(context, ClassContext):
-                   param_names = param_names[1:]
-               return param_names
-           elif isinstance(context, compiled.CompiledObject):
-               return list(context.get_param_names())
-           return param_names
-
-       followed = list(self._name.infer())
-       if not followed or not hasattr(followed[0], 'py__call__'):
-           raise AttributeError('There are no params defined on this.')
-       context = followed[0]  # only check the first one.
-
-       return [Definition(self._evaluator, n) for n in get_param_names(context)]
+       if self.type == 'function' or self.type == 'class':
+           # Fallback, if no signatures were defined (which is probably by
+           # itself a bug).
+           return []
+       raise AttributeError('There are no params defined on this.')

    def parent(self):
+       if not self._name.is_context_name:
+           return None
+
        context = self._name.parent_context
        if context is None:
            return None
@@ -357,7 +358,12 @@ class BaseDefinition(object):
        return Definition(self._evaluator, context.name)

    def __repr__(self):
-       return "<%s %s>" % (type(self).__name__, self.description)
+       return "<%s %sname=%r, description=%r>" % (
+           self.__class__.__name__,
+           'full_' if self.full_name else '',
+           self.full_name or self.name,
+           self.description,
+       )

    def get_line_code(self, before=0, after=0):
        """

@@ -369,7 +375,7 @@ class BaseDefinition(object):
        :return str: Returns the line(s) of code or an empty string if it's a
        builtin.
        """
-       if self.in_builtin_module():
+       if not self._name.is_context_name or self.in_builtin_module():
            return ''

        lines = self._name.get_root_context().code_lines

@@ -397,7 +403,7 @@ class Completion(BaseDefinition):
    def _complete(self, like_name):
        append = ''
        if settings.add_bracket_after_function \
-               and self.type == 'Function':
+               and self.type == 'function':
            append = '('

        if self._name.api_type == 'param' and self._stack is not None:

@@ -467,6 +473,8 @@ class Completion(BaseDefinition):
    @memoize_method
    def follow_definition(self):
        """
+       Deprecated!
+
        Return the original definitions. I strongly recommend not using it for
        your completions, because it might slow down |jedi|. If you want to
        read only a few objects (<=20), it might be useful, especially to get

@@ -474,8 +482,12 @@ class Completion(BaseDefinition):
        follows all results. This means with 1000 completions (e.g. numpy),
        it's just PITA-slow.
        """
-       defs = self._name.infer()
-       return [Definition(self._evaluator, d.name) for d in defs]
+       warnings.warn(
+           "Deprecated since version 0.14.0. Use .infer.",
+           DeprecationWarning,
+           stacklevel=2
+       )
+       return self.infer()

class Definition(BaseDefinition):

@@ -494,6 +506,7 @@ class Definition(BaseDefinition):

        Example:

+       >>> from jedi._compatibility import no_unicode_pprint
        >>> from jedi import Script
        >>> source = '''
        ... def f():

@@ -506,8 +519,9 @@ class Definition(BaseDefinition):
        >>> script = Script(source, column=3)  # line is maximum by default
        >>> defs = script.goto_definitions()
        >>> defs = sorted(defs, key=lambda d: d.line)
-       >>> defs
-       [<Definition def f>, <Definition class C>]
+       >>> no_unicode_pprint(defs)  # doctest: +NORMALIZE_WHITESPACE
+       [<Definition full_name='__main__.f', description='def f'>,
+        <Definition full_name='__main__.C', description='class C'>]
        >>> str(defs[0].description)  # strip literals in python2
        'def f'
        >>> str(defs[1].description)

@@ -516,17 +530,13 @@ class Definition(BaseDefinition):
        """
        typ = self.type
        tree_name = self._name.tree_name
+       if typ == 'param':
+           return typ + ' ' + self._name.to_string()
        if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
            if typ == 'function':
                # For the description we want a short and a pythonic way.
                typ = 'def'
            return typ + ' ' + self._name.string_name
-       elif typ == 'param':
-           code = search_ancestor(tree_name, 'param').get_code(
-               include_prefix=False,
-               include_comma=False
-           )
-           return typ + ' ' + code

        definition = tree_name.get_definition() or tree_name
        # Remove the prefix, because that's not what we want for get_code

@@ -594,11 +604,10 @@ class CallSignature(Definition):
    It knows what functions you are currently in. e.g. `isinstance(` would
    return the `isinstance` function. Without `(` it would return nothing.
    """
-   def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
-       super(CallSignature, self).__init__(evaluator, executable_name)
-       self._index = index
-       self._key_name_str = key_name_str
-       self._bracket_start_pos = bracket_start_pos
+   def __init__(self, evaluator, signature, call_details):
+       super(CallSignature, self).__init__(evaluator, signature.name)
+       self._call_details = call_details
+       self._signature = signature

    @property
    def index(self):

@@ -606,26 +615,11 @@ class CallSignature(Definition):
        The Param index of the current call.
        Returns None if the index cannot be found in the current call.
        """
-       if self._key_name_str is not None:
-           for i, param in enumerate(self.params):
-               if self._key_name_str == param.name:
-                   return i
-           if self.params:
-               param_name = self.params[-1]._name
-               if param_name.tree_name is not None:
-                   if param_name.tree_name.get_definition().star_count == 2:
-                       return i
-           return None
-
-       if self._index >= len(self.params):
-           for i, param in enumerate(self.params):
-               tree_name = param._name.tree_name
-               if tree_name is not None:
-                   # *args case
-                   if tree_name.get_definition().star_count == 1:
-                       return i
-           return None
-       return self._index
+       return self._call_details.calculate_index(self._signature.get_param_names())
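What the lazily computed `index` means in practice — a sketch assuming jedi 0.14+ and a hypothetical function `foo`:

```python
import jedi

# With the cursor after `b=`, the highlighted parameter is computed on
# demand by CallDetails.calculate_index rather than stored up front.
script = jedi.Script('def foo(a, b=1, *args): pass\nfoo(1, b=', line=2, column=9)
sig = script.call_signatures()[0]
print(sig.index)  # 1 -> the `b` parameter
```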

    @property
    def params(self):
        return [Definition(self._evaluator, n) for n in self._signature.get_param_names()]

    @property
    def bracket_start(self):

@@ -633,7 +627,7 @@ class CallSignature(Definition):
        The indent of the bracket that is responsible for the last function
        call.
        """
-       return self._bracket_start_pos
+       return self._call_details.bracket_leaf.start_pos

    @property
    def _params_str(self):

@@ -644,11 +638,18 @@ class CallSignature(Definition):
        return '<%s: %s index=%r params=[%s]>' % (
            type(self).__name__,
            self._name.string_name,
-           self._index,
+           self.index,
            self._params_str,
        )


def _format_signatures(context):
    return '\n'.join(
        signature.to_string()
        for signature in context.get_signatures()
    )


class _Help(object):
    """
    Temporary implementation, will be used as `Script.help()` or something in

@@ -673,9 +674,29 @@ class _Help(object):

        See :attr:`doc` for example.
        """
-       # TODO: Use all of the followed objects as output. Possibly dividing
-       # them by a few dashes.
+       full_doc = ''
+       # Using the first docstring that we see.
        for context in self._get_contexts(fast=fast):
-           return context.py__doc__(include_call_signature=not raw)
-
-       return ''
+           if full_doc:
+               # In case we have multiple contexts, just return all of them
+               # separated by a few dashes.
+               full_doc += '\n' + '-' * 30 + '\n'
+
+           doc = context.py__doc__()
+
+           signature_text = ''
+           if self._name.is_context_name:
+               if not raw:
+                   signature_text = _format_signatures(context)
+               if not doc and context.is_stub():
+                   for c in convert_contexts(ContextSet({context}), ignore_compiled=False):
+                       doc = c.py__doc__()
+                       if doc:
+                           break
+
+           if signature_text and doc:
+               full_doc += signature_text + '\n\n' + doc
+           else:
+               full_doc += signature_text + doc
+
+       return full_doc

@@ -9,8 +9,9 @@ from jedi.api import classes
from jedi.api import helpers
from jedi.evaluate import imports
from jedi.api import keywords
-from jedi.evaluate.helpers import evaluate_call_of_leaf
+from jedi.evaluate.helpers import evaluate_call_of_leaf, parse_dotted_names
from jedi.evaluate.filters import get_global_filters
+from jedi.evaluate.gradual.conversion import convert_contexts
from jedi.parser_utils import get_statement_of_position


@@ -170,7 +171,10 @@ class Completion:
        elif type_ == 'for_stmt':
            allowed_transitions.append('else')

-       completion_names = list(self._get_keyword_completion_names(allowed_transitions))
+       completion_names = []
+       current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
+       if not current_line or current_line[-1] in ' \t.;':
+           completion_names += self._get_keyword_completion_names(allowed_transitions)

        if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
                                                  PythonTokenTypes.INDENT)):

@@ -178,14 +182,19 @@ class Completion:

        nonterminals = [stack_node.nonterminal for stack_node in stack]

-       nodes = [node for stack_node in stack for node in stack_node.nodes]
+       nodes = []
+       for stack_node in stack:
+           if stack_node.dfa.from_rule == 'small_stmt':
+               nodes = []
+           else:
+               nodes += stack_node.nodes

        if nodes and nodes[-1] in ('as', 'def', 'class'):
            # No completions for ``with x as foo`` and ``import x as foo``.
            # Also true for defining names as a class or function.
            return list(self._get_class_context_completions(is_function=True))
        elif "import_stmt" in nonterminals:
-           level, names = self._parse_dotted_names(nodes, "import_from" in nonterminals)
+           level, names = parse_dotted_names(nodes, "import_from" in nonterminals)

            only_modules = not ("import_from" in nonterminals and 'import' in nodes)
            completion_names += self._get_importer_names(

@@ -233,32 +242,21 @@ class Completion:
        )
        contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf)
        completion_names = []
-       debug.dbg('trailer completion contexts: %s', contexts)
+       debug.dbg('trailer completion contexts: %s', contexts, color='MAGENTA')
        for context in contexts:
            for filter in context.get_filters(
-                   search_global=False, origin_scope=user_context.tree_node):
+                   search_global=False,
+                   origin_scope=user_context.tree_node):
                completion_names += filter.values()
-       return completion_names

-   def _parse_dotted_names(self, nodes, is_import_from):
-       level = 0
-       names = []
-       for node in nodes[1:]:
-           if node in ('.', '...'):
-               if not names:
-                   level += len(node.value)
-           elif node.type == 'dotted_name':
-               names += node.children[::2]
-           elif node.type == 'name':
-               names.append(node)
-           elif node == ',':
-               if not is_import_from:
-                   names = []
-           else:
-               # Here if the keyword `import` comes along it stops checking
-               # for names.
-               break
-       return level, names
+       python_contexts = convert_contexts(contexts)
+       for c in python_contexts:
+           if c not in contexts:
+               for filter in c.get_filters(
+                       search_global=False,
+                       origin_scope=user_context.tree_node):
+                   completion_names += filter.values()
+       return completion_names

    def _get_importer_names(self, names, level=0, only_modules=True):
        names = [n.value for n in names]

@@ -288,5 +286,6 @@ class Completion:
            next(filters)
        for filter in filters:
            for name in filter.values():
+               # TODO we should probably check here for properties
                if (name.api_type == 'function') == is_function:
                    yield name

@@ -17,7 +17,7 @@ import parso

_VersionInfo = namedtuple('VersionInfo', 'major minor micro')

-_SUPPORTED_PYTHONS = ['3.7', '3.6', '3.5', '3.4', '3.3', '2.7']
+_SUPPORTED_PYTHONS = ['3.8', '3.7', '3.6', '3.5', '3.4', '2.7']
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)

@@ -128,17 +128,18 @@ class Environment(_BaseEnvironment):
        return self._get_subprocess().get_sys_path()


-class SameEnvironment(Environment):
+class _SameEnvironmentMixin(object):
    def __init__(self):
        self._start_executable = self.executable = sys.executable
        self.path = sys.prefix
        self.version_info = _VersionInfo(*sys.version_info[:3])


-class InterpreterEnvironment(_BaseEnvironment):
-   def __init__(self):
-       self.version_info = _VersionInfo(*sys.version_info[:3])
+class SameEnvironment(_SameEnvironmentMixin, Environment):
+   pass


+class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
    def get_evaluator_subprocess(self, evaluator):
        return EvaluatorSameProcess(evaluator)

@@ -153,9 +154,13 @@ def _get_virtual_env_from_var():
    variable is considered to be safe / controlled by the user solely.
    """
    var = os.environ.get('VIRTUAL_ENV')
-   if var is not None:
-       if var == sys.prefix:
-           return SameEnvironment()
+   if var:
+       # Under macOS in some cases - notably when using Pipenv - the
+       # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+       # /path/to/env so we need to fully resolve the paths in order to
+       # compare them.
+       if os.path.realpath(var) == os.path.realpath(sys.prefix):
+           return _try_get_same_env()

        try:
            return create_environment(var, safe=False)

@@ -184,15 +189,58 @@ def get_default_environment():
    if virtual_env is not None:
        return virtual_env

-   # If no virtualenv is found, use the environment we're already
-   # using.
-   return SameEnvironment()
+   # If no VirtualEnv is found, use the environment we're already using.
+   return _try_get_same_env()


+def _try_get_same_env():
+   env = SameEnvironment()
+   if not os.path.basename(env.executable).lower().startswith('python'):
+       # This tries to counter issues with embedding. In some cases (e.g.
+       # VIM's Python on Mac/Windows), sys.executable is /foo/bar/vim. This
+       # happens, because for Mac a function called `_NSGetExecutablePath` is
+       # used and for Windows `GetModuleFileNameW`. These are both platform
+       # specific functions. For all other systems sys.executable should be
+       # alright. However here we try to generalize:
+       #
+       # 1. Check if the executable looks like python (heuristic)
+       # 2. In case it's not try to find the executable
+       # 3. In case we don't find it use an interpreter environment.
+       #
+       # The last option will always work, but leads to potential crashes of
+       # Jedi - which is ok, because it happens very rarely and even less,
+       # because the code below should work for most cases.
+       if os.name == 'nt':
+           # The first case would be a virtualenv and the second a normal
+           # Python installation.
+           checks = (r'Scripts\python.exe', 'python.exe')
+       else:
+           # For unix it looks like Python is always in a bin folder.
+           checks = (
+               'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
+               'bin/python%s' % (sys.version_info[0]),
+               'bin/python',
+           )
+       for check in checks:
+           guess = os.path.join(sys.exec_prefix, check)
+           if os.path.isfile(guess):
+               # Bingo - We think we have our Python.
+               return Environment(guess)
+       # It looks like there is no reasonable Python to be found.
+       return InterpreterEnvironment()
+   return env
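A standalone sketch of the same executable-guessing heuristic, with `guess_python_executable` being a hypothetical helper (it is not part of jedi's API):

```python
import os
import sys

def guess_python_executable():
    # Hypothetical re-implementation of the heuristic above.
    if os.path.basename(sys.executable).lower().startswith('python'):
        return sys.executable
    if os.name == 'nt':
        checks = (r'Scripts\python.exe', 'python.exe')
    else:
        checks = ('bin/python%d.%d' % sys.version_info[:2], 'bin/python')
    for check in checks:
        guess = os.path.join(sys.exec_prefix, check)
        if os.path.isfile(guess):
            return guess
    return None  # caller falls back to an in-process interpreter environment
```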

def get_cached_default_environment():
    var = os.environ.get('VIRTUAL_ENV')
    environment = _get_cached_default_environment()
-   if var and var != environment.path:
+
+   # Under macOS in some cases - notably when using Pipenv - the
+   # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+   # /path/to/env so we need to fully resolve the paths in order to
+   # compare them.
+   if var and os.path.realpath(var) != os.path.realpath(environment.path):
        _get_cached_default_environment.clear_cache()
        return _get_cached_default_environment()
    return environment
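The core of the staleness check is plain path normalization; a tiny illustration with hypothetical paths:

```python
import os.path

# The comparison that decides whether the cached environment is stale:
var = '/path/to/env/bin/..'   # what Pipenv on macOS may put in VIRTUAL_ENV
cached = '/path/to/env'       # what the cached environment reports
assert os.path.realpath(var) == os.path.realpath(cached)  # no needless reset
```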

@@ -286,7 +334,10 @@ def get_system_environment(version):

    if os.name == 'nt':
        for exe in _get_executables_from_windows_registry(version):
-           return Environment(exe)
+           try:
+               return Environment(exe)
+           except InvalidPythonEnvironment:
+               pass
    raise InvalidPythonEnvironment("Cannot find executable python%s." % version)

@@ -8,7 +8,8 @@ from textwrap import dedent
from parso.python.parser import Parser
from parso.python import tree

-from jedi._compatibility import u
+from jedi._compatibility import u, Parameter
+from jedi.evaluate.base_context import NO_CONTEXTS
from jedi.evaluate.syntax_tree import eval_atom
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.evaluate.compiled import get_string_context_set

@@ -20,7 +21,7 @@ CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])

def sorted_definitions(defs):
    # Note: `or ''` below is required because `module_path` could be None.
-   return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
+   return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0, x.name))


def get_on_completion_name(module_node, lines, position):

@@ -130,7 +131,10 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
        p.parse(tokens=tokenize_without_endmarker(code))
    except EndMarkerReached:
        return p.stack
-   raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
+   raise SystemError(
+       "This really shouldn't happen. There's a bug in Jedi:\n%s"
+       % list(tokenize_without_endmarker(code))
+   )


def evaluate_goto_definition(evaluator, context, leaf):
@@ -140,21 +144,154 @@ def evaluate_goto_definition(evaluator, context, leaf):
        return evaluator.goto_definitions(context, leaf)

    parent = leaf.parent
+   definitions = NO_CONTEXTS
    if parent.type == 'atom':
-       return context.eval_node(leaf.parent)
+       # e.g. `(a + b)`
+       definitions = context.eval_node(leaf.parent)
    elif parent.type == 'trailer':
-       return evaluate_call_of_leaf(context, leaf)
+       # e.g. `a()`
+       definitions = evaluate_call_of_leaf(context, leaf)
    elif isinstance(leaf, tree.Literal):
+       # e.g. `"foo"` or `1.0`
        return eval_atom(context, leaf)
    elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
        return get_string_context_set(evaluator)
-   return []
+   return definitions


-CallSignatureDetails = namedtuple(
-   'CallSignatureDetails',
-   ['bracket_leaf', 'call_index', 'keyword_name_str']
-)
+class CallDetails(object):
+   def __init__(self, bracket_leaf, children, position):
+       self.bracket_leaf = bracket_leaf
+       self._children = children
+       self._position = position
+
+   @property
+   def index(self):
+       return _get_index_and_key(self._children, self._position)[0]
+
+   @property
+   def keyword_name_str(self):
+       return _get_index_and_key(self._children, self._position)[1]
+
+   def calculate_index(self, param_names):
+       positional_count = 0
+       used_names = set()
+       star_count = -1
+       args = list(_iter_arguments(self._children, self._position))
+       if not args:
+           if param_names:
+               return 0
+           else:
+               return None
+
+       is_kwarg = False
+       for i, (star_count, key_start, had_equal) in enumerate(args):
+           is_kwarg |= had_equal | (star_count == 2)
+           if star_count:
+               pass  # For now do nothing, we don't know what's in there here.
+           else:
+               if i + 1 != len(args):  # Not last
+                   if had_equal:
+                       used_names.add(key_start)
+                   else:
+                       positional_count += 1
+
+       for i, param_name in enumerate(param_names):
+           kind = param_name.get_kind()
+
+           if not is_kwarg:
+               if kind == Parameter.VAR_POSITIONAL:
+                   return i
+               if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
+                   if i == positional_count:
+                       return i
+
+           if key_start is not None and not star_count == 1 or star_count == 2:
+               if param_name.string_name not in used_names \
+                       and (kind == Parameter.KEYWORD_ONLY
+                            or kind == Parameter.POSITIONAL_OR_KEYWORD
+                            and positional_count <= i):
+                   if star_count:
+                       return i
+                   if had_equal:
+                       if param_name.string_name == key_start:
+                           return i
+                   else:
+                       if param_name.string_name.startswith(key_start):
+                           return i
+
+           if kind == Parameter.VAR_KEYWORD:
+               return i
+       return None
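A hand-traced illustration of `calculate_index` for a hypothetical signature (the values follow the algorithm above; `|` marks the cursor):

```python
# def f(a, b, *args, key=None, **kw)
#
#   f(1, |        -> index 1   (second positional slot, `b`)
#   f(1, 2, 3, |  -> index 2   (extra positionals land on `*args`)
#   f(key=|       -> index 3   (prefix-matched keyword `key`)
#   f(1, b=2, |   -> index 3   (`b` already used; next assignable is `key`)
```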


+def _iter_arguments(nodes, position):
+   def remove_after_pos(name):
+       if name.type != 'name':
+           return None
+       return name.value[:position[1] - name.start_pos[1]]
+
+   # Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
+   nodes_before = [c for c in nodes if c.start_pos < position]
+   if nodes_before[-1].type == 'arglist':
+       for x in _iter_arguments(nodes_before[-1].children, position):
+           yield x  # Python 2 :(
+       return
+
+   previous_node_yielded = False
+   stars_seen = 0
+   for i, node in enumerate(nodes_before):
+       if node.type == 'argument':
+           previous_node_yielded = True
+           first = node.children[0]
+           second = node.children[1]
+           if second == '=':
+               if second.start_pos < position:
+                   yield 0, first.value, True
+               else:
+                   yield 0, remove_after_pos(first), False
+           elif first in ('*', '**'):
+               yield len(first.value), remove_after_pos(second), False
+           else:
+               # Must be a Comprehension
+               first_leaf = node.get_first_leaf()
+               if first_leaf.type == 'name' and first_leaf.start_pos >= position:
+                   yield 0, remove_after_pos(first_leaf), False
+               else:
+                   yield 0, None, False
+           stars_seen = 0
+       elif node.type in ('testlist', 'testlist_star_expr'):  # testlist is Python 2
+           for n in node.children[::2]:
+               if n.type == 'star_expr':
+                   stars_seen = 1
+                   n = n.children[1]
+               yield stars_seen, remove_after_pos(n), False
+               stars_seen = 0
+           # The count of children is even if there's a comma at the end.
+           previous_node_yielded = bool(len(node.children) % 2)
+       elif isinstance(node, tree.PythonLeaf) and node.value == ',':
+           if not previous_node_yielded:
+               yield stars_seen, '', False
+               stars_seen = 0
+           previous_node_yielded = False
+       elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
+           stars_seen = len(node.value)
+       elif node == '=' and nodes_before[-1]:
+           previous_node_yielded = True
+           before = nodes_before[i - 1]
+           if before.type == 'name':
+               yield 0, before.value, True
+           else:
+               yield 0, None, False
+           # Just ignore the star that is probably a syntax error.
+           stars_seen = 0
+
+   if not previous_node_yielded:
+       if nodes_before[-1].type == 'name':
+           yield stars_seen, remove_after_pos(nodes_before[-1]), False
+       else:
+           yield stars_seen, '', False


def _get_index_and_key(nodes, position):

@@ -163,23 +300,22 @@ def _get_index_and_key(nodes, position):
    """
    nodes_before = [c for c in nodes if c.start_pos < position]
    if nodes_before[-1].type == 'arglist':
-       nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position]
+       return _get_index_and_key(nodes_before[-1].children, position)

    key_str = None

-   if nodes_before:
-       last = nodes_before[-1]
-       if last.type == 'argument' and last.children[1] == '=' \
-               and last.children[1].end_pos <= position:
-           # Checked if the argument
-           key_str = last.children[0].value
-       elif last == '=':
-           key_str = nodes_before[-2].value
+   last = nodes_before[-1]
+   if last.type == 'argument' and last.children[1] == '=' \
+           and last.children[1].end_pos <= position:
+       # Checked if the argument
+       key_str = last.children[0].value
+   elif last == '=':
+       key_str = nodes_before[-2].value

    return nodes_before.count(','), key_str


-def _get_call_signature_details_from_error_node(node, position):
+def _get_call_signature_details_from_error_node(node, additional_children, position):
    for index, element in reversed(list(enumerate(node.children))):
        # `index > 0` means that it's a trailer and not an atom.
        if element == '(' and element.end_pos <= position and index > 0:

@@ -190,10 +326,7 @@ def _get_call_signature_details_from_error_node(node, position):
            if name is None:
                continue
            if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
-               return CallSignatureDetails(
-                   element,
-                   *_get_index_and_key(children, position)
-               )
+               return CallDetails(element, children + additional_children, position)


def get_call_signature_details(module, position):

@@ -205,6 +338,7 @@ def get_call_signature_details(module, position):
        return None

    if leaf == ')':
+       # TODO is this ok?
        if leaf.end_pos == position:
            leaf = leaf.get_next_leaf()

@@ -217,18 +351,25 @@ def get_call_signature_details(module, position):
        # makes it feel strange to have a call signature.
        return None

-   for n in node.children[::-1]:
-       if n.start_pos < position and n.type == 'error_node':
-           result = _get_call_signature_details_from_error_node(n, position)
-           if result is not None:
-               return result
+   additional_children = []
+   for n in reversed(node.children):
+       if n.start_pos < position:
+           if n.type == 'error_node':
+               result = _get_call_signature_details_from_error_node(
+                   n, additional_children, position
+               )
+               if result is not None:
+                   return result
+
+               additional_children[0:0] = n.children
+               continue
+           additional_children.insert(0, n)

    if node.type == 'trailer' and node.children[0] == '(':
        leaf = node.get_previous_leaf()
        if leaf is None:
            return None
-       return CallSignatureDetails(
-           node.children[0], *_get_index_and_key(node.children, position))
+       return CallDetails(node.children[0], node.children, position)

    node = node.parent

@@ -253,5 +394,5 @@ def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos
    yield evaluate_goto_definition(
        evaluator,
        context,
-       bracket_leaf.get_previous_leaf()
+       bracket_leaf.get_previous_leaf(),
    )

@@ -6,7 +6,7 @@ from jedi.evaluate.context import ModuleContext
from jedi.evaluate import compiled
from jedi.evaluate.compiled import mixed
from jedi.evaluate.compiled.access import create_access_path
-from jedi.evaluate.base_context import Context
+from jedi.evaluate.base_context import ContextWrapper


def _create(evaluator, obj):

@@ -20,42 +20,28 @@ class NamespaceObject(object):
        self.__dict__ = dct


-class MixedModuleContext(Context):
+class MixedModuleContext(ContextWrapper):
    type = 'mixed_module'

-   def __init__(self, evaluator, tree_module, namespaces, path, code_lines):
-       self.evaluator = evaluator
-       self._namespaces = namespaces
-
-       self._namespace_objects = [NamespaceObject(n) for n in namespaces]
-       self._module_context = ModuleContext(
+   def __init__(self, evaluator, tree_module, namespaces, file_io, code_lines):
+       module_context = ModuleContext(
            evaluator, tree_module,
-           path=path,
+           file_io=file_io,
            string_names=('__main__',),
            code_lines=code_lines
        )
-       self.tree_node = tree_module
-
-   def get_node(self):
-       return self.tree_node
+       super(MixedModuleContext, self).__init__(module_context)
+       self._namespace_objects = [NamespaceObject(n) for n in namespaces]

    def get_filters(self, *args, **kwargs):
-       for filter in self._module_context.get_filters(*args, **kwargs):
+       for filter in self._wrapped_context.get_filters(*args, **kwargs):
            yield filter

        for namespace_obj in self._namespace_objects:
            compiled_object = _create(self.evaluator, namespace_obj)
            mixed_object = mixed.MixedObject(
                self.evaluator,
                parent_context=self,
                compiled_object=compiled_object,
-               tree_context=self._module_context
+               tree_context=self._wrapped_context
            )
            for filter in mixed_object.get_filters(*args, **kwargs):
                yield filter
-
-   @property
-   def code_lines(self):
-       return self._module_context.code_lines
-
-   def __getattr__(self, name):
-       return getattr(self._module_context, name)

@@ -1,7 +1,7 @@
import pydoc

from jedi.evaluate.utils import ignored
-from jedi.evaluate.filters import AbstractNameDefinition
+from jedi.evaluate.names import AbstractNameDefinition

try:
    from pydoc_data import topics as pydoc_topics

@@ -21,6 +21,7 @@ def get_operator(evaluator, string, pos):

class KeywordName(AbstractNameDefinition):
    api_type = u'keyword'
+   is_context_name = False

    def __init__(self, evaluator, name):
        self.evaluator = evaluator

@@ -44,9 +45,15 @@ class Keyword(object):
        """ For a `parsing.Name` like comparison """
        return [self.name]

-   def py__doc__(self, include_call_signature=False):
+   def py__doc__(self):
        return imitate_pydoc(self.name.string_name)

+   def get_signatures(self):
+       # TODO this makes no sense, I think Keyword should somehow merge with
+       # Context to make it easier for the api/classes.py to deal with all
+       # of it.
+       return []
+
    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self.name)

@@ -1,7 +1,7 @@
import os
import json

-from jedi._compatibility import FileNotFoundError, NotADirectoryError
+from jedi._compatibility import FileNotFoundError, PermissionError, IsADirectoryError
from jedi.api.environment import SameEnvironment, \
    get_cached_default_environment
from jedi.api.exceptions import WrongVersion

@@ -67,7 +67,7 @@ class Project(object):
    """
    def py2_comp(path, environment=None, sys_path=None,
                 smart_sys_path=True, _django=False):
-       self._path = path
+       self._path = os.path.abspath(path)
        if isinstance(environment, SameEnvironment):
            self._environment = environment

@@ -77,7 +77,8 @@ class Project(object):

        py2_comp(path, **kwargs)

-   def _get_base_sys_path(self, environment=None):
+   @evaluator_as_method_param_cache()
+   def _get_base_sys_path(self, evaluator, environment=None):
        if self._sys_path is not None:
            return self._sys_path

@@ -93,7 +94,7 @@ class Project(object):
        return sys_path

    @evaluator_as_method_param_cache()
-   def _get_sys_path(self, evaluator, environment=None):
+   def _get_sys_path(self, evaluator, environment=None, add_parent_paths=True):
        """
        Keep this method private for all users of jedi. However internally this
        one is used like a public method.

@@ -101,19 +102,20 @@ class Project(object):
        suffixed = []
        prefixed = []

-       sys_path = list(self._get_base_sys_path(environment))
+       sys_path = list(self._get_base_sys_path(evaluator, environment))
        if self._smart_sys_path:
            prefixed.append(self._path)

            if evaluator.script_path is not None:
                suffixed += discover_buildout_paths(evaluator, evaluator.script_path)

-               traversed = list(traverse_parents(evaluator.script_path))
-
-               # AFAIK some libraries have imports like `foo.foo.bar`, which
-               # leads to the conclusion to by default prefer longer paths
-               # rather than shorter ones by default.
-               suffixed += reversed(traversed)
+               if add_parent_paths:
+                   traversed = list(traverse_parents(evaluator.script_path))
+
+                   # AFAIK some libraries have imports like `foo.foo.bar`, which
+                   # leads to the conclusion to by default prefer longer paths
+                   # rather than shorter ones by default.
+                   suffixed += reversed(traversed)

        if self._django:
            prefixed.append(self._path)

@@ -151,7 +153,7 @@ def _is_django_path(directory):
    try:
        with open(os.path.join(directory, 'manage.py'), 'rb') as f:
            return b"DJANGO_SETTINGS_MODULE" in f.read()
-   except (FileNotFoundError, NotADirectoryError):
+   except (FileNotFoundError, IsADirectoryError, PermissionError):
        return False

    return False

@@ -167,7 +169,7 @@ def get_default_project(path=None):
    for dir in traverse_parents(check, include_current=True):
        try:
            return Project.load(dir)
-       except (FileNotFoundError, NotADirectoryError):
+       except (FileNotFoundError, IsADirectoryError, PermissionError):
            pass

    if first_no_init_file is None:

@@ -12,17 +12,15 @@ class BaseContext(object):


class BaseContextSet(object):
-   def __init__(self, *args):
-       self._set = set(args)
+   def __init__(self, iterable):
+       self._set = frozenset(iterable)
+       for context in iterable:
+           assert not isinstance(context, BaseContextSet)

    @classmethod
-   def from_iterable(cls, iterable):
-       return cls.from_set(set(iterable))
-
-   @classmethod
-   def from_set(cls, set_):
-       self = cls()
-       self._set = set_
+   def _from_frozen_set(cls, frozenset_):
+       self = cls.__new__(cls)
+       self._set = frozenset_
        return self

    @classmethod

@@ -31,16 +29,18 @@ class BaseContextSet(object):
        Used to work with an iterable of set.
        """
        aggregated = set()
-       sets = list(sets)
        for set_ in sets:
            if isinstance(set_, BaseContextSet):
                aggregated |= set_._set
            else:
-               aggregated |= set_
-       return cls.from_set(aggregated)
+               aggregated |= frozenset(set_)
+       return cls._from_frozen_set(frozenset(aggregated))

    def __or__(self, other):
-       return type(self).from_set(self._set | other._set)
+       return self._from_frozen_set(self._set | other._set)
+
+   def __and__(self, other):
+       return self._from_frozen_set(self._set & other._set)

    def __iter__(self):
        for element in self._set:

@@ -53,15 +53,21 @@ class BaseContextSet(object):
        return len(self._set)

    def __repr__(self):
-       return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set))
+       return 'S{%s}' % (', '.join(str(s) for s in self._set))

    def filter(self, filter_func):
-       return type(self).from_iterable(filter(filter_func, self._set))
+       return self.__class__(filter(filter_func, self._set))

    def __getattr__(self, name):
        def mapper(*args, **kwargs):
-           return type(self).from_sets(
+           return self.from_sets(
                getattr(context, name)(*args, **kwargs)
                for context in self._set
            )
        return mapper

+   def __eq__(self, other):
+       return self._set == other._set
+
+   def __hash__(self):
+       return hash(self._set)
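The switch to `frozenset` is what makes context sets hashable (note the new `__eq__`/`__hash__`). A minimal standalone sketch of the same pattern, not jedi code:

```python
class FrozenBag:
    """Immutable set wrapper: hashable, so usable as dict/cache keys."""
    def __init__(self, iterable):
        self._set = frozenset(iterable)

    def __or__(self, other):
        return FrozenBag(self._set | other._set)

    def __eq__(self, other):
        return self._set == other._set

    def __hash__(self):
        return hash(self._set)

assert FrozenBag([1, 2]) == FrozenBag([2, 1])
assert len({FrozenBag([1]): 'cached'}) == 1  # works as a memoization key
```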

@@ -1,6 +1,8 @@
-from jedi._compatibility import encoding, is_py3, u
import os
import time
+from contextlib import contextmanager
+
+from jedi._compatibility import encoding, is_py3, u

_inited = False

@@ -51,6 +53,7 @@ except ImportError:
    YELLOW = ''
    MAGENTA = ''
    RESET = ''
+   BLUE = ''

NOTICE = object()
WARNING = object()

@@ -75,15 +78,25 @@ def reset_time():
def increase_indent(func):
    """Decorator for making indented debug output."""
    def wrapper(*args, **kwargs):
-       global _debug_indent
-       _debug_indent += 1
-       try:
+       with increase_indent_cm():
            return func(*args, **kwargs)
-       finally:
-           _debug_indent -= 1
    return wrapper


+@contextmanager
+def increase_indent_cm(title=None):
+   global _debug_indent
+   if title:
+       dbg('Start: ' + title, color='MAGENTA')
+   _debug_indent += 1
+   try:
+       yield
+   finally:
+       _debug_indent -= 1
+   if title:
+       dbg('End: ' + title, color='MAGENTA')
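Usage sketch for the new context manager (enabling jedi's debug printing first; illustrative only):

```python
from jedi import debug

debug.debug_function = debug.print_to_stdout  # route dbg() output to stdout
with debug.increase_indent_cm('resolving imports'):
    # Everything logged inside is indented one level; the title marks
    # the Start:/End: of the block.
    debug.dbg('inner step %s', 1)
```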


def dbg(message, *args, **kwargs):
    """ Looks at the stack, to see if a debug message should be printed. """
    # Python 2 compatibility, because it doesn't understand default args

@@ -62,10 +62,12 @@ I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
+from functools import partial

from parso.python import tree
import parso
from parso import python_bytes_to_unicode
+from jedi.file_io import FileIO

from jedi import debug
from jedi import parser_utils

@@ -73,9 +75,8 @@ from jedi.evaluate.utils import unite
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate import compiled
from jedi.evaluate import helpers
-from jedi.evaluate.filters import TreeNameDefinition, ParamName
+from jedi.evaluate.names import TreeNameDefinition, ParamName
from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \
    ContextSet, NO_CONTEXTS, iterate_contexts
from jedi.evaluate.context import ClassContext, FunctionContext, \

@@ -85,6 +86,14 @@ from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \
    eval_node, check_tuple_assignments


+def _execute(context, arguments):
+   debug.dbg('execute: %s %s', context, arguments)
+   with debug.increase_indent_cm():
+       context_set = context.py__call__(arguments=arguments)
+   debug.dbg('execute result: %s in %s', context_set, context)
+   return context_set
+
+
class Evaluator(object):
    def __init__(self, project, environment=None, script_path=None):
        if environment is None:

@@ -94,9 +103,10 @@ class Evaluator(object):
        self.compiled_subprocess = environment.get_evaluator_subprocess(self)
        self.grammar = environment.get_grammar()

-       self.latest_grammar = parso.load_grammar(version='3.6')
+       self.latest_grammar = parso.load_grammar(version='3.7')
        self.memoize_cache = {}  # for memoize decorators
        self.module_cache = imports.ModuleCache()  # does the job of `sys.modules`.
+       self.stub_module_cache = {}  # Dict[Tuple[str, ...], Optional[ModuleContext]]
        self.compiled_cache = {}  # see `evaluate.compiled.create()`
        self.inferred_element_counts = {}
        self.mixed_cache = {}  # see `evaluate.compiled.mixed._create()`

@@ -105,30 +115,53 @@ class Evaluator(object):
        self.is_analysis = False
        self.project = project
        self.access_cache = {}
+       # This setting is only temporary to limit the work we have to do with
+       # tensorflow and others.
+       self.infer_enabled = True

        self.reset_recursion_limitations()
        self.allow_different_encoding = True

+       # Plugin API
+       from jedi.plugins import plugin_manager
+       plugin_callbacks = plugin_manager.get_callbacks(self)
+       self.execute = plugin_callbacks.decorate('execute', callback=_execute)
+       self._import_module = partial(
+           plugin_callbacks.decorate(
+               'import_module',
+               callback=imports.import_module
+           ),
+           self,
+       )
+
+   def import_module(self, import_names, parent_module_context=None,
+                     sys_path=None, prefer_stubs=True):
+       if sys_path is None:
+           sys_path = self.get_sys_path()
+       return self._import_module(import_names, parent_module_context,
+                                  sys_path, prefer_stubs=prefer_stubs)
+
    @property
    @evaluator_function_cache()
    def builtins_module(self):
-       return compiled.get_special_object(self, u'BUILTINS')
+       module_name = u'builtins'
+       if self.environment.version_info.major == 2:
+           module_name = u'__builtin__'
+       builtins_module, = self.import_module((module_name,), sys_path=())
+       return builtins_module
+
+   @property
+   @evaluator_function_cache()
+   def typing_module(self):
+       typing_module, = self.import_module((u'typing',))
+       return typing_module

    def reset_recursion_limitations(self):
        self.recursion_detector = recursion.RecursionDetector()
        self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)

-   def get_sys_path(self):
+   def get_sys_path(self, **kwargs):
        """Convenience function"""
-       return self.project._get_sys_path(self, environment=self.environment)
+       return self.project._get_sys_path(self, environment=self.environment, **kwargs)

    def eval_element(self, context, element):
+       if not self.infer_enabled:
+           return NO_CONTEXTS
+
        if isinstance(context, CompForContext):
            return eval_node(context, element)

@@ -179,14 +212,14 @@ class Evaluator(object):
        new_name_dicts = list(original_name_dicts)
        for i, name_dict in enumerate(new_name_dicts):
            new_name_dicts[i] = name_dict.copy()
-           new_name_dicts[i][if_name.value] = ContextSet(definition)
+           new_name_dicts[i][if_name.value] = ContextSet([definition])

        name_dicts += new_name_dicts
    else:
        for name_dict in name_dicts:
            name_dict[if_name.value] = definitions
    if len(name_dicts) > 1:
-       result = ContextSet()
+       result = NO_CONTEXTS
        for name_dict in name_dicts:
            with helpers.predefine_names(context, if_stmt, name_dict):
                result |= eval_node(context, element)

@@ -219,10 +252,13 @@ class Evaluator(object):
        def_ = name.get_definition(import_name_always=True)
        if def_ is not None:
            type_ = def_.type
-           if type_ == 'classdef':
-               return [ClassContext(self, context, name.parent)]
-           elif type_ == 'funcdef':
-               return [FunctionContext.from_context(context, name.parent)]
+           is_classdef = type_ == 'classdef'
+           if is_classdef or type_ == 'funcdef':
+               if is_classdef:
+                   c = ClassContext(self, context, name.parent)
+               else:
+                   c = FunctionContext.from_context(context, name.parent)
+               return ContextSet([c])

            if type_ == 'expr_stmt':
                is_simple_name = name.parent.type not in ('power', 'trailer')

@@ -236,9 +272,40 @@ class Evaluator(object):
                return check_tuple_assignments(self, c_node, for_types)
            if type_ in ('import_from', 'import_name'):
                return imports.infer_import(context, name)
+       else:
+           result = self._follow_error_node_imports_if_possible(context, name)
+           if result is not None:
+               return result

        return helpers.evaluate_call_of_leaf(context, name)

+   def _follow_error_node_imports_if_possible(self, context, name):
+       error_node = tree.search_ancestor(name, 'error_node')
+       if error_node is not None:
+           # Get the first command start of a started simple_stmt. The error
+           # node is sometimes a small_stmt and sometimes a simple_stmt. Check
+           # for ; leaves that start a new statement.
+           start_index = 0
+           for index, n in enumerate(error_node.children):
+               if n.start_pos > name.start_pos:
+                   break
+               if n == ';':
+                   start_index = index + 1
+           nodes = error_node.children[start_index:]
+           first_name = nodes[0].get_first_leaf().value
+
+           # Make it possible to infer stuff like `import foo.` or
+           # `from foo.bar`.
+           if first_name in ('from', 'import'):
+               is_import_from = first_name == 'from'
+               level, names = helpers.parse_dotted_names(
+                   nodes,
+                   is_import_from=is_import_from,
+                   until_node=name,
+               )
+               return imports.Importer(self, names, context.get_root_context(), level).follow()
+       return None
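The practical effect, as a sketch (assumes jedi 0.14+): imports that do not parse cleanly can still be resolved, because the error node is reinterpreted as a dotted import path:

```python
import jedi

# `import os.pa` is an incomplete statement, yet completions still work on
# the part before the cursor thanks to the error-node handling above.
script = jedi.Script('import os.pa', line=1, column=12)
print([c.name for c in script.completions()])  # e.g. ['path']
```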

    def goto(self, context, name):
        definition = name.get_definition(import_name_always=True)
        if definition is not None:
@@ -251,11 +318,15 @@ class Evaluator(object):
                return [TreeNameDefinition(context, name)]
            elif type_ == 'param':
                return [ParamName(context, name)]
            elif type_ in ('funcdef', 'classdef'):
                return [TreeNameDefinition(context, name)]
            elif type_ in ('import_from', 'import_name'):
                module_names = imports.infer_import(context, name, is_goto=True)
                return module_names
            else:
                return [TreeNameDefinition(context, name)]
        else:
            contexts = self._follow_error_node_imports_if_possible(context, name)
            if contexts is not None:
                return [context.name for context in contexts]

        par = name.parent
        node_type = par.type
@@ -277,12 +348,8 @@ class Evaluator(object):
            context_set = eval_trailer(context, context_set, trailer)
        param_names = []
        for context in context_set:
            try:
                get_param_names = context.get_param_names
            except AttributeError:
                pass
            else:
                for param_name in get_param_names():
            for signature in context.get_signatures():
                for param_name in signature.get_param_names():
                    if param_name.string_name == name.value:
                        param_names.append(param_name)
        return param_names
@@ -299,10 +366,7 @@ class Evaluator(object):

        if node_type == 'trailer' and par.children[0] == '.':
            values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
            return unite(
                value.py__getattribute__(name, name_context=context, is_goto=True)
                for value in values
            )
            return values.py__getattribute__(name, name_context=context, is_goto=True)
        else:
            stmt = tree.search_ancestor(
                name, 'expr_stmt', 'lambdef'
@@ -323,44 +387,38 @@ class Evaluator(object):
                if parser_utils.is_scope(node):
                    return node
                elif node.type in ('argument', 'testlist_comp'):
                    if node.children[1].type == 'comp_for':
                    if node.children[1].type in ('comp_for', 'sync_comp_for'):
                        return node.children[1]
                elif node.type == 'dictorsetmaker':
                    for n in node.children[1:4]:
                        # In dictionaries it can be pretty much anything.
                        if n.type == 'comp_for':
                        if n.type in ('comp_for', 'sync_comp_for'):
                            return n

        def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
        def from_scope_node(scope_node, is_nested=True, node_is_object=False):
            if scope_node == base_node:
                return base_context

            is_funcdef = scope_node.type in ('funcdef', 'lambdef')
            parent_scope = parser_utils.get_parent_scope(scope_node)
            parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
            parent_context = from_scope_node(parent_scope)

            if is_funcdef:
                func = FunctionContext.from_context(
                    parent_context,
                    scope_node
                )
                if isinstance(parent_context, AnonymousInstance):
                func = FunctionContext.from_context(parent_context, scope_node)
                if parent_context.is_class():
                    instance = AnonymousInstance(
                        self, parent_context.parent_context, parent_context)
                    func = BoundMethod(
                        instance=parent_context,
                        klass=parent_context.class_context,
                        instance=instance,
                        function=func
                    )

                if is_nested and not node_is_object:
                    return func.get_function_execution()
                return func
            elif scope_node.type == 'classdef':
                class_context = ClassContext(self, parent_context, scope_node)
                if child_is_funcdef:
                    # anonymous instance
                    return AnonymousInstance(self, parent_context, class_context)
                else:
                    return class_context
            elif scope_node.type == 'comp_for':
                return ClassContext(self, parent_context, scope_node)
            elif scope_node.type in ('comp_for', 'sync_comp_for'):
                if node.start_pos >= scope_node.children[-1].start_pos:
                    return parent_context
                return CompForContext.from_comp_for(parent_context, scope_node)
@@ -371,21 +429,26 @@ class Evaluator(object):
        if node_is_context and parser_utils.is_scope(node):
            scope_node = node
        else:
            if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node:
                # When we're on class/function names/leafs that define the
                # object itself and not its contents.
                node = node.parent
            scope_node = parent_scope(node)
        if scope_node.type in ('funcdef', 'classdef'):
            colon = scope_node.children[scope_node.children.index(':')]
            if node.start_pos < colon.start_pos:
                parent = node.parent
                if not (parent.type == 'param' and parent.name == node):
                    scope_node = parent_scope(scope_node)
        return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)

    def parse_and_get_code(self, code=None, path=None, encoding='utf-8', **kwargs):
    def parse_and_get_code(self, code=None, path=None, encoding='utf-8',
                           use_latest_grammar=False, file_io=None, **kwargs):
        if self.allow_different_encoding:
            if code is None:
                with open(path, 'rb') as f:
                    code = f.read()
                if file_io is None:
                    file_io = FileIO(path)
                code = file_io.read()
            code = python_bytes_to_unicode(code, encoding=encoding, errors='replace')

        return self.grammar.parse(code=code, path=path, **kwargs), code
        grammar = self.latest_grammar if use_latest_grammar else self.grammar
        return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code

    def parse(self, *args, **kwargs):
        return self.parse_and_get_code(*args, **kwargs)[0]

@@ -5,7 +5,6 @@ from parso.python import tree

from jedi._compatibility import force_unicode
from jedi import debug
from jedi.evaluate.compiled import CompiledObject
from jedi.evaluate.helpers import is_string


@@ -86,37 +85,40 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None):
    # TODO this path is probably not right
    module_context = node_context.get_root_context()
    module_path = module_context.py__file__()
    instance = typ(error_name, module_path, node.start_pos, message)
    debug.warning(str(instance), format=False)
    node_context.evaluator.analysis.append(instance)
    issue_instance = typ(error_name, module_path, node.start_pos, message)
    debug.warning(str(issue_instance), format=False)
    node_context.evaluator.analysis.append(issue_instance)
    return issue_instance


def _check_for_setattr(instance):
    """
    Check if there's any setattr method inside an instance. If so, return True.
    """
    from jedi.evaluate.context import ModuleContext
    module = instance.get_root_context()
    if not isinstance(module, ModuleContext):
    node = module.tree_node
    if node is None:
        # If it's a compiled module or doesn't have a tree_node
        return False

    node = module.tree_node
    try:
        stmts = node.get_used_names()['setattr']
        stmt_names = node.get_used_names()['setattr']
    except KeyError:
        return False

    return any(node.start_pos < stmt.start_pos < node.end_pos
               for stmt in stmts)
    return any(node.start_pos < n.start_pos < node.end_pos
               # Check if it's a function called setattr.
               and not (n.parent.type == 'funcdef' and n.parent.name == n)
               for n in stmt_names)


def add_attribute_error(name_context, lookup_context, name):
    message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name))
    from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName
    from jedi.evaluate.context.instance import CompiledInstanceName
    # Check for __getattr__/__getattribute__ existence and issue a warning
    # instead of an error, if that happens.
    typ = Error
    if isinstance(lookup_context, AbstractInstanceContext):
    if lookup_context.is_instance() and not lookup_context.is_compiled():
        slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \
            lookup_context.get_function_slot_names(u'__getattribute__')
        for n in slot_names:
@@ -142,11 +144,15 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None)
    Returns True if the exception was caught.
"""
|
||||
def check_match(cls, exception):
|
||||
try:
|
||||
return isinstance(cls, CompiledObject) and cls.is_super_class(exception)
|
||||
except TypeError:
|
||||
if not cls.is_class():
|
||||
return False
|
||||
|
||||
for python_cls in exception.mro():
|
||||
if cls.py__name__() == python_cls.__name__ \
|
||||
and cls.parent_context == cls.evaluator.builtins_module:
|
||||
return True
|
||||
return False
|
||||
|
||||
def check_try_for_except(obj, exception):
|
||||
# Only nodes in try
|
||||
iterator = iter(obj.children)
|
||||
|
||||
@@ -4,13 +4,14 @@ from parso.python import tree
|
||||
|
||||
from jedi._compatibility import zip_longest
|
||||
from jedi import debug
|
||||
from jedi.evaluate.utils import PushBackIterator
|
||||
from jedi.evaluate import analysis
|
||||
from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
|
||||
LazyTreeContext, get_merged_lazy_context
|
||||
from jedi.evaluate.filters import ParamName
|
||||
from jedi.evaluate.base_context import NO_CONTEXTS
|
||||
from jedi.evaluate.names import ParamName, TreeNameDefinition
|
||||
from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet, ContextualizedNode
|
||||
from jedi.evaluate.context import iterable
|
||||
from jedi.evaluate.param import get_executed_params, ExecutedParam
|
||||
from jedi.evaluate.param import get_executed_params_and_issues, ExecutedParam
|
||||
|
||||
|
||||
def try_iter_content(types, depth=0):
|
||||
@@ -30,6 +31,10 @@ def try_iter_content(types, depth=0):
|
||||
try_iter_content(lazy_context.infer(), depth + 1)
|
||||
|
||||
|
||||
class ParamIssue(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def repack_with_argument_clinic(string, keep_arguments_param=False):
|
||||
"""
|
||||
Transforms a function or method with arguments to the signature that is
|
||||
@@ -44,34 +49,51 @@ def repack_with_argument_clinic(string, keep_arguments_param=False):
|
||||
clinic_args = list(_parse_argument_clinic(string))
|
||||
|
||||
def decorator(func):
|
||||
def wrapper(*args, **kwargs):
|
||||
def wrapper(context, *args, **kwargs):
|
||||
if keep_arguments_param:
|
||||
arguments = kwargs['arguments']
|
||||
else:
|
||||
arguments = kwargs.pop('arguments')
|
||||
try:
|
||||
args += tuple(_iterate_argument_clinic(arguments, clinic_args))
|
||||
except ValueError:
|
||||
args += tuple(_iterate_argument_clinic(
|
||||
context.evaluator,
|
||||
arguments,
|
||||
clinic_args
|
||||
))
|
||||
except ParamIssue:
|
||||
return NO_CONTEXTS
|
||||
else:
|
||||
return func(*args, **kwargs)
|
||||
return func(context, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def _iterate_argument_clinic(arguments, parameters):
|
||||
def _iterate_argument_clinic(evaluator, arguments, parameters):
|
||||
"""Uses a list with argument clinic information (see PEP 436)."""
|
||||
iterator = arguments.unpack()
|
||||
for i, (name, optional, allow_kwargs) in enumerate(parameters):
|
||||
iterator = PushBackIterator(arguments.unpack())
|
||||
for i, (name, optional, allow_kwargs, stars) in enumerate(parameters):
|
||||
if stars == 1:
|
||||
lazy_contexts = []
|
||||
for key, argument in iterator:
|
||||
if key is not None:
|
||||
iterator.push_back((key, argument))
|
||||
break
|
||||
|
||||
lazy_contexts.append(argument)
|
||||
yield ContextSet([iterable.FakeSequence(evaluator, u'tuple', lazy_contexts)])
|
||||
lazy_contexts
|
||||
continue
|
||||
elif stars == 2:
|
||||
raise NotImplementedError()
|
||||
key, argument = next(iterator, (None, None))
|
||||
if key is not None:
|
||||
debug.warning('Keyword arguments in argument clinic are currently not supported.')
|
||||
raise ValueError
|
||||
raise ParamIssue
|
||||
if argument is None and not optional:
|
||||
debug.warning('TypeError: %s expected at least %s arguments, got %s',
|
||||
name, len(parameters), i)
|
||||
raise ValueError
|
||||
raise ParamIssue
|
||||
|
||||
context_set = NO_CONTEXTS if argument is None else argument.infer()
|
||||
|
||||
@@ -80,7 +102,7 @@ def _iterate_argument_clinic(arguments, parameters):
|
||||
# that's ok, maybe something is too hard to resolve, however,
|
||||
# we will not proceed with the evaluation of that function.
|
||||
debug.warning('argument_clinic "%s" not resolvable.', name)
|
||||
raise ValueError
|
||||
raise ParamIssue
|
||||
yield context_set
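The `PushBackIterator` used above only has to support returning one unpacked item to the front of the stream (needed when a keyword argument ends the `*args` grouping). A minimal sketch of such an iterator; this is an illustration, not the actual implementation in `jedi.evaluate.utils`:

    class PushBackIterator(object):
        def __init__(self, iterator):
            self._iterator = iterator
            self._pushed = []

        def push_back(self, value):
            # The value will be returned by the next call to __next__.
            self._pushed.append(value)

        def __iter__(self):
            return self

        def __next__(self):
            if self._pushed:
                return self._pushed.pop()
            return next(self._iterator)

        next = __next__  # Python 2 spelling, matching this codebase's era.

    it = PushBackIterator(iter([1, 2, 3]))
    first = next(it)
    it.push_back(first)
    assert list(it) == [1, 2, 3]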


@@ -92,21 +114,21 @@ def _parse_argument_clinic(string):
        # at the end of the arguments. This is therefore not a proper argument
        # clinic implementation. `range()` for example allows an optional start
        # value at the beginning.
        match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
        match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string)
        string = string[len(match.group(0)):]
        if not match.group(2):  # A slash -> allow named arguments
            allow_kwargs = True
            continue
        optional = optional or bool(match.group(1))
        word = match.group(2)
        yield (word, optional, allow_kwargs)
        stars = word.count('*')
        word = word[stars:]
        yield (word, optional, allow_kwargs, stars)
        if stars:
            allow_kwargs = True
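With the added `stars` field, a clinic string is parsed into 4-tuples. A hedged walk-through of what the code above yields for 'obj, *iterables':

    # _parse_argument_clinic('obj, *iterables') yields, in order:
    #   ('obj',       False, False, 0)
    #   ('iterables', False, False, 1)
    # After the starred parameter, allow_kwargs flips to True for any
    # parameters that would follow.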


class AbstractArguments(object):
    context = None
    argument_node = None
    trailer = None

class _AbstractArgumentsMixin(object):
    def eval_all(self, funcdef=None):
        """
        Evaluates all arguments as a support for static analysis
@@ -116,29 +138,62 @@ class AbstractArguments(object):
            types = lazy_context.infer()
            try_iter_content(types)

    def get_calling_nodes(self):
        return []

    def unpack(self, funcdef=None):
        raise NotImplementedError

    def get_executed_params(self, execution_context):
        return get_executed_params(execution_context, self)
    def get_executed_params_and_issues(self, execution_context):
        return get_executed_params_and_issues(execution_context, self)

    def get_calling_nodes(self):
        return []


class AbstractArguments(_AbstractArgumentsMixin):
    context = None
    argument_node = None
    trailer = None


class AnonymousArguments(AbstractArguments):
    def get_executed_params(self, execution_context):
    def get_executed_params_and_issues(self, execution_context):
        from jedi.evaluate.dynamic import search_params
        return search_params(
            execution_context.evaluator,
            execution_context,
            execution_context.tree_node
        )
        ), []

    def __repr__(self):
        return '%s()' % self.__class__.__name__


def unpack_arglist(arglist):
    if arglist is None:
        return

    # Allow testlist here as well for Python2's class inheritance
    # definitions.
    if not (arglist.type in ('arglist', 'testlist') or (
            # in python 3.5 **arg is an argument, not arglist
            (arglist.type == 'argument') and
            arglist.children[0] in ('*', '**'))):
        yield 0, arglist
        return

    iterator = iter(arglist.children)
    for child in iterator:
        if child == ',':
            continue
        elif child in ('*', '**'):
            yield len(child.value), next(iterator)
        elif child.type == 'argument' and \
                child.children[0] in ('*', '**'):
            assert len(child.children) == 2
            yield len(child.children[0].value), child.children[1]
        else:
            yield 0, child
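Read together with its call sites, `unpack_arglist` flattens an argument list into `(star_count, node)` pairs, where `star_count` is the number of leading stars. For a call like `f(a, *b, **c)` the generator yields roughly:

    # (0, <node for a>)   plain positional argument
    # (1, <node for b>)   one star: iterable unpacking
    # (2, <node for c>)   two stars: dict unpacking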


class TreeArguments(AbstractArguments):
    def __init__(self, evaluator, context, argument_node, trailer=None):
        """
@@ -153,35 +208,9 @@ class TreeArguments(AbstractArguments):
        self._evaluator = evaluator
        self.trailer = trailer  # Can be None, e.g. in a class definition.

    def _split(self):
        if self.argument_node is None:
            return

        # Allow testlist here as well for Python2's class inheritance
        # definitions.
        if not (self.argument_node.type in ('arglist', 'testlist') or (
                # in python 3.5 **arg is an argument, not arglist
                (self.argument_node.type == 'argument') and
                self.argument_node.children[0] in ('*', '**'))):
            yield 0, self.argument_node
            return

        iterator = iter(self.argument_node.children)
        for child in iterator:
            if child == ',':
                continue
            elif child in ('*', '**'):
                yield len(child.value), next(iterator)
            elif child.type == 'argument' and \
                    child.children[0] in ('*', '**'):
                assert len(child.children) == 2
                yield len(child.children[0].value), child.children[1]
            else:
                yield 0, child

    def unpack(self, funcdef=None):
        named_args = []
        for star_count, el in self._split():
        for star_count, el in unpack_arglist(self.argument_node):
            if star_count == 1:
                arrays = self.context.eval_node(el)
                iterators = [_iterate_star_args(self.context, a, el, funcdef)
@@ -204,25 +233,40 @@ class TreeArguments(AbstractArguments):
                    named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),))
                else:  # Generator comprehension.
                    # Include the brackets with the parent.
                    sync_comp_for = el.children[1]
                    if sync_comp_for.type == 'comp_for':
                        sync_comp_for = sync_comp_for.children[1]
                    comp = iterable.GeneratorComprehension(
                        self._evaluator, self.context, self.argument_node.parent)
                        self._evaluator,
                        defining_context=self.context,
                        sync_comp_for_node=sync_comp_for,
                        entry_node=el.children[0],
                    )
                    yield None, LazyKnownContext(comp)
            else:
                yield None, LazyTreeContext(self.context, el)

        # Reordering var_args is necessary, because star args sometimes appear
        # Reordering arguments is necessary, because star args sometimes appear
        # after named argument, but in the actual order it's prepended.
        for named_arg in named_args:
            yield named_arg

    def as_tree_tuple_objects(self):
        for star_count, argument in self._split():
    def _as_tree_tuple_objects(self):
        for star_count, argument in unpack_arglist(self.argument_node):
            default = None
            if argument.type == 'argument':
                argument, default = argument.children[::2]
            else:
                default = None
                if len(argument.children) == 3:  # Keyword argument.
                    argument, default = argument.children[::2]
            yield argument, default, star_count

    def iter_calling_names_with_star(self):
        for name, default, star_count in self._as_tree_tuple_objects():
            # TODO this function is a bit strange. probably refactor?
            if not star_count or not isinstance(name, tree.Name):
                continue

            yield TreeNameDefinition(self.context, name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.argument_node)

@@ -236,11 +280,8 @@ class TreeArguments(AbstractArguments):
                break

            old_arguments_list.append(arguments)
            for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())):
                if not star_count or not isinstance(name, tree.Name):
                    continue

                names = self._evaluator.goto(arguments.context, name)
            for calling_name in reversed(list(arguments.iter_calling_names_with_star())):
                names = calling_name.goto()
                if len(names) != 1:
                    break
                if not isinstance(names[0], ParamName):
@@ -257,9 +298,9 @@ class TreeArguments(AbstractArguments):
                break

        if arguments.argument_node is not None:
            return [arguments.argument_node]
            return [ContextualizedNode(arguments.context, arguments.argument_node)]
        if arguments.trailer is not None:
            return [arguments.trailer]
            return [ContextualizedNode(arguments.context, arguments.trailer)]
        return []


@@ -275,15 +316,43 @@ class ValuesArguments(AbstractArguments):
        return '<%s: %s>' % (self.__class__.__name__, self._values_list)


class TreeArgumentsWrapper(_AbstractArgumentsMixin):
    def __init__(self, arguments):
        self._wrapped_arguments = arguments

    @property
    def context(self):
        return self._wrapped_arguments.context

    @property
    def argument_node(self):
        return self._wrapped_arguments.argument_node

    @property
    def trailer(self):
        return self._wrapped_arguments.trailer

    def unpack(self, func=None):
        raise NotImplementedError

    def get_calling_nodes(self):
        return self._wrapped_arguments.get_calling_nodes()

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments)


def _iterate_star_args(context, array, input_node, funcdef=None):
    try:
        iter_ = array.py__iter__
    except AttributeError:
    if not array.py__getattribute__('__iter__'):
        if funcdef is not None:
            # TODO this funcdef should not be needed.
            m = "TypeError: %s() argument after * must be a sequence, not %s" \
                % (funcdef.name.value, array)
            analysis.add(context, 'type-error-star', input_node, message=m)
    try:
        iter_ = array.py__iter__
    except AttributeError:
        pass
    else:
        for lazy_context in iter_():
            yield lazy_context

@@ -6,145 +6,55 @@ A ContextSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from parso.python.tree import ExprStmt, CompFor
from functools import reduce
from operator import add
from parso.python.tree import ExprStmt, SyncCompFor

from jedi import debug
from jedi._compatibility import Python3Method, zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
from jedi._compatibility import zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring
from jedi.common import BaseContextSet, BaseContext
from jedi.evaluate.helpers import EvaluatorIndexError, EvaluatorTypeError, \
    EvaluatorKeyError
from jedi.evaluate.helpers import SimpleGetItemNotFound
from jedi.evaluate.utils import safe_property
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.cache import memoize_method

_sentinel = object()


class Context(BaseContext):
    """
    Should be defined, otherwise the API returns empty types.
    """
class HelperContextMixin(object):
    def get_root_context(self):
        context = self
        while True:
            if context.parent_context is None:
                return context
            context = context.parent_context

    predefined_names = {}
    tree_node = None
    """
    To be defined by subclasses.
    """
    @classmethod
    @evaluator_as_method_param_cache()
    def create_cached(cls, *args, **kwargs):
        return cls(*args, **kwargs)

    @property
    def api_type(self):
        # By default just lower name of the class. Can and should be
        # overwritten.
        return self.__class__.__name__.lower()

    @debug.increase_indent
    def execute(self, arguments):
        """
        In contrast to py__call__ this function is always available.

        `hasattr(x, py__call__)` can also be checked to see if a context is
        executable.
        """
        if self.evaluator.is_analysis:
            arguments.eval_all()

        debug.dbg('execute: %s %s', self, arguments)
        from jedi.evaluate import stdlib
        try:
            # Some stdlib functions like super(), namedtuple(), etc. have been
            # hard-coded in Jedi to support them.
            return stdlib.execute(self.evaluator, self, arguments)
        except stdlib.NotInStdLib:
            pass

        try:
            func = self.py__call__
        except AttributeError:
            debug.warning("no execution possible %s", self)
            return NO_CONTEXTS
        else:
            context_set = func(arguments)
            debug.dbg('execute result: %s in %s', context_set, self)
            return context_set

        return self.evaluator.execute(self, arguments)
        return self.evaluator.execute(self, arguments=arguments)

    def execute_evaluated(self, *value_list):
        """
        Execute a function with already executed arguments.
        """
        from jedi.evaluate.arguments import ValuesArguments
        arguments = ValuesArguments([ContextSet(value) for value in value_list])
        return self.execute(arguments)
        arguments = ValuesArguments([ContextSet([value]) for value in value_list])
        return self.evaluator.execute(self, arguments)

    def iterate(self, contextualized_node=None, is_async=False):
        debug.dbg('iterate %s', self)
        try:
            if is_async:
                iter_method = self.py__aiter__
            else:
                iter_method = self.py__iter__
        except AttributeError:
            if contextualized_node is not None:
                from jedi.evaluate import analysis
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-iterable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not iterable" % self)
            return iter([])
        else:
            return iter_method()
    def execute_annotation(self):
        return self.execute_evaluated()

    def get_item(self, index_contexts, contextualized_node):
        from jedi.evaluate.compiled import CompiledObject
        from jedi.evaluate.context.iterable import Slice, Sequence
        result = ContextSet()
    def gather_annotation_classes(self):
        return ContextSet([self])

        for index in index_contexts:
            if isinstance(index, Slice):
                index = index.obj
            if isinstance(index, CompiledObject):
                try:
                    index = index.get_safe_value()
                except ValueError:
                    pass
    def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
        return ContextSet.from_sets(
            lazy_context.infer()
            for lazy_context in self.iterate(contextualized_node, is_async)
        )

            if type(index) not in (float, int, str, unicode, slice, bytes):
                # If the index is not clearly defined, we have to get all the
                # possibilities.
                if isinstance(self, Sequence) and self.array_type == 'dict':
                    result |= self.dict_values()
                else:
                    result |= iterate_contexts(ContextSet(self))
                continue

            # The actual getitem call.
            try:
                getitem = self.py__getitem__
            except AttributeError:
                from jedi.evaluate import analysis
                # TODO this context is probably not right.
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-subscriptable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not subscriptable" % self
                )
            else:
                try:
                    result |= getitem(index)
                except EvaluatorIndexError:
                    result |= iterate_contexts(ContextSet(self))
                except EvaluatorKeyError:
                    # Must be a dict. Lists don't raise KeyErrors.
                    result |= self.dict_values()
                except EvaluatorTypeError:
                    # The type is wrong and therefore it makes no sense to do
                    # anything anymore.
                    result = NO_CONTEXTS
        return result

    def eval_node(self, node):
        return self.evaluator.eval_element(self, node)

    @Python3Method
    def py__getattribute__(self, name_or_str, name_context=None, position=None,
                           search_global=False, is_goto=False,
                           analysis_errors=True):
@@ -161,12 +71,108 @@ class Context(BaseContext):
            return f.filter_name(filters)
        return f.find(filters, attribute_lookup=not search_global)

    def py__await__(self):
        await_context_set = self.py__getattribute__(u"__await__")
        if not await_context_set:
            debug.warning('Tried to run __await__ on context %s', self)
        return await_context_set.execute_evaluated()

    def eval_node(self, node):
        return self.evaluator.eval_element(self, node)

    def create_context(self, node, node_is_context=False, node_is_object=False):
        return self.evaluator.create_context(self, node, node_is_context, node_is_object)

    def iterate(self, contextualized_node=None, is_async=False):
        debug.dbg('iterate %s', self)
        if is_async:
            from jedi.evaluate.lazy_context import LazyKnownContexts
            # TODO if no __aiter__ contexts are there, error should be:
            # TypeError: 'async for' requires an object with __aiter__ method, got int
            return iter([
                LazyKnownContexts(
                    self.py__getattribute__('__aiter__').execute_evaluated()
                        .py__getattribute__('__anext__').execute_evaluated()
                        .py__getattribute__('__await__').execute_evaluated()
                        .py__stop_iteration_returns()
                )  # noqa
            ])
        return self.py__iter__(contextualized_node)
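The chained lookups above mirror how `async for` is resolved in Python itself: `__aiter__` produces the async iterator, each step calls `__anext__`, and the returned awaitable is driven through `__await__`. Roughly:

    # async for x in obj:
    #     ...
    # desugars approximately to:
    #
    # it = obj.__aiter__()
    # while True:
    #     try:
    #         x = await it.__anext__()   # 'await' uses __await__ underneath
    #     except StopAsyncIteration:
    #         break
    #     ...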

    def is_sub_class_of(self, class_context):
        for cls in self.py__mro__():
            if cls.is_same_class(class_context):
                return True
        return False

    def is_same_class(self, class2):
        # Class matching should prefer comparisons that are not this function.
        if type(class2).is_same_class != HelperContextMixin.is_same_class:
            return class2.is_same_class(self)
        return self == class2


class Context(HelperContextMixin, BaseContext):
    """
    Should be defined, otherwise the API returns empty types.
    """
    predefined_names = {}
    """
    To be defined by subclasses.
    """
    tree_node = None

    @property
    def api_type(self):
        # By default just lower name of the class. Can and should be
        # overwritten.
        return self.__class__.__name__.lower()

    def py__getitem__(self, index_context_set, contextualized_node):
        from jedi.evaluate import analysis
        # TODO this context is probably not right.
        analysis.add(
            contextualized_node.context,
            'type-error-not-subscriptable',
            contextualized_node.node,
            message="TypeError: '%s' object is not subscriptable" % self
        )
        return NO_CONTEXTS

    def py__iter__(self, contextualized_node=None):
        if contextualized_node is not None:
            from jedi.evaluate import analysis
            analysis.add(
                contextualized_node.context,
                'type-error-not-iterable',
                contextualized_node.node,
                message="TypeError: '%s' object is not iterable" % self)
        return iter([])

    def get_signatures(self):
        return []

    def is_class(self):
        return False

    def is_instance(self):
        return False

    def is_function(self):
        return False

    def is_module(self):
        return False

    def is_namespace(self):
        return False

    def is_compiled(self):
        return False

    def is_bound_method(self):
        return False

    def py__bool__(self):
        """
        Since Wrapper is a super class for classes, functions and modules,
@@ -174,18 +180,36 @@ class Context(BaseContext):
        """
        return True

    def py__doc__(self, include_call_signature=False):
    def py__doc__(self):
        try:
            self.tree_node.get_doc_node
        except AttributeError:
            return ''
        else:
            if include_call_signature:
                return get_doc_with_call_signature(self.tree_node)
            else:
                return clean_scope_docstring(self.tree_node)
            return clean_scope_docstring(self.tree_node)
        return None

    def get_safe_value(self, default=_sentinel):
        if default is _sentinel:
            raise ValueError("There exists no safe value for context %s" % self)
        return default

    def py__call__(self, arguments):
        debug.warning("no execution possible %s", self)
        return NO_CONTEXTS

    def py__stop_iteration_returns(self):
        debug.warning("Not possible to return the stop iterations of %s", self)
        return NO_CONTEXTS

    def get_qualified_names(self):
        # Returns Optional[Tuple[str, ...]]
        return None

    def is_stub(self):
        # The root context knows if it's a stub or not.
        return self.parent_context.is_stub()


def iterate_contexts(contexts, contextualized_node=None, is_async=False):
    """
@@ -198,6 +222,51 @@ def iterate_contexts(contexts, contextualized_node=None, is_async=False):
    )


class _ContextWrapperBase(HelperContextMixin):
    predefined_names = {}

    @safe_property
    def name(self):
        from jedi.evaluate.names import ContextName
        wrapped_name = self._wrapped_context.name
        if wrapped_name.tree_name is not None:
            return ContextName(self, wrapped_name.tree_name)
        else:
            from jedi.evaluate.compiled import CompiledContextName
            return CompiledContextName(self, wrapped_name.string_name)

    @classmethod
    @evaluator_as_method_param_cache()
    def create_cached(cls, evaluator, *args, **kwargs):
        return cls(*args, **kwargs)

    def __getattr__(self, name):
        assert name != '_wrapped_context', 'Problem with _get_wrapped_context'
        return getattr(self._wrapped_context, name)


class LazyContextWrapper(_ContextWrapperBase):
    @safe_property
    @memoize_method
    def _wrapped_context(self):
        with debug.increase_indent_cm('Resolve lazy context wrapper'):
            return self._get_wrapped_context()

    def __repr__(self):
        return '<%s>' % (self.__class__.__name__)

    def _get_wrapped_context(self):
        raise NotImplementedError


class ContextWrapper(_ContextWrapperBase):
    def __init__(self, wrapped_context):
        self._wrapped_context = wrapped_context

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._wrapped_context)


class TreeContext(Context):
    def __init__(self, evaluator, parent_context, tree_node):
        super(TreeContext, self).__init__(evaluator, parent_context)
@@ -219,6 +288,9 @@ class ContextualizedNode(object):
    def infer(self):
        return self.context.eval_node(self.node)

    def __repr__(self):
        return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)


class ContextualizedName(ContextualizedNode):
    # TODO merge with TreeNameDefinition?!
@@ -236,19 +308,31 @@ class ContextualizedName(ContextualizedNode):
        x, (y, z) = 2, ''

        would result in ``[(1, xyz_node), (0, yz_node)]``.

        When searching for b in the case ``a, *b, c = [...]`` it will return::

            [(slice(1, -1), abc_node)]
        """
        indexes = []
        is_star_expr = False
        node = self.node.parent
        compare = self.node
        while node is not None:
            if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
                for i, child in enumerate(node.children):
                    if child == compare:
                        indexes.insert(0, (int(i / 2), node))
                        index = int(i / 2)
                        if is_star_expr:
                            from_end = int((len(node.children) - i) / 2)
                            index = slice(index, -from_end)
                        indexes.insert(0, (index, node))
                        break
                else:
                    raise LookupError("Couldn't find the assignment.")
            elif isinstance(node, (ExprStmt, CompFor)):
                is_star_expr = False
            elif node.type == 'star_expr':
                is_star_expr = True
            elif isinstance(node, (ExprStmt, SyncCompFor)):
                break

            compare = node
@@ -256,9 +340,51 @@ class ContextualizedName(ContextualizedNode):
        return indexes


def _getitem(context, index_contexts, contextualized_node):
    from jedi.evaluate.context.iterable import Slice

    # The actual getitem call.
    simple_getitem = getattr(context, 'py__simple_getitem__', None)

    result = NO_CONTEXTS
    unused_contexts = set()
    for index_context in index_contexts:
        if simple_getitem is not None:
            index = index_context
            if isinstance(index_context, Slice):
                index = index.obj

            try:
                method = index.get_safe_value
            except AttributeError:
                pass
            else:
                index = method(default=None)

            if type(index) in (float, int, str, unicode, slice, bytes):
                try:
                    result |= simple_getitem(index)
                    continue
                except SimpleGetItemNotFound:
                    pass

        unused_contexts.add(index_context)

    # The index was somehow not good enough or simply a wrong type.
    # Therefore we now iterate through all the contexts and just take
    # all results.
    if unused_contexts or not index_contexts:
        result |= context.py__getitem__(
            ContextSet(unused_contexts),
            contextualized_node
        )
    debug.dbg('py__getitem__ result: %s', result)
    return result
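In other words, `_getitem` tries the cheap path first and batches everything else. For a subscript `x[0]` with a known literal index it calls `x.py__simple_getitem__(0)`; indexes that are unresolvable or of the wrong type are collected in `unused_contexts` and handed to the generic `x.py__getitem__(...)` in one go:

    # x[0]            -> py__simple_getitem__(0)        (fast, exact)
    # x[i], i unknown -> py__getitem__(ContextSet(...)) (generic fallback)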


class ContextSet(BaseContextSet):
    def py__class__(self):
        return ContextSet.from_iterable(c.py__class__() for c in self._set)
        return ContextSet(c.py__class__() for c in self._set)

    def iterate(self, contextualized_node=None, is_async=False):
        from jedi.evaluate.lazy_context import get_merged_lazy_context
@@ -268,12 +394,43 @@ class ContextSet(BaseContextSet):
            [l for l in lazy_contexts if l is not None]
        )

    def execute(self, arguments):
        return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set)

NO_CONTEXTS = ContextSet()
    def execute_evaluated(self, *args, **kwargs):
        return ContextSet.from_sets(c.execute_evaluated(*args, **kwargs) for c in self._set)

    def py__getattribute__(self, *args, **kwargs):
        if kwargs.get('is_goto'):
            return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], [])
        return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)

    def get_item(self, *args, **kwargs):
        return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)

    def try_merge(self, function_name):
        context_set = self.__class__([])
        for c in self._set:
            try:
                method = getattr(c, function_name)
            except AttributeError:
                pass
            else:
                context_set |= method()
        return context_set

    def gather_annotation_classes(self):
        return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set])

    def get_signatures(self):
        return [sig for c in self._set for sig in c.get_signatures()]


NO_CONTEXTS = ContextSet([])


def iterator_to_context_set(func):
    def wrapper(*args, **kwargs):
        return ContextSet.from_iterable(func(*args, **kwargs))
        return ContextSet(func(*args, **kwargs))

    return wrapper

@@ -4,7 +4,10 @@
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
"""

from jedi import debug

_NO_DEFAULT = object()
_RECURSION_SENTINEL = object()


def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
@@ -28,8 +31,7 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a
            try:
                memo = cache[function]
            except KeyError:
                memo = {}
                cache[function] = memo
                cache[function] = memo = {}

            key = (obj, args, frozenset(kwargs.items()))
            if key in memo:
@@ -75,3 +77,47 @@ class CachedMetaClass(type):
    @evaluator_as_method_param_cache()
    def __call__(self, *args, **kwargs):
        return super(CachedMetaClass, self).__call__(*args, **kwargs)


def evaluator_method_generator_cache():
    """
    This is a special memoizer. It memoizes generators and also checks for
    recursion errors and returns no further iterator elements in that case.
"""
|
||||
def func(function):
|
||||
def wrapper(obj, *args, **kwargs):
|
||||
cache = obj.evaluator.memoize_cache
|
||||
try:
|
||||
memo = cache[function]
|
||||
except KeyError:
|
||||
cache[function] = memo = {}
|
||||
|
||||
key = (obj, args, frozenset(kwargs.items()))
|
||||
|
||||
if key in memo:
|
||||
actual_generator, cached_lst = memo[key]
|
||||
else:
|
||||
actual_generator = function(obj, *args, **kwargs)
|
||||
cached_lst = []
|
||||
memo[key] = actual_generator, cached_lst
|
||||
|
||||
i = 0
|
||||
while True:
|
||||
try:
|
||||
next_element = cached_lst[i]
|
||||
if next_element is _RECURSION_SENTINEL:
|
||||
debug.warning('Found a generator recursion for %s' % obj)
|
||||
# This means we have hit a recursion.
|
||||
return
|
||||
except IndexError:
|
||||
cached_lst.append(_RECURSION_SENTINEL)
|
||||
next_element = next(actual_generator, None)
|
||||
if next_element is None:
|
||||
cached_lst.pop()
|
||||
return
|
||||
cached_lst[-1] = next_element
|
||||
yield next_element
|
||||
i += 1
|
||||
return wrapper
|
||||
|
||||
return func
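A simplified, standalone version of the same pattern, for reference. The sentinel marks an element that is still being produced, so a re-entrant consumer detects the recursion instead of looping forever; like the code above, this sketch assumes the wrapped generator never yields None:

    def memoize_generator(func):
        sentinel = object()
        cache = {}

        def wrapper(*args):
            try:
                generator, produced = cache[args]
            except KeyError:
                generator, produced = func(*args), []
                cache[args] = generator, produced
            i = 0
            while True:
                if i < len(produced):
                    if produced[i] is sentinel:
                        return  # re-entered while producing: recursion
                    yield produced[i]
                else:
                    # Mark the slot, pull the next element, then fill it in.
                    produced.append(sentinel)
                    element = next(generator, None)
                    if element is None:
                        produced.pop()
                        return
                    produced[-1] = element
                    yield element
                i += 1
        return wrapper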

@@ -1,12 +1,34 @@
from jedi._compatibility import unicode
from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \
    CompiledObjectFilter, CompiledContextName, create_from_access_path, \
    create_from_name
    CompiledObjectFilter, CompiledContextName, create_from_access_path
from jedi.evaluate.base_context import ContextWrapper


def builtin_from_name(evaluator, string):
    builtins = evaluator.builtins_module
    return create_from_name(evaluator, builtins, string)
    typing_builtins_module = evaluator.builtins_module
    if string in ('None', 'True', 'False'):
        builtins, = typing_builtins_module.non_stub_context_set
        filter_ = next(builtins.get_filters())
    else:
        filter_ = next(typing_builtins_module.get_filters())
    name, = filter_.get(string)
    context, = name.infer()
    return context


class CompiledValue(ContextWrapper):
    def __init__(self, instance, compiled_obj):
        super(CompiledValue, self).__init__(instance)
        self._compiled_obj = compiled_obj

    def __getattribute__(self, name):
        if name in ('get_safe_value', 'execute_operation', 'access_handle',
                    'negate', 'py__bool__', 'is_compiled'):
            return getattr(self._compiled_obj, name)
        return super(CompiledValue, self).__getattribute__(name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._compiled_obj)


def create_simple_object(evaluator, obj):
@@ -14,18 +36,13 @@ def create_simple_object(evaluator, obj):
    Only allows creations of objects that are easily picklable across Python
    versions.
    """
    assert isinstance(obj, (int, float, str, bytes, unicode, slice, complex))
    return create_from_access_path(
    assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj
    compiled_obj = create_from_access_path(
        evaluator,
        evaluator.compiled_subprocess.create_simple_object(obj)
    )


def get_special_object(evaluator, identifier):
    return create_from_access_path(
        evaluator,
        evaluator.compiled_subprocess.get_special_object(identifier)
    )
    instance, = builtin_from_name(evaluator, compiled_obj.name.string_name).execute_evaluated()
    return CompiledValue(instance, compiled_obj)


def get_string_context_set(evaluator):

@@ -1,14 +1,15 @@
from __future__ import print_function
import inspect
import types
import sys
from textwrap import dedent
import operator as op
from collections import namedtuple

from jedi._compatibility import unicode, is_py3, builtins, \
    py_version, force_unicode, print_to_stderr
    py_version, force_unicode
from jedi.evaluate.compiled.getattr_static import getattr_static

ALLOWED_GETITEM_TYPES = (str, list, tuple, unicode, bytes, bytearray, dict)

MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
@@ -43,12 +44,6 @@ WrapperDescriptorType = type(set.__iter__)
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])

def _a_generator(foo):
    """Used to have an object to return for generators."""
    yield 42
    yield foo


_sentinel = object()

# Maps Python syntax to the operator module.
@@ -98,7 +93,7 @@ def safe_getattr(obj, name, default=_sentinel):

SignatureParam = namedtuple(
    'SignatureParam',
    'name has_default default has_annotation annotation kind_name'
    'name has_default default default_string has_annotation annotation annotation_string kind_name'
)


@@ -140,13 +135,13 @@ def load_module(evaluator, dotted_name, sys_path):
        __import__(dotted_name)
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        print_to_stderr('Module %s not importable in path %s.' % (dotted_name, sys_path))
        print('Module %s not importable in path %s.' % (dotted_name, sys_path), file=sys.stderr)
        return None
    except Exception:
        # Since __import__ pretty much makes code execution possible, just
        # catch any error here and print it.
        import traceback
        print_to_stderr("Cannot import:\n%s" % traceback.format_exc())
        print("Cannot import:\n%s" % traceback.format_exc(), file=sys.stderr)
        return None
    finally:
        sys.path = temp
@@ -203,7 +198,7 @@ class DirectObjectAccess(object):
        except AttributeError:
            return None

    def py__doc__(self, include_call_signature=False):
    def py__doc__(self):
        return force_unicode(inspect.getdoc(self._obj)) or u''

    def py__name__(self):
@@ -226,15 +221,23 @@ class DirectObjectAccess(object):
    def py__mro__accesses(self):
        return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])

    def py__getitem__(self, index):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
    def py__getitem__all_values(self):
        if isinstance(self._obj, dict):
            return [self._create_access_path(v) for v in self._obj.values()]
        return self.py__iter__list()

    def py__simple_getitem__(self, index):
        if type(self._obj) not in ALLOWED_GETITEM_TYPES:
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return None

        return self._create_access_path(self._obj[index])

    def py__iter__list(self):
        if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
        if not hasattr(self._obj, '__getitem__'):
            return None

        if type(self._obj) not in ALLOWED_GETITEM_TYPES:
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return []

@@ -277,9 +280,28 @@ class DirectObjectAccess(object):
    def is_class(self):
        return inspect.isclass(self._obj)

    def is_module(self):
        return inspect.ismodule(self._obj)

    def is_instance(self):
        return _is_class_instance(self._obj)

    def ismethoddescriptor(self):
        return inspect.ismethoddescriptor(self._obj)

    def get_qualified_names(self):
        def try_to_get_name(obj):
            return getattr(obj, '__qualname__', getattr(obj, '__name__', None))

        if self.is_module():
            return ()
        name = try_to_get_name(self._obj)
        if name is None:
            name = try_to_get_name(type(self._obj))
            if name is None:
                return ()
        return tuple(name.split('.'))
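Some concrete examples of what `get_qualified_names` produces, derived from the fallback chain above (illustrative):

    # inspect.isclass   -> ('isclass',)          via __qualname__
    # str.upper         -> ('str', 'upper')      via __qualname__
    # 5 (an int)        -> ('int',)              falls back to type(obj)
    # a module object   -> ()                    modules are handled separately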

    def dir(self):
        return list(map(force_unicode, dir(self._obj)))

@@ -303,16 +325,26 @@ class DirectObjectAccess(object):
            return True, True
        return True, False

    def getattr(self, name, default=_sentinel):
    def getattr_paths(self, name, default=_sentinel):
        try:
            return self._create_access(getattr(self._obj, name))
            return_obj = getattr(self._obj, name)
        except AttributeError:
            # Happens e.g. in properties of
            # PyQt4.QtGui.QStyleOptionComboBox.currentText
            # -> just set it to None
            if default is _sentinel:
                raise
            return self._create_access(default)
            return_obj = default
        access = self._create_access(return_obj)
        if inspect.ismodule(return_obj):
            return [access]

        module = inspect.getmodule(return_obj)
        if module is None:
            module = inspect.getmodule(type(return_obj))
            if module is None:
                module = builtins
        return [self._create_access(module), access]

    def get_safe_value(self):
        if type(self._obj) in (bool, bytes, float, int, str, unicode, slice):
@@ -359,7 +391,6 @@ class DirectObjectAccess(object):
            yield builtins
        else:
            try:
                # TODO use sys.modules, __module__ can be faked.
                yield sys.modules[imp_plz]
            except KeyError:
                # __module__ can be something arbitrary that doesn't exist.
@@ -376,6 +407,20 @@ class DirectObjectAccess(object):
        return inspect.isclass(self._obj) and self._obj != type

    def get_signature_params(self):
        return [
            SignatureParam(
                name=p.name,
                has_default=p.default is not p.empty,
                default=self._create_access_path(p.default),
                default_string=repr(p.default),
                has_annotation=p.annotation is not p.empty,
                annotation=self._create_access_path(p.annotation),
                annotation_string=str(p.annotation),
                kind_name=str(p.kind)
            ) for p in self._get_signature().parameters.values()
        ]

    def _get_signature(self):
        obj = self._obj
        if py_version < 33:
            raise ValueError("inspect.signature was introduced in 3.3")
@@ -396,32 +441,27 @@ class DirectObjectAccess(object):
            raise ValueError

        try:
            signature = inspect.signature(obj)
            return inspect.signature(obj)
        except (RuntimeError, TypeError):
            # Reading the code of the function in Python 3.6 implies there are
            # at least these errors that might occur if something is wrong with
            # the signature. In that case we just want a simple escape for now.
            raise ValueError
        return [
            SignatureParam(
                name=p.name,
                has_default=p.default is not p.empty,
                default=self._create_access_path(p.default),
                has_annotation=p.annotation is not p.empty,
                annotation=self._create_access_path(p.annotation),
                kind_name=str(p.kind)
            ) for p in signature.parameters.values()
        ]

    def get_return_annotation(self):
        try:
            o = self._obj.__annotations__.get('return')
        except AttributeError:
            return None

        if o is None:
            return None

        return self._create_access_path(o)

    def negate(self):
        return self._create_access_path(-self._obj)

    def dict_values(self):
        return [self._create_access_path(v) for v in self._obj.values()]

    def is_super_class(self, exception):
        return issubclass(exception, self._obj)

    def get_dir_infos(self):
        """
        Used to return a couple of infos that are needed when accessing the sub
@@ -443,41 +483,3 @@ def _is_class_instance(obj):
        return False
    else:
        return cls != type and not issubclass(cls, NOT_CLASS_TYPES)


if py_version >= 35:
    exec(compile(dedent("""
        async def _coroutine(): pass
        _coroutine = _coroutine()
        CoroutineType = type(_coroutine)
        _coroutine.close()  # Prevent ResourceWarning
    """), 'blub', 'exec'))
    _coroutine_wrapper = _coroutine.__await__()
else:
    _coroutine = None
    _coroutine_wrapper = None

if py_version >= 36:
    exec(compile(dedent("""
        async def _async_generator():
            yield
        _async_generator = _async_generator()
        AsyncGeneratorType = type(_async_generator)
    """), 'blub', 'exec'))
else:
    _async_generator = None

class _SPECIAL_OBJECTS(object):
    FUNCTION_CLASS = types.FunctionType
    BOUND_METHOD_CLASS = type(DirectObjectAccess(None, None).py__bool__)
    MODULE_CLASS = types.ModuleType
    GENERATOR_OBJECT = _a_generator(1.0)
    BUILTINS = builtins
    COROUTINE = _coroutine
    COROUTINE_WRAPPER = _coroutine_wrapper
    ASYNC_GENERATOR = _async_generator


def get_special_object(evaluator, identifier):
    obj = getattr(_SPECIAL_OBJECTS, identifier)
    return create_access_path(evaluator, obj)

@@ -5,113 +5,145 @@ import re
from functools import partial

from jedi import debug
from jedi._compatibility import force_unicode, Parameter
from jedi.evaluate.utils import to_list
from jedi._compatibility import force_unicode, Parameter, cast_path
from jedi.cache import underscore_memoization, memoize_method
from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
    ContextNameMixin
from jedi.evaluate.base_context import Context, ContextSet
from jedi.evaluate.filters import AbstractFilter
from jedi.evaluate.names import AbstractNameDefinition, ContextNameMixin, \
    ParamNameInterface
from jedi.evaluate.base_context import Context, ContextSet, NO_CONTEXTS
from jedi.evaluate.lazy_context import LazyKnownContext
from jedi.evaluate.compiled.access import _sentinel
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate.helpers import reraise_as_evaluator
from . import fake
from jedi.evaluate.helpers import reraise_getitem_errors
from jedi.evaluate.signature import BuiltinSignature


class CheckAttribute(object):
    """Raises an AttributeError if the attribute X isn't available."""
    def __init__(self, func):
        self.func = func
    def __init__(self, check_name=None):
        # Remove the py in front of e.g. py__call__.
        self.check_name = force_unicode(func.__name__[2:])
        self.check_name = check_name

    def __call__(self, func):
        self.func = func
        if self.check_name is None:
            self.check_name = force_unicode(func.__name__[2:])
        return self

    def __get__(self, instance, owner):
        if instance is None:
            return self

        # This might raise an AttributeError. That's wanted.
        if self.check_name == '__iter__':
            # Python iterators are a bit strange, because there's no need for
            # the __iter__ function as long as __getitem__ is defined (it will
            # just start with __getitem__(0). This is especially true for
            # Python 2 strings, where `str.__iter__` is not even defined.
            if not instance.access_handle.has_iter():
                raise AttributeError
        else:
            instance.access_handle.getattr(self.check_name)
            instance.access_handle.getattr_paths(self.check_name)
        return partial(self.func, instance)
|
||||
|
||||
|
||||
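`CheckAttribute` is a decorator that doubles as a descriptor: looking the wrapped method up on an instance first probes the underlying compiled object and re-raises as `AttributeError`, so `hasattr()` on the wrapper stays truthful. A stripped-down sketch of the same pattern, independent of jedi's access handles:

    from functools import partial

    class check_attribute(object):
        def __init__(self, func):
            self.func = func
            self.check_name = func.__name__[2:]  # py__call__ -> __call__

        def __get__(self, instance, owner):
            if instance is None:
                return self
            # Raise AttributeError here so hasattr() returns False.
            if not hasattr(instance.wrapped, self.check_name):
                raise AttributeError(self.check_name)
            return partial(self.func, instance)

    class Wrapper(object):
        def __init__(self, wrapped):
            self.wrapped = wrapped

        @check_attribute
        def py__call__(self):
            return self.wrapped()

    print(hasattr(Wrapper(42), 'py__call__'))  # False: ints aren't callable
    print(Wrapper(list).py__call__())          # []: list has __call__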
class CompiledObject(Context):
    def __init__(self, evaluator, access_handle, parent_context=None, faked_class=None):
    def __init__(self, evaluator, access_handle, parent_context=None):
        super(CompiledObject, self).__init__(evaluator, parent_context)
        self.access_handle = access_handle
        # This attribute will not be set for most classes, except for fakes.
        self.tree_node = faked_class

    @CheckAttribute
    def py__call__(self, params):
        if self.tree_node is not None and self.tree_node.type == 'funcdef':
            from jedi.evaluate.context.function import FunctionContext
            return FunctionContext(
                self.evaluator,
                parent_context=self.parent_context,
                tree_node=self.tree_node
            ).py__call__(params)
        if self.access_handle.is_class():
            from jedi.evaluate.context import CompiledInstance
            return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params))
    def py__call__(self, arguments):
        return_annotation = self.access_handle.get_return_annotation()
        if return_annotation is not None:
            # TODO the return annotation may also be a string.
            return create_from_access_path(self.evaluator, return_annotation).execute_annotation()

        try:
            self.access_handle.getattr_paths(u'__call__')
        except AttributeError:
            return super(CompiledObject, self).py__call__(arguments)
        else:
            return ContextSet.from_iterable(self._execute_function(params))
        if self.access_handle.is_class():
            from jedi.evaluate.context import CompiledInstance
            return ContextSet([
                CompiledInstance(self.evaluator, self.parent_context, self, arguments)
            ])
        else:
            return ContextSet(self._execute_function(arguments))

    @CheckAttribute
    @CheckAttribute()
    def py__class__(self):
        return create_from_access_path(self.evaluator, self.access_handle.py__class__())

    @CheckAttribute
    @CheckAttribute()
    def py__mro__(self):
        return (self,) + tuple(
            create_from_access_path(self.evaluator, access)
            for access in self.access_handle.py__mro__accesses()
        )

    @CheckAttribute
    @CheckAttribute()
    def py__bases__(self):
        return tuple(
            create_from_access_path(self.evaluator, access)
            for access in self.access_handle.py__bases__()
        )

    @CheckAttribute
    @CheckAttribute()
    def py__path__(self):
        return self.access_handle.py__path__()
        return map(cast_path, self.access_handle.py__path__())

    @property
    def string_names(self):
        # For modules
        name = self.py__name__()
        if name is None:
            return ()
        return tuple(name.split('.'))

    def get_qualified_names(self):
        return self.access_handle.get_qualified_names()

    def py__bool__(self):
        return self.access_handle.py__bool__()

    def py__file__(self):
        return self.access_handle.py__file__()
        return cast_path(self.access_handle.py__file__())

    def is_class(self):
        return self.access_handle.is_class()

    def py__doc__(self, include_call_signature=False):
    def is_module(self):
        return self.access_handle.is_module()

    def is_compiled(self):
        return True

    def is_stub(self):
        return False

    def is_instance(self):
        return self.access_handle.is_instance()

    def py__doc__(self):
        return self.access_handle.py__doc__()

    @to_list
    def get_param_names(self):
        try:
            signature_params = self.access_handle.get_signature_params()
        except ValueError:  # Has no signature
            params_str, ret = self._parse_function_doc()
            tokens = params_str.split(',')
            if not params_str:
                tokens = []
            else:
                tokens = params_str.split(',')
            if self.access_handle.ismethoddescriptor():
                tokens.insert(0, 'self')
            for p in tokens:
                parts = p.strip().split('=')
                yield UnresolvableParamName(self, parts[0])
                name, _, default = p.strip().partition('=')
                yield UnresolvableParamName(self, name, default)
        else:
            for signature_param in signature_params:
                yield SignatureParamName(self, signature_param)

    def get_signatures(self):
        _, return_string = self._parse_function_doc()
        return [BuiltinSignature(self, return_string)]

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())

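When a compiled function exposes no inspectable signature, the code above falls back to splitting the parameter list out of the docstring's first line. A rough standalone illustration of that idea, using a hypothetical `parse_params` helper (jedi's real `_parse_function_doc` additionally handles brackets, nesting and return annotations):

    def parse_params(doc):
        # "split(string, maxsplit=0) -> list" -> ['string', 'maxsplit=0']
        first_line = doc.splitlines()[0]
        params_str = first_line[first_line.index('(') + 1:first_line.rindex(')')]
        return [p.strip() for p in params_str.split(',') if p.strip()]

    print(parse_params("split(string, maxsplit=0) -> list"))
    # ['string', 'maxsplit=0']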
@@ -148,18 +180,41 @@ class CompiledObject(Context):
        """
        return CompiledObjectFilter(self.evaluator, self, is_instance)

    @CheckAttribute
    def py__getitem__(self, index):
        with reraise_as_evaluator(IndexError, KeyError, TypeError):
            access = self.access_handle.py__getitem__(index)
    @CheckAttribute(u'__getitem__')
    def py__simple_getitem__(self, index):
        with reraise_getitem_errors(IndexError, KeyError, TypeError):
            access = self.access_handle.py__simple_getitem__(index)
            if access is None:
                return ContextSet()
                return NO_CONTEXTS

        return ContextSet(create_from_access_path(self.evaluator, access))
        return ContextSet([create_from_access_path(self.evaluator, access)])

    @CheckAttribute
    def py__iter__(self):
        for access in self.access_handle.py__iter__list():
    def py__getitem__(self, index_context_set, contextualized_node):
        all_access_paths = self.access_handle.py__getitem__all_values()
        if all_access_paths is None:
            # This means basically that no __getitem__ has been defined on this
            # object.
            return super(CompiledObject, self).py__getitem__(index_context_set, contextualized_node)
        return ContextSet(
            create_from_access_path(self.evaluator, access)
            for access in all_access_paths
        )

    def py__iter__(self, contextualized_node=None):
        # Python iterators are a bit strange, because there's no need for
        # the __iter__ function as long as __getitem__ is defined (it will
        # just start with __getitem__(0)). This is especially true for
        # Python 2 strings, where `str.__iter__` is not even defined.
        if not self.access_handle.has_iter():
            for x in super(CompiledObject, self).py__iter__(contextualized_node):
                yield x

        access_path_list = self.access_handle.py__iter__list()
        if access_path_list is None:
            # There is no __iter__ method on this object.
            return

        for access in access_path_list:
            yield LazyKnownContext(create_from_access_path(self.evaluator, access))

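The comment above refers to CPython's legacy iteration protocol: an object with only `__getitem__` is still iterable, because `iter()` falls back to calling `__getitem__(0)`, `__getitem__(1)`, ... until `IndexError` is raised:

    class Sequence(object):
        # No __iter__ here on purpose.
        def __getitem__(self, index):
            if index >= 3:
                raise IndexError(index)
            return index * 10

    print(list(Sequence()))               # [0, 10, 20]
    print(hasattr(Sequence, '__iter__'))  # False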
    def py__name__(self):
@@ -182,22 +237,16 @@ class CompiledObject(Context):
            try:
                # TODO wtf is this? this is exactly the same as the thing
                # below. It uses getattr as well.
                self.evaluator.builtins_module.access_handle.getattr(name)
                self.evaluator.builtins_module.access_handle.getattr_paths(name)
            except AttributeError:
                continue
            else:
                bltn_obj = builtin_from_name(self.evaluator, name)
                for result in bltn_obj.execute(params):
                for result in self.evaluator.execute(bltn_obj, params):
                    yield result
        for type_ in docstrings.infer_return_types(self):
            yield type_

    def dict_values(self):
        return ContextSet.from_iterable(
            create_from_access_path(self.evaluator, access)
            for access in self.access_handle.dict_values()
        )

    def get_safe_value(self, default=_sentinel):
        try:
            return self.access_handle.get_safe_value()
@@ -215,9 +264,6 @@ class CompiledObject(Context):
    def negate(self):
        return create_from_access_path(self.evaluator, self.access_handle.negate())

    def is_super_class(self, exception):
        return self.access_handle.is_super_class(exception)


class CompiledName(AbstractNameDefinition):
    def __init__(self, evaluator, parent_context, name):
@@ -225,6 +271,10 @@ class CompiledName(AbstractNameDefinition):
        self.parent_context = parent_context
        self.string_name = name

    def _get_qualified_names(self):
        parent_qualified_names = self.parent_context.get_qualified_names()
        return parent_qualified_names + (self.string_name,)

    def __repr__(self):
        try:
            name = self.parent_context.name  # __name__ is not defined all the time
@@ -234,18 +284,20 @@ class CompiledName(AbstractNameDefinition):

    @property
    def api_type(self):
        return next(iter(self.infer())).api_type
        api = self.infer()
        # If we can't find the type, assume it is an instance variable
        if not api:
            return "instance"
        return next(iter(api)).api_type

    @underscore_memoization
    def infer(self):
        return ContextSet(create_from_name(
        return ContextSet([_create_from_name(
            self._evaluator, self.parent_context, self.string_name
        ))
        )])


class SignatureParamName(AbstractNameDefinition):
    api_type = u'param'

class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
    def __init__(self, compiled_obj, signature_param):
        self.parent_context = compiled_obj.parent_context
        self._signature_param = signature_param
@@ -254,6 +306,14 @@ class SignatureParamName(AbstractNameDefinition):
    def string_name(self):
        return self._signature_param.name

    def to_string(self):
        s = self._kind_string() + self.string_name
        if self._signature_param.has_annotation:
            s += ': ' + self._signature_param.annotation_string
        if self._signature_param.has_default:
            s += '=' + self._signature_param.default_string
        return s

    def get_kind(self):
        return getattr(Parameter, self._signature_param.kind_name)

@@ -263,27 +323,32 @@ class SignatureParamName(AbstractNameDefinition):
    def infer(self):
        p = self._signature_param
        evaluator = self.parent_context.evaluator
        contexts = ContextSet()
        contexts = NO_CONTEXTS
        if p.has_default:
            contexts = ContextSet(create_from_access_path(evaluator, p.default))
            contexts = ContextSet([create_from_access_path(evaluator, p.default)])
        if p.has_annotation:
            annotation = create_from_access_path(evaluator, p.annotation)
            contexts |= annotation.execute_evaluated()
        return contexts


class UnresolvableParamName(AbstractNameDefinition):
    api_type = u'param'

    def __init__(self, compiled_obj, name):
class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
    def __init__(self, compiled_obj, name, default):
        self.parent_context = compiled_obj.parent_context
        self.string_name = name
        self._default = default

    def get_kind(self):
        return Parameter.POSITIONAL_ONLY

    def to_string(self):
        string = self.string_name
        if self._default:
            string += '=' + self._default
        return string

    def infer(self):
        return ContextSet()
        return NO_CONTEXTS


class CompiledContextName(ContextNameMixin, AbstractNameDefinition):
@@ -304,7 +369,7 @@ class EmptyCompiledName(AbstractNameDefinition):
        self.string_name = name

    def infer(self):
        return ContextSet()
        return NO_CONTEXTS


class CompiledObjectFilter(AbstractFilter):
@@ -313,7 +378,7 @@ class CompiledObjectFilter(AbstractFilter):
    def __init__(self, evaluator, compiled_object, is_instance=False):
        self._evaluator = evaluator
        self._compiled_object = compiled_object
        self._is_instance = is_instance
        self.is_instance = is_instance

    def get(self, name):
        return self._get(
@@ -337,7 +402,7 @@ class CompiledObjectFilter(AbstractFilter):
        if is_descriptor or not has_attribute:
            return [self._get_cached_name(name, is_empty=True)]

        if self._is_instance and name not in dir_callback():
        if self.is_instance and name not in dir_callback():
            return []
        return [self._get_cached_name(name)]

@@ -360,7 +425,7 @@ class CompiledObjectFilter(AbstractFilter):
        )

        # ``dir`` doesn't include the type names.
        if not self._is_instance and needs_type_completions:
        if not self.is_instance and needs_type_completions:
            for filter in builtin_from_name(self._evaluator, u'type').get_filters():
                names += filter.values()
        return names
@@ -368,6 +433,9 @@ class CompiledObjectFilter(AbstractFilter):
    def _create_name(self, name):
        return self.name_class(self._evaluator, self._compiled_object, name)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._compiled_object)


docstr_defaults = {
    'floating point number': u'float',
@@ -439,45 +507,35 @@ def _parse_function_doc(doc):
    return param_str, ret


def create_from_name(evaluator, compiled_object, name):
    faked = None
    try:
        faked = fake.get_faked_with_parent_context(compiled_object, name)
    except fake.FakeDoesNotExist:
        pass

    access = compiled_object.access_handle.getattr(name, default=None)
def _create_from_name(evaluator, compiled_object, name):
    access_paths = compiled_object.access_handle.getattr_paths(name, default=None)
    parent_context = compiled_object
    if parent_context.is_class():
        parent_context = parent_context.parent_context
    return create_cached_compiled_object(
        evaluator, access, parent_context=parent_context, faked=faked
    )

    context = None
    for access_path in access_paths:
        context = create_cached_compiled_object(
            evaluator, access_path, parent_context=context
        )
    return context


def _normalize_create_args(func):
    """The cache doesn't care about keyword vs. normal args."""
    def wrapper(evaluator, obj, parent_context=None, faked=None):
        return func(evaluator, obj, parent_context, faked)
    def wrapper(evaluator, obj, parent_context=None):
        return func(evaluator, obj, parent_context)
    return wrapper


def create_from_access_path(evaluator, access_path):
    parent_context = None
    for name, access in access_path.accesses:
        try:
            if parent_context is None:
                faked = fake.get_faked_module(evaluator, access_path.accesses[0][0])
            else:
                faked = fake.get_faked_with_parent_context(parent_context, name)
        except fake.FakeDoesNotExist:
            faked = None

        parent_context = create_cached_compiled_object(evaluator, access, parent_context, faked)
        parent_context = create_cached_compiled_object(evaluator, access, parent_context)
    return parent_context


@_normalize_create_args
@evaluator_function_cache()
def create_cached_compiled_object(evaluator, access_handle, parent_context, faked):
    return CompiledObject(evaluator, access_handle, parent_context, faked)
def create_cached_compiled_object(evaluator, access_handle, parent_context):
    return CompiledObject(evaluator, access_handle, parent_context)

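`_normalize_create_args` exists so positional and keyword invocations produce the same cache key for the memoizing decorator underneath it. A minimal sketch of the idea, assuming a plain dict cache instead of jedi's `evaluator_function_cache`:

    import functools

    def normalize_args(func):
        # Collapse keyword arguments into positional ones so the memo
        # below sees one canonical call signature.
        @functools.wraps(func)
        def wrapper(evaluator, obj, parent_context=None):
            return func(evaluator, obj, parent_context)
        return wrapper

    def memoize(func):
        cache = {}
        @functools.wraps(func)
        def wrapper(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]
        return wrapper

    @normalize_args
    @memoize
    def create(evaluator, obj, parent_context):
        return ('compiled-object', evaluator, obj, parent_context)

    assert create('ev', 'o') is create('ev', 'o', parent_context=None)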
@@ -1,82 +0,0 @@
"""
Loads functions that are mixed in to the standard library. E.g. builtins are
written in C (binaries), but my autocompletion only understands Python code. By
mixing in Python code, the autocompletion should work much better for builtins.
"""

import os
from itertools import chain

from jedi._compatibility import unicode

fake_modules = {}


def _get_path_dict():
    path = os.path.dirname(os.path.abspath(__file__))
    base_path = os.path.join(path, 'fake')
    dct = {}
    for file_name in os.listdir(base_path):
        if file_name.endswith('.pym'):
            dct[file_name[:-4]] = os.path.join(base_path, file_name)
    return dct


_path_dict = _get_path_dict()


class FakeDoesNotExist(Exception):
    pass


def _load_faked_module(evaluator, module_name):
    try:
        return fake_modules[module_name]
    except KeyError:
        pass

    check_module_name = module_name
    if module_name == '__builtin__' and evaluator.environment.version_info.major == 2:
        check_module_name = 'builtins'

    try:
        path = _path_dict[check_module_name]
    except KeyError:
        fake_modules[module_name] = None
        return

    with open(path) as f:
        source = f.read()

    fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source))

    if check_module_name != module_name:
        # There are two implementations of `open` for either python 2/3.
        # -> Rename the python2 version (look at `fake/builtins.pym`).
        open_func = _search_scope(m, 'open')
        open_func.children[1].value = 'open_python3'
        open_func = _search_scope(m, 'open_python2')
        open_func.children[1].value = 'open'
    return m


def _search_scope(scope, obj_name):
    for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
        if s.name.value == obj_name:
            return s


def get_faked_with_parent_context(parent_context, name):
    if parent_context.tree_node is not None:
        # Try to search in already clearly defined stuff.
        found = _search_scope(parent_context.tree_node, name)
        if found is not None:
            return found
    raise FakeDoesNotExist


def get_faked_module(evaluator, string_name):
    module = _load_faked_module(evaluator, string_name)
    if module is None:
        raise FakeDoesNotExist
    return module
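`_search_scope` walks the top-level classdefs and funcdefs of a parsed `.pym` module by name. A standalone sketch of the same lookup, using `parso.parse` directly instead of the evaluator's grammar:

    import parso
    from itertools import chain

    source = '''
    class Connection():
        pass

    def connect(database):
        return Connection()
    '''

    module = parso.parse(source)

    def search_scope(scope, obj_name):
        for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
            if s.name.value == obj_name:
                return s

    node = search_scope(module, 'connect')
    print(node.type, node.name.value)  # funcdef connect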
@@ -1,9 +0,0 @@
class partial():
    def __init__(self, func, *args, **keywords):
        self.__func = func
        self.__args = args
        self.__keywords = keywords

    def __call__(self, *args, **kwargs):
        # TODO should be **dict(self.__keywords, **kwargs)
        return self.__func(*(self.__args + args), **self.__keywords)
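For comparison, the real `functools.partial` does merge call-time keyword arguments, which is exactly what the TODO above notes is missing from the fake:

    import functools

    def greet(greeting, name, punctuation='!'):
        return '%s, %s%s' % (greeting, name, punctuation)

    hello = functools.partial(greet, 'Hello', punctuation='.')
    print(hello('World'))                   # Hello, World.
    print(hello('World', punctuation='?'))  # Hello, World? -- kwargs merged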
@@ -1,26 +0,0 @@
def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None):
    return Connection()


class Connection():
    def cursor(self):
        return Cursor()


class Cursor():
    def cursor(self):
        return Cursor()

    def fetchone(self):
        return Row()

    def fetchmany(self, size=cursor.arraysize):
        return [self.fetchone()]

    def fetchall(self):
        return [self.fetchone()]


class Row():
    def keys(self):
        return ['']
@@ -1,99 +0,0 @@
def compile():
    class SRE_Match():
        endpos = int()
        lastgroup = int()
        lastindex = int()
        pos = int()
        string = str()
        regs = ((int(), int()),)

        def __init__(self, pattern):
            self.re = pattern

        def start(self):
            return int()

        def end(self):
            return int()

        def span(self):
            return int(), int()

        def expand(self):
            return str()

        def group(self, nr):
            return str()

        def groupdict(self):
            return {str(): str()}

        def groups(self):
            return (str(),)

    class SRE_Pattern():
        flags = int()
        groupindex = {}
        groups = int()
        pattern = str()

        def findall(self, string, pos=None, endpos=None):
            """
            findall(string[, pos[, endpos]]) --> list.
            Return a list of all non-overlapping matches of pattern in string.
            """
            return [str()]

        def finditer(self, string, pos=None, endpos=None):
            """
            finditer(string[, pos[, endpos]]) --> iterator.
            Return an iterator over all non-overlapping matches for the
            RE pattern in string. For each match, the iterator returns a
            match object.
            """
            yield SRE_Match(self)

        def match(self, string, pos=None, endpos=None):
            """
            match(string[, pos[, endpos]]) --> match object or None.
            Matches zero or more characters at the beginning of the string
            pattern
            """
            return SRE_Match(self)

        def scanner(self, string, pos=None, endpos=None):
            pass

        def search(self, string, pos=None, endpos=None):
            """
            search(string[, pos[, endpos]]) --> match object or None.
            Scan through string looking for a match, and return a corresponding
            MatchObject instance. Return None if no position in the string matches.
            """
            return SRE_Match(self)

        def split(self, string, maxsplit=0):
            """
            split(string[, maxsplit = 0]) --> list.
            Split string by the occurrences of pattern.
            """
            return [str()]

        def sub(self, repl, string, count=0):
            """
            sub(repl, string[, count = 0]) --> newstring
            Return the string obtained by replacing the leftmost non-overlapping
            occurrences of pattern in string by the replacement repl.
            """
            return str()

        def subn(self, repl, string, count=0):
            """
            subn(repl, string[, count = 0]) --> (newstring, number of subs)
            Return the tuple (new_string, number_of_subs_made) found by replacing
            the leftmost non-overlapping occurrences of pattern with the
            replacement repl.
            """
            return (str(), int())

    return SRE_Pattern()
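For reference, CPython's real match objects expose the same attributes the fake above declares:

    import re

    match = re.compile(r'(\w+) (\w+)').match('Hello World')
    print(match.group(1), match.span(), match.groups())
    # Hello (0, 11) ('Hello', 'World')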
@@ -1,9 +0,0 @@
def proxy(object, callback=None):
    return object


class ref():
    def __init__(self, object, callback=None):
        self.__object = object

    def __call__(self):
        return self.__object
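The real `weakref.ref` behaves the same way from jedi's perspective (calling it returns the referent), except that it yields `None` once the referent has been collected:

    import weakref

    class Thing(object):
        pass

    obj = Thing()
    r = weakref.ref(obj)
    print(r() is obj)  # True
    del obj
    print(r())         # None once the object has been collected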
@@ -1,277 +0,0 @@
"""
Pure Python implementation of some builtins.
This code is not going to be executed anywhere.
These implementations are not always correct, but should work as good as
possible for the auto completion.
"""


def next(iterator, default=None):
    if random.choice([0, 1]):
        if hasattr(iterator, "next"):
            return iterator.next()
        else:
            return iterator.__next__()
    else:
        if default is not None:
            return default


def iter(collection, sentinel=None):
    if sentinel:
        yield collection()
    else:
        for c in collection:
            yield c


def range(start, stop=None, step=1):
    return [0]


class file():
    def __iter__(self):
        yield ''

    def next(self):
        return ''

    def readlines(self):
        return ['']

    def __enter__(self):
        return self


class xrange():
    # Attention: this class doesn't exist in Py3k (there it is range).
    def __iter__(self):
        yield 1

    def count(self):
        return 1

    def index(self):
        return 1


def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
    import io
    return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd)


def open_python2(name, mode=None, buffering=None):
    return file(name, mode, buffering)


#--------------------------------------------------------
# descriptors
#--------------------------------------------------------
class property():
    def __init__(self, fget, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        self.__doc__ = doc

    def __get__(self, obj, cls):
        return self.fget(obj)

    def __set__(self, obj, value):
        self.fset(obj, value)

    def __delete__(self, obj):
        self.fdel(obj)

    def setter(self, func):
        self.fset = func
        return self

    def getter(self, func):
        self.fget = func
        return self

    def deleter(self, func):
        self.fdel = func
        return self


class staticmethod():
    def __init__(self, func):
        self.__func = func

    def __get__(self, obj, cls):
        return self.__func


class classmethod():
    def __init__(self, func):
        self.__func = func

    def __get__(self, obj, cls):
        def _method(*args, **kwargs):
            return self.__func(cls, *args, **kwargs)
        return _method


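These fakes mirror the behaviour of the real descriptors, which can be checked directly against the actual builtins:

    class Circle(object):
        def __init__(self, radius):
            self._radius = radius

        @property
        def radius(self):
            return self._radius

        @classmethod
        def unit(cls):
            return cls(1)

        @staticmethod
        def describe():
            return 'a circle'

    print(Circle(2).radius)      # 2, via property.__get__
    print(Circle.unit().radius)  # 1, cls bound by classmethod.__get__
    print(Circle.describe())     # 'a circle', via staticmethod.__get__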
#--------------------------------------------------------
# array stuff
#--------------------------------------------------------
class list():
    def __init__(self, iterable=[]):
        self.__iterable = []
        for i in iterable:
            self.__iterable += [i]

    def __iter__(self):
        for i in self.__iterable:
            yield i

    def __getitem__(self, y):
        return self.__iterable[y]

    def pop(self):
        return self.__iterable[int()]


class tuple():
    def __init__(self, iterable=[]):
        self.__iterable = []
        for i in iterable:
            self.__iterable += [i]

    def __iter__(self):
        for i in self.__iterable:
            yield i

    def __getitem__(self, y):
        return self.__iterable[y]

    def index(self):
        return 1

    def count(self):
        return 1


class set():
    def __init__(self, iterable=[]):
        self.__iterable = iterable

    def __iter__(self):
        for i in self.__iterable:
            yield i

    def pop(self):
        return list(self.__iterable)[-1]

    def copy(self):
        return self

    def difference(self, other):
        return self - other

    def intersection(self, other):
        return self & other

    def symmetric_difference(self, other):
        return self ^ other

    def union(self, other):
        return self | other


class frozenset():
    def __init__(self, iterable=[]):
        self.__iterable = iterable

    def __iter__(self):
        for i in self.__iterable:
            yield i

    def copy(self):
        return self


class dict():
    def __init__(self, **elements):
        self.__elements = elements

    def clear(self):
        # has a strange docstr
        pass

    def __getitem__(self, obj):
        return self.__elements[obj]

    def get(self, k, d=None):
        # TODO implement
        try:
            return self.__elements[k]
            pass
        except KeyError:
            return d

    def values(self):
        return self.__elements.values()

    def setdefault(self, k, d):
        # TODO maybe also return the content
        return d


class enumerate():
    def __init__(self, sequence, start=0):
        self.__sequence = sequence

    def __iter__(self):
        for i in self.__sequence:
            yield 1, i

    def __next__(self):
        return next(self.__iter__())

    def next(self):
        return next(self.__iter__())


class reversed():
    def __init__(self, sequence):
        self.__sequence = sequence

    def __iter__(self):
        for i in self.__sequence:
            yield i

    def __next__(self):
        return next(self.__iter__())

    def next(self):
        return next(self.__iter__())


def sorted(iterable, cmp=None, key=None, reverse=False):
    return iterable


#--------------------------------------------------------
# basic types
#--------------------------------------------------------
class int():
    def __init__(self, x, base=None):
        pass


class str():
    def __init__(self, obj):
        pass

    def strip(self):
        return str()

    def split(self):
        return [str()]

class type():
    def mro():
        return [object]
@@ -1,4 +0,0 @@
class datetime():
    @staticmethod
    def now():
        return datetime()
@@ -1,12 +0,0 @@
class TextIOWrapper():
    def __next__(self):
        return str()

    def __iter__(self):
        yield str()

    def readlines(self):
        return ['']

    def __enter__(self):
        return self
@@ -1,33 +0,0 @@
# Just copied this code from Python 3.6.

class itemgetter:
    """
    Return a callable object that fetches the given item(s) from its operand.
    After f = itemgetter(2), the call f(r) returns r[2].
    After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
    """
    __slots__ = ('_items', '_call')

    def __init__(self, item, *items):
        if not items:
            self._items = (item,)
            def func(obj):
                return obj[item]
            self._call = func
        else:
            self._items = items = (item,) + items
            def func(obj):
                return tuple(obj[i] for i in items)
            self._call = func

    def __call__(self, obj):
        return self._call(obj)

    def __repr__(self):
        return '%s.%s(%s)' % (self.__class__.__module__,
                              self.__class__.__name__,
                              ', '.join(map(repr, self._items)))

    def __reduce__(self):
        return self.__class__, self._items
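Usage of the real `operator.itemgetter`, which this copy reproduces:

    from operator import itemgetter

    row = ('jedi', 780, 'commits')
    get_count = itemgetter(1)
    get_pair = itemgetter(0, 2)
    print(get_count(row))  # 780
    print(get_pair(row))   # ('jedi', 'commits')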
@@ -1,5 +0,0 @@
def getcwd():
    return ''


def getcwdu():
    return ''
@@ -4,22 +4,28 @@ Used only for REPL Completion.
import inspect
import os
import sys

from jedi.parser_utils import get_cached_code_lines

from jedi import settings
from jedi.evaluate import compiled
from jedi.cache import underscore_memoization
from jedi.evaluate import imports
from jedi.evaluate.base_context import Context, ContextSet
from jedi.file_io import FileIO
from jedi.evaluate.base_context import ContextSet, ContextWrapper
from jedi.evaluate.helpers import SimpleGetItemNotFound
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate.compiled.getattr_static import getattr_static
from jedi.evaluate.compiled.access import compiled_objects_cache
from jedi.evaluate.compiled.access import compiled_objects_cache, \
    ALLOWED_GETITEM_TYPES
from jedi.evaluate.compiled.context import create_cached_compiled_object
from jedi.evaluate.gradual.conversion import to_stub

_sentinel = object()


class MixedObject(object):
class MixedObject(ContextWrapper):
    """
    A ``MixedObject`` is used in two ways:

@@ -36,27 +42,34 @@ class MixedObject(object):
    fewer special cases, because in Python you don't have the same freedoms
    to modify the runtime.
    """
    def __init__(self, evaluator, parent_context, compiled_object, tree_context):
        self.evaluator = evaluator
        self.parent_context = parent_context
    def __init__(self, compiled_object, tree_context):
        super(MixedObject, self).__init__(tree_context)
        self.compiled_object = compiled_object
        self._context = tree_context
        self.access_handle = compiled_object.access_handle

    # We have to overwrite everything that has to do with trailers, name
    # lookups and filters to make it possible to route name lookups towards
    # compiled objects and the rest towards tree node contexts.
    def py__getattribute__(*args, **kwargs):
        return Context.py__getattribute__(*args, **kwargs)

    def get_filters(self, *args, **kwargs):
        yield MixedObjectFilter(self.evaluator, self)

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self.access_handle.get_repr())
    def py__call__(self, arguments):
        return (to_stub(self._wrapped_context) or self._wrapped_context).py__call__(arguments)

    def __getattr__(self, name):
        return getattr(self._context, name)
    def get_safe_value(self, default=_sentinel):
        if default is _sentinel:
            return self.compiled_object.get_safe_value()
        else:
            return self.compiled_object.get_safe_value(default)

    def py__simple_getitem__(self, index):
        python_object = self.compiled_object.access_handle.access._obj
        if type(python_object) in ALLOWED_GETITEM_TYPES:
            return self.compiled_object.py__simple_getitem__(index)
        raise SimpleGetItemNotFound

    def __repr__(self):
        return '<%s: %s>' % (
            type(self).__name__,
            self.access_handle.get_repr()
        )


class MixedName(compiled.CompiledName):
@@ -78,12 +91,21 @@ class MixedName(compiled.CompiledName):

    @underscore_memoization
    def infer(self):
        access_handle = self.parent_context.access_handle
        # TODO use logic from compiled.CompiledObjectFilter
        access_handle = access_handle.getattr(self.string_name, default=None)
        return ContextSet(
            _create(self._evaluator, access_handle, parent_context=self.parent_context)
        access_paths = self.parent_context.access_handle.getattr_paths(
            self.string_name,
            default=None
        )
        assert len(access_paths)
        contexts = [None]
        for access in access_paths:
            contexts = ContextSet.from_sets(
                _create(self._evaluator, access, parent_context=c)
                if c is None or isinstance(c, MixedObject)
                else ContextSet({create_cached_compiled_object(c.evaluator, access, c)})
                for c in contexts
            )
        return contexts

    @property
    def api_type(self):
@@ -93,18 +115,10 @@ class MixedName(compiled.CompiledName):
class MixedObjectFilter(compiled.CompiledObjectFilter):
    name_class = MixedName

    def __init__(self, evaluator, mixed_object, is_instance=False):
        super(MixedObjectFilter, self).__init__(
            evaluator, mixed_object, is_instance)
        self._mixed_object = mixed_object

    #def _create(self, name):
        #return MixedName(self._evaluator, self._compiled_object, name)


@evaluator_function_cache()
def _load_module(evaluator, path):
    module_node = evaluator.grammar.parse(
    module_node = evaluator.parse(
        path=path,
        cache=True,
        diff_cache=settings.fast_parser,
@@ -118,6 +132,9 @@ def _load_module(evaluator, path):

def _get_object_to_check(python_object):
    """Check if inspect.getfile has a chance to find the source."""
    if sys.version_info[0] > 2:
        python_object = inspect.unwrap(python_object)

    if (inspect.ismodule(python_object) or
            inspect.isclass(python_object) or
            inspect.ismethod(python_object) or
@@ -133,10 +150,7 @@ def _get_object_to_check(python_object):
    raise TypeError  # Prevents computation of `repr` within inspect.


def _find_syntax_node_name(evaluator, access_handle):
    # TODO accessing this is bad, but it probably doesn't matter that much,
    # because we're working with interpreters only here.
    python_object = access_handle.access._obj
def _find_syntax_node_name(evaluator, python_object):
    try:
        python_object = _get_object_to_check(python_object)
        path = inspect.getsourcefile(python_object)
@@ -147,6 +161,7 @@ def _find_syntax_node_name(evaluator, access_handle):
        # The path might not exist or be e.g. <stdin>.
        return None

    file_io = FileIO(path)
    module_node = _load_module(evaluator, path)

    if inspect.ismodule(python_object):
@@ -154,7 +169,7 @@ def _find_syntax_node_name(evaluator, access_handle):
        # a way to write a module in a module in Python (and also __name__ can
        # be something like ``email.utils``).
        code_lines = get_cached_code_lines(evaluator.grammar, path)
        return module_node, module_node, path, code_lines
        return module_node, module_node, file_io, code_lines

    try:
        name_str = python_object.__name__
@@ -167,7 +182,13 @@ def _find_syntax_node_name(evaluator, access_handle):

    # Doesn't always work (e.g. os.stat_result)
    names = module_node.get_used_names().get(name_str, [])
    names = [n for n in names if n.is_definition()]
    # Only functions and classes are relevant. If a name e.g. points to an
    # import, it's probably a builtin (like collections.deque) and needs to be
    # ignored.
    names = [
        n for n in names
        if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n
    ]
    if not names:
        return None

@@ -193,46 +214,62 @@ def _find_syntax_node_name(evaluator, access_handle):
    # completions at some points but will lead to mostly correct type
    # inference, because people tend to define a public name in a module only
    # once.
    return module_node, names[-1].parent, path, code_lines
    return module_node, names[-1].parent, file_io, code_lines


@compiled_objects_cache('mixed_cache')
def _create(evaluator, access_handle, parent_context, *args):
    compiled_object = create_cached_compiled_object(
        evaluator, access_handle, parent_context=parent_context.compiled_object)

    result = _find_syntax_node_name(evaluator, access_handle)
    if result is None:
        return compiled_object

    module_node, tree_node, path, code_lines = result

    if parent_context.tree_node.get_root_node() == module_node:
        module_context = parent_context.get_root_context()
    else:
        module_context = ModuleContext(
            evaluator, module_node,
            path=path,
            code_lines=code_lines,
        )
        # TODO this __name__ is probably wrong.
        name = compiled_object.get_root_context().py__name__()
        if name is not None:
            imports.add_module_to_cache(evaluator, name, module_context)

    tree_context = module_context.create_context(
        tree_node,
        node_is_context=True,
        node_is_object=True
    )
    if tree_node.type == 'classdef':
        if not access_handle.is_class():
            # Is an instance, not a class.
            tree_context, = tree_context.execute_evaluated()

    return MixedObject(
        evaluator,
        parent_context,
        compiled_object,
        tree_context=tree_context
        access_handle,
        parent_context=parent_context and parent_context.compiled_object
    )

    # TODO accessing this is bad, but it probably doesn't matter that much,
    # because we're working with interpreters only here.
    python_object = access_handle.access._obj
    result = _find_syntax_node_name(evaluator, python_object)
    if result is None:
        # TODO Care about generics from stuff like `[1]` and don't return like this.
        if type(python_object) in (dict, list, tuple):
            return ContextSet({compiled_object})

        tree_contexts = to_stub(compiled_object)
        if not tree_contexts:
            return ContextSet({compiled_object})
    else:
        module_node, tree_node, file_io, code_lines = result

        if parent_context is None:
            # TODO this __name__ is probably wrong.
            name = compiled_object.get_root_context().py__name__()
            string_names = tuple(name.split('.'))
            module_context = ModuleContext(
                evaluator, module_node,
                file_io=file_io,
                string_names=string_names,
                code_lines=code_lines,
                is_package=hasattr(compiled_object, 'py__path__'),
            )
            if name is not None:
                evaluator.module_cache.add(string_names, ContextSet([module_context]))
        else:
            assert parent_context.tree_node.get_root_node() == module_node
            module_context = parent_context.get_root_context()

        tree_contexts = ContextSet({
            module_context.create_context(
                tree_node,
                node_is_context=True,
                node_is_object=True
            )
        })
        if tree_node.type == 'classdef':
            if not access_handle.is_class():
                # Is an instance, not a class.
                tree_contexts = tree_contexts.execute_evaluated()

    return ContextSet(
        MixedObject(compiled_object, tree_context=tree_context)
        for tree_context in tree_contexts
    )

@@ -12,7 +12,6 @@ import sys
import subprocess
import socket
import errno
import weakref
import traceback
from functools import partial
from threading import Thread
@@ -22,7 +21,7 @@ except ImportError:
    from Queue import Queue, Empty  # python 2.7

from jedi._compatibility import queue, is_py3, force_unicode, \
    pickle_dump, pickle_load, GeneralizedPopen, print_to_stderr
    pickle_dump, pickle_load, GeneralizedPopen, weakref
from jedi import debug
from jedi.cache import memoize_method
from jedi.evaluate.compiled.subprocess import functions
@@ -37,7 +36,6 @@ _MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
def _enqueue_output(out, queue):
    for line in iter(out.readline, b''):
        queue.put(line)
    out.close()


def _add_stderr_to_debug(stderr_queue):
@@ -56,6 +54,22 @@ def _get_function(name):
    return getattr(functions, name)


def _cleanup_process(process, thread):
    try:
        process.kill()
        process.wait()
    except OSError:
        # Raised if the process is already killed.
        pass
    thread.join()
    for stream in [process.stdin, process.stdout, process.stderr]:
        try:
            stream.close()
        except OSError:
            # Raised if the stream is broken.
            pass


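`_cleanup_process` is later registered with `weakref.finalize` (see the hunk below), which guarantees the callback runs at most once, either when the owner is garbage collected or at interpreter exit. A minimal sketch of the pattern, assuming a Unix `cat` binary purely for the demo; `weakref.finalize` keeps no strong reference to the owner, so no `__del__` is needed:

    import weakref
    import subprocess

    def _cleanup(process):
        process.kill()
        process.wait()

    class Owner(object):
        def __init__(self):
            self._process = subprocess.Popen(
                ['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            self._finalizer = weakref.finalize(self, _cleanup, self._process)

    owner = Owner()
    del owner  # _cleanup runs here (or at exit, whichever comes first)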
class _EvaluatorProcess(object):
    def __init__(self, evaluator):
        self._evaluator_weakref = weakref.ref(evaluator)
@@ -145,6 +159,7 @@ class CompiledSubprocess(object):
    def __init__(self, executable):
        self._executable = executable
        self._evaluator_deletion_queue = queue.deque()
        self._cleanup_callable = lambda: None

    def __repr__(self):
        pid = os.getpid()
@@ -156,9 +171,8 @@ class CompiledSubprocess(object):
            pid,
        )

    @property
    @memoize_method
    def _process(self):
    def _get_process(self):
        debug.dbg('Start environment subprocess %s', self._executable)
        parso_path = sys.modules['parso'].__file__
        args = (
@@ -183,6 +197,12 @@ class CompiledSubprocess(object):
        )
        t.daemon = True
        t.start()
        # Ensure the subprocess is properly cleaned up when the object
        # is garbage collected.
        self._cleanup_callable = weakref.finalize(self,
                                                  _cleanup_process,
                                                  process,
                                                  t)
        return process

    def run(self, evaluator, function, args=(), kwargs={}):
@@ -203,18 +223,7 @@ class CompiledSubprocess(object):

    def _kill(self):
        self.is_crashed = True
        try:
            self._process.kill()
            self._process.wait()
        except (AttributeError, TypeError):
            # If the Python process is terminating, it will remove some modules
            # earlier than others and in general it's unclear how to deal with
            # that so we just ignore the exceptions here.
            pass

    def __del__(self):
        if not self.is_crashed:
            self._kill()
        self._cleanup_callable()

    def _send(self, evaluator_id, function, args=(), kwargs={}):
        if self.is_crashed:
@@ -226,7 +235,7 @@ class CompiledSubprocess(object):

        data = evaluator_id, function, args, kwargs
        try:
            pickle_dump(data, self._process.stdin, self._pickle_protocol)
            pickle_dump(data, self._get_process().stdin, self._pickle_protocol)
        except (socket.error, IOError) as e:
            # Once Python2 will be removed we can just use `BrokenPipeError`.
            # Also, somehow in windows it returns EINVAL instead of EPIPE if
@@ -239,10 +248,10 @@ class CompiledSubprocess(object):
            % self._executable)

        try:
            is_exception, traceback, result = pickle_load(self._process.stdout)
            is_exception, traceback, result = pickle_load(self._get_process().stdout)
        except EOFError as eof_error:
            try:
                stderr = self._process.stderr.read().decode('utf-8', 'replace')
                stderr = self._get_process().stderr.read().decode('utf-8', 'replace')
            except Exception as exc:
                stderr = '<empty/not available (%r)>' % exc
            self._kill()

@@ -1,8 +1,9 @@
from __future__ import print_function
import sys
import os

from jedi._compatibility import find_module, cast_path, force_unicode, \
    iter_modules, all_suffixes, print_to_stderr
    iter_modules, all_suffixes
from jedi.evaluate.compiled import access
from jedi import parser_utils

@@ -20,52 +21,24 @@ def get_compiled_method_return(evaluator, id, attribute, *args, **kwargs):
    return getattr(handle.access, attribute)(*args, **kwargs)


def get_special_object(evaluator, identifier):
    return access.get_special_object(evaluator, identifier)


def create_simple_object(evaluator, obj):
    return access.create_access_path(evaluator, obj)


def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs):
    """
    Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]]
    """
    if sys_path is not None:
        sys.path, temp = sys_path, sys.path
    try:
        module_file, module_path, is_pkg = find_module(full_name=full_name, **kwargs)
        return find_module(full_name=full_name, **kwargs)
    except ImportError:
        return None, None, None
        return None, None
    finally:
        if sys_path is not None:
            sys.path = temp

    code = None
    if is_pkg:
        # In this case, we don't have a file yet. Search for the
        # __init__ file.
        if module_path.endswith(('.zip', '.egg')):
            code = module_file.loader.get_source(full_name)
        else:
            module_path = _get_init_path(module_path)
    elif module_file:
        if module_path.endswith(('.zip', '.egg')):
            # Unfortunately we are reading unicode here already, not bytes.
            # It seems however hard to get bytes, because the zip importer
            # logic just unpacks the zip file and returns a file descriptor
            # that we cannot as easily access. Therefore we just read it as
            # a string.
            code = module_file.read()
        else:
            # Read the code with a binary file, because the binary file
            # might not be proper unicode. This is handled by the parser
            # wrapper.
            with open(module_path, 'rb') as f:
                code = f.read()

        module_file.close()

    return code, cast_path(module_path), is_pkg


def list_module_names(evaluator, search_path):
    return [
@@ -90,7 +63,7 @@ def _test_print(evaluator, stderr=None, stdout=None):
    Force some prints in the subprocesses. This exists for unit tests.
    """
    if stderr is not None:
        print_to_stderr(stderr)
        print(stderr, file=sys.stderr)
        sys.stderr.flush()
    if stdout is not None:
        print(stdout)

@@ -1,5 +1,6 @@
from jedi.evaluate.context.module import ModuleContext
from jedi.evaluate.context.klass import ClassContext
from jedi.evaluate.context.function import FunctionContext, FunctionExecutionContext
from jedi.evaluate.context.function import FunctionContext, \
    MethodContext, FunctionExecutionContext
from jedi.evaluate.context.instance import AnonymousInstance, BoundMethod, \
    CompiledInstance, AbstractInstanceContext, TreeInstance

@@ -1,38 +0,0 @@
from jedi.evaluate.filters import publish_method, BuiltinOverwrite
from jedi.evaluate.base_context import ContextSet


class AsyncBase(BuiltinOverwrite):
    def __init__(self, evaluator, func_execution_context):
        super(AsyncBase, self).__init__(evaluator)
        self.func_execution_context = func_execution_context

    @property
    def name(self):
        return self.get_object().name

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self.func_execution_context)


class Coroutine(AsyncBase):
    special_object_identifier = u'COROUTINE'

    @publish_method('__await__')
    def _await(self):
        return ContextSet(CoroutineWrapper(self.evaluator, self.func_execution_context))


class CoroutineWrapper(AsyncBase):
    special_object_identifier = u'COROUTINE_WRAPPER'

    def py__stop_iteration_returns(self):
        return self.func_execution_context.get_return_values()


class AsyncGenerator(AsyncBase):
    """Handling of `yield` functions."""
    special_object_identifier = u'ASYNC_GENERATOR'

    def py__aiter__(self):
        return self.func_execution_context.get_yield_lazy_contexts(is_async=True)
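The special objects these wrappers stand in for correspond to real runtime types, which can be checked directly on Python 3.6+:

    import types

    async def coro():
        pass

    async def agen():
        yield 1

    c = coro()
    print(isinstance(c, types.CoroutineType))  # True
    print(type(c.__await__()))                 # <class 'coroutine_wrapper'>
    c.close()  # prevent a ResourceWarning, as in access.py above

    print(isinstance(agen(), types.AsyncGeneratorType))  # True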
@@ -6,20 +6,20 @@ from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
from jedi.evaluate import compiled
from jedi.evaluate import recursion
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate import flow_analysis
from jedi.evaluate import helpers
from jedi.evaluate.signature import TreeSignature
from jedi.evaluate.arguments import AnonymousArguments
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
    ContextName, AbstractNameDefinition, ParamName
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter
from jedi.evaluate.names import ContextName, AbstractNameDefinition, ParamName
from jedi.evaluate.base_context import ContextualizedNode, NO_CONTEXTS, \
    ContextSet, TreeContext
    ContextSet, TreeContext, ContextWrapper
from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \
    LazyTreeContext
from jedi.evaluate.context import iterable
from jedi.evaluate.context import asynchronous
from jedi import parser_utils
from jedi.evaluate.parser_cache import get_yield_exprs
from jedi.evaluate.helpers import contexts_from_qualified_names


class LambdaName(AbstractNameDefinition):
@@ -35,13 +35,27 @@ class LambdaName(AbstractNameDefinition):
        return self._lambda_context.tree_node.start_pos

    def infer(self):
        return ContextSet(self._lambda_context)
        return ContextSet([self._lambda_context])


class AbstractFunction(TreeContext):
class FunctionAndClassBase(TreeContext):
    def get_qualified_names(self):
        if self.parent_context.is_class():
            n = self.parent_context.get_qualified_names()
            if n is None:
                # This means that the parent class lives within a function.
                return None
            return n + (self.py__name__(),)
        elif self.parent_context.is_module():
            return (self.py__name__(),)
        else:
            return None


class FunctionMixin(object):
    api_type = u'function'

    def get_filters(self, search_global, until_position=None, origin_scope=None):
    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
        if search_global:
            yield ParserTreeFilter(
                self.evaluator,
@@ -50,9 +64,17 @@ class AbstractFunction(TreeContext):
                origin_scope=origin_scope
            )
        else:
            scope = self.py__class__()
            for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
                yield filter
            cls = self.py__class__()
            for instance in cls.execute_evaluated():
                for filter in instance.get_filters(search_global=False, origin_scope=origin_scope):
                    yield filter

    def py__get__(self, instance, class_context):
        from jedi.evaluate.context.instance import BoundMethod
        if instance is None:
            # Calling the Foo.bar results in the original bar function.
            return ContextSet([self])
        return ContextSet([BoundMethod(instance, self)])

    def get_param_names(self):
        function_execution = self.get_function_execution()
@@ -65,51 +87,12 @@ class AbstractFunction(TreeContext):
        return LambdaName(self)
        return ContextName(self, self.tree_node.name)

    def get_function_execution(self, arguments=None):
        raise NotImplementedError

    def py__call__(self, arguments):
        function_execution = self.get_function_execution(arguments)
        return self.infer_function_execution(function_execution)

    def infer_function_execution(self, function_execution):
        """
        Created to be used by inheritance.
        """
        is_coroutine = self.tree_node.parent.type == 'async_stmt'
        is_generator = bool(get_yield_exprs(self.evaluator, self.tree_node))

        if is_coroutine:
            if is_generator:
                if self.evaluator.environment.version_info < (3, 6):
                    return NO_CONTEXTS
                return ContextSet(asynchronous.AsyncGenerator(self.evaluator, function_execution))
            else:
                if self.evaluator.environment.version_info < (3, 5):
                    return NO_CONTEXTS
                return ContextSet(asynchronous.Coroutine(self.evaluator, function_execution))
        else:
            if is_generator:
                return ContextSet(iterable.Generator(self.evaluator, function_execution))
            else:
                return function_execution.get_return_values()

    def py__name__(self):
        return self.name.string_name


class FunctionContext(use_metaclass(CachedMetaClass, AbstractFunction)):
    """
    Needed because of decorators. Decorators are evaluated here.
    """
    @classmethod
    def from_context(cls, context, tree_node):
        from jedi.evaluate.context import AbstractInstanceContext

        while context.is_class() or isinstance(context, AbstractInstanceContext):
            context = context.parent_context

        return cls(context.evaluator, parent_context=context, tree_node=tree_node)
    def py__call__(self, arguments):
        function_execution = self.get_function_execution(arguments)
        return function_execution.infer()

    def get_function_execution(self, arguments=None):
        if arguments is None:
@@ -117,8 +100,72 @@ class FunctionContext(use_metaclass(CachedMetaClass, AbstractFunction)):

        return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments)

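`infer_function_execution` above branches on async-ness and on the presence of `yield`. The same four-way classification is observable at runtime via `inspect`:

    import inspect

    def plain(): return 1
    def gen(): yield 1
    async def coro(): return 1
    async def agen(): yield 1

    for f in (plain, gen, coro, agen):
        print(f.__name__,
              inspect.isgeneratorfunction(f),
              inspect.iscoroutinefunction(f),
              inspect.isasyncgenfunction(f))
    # plain False False False
    # gen   True  False False
    # coro  False True  False
    # agen  False False True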
class FunctionContext(use_metaclass(CachedMetaClass, FunctionMixin, FunctionAndClassBase)):
|
||||
"""
|
||||
Needed because of decorators. Decorators are evaluated here.
|
||||
"""
|
||||
def is_function(self):
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def from_context(cls, context, tree_node):
|
||||
def create(tree_node):
|
||||
if context.is_class():
|
||||
return MethodContext(
|
||||
context.evaluator,
|
||||
context,
|
||||
parent_context=parent_context,
|
||||
tree_node=tree_node
|
||||
)
|
||||
else:
|
||||
return cls(
|
||||
context.evaluator,
|
||||
parent_context=parent_context,
|
||||
tree_node=tree_node
|
||||
)
|
||||
|
||||
overloaded_funcs = list(_find_overload_functions(context, tree_node))
|
||||
|
||||
parent_context = context
|
||||
while parent_context.is_class() or parent_context.is_instance():
|
||||
parent_context = parent_context.parent_context
|
||||
|
||||
function = create(tree_node)
|
||||
|
||||
if overloaded_funcs:
|
||||
return OverloadedFunctionContext(
|
||||
function,
|
||||
[create(f) for f in overloaded_funcs]
|
||||
)
|
||||
return function
|
||||
|
||||
def py__class__(self):
|
||||
return compiled.get_special_object(self.evaluator, u'FUNCTION_CLASS')
|
||||
c, = contexts_from_qualified_names(self.evaluator, u'types', u'FunctionType')
|
||||
return c
|
||||
|
||||
def get_default_param_context(self):
|
||||
return self.parent_context
|
||||
|
||||
def get_signatures(self):
|
||||
return [TreeSignature(self)]
|
||||
|
||||
|
||||
class MethodContext(FunctionContext):
|
||||
def __init__(self, evaluator, class_context, *args, **kwargs):
|
||||
super(MethodContext, self).__init__(evaluator, *args, **kwargs)
|
||||
self.class_context = class_context
|
||||
|
||||
def get_default_param_context(self):
|
||||
return self.class_context
|
||||
|
||||
def get_qualified_names(self):
|
||||
# Need to implement this, because the parent context of a method
|
||||
# context is not the class context but the module.
|
||||
names = self.class_context.get_qualified_names()
|
||||
if names is None:
|
||||
return None
|
||||
return names + (self.py__name__(),)
|
||||
|
||||
|
||||
class FunctionExecutionContext(TreeContext):
|
||||
@@ -153,8 +200,13 @@ class FunctionExecutionContext(TreeContext):
            returns = get_yield_exprs(self.evaluator, funcdef)
        else:
            returns = funcdef.iter_return_stmts()
        context_set = docstrings.infer_return_types(self.function_context)
        context_set |= pep0484.infer_return_types(self.function_context)
        from jedi.evaluate.gradual.annotation import infer_return_types
        context_set = infer_return_types(self)
        if context_set:
            # If there are annotations, prefer them over anything else.
            # This will make it faster.
            return context_set
        context_set |= docstrings.infer_return_types(self.function_context)

        for r in returns:
            check = flow_analysis.reachability_check(self, funcdef, r)
@@ -171,7 +223,7 @@ class FunctionExecutionContext(TreeContext):
                    children = r.children
                except AttributeError:
                    ctx = compiled.builtin_from_name(self.evaluator, u'None')
                    context_set |= ContextSet(ctx)
                    context_set |= ContextSet([ctx])
                else:
                    context_set |= self.eval_node(children[1])
                if check is flow_analysis.REACHABLE:
@@ -243,11 +295,178 @@ class FunctionExecutionContext(TreeContext):
            for result in self._get_yield_lazy_context(yield_in_same_for_stmt):
                yield result

    def get_filters(self, search_global, until_position=None, origin_scope=None):
    def merge_yield_contexts(self, is_async=False):
        return ContextSet.from_sets(
            lazy_context.infer()
            for lazy_context in self.get_yield_lazy_contexts()
        )

    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
        yield self.function_execution_filter(self.evaluator, self,
                                             until_position=until_position,
                                             origin_scope=origin_scope)

    @evaluator_method_cache()
    def get_executed_params(self):
        return self.var_args.get_executed_params(self)
    def get_executed_params_and_issues(self):
        return self.var_args.get_executed_params_and_issues(self)

    def matches_signature(self):
        executed_params, issues = self.get_executed_params_and_issues()
        if issues:
            return False

        matches = all(executed_param.matches_signature()
                      for executed_param in executed_params)
        if debug.enable_notice:
            signature = parser_utils.get_call_signature(self.tree_node)
            if matches:
                debug.dbg("Overloading match: %s@%s (%s)",
                          signature, self.tree_node.start_pos[0], self.var_args, color='BLUE')
            else:
                debug.dbg("Overloading no match: %s@%s (%s)",
                          signature, self.tree_node.start_pos[0], self.var_args, color='BLUE')
        return matches

    def infer(self):
        """
        Created to be used by inheritance.
        """
        evaluator = self.evaluator
        is_coroutine = self.tree_node.parent.type in ('async_stmt', 'async_funcdef')
        is_generator = bool(get_yield_exprs(evaluator, self.tree_node))
        from jedi.evaluate.gradual.typing import GenericClass

        if is_coroutine:
            if is_generator:
                if evaluator.environment.version_info < (3, 6):
                    return NO_CONTEXTS
                async_generator_classes = evaluator.typing_module \
                    .py__getattribute__('AsyncGenerator')

                yield_contexts = self.merge_yield_contexts(is_async=True)
                # The contravariant doesn't seem to be defined.
                generics = (yield_contexts.py__class__(), NO_CONTEXTS)
                return ContextSet(
                    # In Python 3.6 AsyncGenerator is still a class.
                    GenericClass(c, generics)
                    for c in async_generator_classes
                ).execute_annotation()
            else:
                if evaluator.environment.version_info < (3, 5):
                    return NO_CONTEXTS
                async_classes = evaluator.typing_module.py__getattribute__('Coroutine')
                return_contexts = self.get_return_values()
                # Only the first generic is relevant.
                generics = (return_contexts.py__class__(), NO_CONTEXTS, NO_CONTEXTS)
                return ContextSet(
                    GenericClass(c, generics) for c in async_classes
                ).execute_annotation()
        else:
            if is_generator:
                return ContextSet([iterable.Generator(evaluator, self)])
            else:
                return self.get_return_values()


class OverloadedFunctionContext(FunctionMixin, ContextWrapper):
    def __init__(self, function, overloaded_functions):
        super(OverloadedFunctionContext, self).__init__(function)
        self.overloaded_functions = overloaded_functions

    def py__call__(self, arguments):
        debug.dbg("Execute overloaded function %s", self._wrapped_context, color='BLUE')
        function_executions = []
        context_set = NO_CONTEXTS
        matched = False
        for f in self.overloaded_functions:
            function_execution = f.get_function_execution(arguments)
            function_executions.append(function_execution)
            if function_execution.matches_signature():
                matched = True
                return function_execution.infer()

        if matched:
            return context_set

        if self.evaluator.is_analysis:
            # In this case we want precision.
            return NO_CONTEXTS
        return ContextSet.from_sets(fe.infer() for fe in function_executions)

    def get_signatures(self):
        return [TreeSignature(f) for f in self.overloaded_functions]

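Taken together, `matches_signature` and the loop in `OverloadedFunctionContext.py__call__` implement first-match-wins resolution for `typing.overload` definitions. A minimal sketch of the behavior being modeled (plain Python, independent of jedi's internals; the function name is illustrative only):

from typing import Union, overload

@overload
def scale(value: int) -> int: ...
@overload
def scale(value: str) -> str: ...

def scale(value):
    # Runtime implementation; the decorated stubs above only describe
    # the signatures a checker tries in order -- first match wins.
    return value * 2

# A checker infers scale(3) -> int via the first overload and
# scale('ab') -> str via the second, mirroring the loop above.
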
def signature_matches(function_context, arguments):
    unpacked_arguments = arguments.unpack()
    key_args = {}
    for param_node in function_context.tree_node.get_params():
        while True:
            key, argument = next(unpacked_arguments, (None, None))
            if key is None or argument is None:
                break
            key_args[key] = argument
        if argument is None:
            argument = key_args.pop(param_node.name.value, None)
        if argument is None:
            # This signature has more parameters than arguments were given.
            return bool(param_node.star_count == 1)

        if param_node.annotation is not None:
            if param_node.star_count == 2:
                return False  # TODO allow this

            annotation_contexts = function_context.evaluator.eval_element(
                function_context.get_default_param_context(),
                param_node.annotation
            )
            argument_contexts = argument.infer().py__class__()
            if not any(c1.is_sub_class_of(c2)
                       for c1 in argument_contexts
                       for c2 in annotation_contexts):
                return False
    return True


def _find_overload_functions(context, tree_node):
    def _is_overload_decorated(funcdef):
        if funcdef.parent.type == 'decorated':
            decorators = funcdef.parent.children[0]
            if decorators.type == 'decorator':
                decorators = [decorators]
            else:
                decorators = decorators.children
            for decorator in decorators:
                dotted_name = decorator.children[1]
                if dotted_name.type == 'name' and dotted_name.value == 'overload':
                    # TODO check with contexts if it's the right overload
                    return True
        return False

    if tree_node.type == 'lambdef':
        return

    if _is_overload_decorated(tree_node):
        yield tree_node

    while True:
        filter = ParserTreeFilter(
            context.evaluator,
            context,
            until_position=tree_node.start_pos
        )
        names = filter.get(tree_node.name.value)
        assert isinstance(names, list)
        if not names:
            break

        found = False
        for name in names:
            funcdef = name.tree_name.parent
            if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef):
                tree_node = funcdef
                found = True
                yield funcdef

        if not found:
            break

@@ -3,46 +3,58 @@ from abc import abstractproperty
from jedi import debug
from jedi import settings
from jedi.evaluate import compiled
from jedi.evaluate import filters
from jedi.evaluate.helpers import contexts_from_qualified_names
from jedi.evaluate.filters import AbstractFilter
from jedi.evaluate.names import ContextName, TreeNameDefinition
from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \
    iterator_to_context_set
    iterator_to_context_set, ContextWrapper
from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments
from jedi.evaluate.arguments import AnonymousArguments, \
    ValuesArguments, TreeArgumentsWrapper
from jedi.evaluate.context.function import FunctionExecutionContext, \
    FunctionContext, AbstractFunction
from jedi.evaluate.context.klass import ClassContext, apply_py__get__, ClassFilter
    FunctionContext, FunctionMixin, OverloadedFunctionContext
from jedi.evaluate.context.klass import ClassContext, apply_py__get__, \
    ClassFilter
from jedi.evaluate.context import iterable
from jedi.parser_utils import get_parent_scope


class InstanceExecutedParam(object):
    def __init__(self, instance):
    def __init__(self, instance, tree_param):
        self._instance = instance
        self._tree_param = tree_param
        self.string_name = self._tree_param.name.value

    def infer(self):
        return ContextSet(self._instance)
        return ContextSet([self._instance])

    def matches_signature(self):
        return True


class AnonymousInstanceArguments(AnonymousArguments):
    def __init__(self, instance):
        self._instance = instance

    def get_executed_params(self, execution_context):
    def get_executed_params_and_issues(self, execution_context):
        from jedi.evaluate.dynamic import search_params
        self_param = InstanceExecutedParam(self._instance)
        tree_params = execution_context.tree_node.get_params()
        if not tree_params:
            return [], []

        self_param = InstanceExecutedParam(self._instance, tree_params[0])
        if len(tree_params) == 1:
            # If the only param is self, we don't need to try to find
            # executions of this function, we have all the params already.
            return [self_param]
            return [self_param], []
        executed_params = list(search_params(
            execution_context.evaluator,
            execution_context,
            execution_context.tree_node
        ))
        executed_params[0] = self_param
        return executed_params
        return executed_params, []


class AbstractInstanceContext(Context):
@@ -58,20 +70,22 @@ class AbstractInstanceContext(Context):
        self.class_context = class_context
        self.var_args = var_args

    def is_class(self):
        return False
    def is_instance(self):
        return True

    @property
    def py__call__(self):
    def get_qualified_names(self):
        return self.class_context.get_qualified_names()

    def get_annotated_class_object(self):
        return self.class_context  # This is the default.

    def py__call__(self, arguments):
        names = self.get_function_slot_names(u'__call__')
        if not names:
            # Means the Instance is not callable.
            raise AttributeError
        return super(AbstractInstanceContext, self).py__call__(arguments)

        def execute(arguments):
            return ContextSet.from_sets(name.infer().execute(arguments) for name in names)

        return execute
        return ContextSet.from_sets(name.infer().execute(arguments) for name in names)

    def py__class__(self):
        return self.class_context
@@ -96,23 +110,25 @@ class AbstractInstanceContext(Context):
            for name in names
        )

    def py__get__(self, obj):
    def py__get__(self, obj, class_context):
        """
        obj may be None.
        """
        # Arguments in __get__ descriptors are obj, class.
        # `method` is the new parent of the array, don't know if that's good.
        names = self.get_function_slot_names(u'__get__')
        if names:
            if isinstance(obj, AbstractInstanceContext):
                return self.execute_function_slots(names, obj, obj.class_context)
            else:
                none_obj = compiled.builtin_from_name(self.evaluator, u'None')
                return self.execute_function_slots(names, none_obj, obj)
            if obj is None:
                obj = compiled.builtin_from_name(self.evaluator, u'None')
            return self.execute_function_slots(names, obj, class_context)
        else:
            return ContextSet(self)
            return ContextSet([self])

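The `obj`/`class_context` pair mirrors CPython's descriptor protocol, where `__get__` receives the instance (or `None` for class-level access) and the owner class. A minimal standalone sketch of the protocol being modeled:

class Descriptor(object):
    def __get__(self, obj, objtype=None):
        # obj is None for class access (Foo.attr),
        # the instance for instance access (Foo().attr).
        return 'class access' if obj is None else 'instance access'

class Foo(object):
    attr = Descriptor()

assert Foo.attr == 'class access'
assert Foo().attr == 'instance access'
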
    def get_filters(self, search_global=None, until_position=None,
                    origin_scope=None, include_self_names=True):
        class_context = self.get_annotated_class_object()
        if include_self_names:
            for cls in self.class_context.py__mro__():
            for cls in class_context.py__mro__():
                if not isinstance(cls, compiled.CompiledObject) \
                        or cls.tree_node is not None:
                    # In this case we're excluding compiled objects that are
@@ -120,62 +136,62 @@ class AbstractInstanceContext(Context):
                    # compiled objects to search for self variables.
                    yield SelfAttributeFilter(self.evaluator, self, cls, origin_scope)

        for cls in self.class_context.py__mro__():
        for cls in class_context.py__mro__():
            if isinstance(cls, compiled.CompiledObject):
                yield CompiledInstanceClassFilter(self.evaluator, self, cls)
            else:
                yield InstanceClassFilter(self.evaluator, self, cls, origin_scope)

    def py__getitem__(self, index):
        try:
            names = self.get_function_slot_names(u'__getitem__')
        except KeyError:
            debug.warning('No __getitem__, cannot access the array.')
            return NO_CONTEXTS
        else:
            index_obj = compiled.create_simple_object(self.evaluator, index)
            return self.execute_function_slots(names, index_obj)
    def py__getitem__(self, index_context_set, contextualized_node):
        names = self.get_function_slot_names(u'__getitem__')
        if not names:
            return super(AbstractInstanceContext, self).py__getitem__(
                index_context_set,
                contextualized_node,
            )

    def py__iter__(self):
        args = ValuesArguments([index_context_set])
        return ContextSet.from_sets(name.infer().execute(args) for name in names)

    def py__iter__(self, contextualized_node=None):
        iter_slot_names = self.get_function_slot_names(u'__iter__')
        if not iter_slot_names:
            debug.warning('No __iter__ on %s.' % self)
            return
            return super(AbstractInstanceContext, self).py__iter__(contextualized_node)

        for generator in self.execute_function_slots(iter_slot_names):
            if isinstance(generator, AbstractInstanceContext):
                # `__next__` logic.
                if self.evaluator.environment.version_info.major == 2:
                    name = u'next'
        def iterate():
            for generator in self.execute_function_slots(iter_slot_names):
                if generator.is_instance() and not generator.is_compiled():
                    # `__next__` logic.
                    if self.evaluator.environment.version_info.major == 2:
                        name = u'next'
                    else:
                        name = u'__next__'
                    next_slot_names = generator.get_function_slot_names(name)
                    if next_slot_names:
                        yield LazyKnownContexts(
                            generator.execute_function_slots(next_slot_names)
                        )
                    else:
                        debug.warning('Instance has no __next__ function in %s.', generator)
                else:
                    name = u'__next__'
                iter_slot_names = generator.get_function_slot_names(name)
                if iter_slot_names:
                    yield LazyKnownContexts(
                        generator.execute_function_slots(iter_slot_names)
                    )
                else:
                    debug.warning('Instance has no __next__ function in %s.', generator)
            else:
                for lazy_context in generator.py__iter__():
                    yield lazy_context
                    for lazy_context in generator.py__iter__():
                        yield lazy_context
        return iterate()

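The `iterate()` helper above follows the same contract as Python's own iteration: `__iter__` returns an object whose `__next__` (or `next` on Python 2) produces the values. A minimal sketch of the protocol being emulated:

class Countdown(object):
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return self

    def __next__(self):
        if self.n <= 0:
            raise StopIteration
        self.n -= 1
        return self.n + 1

    next = __next__  # Python 2 spelling of the same slot

assert list(Countdown(3)) == [3, 2, 1]
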
    @abstractproperty
    def name(self):
        pass

    def _create_init_execution(self, class_context, bound_method):
        return bound_method.get_function_execution(self.var_args)

    def create_init_executions(self):
        for name in self.get_function_slot_names(u'__init__'):
            # TODO is this correct? I think we need to check for functions.
            if isinstance(name, LazyInstanceClassName):
                function = FunctionContext.from_context(
                    self.parent_context,
                    name.tree_name.parent
                )
                bound_method = BoundMethod(self, name.class_context, function)
                yield self._create_init_execution(name.class_context, bound_method)
                bound_method = BoundMethod(self, function)
                yield bound_method.get_function_execution(self.var_args)

    @evaluator_method_cache()
    def create_instance_context(self, class_context, node):
@@ -191,21 +207,25 @@ class AbstractInstanceContext(Context):
                        parent_context,
                        scope,
                    )
                    bound_method = BoundMethod(self, class_context, func)
                    bound_method = BoundMethod(self, func)
                    if scope.name.value == '__init__' and parent_context == class_context:
                        return self._create_init_execution(class_context, bound_method)
                        return bound_method.get_function_execution(self.var_args)
                    else:
                        return bound_method.get_function_execution()
                elif scope.type == 'classdef':
                    class_context = ClassContext(self.evaluator, parent_context, scope)
                    return class_context
                elif scope.type == 'comp_for':
                elif scope.type in ('comp_for', 'sync_comp_for'):
                    # Comprehensions currently don't have a special scope in Jedi.
                    return self.create_instance_context(class_context, scope)
                else:
                    raise NotImplementedError
        return class_context

    def get_signatures(self):
        init_funcs = self.py__getattribute__('__call__')
        return [sig.bind(self) for sig in init_funcs.get_signatures()]

    def __repr__(self):
        return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_context,
                                   self.var_args)
@@ -214,27 +234,12 @@ class AbstractInstanceContext(Context):

class CompiledInstance(AbstractInstanceContext):
    def __init__(self, evaluator, parent_context, class_context, var_args):
        self._original_var_args = var_args

        # I don't think that dynamic append lookups should happen here. That
        # sounds more like something that should go to py__iter__.
        if class_context.py__name__() in ['list', 'set'] \
                and parent_context.get_root_context() == evaluator.builtins_module:
            # compare the module path with the builtin name.
            if settings.dynamic_array_additions:
                var_args = iterable.get_dynamic_array_instance(self, var_args)

        super(CompiledInstance, self).__init__(evaluator, parent_context, class_context, var_args)

    @property
    def name(self):
        return compiled.CompiledContextName(self, self.class_context.name.string_name)

    def create_instance_context(self, class_context, node):
        if get_parent_scope(node).type == 'classdef':
            return class_context
        else:
            return super(CompiledInstance, self).create_instance_context(class_context, node)

    def get_first_non_keyword_argument_contexts(self):
        key, lazy_context = next(self._original_var_args.unpack(), ('', None))
        if key is not None:
@@ -242,16 +247,63 @@ class CompiledInstance(AbstractInstanceContext):

        return lazy_context.infer()

    def is_stub(self):
        return False


class TreeInstance(AbstractInstanceContext):
    def __init__(self, evaluator, parent_context, class_context, var_args):
        # I don't think that dynamic append lookups should happen here. That
        # sounds more like something that should go to py__iter__.
        if class_context.py__name__() in ['list', 'set'] \
                and parent_context.get_root_context() == evaluator.builtins_module:
            # compare the module path with the builtin name.
            if settings.dynamic_array_additions:
                var_args = iterable.get_dynamic_array_instance(self, var_args)

        super(TreeInstance, self).__init__(evaluator, parent_context,
                                           class_context, var_args)
        self.tree_node = class_context.tree_node

    @property
    def name(self):
        return filters.ContextName(self, self.class_context.name.tree_name)
        return ContextName(self, self.class_context.name.tree_name)

    # This can recurse, if the initialization of the class includes a reference
    # to itself.
    @evaluator_method_cache(default=None)
    def _get_annotated_class_object(self):
        from jedi.evaluate.gradual.annotation import py__annotations__, \
            infer_type_vars_for_execution

        for func in self._get_annotation_init_functions():
            # Just take the first result, it should always be one, because we
            # control the typeshed code.
            bound = BoundMethod(self, func)
            execution = bound.get_function_execution(self.var_args)
            if not execution.matches_signature():
                # First check if the signature even matches, if not we don't
                # need to infer anything.
                continue

            all_annotations = py__annotations__(execution.tree_node)
            defined, = self.class_context.define_generics(
                infer_type_vars_for_execution(execution, all_annotations),
            )
            debug.dbg('Inferred instance context as %s', defined, color='BLUE')
            return defined
        return None

    def get_annotated_class_object(self):
        return self._get_annotated_class_object() or self.class_context

    def _get_annotation_init_functions(self):
        filter = next(self.class_context.get_filters())
        for init_name in filter.get('__init__'):
            for init in init_name.infer():
                if init.is_function():
                    for signature in init.get_signatures():
                        yield signature.context


class AnonymousInstance(TreeInstance):
@@ -263,6 +315,9 @@ class AnonymousInstance(TreeInstance):
            var_args=AnonymousInstanceArguments(self),
        )

    def get_annotated_class_object(self):
        return self.class_context  # This is the default.

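`_get_annotated_class_object` infers type variables for an instance by matching the constructor arguments against the `__init__` annotations, the same idea PEP 484 generics are built on. A hedged illustration of the concept (standard `typing` only, not jedi API; the class name is made up):

from typing import Generic, List, TypeVar

T = TypeVar('T')

class Box(Generic[T]):
    def __init__(self, item: T) -> None:
        self.item = item

    def items(self) -> List[T]:
        return [self.item]

# From the call Box(3), a checker binds T = int by matching the
# argument against __init__'s annotation, so Box(3).items() is
# inferred as List[int] -- the analogue of define_generics above.
box = Box(3)
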
class CompiledInstanceName(compiled.CompiledName):

@@ -279,17 +334,13 @@ class CompiledInstanceName(compiled.CompiledName):
    @iterator_to_context_set
    def infer(self):
        for result_context in self._class_member_name.infer():
            is_function = result_context.api_type == 'function'
            if result_context.tree_node is not None and is_function:
                yield BoundMethod(self._instance, self._class, result_context)
            if result_context.api_type == 'function':
                yield CompiledBoundMethod(result_context)
            else:
                if is_function:
                    yield CompiledBoundMethod(result_context)
                else:
                    yield result_context
                yield result_context


class CompiledInstanceClassFilter(filters.AbstractFilter):
class CompiledInstanceClassFilter(AbstractFilter):
    name_class = CompiledInstanceName

    def __init__(self, evaluator, instance, klass):
@@ -311,49 +362,60 @@ class CompiledInstanceClassFilter(filters.AbstractFilter):
        ]


class BoundMethod(AbstractFunction):
    def __init__(self, instance, klass, function):
        super(BoundMethod, self).__init__(
            function.evaluator,
            function.parent_context,
            function.tree_node,
        )
        self._instance = instance
        self._class = klass
        self._function = function
class BoundMethod(FunctionMixin, ContextWrapper):
    def __init__(self, instance, function):
        super(BoundMethod, self).__init__(function)
        self.instance = instance

    def is_bound_method(self):
        return True

    def py__class__(self):
        return compiled.get_special_object(self.evaluator, u'BOUND_METHOD_CLASS')
        c, = contexts_from_qualified_names(self.evaluator, u'types', u'MethodType')
        return c

    def _get_arguments(self, arguments):
        if arguments is None:
            arguments = AnonymousInstanceArguments(self.instance)

        return InstanceArguments(self.instance, arguments)

    def get_function_execution(self, arguments=None):
        if arguments is None:
            arguments = AnonymousInstanceArguments(self._instance)
        arguments = self._get_arguments(arguments)

        arguments = InstanceArguments(self._instance, arguments)

        if isinstance(self._function, compiled.CompiledObject):
        if isinstance(self._wrapped_context, compiled.CompiledObject):
            # This is kind of weird, because it's coming from a compiled object
            # and we're not sure if we want that in the future.
            # TODO remove?!
            return FunctionExecutionContext(
                self.evaluator, self.parent_context, self, arguments
            )

        return self._function.get_function_execution(arguments)
        return super(BoundMethod, self).get_function_execution(arguments)

    def py__call__(self, arguments):
        if isinstance(self._wrapped_context, OverloadedFunctionContext):
            return self._wrapped_context.py__call__(self._get_arguments(arguments))

        function_execution = self.get_function_execution(arguments)
        return function_execution.infer()

    def get_signatures(self):
        return [sig.bind(self) for sig in self._wrapped_context.get_signatures()]

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._function)
        return '<%s: %s>' % (self.__class__.__name__, self._wrapped_context)


class CompiledBoundMethod(compiled.CompiledObject):
    def __init__(self, func):
        super(CompiledBoundMethod, self).__init__(
            func.evaluator, func.access_handle, func.parent_context, func.tree_node)
class CompiledBoundMethod(ContextWrapper):
    def is_bound_method(self):
        return True

    def get_param_names(self):
        return list(super(CompiledBoundMethod, self).get_param_names())[1:]
    def get_signatures(self):
        return [sig.bind(self) for sig in self._wrapped_context.get_signatures()]


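`BoundMethod` reports its class as `types.MethodType`, matching how CPython represents a function bound to an instance. The binding it emulates can be reproduced directly:

import types

class Greeter(object):
    def greet(self, name):
        return 'hi ' + name

g = Greeter()
# Accessing through the instance binds the function to g...
assert g.greet('ann') == Greeter.greet(g, 'ann')
# ...which is the same object types.MethodType constructs explicitly,
# prepending the instance exactly like InstanceArguments.unpack() does.
rebound = types.MethodType(Greeter.greet, g)
assert rebound('bob') == 'hi bob'
assert type(g.greet) is types.MethodType
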
class SelfName(filters.TreeNameDefinition):
class SelfName(TreeNameDefinition):
    """
    This name calculates the parent_context lazily.
    """
@@ -376,20 +438,17 @@ class LazyInstanceClassName(object):
    @iterator_to_context_set
    def infer(self):
        for result_context in self._class_member_name.infer():
            if isinstance(result_context, FunctionContext):
                # Classes are never used to resolve anything within the
                # functions. Only other functions and modules will resolve
                # those things.
                yield BoundMethod(self._instance, self.class_context, result_context)
            else:
                for c in apply_py__get__(result_context, self._instance):
                    yield c
            for c in apply_py__get__(result_context, self._instance, self.class_context):
                yield c

    def __getattr__(self, name):
        return getattr(self._class_member_name, name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._class_member_name)

class InstanceClassFilter(filters.AbstractFilter):

class InstanceClassFilter(AbstractFilter):
    """
    This filter is special in that it uses the class filter and wraps the
    resulting names in LazyInstanceClassName. The idea is that the class name
@@ -403,16 +462,20 @@ class InstanceClassFilter(filters.AbstractFilter):
            origin_scope=origin_scope,
            is_instance=True,
        ))
        assert isinstance(self._class_filter, ClassFilter), self._class_filter

    def get(self, name):
        return self._convert(self._class_filter.get(name))
        return self._convert(self._class_filter.get(name, from_instance=True))

    def values(self):
        return self._convert(self._class_filter.values())
        return self._convert(self._class_filter.values(from_instance=True))

    def _convert(self, names):
        return [LazyInstanceClassName(self._instance, self._class_context, n) for n in names]

    def __repr__(self):
        return '<%s for %s>' % (self.__class__.__name__, self._class_context)

class SelfAttributeFilter(ClassFilter):
    """
@@ -432,20 +495,17 @@ class SelfAttributeFilter(ClassFilter):

    def _filter(self, names):
        names = self._filter_self_names(names)
        if isinstance(self._parser_scope, compiled.CompiledObject) and False:
            # This would be for builtin skeletons, which are not yet supported.
            return list(names)
        else:
            start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
            return [n for n in names if start < n.start_pos < end]
        start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
        return [n for n in names if start < n.start_pos < end]

    def _filter_self_names(self, names):
        for name in names:
            trailer = name.parent
            if trailer.type == 'trailer' \
                    and len(trailer.children) == 2 \
                    and trailer.children[0] == '.':
                if name.is_definition() and self._access_possible(name):
                if name.is_definition() and self._access_possible(name, from_instance=True):
                    # TODO filter non-self assignments.
                    yield name

    def _convert_names(self, names):
@@ -455,29 +515,18 @@ class SelfAttributeFilter(ClassFilter):
        return names


class InstanceArguments(AbstractArguments):
    def __init__(self, instance, var_args):
class InstanceArguments(TreeArgumentsWrapper):
    def __init__(self, instance, arguments):
        super(InstanceArguments, self).__init__(arguments)
        self.instance = instance
        self._var_args = var_args

    @property
    def argument_node(self):
        return self._var_args.argument_node

    @property
    def trailer(self):
        return self._var_args.trailer

    def unpack(self, func=None):
        yield None, LazyKnownContext(self.instance)
        for values in self._var_args.unpack(func):
        for values in self._wrapped_arguments.unpack(func):
            yield values

    def get_calling_nodes(self):
        return self._var_args.get_calling_nodes()
    def get_executed_params_and_issues(self, execution_context):
        if isinstance(self._wrapped_arguments, AnonymousInstanceArguments):
            return self._wrapped_arguments.get_executed_params_and_issues(execution_context)

    def get_executed_params(self, execution_context):
        if isinstance(self._var_args, AnonymousInstanceArguments):
            return self._var_args.get_executed_params(execution_context)

        return super(InstanceArguments, self).get_executed_params(execution_context)
        return super(InstanceArguments, self).get_executed_params_and_issues(execution_context)

@@ -23,33 +23,46 @@ It is important to note that:
from jedi import debug
from jedi import settings
from jedi._compatibility import force_unicode, is_py3
from jedi.cache import memoize_method
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate import recursion
from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
    LazyTreeContext
from jedi.evaluate.helpers import get_int_or_none, is_string, \
    predefine_names, evaluate_call_of_leaf, reraise_as_evaluator, \
    EvaluatorKeyError
from jedi.evaluate.utils import safe_property
from jedi.evaluate.utils import to_list
    predefine_names, evaluate_call_of_leaf, reraise_getitem_errors, \
    SimpleGetItemNotFound
from jedi.evaluate.utils import safe_property, to_list
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import ParserTreeFilter, BuiltinOverwrite, \
from jedi.evaluate.filters import ParserTreeFilter, LazyAttributeOverwrite, \
    publish_method
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \
    TreeContext, ContextualizedNode
from jedi.parser_utils import get_comp_fors
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, \
    TreeContext, ContextualizedNode, iterate_contexts, HelperContextMixin
from jedi.parser_utils import get_sync_comp_fors


class IterableMixin(object):
    def py__stop_iteration_returns(self):
        return ContextSet(compiled.builtin_from_name(self.evaluator, u'None'))
        return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')])


class GeneratorBase(BuiltinOverwrite, IterableMixin):
class GeneratorBase(LazyAttributeOverwrite, IterableMixin):
    array_type = None
    special_object_identifier = u'GENERATOR_OBJECT'

    def _get_wrapped_context(self):
        generator, = self.evaluator.typing_module \
            .py__getattribute__('Generator') \
            .execute_annotation()
        return generator

    def is_instance(self):
        return False

    def py__bool__(self):
        return True

    @publish_method('__iter__')
    def py__iter__(self, contextualized_node=None):
        return ContextSet([self])

    @publish_method('send')
    @publish_method('next', python_version_match=2)
@@ -57,9 +70,12 @@ class GeneratorBase(BuiltinOverwrite, IterableMixin):
    def py__next__(self):
        return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())

    def py__stop_iteration_returns(self):
        return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')])

    @property
    def name(self):
        return compiled.CompiledContextName(self, 'generator')
        return compiled.CompiledContextName(self, 'Generator')


class Generator(GeneratorBase):
@@ -68,7 +84,7 @@ class Generator(GeneratorBase):
        super(Generator, self).__init__(evaluator)
        self._func_execution_context = func_execution_context

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        return self._func_execution_context.get_yield_lazy_contexts()

    def py__stop_iteration_returns(self):
@@ -83,68 +99,64 @@ class CompForContext(TreeContext):
    def from_comp_for(cls, parent_context, comp_for):
        return cls(parent_context.evaluator, parent_context, comp_for)

    def get_node(self):
        return self.tree_node

    def get_filters(self, search_global, until_position=None, origin_scope=None):
    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
        yield ParserTreeFilter(self.evaluator, self)


def comprehension_from_atom(evaluator, context, atom):
    bracket = atom.children[0]
    test_list_comp = atom.children[1]

    if bracket == '{':
        if atom.children[1].children[1] == ':':
            cls = DictComprehension
            sync_comp_for = test_list_comp.children[3]
            if sync_comp_for.type == 'comp_for':
                sync_comp_for = sync_comp_for.children[1]

            return DictComprehension(
                evaluator,
                context,
                sync_comp_for_node=sync_comp_for,
                key_node=test_list_comp.children[0],
                value_node=test_list_comp.children[2],
            )
        else:
            cls = SetComprehension
    elif bracket == '(':
        cls = GeneratorComprehension
    elif bracket == '[':
        cls = ListComprehension
    return cls(evaluator, context, atom)

    sync_comp_for = test_list_comp.children[1]
    if sync_comp_for.type == 'comp_for':
        sync_comp_for = sync_comp_for.children[1]

    return cls(
        evaluator,
        defining_context=context,
        sync_comp_for_node=sync_comp_for,
        entry_node=test_list_comp.children[0],
    )


class ComprehensionMixin(object):
    def __init__(self, evaluator, defining_context, atom):
        super(ComprehensionMixin, self).__init__(evaluator)
        self._defining_context = defining_context
        self._atom = atom

    def _get_comprehension(self):
        "return 'a for a in b'"
        # The atom contains a testlist_comp
        return self._atom.children[1]

    def _get_comp_for(self):
        "return CompFor('for a in b')"
        return self._get_comprehension().children[1]

    def _eval_node(self, index=0):
        """
        The first part `x + 1` of the list comprehension:

            [x + 1 for x in foo]
        """
        return self._get_comprehension().children[index]

    @evaluator_method_cache()
    def _get_comp_for_context(self, parent_context, comp_for):
        # TODO shouldn't this be part of create_context?
        return CompForContext.from_comp_for(parent_context, comp_for)

    def _nested(self, comp_fors, parent_context=None):
        comp_for = comp_fors[0]

        is_async = 'async' == comp_for.children[comp_for.children.index('for') - 1]
        is_async = comp_for.parent.type == 'comp_for'

        input_node = comp_for.children[comp_for.children.index('in') + 1]
        input_node = comp_for.children[3]
        parent_context = parent_context or self._defining_context
        input_types = parent_context.eval_node(input_node)
        # TODO: simulate await if self.is_async

        cn = ContextualizedNode(parent_context, input_node)
        iterated = input_types.iterate(cn, is_async=is_async)
        exprlist = comp_for.children[comp_for.children.index('for') + 1]
        exprlist = comp_for.children[1]
        for i, lazy_context in enumerate(iterated):
            types = lazy_context.infer()
            dct = unpack_tuple_to_dict(parent_context, types, exprlist)
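`comprehension_from_atom` and `_nested` pick the entry node and the `for` clause out of the parser tree by child index. A rough sketch of the tree shape they rely on, assuming the parso package jedi itself depends on (node types vary slightly between parso versions: plain `for` clauses are `sync_comp_for` in newer releases, `comp_for` in older ones):

import parso

module = parso.parse('[x + 1 for x in foo]\n')
atom = module.children[0].children[0]    # the bracketed atom
testlist_comp = atom.children[1]
entry_node = testlist_comp.children[0]   # the `x + 1` part
comp_for = testlist_comp.children[1]     # the `for x in foo` clause
print(entry_node.get_code(), '|', comp_for.type)
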
@@ -157,39 +169,47 @@ class ComprehensionMixin(object):
                for result in self._nested(comp_fors[1:], context_):
                    yield result
            except IndexError:
                iterated = context_.eval_node(self._eval_node())
                iterated = context_.eval_node(self._entry_node)
                if self.array_type == 'dict':
                    yield iterated, context_.eval_node(self._eval_node(2))
                    yield iterated, context_.eval_node(self._value_node)
                else:
                    yield iterated

    @evaluator_method_cache(default=[])
    @to_list
    def _iterate(self):
        comp_fors = tuple(get_comp_fors(self._get_comp_for()))
        comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))
        for result in self._nested(comp_fors):
            yield result

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        for set_ in self._iterate():
            yield LazyKnownContexts(set_)

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._atom)
        return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node)


class Sequence(BuiltinOverwrite, IterableMixin):
class _DictMixin(object):
    def _get_generics(self):
        return tuple(c_set.py__class__() for c_set in self.get_mapping_item_contexts())


class Sequence(LazyAttributeOverwrite, IterableMixin):
    api_type = u'instance'

    @property
    def name(self):
        return compiled.CompiledContextName(self, self.array_type)

    @memoize_method
    def get_object(self):
        compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type)
        only_obj, = compiled_obj.execute_evaluated(self)
        return only_obj
    def _get_generics(self):
        return (self.merge_types_of_iterate().py__class__(),)

    def _get_wrapped_context(self):
        from jedi.evaluate.gradual.typing import GenericClass
        klass = compiled.builtin_from_name(self.evaluator, self.array_type)
        c, = GenericClass(klass, self._get_generics()).execute_annotation()
        return c

    def py__bool__(self):
        return None  # We don't know the length, because of appends.
@@ -201,55 +221,77 @@ class Sequence(BuiltinOverwrite, IterableMixin):
    def parent(self):
        return self.evaluator.builtins_module

    def dict_values(self):
        return ContextSet.from_sets(
            self._defining_context.eval_node(v)
            for k, v in self._items()
        )
    def py__getitem__(self, index_context_set, contextualized_node):
        if self.array_type == 'dict':
            return self._dict_values()
        return iterate_contexts(ContextSet([self]))


class ListComprehension(ComprehensionMixin, Sequence):
class _BaseComprehension(ComprehensionMixin):
    def __init__(self, evaluator, defining_context, sync_comp_for_node, entry_node):
        assert sync_comp_for_node.type == 'sync_comp_for'
        super(_BaseComprehension, self).__init__(evaluator)
        self._defining_context = defining_context
        self._sync_comp_for_node = sync_comp_for_node
        self._entry_node = entry_node


class ListComprehension(_BaseComprehension, Sequence):
    array_type = u'list'

    def py__getitem__(self, index):
    def py__simple_getitem__(self, index):
        if isinstance(index, slice):
            return ContextSet(self)
            return ContextSet([self])

        all_types = list(self.py__iter__())
        with reraise_as_evaluator(IndexError, TypeError):
        with reraise_getitem_errors(IndexError, TypeError):
            lazy_context = all_types[index]
        return lazy_context.infer()


class SetComprehension(ComprehensionMixin, Sequence):
class SetComprehension(_BaseComprehension, Sequence):
    array_type = u'set'


class GeneratorComprehension(_BaseComprehension, GeneratorBase):
    pass


class DictComprehension(ComprehensionMixin, Sequence):
    array_type = u'dict'

    def _get_comp_for(self):
        return self._get_comprehension().children[3]
    def __init__(self, evaluator, defining_context, sync_comp_for_node, key_node, value_node):
        assert sync_comp_for_node.type == 'sync_comp_for'
        super(DictComprehension, self).__init__(evaluator)
        self._defining_context = defining_context
        self._sync_comp_for_node = sync_comp_for_node
        self._entry_node = key_node
        self._value_node = value_node

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        for keys, values in self._iterate():
            yield LazyKnownContexts(keys)

    def py__getitem__(self, index):
    def py__simple_getitem__(self, index):
        for keys, values in self._iterate():
            for k in keys:
                if isinstance(k, compiled.CompiledObject):
                    # Be careful in the future if refactoring, index could be a
                    # slice.
                    if k.get_safe_value(default=object()) == index:
                        return values
        return self.dict_values()
        raise SimpleGetItemNotFound()

    def dict_values(self):
    def _dict_keys(self):
        return ContextSet.from_sets(keys for keys, values in self._iterate())

    def _dict_values(self):
        return ContextSet.from_sets(values for keys, values in self._iterate())

    @publish_method('values')
    def _imitate_values(self):
        lazy_context = LazyKnownContexts(self.dict_values())
        return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context]))
        lazy_context = LazyKnownContexts(self._dict_values())
        return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])

    @publish_method('items')
    def _imitate_items(self):
@@ -265,7 +307,10 @@ class DictComprehension(ComprehensionMixin, Sequence):
            for key, value in self._iterate()
        ]

        return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts))
        return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])

    def get_mapping_item_contexts(self):
        return self._dict_keys(), self._dict_values()

    def exact_key_items(self):
        # NOTE: A smarter thing can probably be done here to achieve better
@@ -273,11 +318,8 @@ class DictComprehension(ComprehensionMixin, Sequence):
        return []


class GeneratorComprehension(ComprehensionMixin, GeneratorBase):
    pass


class SequenceLiteralContext(Sequence):
    _TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist'
    mapping = {'(': u'tuple',
               '[': u'list',
               '{': u'set'}
@@ -287,63 +329,73 @@ class SequenceLiteralContext(Sequence):
        self.atom = atom
        self._defining_context = defining_context

        if self.atom.type in ('testlist_star_expr', 'testlist'):
        if self.atom.type in self._TUPLE_LIKE:
            self.array_type = u'tuple'
        else:
            self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
            """The builtin name of the array (list, set, tuple or dict)."""

    def py__getitem__(self, index):
    def py__simple_getitem__(self, index):
        """Here the index is an int/str. Raises IndexError/KeyError."""
        if self.array_type == u'dict':
            compiled_obj_index = compiled.create_simple_object(self.evaluator, index)
            for key, value in self._items():
            for key, value in self.get_tree_entries():
                for k in self._defining_context.eval_node(key):
                    if isinstance(k, compiled.CompiledObject) \
                            and k.execute_operation(compiled_obj_index, u'==').get_safe_value():
                        return self._defining_context.eval_node(value)
            raise EvaluatorKeyError('No key found in dictionary %s.' % self)
                    try:
                        method = k.execute_operation
                    except AttributeError:
                        pass
                    else:
                        if method(compiled_obj_index, u'==').get_safe_value():
                            return self._defining_context.eval_node(value)
            raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)

        # Can raise an IndexError
        if isinstance(index, slice):
            return ContextSet(self)
            return ContextSet([self])
        else:
            with reraise_as_evaluator(TypeError, KeyError, IndexError):
                node = self._items()[index]
            with reraise_getitem_errors(TypeError, KeyError, IndexError):
                node = self.get_tree_entries()[index]
            return self._defining_context.eval_node(node)

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        """
        While values returns the possible values for any array field, this
        function returns the value for a certain index.
        """
        if self.array_type == u'dict':
            # Get keys.
            types = ContextSet()
            for k, _ in self._items():
            types = NO_CONTEXTS
            for k, _ in self.get_tree_entries():
                types |= self._defining_context.eval_node(k)
            # We don't know which dict index comes first, therefore always
            # yield all the types.
            for _ in types:
                yield LazyKnownContexts(types)
        else:
            for node in self._items():
                yield LazyTreeContext(self._defining_context, node)

            for node in self.get_tree_entries():
                if node == ':' or node.type == 'subscript':
                    # TODO this should probably use at least part of the code
                    # of eval_subscript_list.
                    yield LazyKnownContext(Slice(self._defining_context, None, None, None))
                else:
                    yield LazyTreeContext(self._defining_context, node)
            for addition in check_array_additions(self._defining_context, self):
                yield addition

    def _values(self):
        """Returns a list of a list of node."""
        if self.array_type == u'dict':
            return ContextSet.from_sets(v for k, v in self._items())
        else:
            return self._items()
    def py__len__(self):
        # This function is not really used often. It's more of a try.
        return len(self.get_tree_entries())

    def _items(self):
    def _dict_values(self):
        return ContextSet.from_sets(
            self._defining_context.eval_node(v)
            for k, v in self.get_tree_entries()
        )

    def get_tree_entries(self):
        c = self.atom.children

        if self.atom.type in ('testlist_star_expr', 'testlist'):
        if self.atom.type in self._TUPLE_LIKE:
            return c[::2]

        array_node = c[1]
@@ -390,7 +442,7 @@ class SequenceLiteralContext(Sequence):
        Returns a generator of tuples like dict.items(), where the key is
        resolved (as a string) and the values are still lazy contexts.
        """
        for key_node, value in self._items():
        for key_node, value in self.get_tree_entries():
            for key in self._defining_context.eval_node(key_node):
                if is_string(key):
                    yield key.get_safe_value(), LazyTreeContext(self._defining_context, value)
@@ -399,7 +451,7 @@ class SequenceLiteralContext(Sequence):
        return "<%s of %s>" % (self.__class__.__name__, self.atom)


class DictLiteralContext(SequenceLiteralContext):
class DictLiteralContext(_DictMixin, SequenceLiteralContext):
    array_type = u'dict'

    def __init__(self, evaluator, defining_context, atom):
@@ -409,8 +461,8 @@ class DictLiteralContext(SequenceLiteralContext):

    @publish_method('values')
    def _imitate_values(self):
        lazy_context = LazyKnownContexts(self.dict_values())
        return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context]))
        lazy_context = LazyKnownContexts(self._dict_values())
        return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])

    @publish_method('items')
    def _imitate_items(self):
@@ -419,10 +471,19 @@ class DictLiteralContext(SequenceLiteralContext):
            self.evaluator, u'tuple',
            (LazyTreeContext(self._defining_context, key_node),
             LazyTreeContext(self._defining_context, value_node))
        )) for key_node, value_node in self._items()
        )) for key_node, value_node in self.get_tree_entries()
        ]

        return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts))
        return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])

    def _dict_keys(self):
        return ContextSet.from_sets(
            self._defining_context.eval_node(k)
            for k, v in self.get_tree_entries()
        )

    def get_mapping_item_contexts(self):
        return self._dict_keys(), self._dict_values()


class _FakeArray(SequenceLiteralContext):
@@ -441,12 +502,15 @@ class FakeSequence(_FakeArray):
        super(FakeSequence, self).__init__(evaluator, None, array_type)
        self._lazy_context_list = lazy_context_list

    def py__getitem__(self, index):
        with reraise_as_evaluator(IndexError, TypeError):
    def py__simple_getitem__(self, index):
        if isinstance(index, slice):
            return ContextSet([self])

        with reraise_getitem_errors(IndexError, TypeError):
            lazy_context = self._lazy_context_list[index]
        return lazy_context.infer()

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        return self._lazy_context_list

    def py__bool__(self):
@@ -456,16 +520,16 @@ class FakeSequence(_FakeArray):
        return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)


class FakeDict(_FakeArray):
class FakeDict(_DictMixin, _FakeArray):
    def __init__(self, evaluator, dct):
        super(FakeDict, self).__init__(evaluator, dct, u'dict')
        self._dct = dct

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        for key in self._dct:
            yield LazyKnownContext(compiled.create_simple_object(self.evaluator, key))

    def py__getitem__(self, index):
    def py__simple_getitem__(self, index):
        if is_py3 and self.evaluator.environment.version_info.major == 2:
            # In Python 2 bytes and unicode compare.
            if isinstance(index, bytes):
@@ -481,20 +545,26 @@ class FakeDict(_FakeArray):
        except KeyError:
            pass

        with reraise_as_evaluator(KeyError):
        with reraise_getitem_errors(KeyError, TypeError):
            lazy_context = self._dct[index]
        return lazy_context.infer()

    @publish_method('values')
    def _values(self):
        return ContextSet(FakeSequence(
        return ContextSet([FakeSequence(
            self.evaluator, u'tuple',
            [LazyKnownContexts(self.dict_values())]
        ))
            [LazyKnownContexts(self._dict_values())]
        )])

    def dict_values(self):
    def _dict_values(self):
        return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values())

    def _dict_keys(self):
        return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())

    def get_mapping_item_contexts(self):
        return self._dict_keys(), self._dict_values()

    def exact_key_items(self):
        return self._dct.items()

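`reraise_getitem_errors` (used throughout `py__simple_getitem__` above) is an exception-translation context manager: plain `KeyError`/`IndexError`/`TypeError` from the underlying container become jedi's single `SimpleGetItemNotFound`. A minimal sketch of the idiom, under an assumed stand-in exception name:

from contextlib import contextmanager

class GetItemNotFound(Exception):
    # Stand-in for jedi's SimpleGetItemNotFound.
    pass

@contextmanager
def reraise_lookup_errors(*exception_classes):
    # Translate low-level lookup errors into one domain error,
    # so callers only ever have to catch a single type.
    try:
        yield
    except exception_classes as e:
        raise GetItemNotFound(e)

data = {'a': 1}
with reraise_lookup_errors(KeyError, TypeError):
    value = data['a']
assert value == 1
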
@@ -504,17 +574,17 @@ class MergedArray(_FakeArray):
        super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
        self._arrays = arrays

    def py__iter__(self):
    def py__iter__(self, contextualized_node=None):
        for array in self._arrays:
            for lazy_context in array.py__iter__():
                yield lazy_context

    def py__getitem__(self, index):
    def py__simple_getitem__(self, index):
        return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())

    def _items(self):
    def get_tree_entries(self):
        for array in self._arrays:
            for a in array._items():
            for a in array.get_tree_entries():
                yield a

    def __len__(self):
@@ -527,7 +597,7 @@ def unpack_tuple_to_dict(context, types, exprlist):
    """
    if exprlist.type == 'name':
        return {exprlist.value: types}
    elif exprlist.type == 'atom' and exprlist.children[0] in '([':
    elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):
        return unpack_tuple_to_dict(context, types, exprlist.children[1])
    elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                           'testlist_star_expr'):
@@ -585,7 +655,7 @@ def _check_array_additions(context, sequence):
    module_context = context.get_root_context()
    if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
        return ContextSet()
        return NO_CONTEXTS

    def find_additions(context, arglist, add_name):
        params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack())
@@ -593,8 +663,8 @@ def _check_array_additions(context, sequence):
        if add_name in ['insert']:
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, whatever in params:
                result.add(whatever)
            for key, lazy_context in params:
                result.add(lazy_context)
        elif add_name in ['extend', 'update']:
            for key, lazy_context in params:
                result |= set(lazy_context.infer().iterate())
@@ -657,25 +727,24 @@ def get_dynamic_array_instance(instance, arguments):
    """Used for set() and list() instances."""
    ai = _ArrayInstance(instance, arguments)
    from jedi.evaluate import arguments
    return arguments.ValuesArguments([ContextSet(ai)])
    return arguments.ValuesArguments([ContextSet([ai])])


class _ArrayInstance(object):
class _ArrayInstance(HelperContextMixin):
    """
    Used for the usage of set() and list().
    This is definitely a hack, but a good one :-)
    It makes it possible to use set/list conversions.

    In contrast to Array, ListComprehension and all other iterable types, this
    is something that is only used inside `evaluate/compiled/fake/builtins.py`
    and therefore doesn't need filters, `py__bool__` and so on, because
    we don't use these operations in `builtins.py`.
    """
    def __init__(self, instance, var_args):
        self.instance = instance
        self.var_args = var_args

    def py__iter__(self):
    def py__class__(self):
        tuple_, = self.instance.evaluator.builtins_module.py__getattribute__('tuple')
        return tuple_

    def py__iter__(self, contextualized_node=None):
        var_args = self.var_args
        try:
            _, lazy_context = next(var_args.unpack())
@@ -692,21 +761,24 @@ class _ArrayInstance(object):
            yield addition

    def iterate(self, contextualized_node=None, is_async=False):
        return self.py__iter__()
        return self.py__iter__(contextualized_node)


class Slice(Context):
class Slice(object):
    def __init__(self, context, start, stop, step):
        super(Slice, self).__init__(
            context.evaluator,
            parent_context=context.evaluator.builtins_module
        )
        self._context = context
        # all of them are either a Precedence or None.
        self._slice_object = None
        # All of them are either a Precedence or None.
        self._start = start
        self._stop = stop
        self._step = step

    def __getattr__(self, name):
        if self._slice_object is None:
            context = compiled.builtin_from_name(self._context.evaluator, 'slice')
            self._slice_object, = context.execute_evaluated()
        return getattr(self._slice_object, name)
|
||||
|
||||
@property
|
||||
def obj(self):
|
||||
"""
|
||||
|
||||
@@ -17,44 +17,47 @@ and others. Here's a list:
 ====================================== ========================================
 **Method**                             **Description**
 -------------------------------------- ----------------------------------------
-py__call__(params: Array)              On callable objects, returns types.
+py__call__(arguments: Array)           On callable objects, returns types.
 py__bool__()                           Returns True/False/None; None means that
                                        there's no certainty.
 py__bases__()                          Returns a list of base classes.
-py__mro__()                            Returns a list of classes (the mro).
 py__iter__()                           Returns a generator of a set of types.
 py__class__()                          Returns the class of an instance.
-py__getitem__(index: int/str)          Returns a set of types of the index.
+py__simple_getitem__(index: int/str)   Returns a set of types of the index.
                                        Can raise an IndexError/KeyError.
+py__getitem__(indexes: ContextSet)     Returns a set of types of the index.
 py__file__()                           Only on modules. Returns None if does
                                        not exist.
-py__package__()                        Only on modules. For the import system.
+py__package__() -> List[str]           Only on modules. For the import system.
 py__path__()                           Only on modules. For the import system.
 py__get__(call_object)                 Only on instances. Simulates
                                        descriptors.
-py__doc__(include_call_signature:      Returns the docstring for a context.
-          bool)
+py__doc__()                            Returns the docstring for a context.
 ====================================== ========================================

 """
 from jedi import debug
 from jedi._compatibility import use_metaclass
-from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
+from jedi.parser_utils import get_cached_parent_scope
+from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass, \
+    evaluator_method_generator_cache
 from jedi.evaluate import compiled
-from jedi.evaluate.lazy_context import LazyKnownContext
-from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \
-    ContextName
+from jedi.evaluate.lazy_context import LazyKnownContexts
+from jedi.evaluate.filters import ParserTreeFilter
+from jedi.evaluate.names import TreeNameDefinition, ContextName
+from jedi.evaluate.arguments import unpack_arglist, ValuesArguments
 from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \
-    TreeContext
+    NO_CONTEXTS
+from jedi.evaluate.context.function import FunctionAndClassBase


-def apply_py__get__(context, base_context):
+def apply_py__get__(context, instance, class_context):
     try:
         method = context.py__get__
     except AttributeError:
         yield context
     else:
-        for descriptor_context in method(base_context):
+        for descriptor_context in method(instance, class_context):
             yield descriptor_context
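The table above splits subscription into two methods: an exact lookup that may raise, and an inferred lookup that never does. A rough standalone sketch of that contract (a toy class with invented names, not jedi's real base classes):

    class ToyTupleContext:
        def __init__(self, items):
            self._items = items

        def py__simple_getitem__(self, index):
            # index is a concrete int/str; IndexError/KeyError may escape.
            return {self._items[index]}

        def py__getitem__(self, index_set):
            # index_set is a set of inferred index values; union all
            # candidates and swallow bad indexes instead of raising.
            result = set()
            for index in index_set:
                try:
                    result |= self.py__simple_getitem__(index)
                except (IndexError, KeyError, TypeError):
                    pass
            return result

    t = ToyTupleContext(['int', 'str'])
    print(t.py__simple_getitem__(1))    # {'str'}
    print(t.py__getitem__({0, 1, 99}))  # {'int', 'str'} (99 is ignored)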
@@ -66,16 +69,16 @@ class ClassName(TreeNameDefinition):

     @iterator_to_context_set
     def infer(self):
-        # TODO this _name_to_types might get refactored and be a part of the
-        # parent class. Once it is, we can probably just overwrite method to
-        # achieve this.
+        # We're using a different context to infer, so we cannot call super().
         from jedi.evaluate.syntax_tree import tree_name_to_contexts
         inferred = tree_name_to_contexts(
             self.parent_context.evaluator, self._name_context, self.tree_name)

         for result_context in inferred:
             if self._apply_decorators:
-                for c in apply_py__get__(result_context, self.parent_context):
+                for c in apply_py__get__(result_context,
+                                         instance=None,
+                                         class_context=self.parent_context):
                     yield c
             else:
                 yield result_context
@@ -103,32 +106,60 @@ class ClassFilter(ParserTreeFilter):
         while node is not None:
             if node == self._parser_scope or node == self.context:
                 return True
-            node = get_parent_scope(node)
+            node = get_cached_parent_scope(self._used_names, node)
         return False

-    def _access_possible(self, name):
+    def _access_possible(self, name, from_instance=False):
+        # Filter for ClassVar variables
+        # TODO this is not properly done, yet. It just checks for the string
+        # ClassVar in the annotation, which can be quite imprecise. If we
+        # wanted to do this correctly, we would have to resolve the ClassVar.
+        if not from_instance:
+            expr_stmt = name.get_definition()
+            if expr_stmt is not None and expr_stmt.type == 'expr_stmt':
+                annassign = expr_stmt.children[1]
+                if annassign.type == 'annassign':
+                    # TODO this is not proper matching
+                    if 'ClassVar' not in annassign.children[1].get_code():
+                        return False
+
+        # Filter for name mangling of private variables like __foo
         return not name.value.startswith('__') or name.value.endswith('__') \
             or self._equals_origin_scope()

-    def _filter(self, names):
+    def _filter(self, names, from_instance=False):
         names = super(ClassFilter, self)._filter(names)
-        return [name for name in names if self._access_possible(name)]
+        return [name for name in names if self._access_possible(name, from_instance)]


-class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
-    """
-    This class is not only important to extend `tree.Class`, it is also
-    important for descriptors (if the descriptor methods are evaluated or not).
-    """
-    api_type = u'class'
+class ClassMixin(object):
+    def is_class(self):
+        return True

-    @evaluator_method_cache(default=())
+    def py__call__(self, arguments):
+        from jedi.evaluate.context import TreeInstance
+        return ContextSet([TreeInstance(self.evaluator, self.parent_context, self, arguments)])
+
+    def py__class__(self):
+        return compiled.builtin_from_name(self.evaluator, u'type')
+
+    @property
+    def name(self):
+        return ContextName(self, self.tree_node.name)
+
+    def py__name__(self):
+        return self.name.string_name
+
+    def get_param_names(self):
+        for context_ in self.py__getattribute__(u'__init__'):
+            if context_.is_function():
+                return list(context_.get_param_names())[1:]
+        return []
+
+    @evaluator_method_generator_cache()
     def py__mro__(self):
-        def add(cls):
-            if cls not in mro:
-                mro.append(cls)
-
         mro = [self]
+        yield self
         # TODO Do a proper mro resolution. Currently we are just listing
         # classes. However, it's a complicated algorithm.
         for lazy_cls in self.py__bases__():
@@ -151,31 +182,15 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
                       File "<stdin>", line 1, in <module>
                     TypeError: int() takes at most 2 arguments (3 given)
                     """
-                    pass
+                    debug.warning('Super class of %s is not a class: %s', self, cls)
                 else:
-                    add(cls)
                     for cls_new in mro_method():
-                        add(cls_new)
-        return tuple(mro)
+                        if cls_new not in mro:
+                            mro.append(cls_new)
+                            yield cls_new

-    @evaluator_method_cache(default=())
-    def py__bases__(self):
-        arglist = self.tree_node.get_super_arglist()
-        if arglist:
-            from jedi.evaluate import arguments
-            args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist)
-            return [value for key, value in args.unpack() if key is None]
-        else:
-            return [LazyKnownContext(compiled.builtin_from_name(self.evaluator, u'object'))]
-
-    def py__call__(self, params):
-        from jedi.evaluate.context import TreeInstance
-        return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params))
-
-    def py__class__(self):
-        return compiled.builtin_from_name(self.evaluator, u'type')
-
-    def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False):
+    def get_filters(self, search_global=False, until_position=None,
+                    origin_scope=None, is_instance=False):
         if search_global:
             yield ParserTreeFilter(
                 self.evaluator,
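The rewritten py__mro__ above turns an eagerly built tuple into a cached generator that yields each base as soon as it is found, while the `mro` list still deduplicates. A hedged, self-contained sketch of the same idea (a simplified linearization, not C3 and not jedi's API):

    class Node:
        def __init__(self, name, bases=()):
            self.name = name
            self.bases = bases

        def mro_gen(self):
            mro = [self]
            yield self
            for base in self.bases:
                for cls in base.mro_gen():
                    if cls not in mro:  # keep only the first occurrence
                        mro.append(cls)
                        yield cls

    a = Node('A')
    b = Node('B', (a,))
    c = Node('C', (a, b))
    print([n.name for n in c.mro_gen()])  # ['C', 'A', 'B']

Because the generator is cached per class, callers that only need the first few entries never pay for walking the whole hierarchy.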
@@ -194,28 +209,97 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
                 origin_scope=origin_scope,
                 is_instance=is_instance
             )
+            if not is_instance:
+                from jedi.evaluate.compiled import builtin_from_name
+                type_ = builtin_from_name(self.evaluator, u'type')
+                assert isinstance(type_, ClassContext)
+                if type_ != self:
+                    for instance in type_.py__call__(ValuesArguments([])):
+                        instance_filters = instance.get_filters()
+                        # Filter out self filters
+                        next(instance_filters)
+                        next(instance_filters)
+                        yield next(instance_filters)

-    def is_class(self):
-        return True
-
-    def get_function_slot_names(self, name):
-        for filter in self.get_filters(search_global=False):
-            names = filter.get(name)
-            if names:
-                return names
-        return []
-
-    def get_param_names(self):
-        for name in self.get_function_slot_names(u'__init__'):
-            for context_ in name.infer():
-                try:
-                    method = context_.get_param_names
-                except AttributeError:
-                    pass
-                else:
-                    return list(method())[1:]
-        return []
-
-    @property
-    def name(self):
-        return ContextName(self, self.tree_node.name)
+
+class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBase)):
+    """
+    This class is not only important to extend `tree.Class`, it is also
+    important for descriptors (if the descriptor methods are evaluated or not).
+    """
+    api_type = u'class'
+
+    @evaluator_method_cache()
+    def list_type_vars(self):
+        found = []
+        arglist = self.tree_node.get_super_arglist()
+        if arglist is None:
+            return []
+
+        for stars, node in unpack_arglist(arglist):
+            if stars:
+                continue  # These are not relevant for this search.
+
+            from jedi.evaluate.gradual.annotation import find_unknown_type_vars
+            for type_var in find_unknown_type_vars(self.parent_context, node):
+                if type_var not in found:
+                    # The order matters and it's therefore a list.
+                    found.append(type_var)
+        return found
+
+    @evaluator_method_cache(default=())
+    def py__bases__(self):
+        arglist = self.tree_node.get_super_arglist()
+        if arglist:
+            from jedi.evaluate import arguments
+            args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist)
+            lst = [value for key, value in args.unpack() if key is None]
+            if lst:
+                return lst
+
+        if self.py__name__() == 'object' \
+                and self.parent_context == self.evaluator.builtins_module:
+            return []
+        return [LazyKnownContexts(
+            self.evaluator.builtins_module.py__getattribute__('object')
+        )]
+
+    def py__getitem__(self, index_context_set, contextualized_node):
+        from jedi.evaluate.gradual.typing import LazyGenericClass
+        if not index_context_set:
+            return ContextSet([self])
+        return ContextSet(
+            LazyGenericClass(
+                self,
+                index_context,
+                context_of_index=contextualized_node.context,
+            )
+            for index_context in index_context_set
+        )
+
+    def define_generics(self, type_var_dict):
+        from jedi.evaluate.gradual.typing import GenericClass
+
+        def remap_type_vars():
+            """
+            The TypeVars in the resulting classes have sometimes different names
+            and we need to check for that, e.g. a signature can be:
+
+                def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
+
+            However, the iterator is defined as Iterator[_T_co], which means it has
+            a different type var name.
+            """
+            for type_var in self.list_type_vars():
+                yield type_var_dict.get(type_var.py__name__(), NO_CONTEXTS)
+
+        if type_var_dict:
+            return ContextSet([GenericClass(
+                self,
+                generics=tuple(remap_type_vars())
+            )])
+        return ContextSet({self})
+
+    def get_signatures(self):
+        init_funcs = self.py__getattribute__('__init__')
+        return [sig.bind(self) for sig in init_funcs.get_signatures()]
@@ -1,15 +1,14 @@
 import re
 import os

 from parso import python_bytes_to_unicode

+from jedi import debug
 from jedi.evaluate.cache import evaluator_method_cache
-from jedi._compatibility import iter_modules, all_suffixes
-from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \
-    AbstractNameDefinition, ParserTreeFilter, DictFilter, MergedFilter
+from jedi.evaluate.names import ContextNameMixin, AbstractNameDefinition
+from jedi.evaluate.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
 from jedi.evaluate import compiled
 from jedi.evaluate.base_context import TreeContext
-from jedi.evaluate.imports import SubModuleName, infer_import
+from jedi.evaluate.names import SubModuleName
+from jedi.evaluate.helpers import contexts_from_qualified_names


 class _ModuleAttributeName(AbstractNameDefinition):
@@ -38,20 +37,57 @@ class ModuleName(ContextNameMixin, AbstractNameDefinition):
         return self._name


-class ModuleContext(TreeContext):
-    api_type = u'module'
-    parent_context = None
-
-    def __init__(self, evaluator, module_node, path, code_lines):
-        super(ModuleContext, self).__init__(
-            evaluator,
-            parent_context=None,
-            tree_node=module_node
-        )
-        self._path = path
-        self.code_lines = code_lines
-
-    def get_filters(self, search_global, until_position=None, origin_scope=None):
+def iter_module_names(evaluator, paths):
+    # Python modules/packages
+    for n in evaluator.compiled_subprocess.list_module_names(paths):
+        yield n
+
+    for path in paths:
+        try:
+            dirs = os.listdir(path)
+        except OSError:
+            # The file might not exist or reading it might lead to an error.
+            debug.warning("Not possible to list directory: %s", path)
+            continue
+        for name in dirs:
+            # Namespaces
+            if os.path.isdir(os.path.join(path, name)):
+                # pycache is obviously not an interesting namespace. Also the
+                # name must be a valid identifier.
+                # TODO use str.isidentifier, once Python 2 is removed
+                if name != '__pycache__' and not re.search(r'\W|^\d', name):
+                    yield name
+            # Stub files
+            if name.endswith('.pyi'):
+                if name != '__init__.pyi':
+                    yield name[:-4]
+
+
+class SubModuleDictMixin(object):
+    @evaluator_method_cache()
+    def sub_modules_dict(self):
+        """
+        Lists modules in the directory of this module (if this module is a
+        package).
+        """
+        names = {}
+        try:
+            method = self.py__path__
+        except AttributeError:
+            pass
+        else:
+            mods = iter_module_names(self.evaluator, method())
+            for name in mods:
+                # It's obviously a relative import to the current module.
+                names[name] = SubModuleName(self, name)
+
+        # In the case of an import like `from x.` we don't need to
+        # add all the variables, this is only about submodules.
+        return names
+
+
+class ModuleMixin(SubModuleDictMixin):
+    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
         yield MergedFilter(
             ParserTreeFilter(
                 self.evaluator,
@@ -61,8 +97,46 @@ class ModuleContext(TreeContext):
             ),
             GlobalNameFilter(self, self.tree_node),
         )
-        yield DictFilter(self._sub_modules_dict())
+        yield DictFilter(self.sub_modules_dict())
         yield DictFilter(self._module_attributes_dict())
         for star_filter in self.iter_star_filters():
             yield star_filter

+    def py__class__(self):
+        c, = contexts_from_qualified_names(self.evaluator, u'types', u'ModuleType')
+        return c
+
+    def is_module(self):
+        return True
+
+    def is_stub(self):
+        return False
+
+    @property
+    @evaluator_method_cache()
+    def name(self):
+        return ModuleName(self, self._string_name)
+
+    @property
+    def _string_name(self):
+        """ This is used for the goto functions. """
+        # TODO It's ugly that we even use this, the name is usually well known
+        # ahead so just pass it when creating a ModuleContext.
+        if self._path is None:
+            return ''  # no path -> empty name
+        else:
+            sep = (re.escape(os.path.sep),) * 2
+            r = re.search(r'([^%s]*?)(%s__init__)?(\.pyi?|\.so)?$' % sep, self._path)
+            # Remove PEP 3149 names
+            return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
+
+    @evaluator_method_cache()
+    def _module_attributes_dict(self):
+        names = ['__file__', '__package__', '__doc__', '__name__']
+        # All the additional module attributes are strings.
+        return dict((n, _ModuleAttributeName(self, n)) for n in names)
+
+    def iter_star_filters(self, search_global=False):
+        for star_module in self.star_imports():
+            yield next(star_module.get_filters(search_global))
+
@@ -71,58 +145,64 @@ class ModuleContext(TreeContext):
     # to push the star imports into Evaluator.module_cache, if we reenable this.
     @evaluator_method_cache([])
     def star_imports(self):
+        from jedi.evaluate.imports import Importer
+
         modules = []
         for i in self.tree_node.iter_imports():
             if i.is_star_import():
-                name = i.get_paths()[-1][-1]
-                new = infer_import(self, name)
+                new = Importer(
+                    self.evaluator,
+                    import_path=i.get_paths()[-1],
+                    module_context=self,
+                    level=i.level
+                ).follow()
+
                 for module in new:
                     if isinstance(module, ModuleContext):
                         modules += module.star_imports()
                 modules += new
         return modules

-    @evaluator_method_cache()
-    def _module_attributes_dict(self):
-        names = ['__file__', '__package__', '__doc__', '__name__']
-        # All the additional module attributes are strings.
-        return dict((n, _ModuleAttributeName(self, n)) for n in names)
+    def get_qualified_names(self):
+        """
+        A module doesn't have a qualified name, but it's important to note that
+        it's reachable and not `None`. With this information we can add
+        qualified names on top for all context children.
+        """
+        return ()

-    @property
-    def _string_name(self):
-        """ This is used for the goto functions. """
-        if self._path is None:
-            return ''  # no path -> empty name
-        else:
-            sep = (re.escape(os.path.sep),) * 2
-            r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path)
-            # Remove PEP 3149 names
-            return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
-
-    @property
-    @evaluator_method_cache()
-    def name(self):
-        return ModuleName(self, self._string_name)
-
-    def _get_init_directory(self):
-        """
-        :return: The path to the directory of a package. None in case it's not
-                 a package.
-        """
-        for suffix in all_suffixes():
-            ending = '__init__' + suffix
-            py__file__ = self.py__file__()
-            if py__file__ is not None and py__file__.endswith(ending):
-                # Remove the ending, including the separator.
-                return self.py__file__()[:-len(ending) - 1]
-        return None
+
+class ModuleContext(ModuleMixin, TreeContext):
+    api_type = u'module'
+    parent_context = None
+
+    def __init__(self, evaluator, module_node, file_io, string_names, code_lines, is_package=False):
+        super(ModuleContext, self).__init__(
+            evaluator,
+            parent_context=None,
+            tree_node=module_node
+        )
+        self.file_io = file_io
+        if file_io is None:
+            self._path = None
+        else:
+            self._path = file_io.path
+        self.string_names = string_names  # Optional[Tuple[str, ...]]
+        self.code_lines = code_lines
+        self.is_package = is_package
+
+    def is_stub(self):
+        if self._path is not None and self._path.endswith('.pyi'):
+            # Currently this is the way how we identify stubs when e.g. goto is
+            # used in them. This could be changed if stubs would be identified
+            # sooner and used as StubModuleContext.
+            return True
+        return super(ModuleContext, self).is_stub()

     def py__name__(self):
-        for name, module in self.evaluator.module_cache.iterate_modules_with_names():
-            if module == self and name != '':
-                return name
-
-        return '__main__'
+        if self.string_names is None:
+            return None
+        return '.'.join(self.string_names)

     def py__file__(self):
         """
@@ -134,35 +214,34 @@ class ModuleContext(TreeContext):
         return os.path.abspath(self._path)

     def py__package__(self):
-        if self._get_init_directory() is None:
-            return re.sub(r'\.?[^.]+$', '', self.py__name__())
-        else:
-            return self.py__name__()
+        if self.is_package:
+            return self.string_names
+        return self.string_names[:-1]

     def _py__path__(self):
-        search_path = self.evaluator.get_sys_path()
-        init_path = self.py__file__()
-        if os.path.basename(init_path) == '__init__.py':
-            with open(init_path, 'rb') as f:
-                content = python_bytes_to_unicode(f.read(), errors='replace')
-                # these are strings that need to be used for namespace packages,
-                # the first one is ``pkgutil``, the second ``pkg_resources``.
-                options = ('declare_namespace(__name__)', 'extend_path(__path__')
-                if options[0] in content or options[1] in content:
-                    # It is a namespace, now try to find the rest of the
-                    # modules on sys_path or whatever the search_path is.
-                    paths = set()
-                    for s in search_path:
-                        other = os.path.join(s, self.name.string_name)
-                        if os.path.isdir(other):
-                            paths.add(other)
-                    if paths:
-                        return list(paths)
-                    # TODO I'm not sure if this is how nested namespace
-                    # packages work. The tests are not really good enough to
-                    # show that.
-        # Default to this.
-        return [self._get_init_directory()]
+        # A namespace package is typically auto generated and ~10 lines long.
+        first_few_lines = ''.join(self.code_lines[:50])
+        # these are strings that need to be used for namespace packages,
+        # the first one is ``pkgutil``, the second ``pkg_resources``.
+        options = ('declare_namespace(__name__)', 'extend_path(__path__')
+        if options[0] in first_few_lines or options[1] in first_few_lines:
+            # It is a namespace, now try to find the rest of the
+            # modules on sys_path or whatever the search_path is.
+            paths = set()
+            for s in self.evaluator.get_sys_path():
+                other = os.path.join(s, self.name.string_name)
+                if os.path.isdir(other):
+                    paths.add(other)
+            if paths:
+                return list(paths)
+        # Nested namespace packages will not be supported. Nobody ever
+        # asked for it and in Python 3 they are there without using all the
+        # crap above.

+        # Default to the directory of this file.
+        file = self.py__file__()
+        assert file is not None  # Shouldn't be a package in the first place.
+        return [os.path.dirname(file)]

     @property
     def py__path__(self):
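The `_py__path__` rewrite above stops re-reading `__init__.py` from disk and instead scans the first ~50 already-loaded code lines for the pkgutil/pkg_resources markers. A hedged standalone sketch of that check (helper name invented, not jedi's API):

    # pkgutil/pkg_resources namespace packages announce themselves in the
    # first few lines of __init__.py, so checking a small prefix suffices.
    NAMESPACE_MARKERS = ('declare_namespace(__name__)', 'extend_path(__path__')

    def looks_like_legacy_namespace(code_lines, prefix=50):
        """Return True if the first `prefix` lines contain a namespace marker."""
        first_few = ''.join(code_lines[:prefix])
        return any(marker in first_few for marker in NAMESPACE_MARKERS)

    print(looks_like_legacy_namespace([
        "from pkgutil import extend_path\n",
        "__path__ = extend_path(__path__, __name__)\n",
    ]))  # True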
@@ -176,44 +255,14 @@ class ModuleContext(TreeContext):
         is a list of paths (strings).
         Raises an AttributeError if the module is not a package.
         """
-        path = self._get_init_directory()
-
-        if path is None:
-            raise AttributeError('Only packages have __path__ attributes.')
-        else:
+        if self.is_package:
             return self._py__path__
-
-    @evaluator_method_cache()
-    def _sub_modules_dict(self):
-        """
-        Lists modules in the directory of this module (if this module is a
-        package).
-        """
-        names = {}
-        try:
-            method = self.py__path__
-        except AttributeError:
-            pass
-        else:
-            for path in method():
-                mods = iter_modules([path])
-                for module_loader, name, is_pkg in mods:
-                    # It's obviously a relative import to the current module.
-                    names[name] = SubModuleName(self, name)
-
-        # TODO add something like this in the future, it's cleaner than the
-        # import hacks.
-        # ``os.path`` is a hardcoded exception, because it's a
-        # ``sys.modules`` modification.
-        # if str(self.name) == 'os':
-        #     names.append(Name('path', parent_context=self))
-
-        return names
-
-    def py__class__(self):
-        return compiled.get_special_object(self.evaluator, u'MODULE_CLASS')
+        raise AttributeError('Only packages have __path__ attributes.')

     def __repr__(self):
-        return "<%s: %s@%s-%s>" % (
+        return "<%s: %s@%s-%s is_stub=%s>" % (
             self.__class__.__name__, self._string_name,
-            self.tree_node.start_pos[0], self.tree_node.end_pos[0])
+            self.tree_node.start_pos[0], self.tree_node.end_pos[0],
+            self.is_stub()
+        )
@@ -1,10 +1,8 @@
-import os
-from itertools import chain
-
 from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate import imports
-from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, ContextNameMixin
+from jedi.evaluate.filters import DictFilter
+from jedi.evaluate.names import ContextNameMixin, AbstractNameDefinition
 from jedi.evaluate.base_context import Context
+from jedi.evaluate.context.module import SubModuleDictMixin


 class ImplicitNSName(ContextNameMixin, AbstractNameDefinition):
@@ -17,7 +15,7 @@ class ImplicitNSName(ContextNameMixin, AbstractNameDefinition):
         self.string_name = string_name


-class ImplicitNamespaceContext(Context):
+class ImplicitNamespaceContext(Context, SubModuleDictMixin):
     """
     Provides support for implicit namespace packages
     """
@@ -31,15 +29,15 @@ class ImplicitNamespaceContext(Context):
         super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None)
         self.evaluator = evaluator
         self._fullname = fullname
-        self.paths = paths
+        self._paths = paths

-    def get_filters(self, search_global, until_position=None, origin_scope=None):
-        yield DictFilter(self._sub_modules_dict())
+    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
+        yield DictFilter(self.sub_modules_dict())

     @property
     @evaluator_method_cache()
     def name(self):
-        string_name = self.py__package__().rpartition('.')[-1]
+        string_name = self.py__package__()[-1]
         return ImplicitNSName(self, string_name)

     def py__file__(self):
@@ -48,25 +46,19 @@ class ImplicitNamespaceContext(Context):
     def py__package__(self):
         """Return the fullname
         """
-        return self._fullname
+        return self._fullname.split('.')

     def py__path__(self):
-        return [self.paths]
+        return self._paths

     def py__name__(self):
         return self._fullname

-    @evaluator_method_cache()
-    def _sub_modules_dict(self):
-        names = {}
-
-        file_names = chain.from_iterable(os.listdir(path) for path in self.paths)
-        mods = [
-            file_name.rpartition('.')[0] if '.' in file_name else file_name
-            for file_name in file_names
-            if file_name != '__pycache__'
-        ]
-
-        for name in mods:
-            names[name] = imports.SubModuleName(self, name)
-        return names
+    def is_namespace(self):
+        return True
+
+    def is_stub(self):
+        return False

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._fullname)
@@ -16,11 +16,13 @@ annotations.
 """

 import re
+import warnings
 from textwrap import dedent

 from parso import parse, ParserSyntaxError

 from jedi._compatibility import u
+from jedi import debug
 from jedi.evaluate.utils import indent_block
 from jedi.evaluate.cache import evaluator_method_cache
 from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \
@@ -47,25 +49,26 @@ _numpy_doc_string_cache = None

 def _get_numpy_doc_string_cls():
     global _numpy_doc_string_cache
-    if isinstance(_numpy_doc_string_cache, ImportError):
+    if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
         raise _numpy_doc_string_cache
     try:
         from numpydoc.docscrape import NumpyDocString
         _numpy_doc_string_cache = NumpyDocString
-    except ImportError as e:
+    except (ImportError, SyntaxError) as e:
         _numpy_doc_string_cache = e
+        raise
     return _numpy_doc_string_cache


 def _search_param_in_numpydocstr(docstr, param_str):
     """Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
-    try:
-        # This is a non-public API. If it ever changes we should be
-        # prepared and return gracefully.
-        params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
-    except (KeyError, AttributeError, ImportError):
-        return []
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        try:
+            # This is a non-public API. If it ever changes we should be
+            # prepared and return gracefully.
+            params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
+        except Exception:
+            return []
     for p_name, p_type, p_descr in params:
         if p_name == param_str:
             m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
@@ -79,16 +82,18 @@ def _search_return_in_numpydocstr(docstr):
     """
     Search `docstr` (in numpydoc format) for type(-s) of function returns.
     """
-    try:
-        doc = _get_numpy_doc_string_cls()(docstr)
-    except ImportError:
-        return
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        try:
+            doc = _get_numpy_doc_string_cls()(docstr)
+        except Exception:
+            return
     try:
         # This is a non-public API. If it ever changes we should be
         # prepared and return gracefully.
         returns = doc._parsed_data['Returns']
         returns += doc._parsed_data['Yields']
-    except (KeyError, AttributeError):
+    except Exception:
         return
     for r_name, r_type, r_descr in returns:
         # Return names are optional and if so the type is in the name
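The changes above wrap a third-party parser whose import and parsing can fail in many ways, and silence its warnings instead of letting them leak into completion. A minimal standalone sketch of the guard pattern (generic helper, not jedi's API):

    import warnings

    def parse_safely(parser, text):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                return parser(text)
            except Exception:  # non-public API, so fail soft
                return None

    print(parse_safely(int, "42"))    # 42
    print(parse_safely(int, "nope"))  # None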
@@ -111,7 +116,7 @@ def _expand_typestr(type_str):
         yield type_str.split('of')[0]
     # Check if the type is a set of valid literal values, e.g. {'C', 'F', 'A'}
     elif type_str.startswith('{'):
-        node = parse(type_str, version='3.6').children[0]
+        node = parse(type_str, version='3.7').children[0]
         if node.type == 'atom':
             for leaf in node.children[1].children:
                 if leaf.type == 'number':
@@ -202,6 +207,7 @@ def _evaluate_for_statement_string(module_context, string):
     # Take the default grammar here, if we load the Python 2.7 grammar here, it
     # will be impossible to use `...` (Ellipsis) as a token. Docstring types
     # don't need to conform with the current grammar.
+    debug.dbg('Parse docstring code %s', string, color='BLUE')
     grammar = module_context.evaluator.latest_grammar
     try:
         module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
@@ -261,7 +267,7 @@ def _execute_array_values(evaluator, array):
             values.append(LazyKnownContexts(objects))
         return {FakeSequence(evaluator, array.array_type, values)}
     else:
-        return array.execute_evaluated()
+        return array.execute_annotation()


@evaluator_method_cache()
@@ -270,7 +276,7 @@ def infer_param(execution_context, param):
     from jedi.evaluate.context import FunctionExecutionContext

     def eval_docstring(docstring):
-        return ContextSet.from_iterable(
+        return ContextSet(
             p
             for param_str in _search_param_in_docstr(docstring, param.name.value)
             for p in _evaluate_for_statement_string(module_context, param_str)
@@ -287,6 +293,7 @@ def infer_param(execution_context, param):
         class_context = execution_context.var_args.instance.class_context
         types |= eval_docstring(class_context.py__doc__())

+    debug.dbg('Found param types for docstring: %s', types, color='BLUE')
     return types

@@ -99,10 +99,11 @@ def search_params(evaluator, execution_context, funcdef):
         )
         if function_executions:
             zipped_params = zip(*list(
-                function_execution.get_executed_params()
+                function_execution.get_executed_params_and_issues()[0]
                 for function_execution in function_executions
             ))
-            params = [DynamicExecutedParams(evaluator, executed_params) for executed_params in zipped_params]
+            params = [DynamicExecutedParams(evaluator, executed_params)
+                      for executed_params in zipped_params]
             # Evaluate the ExecutedParams to types.
         else:
             return create_default_params(execution_context, funcdef)
@@ -122,7 +123,7 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name)
     compare_node = funcdef
     if string_name == '__init__':
         cls = get_parent_scope(funcdef)
-        if isinstance(cls, tree.Class):
+        if cls.type == 'classdef':
             string_name = cls.name.value
             compare_node = cls

@@ -208,7 +209,7 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
             # Here we're trying to find decorators by checking the first
             # parameter. It's not very generic though. Should find a better
             # solution that also applies to nested decorators.
-            params = value.parent_context.get_executed_params()
+            params, _ = value.parent_context.get_executed_params_and_issues()
             if len(params) != 1:
                 continue
             values = params[0].infer()
@@ -3,142 +3,19 @@ Filters are objects that you can use to filter names in different scopes. They
 are needed for name resolution.
 """
 from abc import abstractmethod
+import weakref

 from parso.tree import search_ancestor

-from jedi._compatibility import use_metaclass, Parameter
-from jedi.cache import memoize_method
+from jedi._compatibility import use_metaclass
 from jedi.evaluate import flow_analysis
-from jedi.evaluate.base_context import ContextSet, Context
-from jedi.parser_utils import get_parent_scope
+from jedi.evaluate.base_context import ContextSet, Context, ContextWrapper, \
+    LazyContextWrapper
+from jedi.parser_utils import get_cached_parent_scope
 from jedi.evaluate.utils import to_list
+from jedi.evaluate.names import TreeNameDefinition, ParamName, AbstractNameDefinition


-class AbstractNameDefinition(object):
-    start_pos = None
-    string_name = None
-    parent_context = None
-    tree_name = None
-
-    @abstractmethod
-    def infer(self):
-        raise NotImplementedError
-
-    @abstractmethod
-    def goto(self):
-        # Typically names are already definitions and therefore a goto on that
-        # name will always result on itself.
-        return {self}
-
-    def get_root_context(self):
-        return self.parent_context.get_root_context()
-
-    def __repr__(self):
-        if self.start_pos is None:
-            return '<%s: %s>' % (self.__class__.__name__, self.string_name)
-        return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos)
-
-    def is_import(self):
-        return False
-
-    @property
-    def api_type(self):
-        return self.parent_context.api_type
-
-
-class AbstractTreeName(AbstractNameDefinition):
-    def __init__(self, parent_context, tree_name):
-        self.parent_context = parent_context
-        self.tree_name = tree_name
-
-    def goto(self):
-        return self.parent_context.evaluator.goto(self.parent_context, self.tree_name)
-
-    def is_import(self):
-        imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
-        return imp is not None
-
-    @property
-    def string_name(self):
-        return self.tree_name.value
-
-    @property
-    def start_pos(self):
-        return self.tree_name.start_pos
-
-
-class ContextNameMixin(object):
-    def infer(self):
-        return ContextSet(self._context)
-
-    def get_root_context(self):
-        if self.parent_context is None:
-            return self._context
-        return super(ContextNameMixin, self).get_root_context()
-
-    @property
-    def api_type(self):
-        return self._context.api_type
-
-
-class ContextName(ContextNameMixin, AbstractTreeName):
-    def __init__(self, context, tree_name):
-        super(ContextName, self).__init__(context.parent_context, tree_name)
-        self._context = context
-
-
-class TreeNameDefinition(AbstractTreeName):
-    _API_TYPES = dict(
-        import_name='module',
-        import_from='module',
-        funcdef='function',
-        param='param',
-        classdef='class',
-    )
-
-    def infer(self):
-        # Refactor this, should probably be here.
-        from jedi.evaluate.syntax_tree import tree_name_to_contexts
-        return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name)
-
-    @property
-    def api_type(self):
-        definition = self.tree_name.get_definition(import_name_always=True)
-        if definition is None:
-            return 'statement'
-        return self._API_TYPES.get(definition.type, 'statement')
-
-
-class ParamName(AbstractTreeName):
-    api_type = u'param'
-
-    def __init__(self, parent_context, tree_name):
-        self.parent_context = parent_context
-        self.tree_name = tree_name
-
-    def get_kind(self):
-        tree_param = search_ancestor(self.tree_name, 'param')
-        if tree_param.star_count == 1:  # *args
-            return Parameter.VAR_POSITIONAL
-        if tree_param.star_count == 2:  # **kwargs
-            return Parameter.VAR_KEYWORD
-
-        parent = tree_param.parent
-        for p in parent.children:
-            if p.type == 'param':
-                if p.star_count:
-                    return Parameter.KEYWORD_ONLY
-                if p == tree_param:
-                    break
-        return Parameter.POSITIONAL_OR_KEYWORD
-
-    def infer(self):
-        return self.get_param().infer()
-
-    def get_param(self):
-        params = self.parent_context.get_executed_params()
-        param_node = search_ancestor(self.tree_name, 'param')
-        return params[param_node.position_index]
+_definition_name_cache = weakref.WeakKeyDictionary()


 class AbstractFilter(object):
@@ -158,34 +35,70 @@ class AbstractFilter(object):
         raise NotImplementedError


+class FilterWrapper(object):
+    name_wrapper_class = None
+
+    def __init__(self, wrapped_filter):
+        self._wrapped_filter = wrapped_filter
+
+    def wrap_names(self, names):
+        return [self.name_wrapper_class(name) for name in names]
+
+    def get(self, name):
+        return self.wrap_names(self._wrapped_filter.get(name))
+
+    def values(self):
+        return self.wrap_names(self._wrapped_filter.values())
+
+
+def _get_definition_names(used_names, name_key):
+    try:
+        for_module = _definition_name_cache[used_names]
+    except KeyError:
+        for_module = _definition_name_cache[used_names] = {}
+
+    try:
+        return for_module[name_key]
+    except KeyError:
+        names = used_names.get(name_key, ())
+        result = for_module[name_key] = tuple(name for name in names if name.is_definition())
+        return result
+
+
 class AbstractUsedNamesFilter(AbstractFilter):
     name_class = TreeNameDefinition

     def __init__(self, context, parser_scope):
         self._parser_scope = parser_scope
-        self._used_names = self._parser_scope.get_root_node().get_used_names()
+        self._module_node = self._parser_scope.get_root_node()
+        self._used_names = self._module_node.get_used_names()
         self.context = context

-    def get(self, name):
-        try:
-            names = self._used_names[name]
-        except KeyError:
-            return []
-
-        return self._convert_names(self._filter(names))
+    def get(self, name, **filter_kwargs):
+        return self._convert_names(self._filter(
+            _get_definition_names(self._used_names, name),
+            **filter_kwargs
+        ))

     def _convert_names(self, names):
         return [self.name_class(self.context, name) for name in names]

-    def values(self):
-        return self._convert_names(name for name_list in self._used_names.values()
-                                   for name in self._filter(name_list))
+    def values(self, **filter_kwargs):
+        return self._convert_names(
+            name
+            for name_key in self._used_names
+            for name in self._filter(
+                _get_definition_names(self._used_names, name_key),
+                **filter_kwargs
+            )
+        )

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.context)


 class ParserTreeFilter(AbstractUsedNamesFilter):
+    # TODO remove evaluator as an argument, it's not used.
     def __init__(self, evaluator, context, node_context=None, until_position=None,
                  origin_scope=None):
         """
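The new `_get_definition_names` above memoizes per module through a WeakKeyDictionary, so cached name tuples disappear together with the parse tree they belong to. A simplified standalone sketch of that two-level cache (invented names, not jedi's API):

    import weakref

    _cache = weakref.WeakKeyDictionary()

    class UsedNames(dict):
        """Stand-in for parso's used-names mapping (a plain dict cannot be
        weakly referenced, a subclass can)."""

    def definition_names(used_names, key, is_definition=lambda n: True):
        per_module = _cache.setdefault(used_names, {})
        if key not in per_module:
            per_module[key] = tuple(
                n for n in used_names.get(key, ()) if is_definition(n))
        return per_module[key]

    u = UsedNames({'x': ['x1', 'x2']})
    print(definition_names(u, 'x'))  # ('x1', 'x2'); a second call hits the cache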
@@ -207,13 +120,11 @@ class ParserTreeFilter(AbstractUsedNamesFilter):
         return list(self._check_flows(names))

     def _is_name_reachable(self, name):
-        if not name.is_definition():
-            return False
         parent = name.parent
         if parent.type == 'trailer':
             return False
         base_node = parent if parent.type in ('classdef', 'funcdef') else name
-        return get_parent_scope(base_node) == self._parser_scope
+        return get_cached_parent_scope(self._used_names, base_node) == self._parser_scope

     def _check_flows(self, names):
         for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
@@ -257,12 +168,25 @@ class GlobalNameFilter(AbstractUsedNamesFilter):
     def __init__(self, context, parser_scope):
         super(GlobalNameFilter, self).__init__(context, parser_scope)

+    def get(self, name):
+        try:
+            names = self._used_names[name]
+        except KeyError:
+            return []
+        return self._convert_names(self._filter(names))
+
     @to_list
     def _filter(self, names):
         for name in names:
             if name.parent.type == 'global_stmt':
                 yield name

+    def values(self):
+        return self._convert_names(
+            name for name_list in self._used_names.values()
+            for name in self._filter(name_list)
+        )
+

 class DictFilter(AbstractFilter):
     def __init__(self, dct):
@@ -288,6 +212,10 @@ class DictFilter(AbstractFilter):
     def _convert(self, name, value):
         return value

+    def __repr__(self):
+        keys = ', '.join(self._dct.keys())
+        return '<%s: for {%s}>' % (self.__class__.__name__, keys)
+

 class MergedFilter(object):
     def __init__(self, *filters):
@@ -315,7 +243,7 @@ class _BuiltinMappedMethod(Context):
         self._method = method
         self._builtin_func = builtin_func

-    def py__call__(self, params):
+    def py__call__(self, arguments):
         # TODO add TypeError if params are given/or not correct.
         return self._method(self.parent_context)

@@ -353,9 +281,9 @@ class SpecialMethodFilter(DictFilter):
             else:
                 continue
             break
-        return ContextSet(
+        return ContextSet([
             _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
-        )
+        ])

     def __init__(self, context, dct, builtin_context):
         super(SpecialMethodFilter, self).__init__(dct)
@@ -391,31 +319,23 @@ class _OverwriteMeta(type):
         cls.overwritten_methods = base_dct


-class AbstractObjectOverwrite(use_metaclass(_OverwriteMeta, object)):
-    def get_object(self):
-        raise NotImplementedError
-
-    def get_filters(self, search_global, *args, **kwargs):
-        yield SpecialMethodFilter(self, self.overwritten_methods, self.get_object())
-
-        for filter in self.get_object().get_filters(search_global):
-            yield filter
-
-
-class BuiltinOverwrite(Context, AbstractObjectOverwrite):
-    special_object_identifier = None
-
-    def __init__(self, evaluator):
-        super(BuiltinOverwrite, self).__init__(evaluator, evaluator.builtins_module)
-
-    @memoize_method
-    def get_object(self):
-        from jedi.evaluate import compiled
-        assert self.special_object_identifier
-        return compiled.get_special_object(self.evaluator, self.special_object_identifier)
-
-    def py__class__(self):
-        return self.get_object().py__class__()
+class _AttributeOverwriteMixin(object):
+    def get_filters(self, search_global=False, *args, **kwargs):
+        yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_context)
+
+        for filter in self._wrapped_context.get_filters(search_global):
+            yield filter
+
+
+class LazyAttributeOverwrite(use_metaclass(_OverwriteMeta, _AttributeOverwriteMixin,
+                                           LazyContextWrapper)):
+    def __init__(self, evaluator):
+        self.evaluator = evaluator
+
+
+class AttributeOverwrite(use_metaclass(_OverwriteMeta, _AttributeOverwriteMixin,
+                                       ContextWrapper)):
+    pass


 def publish_method(method_name, python_version_match=None):
@@ -449,10 +369,11 @@ def get_global_filters(evaluator, context, until_position, origin_scope):

     First we get the names from the function scope.

-    >>> no_unicode_pprint(filters[0]) #doctest: +ELLIPSIS
+    >>> no_unicode_pprint(filters[0])  # doctest: +ELLIPSIS
     MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>)
-    >>> sorted(str(n) for n in filters[0].values())
-    ['<TreeNameDefinition: func@(3, 4)>', '<TreeNameDefinition: x@(2, 0)>']
+    >>> sorted(str(n) for n in filters[0].values())  # doctest: +NORMALIZE_WHITESPACE
+    ['<TreeNameDefinition: string_name=func start_pos=(3, 4)>',
+     '<TreeNameDefinition: string_name=x start_pos=(2, 0)>']
     >>> filters[0]._filters[0]._until_position
     (4, 0)
     >>> filters[0]._filters[1]._until_position
@@ -470,8 +391,8 @@ def get_global_filters(evaluator, context, until_position, origin_scope):
     Finally, it yields the builtin filter, if `include_builtin` is
     true (default).

-    >>> filters[3].values() #doctest: +ELLIPSIS
-    [<CompiledName: ...>, ...]
+    >>> list(filters[3].values())  # doctest: +ELLIPSIS
+    [...]
     """
     from jedi.evaluate.context.function import FunctionExecutionContext
     while context is not None:
@@ -488,5 +409,5 @@ def get_global_filters(evaluator, context, until_position, origin_scope):
         context = context.parent_context

     # Add builtins to the global scope.
-    for filter in evaluator.builtins_module.get_filters(search_global=True):
+    for filter in evaluator.builtins_module.get_filters():
         yield filter
@@ -19,16 +19,17 @@ from parso.python import tree
|
||||
from parso.tree import search_ancestor
|
||||
from jedi import debug
|
||||
from jedi import settings
|
||||
from jedi.evaluate.context import AbstractInstanceContext
|
||||
from jedi.evaluate import compiled
|
||||
from jedi.evaluate import analysis
|
||||
from jedi.evaluate import flow_analysis
|
||||
from jedi.evaluate.arguments import TreeArguments
|
||||
from jedi.evaluate import helpers
|
||||
from jedi.evaluate.context import iterable
|
||||
from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
|
||||
from jedi.evaluate.base_context import ContextSet
|
||||
from jedi.evaluate.filters import get_global_filters
|
||||
from jedi.evaluate.names import TreeNameDefinition
|
||||
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
|
||||
from jedi.parser_utils import is_scope, get_parent_scope
|
||||
from jedi.evaluate.gradual.conversion import convert_contexts
|
||||
|
||||
|
||||
class NameFinder(object):
|
||||
@@ -47,7 +48,6 @@ class NameFinder(object):
|
||||
self._found_predefined_types = None
|
||||
self._analysis_errors = analysis_errors
|
||||
|
||||
@debug.increase_indent
|
||||
     def find(self, filters, attribute_lookup):
         """
         :params bool attribute_lookup: Tell to logic if we're accessing the
@@ -61,7 +61,7 @@ class NameFinder(object):
                     node=self._name,
                 )
                 if check is flow_analysis.UNREACHABLE:
-                    return ContextSet()
+                    return NO_CONTEXTS
             return self._found_predefined_types

         types = self._names_to_types(names, attribute_lookup)
@@ -110,13 +110,22 @@ class NameFinder(object):
             ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef')
             if ancestor is not None:
                 colon = ancestor.children[-2]
-                if position < colon.start_pos:
+                if position is not None and position < colon.start_pos:
                     if lambdef is None or position < lambdef.children[-2].start_pos:
                         position = ancestor.start_pos

             return get_global_filters(self._evaluator, self._context, position, origin_scope)
         else:
-            return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)
+            return self._get_context_filters(origin_scope)
+
+    def _get_context_filters(self, origin_scope):
+        for f in self._context.get_filters(False, self._position, origin_scope=origin_scope):
+            yield f
+        # This covers cases where stub files are incomplete.
+        if self._context.is_stub():
+            for c in convert_contexts(ContextSet({self._context})):
+                for f in c.get_filters():
+                    yield f

     def filter_name(self, filters):
         """
@@ -124,11 +133,13 @@ class NameFinder(object):
         ``filters``), until a name fits.
         """
         names = []
         # This paragraph is currently needed for proper branch evaluation
         # (static analysis).
         if self._context.predefined_names and isinstance(self._name, tree.Name):
             node = self._name
             while node is not None and not is_scope(node):
                 node = node.parent
-                if node.type in ("if_stmt", "for_stmt", "comp_for"):
+                if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'):
                     try:
                         name_dict = self._context.predefined_names[node]
                         types = name_dict[self._string_name]
@@ -150,7 +161,8 @@ class NameFinder(object):
                     # it's kind of hard, because for Jedi it's not clear
                     # that that name has not been defined, yet.
                     if n.tree_name == self._name:
-                        if self._name.get_definition().type == 'import_from':
+                        def_ = self._name.get_definition()
+                        if def_ is not None and def_.type == 'import_from':
                             continue
                     break

@@ -178,16 +190,17 @@ class NameFinder(object):
         contexts = ContextSet.from_sets(name.infer() for name in names)

         debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
-        if not names and isinstance(self._context, AbstractInstanceContext):
+        if not names and self._context.is_instance() and not self._context.is_compiled():
             # handling __getattr__ / __getattribute__
             return self._check_getattr(self._context)

         # Add isinstance and other if/assert knowledge.
         if not contexts and isinstance(self._name, tree.Name) and \
-                not isinstance(self._name_context, AbstractInstanceContext):
+                not self._name_context.is_instance() and not self._context.is_compiled():
             flow_scope = self._name
-            base_node = self._name_context.tree_node
-            if base_node.type == 'comp_for':
+            base_nodes = [self._name_context.tree_node]
+
+            if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes):
                 return contexts
             while True:
                 flow_scope = get_parent_scope(flow_scope, include_flows=True)
@@ -195,7 +208,7 @@ class NameFinder(object):
                                                 self._name, self._position)
                 if n is not None:
                     return n
-                if flow_scope == base_node:
+                if flow_scope in base_nodes:
                     break
         return contexts

@@ -267,12 +280,11 @@ def _check_isinstance_type(context, element, search_name):
     except AssertionError:
         return None

-    context_set = ContextSet()
+    context_set = NO_CONTEXTS
     for cls_or_tup in lazy_context_cls.infer():
         if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
             for lazy_context in cls_or_tup.py__iter__():
-                for context in lazy_context.infer():
-                    context_set |= context.execute_evaluated()
+                context_set |= lazy_context.infer().execute_evaluated()
         else:
             context_set |= cls_or_tup.execute_evaluated()
     return context_set
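The `_check_isinstance_type` hunk above flattens the tuple case so that inferred classes are executed as one context set. A minimal sketch of the user code this path serves (plain Python, illustrative, not jedi's API): after an `isinstance` check against a tuple of classes, jedi narrows the name to those classes inside the branch.

def describe(obj):
    if isinstance(obj, (list, tuple)):
        # Within this branch jedi treats `obj` as list/tuple, so
        # completions include sequence methods such as `count` and `index`.
        return len(obj)
    return None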
jedi/evaluate/gradual/__init__.py (new file, 0 lines)

jedi/evaluate/gradual/annotation.py (new file, 384 lines)
@@ -0,0 +1,384 @@
"""
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the kind of type hinting defined in PEP 0484 should be allowed
as annotations in future Python versions.
"""

import re

from parso import ParserSyntaxError, parse

from jedi._compatibility import force_unicode
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typing import TypeVar, LazyGenericClass, \
    AbstractAnnotatedClass
from jedi.evaluate.helpers import is_string
from jedi import debug
from jedi import parser_utils


def eval_annotation(context, annotation):
    """
    Evaluates an annotation node. This means that it evaluates the part of
    `int` here:

        foo: int = 3

    Also checks for forward references (strings).
    """
    context_set = context.eval_node(annotation)
    if len(context_set) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      "not %s" % (annotation, context_set))
        return context_set

    evaled_context = list(context_set)[0]
    if is_string(evaled_context):
        result = _get_forward_reference_node(context, evaled_context.get_safe_value())
        if result is not None:
            return context.eval_node(result)
    return context_set
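A forward reference is simply an annotation written as a string; `eval_annotation` re-parses it and evaluates the resulting node. Example user code this covers (plain Python, runnable on 3.6+):

class Node:
    # The string 'Node' is a forward reference: the class does not exist
    # yet while its own body is being evaluated.
    next: 'Node'
    value: int = 0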
def _evaluate_annotation_string(context, string, index=None):
    node = _get_forward_reference_node(context, string)
    if node is None:
        return NO_CONTEXTS

    context_set = context.eval_node(node)
    if index is not None:
        context_set = context_set.filter(
            lambda context: context.array_type == u'tuple'  # noqa
                            and len(list(context.py__iter__())) >= index
        ).py__simple_getitem__(index)
    return context_set


def _get_forward_reference_node(context, string):
    try:
        new_node = context.evaluator.grammar.parse(
            force_unicode(string),
            start_symbol='eval_input',
            error_recovery=False
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % string)
        return None
    else:
        module = context.tree_node.get_root_node()
        parser_utils.move(new_node, module.end_pos[0])
        new_node.parent = context.tree_node
        return new_node


def _split_comment_param_declaration(decl_text):
    """
    Split decl_text on commas, but group generic expressions
    together.

    For example, given "foo, Bar[baz, biz]" we return
    ['foo', 'Bar[baz, biz]'].
    """
    try:
        node = parse(decl_text, error_recovery=False).children[0]
    except ParserSyntaxError:
        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
        return []

    if node.type == 'name':
        return [node.get_code().strip()]

    params = []
    try:
        children = node.children
    except AttributeError:
        return []
    else:
        for child in children:
            if child.type in ['name', 'atom_expr', 'power']:
                params.append(child.get_code().strip())

    return params


@evaluator_method_cache()
def infer_param(execution_context, param):
    """
    Infers the type of a function parameter, using type annotations.
    """
    annotation = param.annotation
    if annotation is None:
        # If there is no Python 3-style annotation, look for a Python 2-style
        # comment annotation.
        # Identify the parameters of the function in the same sequence as
        # they would appear in a type comment.
        all_params = [child for child in param.parent.children
                      if child.type == 'param']

        node = param.parent.parent
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
        if not match:
            return NO_CONTEXTS
        params_comments = _split_comment_param_declaration(match.group(1))

        # Find the specific param being investigated.
        index = all_params.index(param)
        # If the number of parameters doesn't match the length of the type
        # comment, ignore the first parameter (assume it's self).
        if len(params_comments) != len(all_params):
            debug.warning(
                "Comments length != Params length %s %s",
                params_comments, all_params
            )
        from jedi.evaluate.context.instance import InstanceArguments
        if isinstance(execution_context.var_args, InstanceArguments):
            if index == 0:
                # Assume it's self, which is already handled.
                return NO_CONTEXTS
            index -= 1
        if index >= len(params_comments):
            return NO_CONTEXTS

        param_comment = params_comments[index]
        return _evaluate_annotation_string(
            execution_context.function_context.get_default_param_context(),
            param_comment
        )
    # Annotations are like default params and resolve in the same way.
    context = execution_context.function_context.get_default_param_context()
    return eval_annotation(context, annotation)
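`infer_param` therefore accepts either annotation style. A minimal runnable illustration of both (user code, not jedi internals):

def f(a: int, b: str) -> bool:      # Python 3 annotations
    return b * a == b


def g(a, b):
    # type: (int, str) -> bool
    return b * a == b               # Python 2 type comment, parsed above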
def py__annotations__(funcdef):
    dct = {}
    for function_param in funcdef.get_params():
        param_annotation = function_param.annotation
        if param_annotation is not None:
            dct[function_param.name.value] = param_annotation

    return_annotation = funcdef.annotation
    if return_annotation:
        dct['return'] = return_annotation
    return dct


@evaluator_method_cache()
def infer_return_types(function_execution_context):
    """
    Infers the type of a function's return value,
    according to type annotations.
    """
    all_annotations = py__annotations__(function_execution_context.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-style annotation, look for a Python 2-style
        # comment annotation.
        node = function_execution_context.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_CONTEXTS

        return _evaluate_annotation_string(
            function_execution_context.function_context.get_default_param_context(),
            match.group(1).strip()
        ).execute_annotation()
    if annotation is None:
        return NO_CONTEXTS

    context = function_execution_context.function_context.get_default_param_context()
    unknown_type_vars = list(find_unknown_type_vars(context, annotation))
    annotation_contexts = eval_annotation(context, annotation)
    if not unknown_type_vars:
        return annotation_contexts.execute_annotation()

    type_var_dict = infer_type_vars_for_execution(function_execution_context, all_annotations)

    return ContextSet.from_sets(
        ann.define_generics(type_var_dict)
        if isinstance(ann, (AbstractAnnotatedClass, TypeVar)) else ContextSet({ann})
        for ann in annotation_contexts
    ).execute_annotation()
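When the return annotation involves type variables, `infer_return_types` falls through to the type-var inference below. An example of a signature that needs it (runnable, user code):

from typing import Iterable, TypeVar

T = TypeVar('T')

def first(items: Iterable[T]) -> T:
    # The return type is only known once T is bound from the argument.
    for item in items:
        return item

x = first([1, 2, 3])   # jedi binds T -> int here, so `x` completes as int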
def infer_type_vars_for_execution(execution_context, annotation_dict):
    """
    Some functions use type vars that are not defined by the class, but rather
    only defined in the function. See for example `iter`. In those cases we
    want to:

    1. Search for undefined type vars.
    2. Infer type vars with the execution state we have.
    3. Return the union of all type vars that have been found.
    """
    context = execution_context.function_context.get_default_param_context()

    annotation_variable_results = {}
    executed_params, _ = execution_context.get_executed_params_and_issues()
    for executed_param in executed_params:
        try:
            annotation_node = annotation_dict[executed_param.string_name]
        except KeyError:
            continue

        annotation_variables = find_unknown_type_vars(context, annotation_node)
        if annotation_variables:
            # Infer unknown type vars.
            annotation_context_set = context.eval_node(annotation_node)
            star_count = executed_param._param_node.star_count
            actual_context_set = executed_param.infer(use_hints=False)
            if star_count == 1:
                actual_context_set = actual_context_set.merge_types_of_iterate()
            elif star_count == 2:
                # TODO _dict_values is not public.
                actual_context_set = actual_context_set.try_merge('_dict_values')
            for ann in annotation_context_set:
                _merge_type_var_dicts(
                    annotation_variable_results,
                    _infer_type_vars(ann, actual_context_set),
                )

    return annotation_variable_results


def _merge_type_var_dicts(base_dict, new_dict):
    for type_var_name, contexts in new_dict.items():
        try:
            base_dict[type_var_name] |= contexts
        except KeyError:
            base_dict[type_var_name] = contexts


def _infer_type_vars(annotation_context, context_set):
    """
    This function tries to find information about undefined type vars and
    returns a dict from type var name to context set.

    This is for example important to understand what `iter([1])` returns.
    According to typeshed, `iter` returns an `Iterator[_T]`:

        def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

    This function would generate `int` for `_T` in this case, because it
    unpacks the `Iterable`.
    """
    type_var_dict = {}
    if isinstance(annotation_context, TypeVar):
        return {annotation_context.py__name__(): context_set.py__class__()}
    elif isinstance(annotation_context, LazyGenericClass):
        name = annotation_context.py__name__()
        if name == 'Iterable':
            given = annotation_context.get_generics()
            if given:
                for nested_annotation_context in given[0]:
                    _merge_type_var_dicts(
                        type_var_dict,
                        _infer_type_vars(
                            nested_annotation_context,
                            context_set.merge_types_of_iterate()
                        )
                    )
        elif name == 'Mapping':
            given = annotation_context.get_generics()
            if len(given) == 2:
                for context in context_set:
                    try:
                        method = context.get_mapping_item_contexts
                    except AttributeError:
                        continue
                    key_contexts, value_contexts = method()

                    for nested_annotation_context in given[0]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                key_contexts,
                            )
                        )
                    for nested_annotation_context in given[1]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                value_contexts,
                            )
                        )
    return type_var_dict
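The `Mapping` branch above binds two type vars at once. A runnable illustration (user code):

from typing import Mapping, TypeVar

K = TypeVar('K')
V = TypeVar('V')

def first_key(m: Mapping[K, V]) -> K:
    return next(iter(m))

k = first_key({'a': 1})   # K -> str, V -> int; `k` completes as str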
def find_type_from_comment_hint_for(context, node, name):
    return _find_type_from_comment_hint(context, node, node.children[1], name)


def find_type_from_comment_hint_with(context, node, name):
    assert len(node.children[1].children) == 3, \
        "Can only be here when children[1] is 'foo() as f'"
    varlist = node.children[1].children[2]
    return _find_type_from_comment_hint(context, node, varlist, name)


def find_type_from_comment_hint_assign(context, node, name):
    return _find_type_from_comment_hint(context, node, node.children[0], name)


def _find_type_from_comment_hint(context, node, varlist, name):
    index = None
    if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
        # something like "a, b = 1, 2"
        index = 0
        for child in varlist.children:
            if child == name:
                break
            if child.type == "operator":
                continue
            index += 1
        else:
            return []

    comment = parser_utils.get_following_comment_same_line(node)
    if comment is None:
        return []
    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
    if match is None:
        return []
    return _evaluate_annotation_string(
        context, match.group(1).strip(), index
    ).execute_annotation()
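These helpers cover `for`, `with`, and assignment statements respectively. The comment placements they look for (runnable sketch; the `typing` import is only needed for jedi to resolve the hints):

import io
from typing import IO

for x in [1, 2]:  # type: int
    pass

with io.StringIO() as fp:  # type: IO[str]
    pass

a, b = 1, 'x'  # type: int, str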
def find_unknown_type_vars(context, node):
    def check_node(node):
        if node.type in ('atom_expr', 'power'):
            trailer = node.children[-1]
            if trailer.type == 'trailer' and trailer.children[0] == '[':
                for subscript_node in _unpack_subscriptlist(trailer.children[1]):
                    check_node(subscript_node)
        else:
            type_var_set = context.eval_node(node)
            for type_var in type_var_set:
                if isinstance(type_var, TypeVar) and type_var not in found:
                    found.append(type_var)

    found = []  # We're not using a set, because the order matters.
    check_node(node)
    return found


def _unpack_subscriptlist(subscriptlist):
    if subscriptlist.type == 'subscriptlist':
        for subscript in subscriptlist.children[::2]:
            if subscript.type != 'subscript':
                yield subscript
    else:
        if subscriptlist.type != 'subscript':
            yield subscriptlist
jedi/evaluate/gradual/conversion.py (new file, 198 lines)
@@ -0,0 +1,198 @@
from jedi import debug
from jedi.evaluate.base_context import ContextSet, \
    NO_CONTEXTS
from jedi.evaluate.utils import to_list
from jedi.evaluate.gradual.stub_context import StubModuleContext


def _stub_to_python_context_set(stub_context, ignore_compiled=False):
    stub_module = stub_context.get_root_context()
    if not stub_module.is_stub():
        return ContextSet([stub_context])

    was_instance = stub_context.is_instance()
    if was_instance:
        stub_context = stub_context.py__class__()

    qualified_names = stub_context.get_qualified_names()
    if qualified_names is None:
        return NO_CONTEXTS

    was_bound_method = stub_context.is_bound_method()
    if was_bound_method:
        # Infer the object first. We can infer the method later.
        method_name = qualified_names[-1]
        qualified_names = qualified_names[:-1]
        was_instance = True

    contexts = _infer_from_stub(stub_module, qualified_names, ignore_compiled)
    if was_instance:
        contexts = ContextSet.from_sets(
            c.execute_evaluated()
            for c in contexts
            if c.is_class()
        )
    if was_bound_method:
        # Now that the instance has been properly created, we can simply get
        # the method.
        contexts = contexts.py__getattribute__(method_name)
    return contexts


def _infer_from_stub(stub_module, qualified_names, ignore_compiled):
    assert isinstance(stub_module, StubModuleContext), stub_module
    non_stubs = stub_module.non_stub_context_set
    if ignore_compiled:
        non_stubs = non_stubs.filter(lambda c: not c.is_compiled())
    for name in qualified_names:
        non_stubs = non_stubs.py__getattribute__(name)
    return non_stubs


@to_list
def _try_stub_to_python_names(names, prefer_stub_to_compiled=False):
    for name in names:
        module = name.get_root_context()
        if not module.is_stub():
            yield name
            continue

        name_list = name.get_qualified_names()
        if name_list is None:
            contexts = NO_CONTEXTS
        else:
            contexts = _infer_from_stub(
                module,
                name_list[:-1],
                ignore_compiled=prefer_stub_to_compiled,
            )
        if contexts and name_list:
            new_names = contexts.py__getattribute__(name_list[-1], is_goto=True)
            for new_name in new_names:
                yield new_name
            if new_names:
                continue
        elif contexts:
            for c in contexts:
                yield c.name
            continue
        # If we haven't found anything, just fall back to the stub name.
        yield name


def _load_stub_module(module):
    if module.is_stub():
        return module
    from jedi.evaluate.gradual.typeshed import _try_to_load_stub_cached
    return _try_to_load_stub_cached(
        module.evaluator,
        import_names=module.string_names,
        python_context_set=ContextSet([module]),
        parent_module_context=None,
        sys_path=module.evaluator.get_sys_path(),
    )


@to_list
def _python_to_stub_names(names, fallback_to_python=False):
    for name in names:
        module = name.get_root_context()
        if module.is_stub():
            yield name
            continue

        if name.is_import():
            for new_name in name.goto():
                # Imports don't need to be converted, because they are
                # already stubs if possible.
                if fallback_to_python or new_name.is_stub():
                    yield new_name
            continue

        name_list = name.get_qualified_names()
        stubs = NO_CONTEXTS
        if name_list is not None:
            stub_module = _load_stub_module(module)
            if stub_module is not None:
                stubs = ContextSet({stub_module})
                for name in name_list[:-1]:
                    stubs = stubs.py__getattribute__(name)
        if stubs and name_list:
            new_names = stubs.py__getattribute__(name_list[-1], is_goto=True)
            for new_name in new_names:
                yield new_name
            if new_names:
                continue
        elif stubs:
            for c in stubs:
                yield c.name
            continue
        if fallback_to_python:
            # If no stub name was found, fall back to the Python name.
            yield name


def convert_names(names, only_stubs=False, prefer_stubs=False):
    assert not (only_stubs and prefer_stubs)
    with debug.increase_indent_cm('convert names'):
        if only_stubs or prefer_stubs:
            return _python_to_stub_names(names, fallback_to_python=prefer_stubs)
        else:
            return _try_stub_to_python_names(names, prefer_stub_to_compiled=True)
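In jedi's public API of this era these conversions surface as flags on the goto methods; a sketch assuming jedi 0.14, where `goto_definitions` gained `only_stubs`/`prefer_stubs` parameters:

import jedi

script = jedi.Script('import os\nos.walk', 2, 7)
print(script.goto_definitions())                   # the .py implementation
print(script.goto_definitions(prefer_stubs=True))  # the typeshed .pyi stub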
def convert_contexts(contexts, only_stubs=False, prefer_stubs=False, ignore_compiled=True):
    assert not (only_stubs and prefer_stubs)
    with debug.increase_indent_cm('convert contexts'):
        if only_stubs or prefer_stubs:
            return ContextSet.from_sets(
                to_stub(context)
                or (ContextSet({context}) if prefer_stubs else NO_CONTEXTS)
                for context in contexts
            )
        else:
            return ContextSet.from_sets(
                _stub_to_python_context_set(stub_context, ignore_compiled=ignore_compiled)
                or ContextSet({stub_context})
                for stub_context in contexts
            )


# TODO merge with _python_to_stub_names?
def to_stub(context):
    if context.is_stub():
        return ContextSet([context])

    was_instance = context.is_instance()
    if was_instance:
        context = context.py__class__()

    qualified_names = context.get_qualified_names()
    stub_module = _load_stub_module(context.get_root_context())
    if stub_module is None or qualified_names is None:
        return NO_CONTEXTS

    was_bound_method = context.is_bound_method()
    if was_bound_method:
        # Infer the object first. We can infer the method later.
        method_name = qualified_names[-1]
        qualified_names = qualified_names[:-1]
        was_instance = True

    stub_contexts = ContextSet([stub_module])
    for name in qualified_names:
        stub_contexts = stub_contexts.py__getattribute__(name)

    if was_instance:
        stub_contexts = ContextSet.from_sets(
            c.execute_evaluated()
            for c in stub_contexts
            if c.is_class()
        )
    if was_bound_method:
        # Now that the instance has been properly created, we can simply get
        # the method.
        stub_contexts = stub_contexts.py__getattribute__(method_name)
    return stub_contexts
jedi/evaluate/gradual/stub_context.py (new file, 105 lines)
@@ -0,0 +1,105 @@
from jedi.evaluate.base_context import ContextWrapper
from jedi.evaluate.context.module import ModuleContext
from jedi.evaluate.filters import ParserTreeFilter, \
    TreeNameDefinition
from jedi.evaluate.gradual.typing import TypingModuleFilterWrapper


class StubModuleContext(ModuleContext):
    def __init__(self, non_stub_context_set, *args, **kwargs):
        super(StubModuleContext, self).__init__(*args, **kwargs)
        self.non_stub_context_set = non_stub_context_set

    def is_stub(self):
        return True

    def sub_modules_dict(self):
        """
        We have to override this, because it's possible to have stubs that
        don't have code for all the child modules. At the time of writing
        this there are for example no stubs for `json.tool`.
        """
        names = {}
        for context in self.non_stub_context_set:
            try:
                method = context.sub_modules_dict
            except AttributeError:
                pass
            else:
                names.update(method())
        names.update(super(StubModuleContext, self).sub_modules_dict())
        return names

    def _get_first_non_stub_filters(self):
        for context in self.non_stub_context_set:
            yield next(context.get_filters(search_global=False))

    def _get_stub_filters(self, search_global, **filter_kwargs):
        return [StubFilter(
            self.evaluator,
            context=self,
            search_global=search_global,
            **filter_kwargs
        )] + list(self.iter_star_filters(search_global=search_global))

    def get_filters(self, search_global=False, until_position=None,
                    origin_scope=None, **kwargs):
        filters = super(StubModuleContext, self).get_filters(
            search_global, until_position, origin_scope, **kwargs
        )
        next(filters)  # Ignore the first filter and replace it with our own.
        stub_filters = self._get_stub_filters(
            search_global=search_global,
            until_position=until_position,
            origin_scope=origin_scope,
        )
        for f in stub_filters:
            yield f

        for f in filters:
            yield f


class TypingModuleWrapper(StubModuleContext):
    def get_filters(self, *args, **kwargs):
        filters = super(TypingModuleWrapper, self).get_filters(*args, **kwargs)
        yield TypingModuleFilterWrapper(next(filters))
        for f in filters:
            yield f


# From here on down we make looking up sys.version_info fast.
class _StubName(TreeNameDefinition):
    def infer(self):
        inferred = super(_StubName, self).infer()
        if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys':
            return [VersionInfo(c) for c in inferred]
        return inferred


class StubFilter(ParserTreeFilter):
    name_class = _StubName

    def __init__(self, *args, **kwargs):
        self._search_global = kwargs.pop('search_global')  # Python 2 :/
        super(StubFilter, self).__init__(*args, **kwargs)

    def _is_name_reachable(self, name):
        if not super(StubFilter, self)._is_name_reachable(name):
            return False

        if not self._search_global:
            # Imports in stub files are only public if they have an "as"
            # export.
            definition = name.get_definition()
            if definition.type in ('import_from', 'import_name'):
                if name.parent.type not in ('import_as_name', 'dotted_as_name'):
                    return False
            n = name.value
            if n.startswith('_') and not (n.startswith('__') and n.endswith('__')):
                return False
        return True
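What `_is_name_reachable` means for stub authors, as a runnable stub-like snippet (illustrative; mirrors typeshed's re-export convention):

import os                      # hidden: plain import, no "as" re-export
import os as os                # visible: import_as_name re-export
from sys import path as path   # visible: explicit "as" re-export

_helper = 1                    # hidden: single leading underscore
__version__ = '1.0'            # visible: dunder names stay public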
class VersionInfo(ContextWrapper):
    pass
jedi/evaluate/gradual/typeshed.py (new file, 287 lines)
@@ -0,0 +1,287 @@
import os
import re

from jedi.file_io import FileIO
from jedi._compatibility import FileNotFoundError, cast_path
from jedi.parser_utils import get_cached_code_lines
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.stub_context import TypingModuleWrapper, StubModuleContext

_jedi_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TYPESHED_PATH = os.path.join(_jedi_path, 'third_party', 'typeshed')

_IMPORT_MAP = dict(
    _collections='collections',
    _socket='socket',
)


def _merge_create_stub_map(directories):
    map_ = {}
    for directory in directories:
        map_.update(_create_stub_map(directory))
    return map_


def _create_stub_map(directory):
    """
    Create a mapping of an importable name in Python to a stub file.
    """
    def generate():
        try:
            listed = os.listdir(directory)
        except (FileNotFoundError, OSError):
            # OSError is Python 2
            return

        for entry in listed:
            entry = cast_path(entry)
            path = os.path.join(directory, entry)
            if os.path.isdir(path):
                init = os.path.join(path, '__init__.pyi')
                if os.path.isfile(init):
                    yield entry, init
            elif entry.endswith('.pyi') and os.path.isfile(path):
                name = entry.rstrip('.pyi')
                if name != '__init__':
                    yield name, path

    # Create a dictionary from the tuple generator.
    return dict(generate())


def _get_typeshed_directories(version_info):
    check_version_list = ['2and3', str(version_info.major)]
    for base in ['stdlib', 'third_party']:
        base = os.path.join(TYPESHED_PATH, base)
        base_list = os.listdir(base)
        for base_list_entry in base_list:
            match = re.match(r'(\d+)\.(\d+)$', base_list_entry)
            if match is not None:
                if int(match.group(1)) == version_info.major \
                        and int(match.group(2)) <= version_info.minor:
                    check_version_list.append(base_list_entry)

    for check_version in check_version_list:
        yield os.path.join(base, check_version)


_version_cache = {}


def _cache_stub_file_map(version_info):
    """
    Returns a map of an importable name in Python to a stub file.
    """
    # TODO this caches the stub files indefinitely, maybe use a time cache
    # for that?
    version = version_info[:2]
    try:
        return _version_cache[version]
    except KeyError:
        pass

    _version_cache[version] = file_set = \
        _merge_create_stub_map(_get_typeshed_directories(version_info))
    return file_set


def import_module_decorator(func):
    def wrapper(evaluator, import_names, parent_module_context, sys_path, prefer_stubs):
        try:
            python_context_set = evaluator.module_cache.get(import_names)
        except KeyError:
            if parent_module_context is not None and parent_module_context.is_stub():
                parent_module_contexts = parent_module_context.non_stub_context_set
            else:
                parent_module_contexts = [parent_module_context]
            if import_names == ('os', 'path'):
                # This is a huge exception: we follow the nested import
                # ``os.path``, because it's a very important one in Python
                # that is achieved by messing with ``sys.modules`` in ``os``.
                python_parent = next(iter(parent_module_contexts))
                if python_parent is None:
                    python_parent, = evaluator.import_module(('os',), prefer_stubs=False)
                python_context_set = python_parent.py__getattribute__('path')
            else:
                python_context_set = ContextSet.from_sets(
                    func(evaluator, import_names, p, sys_path,)
                    for p in parent_module_contexts
                )
            evaluator.module_cache.add(import_names, python_context_set)

        if not prefer_stubs:
            return python_context_set

        stub = _try_to_load_stub_cached(evaluator, import_names, python_context_set,
                                        parent_module_context, sys_path)
        if stub is not None:
            return ContextSet([stub])
        return python_context_set

    return wrapper


def _try_to_load_stub_cached(evaluator, import_names, *args, **kwargs):
    try:
        return evaluator.stub_module_cache[import_names]
    except KeyError:
        pass

    # TODO is this needed? where are the exceptions coming from that make this
    # necessary? Just remove this line.
    evaluator.stub_module_cache[import_names] = None
    evaluator.stub_module_cache[import_names] = result = \
        _try_to_load_stub(evaluator, import_names, *args, **kwargs)
    return result


def _try_to_load_stub(evaluator, import_names, python_context_set,
                      parent_module_context, sys_path):
    """
    Trying to load a stub for a set of import_names.

    This is modelled to work like "PEP 561 -- Distributing and Packaging Type
    Information", see https://www.python.org/dev/peps/pep-0561.
    """
    if parent_module_context is None and len(import_names) > 1:
        try:
            parent_module_context = _try_to_load_stub_cached(
                evaluator, import_names[:-1], NO_CONTEXTS,
                parent_module_context=None, sys_path=sys_path)
        except KeyError:
            pass

    # 1. Try to load foo-stubs folders on path for import name foo.
    if len(import_names) == 1:
        # foo-stubs
        for p in sys_path:
            init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
            m = _try_to_load_stub_from_file(
                evaluator,
                python_context_set,
                file_io=FileIO(init),
                import_names=import_names,
            )
            if m is not None:
                return m

    # 2. Try to load pyi files next to py files.
    for c in python_context_set:
        try:
            method = c.py__file__
        except AttributeError:
            pass
        else:
            file_path = method()
            file_paths = []
            if c.is_namespace():
                file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()]
            elif file_path is not None and file_path.endswith('.py'):
                file_paths = [file_path + 'i']

            for file_path in file_paths:
                m = _try_to_load_stub_from_file(
                    evaluator,
                    python_context_set,
                    # The file path should end with .pyi
                    file_io=FileIO(file_path),
                    import_names=import_names,
                )
                if m is not None:
                    return m

    # 3. Try to load typeshed.
    m = _load_from_typeshed(evaluator, python_context_set, parent_module_context, import_names)
    if m is not None:
        return m

    # 4. Try to load a pyi file somewhere if python_context_set was not defined.
    if not python_context_set:
        if parent_module_context is not None:
            try:
                method = parent_module_context.py__path__
            except AttributeError:
                check_path = []
            else:
                check_path = method()
            # For nested imports only the last name is relevant here.
            names_for_path = (import_names[-1],)
        else:
            check_path = sys_path
            names_for_path = import_names

        for p in check_path:
            m = _try_to_load_stub_from_file(
                evaluator,
                python_context_set,
                file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'),
                import_names=import_names,
            )
            if m is not None:
                return m

    # If no stub is found, that's fine, the calling function has to deal with
    # it.
    return None
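A standalone sketch of where steps 1, 2 and 4 above look on disk for `import foo` (hypothetical helper, not jedi's API):

import os

def candidate_stub_paths(import_names, sys_path):
    # e.g. import_names == ('foo',)
    for p in sys_path:
        base = os.path.join(p, *import_names)
        yield base + '-stubs' + os.path.sep + '__init__.pyi'  # step 1: PEP 561 stub package
        yield base + '.pyi'                                   # steps 2/4: .pyi next to .py
        yield os.path.join(base, '__init__.pyi')              # package stub

print(list(candidate_stub_paths(('foo',), ['/usr/lib/site-packages'])))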
def _load_from_typeshed(evaluator, python_context_set, parent_module_context, import_names):
    import_name = import_names[-1]
    map_ = None
    if len(import_names) == 1:
        map_ = _cache_stub_file_map(evaluator.grammar.version_info)
        import_name = _IMPORT_MAP.get(import_name, import_name)
    elif isinstance(parent_module_context, StubModuleContext):
        if not parent_module_context.is_package:
            # Only if it's a package (= a folder) something can be
            # imported.
            return None
        path = parent_module_context.py__path__()
        map_ = _merge_create_stub_map(path)

    if map_ is not None:
        path = map_.get(import_name)
        if path is not None:
            return _try_to_load_stub_from_file(
                evaluator,
                python_context_set,
                file_io=FileIO(path),
                import_names=import_names,
            )


def _try_to_load_stub_from_file(evaluator, python_context_set, file_io, import_names):
    try:
        stub_module_node = evaluator.parse(
            file_io=file_io,
            cache=True,
            use_latest_grammar=True
        )
    except (OSError, IOError):  # IOError is Python 2 only
        # The file that you're looking for doesn't exist (anymore).
        return None
    else:
        return create_stub_module(
            evaluator, python_context_set, stub_module_node, file_io,
            import_names
        )


def create_stub_module(evaluator, python_context_set, stub_module_node, file_io, import_names):
    if import_names == ('typing',):
        module_cls = TypingModuleWrapper
    else:
        module_cls = StubModuleContext
    file_name = os.path.basename(file_io.path)
    stub_module_context = module_cls(
        python_context_set, evaluator, stub_module_node,
        file_io=file_io,
        string_names=import_names,
        # The code was loaded with latest_grammar, so use that.
        code_lines=get_cached_code_lines(evaluator.latest_grammar, file_io.path),
        is_package=file_name == '__init__.pyi',
    )
    return stub_module_context
jedi/evaluate/gradual/typing.py (new file, 708 lines)
@@ -0,0 +1,708 @@
"""
We need to somehow work with the typing objects. Since the typing objects are
pretty bare we need to add all the Jedi customizations to make them work as
contexts.

This file deals with all the typing.py cases.
"""
from jedi._compatibility import unicode, force_unicode
from jedi import debug
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.compiled import builtin_from_name
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \
    iterator_to_context_set, HelperContextMixin, ContextWrapper
from jedi.evaluate.lazy_context import LazyKnownContexts
from jedi.evaluate.context.iterable import SequenceLiteralContext
from jedi.evaluate.arguments import repack_with_argument_clinic
from jedi.evaluate.utils import to_list
from jedi.evaluate.filters import FilterWrapper
from jedi.evaluate.names import NameWrapper, AbstractTreeName, \
    AbstractNameDefinition, ContextName
from jedi.evaluate.helpers import is_string
from jedi.evaluate.context.klass import ClassMixin, ClassFilter

_PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split()
_TYPE_ALIAS_TYPES = {
    'List': 'builtins.list',
    'Dict': 'builtins.dict',
    'Set': 'builtins.set',
    'FrozenSet': 'builtins.frozenset',
    'ChainMap': 'collections.ChainMap',
    'Counter': 'collections.Counter',
    'DefaultDict': 'collections.defaultdict',
    'Deque': 'collections.deque',
}
_PROXY_TYPES = 'Optional Union ClassVar'.split()
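What the alias table above encodes, in user code (runnable on 3.6+): the typing aliases resolve straight to their builtin or collections counterparts.

from typing import Dict, List

x: List[int] = []        # jedi resolves List via 'builtins.list'
y: Dict[str, int] = {}   # ... and Dict via 'builtins.dict'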
class TypingName(AbstractTreeName):
    def __init__(self, context, other_name):
        super(TypingName, self).__init__(context.parent_context, other_name.tree_name)
        self._context = context

    def infer(self):
        return ContextSet([self._context])


class _BaseTypingContext(Context):
    def __init__(self, evaluator, parent_context, tree_name):
        super(_BaseTypingContext, self).__init__(evaluator, parent_context)
        self._tree_name = tree_name

    @property
    def tree_node(self):
        return self._tree_name

    def get_filters(self, *args, **kwargs):
        # TODO this is obviously wrong. Is it though?
        class EmptyFilter(ClassFilter):
            def __init__(self):
                pass

            def get(self, name, **kwargs):
                return []

            def values(self, **kwargs):
                return []

        yield EmptyFilter()

    def py__class__(self):
        # TODO this is obviously not correct, but at least gives us a class if
        # we have none. Some of these objects don't really have a base class
        # in typeshed.
        return builtin_from_name(self.evaluator, u'object')

    @property
    def name(self):
        return ContextName(self, self._tree_name)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)


class TypingModuleName(NameWrapper):
    def infer(self):
        return ContextSet(self._remap())

    def _remap(self):
        name = self.string_name
        evaluator = self.parent_context.evaluator
        try:
            actual = _TYPE_ALIAS_TYPES[name]
        except KeyError:
            pass
        else:
            yield TypeAlias.create_cached(evaluator, self.parent_context, self.tree_name, actual)
            return

        if name in _PROXY_CLASS_TYPES:
            yield TypingClassContext.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name in _PROXY_TYPES:
            yield TypingContext.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'runtime':
            # We don't want anything here, not sure what this function is
            # supposed to do, since it just appears in the stubs and shouldn't
            # have any effects there (because it's never executed).
            return
        elif name == 'TypeVar':
            yield TypeVarClass.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'Any':
            yield Any.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'TYPE_CHECKING':
            # This is needed for e.g. imports that are only available for type
            # checking or are in cycles. The user can then check this variable.
            yield builtin_from_name(evaluator, u'True')
        elif name == 'overload':
            yield OverloadFunction.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'NewType':
            yield NewTypeFunction.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'cast':
            # TODO implement cast
            yield CastFunction.create_cached(evaluator, self.parent_context, self.tree_name)
        elif name == 'TypedDict':
            # TODO doesn't even exist in typeshed/typing.py, yet. But will be
            # added soon.
            pass
        elif name in ('no_type_check', 'no_type_check_decorator'):
            # This is not necessary, as long as we are not doing type checking.
            for c in self._wrapped_name.infer():  # Python 2 has no `yield from`.
                yield c
        else:
            # Everything else shouldn't be relevant for type checking.
            for c in self._wrapped_name.infer():  # Python 2 has no `yield from`.
                yield c


class TypingModuleFilterWrapper(FilterWrapper):
    name_wrapper_class = TypingModuleName


class _WithIndexBase(_BaseTypingContext):
    def __init__(self, evaluator, parent_context, name, index_context, context_of_index):
        super(_WithIndexBase, self).__init__(evaluator, parent_context, name)
        self._index_context = index_context
        self._context_of_index = context_of_index

    def __repr__(self):
        return '<%s: %s[%s]>' % (
            self.__class__.__name__,
            self._tree_name.value,
            self._index_context,
        )


class TypingContextWithIndex(_WithIndexBase):
    def execute_annotation(self):
        string_name = self._tree_name.value

        if string_name == 'Union':
            # This is kind of a special case, because we have Unions (in Jedi
            # ContextSets).
            return self.gather_annotation_classes().execute_annotation()
        elif string_name == 'Optional':
            # Optional is basically just saying it's either None or the actual
            # type.
            return self.gather_annotation_classes().execute_annotation() \
                | ContextSet([builtin_from_name(self.evaluator, u'None')])
        elif string_name == 'Type':
            # The type is actually already given in the index_context.
            return ContextSet([self._index_context])
        elif string_name == 'ClassVar':
            # For now don't do anything here, ClassVars are always used.
            return self._index_context.execute_annotation()

        cls = globals()[string_name]
        return ContextSet([cls(
            self.evaluator,
            self.parent_context,
            self._tree_name,
            self._index_context,
            self._context_of_index
        )])

    def gather_annotation_classes(self):
        return ContextSet.from_sets(
            _iter_over_arguments(self._index_context, self._context_of_index)
        )


class TypingContext(_BaseTypingContext):
    index_class = TypingContextWithIndex
    py__simple_getitem__ = None

    def py__getitem__(self, index_context_set, contextualized_node):
        return ContextSet(
            self.index_class.create_cached(
                self.evaluator,
                self.parent_context,
                self._tree_name,
                index_context,
                context_of_index=contextualized_node.context)
            for index_context in index_context_set
        )


class _TypingClassMixin(object):
    def py__bases__(self):
        return [LazyKnownContexts(
            self.evaluator.builtins_module.py__getattribute__('object')
        )]


class TypingClassContextWithIndex(_TypingClassMixin, TypingContextWithIndex, ClassMixin):
    pass


class TypingClassContext(_TypingClassMixin, TypingContext, ClassMixin):
    index_class = TypingClassContextWithIndex


def _iter_over_arguments(maybe_tuple_context, defining_context):
    def iterate():
        if isinstance(maybe_tuple_context, SequenceLiteralContext):
            for lazy_context in maybe_tuple_context.py__iter__(contextualized_node=None):
                yield lazy_context.infer()
        else:
            yield ContextSet([maybe_tuple_context])

    def resolve_forward_references(context_set):
        for context in context_set:
            if is_string(context):
                from jedi.evaluate.gradual.annotation import _get_forward_reference_node
                node = _get_forward_reference_node(defining_context, context.get_safe_value())
                if node is not None:
                    for c in defining_context.eval_node(node):
                        yield c
            else:
                yield context

    for context_set in iterate():
        yield ContextSet(resolve_forward_references(context_set))


class TypeAlias(HelperContextMixin):
    def __init__(self, evaluator, parent_context, origin_tree_name, actual):
        self.evaluator = evaluator
        self.parent_context = parent_context
        self._origin_tree_name = origin_tree_name
        self._actual = actual  # e.g. builtins.list

    @property
    def name(self):
        return ContextName(self, self._origin_tree_name)

    def py__name__(self):
        return self.name.string_name

    def __getattr__(self, name):
        return getattr(self._get_type_alias_class(), name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._actual)

    @evaluator_method_cache()
    def _get_type_alias_class(self):
        module_name, class_name = self._actual.split('.')
        if self.evaluator.environment.version_info.major == 2 and module_name == 'builtins':
            module_name = '__builtin__'

        # TODO use evaluator.import_module?
        from jedi.evaluate.imports import Importer
        module, = Importer(
            self.evaluator, [module_name], self.evaluator.builtins_module
        ).follow()
        classes = module.py__getattribute__(class_name)
        # There should only be one, because it's code that we control.
        assert len(classes) == 1, classes
        cls = next(iter(classes))
        return cls


class _ContainerBase(_WithIndexBase):
    def _get_getitem_contexts(self, index):
        args = _iter_over_arguments(self._index_context, self._context_of_index)
        for i, contexts in enumerate(args):
            if i == index:
                return contexts

        debug.warning('No param #%s found for annotation %s', index, self._index_context)
        return NO_CONTEXTS


class Callable(_ContainerBase):
    def py__call__(self, arguments):
        # The 0th index holds the arguments; index 1 is the return type.
        return self._get_getitem_contexts(1).execute_annotation()


class Tuple(_ContainerBase):
    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type,
        # Tuple[T, ...] is used.
        if isinstance(self._index_context, SequenceLiteralContext):
            entries = self._index_context.get_tree_entries()
            if len(entries) == 2 and entries[1] == '...':
                return True
        return False

    def py__simple_getitem__(self, index):
        if self._is_homogenous():
            return self._get_getitem_contexts(0).execute_annotation()
        else:
            if isinstance(index, int):
                return self._get_getitem_contexts(index).execute_annotation()

            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_CONTEXTS

    def py__iter__(self, contextualized_node=None):
        if self._is_homogenous():
            yield LazyKnownContexts(self._get_getitem_contexts(0).execute_annotation())
        else:
            if isinstance(self._index_context, SequenceLiteralContext):
                for i in range(self._index_context.py__len__()):
                    yield LazyKnownContexts(self._get_getitem_contexts(i).execute_annotation())

    def py__getitem__(self, index_context_set, contextualized_node):
        if self._is_homogenous():
            return self._get_getitem_contexts(0).execute_annotation()

        return ContextSet.from_sets(
            _iter_over_arguments(self._index_context, self._context_of_index)
        ).execute_annotation()
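The two `Tuple` spellings that `_is_homogenous` distinguishes (runnable user code on 3.6+):

from typing import Tuple

pair: Tuple[int, str] = (1, 'a')    # fixed length, one type per index
many: Tuple[int, ...] = (1, 2, 3)   # homogeneous, any length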
class Generic(_ContainerBase):
    pass


class Protocol(_ContainerBase):
    pass


class Any(_BaseTypingContext):
    def execute_annotation(self):
        debug.warning('Used Any - returned no results')
        return NO_CONTEXTS


class TypeVarClass(_BaseTypingContext):
    def py__call__(self, arguments):
        unpacked = arguments.unpack()

        key, lazy_context = next(unpacked, (None, None))
        var_name = self._find_string_name(lazy_context)
        # The name must be given, otherwise it's useless.
        if var_name is None or key is not None:
            debug.warning('Found a variable without a name %s', arguments)
            return NO_CONTEXTS

        return ContextSet([TypeVar.create_cached(
            self.evaluator,
            self.parent_context,
            self._tree_name,
            var_name,
            unpacked
        )])

    def _find_string_name(self, lazy_context):
        if lazy_context is None:
            return None

        context_set = lazy_context.infer()
        if not context_set:
            return None
        if len(context_set) > 1:
            debug.warning('Found multiple contexts for a type variable: %s', context_set)

        name_context = next(iter(context_set))
        try:
            method = name_context.get_safe_value
        except AttributeError:
            return None
        else:
            safe_value = method(default=None)
            if self.evaluator.environment.version_info.major == 2:
                if isinstance(safe_value, bytes):
                    return force_unicode(safe_value)
            if isinstance(safe_value, (str, unicode)):
                return safe_value
            return None


class TypeVar(_BaseTypingContext):
    def __init__(self, evaluator, parent_context, tree_name, var_name, unpacked_args):
        super(TypeVar, self).__init__(evaluator, parent_context, tree_name)
        self._var_name = var_name

        self._constraints_lazy_contexts = []
        self._bound_lazy_context = None
        self._covariant_lazy_context = None
        self._contravariant_lazy_context = None
        for key, lazy_context in unpacked_args:
            if key is None:
                self._constraints_lazy_contexts.append(lazy_context)
            else:
                if key == 'bound':
                    self._bound_lazy_context = lazy_context
                elif key == 'covariant':
                    self._covariant_lazy_context = lazy_context
                elif key == 'contravariant':
                    self._contravariant_lazy_context = lazy_context
                else:
                    debug.warning('Invalid TypeVar param name %s', key)

    def py__name__(self):
        return self._var_name

    def get_filters(self, *args, **kwargs):
        return iter([])

    def _get_classes(self):
        if self._bound_lazy_context is not None:
            return self._bound_lazy_context.infer()
        if self._constraints_lazy_contexts:
            return self.constraints
        debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name)
        return NO_CONTEXTS

    def is_same_class(self, other):
        # Everything can match an undefined type var.
        return True

    @property
    def constraints(self):
        return ContextSet.from_sets(
            lazy.infer() for lazy in self._constraints_lazy_contexts
        )

    def define_generics(self, type_var_dict):
        try:
            found = type_var_dict[self.py__name__()]
        except KeyError:
            pass
        else:
            if found:
                return found
        return self._get_classes() or ContextSet({self})

    def execute_annotation(self):
        return self._get_classes().execute_annotation()

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
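The `TypeVar(...)` call shapes that `TypeVarClass.py__call__` unpacks above (runnable user code):

from typing import TypeVar

T = TypeVar('T')                      # plain; the first positional is the name
N = TypeVar('N', int, float)          # extra positionals become constraints
S = TypeVar('S', bound=str)           # bound= keyword
C = TypeVar('C', covariant=True)      # variance keywords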
class OverloadFunction(_BaseTypingContext):
    @repack_with_argument_clinic('func, /')
    def py__call__(self, func_context_set):
        # Just pass arguments through.
        return func_context_set


class NewTypeFunction(_BaseTypingContext):
    def py__call__(self, arguments):
        ordered_args = arguments.unpack()
        next(ordered_args, (None, None))
        _, second_arg = next(ordered_args, (None, None))
        if second_arg is None:
            return NO_CONTEXTS
        return ContextSet(
            NewType(
                self.evaluator,
                contextualized_node.context,
                contextualized_node.node,
                second_arg.infer(),
            ) for contextualized_node in arguments.get_calling_nodes())


class NewType(Context):
    def __init__(self, evaluator, parent_context, tree_node, type_context_set):
        super(NewType, self).__init__(evaluator, parent_context)
        self._type_context_set = type_context_set
        self.tree_node = tree_node

    def py__call__(self, arguments):
        return self._type_context_set.execute_annotation()
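The pattern `NewTypeFunction` recognizes (runnable user code): the second argument decides what calls to the new type evaluate to.

from typing import NewType

UserId = NewType('UserId', int)
uid = UserId(42)   # jedi infers `uid` as int, per NewType.py__call__ above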
class CastFunction(_BaseTypingContext):
|
||||
@repack_with_argument_clinic('type, object, /')
|
||||
def py__call__(self, type_context_set, object_context_set):
|
||||
return type_context_set.execute_annotation()
|
||||
|
||||
|
||||
class BoundTypeVarName(AbstractNameDefinition):
|
||||
"""
|
||||
This type var was bound to a certain type, e.g. int.
|
||||
"""
|
||||
def __init__(self, type_var, context_set):
|
||||
self._type_var = type_var
|
||||
self.parent_context = type_var.parent_context
|
||||
self._context_set = context_set
|
||||
|
||||
def infer(self):
|
||||
def iter_():
|
||||
for context in self._context_set:
|
||||
# Replace any with the constraints if they are there.
|
||||
if isinstance(context, Any):
|
||||
for constraint in self._type_var.constraints:
|
||||
yield constraint
|
||||
else:
|
||||
yield context
|
||||
return ContextSet(iter_())
|
||||
|
||||
def py__name__(self):
|
||||
return self._type_var.py__name__()
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._context_set)
|
||||
|
||||
|
||||
class TypeVarFilter(object):
|
||||
"""
|
||||
A filter for all given variables in a class.
|
||||
|
||||
A = TypeVar('A')
|
||||
B = TypeVar('B')
|
||||
class Foo(Mapping[A, B]):
|
||||
...
|
||||
|
||||
In this example we would have two type vars given: A and B
|
||||
"""
|
||||
def __init__(self, generics, type_vars):
|
||||
self._generics = generics
|
||||
self._type_vars = type_vars
|
||||
|
||||
def get(self, name):
|
||||
for i, type_var in enumerate(self._type_vars):
|
||||
if type_var.py__name__() == name:
|
||||
try:
|
||||
return [BoundTypeVarName(type_var, self._generics[i])]
|
||||
except IndexError:
|
||||
return [type_var.name]
|
||||
return []
|
||||
|
||||
def values(self):
|
||||
# The values are not relevant. If it's not searched exactly, the type
|
||||
# vars are just global and should be looked up as that.
|
||||
return []
|
||||
|
||||
|
||||
class AbstractAnnotatedClass(ClassMixin, ContextWrapper):
|
||||
def get_type_var_filter(self):
|
||||
return TypeVarFilter(self.get_generics(), self.list_type_vars())
|
||||
|
||||
def get_filters(self, search_global=False, *args, **kwargs):
|
||||
filters = super(AbstractAnnotatedClass, self).get_filters(
|
||||
search_global,
|
||||
*args, **kwargs
|
||||
)
|
||||
for f in filters:
|
||||
yield f
|
||||
|
||||
if search_global:
|
||||
# The type vars can only be looked up if it's a global search and
|
||||
# not a direct lookup on the class.
|
||||
yield self.get_type_var_filter()
|
||||
|
||||
def is_same_class(self, other):
|
||||
if not isinstance(other, AbstractAnnotatedClass):
|
||||
return False
|
||||
|
||||
if self.tree_node != other.tree_node:
|
||||
# TODO not sure if this is nice.
|
||||
return False
|
||||
given_params1 = self.get_generics()
|
||||
given_params2 = other.get_generics()
|
||||
|
||||
if len(given_params1) != len(given_params2):
|
||||
# If the amount of type vars doesn't match, the class doesn't
|
||||
# match.
|
||||
return False
|
||||
|
||||
# Now compare generics
|
||||
return all(
|
||||
any(
|
||||
# TODO why is this ordering the correct one?
|
||||
cls2.is_same_class(cls1)
|
||||
for cls1 in class_set1
|
||||
for cls2 in class_set2
|
||||
) for class_set1, class_set2 in zip(given_params1, given_params2)
|
||||
)
|
||||
|
||||
    def py__call__(self, arguments):
        instance, = super(AbstractAnnotatedClass, self).py__call__(arguments)
        return ContextSet([InstanceWrapper(instance)])

    def get_generics(self):
        raise NotImplementedError

    def define_generics(self, type_var_dict):
        changed = False
        new_generics = []
        for generic_set in self.get_generics():
            contexts = NO_CONTEXTS
            for generic in generic_set:
                if isinstance(generic, (AbstractAnnotatedClass, TypeVar)):
                    result = generic.define_generics(type_var_dict)
                    contexts |= result
                    if result != ContextSet({generic}):
                        changed = True
                else:
                    contexts |= ContextSet([generic])
            new_generics.append(contexts)

        if not changed:
            # There might not be any type vars that change. In that case just
            # return itself, because it does not make sense to potentially lose
            # cached results.
            return ContextSet([self])

        return ContextSet([GenericClass(
            self._wrapped_context,
            generics=tuple(new_generics)
        )])
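    # Sketch of the intended substitution (hypothetical values): for a class
    # standing for ``Mapping[A, int]`` and ``type_var_dict == {'A': str_set}``,
    # ``define_generics`` yields a ``GenericClass`` for ``Mapping[str, int]``;
    # if nothing in ``type_var_dict`` applies, ``changed`` stays False and the
    # cached ``self`` is returned unchanged.
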
    def __repr__(self):
        return '<%s: %s%s>' % (
            self.__class__.__name__,
            self._wrapped_context,
            list(self.get_generics()),
        )

    @to_list
    def py__bases__(self):
        for base in self._wrapped_context.py__bases__():
            yield LazyAnnotatedBaseClass(self, base)


class LazyGenericClass(AbstractAnnotatedClass):
    def __init__(self, class_context, index_context, context_of_index):
        super(LazyGenericClass, self).__init__(class_context)
        self._index_context = index_context
        self._context_of_index = context_of_index

    @evaluator_method_cache()
    def get_generics(self):
        return list(_iter_over_arguments(self._index_context, self._context_of_index))


class GenericClass(AbstractAnnotatedClass):
    def __init__(self, class_context, generics):
        super(GenericClass, self).__init__(class_context)
        self._generics = generics

    def get_generics(self):
        return self._generics


class LazyAnnotatedBaseClass(object):
    def __init__(self, class_context, lazy_base_class):
        self._class_context = class_context
        self._lazy_base_class = lazy_base_class

    @iterator_to_context_set
    def infer(self):
        for base in self._lazy_base_class.infer():
            if isinstance(base, AbstractAnnotatedClass):
                # Here we have to recalculate the given types.
                yield GenericClass.create_cached(
                    base.evaluator,
                    base._wrapped_context,
                    tuple(self._remap_type_vars(base)),
                )
            else:
                yield base

    def _remap_type_vars(self, base):
        filter = self._class_context.get_type_var_filter()
        for type_var_set in base.get_generics():
            new = NO_CONTEXTS
            for type_var in type_var_set:
                if isinstance(type_var, TypeVar):
                    names = filter.get(type_var.py__name__())
                    new |= ContextSet.from_sets(
                        name.infer() for name in names
                    )
                else:
                    # Mostly will be type vars, except if in some cases
                    # a concrete type will already be there. In that
                    # case just add it to the context set.
                    new |= ContextSet([type_var])
            yield new


class InstanceWrapper(ContextWrapper):
    def py__stop_iteration_returns(self):
        for cls in self._wrapped_context.class_context.py__mro__():
            if cls.py__name__() == 'Generator':
                generics = cls.get_generics()
                try:
                    return generics[2].execute_annotation()
                except IndexError:
                    pass
            elif cls.py__name__() == 'Iterator':
                return ContextSet([builtin_from_name(self.evaluator, u'None')])
        return self._wrapped_context.py__stop_iteration_returns()

jedi/evaluate/gradual/utils.py (new file, 32 lines)
@@ -0,0 +1,32 @@
import os

from jedi.evaluate.gradual.typeshed import TYPESHED_PATH, create_stub_module


def load_proper_stub_module(evaluator, file_io, import_names, module_node):
    """
    This function is given a random .pyi file and should return the proper
    module.
    """
    path = file_io.path
    assert path.endswith('.pyi')
    if path.startswith(TYPESHED_PATH):
        # /foo/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__
        rest = path[len(TYPESHED_PATH) + 1: -4]
        split_paths = tuple(rest.split(os.path.sep))
        # Remove the stdlib/3 or third_party/3.5 part
        import_names = split_paths[2:]
        if import_names[-1] == '__init__':
            import_names = import_names[:-1]

    if import_names is not None:
        actual_context_set = evaluator.import_module(import_names, prefer_stubs=False)
        if not actual_context_set:
            return None

        stub = create_stub_module(
            evaluator, actual_context_set, module_node, file_io, import_names
        )
        evaluator.stub_module_cache[import_names] = stub
        return stub
    return None
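
# Worked example of the path handling above (assuming TYPESHED_PATH='/foo',
# matching the in-code comment):
#   path         = '/foo/stdlib/3/os/path.pyi'
#   rest         = 'stdlib/3/os/path'
#   split_paths  = ('stdlib', '3', 'os', 'path')
#   import_names = ('os', 'path')   # after dropping the 'stdlib/3' prefix
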
@@ -166,13 +166,21 @@ def get_module_names(module, all_scopes):
    Returns a dictionary with name parts as keys and their call paths as
    values.
    """
    names = chain.from_iterable(module.get_used_names().values())
    names = list(chain.from_iterable(module.get_used_names().values()))
    if not all_scopes:
        # We have to filter all the names that don't have the module as a
        # parent_scope. There's None as a parent, because nodes in the module
        # node have the parent module and not suite as all the others.
        # Therefore it's important to catch that case.
        names = [n for n in names if get_parent_scope(n).parent in (module, None)]

        def is_module_scope_name(name):
            parent_scope = get_parent_scope(name)
            # async functions have an extra wrapper. Strip it.
            if parent_scope and parent_scope.type == 'async_stmt':
                parent_scope = parent_scope.parent
            return parent_scope in (module, None)

        names = [n for n in names if is_module_scope_name(n)]
    return names


@@ -186,17 +194,12 @@ def predefine_names(context, flow_scope, dct):
        del predefined[flow_scope]


def is_compiled(context):
    from jedi.evaluate.compiled import CompiledObject
    return isinstance(context, CompiledObject)


def is_string(context):
    if context.evaluator.environment.version_info.major == 2:
        str_classes = (unicode, bytes)
    else:
        str_classes = (unicode,)
    return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes)
    return context.is_compiled() and isinstance(context.get_safe_value(default=None), str_classes)


def is_literal(context):
@@ -204,36 +207,63 @@ def is_literal(context):


def _get_safe_value_or_none(context, accept):
    if is_compiled(context):
        value = context.get_safe_value(default=None)
        if isinstance(value, accept):
            return value
    value = context.get_safe_value(default=None)
    if isinstance(value, accept):
        return value


def get_int_or_none(context):
    return _get_safe_value_or_none(context, int)


def get_str_or_none(context):
    return _get_safe_value_or_none(context, (bytes, unicode))


def is_number(context):
    return _get_safe_value_or_none(context, (int, float)) is not None


class EvaluatorTypeError(Exception):
    pass


class EvaluatorIndexError(Exception):
    pass


class EvaluatorKeyError(Exception):
class SimpleGetItemNotFound(Exception):
    pass


@contextmanager
def reraise_as_evaluator(*exception_classes):
def reraise_getitem_errors(*exception_classes):
    try:
        yield
    except exception_classes as e:
        new_exc_cls = globals()['Evaluator' + e.__class__.__name__]
        raise new_exc_cls(e)
        raise SimpleGetItemNotFound(e)


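# Sketch of the intended usage of the renamed context manager (hypothetical
# caller): it funnels the given exception types into SimpleGetItemNotFound,
# e.g.
#
#     with reraise_getitem_errors(KeyError, IndexError):
#         return mapping[key]
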
def parse_dotted_names(nodes, is_import_from, until_node=None):
    level = 0
    names = []
    for node in nodes[1:]:
        if node in ('.', '...'):
            if not names:
                level += len(node.value)
        elif node.type == 'dotted_name':
            for n in node.children[::2]:
                names.append(n)
                if n is until_node:
                    break
            else:
                continue
            break
        elif node.type == 'name':
            names.append(node)
            if node is until_node:
                break
        elif node == ',':
            if not is_import_from:
                names = []
        else:
            # Here if the keyword `import` comes along it stops checking
            # for names.
            break
    return level, names


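# Hypothetical walk-through: for the child nodes of
# ``from ..foo.bar import baz`` with ``is_import_from=True``, the two leading
# dots raise ``level`` to 2, the dotted_name contributes the ``foo`` and
# ``bar`` name leaves, and the ``import`` keyword hits the final ``break``,
# so the result is ``(2, [<Name: foo>, <Name: bar>])``.
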
def contexts_from_qualified_names(evaluator, *names):
    return evaluator.import_module(names[:-1]).py__getattribute__(names[-1])

@@ -21,6 +21,7 @@ from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo,
                                 force_unicode, unicode)
from jedi import debug
from jedi import settings
from jedi.file_io import KnownContentFileIO, FolderIO, FileIO
from jedi.parser_utils import get_cached_code_lines
from jedi.evaluate import sys_path
from jedi.evaluate import helpers
@@ -28,8 +29,10 @@ from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.utils import unite
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import AbstractNameDefinition
from jedi.evaluate.names import ImportName, SubModuleName
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typeshed import import_module_decorator
from jedi.evaluate.context.module import iter_module_names


class ModuleCache(object):
@@ -37,16 +40,14 @@ class ModuleCache(object):
        self._path_cache = {}
        self._name_cache = {}

    def add(self, module, name):
        path = module.py__file__()
        self._path_cache[path] = module
        self._name_cache[name] = module
    def add(self, string_names, context_set):
        #path = module.py__file__()
        #self._path_cache[path] = context_set
        if string_names is not None:
            self._name_cache[string_names] = context_set

    def iterate_modules_with_names(self):
        return self._name_cache.items()

    def get(self, name):
        return self._name_cache[name]
    def get(self, string_names):
        return self._name_cache[string_names]

    def get_from_path(self, path):
        return self._path_cache[path]
@@ -95,7 +96,7 @@ def infer_import(context, tree_name, is_goto=False):
        for t in types
    )
    if not is_goto:
        types = ContextSet.from_set(types)
        types = ContextSet(types)

    if not types:
        path = import_path + [from_import_name]
@@ -146,47 +147,38 @@ class NestedImportModule(tree.Module):
                                   self._nested_import)


def _add_error(context, name, message=None):
    # Should be a name, not a string!
    if message is None:
        name_str = str(name.value) if isinstance(name, tree.Name) else name
        message = 'No module named ' + name_str
    if hasattr(name, 'parent'):
def _add_error(context, name, message):
    if hasattr(name, 'parent') and context is not None:
        analysis.add(context, 'import-error', name, message)
    else:
        debug.warning('ImportError without origin: ' + message)


class ImportName(AbstractNameDefinition):
    start_pos = (1, 0)
    _level = 0
def _level_to_base_import_path(project_path, directory, level):
    """
    In case the level is outside of the currently known package (something like
    import .....foo), we can still try our best to help the user for
    completions.
    """
    for i in range(level - 1):
        old = directory
        directory = os.path.dirname(directory)
        if old == directory:
            return None, None

    def __init__(self, parent_context, string_name):
        self.parent_context = parent_context
        self.string_name = string_name

    def infer(self):
        return Importer(
            self.parent_context.evaluator,
            [self.string_name],
            self.parent_context,
            level=self._level,
        ).follow()

    def goto(self):
        return [m.name for m in self.infer()]

    def get_root_context(self):
        # Not sure if this is correct.
        return self.parent_context.get_root_context()

    @property
    def api_type(self):
        return 'module'


class SubModuleName(ImportName):
    _level = 1
    d = directory
    level_import_paths = []
    # Now that we are on the level that the user wants to be, calculate the
    # import path for it.
    while True:
        if d == project_path:
            return level_import_paths, d
        dir_name = os.path.basename(d)
        if dir_name:
            level_import_paths.insert(0, dir_name)
            d = os.path.dirname(d)
        else:
            return None, directory


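# Worked example (hypothetical paths): with project_path='/repo',
# directory='/repo/pkg/sub' and level=3 (as in ``from ...x import y``), the
# first loop climbs two directories to '/repo'; the while loop then returns
# ([], '/repo') as soon as d == project_path, so completion restarts from the
# project root. If climbing would pass the filesystem root, (None, None) is
# returned and the caller gives up on inference.
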
class Importer(object):
@@ -203,215 +195,132 @@ class Importer(object):

        :param import_path: List of namespaces (strings or Names).
        """
        debug.speed('import %s' % (import_path,))
        debug.speed('import %s %s' % (import_path, module_context))
        self._evaluator = evaluator
        self.level = level
        self.module_context = module_context
        try:
            self.file_path = module_context.py__file__()
        except AttributeError:
            # Can be None for certain compiled modules like 'builtins'.
            self.file_path = None

        self._fixed_sys_path = None
        self._inference_possible = True
        if level:
            base = module_context.py__package__().split('.')
            if base == [''] or base == ['__main__']:
                base = []
            if level > len(base):
                path = module_context.py__file__()
                if path is not None:
                    import_path = list(import_path)
                    p = path
                    for i in range(level):
                        p = os.path.dirname(p)
                    dir_name = os.path.basename(p)
                    # This is not the proper way to do relative imports. However, since
                    # Jedi cannot be sure about the entry point, we just calculate an
                    # absolute path here.
                    if dir_name:
                        # TODO those sys.modules modifications are getting
                        # really stupid. this is the 3rd time that we're using
                        # this. We should probably refactor.
                        if path.endswith(os.path.sep + 'os.py'):
                            import_path.insert(0, 'os')
                        else:
                            import_path.insert(0, dir_name)
                    else:
                        _add_error(
                            module_context, import_path[-1],
                            message='Attempted relative import beyond top-level package.'
                        )
                        import_path = []
                # If no path is defined in the module we have no ideas where we
                # are in the file system. Therefore we cannot know what to do.
                # In this case we just let the path there and ignore that it's
                # a relative path. Not sure if that's a good idea.
            else:
            base = module_context.py__package__()
            # We need to care for two cases, the first one is if it's a valid
            # Python import. This import has a properly defined module name
            # chain like `foo.bar.baz` and an import in baz is made for
            # `..lala.` It can then resolve to `foo.bar.lala`.
            # The else here is a heuristic for all other cases, if for example
            # in `foo` you search for `...bar`, it's obviously out of scope.
            # However since Jedi tries to just do its best, we help the user
            # here, because he might have specified something wrong in his
            # project.
            if level <= len(base):
                # Here we basically rewrite the level to 0.
                base = tuple(base)
                if level > 1:
                    base = base[:-level + 1]

                import_path = base + tuple(import_path)
            else:
                path = module_context.py__file__()
                import_path = list(import_path)
                if path is None:
                    # If no path is defined, our best guess is that the current
                    # file is edited by a user on the current working
                    # directory. We need to add an initial path, because it
                    # will get removed as the name of the current file.
                    directory = os.getcwd()
                else:
                    directory = os.path.dirname(path)

                base_import_path, base_directory = _level_to_base_import_path(
                    self._evaluator.project._path, directory, level,
                )
                if base_directory is None:
                    # Everything is lost, the relative import does point
                    # somewhere out of the filesystem.
                    self._inference_possible = False
                else:
                    self._fixed_sys_path = [force_unicode(base_directory)]

                if base_import_path is None:
                    if import_path:
                        _add_error(
                            module_context, import_path[0],
                            message='Attempted relative import beyond top-level package.'
                        )
                else:
                    import_path = base_import_path + import_path
        self.import_path = import_path

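        # Hypothetical illustration of the level rewriting above: inside the
        # module ``foo.bar.baz`` (py__package__() == 'foo.bar'), the import
        # ``from ..lala import x`` has level=2 <= len(('foo', 'bar')), so base
        # is cut to ('foo',) and the import path becomes ('foo', 'lala').
        # With level=3 the package chain is too short and the filesystem-based
        # fallback via _level_to_base_import_path kicks in.
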
    @property
    def str_import_path(self):
    def _str_import_path(self):
        """Returns the import path as pure strings instead of `Name`."""
        return tuple(
            name.value if isinstance(name, tree.Name) else name
            for name in self.import_path
        )

    def sys_path_with_modifications(self):
    def _sys_path_with_modifications(self):
        if self._fixed_sys_path is not None:
            return self._fixed_sys_path

        sys_path_mod = (
            self._evaluator.get_sys_path()
            + sys_path.check_sys_path_modifications(self.module_context)
        )

        if self.import_path and self.file_path is not None \
                and self._evaluator.environment.version_info.major == 2:
            # Python2 uses an old strange way of importing relative imports.
            sys_path_mod.append(force_unicode(os.path.dirname(self.file_path)))
        if self._evaluator.environment.version_info.major == 2:
            file_path = self.module_context.py__file__()
            if file_path is not None:
                # Python2 uses an old strange way of importing relative imports.
                sys_path_mod.append(force_unicode(os.path.dirname(file_path)))

        return sys_path_mod

    def follow(self):
        if not self.import_path or not self._evaluator.infer_enabled:
        if not self.import_path or not self._inference_possible:
            return NO_CONTEXTS

        return self._do_import(self.import_path, self.sys_path_with_modifications())

    def _do_import(self, import_path, sys_path):
        """
        This method is very similar to importlib's `_gcd_import`.
        """
        import_parts = [
        import_names = tuple(
            force_unicode(i.value if isinstance(i, tree.Name) else i)
            for i in import_path
        ]

        # Handle "magic" Flask extension imports:
        # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
        if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
            # New style.
            ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
            modules = self._do_import(ipath, sys_path)
            if modules:
                return modules
            else:
                # Old style
                return self._do_import(('flaskext',) + import_path[2:], sys_path)

        if import_parts[0] in settings.auto_import_modules:
            module = _load_module(
                self._evaluator,
                import_names=import_parts,
                sys_path=sys_path,
            )
            return ContextSet(module)

        module_name = '.'.join(import_parts)
        try:
            return ContextSet(self._evaluator.module_cache.get(module_name))
        except KeyError:
            pass

        if len(import_path) > 1:
            # This is a recursive way of importing that works great with
            # the module cache.
            bases = self._do_import(import_path[:-1], sys_path)
            if not bases:
                return NO_CONTEXTS
            # We can take the first element, because only the os special
            # case yields multiple modules, which is not important for
            # further imports.
            parent_module = list(bases)[0]

            # This is a huge exception, we follow a nested import
            # ``os.path``, because it's a very important one in Python
            # that is being achieved by messing with ``sys.modules`` in
            # ``os``.
            if import_parts == ['os', 'path']:
                return parent_module.py__getattribute__('path')

            try:
                method = parent_module.py__path__
            except AttributeError:
                # The module is not a package.
                _add_error(self.module_context, import_path[-1])
                return NO_CONTEXTS
            else:
                paths = method()
                debug.dbg('search_module %s in paths %s', module_name, paths)
                for path in paths:
                    # At the moment we are only using one path. So this is
                    # not important to be correct.
                    if not isinstance(path, list):
                        path = [path]
                    code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
                        string=import_parts[-1],
                        path=path,
                        full_name=module_name,
                        is_global_search=False,
                    )
                    if module_path is not None:
                        break
                else:
                    _add_error(self.module_context, import_path[-1])
                    return NO_CONTEXTS
        else:
            debug.dbg('global search_module %s in %s', import_parts[-1], self.file_path)
            # Override the sys.path. It only works well that way.
            # Injecting the path directly into `find_module` did not work.
            code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
                string=import_parts[-1],
                full_name=module_name,
                sys_path=sys_path,
                is_global_search=True,
            )
            if module_path is None:
                # The module is not a package.
                _add_error(self.module_context, import_path[-1])
                return NO_CONTEXTS

        module = _load_module(
            self._evaluator, module_path, code, sys_path,
            import_names=import_parts,
            safe_module_name=True,
            for i in self.import_path
        )
        sys_path = self._sys_path_with_modifications()

        if module is None:
            # The file might raise an ImportError e.g. and therefore not be
            # importable.
            return NO_CONTEXTS

        return ContextSet(module)

    def _generate_name(self, name, in_module=None):
        # Create a pseudo import to be able to follow them.
        if in_module is None:
            return ImportName(self.module_context, name)
        return SubModuleName(in_module, name)
        context_set = [None]
        for i, name in enumerate(self.import_path):
            context_set = ContextSet.from_sets([
                self._evaluator.import_module(
                    import_names[:i+1],
                    parent_module_context,
                    sys_path
                ) for parent_module_context in context_set
            ])
            if not context_set:
                message = 'No module named ' + '.'.join(import_names)
                _add_error(self.module_context, name, message)
                return NO_CONTEXTS
        return context_set

    def _get_module_names(self, search_path=None, in_module=None):
        """
        Get the names of all modules in the search_path. This means file names
        and not names defined in the files.
        """
        sub = self._evaluator.compiled_subprocess

        names = []
        # add builtin module names
        if search_path is None and in_module is None:
            names += [self._generate_name(name) for name in sub.get_builtin_module_names()]
            names += [ImportName(self.module_context, name)
                      for name in self._evaluator.compiled_subprocess.get_builtin_module_names()]

        if search_path is None:
            search_path = self.sys_path_with_modifications()
            search_path = self._sys_path_with_modifications()

        for name in sub.list_module_names(search_path):
            names.append(self._generate_name(name, in_module=in_module))
        for name in iter_module_names(self._evaluator, search_path):
            if in_module is None:
                n = ImportName(self.module_context, name)
            else:
                n = SubModuleName(in_module, name)
            names.append(n)
        return names

    def completion_names(self, evaluator, only_modules=False):
@@ -419,176 +328,239 @@ class Importer(object):
        :param only_modules: Indicates whether it's possible to import a
            definition that is not defined in a module.
        """
        from jedi.evaluate.context import ModuleContext
        from jedi.evaluate.context.namespace import ImplicitNamespaceContext
        if not self._inference_possible:
            return []

        names = []
        if self.import_path:
            # flask
            if self.str_import_path == ('flask', 'ext'):
            if self._str_import_path == ('flask', 'ext'):
                # List Flask extensions like ``flask_foo``
                for mod in self._get_module_names():
                    modname = mod.string_name
                    if modname.startswith('flask_'):
                        extname = modname[len('flask_'):]
                        names.append(self._generate_name(extname))
                        names.append(ImportName(self.module_context, extname))
                # Now the old style: ``flaskext.foo``
                for dir in self.sys_path_with_modifications():
                for dir in self._sys_path_with_modifications():
                    flaskext = os.path.join(dir, 'flaskext')
                    if os.path.isdir(flaskext):
                        names += self._get_module_names([flaskext])

            for context in self.follow():
            contexts = self.follow()
            for context in contexts:
                # Non-modules are not completable.
                if context.api_type != 'module':  # not a module
                    continue
                # namespace packages
                if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'):
                    paths = context.py__path__()
                    names += self._get_module_names(paths, in_module=context)
                names += context.sub_modules_dict().values()

                # implicit namespace packages
                elif isinstance(context, ImplicitNamespaceContext):
                    paths = context.paths
                    names += self._get_module_names(paths, in_module=context)
            if not only_modules:
                from jedi.evaluate.gradual.conversion import convert_contexts

            if only_modules:
                # In the case of an import like `from x.` we don't need to
                # add all the variables.
                if ('os',) == self.str_import_path and not self.level:
                    # os.path is a hardcoded exception, because it's a
                    # ``sys.modules`` modification.
                    names.append(self._generate_name('path', context))

                    continue

                for filter in context.get_filters(search_global=False):
                    names += filter.values()
                both_contexts = contexts | convert_contexts(contexts)
                for c in both_contexts:
                    for filter in c.get_filters(search_global=False):
                        names += filter.values()
        else:
            # Empty import path=completion after import
            if not self.level:
            if self.level:
                # We only get here if the level cannot be properly calculated.
                names += self._get_module_names(self._fixed_sys_path)
            else:
                # This is just the list of global imports.
                names += self._get_module_names()

            if self.file_path is not None:
                path = os.path.abspath(self.file_path)
                for i in range(self.level - 1):
                    path = os.path.dirname(path)
                names += self._get_module_names([path])

        return names


def _load_module(evaluator, path=None, code=None, sys_path=None,
                 import_names=None, safe_module_name=False):
    if import_names is None:
        dotted_name = None
    else:
        dotted_name = '.'.join(import_names)
    try:
        return evaluator.module_cache.get(dotted_name)
    except KeyError:
        pass
    try:
        return evaluator.module_cache.get_from_path(path)
    except KeyError:
        pass
@import_module_decorator
def import_module(evaluator, import_names, parent_module_context, sys_path):
    """
    This method is very similar to importlib's `_gcd_import`.
    """
    if import_names[0] in settings.auto_import_modules:
        module = _load_builtin_module(evaluator, import_names, sys_path)
        if module is None:
            return NO_CONTEXTS
        return ContextSet([module])

    if isinstance(path, ImplicitNSInfo):
    module_name = '.'.join(import_names)
    if parent_module_context is None:
        # Override the sys.path. It only works well that way.
        # Injecting the path directly into `find_module` did not work.
        file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
            string=import_names[-1],
            full_name=module_name,
            sys_path=sys_path,
            is_global_search=True,
        )
        if is_pkg is None:
            return NO_CONTEXTS
    else:
        try:
            method = parent_module_context.py__path__
        except AttributeError:
            # The module is not a package.
            return NO_CONTEXTS
        else:
            paths = method()
            for path in paths:
                # At the moment we are only using one path. So this is
                # not important to be correct.
                if not isinstance(path, list):
                    path = [path]
                file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
                    string=import_names[-1],
                    path=path,
                    full_name=module_name,
                    is_global_search=False,
                )
                if is_pkg is not None:
                    break
            else:
                return NO_CONTEXTS

    if isinstance(file_io_or_ns, ImplicitNSInfo):
        from jedi.evaluate.context.namespace import ImplicitNamespaceContext
        module = ImplicitNamespaceContext(
            evaluator,
            fullname=path.name,
            paths=path.paths,
            fullname=file_io_or_ns.name,
            paths=file_io_or_ns.paths,
        )
    elif file_io_or_ns is None:
        module = _load_builtin_module(evaluator, import_names, sys_path)
        if module is None:
            return NO_CONTEXTS
    else:
        if sys_path is None:
            sys_path = evaluator.get_sys_path()
        module = _load_python_module(
            evaluator, file_io_or_ns, sys_path,
            import_names=import_names,
            is_package=is_pkg,
        )

    if path is not None and path.endswith(('.py', '.zip', '.egg')):
        module_node = evaluator.parse(
            code=code, path=path, cache=True,
            diff_cache=settings.fast_parser,
            cache_path=settings.cache_directory)
    if parent_module_context is None:
        debug.dbg('global search_module %s: %s', import_names[-1], module)
    else:
        debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
    return ContextSet([module])

        from jedi.evaluate.context import ModuleContext
        module = ModuleContext(
            evaluator, module_node,
            path=path,
            code_lines=get_cached_code_lines(evaluator.grammar, path),
        )
    else:
        assert dotted_name is not None
        module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)

    if module is not None and dotted_name is not None:
        add_module_to_cache(evaluator, dotted_name, module, safe=safe_module_name)
def _load_python_module(evaluator, file_io, sys_path=None,
                        import_names=None, is_package=False):
    try:
        return evaluator.module_cache.get_from_path(file_io.path)
    except KeyError:
        pass

    module_node = evaluator.parse(
        file_io=file_io,
        cache=True,
        diff_cache=settings.fast_parser,
        cache_path=settings.cache_directory
    )

    from jedi.evaluate.context import ModuleContext
    return ModuleContext(
        evaluator, module_node,
        file_io=file_io,
        string_names=import_names,
        code_lines=get_cached_code_lines(evaluator.grammar, file_io.path),
        is_package=is_package,
    )


def _load_builtin_module(evaluator, import_names=None, sys_path=None):
    if sys_path is None:
        sys_path = evaluator.get_sys_path()

    dotted_name = '.'.join(import_names)
    assert dotted_name is not None
    module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)
    if module is None:
        # The file might raise an ImportError e.g. and therefore not be
        # importable.
        return None
    return module


def add_module_to_cache(evaluator, module_name, module, safe=False):
    if not safe and '.' not in module_name:
        # We cannot add paths with dots, because that would collide with
        # the separator dots for nested packages. Therefore we return
        # `__main__` in ModuleWrapper.py__name__(), which is similar to
        # Python behavior.
        return
    evaluator.module_cache.add(module, module_name)
def _load_module_from_path(evaluator, file_io, base_names):
    """
    This should pretty much only be used for get_modules_containing_name. It's
    here to ensure that a random path is still properly loaded into the Jedi
    module structure.
    """
    e_sys_path = evaluator.get_sys_path()
    path = file_io.path
    if base_names:
        module_name = os.path.basename(path)
        module_name = sys_path.remove_python_path_suffix(module_name)
        is_package = module_name == '__init__'
        if is_package:
            import_names = base_names
        else:
            import_names = base_names + (module_name,)
    else:
        import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)

    module = _load_python_module(
        evaluator, file_io,
        sys_path=e_sys_path,
        import_names=import_names,
        is_package=is_package,
    )
    evaluator.module_cache.add(import_names, ContextSet([module]))
    return module


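# Worked example of the naming logic above (hypothetical inputs): with
# base_names=('foo', 'bar') and file_io.path='.../foo/bar/baz.py', the suffix
# is stripped to 'baz' and import_names becomes ('foo', 'bar', 'baz'); for
# '.../foo/bar/__init__.py' the module is the package itself, so import_names
# stays ('foo', 'bar') with is_package=True.
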
def get_modules_containing_name(evaluator, modules, name):
    """
    Search a name in the directories of modules.
    """
    def check_directories(paths):
        for p in paths:
            if p is not None:
                # We need abspath, because the settings paths might not already
                # have been converted to absolute paths.
                d = os.path.dirname(os.path.abspath(p))
                for file_name in os.listdir(d):
                    path = os.path.join(d, file_name)
                    if file_name.endswith('.py'):
                        yield path
    def check_directory(folder_io):
        for file_name in folder_io.list():
            if file_name.endswith('.py'):
                yield folder_io.get_file_io(file_name)

    def check_fs(path):
    def check_fs(file_io, base_names):
        try:
            f = open(path, 'rb')
            code = file_io.read()
        except FileNotFoundError:
            return
        with f:
            code = python_bytes_to_unicode(f.read(), errors='replace')
            if name in code:
                e_sys_path = evaluator.get_sys_path()
                import_names = sys_path.dotted_path_in_sys_path(e_sys_path, path)
                module = _load_module(
                    evaluator, path, code,
                    sys_path=e_sys_path,
                    import_names=import_names,
                )
                return module
        return None
        code = python_bytes_to_unicode(code, errors='replace')
        if name not in code:
            return None
        new_file_io = KnownContentFileIO(file_io.path, code)
        m = _load_module_from_path(evaluator, new_file_io, base_names)
        if isinstance(m, compiled.CompiledObject):
            return None
        return m

    # skip non python modules
    used_mod_paths = set()
    folders_with_names_to_be_checked = []
    for m in modules:
        try:
            path = m.py__file__()
        except AttributeError:
            pass
        else:
            used_mod_paths.add(path)
        if m.file_io is not None:
            path = m.file_io.path
            if path not in used_mod_paths:
                used_mod_paths.add(path)
                folders_with_names_to_be_checked.append((
                    m.file_io.get_parent_folder(),
                    m.py__package__()
                ))
        yield m

    if not settings.dynamic_params_for_other_modules:
        return

    additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules)
    # Check the directories of used modules.
    paths = (additional | set(check_directories(used_mod_paths))) \
        - used_mod_paths
    def get_file_ios_to_check():
        for folder_io, base_names in folders_with_names_to_be_checked:
            for file_io in check_directory(folder_io):
                yield file_io, base_names

    # Sort here to make issues less random.
    for p in sorted(paths):
        # make testing easier, sort it - same results on every interpreter
        m = check_fs(p)
        if m is not None and not isinstance(m, compiled.CompiledObject):
        for p in settings.additional_dynamic_modules:
            p = os.path.abspath(p)
            if p not in used_mod_paths:
                yield FileIO(p), None

    for file_io, base_names in get_file_ios_to_check():
        m = check_fs(file_io, base_names)
        if m is not None:
            yield m

@@ -1,104 +0,0 @@
"""
This module is not intended to be used in jedi, rather it will be fed to the
jedi-parser to replace classes in the typing module
"""

try:
    from collections import abc
except ImportError:
    # python 2
    import collections as abc


def factory(typing_name, indextypes):
    class Iterable(abc.Iterable):
        def __iter__(self):
            while True:
                yield indextypes[0]()

    class Iterator(Iterable, abc.Iterator):
        def next(self):
            """ needed for python 2 """
            return self.__next__()

        def __next__(self):
            return indextypes[0]()

    class Sequence(abc.Sequence):
        def __getitem__(self, index):
            return indextypes[0]()

    class MutableSequence(Sequence, abc.MutableSequence):
        pass

    class List(MutableSequence, list):
        pass

    class Tuple(Sequence, tuple):
        def __getitem__(self, index):
            if indextypes[1] == Ellipsis:
                # https://www.python.org/dev/peps/pep-0484/#the-typing-module
                # Tuple[int, ...] means a tuple of ints of indeterminate length
                return indextypes[0]()
            else:
                return indextypes[index]()

    class AbstractSet(Iterable, abc.Set):
        pass

    class MutableSet(AbstractSet, abc.MutableSet):
        pass

    class KeysView(Iterable, abc.KeysView):
        pass

    class ValuesView(abc.ValuesView):
        def __iter__(self):
            while True:
                yield indextypes[1]()

    class ItemsView(abc.ItemsView):
        def __iter__(self):
            while True:
                yield (indextypes[0](), indextypes[1]())

    class Mapping(Iterable, abc.Mapping):
        def __getitem__(self, item):
            return indextypes[1]()

        def keys(self):
            return KeysView()

        def values(self):
            return ValuesView()

        def items(self):
            return ItemsView()

    class MutableMapping(Mapping, abc.MutableMapping):
        pass

    class Dict(MutableMapping, dict):
        pass

    class DefaultDict(MutableMapping, dict):
        pass

    dct = {
        "Sequence": Sequence,
        "MutableSequence": MutableSequence,
        "List": List,
        "Iterable": Iterable,
        "Iterator": Iterator,
        "AbstractSet": AbstractSet,
        "MutableSet": MutableSet,
        "Mapping": Mapping,
        "MutableMapping": MutableMapping,
        "Tuple": Tuple,
        "KeysView": KeysView,
        "ItemsView": ItemsView,
        "ValuesView": ValuesView,
        "Dict": Dict,
        "DefaultDict": DefaultDict,
    }
    return dct[typing_name]
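
# Sketch of how this deleted shim was meant to be used (hypothetical values):
# ``indextypes`` holds zero-argument callables producing instances, so
#
#     Iterator = factory('Iterator', (int,))
#     next(iter(Iterator()))  # -> 0, i.e. an ``int()`` instance
#
# which let the old parser-based approach fake element types for
# ``typing.Iterator[int]`` and friends before the typeshed-based inference
# replaced it.
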
@@ -16,7 +16,7 @@ class AbstractLazyContext(object):
class LazyKnownContext(AbstractLazyContext):
    """data is a context."""
    def infer(self):
        return ContextSet(self.data)
        return ContextSet([self.data])


class LazyKnownContexts(AbstractLazyContext):
@@ -36,14 +36,14 @@ class LazyUnknownContext(AbstractLazyContext):
class LazyTreeContext(AbstractLazyContext):
    def __init__(self, context, node):
        super(LazyTreeContext, self).__init__(node)
        self._context = context
        self.context = context
        # We need to save the predefined names. It's an unfortunate side effect
        # that needs to be tracked otherwise results will be wrong.
        self._predefined_names = dict(context.predefined_names)

    def infer(self):
        with monkeypatch(self._context, 'predefined_names', self._predefined_names):
            return self._context.eval_node(self.data)
        with monkeypatch(self.context, 'predefined_names', self._predefined_names):
            return self.context.eval_node(self.data)


def get_merged_lazy_context(lazy_contexts):

jedi/evaluate/names.py (new file, 293 lines)
@@ -0,0 +1,293 @@
from abc import abstractmethod

from parso.tree import search_ancestor

from jedi._compatibility import Parameter
from jedi.evaluate.base_context import ContextSet
from jedi.cache import memoize_method


class AbstractNameDefinition(object):
    start_pos = None
    string_name = None
    parent_context = None
    tree_name = None
    is_context_name = True
    """
    Used for the Jedi API to know if it's a keyword or an actual name.
    """

    @abstractmethod
    def infer(self):
        raise NotImplementedError

    @abstractmethod
    def goto(self):
        # Typically names are already definitions and therefore a goto on that
        # name will always result on itself.
        return {self}

    def get_qualified_names(self, include_module_names=False):
        qualified_names = self._get_qualified_names()
        if qualified_names is None or not include_module_names:
            return qualified_names

        module_names = self.get_root_context().string_names
        if module_names is None:
            return None
        return module_names + qualified_names

    def _get_qualified_names(self):
        # By default, a name has no qualified names.
        return None

    def get_root_context(self):
        return self.parent_context.get_root_context()

    def __repr__(self):
        if self.start_pos is None:
            return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name)
        return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__,
                                                      self.string_name, self.start_pos)

    def is_import(self):
        return False

    @property
    def api_type(self):
        return self.parent_context.api_type


class AbstractTreeName(AbstractNameDefinition):
    def __init__(self, parent_context, tree_name):
        self.parent_context = parent_context
        self.tree_name = tree_name

    def get_qualified_names(self, include_module_names=False):
        import_node = search_ancestor(self.tree_name, 'import_name', 'import_from')
        # For import nodes we cannot just have names, because it's very unclear
        # how they would look like. For now we just ignore them in most cases.
        # In case of level == 1, it works always, because it's like a submodule
        # lookup.
        if import_node is not None and not (import_node.level == 1
                                            and self.get_root_context().is_package):
            # TODO improve the situation for when level is present.
            if include_module_names and not import_node.level:
                return tuple(n.value for n in import_node.get_path_for_name(self.tree_name))
            else:
                return None

        return super(AbstractTreeName, self).get_qualified_names(include_module_names)

    def _get_qualified_names(self):
        parent_names = self.parent_context.get_qualified_names()
        if parent_names is None:
            return None
        return parent_names + (self.tree_name.value,)

    def goto(self, **kwargs):
        return self.parent_context.evaluator.goto(self.parent_context, self.tree_name, **kwargs)

    def is_import(self):
        imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
        return imp is not None

    @property
    def string_name(self):
        return self.tree_name.value

    @property
    def start_pos(self):
        return self.tree_name.start_pos


class ContextNameMixin(object):
    def infer(self):
        return ContextSet([self._context])

    def _get_qualified_names(self):
        return self._context.get_qualified_names()

    def get_root_context(self):
        if self.parent_context is None:  # A module
            return self._context
        return super(ContextNameMixin, self).get_root_context()

    @property
    def api_type(self):
        return self._context.api_type


class ContextName(ContextNameMixin, AbstractTreeName):
    def __init__(self, context, tree_name):
        super(ContextName, self).__init__(context.parent_context, tree_name)
        self._context = context

    def goto(self):
        return ContextSet([self._context.name])


class TreeNameDefinition(AbstractTreeName):
    _API_TYPES = dict(
        import_name='module',
        import_from='module',
        funcdef='function',
        param='param',
        classdef='class',
    )

    def infer(self):
        # Refactor this, should probably be here.
        from jedi.evaluate.syntax_tree import tree_name_to_contexts
        parent = self.parent_context
        return tree_name_to_contexts(parent.evaluator, parent, self.tree_name)

    @property
    def api_type(self):
        definition = self.tree_name.get_definition(import_name_always=True)
        if definition is None:
            return 'statement'
        return self._API_TYPES.get(definition.type, 'statement')


class ParamNameInterface(object):
    api_type = u'param'

    def _kind_string(self):
        kind = self.get_kind()
        if kind == Parameter.VAR_POSITIONAL:  # *args
            return '*'
        if kind == Parameter.VAR_KEYWORD:  # **kwargs
            return '**'
        return ''

    def get_kind(self):
        raise NotImplementedError

    def to_string(self):
        raise NotImplementedError


class ParamName(ParamNameInterface, AbstractTreeName):
    def __init__(self, parent_context, tree_name):
        self.parent_context = parent_context
        self.tree_name = tree_name

    def _get_param_node(self):
        return search_ancestor(self.tree_name, 'param')

    @property
    def string_name(self):
        name = self.tree_name.value
        if name.startswith('__'):
            # Params starting with __ are an equivalent to positional only
            # variables in typeshed.
            name = name[2:]
        return name

    def get_kind(self):
        tree_param = self._get_param_node()
        if tree_param.star_count == 1:  # *args
            return Parameter.VAR_POSITIONAL
        if tree_param.star_count == 2:  # **kwargs
            return Parameter.VAR_KEYWORD

        # Params starting with __ are an equivalent to positional only
        # variables in typeshed.
        if tree_param.name.value.startswith('__'):
            return Parameter.POSITIONAL_ONLY

        parent = tree_param.parent
        param_appeared = False
        for p in parent.children:
            if param_appeared:
                if p == '/':
                    return Parameter.POSITIONAL_ONLY
            else:
                if p == '*':
                    return Parameter.KEYWORD_ONLY
                if p.type == 'param':
                    if p.star_count:
                        return Parameter.KEYWORD_ONLY
                if p == tree_param:
                    param_appeared = True
        return Parameter.POSITIONAL_OR_KEYWORD

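    # Hypothetical illustration of get_kind(): for ``def f(a, *args, b, **kw)``
    # the params resolve to POSITIONAL_OR_KEYWORD (a), VAR_POSITIONAL (args),
    # KEYWORD_ONLY (b, because a starred param precedes it) and VAR_KEYWORD
    # (kw); a typeshed-style ``__x`` name would come out as POSITIONAL_ONLY.
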
    def to_string(self):
        output = self._kind_string() + self.string_name
        param_node = self._get_param_node()
        if param_node.annotation is not None:
            output += ': ' + param_node.annotation.get_code(include_prefix=False)
        if param_node.default is not None:
            output += '=' + param_node.default.get_code(include_prefix=False)
        return output

    def infer(self):
        return self.get_param().infer()

    def get_param(self):
        params, _ = self.parent_context.get_executed_params_and_issues()
        param_node = search_ancestor(self.tree_name, 'param')
        return params[param_node.position_index]


class ImportName(AbstractNameDefinition):
    start_pos = (1, 0)
    _level = 0

    def __init__(self, parent_context, string_name):
        self._from_module_context = parent_context
        self.string_name = string_name

    def get_qualified_names(self, include_module_names=False):
        if include_module_names:
            if self._level:
                assert self._level == 1, "Everything else is not supported for now"
                module_names = self._from_module_context.string_names
                if module_names is None:
                    return module_names
                return module_names + (self.string_name,)
            return (self.string_name,)
        return ()

    @property
    def parent_context(self):
        m = self._from_module_context
        import_contexts = self.infer()
        if not import_contexts:
            return m
        # It's almost always possible to find the import or to not find it. The
        # importing returns only one context, pretty much always.
        return next(iter(import_contexts))

    @memoize_method
    def infer(self):
        from jedi.evaluate.imports import Importer
        m = self._from_module_context
        return Importer(m.evaluator, [self.string_name], m, level=self._level).follow()

    def goto(self):
        return [m.name for m in self.infer()]

    @property
    def api_type(self):
        return 'module'


class SubModuleName(ImportName):
    _level = 1


class NameWrapper(object):
    def __init__(self, wrapped_name):
        self._wrapped_name = wrapped_name

    @abstractmethod
    def infer(self):
        raise NotImplementedError

    def __getattr__(self, name):
        return getattr(self._wrapped_name, name)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._wrapped_name)

@@ -1,38 +1,62 @@
from collections import defaultdict

from jedi import debug
from jedi.evaluate.utils import PushBackIterator
from jedi.evaluate import analysis
from jedi.evaluate.lazy_context import LazyKnownContext, \
    LazyTreeContext, LazyUnknownContext
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate.context import iterable


def _add_argument_issue(parent_context, error_name, lazy_context, message):
def _add_argument_issue(error_name, lazy_context, message):
    if isinstance(lazy_context, LazyTreeContext):
        node = lazy_context.data
        if node.parent.type == 'argument':
            node = node.parent
        analysis.add(parent_context, error_name, node, message)
        return analysis.add(lazy_context.context, error_name, node, message)


class ExecutedParam(object):
    """Fake a param and give it values."""
    def __init__(self, execution_context, param_node, lazy_context):
    def __init__(self, execution_context, param_node, lazy_context, is_default=False):
        self._execution_context = execution_context
        self._param_node = param_node
        self._lazy_context = lazy_context
        self.string_name = param_node.name.value
        self._is_default = is_default

    def infer(self):
        pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node)
        doc_params = docstrings.infer_param(self._execution_context, self._param_node)
        if pep0484_hints or doc_params:
            return pep0484_hints | doc_params
    def infer_annotations(self):
        from jedi.evaluate.gradual.annotation import infer_param
        return infer_param(self._execution_context, self._param_node)

    def infer(self, use_hints=True):
        if use_hints:
            doc_params = docstrings.infer_param(self._execution_context, self._param_node)
            ann = self.infer_annotations().execute_annotation()
            if ann or doc_params:
                return ann | doc_params

        return self._lazy_context.infer()

    def matches_signature(self):
        if self._is_default:
            return True
        argument_contexts = self.infer(use_hints=False).py__class__()
        if self._param_node.star_count:
            return True
        annotations = self.infer_annotations()
        if not annotations:
            # If we cannot infer annotations - or there aren't any - pretend
            # that the signature matches.
            return True
        matches = any(c1.is_sub_class_of(c2)
                      for c1 in argument_contexts
                      for c2 in annotations.gather_annotation_classes())
        debug.dbg("signature compare %s: %s <=> %s",
                  matches, argument_contexts, annotations, color='BLUE')
        return matches

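    # Hypothetical illustration: for ``def f(x: int): ...`` called as ``f('')``
    # the argument's class (str) is not a subclass of the annotated int, so
    # matches_signature() is False; with no annotation, for a starred param,
    # or when the default was used, it optimistically returns True.
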
@property
|
||||
def var_args(self):
|
||||
return self._execution_context.var_args
|
||||
@@ -41,15 +65,35 @@ class ExecutedParam(object):
         return '<%s: %s>' % (self.__class__.__name__, self.string_name)


-def get_executed_params(execution_context, var_args):
+def get_executed_params_and_issues(execution_context, arguments):
+    def too_many_args(argument):
+        m = _error_argument_count(funcdef, len(unpacked_va))
+        # Just report an error for the first param that is not needed (like
+        # cPython).
+        if arguments.get_calling_nodes():
+            # There might not be a valid calling node so check for that first.
+            issues.append(
+                _add_argument_issue(
+                    'type-error-too-many-arguments',
+                    argument,
+                    message=m
+                )
+            )
+        else:
+            issues.append(None)
+
+    issues = []  # List[Optional[analysis issue]]
     result_params = []
     param_dict = {}
     funcdef = execution_context.tree_node
-    parent_context = execution_context.parent_context
+    # Default params are part of the context where the function was defined.
+    # This means that they might have access on class variables that the
+    # function itself doesn't have.
+    default_param_context = execution_context.function_context.get_default_param_context()

     for param in funcdef.get_params():
         param_dict[param.name.value] = param
-    unpacked_va = list(var_args.unpack(funcdef))
+    unpacked_va = list(arguments.unpack(funcdef))
     var_arg_iterator = PushBackIterator(iter(unpacked_va))

     non_matching_keys = defaultdict(lambda: [])
@@ -61,6 +105,7 @@ def get_executed_params(execution_context, var_args):
         # args / kwargs will just be empty arrays / dicts, respectively.
         # Wrong value count is just ignored. If you try to test cases that are
         # not allowed in Python, Jedi will maybe not show any completions.
+        is_default = False
         key, argument = next(var_arg_iterator, (None, None))
         while key is not None:
             keys_only = True
@@ -73,9 +118,12 @@ def get_executed_params(execution_context, var_args):
                     had_multiple_value_error = True
                     m = ("TypeError: %s() got multiple values for keyword argument '%s'."
                          % (funcdef.name, key))
-                    for node in var_args.get_calling_nodes():
-                        analysis.add(parent_context, 'type-error-multiple-values',
-                                     node, message=m)
+                    for contextualized_node in arguments.get_calling_nodes():
+                        issues.append(
+                            analysis.add(contextualized_node.context,
+                                         'type-error-multiple-values',
+                                         contextualized_node.node, message=m)
+                        )
                 else:
                     keys_used[key] = ExecutedParam(execution_context, key_param, argument)
             key, argument = next(var_arg_iterator, (None, None))
@@ -100,6 +148,8 @@ def get_executed_params(execution_context, var_args):
             seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list)
             result_arg = LazyKnownContext(seq)
         elif param.star_count == 2:
+            if argument is not None:
+                too_many_args(argument)
             # **kwargs param
             dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys))
             result_arg = LazyKnownContext(dct)
@@ -111,16 +161,26 @@ def get_executed_params(execution_context, var_args):
                 if param.default is None:
                     result_arg = LazyUnknownContext()
                     if not keys_only:
-                        for node in var_args.get_calling_nodes():
+                        for contextualized_node in arguments.get_calling_nodes():
                             m = _error_argument_count(funcdef, len(unpacked_va))
-                            analysis.add(parent_context, 'type-error-too-few-arguments',
-                                         node, message=m)
+                            issues.append(
+                                analysis.add(
+                                    contextualized_node.context,
+                                    'type-error-too-few-arguments',
+                                    contextualized_node.node,
+                                    message=m,
+                                )
+                            )
                 else:
-                    result_arg = LazyTreeContext(parent_context, param.default)
+                    result_arg = LazyTreeContext(default_param_context, param.default)
+                    is_default = True
            else:
                result_arg = argument

-        result_params.append(ExecutedParam(execution_context, param, result_arg))
+        result_params.append(ExecutedParam(
+            execution_context, param, result_arg,
+            is_default=is_default
+        ))
         if not isinstance(result_arg, LazyUnknownContext):
             keys_used[param.name.value] = result_params[-1]

@@ -134,31 +194,30 @@ def get_executed_params(execution_context, var_args):
             if not (non_matching_keys or had_multiple_value_error or
                     param.star_count or param.default):
                 # add a warning only if there's not another one.
-                for node in var_args.get_calling_nodes():
+                for contextualized_node in arguments.get_calling_nodes():
                     m = _error_argument_count(funcdef, len(unpacked_va))
-                    analysis.add(parent_context, 'type-error-too-few-arguments',
-                                 node, message=m)
+                    issues.append(
+                        analysis.add(contextualized_node.context,
+                                     'type-error-too-few-arguments',
+                                     contextualized_node.node, message=m)
+                    )

     for key, lazy_context in non_matching_keys.items():
         m = "TypeError: %s() got an unexpected keyword argument '%s'." \
             % (funcdef.name, key)
-        _add_argument_issue(
-            parent_context,
-            'type-error-keyword-argument',
-            lazy_context,
-            message=m
-        )
+        issues.append(
+            _add_argument_issue(
+                'type-error-keyword-argument',
+                lazy_context,
+                message=m
+            )
+        )

     remaining_arguments = list(var_arg_iterator)
     if remaining_arguments:
-        m = _error_argument_count(funcdef, len(unpacked_va))
-        # Just report an error for the first param that is not needed (like
-        # cPython).
         first_key, lazy_context = remaining_arguments[0]
-        if var_args.get_calling_nodes():
-            # There might not be a valid calling node so check for that first.
-            _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m)
-    return result_params
+        too_many_args(lazy_context)
+    return result_params, issues


 def _error_argument_count(funcdef, actual_count):
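The behavioural change in this hunk is the return shape: callers now get the executed params together with a list of analysis issues (with None entries where no calling node was found). A self-contained toy model of that pattern, not jedi's actual classes:

from typing import List, Optional

def executed_params_and_issues_demo(param_names, args):
    # Toy model only: pair params with args and collect issue codes instead
    # of reporting them as a side effect, mirroring the new API above.
    issues: List[Optional[str]] = []
    if len(args) > len(param_names):
        issues.append('type-error-too-many-arguments')
    elif len(args) < len(param_names):
        issues.append('type-error-too-few-arguments')
    return list(zip(param_names, args)), issues

print(executed_params_and_issues_demo(['a', 'b'], [1, 2, 3]))
# ([('a', 1), ('b', 2)], ['type-error-too-many-arguments'])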
@@ -1,330 +0,0 @@
-"""
-PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
-through function annotations. There is a strong suggestion in this document
-that only the type of type hinting defined in PEP0484 should be allowed
-as annotations in future python versions.
-
-The (initial / probably incomplete) implementation todo list for pep-0484:
-v Function parameter annotations with builtin/custom type classes
-v Function returntype annotations with builtin/custom type classes
-v Function parameter annotations with strings (forward reference)
-v Function return type annotations with strings (forward reference)
-v Local variable type hints
-v Assigned types: `Url = str\ndef get(url:Url) -> str:`
-v Type hints in `with` statements
-x Stub files support
-x support `@no_type_check` and `@no_type_check_decorator`
-x support for typing.cast() operator
-x support for type hint comments for functions, `# type: (int, str) -> int`.
-See comment from Guido https://github.com/davidhalter/jedi/issues/662
-"""
-
-import os
-import re
-
-from parso import ParserSyntaxError, parse, split_lines
-from parso.python import tree
-
-from jedi._compatibility import unicode, force_unicode
-from jedi.evaluate.cache import evaluator_method_cache
-from jedi.evaluate import compiled
-from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet
-from jedi.evaluate.lazy_context import LazyTreeContext
-from jedi.evaluate.context import ModuleContext
-from jedi.evaluate.helpers import is_string
-from jedi import debug
-from jedi import parser_utils
-
-
-def _evaluate_for_annotation(context, annotation, index=None):
-    """
-    Evaluates a string-node, looking for an annotation
-    If index is not None, the annotation is expected to be a tuple
-    and we're interested in that index
-    """
-    context_set = context.eval_node(_fix_forward_reference(context, annotation))
-    return context_set.execute_evaluated()
-
-
-def _evaluate_annotation_string(context, string, index=None):
-    node = _get_forward_reference_node(context, string)
-    if node is None:
-        return NO_CONTEXTS
-
-    context_set = context.eval_node(node)
-    if index is not None:
-        context_set = context_set.filter(
-            lambda context: context.array_type == u'tuple'
-                            and len(list(context.py__iter__())) >= index
-        ).py__getitem__(index)
-    return context_set.execute_evaluated()
-
-
-def _fix_forward_reference(context, node):
-    evaled_nodes = context.eval_node(node)
-    if len(evaled_nodes) != 1:
-        debug.warning("Eval'ed typing index %s should lead to 1 object, "
-                      " not %s" % (node, evaled_nodes))
-        return node
-
-    evaled_context = list(evaled_nodes)[0]
-    if is_string(evaled_context):
-        result = _get_forward_reference_node(context, evaled_context.get_safe_value())
-        if result is not None:
-            return result
-
-    return node
-
-
-def _get_forward_reference_node(context, string):
-    try:
-        new_node = context.evaluator.grammar.parse(
-            force_unicode(string),
-            start_symbol='eval_input',
-            error_recovery=False
-        )
-    except ParserSyntaxError:
-        debug.warning('Annotation not parsed: %s' % string)
-        return None
-    else:
-        module = context.tree_node.get_root_node()
-        parser_utils.move(new_node, module.end_pos[0])
-        new_node.parent = context.tree_node
-        return new_node
-
-
-def _split_comment_param_declaration(decl_text):
-    """
-    Split decl_text on commas, but group generic expressions
-    together.
-
-    For example, given "foo, Bar[baz, biz]" we return
-    ['foo', 'Bar[baz, biz]'].
-
-    """
-    try:
-        node = parse(decl_text, error_recovery=False).children[0]
-    except ParserSyntaxError:
-        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
-        return []
-
-    if node.type == 'name':
-        return [node.get_code().strip()]
-
-    params = []
-    try:
-        children = node.children
-    except AttributeError:
-        return []
-    else:
-        for child in children:
-            if child.type in ['name', 'atom_expr', 'power']:
-                params.append(child.get_code().strip())
-
-    return params
-
-
-@evaluator_method_cache()
-def infer_param(execution_context, param):
-    """
-    Infers the type of a function parameter, using type annotations.
-    """
-    annotation = param.annotation
-    if annotation is None:
-        # If no Python 3-style annotation, look for a Python 2-style comment
-        # annotation.
-        # Identify parameters to function in the same sequence as they would
-        # appear in a type comment.
-        all_params = [child for child in param.parent.children
-                      if child.type == 'param']
-
-        node = param.parent.parent
-        comment = parser_utils.get_following_comment_same_line(node)
-        if comment is None:
-            return NO_CONTEXTS
-
-        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
-        if not match:
-            return NO_CONTEXTS
-        params_comments = _split_comment_param_declaration(match.group(1))
-
-        # Find the specific param being investigated
-        index = all_params.index(param)
-        # If the number of parameters doesn't match length of type comment,
-        # ignore first parameter (assume it's self).
-        if len(params_comments) != len(all_params):
-            debug.warning(
-                "Comments length != Params length %s %s",
-                params_comments, all_params
-            )
-        from jedi.evaluate.context.instance import InstanceArguments
-        if isinstance(execution_context.var_args, InstanceArguments):
-            if index == 0:
-                # Assume it's self, which is already handled
-                return NO_CONTEXTS
-            index -= 1
-        if index >= len(params_comments):
-            return NO_CONTEXTS
-
-        param_comment = params_comments[index]
-        return _evaluate_annotation_string(
-            execution_context.get_root_context(),
-            param_comment
-        )
-    module_context = execution_context.get_root_context()
-    return _evaluate_for_annotation(module_context, annotation)
-
-
-def py__annotations__(funcdef):
-    return_annotation = funcdef.annotation
-    if return_annotation:
-        dct = {'return': return_annotation}
-    else:
-        dct = {}
-    for function_param in funcdef.get_params():
-        param_annotation = function_param.annotation
-        if param_annotation is not None:
-            dct[function_param.name.value] = param_annotation
-    return dct
-
-
-@evaluator_method_cache()
-def infer_return_types(function_context):
-    """
-    Infers the type of a function's return value,
-    according to type annotations.
-    """
-    annotation = py__annotations__(function_context.tree_node).get("return", None)
-    if annotation is None:
-        # If there is no Python 3-type annotation, look for a Python 2-type annotation
-        node = function_context.tree_node
-        comment = parser_utils.get_following_comment_same_line(node)
-        if comment is None:
-            return NO_CONTEXTS
-
-        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
-        if not match:
-            return NO_CONTEXTS
-
-        return _evaluate_annotation_string(
-            function_context.get_root_context(),
-            match.group(1).strip()
-        )
-
-    module_context = function_context.get_root_context()
-    return _evaluate_for_annotation(module_context, annotation)
-
-
-_typing_module = None
-_typing_module_code_lines = None
-
-
-def _get_typing_replacement_module(grammar):
-    """
-    The idea is to return our jedi replacement for the PEP-0484 typing module
-    as discussed at https://github.com/davidhalter/jedi/issues/663
-    """
-    global _typing_module, _typing_module_code_lines
-    if _typing_module is None:
-        typing_path = \
-            os.path.abspath(os.path.join(__file__, "../jedi_typing.py"))
-        with open(typing_path) as f:
-            code = unicode(f.read())
-        _typing_module = grammar.parse(code)
-        _typing_module_code_lines = split_lines(code, keepends=True)
-    return _typing_module, _typing_module_code_lines
-
-
-def py__getitem__(context, typ, node):
-    if not typ.get_root_context().name.string_name == "typing":
-        return None
-    # we assume that any class using [] in a module called
-    # "typing" with a name for which we have a replacement
-    # should be replaced by that class. This is not 100%
-    # airtight but I don't have a better idea to check that it's
-    # actually the PEP-0484 typing module and not some other
-    if node.type == "subscriptlist":
-        nodes = node.children[::2]  # skip the commas
-    else:
-        nodes = [node]
-    del node
-
-    nodes = [_fix_forward_reference(context, node) for node in nodes]
-    type_name = typ.name.string_name
-
-    # hacked in Union and Optional, since it's hard to do nicely in parsed code
-    if type_name in ("Union", '_Union'):
-        # In Python 3.6 it's still called typing.Union but it's an instance
-        # called _Union.
-        return ContextSet.from_sets(context.eval_node(node) for node in nodes)
-    if type_name in ("Optional", '_Optional'):
-        # Here we have the same issue like in Union. Therefore we also need to
-        # check for the instance typing._Optional (Python 3.6).
-        return context.eval_node(nodes[0])
-
-    module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar)
-    typing = ModuleContext(
-        context.evaluator,
-        module_node=module_node,
-        path=None,
-        code_lines=code_lines,
-    )
-    factories = typing.py__getattribute__("factory")
-    assert len(factories) == 1
-    factory = list(factories)[0]
-    assert factory
-    function_body_nodes = factory.tree_node.children[4].children
-    valid_classnames = set(child.name.value
-                           for child in function_body_nodes
-                           if isinstance(child, tree.Class))
-    if type_name not in valid_classnames:
-        return None
-    compiled_classname = compiled.create_simple_object(context.evaluator, type_name)
-
-    from jedi.evaluate.context.iterable import FakeSequence
-    args = FakeSequence(
-        context.evaluator,
-        u'tuple',
-        [LazyTreeContext(context, n) for n in nodes]
-    )
-
-    result = factory.execute_evaluated(compiled_classname, args)
-    return result
-
-
-def find_type_from_comment_hint_for(context, node, name):
-    return _find_type_from_comment_hint(context, node, node.children[1], name)
-
-
-def find_type_from_comment_hint_with(context, node, name):
-    assert len(node.children[1].children) == 3, \
-        "Can only be here when children[1] is 'foo() as f'"
-    varlist = node.children[1].children[2]
-    return _find_type_from_comment_hint(context, node, varlist, name)
-
-
-def find_type_from_comment_hint_assign(context, node, name):
-    return _find_type_from_comment_hint(context, node, node.children[0], name)
-
-
-def _find_type_from_comment_hint(context, node, varlist, name):
-    index = None
-    if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
-        # something like "a, b = 1, 2"
-        index = 0
-        for child in varlist.children:
-            if child == name:
-                break
-            if child.type == "operator":
-                continue
-            index += 1
-        else:
-            return []
-
-    comment = parser_utils.get_following_comment_same_line(node)
-    if comment is None:
-        return []
-    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
-    if match is None:
-        return []
-    return _evaluate_annotation_string(context, match.group(1).strip(), index)
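For reference, the comment hints this (now deleted) module matched look like the following. The regex here merges the two patterns used by infer_param and infer_return_types above; the sample comment is purely illustrative:

import re

FUNC_TYPE_COMMENT = re.compile(r"^#\s*type:\s*\(([^#]*)\)\s*->\s*([^#]*)")

comment = '# type: (int, str) -> bool'
match = FUNC_TYPE_COMMENT.match(comment)
print(match.group(1).strip())  # 'int, str', fed to _split_comment_param_declaration
print(match.group(2).strip())  # 'bool', evaluated as the return annotation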
@@ -79,9 +79,9 @@ def execution_recursion_decorator(default=NO_CONTEXTS):
     def decorator(func):
         def wrapper(self, **kwargs):
             detector = self.evaluator.execution_recursion_detector
-            allowed = detector.push_execution(self)
+            limit_reached = detector.push_execution(self)
             try:
-                if allowed:
+                if limit_reached:
                     result = default
                 else:
                     result = func(self, **kwargs)
@@ -116,6 +116,7 @@ class ExecutionRecursionDetector(object):
         self._parent_execution_funcs.append(funcdef)

         module = execution.get_root_context()
+
         if module == self._evaluator.builtins_module:
             # We have control over builtins so we know they are not recursing
             # like crazy. Therefore we just let them execute always, because
@@ -123,16 +124,30 @@
             return False

         if self._recursion_level > recursion_limit:
+            debug.warning('Recursion limit (%s) reached', recursion_limit)
             return True

         if self._execution_count >= total_function_execution_limit:
+            debug.warning('Function execution limit (%s) reached', total_function_execution_limit)
             return True
         self._execution_count += 1

         if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
+            if module.py__name__() in ('builtins', 'typing'):
+                return False
+            debug.warning(
+                'Per function execution limit (%s) reached: %s',
+                per_function_execution_limit,
+                funcdef
+            )
             return True
         self._funcdef_execution_counts[funcdef] += 1

         if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
+            debug.warning(
+                'Per function recursion limit (%s) reached: %s',
+                per_function_recursion_limit,
+                funcdef
+            )
             return True
         return False
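To make the rename concrete: push_execution returns True once a limit is hit, and the decorator then short-circuits to the default instead of calling the wrapped function. A self-contained sketch of that contract, with a toy depth counter standing in for ExecutionRecursionDetector:

def execution_recursion_decorator(default=frozenset()):
    def decorator(func):
        depth = [0]

        def wrapper(*args, **kwargs):
            depth[0] += 1                 # stand-in for push_execution()
            limit_reached = depth[0] > 10
            try:
                if limit_reached:
                    return default        # short-circuit, as in the hunk above
                return func(*args, **kwargs)
            finally:
                depth[0] -= 1             # stand-in for pop_execution()
        return wrapper
    return decorator

@execution_recursion_decorator(default=0)
def countdown(n):
    return n if n == 0 else countdown(n - 1)

print(countdown(100))  # 0: the default takes over once the depth limit is hit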
jedi/evaluate/signature.py (new file, 94 lines)
@@ -0,0 +1,94 @@
+from jedi._compatibility import Parameter
+
+
+class AbstractSignature(object):
+    def __init__(self, context, is_bound=False):
+        self.context = context
+        self.is_bound = is_bound
+
+    @property
+    def name(self):
+        return self.context.name
+
+    @property
+    def annotation_string(self):
+        return ''
+
+    def to_string(self):
+        def param_strings():
+            is_positional = False
+            is_kw_only = False
+            for n in self.get_param_names():
+                kind = n.get_kind()
+                is_positional |= kind == Parameter.POSITIONAL_ONLY
+                if is_positional and kind != Parameter.POSITIONAL_ONLY:
+                    yield '/'
+                    is_positional = False
+
+                if kind == Parameter.VAR_POSITIONAL:
+                    is_kw_only = True
+                elif kind == Parameter.KEYWORD_ONLY and not is_kw_only:
+                    yield '*'
+                    is_kw_only = True
+
+                yield n.to_string()
+
+            if is_positional:
+                yield '/'
+
+        s = self.name.string_name + '(' + ', '.join(param_strings()) + ')'
+        annotation = self.annotation_string
+        if annotation:
+            s += ' -> ' + annotation
+        return s
+
+    def bind(self, context):
+        raise NotImplementedError
+
+    def get_param_names(self):
+        param_names = self._function_context.get_param_names()
+        if self.is_bound:
+            return param_names[1:]
+        return param_names
+
+
+class TreeSignature(AbstractSignature):
+    def __init__(self, context, function_context=None, is_bound=False):
+        super(TreeSignature, self).__init__(context, is_bound)
+        self._function_context = function_context or context
+
+    def bind(self, context):
+        return TreeSignature(context, self._function_context, is_bound=True)
+
+    @property
+    def _annotation(self):
+        # Classes don't need annotations, even if __init__ has one. They always
+        # return themselves.
+        if self.context.is_class():
+            return None
+        return self._function_context.tree_node.annotation
+
+    @property
+    def annotation_string(self):
+        a = self._annotation
+        if a is None:
+            return ''
+        return a.get_code(include_prefix=False)
+
+
+class BuiltinSignature(AbstractSignature):
+    def __init__(self, context, return_string, is_bound=False):
+        super(BuiltinSignature, self).__init__(context, is_bound)
+        self._return_string = return_string
+
+    @property
+    def annotation_string(self):
+        return self._return_string
+
+    @property
+    def _function_context(self):
+        return self.context
+
+    def bind(self, context):
+        assert not self.is_bound
+        return BuiltinSignature(context, self._return_string, is_bound=True)
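The '/' and '*' bookkeeping in to_string() mirrors how Python renders positional-only and keyword-only markers. The same loop, lifted out as a runnable sketch over stdlib Parameter kinds instead of jedi's param name objects:

from inspect import Parameter

def to_string(name, params):
    # params: list of (name, Parameter.<KIND>) pairs
    def param_strings():
        is_positional = False
        is_kw_only = False
        for p_name, kind in params:
            is_positional |= kind == Parameter.POSITIONAL_ONLY
            if is_positional and kind != Parameter.POSITIONAL_ONLY:
                yield '/'
                is_positional = False
            if kind == Parameter.VAR_POSITIONAL:
                is_kw_only = True
            elif kind == Parameter.KEYWORD_ONLY and not is_kw_only:
                yield '*'
                is_kw_only = True
            yield p_name
        if is_positional:
            yield '/'
    return name + '(' + ', '.join(param_strings()) + ')'

print(to_string('f', [('a', Parameter.POSITIONAL_ONLY),
                      ('b', Parameter.POSITIONAL_OR_KEYWORD),
                      ('c', Parameter.KEYWORD_ONLY)]))
# f(a, /, b, *, c)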
@@ -1,321 +0,0 @@
-"""
-Implementations of standard library functions, because it's not possible to
-understand them with Jedi.
-
-To add a new implementation, create a function and add it to the
-``_implemented`` dict at the bottom of this module.
-
-Note that this module exists only to implement very specific functionality in
-the standard library. The usual way to understand the standard library is the
-compiled module that returns the types for C-builtins.
-"""
-import parso
-
-from jedi._compatibility import force_unicode
-from jedi import debug
-from jedi.evaluate.arguments import ValuesArguments, repack_with_argument_clinic
-from jedi.evaluate import analysis
-from jedi.evaluate import compiled
-from jedi.evaluate.context.instance import \
-    AbstractInstanceContext, CompiledInstance, BoundMethod, InstanceArguments
-from jedi.evaluate.base_context import ContextualizedNode, \
-    NO_CONTEXTS, ContextSet
-from jedi.evaluate.context import ClassContext, ModuleContext, FunctionExecutionContext
-from jedi.evaluate.context import iterable
-from jedi.evaluate.lazy_context import LazyTreeContext
-from jedi.evaluate.syntax_tree import is_string
-
-# Now this is all part of fake tuples in Jedi. However super doesn't work on
-# __init__ and __new__ doesn't work at all. So adding this to namedtuples is
-# just the easiest way.
-_NAMEDTUPLE_INIT = """
-    def __init__(_cls, {arg_list}):
-        'A helper function for namedtuple.'
-        self.__iterable = ({arg_list})
-
-    def __iter__(self):
-        for i in self.__iterable:
-            yield i
-
-    def __getitem__(self, y):
-        return self.__iterable[y]
-
-"""
-
-
-class NotInStdLib(LookupError):
-    pass
-
-
-def execute(evaluator, obj, arguments):
-    if isinstance(obj, BoundMethod):
-        raise NotInStdLib()
-
-    try:
-        obj_name = obj.name.string_name
-    except AttributeError:
-        pass
-    else:
-        if obj.parent_context == evaluator.builtins_module:
-            module_name = 'builtins'
-        elif isinstance(obj.parent_context, ModuleContext):
-            module_name = obj.parent_context.name.string_name
-        else:
-            module_name = ''
-
-        # for now we just support builtin functions.
-        try:
-            func = _implemented[module_name][obj_name]
-        except KeyError:
-            pass
-        else:
-            return func(evaluator, obj, arguments=arguments)
-    raise NotInStdLib()
-
-
-def _follow_param(evaluator, arguments, index):
-    try:
-        key, lazy_context = list(arguments.unpack())[index]
-    except IndexError:
-        return NO_CONTEXTS
-    else:
-        return lazy_context.infer()
-
-
-def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False):
-    """
-    Works like Argument Clinic (PEP 436), to validate function params.
-    """
-
-    def f(func):
-        @repack_with_argument_clinic(string, keep_arguments_param=True)
-        def wrapper(evaluator, obj, *args, **kwargs):
-            arguments = kwargs.pop('arguments')
-            assert not kwargs  # Python 2...
-            debug.dbg('builtin start %s' % obj, color='MAGENTA')
-            result = NO_CONTEXTS
-            if want_context:
-                kwargs['context'] = arguments.context
-            if want_obj:
-                kwargs['obj'] = obj
-            if want_arguments:
-                kwargs['arguments'] = arguments
-            result = func(evaluator, *args, **kwargs)
-            debug.dbg('builtin end: %s', result, color='MAGENTA')
-            return result
-
-        return wrapper
-    return f
-
-
-@argument_clinic('iterator[, default], /')
-def builtins_next(evaluator, iterators, defaults):
-    """
-    TODO this function is currently not used. It's a stab at implementing next
-    in a different way than fake objects. This would be a bit more flexible.
-    """
-    if evaluator.environment.version_info.major == 2:
-        name = 'next'
-    else:
-        name = '__next__'
-
-    context_set = NO_CONTEXTS
-    for iterator in iterators:
-        if isinstance(iterator, AbstractInstanceContext):
-            context_set = ContextSet.from_sets(
-                n.infer()
-                for filter in iterator.get_filters(include_self_names=True)
-                for n in filter.get(name)
-            ).execute_evaluated()
-    if context_set:
-        return context_set
-    return defaults
-
-
-@argument_clinic('object, name[, default], /')
-def builtins_getattr(evaluator, objects, names, defaults=None):
-    # follow the first param
-    for obj in objects:
-        for name in names:
-            if is_string(name):
-                return obj.py__getattribute__(force_unicode(name.get_safe_value()))
-            else:
-                debug.warning('getattr called without str')
-                continue
-    return NO_CONTEXTS
-
-
-@argument_clinic('object[, bases, dict], /')
-def builtins_type(evaluator, objects, bases, dicts):
-    if bases or dicts:
-        # It's a type creation... maybe someday...
-        return NO_CONTEXTS
-    else:
-        return objects.py__class__()
-
-
-class SuperInstance(AbstractInstanceContext):
-    """To be used like the object ``super`` returns."""
-    def __init__(self, evaluator, cls):
-        su = cls.py_mro()[1]
-        super().__init__(evaluator, su and su[0] or self)
-
-
-@argument_clinic('[type[, obj]], /', want_context=True)
-def builtins_super(evaluator, types, objects, context):
-    # TODO make this able to detect multiple inheritance super
-    if isinstance(context, FunctionExecutionContext):
-        if isinstance(context.var_args, InstanceArguments):
-            su = context.var_args.instance.py__class__().py__bases__()
-            return su[0].infer().execute_evaluated()
-
-    return NO_CONTEXTS
-
-
-@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
-def builtins_reversed(evaluator, sequences, obj, arguments):
-    # While we could do without this variable (just by using sequences), we
-    # want static analysis to work well. Therefore we need to generated the
-    # values again.
-    key, lazy_context = next(arguments.unpack())
-    cn = None
-    if isinstance(lazy_context, LazyTreeContext):
-        # TODO access private
-        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
-    ordered = list(sequences.iterate(cn))
-
-    rev = list(reversed(ordered))
-    # Repack iterator values and then run it the normal way. This is
-    # necessary, because `reversed` is a function and autocompletion
-    # would fail in certain cases like `reversed(x).__iter__` if we
-    # just returned the result directly.
-    seq = iterable.FakeSequence(evaluator, u'list', rev)
-    arguments = ValuesArguments([ContextSet(seq)])
-    return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments))
-
-
-@argument_clinic('obj, type, /', want_arguments=True)
-def builtins_isinstance(evaluator, objects, types, arguments):
-    bool_results = set()
-    for o in objects:
-        cls = o.py__class__()
-        try:
-            mro_func = cls.py__mro__
-        except AttributeError:
-            # This is temporary. Everything should have a class attribute in
-            # Python?! Maybe we'll leave it here, because some numpy objects or
-            # whatever might not.
-            bool_results = set([True, False])
-            break
-
-        mro = mro_func()
-
-        for cls_or_tup in types:
-            if cls_or_tup.is_class():
-                bool_results.add(cls_or_tup in mro)
-            elif cls_or_tup.name.string_name == 'tuple' \
-                    and cls_or_tup.get_root_context() == evaluator.builtins_module:
-                # Check for tuples.
-                classes = ContextSet.from_sets(
-                    lazy_context.infer()
-                    for lazy_context in cls_or_tup.iterate()
-                )
-                bool_results.add(any(cls in mro for cls in classes))
-            else:
-                _, lazy_context = list(arguments.unpack())[1]
-                if isinstance(lazy_context, LazyTreeContext):
-                    node = lazy_context.data
-                    message = 'TypeError: isinstance() arg 2 must be a ' \
-                              'class, type, or tuple of classes and types, ' \
-                              'not %s.' % cls_or_tup
-                    analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
-
-    return ContextSet.from_iterable(
-        compiled.builtin_from_name(evaluator, force_unicode(str(b)))
-        for b in bool_results
-    )
-
-
-def collections_namedtuple(evaluator, obj, arguments):
-    """
-    Implementation of the namedtuple function.
-
-    This has to be done by processing the namedtuple class template and
-    evaluating the result.
-
-    """
-    collections_context = obj.parent_context
-    _class_template_set = collections_context.py__getattribute__(u'_class_template')
-    if not _class_template_set:
-        # Namedtuples are not supported on Python 2.6, early 2.7, because the
-        # _class_template variable is not defined, there.
-        return NO_CONTEXTS
-
-    # Process arguments
-    # TODO here we only use one of the types, we should use all.
-    # TODO this is buggy, doesn't need to be a string
-    name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value()
-    _fields = list(_follow_param(evaluator, arguments, 1))[0]
-    if isinstance(_fields, compiled.CompiledObject):
-        fields = _fields.get_safe_value().replace(',', ' ').split()
-    elif isinstance(_fields, iterable.Sequence):
-        fields = [
-            v.get_safe_value()
-            for lazy_context in _fields.py__iter__()
-            for v in lazy_context.infer() if is_string(v)
-        ]
-    else:
-        return NO_CONTEXTS
-
-    def get_var(name):
-        x, = collections_context.py__getattribute__(name)
-        return x.get_safe_value()
-
-    base = next(iter(_class_template_set)).get_safe_value()
-    base += _NAMEDTUPLE_INIT
-    # Build source code
-    code = base.format(
-        typename=name,
-        field_names=tuple(fields),
-        num_fields=len(fields),
-        arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1],
-        repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields),
-        field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name)
-                             for index, name in enumerate(fields))
-    )
-
-    # Parse source code
-    module = evaluator.grammar.parse(code)
-    generated_class = next(module.iter_classdefs())
-    parent_context = ModuleContext(
-        evaluator, module, None,
-        code_lines=parso.split_lines(code, keepends=True),
-    )
-    return ContextSet(ClassContext(evaluator, parent_context, generated_class))
-
-
-@argument_clinic('first, /')
-def _return_first_param(evaluator, firsts):
-    return firsts
-
-
-_implemented = {
-    'builtins': {
-        'getattr': builtins_getattr,
-        'type': builtins_type,
-        'super': builtins_super,
-        'reversed': builtins_reversed,
-        'isinstance': builtins_isinstance,
-    },
-    'copy': {
-        'copy': _return_first_param,
-        'deepcopy': _return_first_param,
-    },
-    'json': {
-        'load': lambda evaluator, obj, arguments: NO_CONTEXTS,
-        'loads': lambda evaluator, obj, arguments: NO_CONTEXTS,
-    },
-    'collections': {
-        'namedtuple': collections_namedtuple,
-    },
-}
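collections_namedtuple above works by filling CPython's private _class_template and parsing the generated source. The same idea in miniature; the template below is a stand-in, since _class_template is not a public API:

template = (
    "class {typename}(tuple):\n"
    "    _fields = {field_names!r}\n"
)
code = template.format(typename='Point', field_names=('x', 'y'))
print(code)
# class Point(tuple):
#     _fields = ('x', 'y')
# jedi parses such generated code with its grammar and wraps the classdef
# in a ClassContext, as the deleted function did.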
@@ -10,20 +10,22 @@ from jedi import debug
 from jedi import parser_utils
 from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \
     ContextualizedName, iterator_to_context_set, iterate_contexts
 from jedi.evaluate.lazy_context import LazyTreeContext
 from jedi.evaluate import compiled
-from jedi.evaluate import pep0484
 from jedi.evaluate import recursion
 from jedi.evaluate import helpers
 from jedi.evaluate import analysis
 from jedi.evaluate import imports
 from jedi.evaluate import arguments
-from jedi.evaluate.pep0484 import _evaluate_for_annotation
 from jedi.evaluate.context import ClassContext, FunctionContext
 from jedi.evaluate.context import iterable
-from jedi.evaluate.context import TreeInstance, CompiledInstance
+from jedi.evaluate.context import TreeInstance
 from jedi.evaluate.finder import NameFinder
-from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled
+from jedi.evaluate.helpers import is_string, is_literal, is_number
 from jedi.evaluate.compiled.access import COMPARISON_OPERATORS
 from jedi.evaluate.cache import evaluator_method_cache
+from jedi.evaluate.gradual.stub_context import VersionInfo
+from jedi.evaluate.gradual import annotation


 def _limit_context_infers(func):
@@ -51,7 +53,7 @@ def _limit_context_infers(func):


 def _py__stop_iteration_returns(generators):
-    results = ContextSet()
+    results = NO_CONTEXTS
     for generator in generators:
         try:
             method = generator.py__stop_iteration_returns
@@ -65,13 +67,13 @@ def _py__stop_iteration_returns(generators):
 @debug.increase_indent
 @_limit_context_infers
 def eval_node(context, element):
-    debug.dbg('eval_node %s@%s', element, element.start_pos)
+    debug.dbg('eval_node %s@%s in %s', element, element.start_pos, context)
     evaluator = context.evaluator
     typ = element.type
-    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
+    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
         return eval_atom(context, element)
     elif typ == 'lambdef':
-        return ContextSet(FunctionContext.from_context(context, element))
+        return ContextSet([FunctionContext.from_context(context, element)])
     elif typ == 'expr_stmt':
         return eval_expr_stmt(context, element)
     elif typ in ('power', 'atom_expr'):
@@ -82,10 +84,10 @@ def eval_node(context, element):
             had_await = True
             first_child = children.pop(0)

-        context_set = eval_atom(context, first_child)
-        for trailer in children:
+        context_set = context.eval_node(first_child)
+        for (i, trailer) in enumerate(children):
             if trailer == '**':  # has a power operation.
-                right = context.eval_node(children[1])
+                right = context.eval_node(children[i + 1])
                 context_set = _eval_comparison(
                     evaluator,
                     context,
@@ -97,15 +99,11 @@ def eval_node(context, element):
             context_set = eval_trailer(context, context_set, trailer)

         if had_await:
-            await_context_set = context_set.py__getattribute__(u"__await__")
-            if not await_context_set:
-                debug.warning('Tried to run py__await__ on context %s', context)
-                context_set = ContextSet()
-            return _py__stop_iteration_returns(await_context_set.execute_evaluated())
+            return context_set.py__await__().py__stop_iteration_returns()
         return context_set
     elif typ in ('testlist_star_expr', 'testlist',):
         # The implicit tuple in statements.
-        return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element))
+        return ContextSet([iterable.SequenceLiteralContext(evaluator, context, element)])
     elif typ in ('not_test', 'factor'):
         context_set = context.eval_node(element.children[-1])
         for operator in element.children[:-1]:
@@ -122,7 +120,7 @@ def eval_node(context, element):
         if element.value not in ('.', '...'):
             origin = element.parent
             raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
-        return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis'))
+        return ContextSet([compiled.builtin_from_name(evaluator, u'Ellipsis')])
     elif typ == 'dotted_name':
         context_set = eval_atom(context, element.children[0])
         for next_name in element.children[2::2]:
@@ -132,16 +130,20 @@ def eval_node(context, element):
     elif typ == 'eval_input':
         return eval_node(context, element.children[0])
     elif typ == 'annassign':
-        return pep0484._evaluate_for_annotation(context, element.children[1])
+        return annotation.eval_annotation(context, element.children[1]) \
+            .execute_annotation()
     elif typ == 'yield_expr':
         if len(element.children) and element.children[1].type == 'yield_arg':
             # Implies that it's a yield from.
             element = element.children[1].children[1]
-            generators = context.eval_node(element)
-            return _py__stop_iteration_returns(generators)
+            generators = context.eval_node(element) \
+                .py__getattribute__('__iter__').execute_evaluated()
+            return generators.py__stop_iteration_returns()

         # Generator.send() is not implemented.
         return NO_CONTEXTS
+    elif typ == 'namedexpr_test':
+        return eval_node(context, element.children[2])
     else:
         return eval_or_test(context, element)
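Many one-line edits in this file follow a single API shift: ContextSet is now constructed from an iterable instead of positional contexts. A minimal model of the before and after signatures (the real class wraps jedi contexts, not ints):

class OldContextSet(frozenset):
    def __new__(cls, *args):            # ContextSet(a, b)
        return super().__new__(cls, args)

class NewContextSet(frozenset):
    def __new__(cls, iterable=()):      # ContextSet([a, b])
        return super().__new__(cls, iterable)

assert OldContextSet(1, 2) == NewContextSet([1, 2])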
@@ -153,20 +155,7 @@ def eval_trailer(context, base_contexts, trailer):

     if trailer_op == '[':
         trailer_op, node, _ = trailer.children
-
-        # TODO It's kind of stupid to cast this from a context set to a set.
-        foo = set(base_contexts)
-        # special case: PEP0484 typing module, see
-        # https://github.com/davidhalter/jedi/issues/663
-        result = ContextSet()
-        for typ in list(foo):
-            if isinstance(typ, (ClassContext, TreeInstance)):
-                typing_module_types = pep0484.py__getitem__(context, typ, node)
-                if typing_module_types is not None:
-                    foo.remove(typ)
-                    result |= typing_module_types
-
-        return result | base_contexts.get_item(
+        return base_contexts.get_item(
             eval_subscript_list(context.evaluator, context, node),
             ContextualizedNode(context, trailer)
         )
@@ -190,21 +179,33 @@ def eval_atom(context, atom):
     might be a name or a literal as well.
     """
     if atom.type == 'name':
+        if atom.value in ('True', 'False', 'None'):
+            # Python 2...
+            return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)])
+
         # This is the first global lookup.
         stmt = tree.search_ancestor(
             atom, 'expr_stmt', 'lambdef'
         ) or atom
         if stmt.type == 'lambdef':
             stmt = atom
+        position = stmt.start_pos
+        if _is_annotation_name(atom):
+            # Since Python 3.7 (with from __future__ import annotations),
+            # annotations are essentially strings and can reference objects
+            # that are defined further down in code. Therefore just set the
+            # position to None, so the finder will not try to stop at a certain
+            # position in the module.
+            position = None
         return context.py__getattribute__(
             name_or_str=atom,
-            position=stmt.start_pos,
+            position=position,
             search_global=True
         )
     elif atom.type == 'keyword':
         # For False/True/None
         if atom.value in ('False', 'True', 'None'):
-            return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value))
+            return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)])
         elif atom.value == 'print':
             # print e.g. could be evaluated like this in Python 2.7
             return NO_CONTEXTS
@@ -216,7 +217,7 @@ def eval_atom(context, atom):

     elif isinstance(atom, tree.Literal):
         string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
-        return ContextSet(compiled.create_simple_object(context.evaluator, string))
+        return ContextSet([compiled.create_simple_object(context.evaluator, string)])
     elif atom.type == 'strings':
         # Will be multiple string.
         context_set = eval_atom(context, atom.children[0])
@@ -224,6 +225,8 @@ def eval_atom(context, atom):
             right = eval_atom(context, string)
             context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right)
         return context_set
+    elif atom.type == 'fstring':
+        return compiled.get_string_context_set(context.evaluator)
     else:
         c = atom.children
         # Parentheses without commas are not tuples.
@@ -244,10 +247,10 @@ def eval_atom(context, atom):
         except IndexError:
             pass

-        if comp_for.type == 'comp_for':
-            return ContextSet(iterable.comprehension_from_atom(
+        if comp_for.type in ('comp_for', 'sync_comp_for'):
+            return ContextSet([iterable.comprehension_from_atom(
                 context.evaluator, context, atom
-            ))
+            )])

         # It's a dict/list/tuple literal.
         array_node = c[1]
@@ -260,7 +263,7 @@ def eval_atom(context, atom):
             context = iterable.DictLiteralContext(context.evaluator, context, atom)
         else:
             context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
-        return ContextSet(context)
+        return ContextSet([context])


 @_limit_context_infers
@@ -388,7 +391,7 @@ def _literals_to_types(evaluator, result):
             cls = compiled.builtin_from_name(evaluator, typ.name.string_name)
             new_result |= cls.execute_evaluated()
         else:
-            new_result |= ContextSet(typ)
+            new_result |= ContextSet([typ])
     return new_result


@@ -411,6 +414,22 @@ def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts
     )


+def _is_annotation_name(name):
+    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
+    if ancestor is None:
+        return False
+
+    if ancestor.type in ('param', 'funcdef'):
+        ann = ancestor.annotation
+        if ann is not None:
+            return ann.start_pos <= name.start_pos < ann.end_pos
+    elif ancestor.type == 'expr_stmt':
+        c = ancestor.children
+        if len(c) > 1 and c[1].type == 'annassign':
+            return c[1].start_pos <= name.start_pos < c[1].end_pos
+    return False
+
+
 def _is_tuple(context):
     return isinstance(context, iterable.Sequence) and context.array_type == 'tuple'
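The position=None handling above exists because, under postponed evaluation of annotations, a hint may legally reference a name defined later in the module. A self-contained illustration (the names are only for the example):

from __future__ import annotations

def parse(data: Row) -> Row:    # `Row` resolves although it is defined below
    return data

class Row:
    pass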
@@ -423,6 +442,23 @@ def _bool_to_context(evaluator, bool_):
     return compiled.builtin_from_name(evaluator, force_unicode(str(bool_)))


+def _get_tuple_ints(context):
+    if not isinstance(context, iterable.SequenceLiteralContext):
+        return None
+    numbers = []
+    for lazy_context in context.py__iter__():
+        if not isinstance(lazy_context, LazyTreeContext):
+            return None
+        node = lazy_context.data
+        if node.type != 'number':
+            return None
+        try:
+            numbers.append(int(node.value))
+        except ValueError:
+            return None
+    return numbers
+
+
 def _eval_comparison_part(evaluator, context, left, operator, right):
     l_is_num = is_number(left)
     r_is_num = is_number(right)
@@ -434,26 +470,26 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
     if str_operator == '*':
         # for iterables, ignore * operations
         if isinstance(left, iterable.Sequence) or is_string(left):
-            return ContextSet(left)
+            return ContextSet([left])
         elif isinstance(right, iterable.Sequence) or is_string(right):
-            return ContextSet(right)
+            return ContextSet([right])
     elif str_operator == '+':
         if l_is_num and r_is_num or is_string(left) and is_string(right):
-            return ContextSet(left.execute_operation(right, str_operator))
+            return ContextSet([left.execute_operation(right, str_operator)])
         elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
-            return ContextSet(iterable.MergedArray(evaluator, (left, right)))
+            return ContextSet([iterable.MergedArray(evaluator, (left, right))])
     elif str_operator == '-':
         if l_is_num and r_is_num:
-            return ContextSet(left.execute_operation(right, str_operator))
+            return ContextSet([left.execute_operation(right, str_operator)])
     elif str_operator == '%':
         # With strings and numbers the left type typically remains. Except for
         # `int() % float()`.
-        return ContextSet(left)
+        return ContextSet([left])
     elif str_operator in COMPARISON_OPERATORS:
-        if is_compiled(left) and is_compiled(right):
+        if left.is_compiled() and right.is_compiled():
             # Possible, because the return is not an option. Just compare.
             try:
-                return ContextSet(left.execute_operation(right, str_operator))
+                return ContextSet([left.execute_operation(right, str_operator)])
             except TypeError:
                 # Could be True or False.
                 pass
@@ -461,15 +497,24 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
         if str_operator in ('is', '!=', '==', 'is not'):
             operation = COMPARISON_OPERATORS[str_operator]
             bool_ = operation(left, right)
-            return ContextSet(_bool_to_context(evaluator, bool_))
+            return ContextSet([_bool_to_context(evaluator, bool_)])

-        return ContextSet(_bool_to_context(evaluator, True), _bool_to_context(evaluator, False))
+        if isinstance(left, VersionInfo):
+            version_info = _get_tuple_ints(right)
+            if version_info is not None:
+                bool_result = compiled.access.COMPARISON_OPERATORS[operator](
+                    evaluator.environment.version_info,
+                    tuple(version_info)
+                )
+                return ContextSet([_bool_to_context(evaluator, bool_result)])
+
+        return ContextSet([_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)])
     elif str_operator == 'in':
         return NO_CONTEXTS

     def check(obj):
         """Checks if a Jedi object is either a float or an int."""
-        return isinstance(obj, CompiledInstance) and \
+        return isinstance(obj, TreeInstance) and \
             obj.name.string_name in ('int', 'float')

     # Static analysis, one is a number, the other one is not.
@@ -479,7 +524,9 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
         analysis.add(context, 'type-error-operation', operator,
                      message % (left, right))

-    return ContextSet(left, right)
+    result = ContextSet([left, right])
+    debug.dbg('Used operator %s resulting in %s', operator, result)
+    return result


 def _remove_statements(evaluator, context, stmt, name):
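The VersionInfo branch lets jedi fold a check like sys.version_info >= (3, 6) into a single boolean for the environment being inspected, instead of the generic {True, False} fallback. In plain Python terms, with a tuple standing in for evaluator.environment.version_info:

import operator

version_info = (3, 7, 4)   # stand-in for the inspected environment's version
print(operator.ge(version_info, (3, 6)))   # True, so only one bool is returned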
@@ -490,7 +537,7 @@ def _remove_statements(evaluator, context, stmt, name):
     evaluated.
     """
     pep0484_contexts = \
-        pep0484.find_type_from_comment_hint_assign(context, stmt, name)
+        annotation.find_type_from_comment_hint_assign(context, stmt, name)
     if pep0484_contexts:
         return pep0484_contexts
@@ -498,21 +545,22 @@


 def tree_name_to_contexts(evaluator, context, tree_name):
-    context_set = ContextSet()
+    context_set = NO_CONTEXTS
     module_node = context.get_root_context().tree_node
     # First check for annotations, like: `foo: int = 3`
     if module_node is not None:
         names = module_node.get_used_names().get(tree_name.value, [])
         for name in names:
             expr_stmt = name.parent
-
-            correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
-
-            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign" and correct_scope:
-                context_set |= _evaluate_for_annotation(context, expr_stmt.children[1].children[1])
-
-        if context_set:
-            return context_set
+            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
+                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
+                if correct_scope:
+                    context_set |= annotation.eval_annotation(
+                        context, expr_stmt.children[1].children[1]
+                    ).execute_annotation()
+        if context_set:
+            return context_set

     types = []
     node = tree_name.get_definition(import_name_always=True)
@@ -527,19 +575,20 @@ def tree_name_to_contexts(evaluator, context, tree_name):
         filters = [next(filters)]
         return finder.find(filters, attribute_lookup=False)
     elif node.type not in ('import_from', 'import_name'):
-        raise ValueError("Should not happen. type: %s", node.type)
+        context = evaluator.create_context(context, tree_name)
+        return eval_atom(context, tree_name)

     typ = node.type
     if typ == 'for_stmt':
-        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
+        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
         if types:
             return types
     if typ == 'with_stmt':
-        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
+        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
         if types:
             return types

-    if typ in ('for_stmt', 'comp_for'):
+    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
         try:
             types = context.predefined_names[node][tree_name.value]
         except KeyError:
@@ -567,11 +616,16 @@ def tree_name_to_contexts(evaluator, context, tree_name):
             # the static analysis report.
             exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
             types = exceptions.execute_evaluated()
+    elif node.type == 'param':
+        types = NO_CONTEXTS
     else:
         raise ValueError("Should not happen. type: %s" % typ)
     return types


+# We don't want to have functions/classes that are created by the same
+# tree_node.
+@evaluator_method_cache()
 def _apply_decorators(context, node):
     """
     Returns the function that should be executed in the end.
@@ -585,27 +639,33 @@ def _apply_decorators(context, node):
         )
     else:
         decoratee_context = FunctionContext.from_context(context, node)
-    initial = values = ContextSet(decoratee_context)
+    initial = values = ContextSet([decoratee_context])
     for dec in reversed(node.get_decorators()):
-        debug.dbg('decorator: %s %s', dec, values)
-        dec_values = context.eval_node(dec.children[1])
-        trailer_nodes = dec.children[2:-1]
-        if trailer_nodes:
-            # Create a trailer and evaluate it.
-            trailer = tree.PythonNode('trailer', trailer_nodes)
-            trailer.parent = dec
-            dec_values = eval_trailer(context, dec_values, trailer)
+        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
+        with debug.increase_indent_cm():
+            dec_values = context.eval_node(dec.children[1])
+            trailer_nodes = dec.children[2:-1]
+            if trailer_nodes:
+                # Create a trailer and evaluate it.
+                trailer = tree.PythonNode('trailer', trailer_nodes)
+                trailer.parent = dec
+                dec_values = eval_trailer(context, dec_values, trailer)

-        if not len(dec_values):
-            debug.warning('decorator not found: %s on %s', dec, node)
-            return initial
+            if not len(dec_values):
+                code = dec.get_code(include_prefix=False)
+                # For the short future, we don't want to hear about the runtime
+                # decorator in typing that was intentionally omitted. This is not
+                # "correct", but helps with debugging.
+                if code != '@runtime\n':
+                    debug.warning('decorator not found: %s on %s', dec, node)
+                return initial

-        values = dec_values.execute(arguments.ValuesArguments([values]))
-        if not len(values):
-            debug.warning('not possible to resolve wrappers found %s', node)
-            return initial
+            values = dec_values.execute(arguments.ValuesArguments([values]))
+            if not len(values):
+                debug.warning('not possible to resolve wrappers found %s', node)
+                return initial

-    debug.dbg('decorator end %s', values)
+    debug.dbg('decorator end %s', values, color="MAGENTA")
     return values
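For orientation in the hunk above: dec.children[1] is the decorator's name node and any remaining children form the call trailer that eval_trailer evaluates. The plain-Python case being modelled (names are illustrative only):

def deco(arg):
    def wrapper(func):
        return func
    return wrapper

@deco('x')     # `deco` is dec.children[1]; ('x') is the trailer
def f():
    return 1

print(f())     # 1, after jedi-style unwrapping of the decorator chain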
@@ -617,6 +677,9 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
     for index, node in contextualized_name.assignment_indexes():
         cn = ContextualizedNode(contextualized_name.context, node)
         iterated = context_set.iterate(cn)
+        if isinstance(index, slice):
+            # For now star unpacking is not possible.
+            return NO_CONTEXTS
         for _ in range(index + 1):
             try:
                 lazy_context = next(iterated)
@@ -625,7 +688,7 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
                 # would allow this loop to run for a very long time if the
                 # index number is high. Therefore break if the loop is
                 # finished.
-                return ContextSet()
+                return NO_CONTEXTS
         context_set = lazy_context.infer()
     return context_set
@@ -636,7 +699,7 @@ def eval_subscript_list(evaluator, context, index):
     """
     if index == ':':
         # Like array[:]
-        return ContextSet(iterable.Slice(context, None, None, None))
+        return ContextSet([iterable.Slice(context, None, None, None)])

     elif index.type == 'subscript' and not index.children[0] == '.':
         # subscript basically implies a slice operation, except for Python 2's
@@ -654,9 +717,9 @@ def eval_subscript_list(evaluator, context, index):
             result.append(el)
         result += [None] * (3 - len(result))

-        return ContextSet(iterable.Slice(context, *result))
+        return ContextSet([iterable.Slice(context, *result)])
     elif index.type == 'subscriptlist':
-        return NO_CONTEXTS
+        return ContextSet([iterable.SequenceLiteralContext(evaluator, context, index)])

     # No slices
     return context.eval_node(index)
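The three node shapes eval_subscript_list distinguishes, written as ordinary subscripts; each comment maps a line to a branch above:

lst = [1, 2, 3]
lst[:]          # ':' node, becomes iterable.Slice(context, None, None, None)
lst[0:2]        # 'subscript' node, a Slice built from the parsed bounds
d = {(1, 2): 'value'}
d[1, 2]         # 'subscriptlist' node, now evaluated as a tuple key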
@@ -6,6 +6,7 @@ from jedi.evaluate.base_context import ContextualizedNode
|
||||
from jedi.evaluate.helpers import is_string
|
||||
from jedi.common.utils import traverse_parents
|
||||
from jedi.parser_utils import get_cached_code_lines
|
||||
from jedi.file_io import FileIO
|
||||
from jedi import settings
|
||||
from jedi import debug
|
||||
|
||||
@@ -99,7 +100,7 @@ def check_sys_path_modifications(module_context):
|
||||
def get_sys_path_powers(names):
|
||||
for name in names:
|
||||
power = name.parent.parent
|
||||
if power.type in ('power', 'atom_expr'):
|
||||
if power is not None and power.type in ('power', 'atom_expr'):
|
||||
c = power.children
|
||||
if c[0].type == 'name' and c[0].value == 'sys' \
|
||||
and c[1].type == 'trailer':
|
||||
@@ -140,9 +141,10 @@ def discover_buildout_paths(evaluator, script_path):
|
||||
|
||||
|
||||
def _get_paths_from_buildout_script(evaluator, buildout_script_path):
|
||||
file_io = FileIO(buildout_script_path)
|
||||
try:
|
||||
module_node = evaluator.parse(
|
||||
path=buildout_script_path,
|
||||
file_io=file_io,
|
||||
cache=True,
|
||||
cache_path=settings.cache_directory
|
||||
)
|
||||
@@ -152,7 +154,8 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path):
|
||||
|
||||
from jedi.evaluate.context import ModuleContext
|
||||
module = ModuleContext(
|
||||
evaluator, module_node, buildout_script_path,
|
||||
evaluator, module_node, file_io,
|
||||
string_names=None,
|
||||
code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path),
|
||||
)
|
||||
for path in check_sys_path_modifications(module):
|
||||
@@ -196,31 +199,63 @@ def _get_buildout_script_paths(search_path):
            continue


def dotted_path_in_sys_path(sys_path, module_path):
def remove_python_path_suffix(path):
    for suffix in all_suffixes():
        if path.endswith(suffix):
            path = path[:-len(suffix)]
            break
    return path


def transform_path_to_dotted(sys_path, module_path):
    """
    Returns the dotted path inside a sys.path as a list of names.
    Returns the dotted path inside a sys.path as a list of names. e.g.

    >>> from os.path import abspath
    >>> transform_path_to_dotted([abspath("/foo")], abspath('/foo/bar/baz.py'))
    (('bar', 'baz'), False)

    Returns (None, False) if the path doesn't really resolve to anything.
    The second return value is whether it is a package.
    """
    # First remove the suffix.
    for suffix in all_suffixes():
        if module_path.endswith(suffix):
            module_path = module_path[:-len(suffix)]
            break
    else:
        # There should always be a suffix in a valid Python file on the path.
        return None
    module_path = remove_python_path_suffix(module_path)

    if module_path.startswith(os.path.sep):
        # The paths in sys.path most of the times don't end with a slash.
        module_path = module_path[1:]
    # Once the suffix was removed we are using the files as we know them. This
    # means that if someone uses an ending like .vim for a Python file, .vim
    # will be part of the returned dotted part.

    for p in sys_path:
        if module_path.startswith(p):
            rest = module_path[len(p):]
            if rest:
                split = rest.split(os.path.sep)
                for string in split:
                    if not string or '.' in string:
                        return None
                return split
    is_package = module_path.endswith(os.path.sep + '__init__')
    if is_package:
        # -1 to remove the separator
        module_path = module_path[:-len('__init__') - 1]

    return None
    def iter_potential_solutions():
        for p in sys_path:
            if module_path.startswith(p):
                # Strip the trailing slash/backslash
                rest = module_path[len(p):]
                # On Windows a path can also use a slash.
                if rest.startswith(os.path.sep) or rest.startswith('/'):
                    # Remove a slash in cases it's still there.
                    rest = rest[1:]

                if rest:
                    split = rest.split(os.path.sep)
                    if not all(split):
                        # This means that part of the file path was empty, this
                        # is very strange and is probably a file that is called
                        # `.py`.
                        return
                    yield tuple(split)

    potential_solutions = tuple(iter_potential_solutions())
    if not potential_solutions:
        return None, False
    # Try to find the shortest path, this makes more sense usually, because the
    # user usually has venvs somewhere. This means that a path like
    # .tox/py37/lib/python3.7/os.py can be normal for a file. However in that
    # case we definitely want to return ['os'] as a path and not a crazy
    # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a
    # heuristic and there's no way to "always" do it right.
    return sorted(potential_solutions, key=lambda p: len(p))[0], is_package
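The shortest-match heuristic at the end of that hunk is easy to demonstrate in isolation. A minimal sketch (the candidates are hard-coded here; the real function derives them from sys.path matches):

    def shortest_dotted(candidates):
        # Prefer ('os',) over ('.tox', 'py37', 'lib', 'python3.7', 'os').
        return sorted(candidates, key=len)[0]

    print(shortest_dotted([('.tox', 'py37', 'lib', 'python3.7', 'os'), ('os',)]))
    # -> ('os',)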
@@ -1,6 +1,5 @@
from jedi.evaluate import imports
from jedi.evaluate.filters import TreeNameDefinition
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.names import TreeNameDefinition


def _resolve_names(definition_names, avoid_names=()):
@@ -39,7 +38,7 @@ def usages(module_context, tree_name):
    search_name = tree_name.value
    found_names = _find_names(module_context, tree_name)
    modules = set(d.get_root_context() for d in found_names.values())
    modules = set(m for m in modules if isinstance(m, ModuleContext))
    modules = set(m for m in modules if m.is_module() and not m.is_compiled())

    non_matching_usage_maps = {}
    for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name):
jedi/file_io.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import os

from parso import file_io


class AbstractFolderIO(object):
    def __init__(self, path):
        self.path = path

    def list(self):
        raise NotImplementedError

    def get_file_io(self, name):
        raise NotImplementedError


class FolderIO(AbstractFolderIO):
    def list(self):
        return os.listdir(self.path)

    def get_file_io(self, name):
        return FileIO(os.path.join(self.path, name))


class FileIOFolderMixin(object):
    def get_parent_folder(self):
        return FolderIO(os.path.dirname(self.path))


class ZipFileIO(file_io.KnownContentFileIO, FileIOFolderMixin):
    """For .zip and .egg archives"""
    def __init__(self, path, code, zip_path):
        super(ZipFileIO, self).__init__(path, code)
        self._zip_path = zip_path

    def get_last_modified(self):
        try:
            return os.path.getmtime(self._zip_path)
        except OSError:  # Python 3 would probably only need FileNotFoundError
            return None


class FileIO(file_io.FileIO, FileIOFolderMixin):
    pass


class KnownContentFileIO(file_io.KnownContentFileIO, FileIOFolderMixin):
    pass
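A quick sketch of how these IO wrappers compose; the calls come from the class definitions above, while the concrete paths are only examples:

    import os
    from jedi.file_io import FileIO

    file_io = FileIO(os.path.abspath('setup.py'))
    folder = file_io.get_parent_folder()        # a FolderIO for the directory
    print(folder.list())                        # entries in that directory
    sibling = folder.get_file_io('README.rst')  # sibling file as a FileIO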
@@ -1,6 +1,7 @@
import re
import textwrap
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
@@ -54,11 +55,13 @@ def get_executable_nodes(node, last_added=False):
    return result


def get_comp_fors(comp_for):
def get_sync_comp_fors(comp_for):
    yield comp_for
    last = comp_for.children[-1]
    while True:
        if last.type == 'comp_for':
            yield last.children[1]  # Ignore the async.
        elif last.type == 'sync_comp_for':
            yield last
        elif not last.type == 'comp_if':
            break
@@ -138,7 +141,8 @@ def safe_literal_eval(value):
        return ''


def get_call_signature(funcdef, width=72, call_string=None):
def get_call_signature(funcdef, width=72, call_string=None,
                       omit_first_param=False, omit_return_annotation=False):
    """
    Generate call signature of this function.

@@ -155,12 +159,13 @@ def get_call_signature(funcdef, width=72, call_string=None):
            call_string = '<lambda>'
        else:
            call_string = funcdef.name.value
    if funcdef.type == 'lambdef':
        p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
    else:
        p = funcdef.children[2].get_code()
    params = funcdef.get_params()
    if omit_first_param:
        params = params[1:]
    p = '(' + ''.join(param.get_code() for param in params).strip() + ')'
    # TODO this is pretty bad, we should probably just normalize.
    p = re.sub(r'\s+', ' ', p)
    if funcdef.annotation:
    if funcdef.annotation and not omit_return_annotation:
        rtype = " ->" + funcdef.annotation.get_code()
    else:
        rtype = ""
@@ -169,27 +174,6 @@ def get_call_signature(funcdef, width=72, call_string=None):
    return '\n'.join(textwrap.wrap(code, width))


def get_doc_with_call_signature(scope_node):
    """
    Return a document string including call signature.
    """
    call_signature = None
    if scope_node.type == 'classdef':
        for funcdef in scope_node.iter_funcdefs():
            if funcdef.name.value == '__init__':
                call_signature = \
                    get_call_signature(funcdef, call_string=scope_node.name.value)
    elif scope_node.type in ('funcdef', 'lambdef'):
        call_signature = get_call_signature(scope_node)

    doc = clean_scope_docstring(scope_node)
    if call_signature is None:
        return doc
    if not doc:
        return call_signature
    return '%s\n\n%s' % (call_signature, doc)


def move(node, line_offset):
    """
    Move the `Node` start_pos.
@@ -235,7 +219,29 @@ def get_following_comment_same_line(node):


def is_scope(node):
    return node.type in ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for')
    t = node.type
    if t == 'comp_for':
        # Starting with Python 3.8, async is outside of the statement.
        return node.children[1].type != 'sync_comp_for'

    return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for')


def _get_parent_scope_cache(func):
    cache = WeakKeyDictionary()

    def wrapper(used_names, node, include_flows=False):
        try:
            for_module = cache[used_names]
        except KeyError:
            for_module = cache[used_names] = {}

        try:
            return for_module[node]
        except KeyError:
            result = for_module[node] = func(node, include_flows)
            return result
    return wrapper


def get_parent_scope(node, include_flows=False):
@@ -243,15 +249,29 @@
    Returns the underlying scope.
    """
    scope = node.parent
    while scope is not None:
        if include_flows and isinstance(scope, tree.Flow):
    if scope is None:
        return None  # It's a module already.

    while True:
        if is_scope(scope) or include_flows and isinstance(scope, tree.Flow):
            if scope.type in ('classdef', 'funcdef', 'lambdef'):
                index = scope.children.index(':')
                if scope.children[index].start_pos >= node.start_pos:
                    if node.parent.type == 'param' and node.parent.name == node:
                        pass
                    elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
                        pass
                    else:
                        scope = scope.parent
                        continue
            return scope
        if is_scope(scope):
            break
        scope = scope.parent
    return scope


get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope)


def get_cached_code_lines(grammar, path):
    """
    Basically access the cached code lines in parso. This is not the nicest way
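The new `omit_first_param` flag lets callers render a bound method's signature without `self`. A sketch of the effect on plain strings (an illustrative helper, not parso's node API):

    def render_signature(name, params, omit_first_param=False):
        # Drop `self`/`cls` when the signature is shown for a bound method.
        if omit_first_param:
            params = params[1:]
        return '%s(%s)' % (name, ', '.join(params))

    print(render_signature('method', ['self', 'x', 'y']))
    # method(self, x, y)
    print(render_signature('method', ['self', 'x', 'y'], omit_first_param=True))
    # method(x, y)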
jedi/plugins/__init__.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from jedi.plugins.stdlib import StdlibPlugin
from jedi.plugins.flask import FlaskPlugin


class _PluginManager(object):
    def __init__(self, registered_plugin_classes=()):
        self._registered_plugin_classes = list(registered_plugin_classes)

    def register(self, plugin_class):
        """
        Makes it possible to register your plugin.
        """
        self._registered_plugin_classes.append(plugin_class)

    def _build_chain(self, evaluator):
        for plugin_class in self._registered_plugin_classes:
            yield plugin_class(evaluator)

    def get_callbacks(self, evaluator):
        return _PluginCallbacks(self._build_chain(evaluator))


class _PluginCallbacks(object):
    def __init__(self, plugins):
        self._plugins = list(plugins)

    def decorate(self, name, callback):
        for plugin in reversed(self._plugins):
            # Need to reverse so the first plugin is run first.
            callback = getattr(plugin, name)(callback)
        return callback


plugin_manager = _PluginManager([
    StdlibPlugin,
    FlaskPlugin,
])
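How `decorate` composes callbacks is easiest to see with a toy plugin. A hedged sketch (the plugin below is made up; only the wrapping pattern is taken from the code above):

    class LoggingPlugin(object):
        def __init__(self, evaluator):
            self._evaluator = evaluator

        def execute(self, callback):
            def wrapper(context, arguments):
                # Runs before whatever the next plugin / default does.
                print('executing', context)
                return callback(context, arguments)
            return wrapper

Because `decorate` wraps in reversed registration order, the first registered plugin ends up outermost and therefore runs first.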
jedi/plugins/base.py (new file, 21 lines)
@@ -0,0 +1,21 @@
class BasePlugin(object):
    """
    Plugins are created each time an evaluator is created.
    """
    def __init__(self, evaluator):
        # In __init__ you can do some caching.
        self._evaluator = evaluator

    def execute(self, callback):
        """
        Decorates the execute(context, arguments) function.
        """
        return callback

    def import_module(self, callback):
        """
        Decorates the
        import_module(evaluator, import_path, sys_path, add_error_callback)
        function.
        """
        return callback
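On the consuming side, the evaluator would obtain the callback chain once and call through it. A sketch of that wiring (`evaluator` and `default_execute` are placeholders, not names from this diff):

    callbacks = plugin_manager.get_callbacks(evaluator)
    execute = callbacks.decorate('execute', default_execute)
    context_set = execute(context, arguments)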
jedi/plugins/flask.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from jedi.plugins.base import BasePlugin


class FlaskPlugin(BasePlugin):
    def import_module(self, callback):
        """
        Handle "magic" Flask extension imports:
        ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
        """
        def wrapper(evaluator, import_names, module_context, *args, **kwargs):
            if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
                # New style.
                ipath = (u'flask_' + import_names[2]),
                context_set = callback(evaluator, ipath, None, *args, **kwargs)
                if context_set:
                    return context_set
                context_set = callback(evaluator, (u'flaskext',), None, *args, **kwargs)
                return callback(
                    evaluator,
                    (u'flaskext', import_names[2]),
                    next(iter(context_set)),
                    *args, **kwargs
                )
            return callback(evaluator, import_names, module_context, *args, **kwargs)
        return wrapper
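The fallback order in plain terms, as a sketch of just the name rewriting (the helper itself is illustrative):

    def flask_ext_candidates(import_names):
        # ('flask', 'ext', 'foo') -> try flask_foo first, then flaskext.foo
        ext = import_names[2]
        return [(u'flask_' + ext,), (u'flaskext', ext)]

    print(flask_ext_candidates(('flask', 'ext', 'sqlalchemy')))
    # [('flask_sqlalchemy',), ('flaskext', 'sqlalchemy')]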
jedi/plugins/stdlib.py (new file, 606 lines)
@@ -0,0 +1,606 @@
"""
Implementations of standard library functions, because it's not possible to
understand them with Jedi.

To add a new implementation, create a function and add it to the
``_implemented`` dict at the bottom of this module.

Note that this module exists only to implement very specific functionality in
the standard library. The usual way to understand the standard library is the
compiled module that returns the types for C-builtins.
"""
import parso

from jedi._compatibility import force_unicode
from jedi.plugins.base import BasePlugin
from jedi import debug
from jedi.evaluate.helpers import get_str_or_none
from jedi.evaluate.arguments import ValuesArguments, \
    repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper
from jedi.evaluate import analysis
from jedi.evaluate import compiled
from jedi.evaluate.context.instance import BoundMethod, InstanceArguments
from jedi.evaluate.base_context import ContextualizedNode, \
    NO_CONTEXTS, ContextSet, ContextWrapper, LazyContextWrapper
from jedi.evaluate.context import ClassContext, ModuleContext, \
    FunctionExecutionContext
from jedi.evaluate.context import iterable
from jedi.evaluate.lazy_context import LazyTreeContext, LazyKnownContext, \
    LazyKnownContexts
from jedi.evaluate.syntax_tree import is_string
from jedi.evaluate.filters import AttributeOverwrite, publish_method


# Copied from Python 3.6's stdlib.
_NAMEDTUPLE_CLASS_TEMPLATE = """\
_property = property
_tuple = tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict

class {typename}(tuple):
    '{typename}({arg_list})'

    __slots__ = ()

    _fields = {field_names!r}

    def __new__(_cls, {arg_list}):
        'Create new instance of {typename}({arg_list})'
        return _tuple.__new__(_cls, ({arg_list}))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new {typename} object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != {num_fields:d}:
            raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
        return result

    def _replace(_self, **kwds):
        'Return a new {typename} object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, {field_names!r}, _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % list(kwds))
        return result

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + '({repr_fmt})' % self

    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values.'
        return OrderedDict(zip(self._fields, self))

    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return tuple(self)

    # These methods were added by Jedi.
    # __new__ doesn't really work with Jedi. So adding this to namedtuples
    # seems like the easiest way.
    def __init__(_cls, {arg_list}):
        'A helper function for namedtuple.'
        self.__iterable = ({arg_list})

    def __iter__(self):
        for i in self.__iterable:
            yield i

    def __getitem__(self, y):
        return self.__iterable[y]

{field_defs}
"""

_NAMEDTUPLE_FIELD_TEMPLATE = '''\
    {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''


class StdlibPlugin(BasePlugin):
    def execute(self, callback):
        def wrapper(context, arguments):
            try:
                obj_name = context.name.string_name
            except AttributeError:
                pass
            else:
                if context.parent_context == self._evaluator.builtins_module:
                    module_name = 'builtins'
                elif context.parent_context is not None and context.parent_context.is_module():
                    module_name = context.parent_context.py__name__()
                else:
                    return callback(context, arguments=arguments)

                if isinstance(context, BoundMethod):
                    if module_name == 'builtins':
                        if context.py__name__() == '__get__':
                            if context.class_context.py__name__() == 'property':
                                return builtins_property(
                                    context,
                                    arguments=arguments
                                )
                        elif context.py__name__() in ('deleter', 'getter', 'setter'):
                            if context.class_context.py__name__() == 'property':
                                return ContextSet([context.instance])

                    return callback(context, arguments=arguments)

                # for now we just support builtin functions.
                try:
                    func = _implemented[module_name][obj_name]
                except KeyError:
                    pass
                else:
                    return func(context, arguments=arguments)
            return callback(context, arguments=arguments)

        return wrapper


def _follow_param(evaluator, arguments, index):
    try:
        key, lazy_context = list(arguments.unpack())[index]
    except IndexError:
        return NO_CONTEXTS
    else:
        return lazy_context.infer()


def argument_clinic(string, want_obj=False, want_context=False,
                    want_arguments=False, want_evaluator=False):
    """
    Works like Argument Clinic (PEP 436), to validate function params.
    """

    def f(func):
        @repack_with_argument_clinic(string, keep_arguments_param=True)
        def wrapper(obj, *args, **kwargs):
            arguments = kwargs.pop('arguments')
            assert not kwargs  # Python 2...
            debug.dbg('builtin start %s' % obj, color='MAGENTA')
            result = NO_CONTEXTS
            if want_context:
                kwargs['context'] = arguments.context
            if want_obj:
                kwargs['obj'] = obj
            if want_evaluator:
                kwargs['evaluator'] = obj.evaluator
            if want_arguments:
                kwargs['arguments'] = arguments
            result = func(*args, **kwargs)
            debug.dbg('builtin end: %s', result, color='MAGENTA')
            return result

        return wrapper
    return f


@argument_clinic('obj, type, /', want_obj=True, want_arguments=True)
def builtins_property(objects, types, obj, arguments):
    property_args = obj.instance.var_args.unpack()
    key, lazy_context = next(property_args, (None, None))
    if key is not None or lazy_context is None:
        debug.warning('property expected a first param, not %s', arguments)
        return NO_CONTEXTS

    return lazy_context.infer().py__call__(arguments=ValuesArguments([objects]))


@argument_clinic('iterator[, default], /', want_evaluator=True)
def builtins_next(iterators, defaults, evaluator):
    if evaluator.environment.version_info.major == 2:
        name = 'next'
    else:
        name = '__next__'

    # TODO theoretically we have to check here if something is an iterator.
    # That is probably done by checking if it's not a class.
    return defaults | iterators.py__getattribute__(name).execute_evaluated()


@argument_clinic('iterator[, default], /')
def builtins_iter(iterators_or_callables, defaults):
    # TODO implement this if it's a callable.
    return iterators_or_callables.py__getattribute__('__iter__').execute_evaluated()


@argument_clinic('object, name[, default], /')
def builtins_getattr(objects, names, defaults=None):
    # follow the first param
    for obj in objects:
        for name in names:
            string = get_str_or_none(name)
            if string is None:
                debug.warning('getattr called without str')
                continue
            else:
                return obj.py__getattribute__(force_unicode(string))
    return NO_CONTEXTS


@argument_clinic('object[, bases, dict], /')
def builtins_type(objects, bases, dicts):
    if bases or dicts:
        # It's a type creation... maybe someday...
        return NO_CONTEXTS
    else:
        return objects.py__class__()


class SuperInstance(LazyContextWrapper):
    """To be used like the object ``super`` returns."""
    def __init__(self, evaluator, instance):
        self.evaluator = evaluator
        self._instance = instance  # Corresponds to super().__self__

    def _get_bases(self):
        return self._instance.py__class__().py__bases__()

    def _get_wrapped_context(self):
        objs = self._get_bases()[0].infer().execute_evaluated()
        if not objs:
            # This is just a fallback and will only be used, if it's not
            # possible to find a class
            return self._instance
        return next(iter(objs))

    def get_filters(self, search_global=False, until_position=None, origin_scope=None):
        for b in self._get_bases():
            for obj in b.infer().execute_evaluated():
                for f in obj.get_filters():
                    yield f


@argument_clinic('[type[, obj]], /', want_context=True)
def builtins_super(types, objects, context):
    if isinstance(context, FunctionExecutionContext):
        if isinstance(context.var_args, InstanceArguments):
            instance = context.var_args.instance
            # TODO if a class is given it doesn't have to be the direct super
            # class, it can be an ancestor from long ago.
            return ContextSet({SuperInstance(instance.evaluator, instance)})

    return NO_CONTEXTS


class ReversedObject(AttributeOverwrite):
    def __init__(self, reversed_obj, iter_list):
        super(ReversedObject, self).__init__(reversed_obj)
        self._iter_list = iter_list

    @publish_method('__iter__')
    def py__iter__(self, contextualized_node=None):
        return self._iter_list

    @publish_method('next', python_version_match=2)
    @publish_method('__next__', python_version_match=3)
    def py__next__(self):
        return ContextSet.from_sets(
            lazy_context.infer() for lazy_context in self._iter_list
        )


@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
def builtins_reversed(sequences, obj, arguments):
    # While we could do without this variable (just by using sequences), we
    # want static analysis to work well. Therefore we need to generate the
    # values again.
    key, lazy_context = next(arguments.unpack())
    cn = None
    if isinstance(lazy_context, LazyTreeContext):
        # TODO access private
        cn = ContextualizedNode(lazy_context.context, lazy_context.data)
    ordered = list(sequences.iterate(cn))

    # Repack iterator values and then run it the normal way. This is
    # necessary, because `reversed` is a function and autocompletion
    # would fail in certain cases like `reversed(x).__iter__` if we
    # just returned the result directly.
    seq, = obj.evaluator.typing_module.py__getattribute__('Iterator').execute_evaluated()
    return ContextSet([ReversedObject(seq, list(reversed(ordered)))])


@argument_clinic('obj, type, /', want_arguments=True, want_evaluator=True)
def builtins_isinstance(objects, types, arguments, evaluator):
    bool_results = set()
    for o in objects:
        cls = o.py__class__()
        try:
            cls.py__bases__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            bool_results = set([True, False])
            break

        mro = list(cls.py__mro__())

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.builtins_module:
                # Check for tuples.
                classes = ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.iterate()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context.context, 'type-error-isinstance', node, message)

    return ContextSet(
        compiled.builtin_from_name(evaluator, force_unicode(str(b)))
        for b in bool_results
    )


class StaticMethodObject(AttributeOverwrite, ContextWrapper):
    def get_object(self):
        return self._wrapped_context

    def py__get__(self, instance, klass):
        return ContextSet([self._wrapped_context])


@argument_clinic('sequence, /')
def builtins_staticmethod(functions):
    return ContextSet(StaticMethodObject(f) for f in functions)


class ClassMethodObject(AttributeOverwrite, ContextWrapper):
    def __init__(self, class_method_obj, function):
        super(ClassMethodObject, self).__init__(class_method_obj)
        self._function = function

    def get_object(self):
        return self._wrapped_context

    def py__get__(self, obj, class_context):
        return ContextSet([
            ClassMethodGet(__get__, class_context, self._function)
            for __get__ in self._wrapped_context.py__getattribute__('__get__')
        ])


class ClassMethodGet(AttributeOverwrite, ContextWrapper):
    def __init__(self, get_method, klass, function):
        super(ClassMethodGet, self).__init__(get_method)
        self._class = klass
        self._function = function

    def get_object(self):
        return self._wrapped_context

    def py__call__(self, arguments):
        return self._function.execute(ClassMethodArguments(self._class, arguments))


class ClassMethodArguments(TreeArgumentsWrapper):
    def __init__(self, klass, arguments):
        super(ClassMethodArguments, self).__init__(arguments)
        self._class = klass

    def unpack(self, func=None):
        yield None, LazyKnownContext(self._class)
        for values in self._wrapped_arguments.unpack(func):
            yield values


@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
def builtins_classmethod(functions, obj, arguments):
    return ContextSet(
        ClassMethodObject(class_method_object, function)
        for class_method_object in obj.py__call__(arguments=arguments)
        for function in functions
    )


def collections_namedtuple(obj, arguments):
    """
    Implementation of the namedtuple function.

    This has to be done by processing the namedtuple class template and
    evaluating the result.

    """
    evaluator = obj.evaluator

    # Process arguments
    name = u'jedi_unknown_namedtuple'
    for c in _follow_param(evaluator, arguments, 0):
        x = get_str_or_none(c)
        if x is not None:
            name = force_unicode(x)
            break

    # TODO here we only use one of the types, we should use all.
    param_contexts = _follow_param(evaluator, arguments, 1)
    if not param_contexts:
        return NO_CONTEXTS
    _fields = list(param_contexts)[0]
    if isinstance(_fields, compiled.CompiledValue):
        fields = force_unicode(_fields.get_safe_value()).replace(',', ' ').split()
    elif isinstance(_fields, iterable.Sequence):
        fields = [
            force_unicode(v.get_safe_value())
            for lazy_context in _fields.py__iter__()
            for v in lazy_context.infer() if is_string(v)
        ]
    else:
        return NO_CONTEXTS

    # Build source code
    code = _NAMEDTUPLE_CLASS_TEMPLATE.format(
        typename=name,
        field_names=tuple(fields),
        num_fields=len(fields),
        arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1],
        repr_fmt='',
        field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name)
                             for index, name in enumerate(fields))
    )

    # Parse source code
    module = evaluator.grammar.parse(code)
    generated_class = next(module.iter_classdefs())
    parent_context = ModuleContext(
        evaluator, module,
        file_io=None,
        string_names=None,
        code_lines=parso.split_lines(code, keepends=True),
    )

    return ContextSet([ClassContext(evaluator, parent_context, generated_class)])


class PartialObject(object):
    def __init__(self, actual_context, arguments):
        self._actual_context = actual_context
        self._arguments = arguments

    def __getattr__(self, name):
        return getattr(self._actual_context, name)

    def py__call__(self, arguments):
        key, lazy_context = next(self._arguments.unpack(), (None, None))
        if key is not None or lazy_context is None:
            debug.warning("Partial should have a proper function %s", self._arguments)
            return NO_CONTEXTS

        return lazy_context.infer().execute(
            MergedPartialArguments(self._arguments, arguments)
        )


class MergedPartialArguments(AbstractArguments):
    def __init__(self, partial_arguments, call_arguments):
        self._partial_arguments = partial_arguments
        self._call_arguments = call_arguments

    def unpack(self, funcdef=None):
        unpacked = self._partial_arguments.unpack(funcdef)
        # Ignore this one, it's the function. It was checked before that it's
        # there.
        next(unpacked)
        for key_lazy_context in unpacked:
            yield key_lazy_context
        for key_lazy_context in self._call_arguments.unpack(funcdef):
            yield key_lazy_context


def functools_partial(obj, arguments):
    return ContextSet(
        PartialObject(instance, arguments)
        for instance in obj.py__call__(arguments)
    )


@argument_clinic('first, /')
def _return_first_param(firsts):
    return firsts


@argument_clinic('seq')
def _random_choice(sequences):
    return ContextSet.from_sets(
        lazy_context.infer()
        for sequence in sequences
        for lazy_context in sequence.py__iter__()
    )


class ItemGetterCallable(ContextWrapper):
    def __init__(self, instance, args_context_set):
        super(ItemGetterCallable, self).__init__(instance)
        self._args_context_set = args_context_set

    @repack_with_argument_clinic('item, /')
    def py__call__(self, item_context_set):
        context_set = NO_CONTEXTS
        for args_context in self._args_context_set:
            lazy_contexts = list(args_context.py__iter__())
            if len(lazy_contexts) == 1:
                # TODO we need to add the contextualized context.
                context_set |= item_context_set.get_item(lazy_contexts[0].infer(), None)
            else:
                context_set |= ContextSet([iterable.FakeSequence(
                    self._wrapped_context.evaluator,
                    'list',
                    [
                        LazyKnownContexts(item_context_set.get_item(lazy_context.infer(), None))
                        for lazy_context in lazy_contexts
                    ],
                )])
        return context_set


@argument_clinic('*args, /', want_obj=True, want_arguments=True)
def _operator_itemgetter(args_context_set, obj, arguments):
    return ContextSet([
        ItemGetterCallable(instance, args_context_set)
        for instance in obj.py__call__(arguments)
    ])


_implemented = {
    'builtins': {
        'getattr': builtins_getattr,
        'type': builtins_type,
        'super': builtins_super,
        'reversed': builtins_reversed,
        'isinstance': builtins_isinstance,
        'next': builtins_next,
        'iter': builtins_iter,
        'staticmethod': builtins_staticmethod,
        'classmethod': builtins_classmethod,
    },
    'copy': {
        'copy': _return_first_param,
        'deepcopy': _return_first_param,
    },
    'json': {
        'load': lambda obj, arguments: NO_CONTEXTS,
        'loads': lambda obj, arguments: NO_CONTEXTS,
    },
    'collections': {
        'namedtuple': collections_namedtuple,
    },
    'functools': {
        'partial': functools_partial,
        'wraps': _return_first_param,
    },
    '_weakref': {
        'proxy': _return_first_param,
    },
    'random': {
        'choice': _random_choice,
    },
    'operator': {
        'itemgetter': _operator_itemgetter,
    },
    'abc': {
        # Not sure if this is necessary, but it's used a lot in typeshed and
        # it's for now easier to just pass the function.
        'abstractmethod': _return_first_param,
    },
    'typing': {
        # The _alias function just leads to some annoying type inference.
        # Therefore, just make it return nothing, which leads to the stubs
        # being used instead. This only matters for 3.7+.
        '_alias': lambda obj, arguments: NO_CONTEXTS,
    },
    'dataclasses': {
        # For now this works at least better than Jedi trying to understand it.
        'dataclass': lambda obj, arguments: NO_CONTEXTS,
    },
}
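The plugin's dispatch boils down to a two-level dict lookup keyed by module and function name, with the original callback as the fallback. A stripped-down sketch (hand-rolled names, not jedi's classes):

    def _return_first_param(args):
        return args[0]

    _implemented = {'copy': {'copy': _return_first_param,
                             'deepcopy': _return_first_param}}

    def execute(module_name, func_name, args, fallback):
        try:
            func = _implemented[module_name][func_name]
        except KeyError:
            return fallback(args)   # no special case: normal evaluation
        return func(args)

    print(execute('copy', 'deepcopy', [42], fallback=lambda a: None))  # 42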
@@ -144,7 +144,6 @@ Check for `isinstance` and other information to infer a type.
"""

auto_import_modules = [
    'hashlib',  # hashlib is mostly using setattr, which jedi doesn't understand
    'gi',  # This third-party repository (GTK stuff) doesn't really work with jedi
]
"""
jedi/third_party/typeshed (vendored submodule)
Submodule jedi/third_party/typeshed added at 3319cadf85
@@ -4,9 +4,11 @@ addopts = --doctest-modules
# Ignore broken files in blackbox test directories
norecursedirs = .* docs completion refactor absolute_import namespace_package
                scripts extensions speed static_analysis not_in_sys_path
                sample_venvs init_extension_module simple_import
                sample_venvs init_extension_module simple_import jedi/third_party

# Activate `clean_jedi_cache` fixture for all tests. This should be
# fine as long as we are using `clean_jedi_cache` as a session scoped
# fixture.
usefixtures = clean_jedi_cache

testpaths = jedi test
@@ -1 +1 @@
parso>=0.3.0
parso>=0.5.0
@@ -1,10 +1,11 @@
#!/usr/bin/env python
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
"""
Profile a piece of Python code with ``cProfile``. Tries a completion on a
Profile a piece of Python code with ``profile``. Tries a completion on a
certain piece of code.

Usage:
  profile.py [<code>] [-n <number>] [-d] [-o] [-s <sort>]
  profile.py [<code>] [-n <number>] [-d] [-o] [-s <sort>] [-i] [--precision]
  profile.py -h | --help

Options:
@@ -12,34 +13,61 @@ Options:
  -n <number>   Number of passes before profiling [default: 1].
  -d --debug    Enable Jedi internal debugging.
  -o --omit     Omit profiler, just do a normal run.
  -i --infer    Infer types instead of completions.
  -s <sort>     Sort the profile results, e.g. cum, name [default: time].
  --precision   Makes profile time formatting more precise (nanoseconds)
"""

import time
import cProfile
try:
    # For Python 2
    import cProfile as profile
except ImportError:
    import profile
import pstats

from docopt import docopt
import jedi


def run(code, index):
# Monkeypatch the time formatting function of profiling to make it easier to
# understand small time differences.
def f8(x):
    ret = "%7.3f " % x
    if ret == '  0.000 ':
        return "%6dµs" % (x * 1e6)
    if ret.startswith(' 0.00'):
        return "%8.4f" % x
    return ret


def run(code, index, infer=False):
    start = time.time()
    result = jedi.Script(code).completions()
    script = jedi.Script(code)
    if infer:
        result = script.goto_definitions()
    else:
        result = script.completions()
    print('Used %ss for the %sth run.' % (time.time() - start, index + 1))
    return result


def main(args):
    code = args['<code>']
    infer = args['--infer']
    n = int(args['-n'])

    for i in range(n):
        run(code, i)
        run(code, i, infer=infer)

    if args['--precision']:
        pstats.f8 = f8

    jedi.set_debug_function(notices=args['--debug'])
    if args['--omit']:
        run(code, n)
        run(code, n, infer=infer)
    else:
        cProfile.runctx('run(code, n)', globals(), locals(), sort=args['-s'])
        profile.runctx('run(code, n, infer=infer)', globals(), locals(), sort=args['-s'])


if __name__ == '__main__':
scripts/profiled_pytest.sh (new executable file)
@@ -0,0 +1 @@
python3 -m profile -s tottime $(which pytest) $@
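With the new flags, a typical profiling run of the script above looks something like this (the code string is only an example):

    python scripts/profile.py "import json; json.l" -n 3 --infer --precision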
@@ -6,3 +6,7 @@ max-line-length = 100
ignore =
    # do not use bare 'except'
    E722,
    # don't know why this was ever even an option, 1+1 should be possible.
    E226,
    # line break before binary operator
    W503,
setup.py
@@ -2,6 +2,7 @@

from setuptools import setup, find_packages

import os
import ast

__AUTHOR__ = 'David Halter'
@@ -10,12 +11,15 @@ __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'
# Get the version from within jedi. It's defined in exactly one place now.
with open('jedi/__init__.py') as f:
    tree = ast.parse(f.read())
    version = tree.body[1].value.s
    version = tree.body[int(not hasattr(tree, 'docstring'))].value.s

readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read()
with open('requirements.txt') as f:
    install_requires = f.read().splitlines()

assert os.path.isfile("jedi/third_party/typeshed/LICENSE"), \
    "Please download the typeshed submodule first (Hint: git submodule update --init)"

setup(name='jedi',
      version=version,
      description='An autocompletion tool for Python that can be used for text editors.',
@@ -33,14 +37,16 @@ setup(name='jedi',
      install_requires=install_requires,
      extras_require={
          'testing': [
              'pytest>=3.1.0',
              # Pytest 5 doesn't support Python 2 and Python 3.4 anymore.
              'pytest>=3.1.0,<5.0.0',
              # docopt for sith doctests
              'docopt',
              # colorama for colored debug output
              'colorama',
          ],
      },
      package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
      package_data={'jedi': ['*.pyi', 'third_party/typeshed/LICENSE',
                             'third_party/typeshed/README']},
      platforms=['any'],
      classifiers=[
          'Development Status :: 4 - Beta',
@@ -55,6 +61,7 @@ setup(name='jedi',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Text Editors :: Integrated Development Environments (IDE)',
          'Topic :: Utilities',
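The `tree.body[...]` change accounts for a short-lived CPython 3.7 pre-release behavior where module docstrings were moved out of `body` into a `docstring` attribute on the AST. A standalone sketch of the same trick:

    import ast

    source = '"""Docstring."""\n__version__ = "0.1.0"\n'
    tree = ast.parse(source)
    # With the docstring attribute the assignment is body[0];
    # normally the docstring is body[0] and the assignment body[1].
    index = int(not hasattr(tree, 'docstring'))
    print(tree.body[index].value.s)  # 0.1.0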
sith.py
@@ -111,7 +111,13 @@ class TestCase(object):
        if not lines:
            lines = ['']
        line = random.randint(1, len(lines))
        column = random.randint(0, len(lines[line - 1]))
        line_string = lines[line - 1]
        line_len = len(line_string)
        if line_string.endswith('\r\n'):
            line_len -= 1
        if line_string.endswith('\n'):
            line_len -= 1
        column = random.randint(0, line_len)
        return cls(operation, path, line, column)

    def run(self, debugger, record=None, print_result=False):
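The clamp keeps the random column on visible text: a trailing '\r\n' is two characters no cursor position can sit on. A tiny illustration:

    line_string = 'x = 1\r\n'
    line_len = len(line_string)        # 7
    if line_string.endswith('\r\n'):
        line_len -= 1
    if line_string.endswith('\n'):
        line_len -= 1
    print(line_len)                     # 5, so valid columns are 0..5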
@@ -30,7 +30,7 @@ b = [6,7]
#? int()
b[8-7]
# Something unreasonable:
#?
#? int()
b['']

# -----------------
@@ -45,8 +45,16 @@ b[int():]
#? list()
b[:]

#?
#? int()
b[:, 1]
#? int()
b[:1, 1]
#? int()
b[1:1, 1]
#? int()
b[1:1:, ...]
#? int()
b[1:1:5, ...]

class _StrangeSlice():
    def __getitem__(self, sliced):
@@ -161,7 +169,7 @@ def a(): return ''
#? str()
(a)()
#? str()
(a)().replace()
(a)().title()
#? int()
(tuple).index()
#? int()
@@ -209,8 +217,7 @@ g
dic2 = {'asdf': 3, 'b': 'str'}
#? int()
dic2['asdf']
# TODO for now get doesn't work properly when used with a literal.
#? None
#? None int() str()
dic2.get('asdf')

# string literal
@@ -268,11 +275,12 @@ for x in {1: 3.0, '': 1j}:
dict().values().__iter__

d = dict(a=3, b='')
x, = d.values()
#? int() str()
x
#? int() str()
d.values()[0]
#? int()
d['a']
#? int() None
#? int() str() None
d.get('a')

# -----------------
@@ -445,7 +453,7 @@ tuple({1})[0]
a, *b, c = [1, 'b', list, dict]
#? int()
a
#? str()
#?
b
#? list
c
@@ -454,12 +462,14 @@ c
a, *b, *c = [1, 'd', list]
#? int()
a
#? str()
#?
b
#? list
#?
c

lc = [x for a, *x in [(1, '', 1.0)]]

#?
lc[0][0]
#?
lc[0][1]
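For readers unfamiliar with jedi's blackbox test format: a `#?` comment is an assertion about the line that follows, checked by the test runner. Roughly (a summary, not the runner's full grammar):

    #? int()          # the next expression must infer to an int instance
    b[8-7]
    #? ['append']     # completions at the next line's cursor must include these
    [].app
    #?                # empty: nothing may be inferred
    x

A leading number after `#?` (as in `#? 2 []`) gives the column at which the completion is attempted, and `#!` lines assert goto/definition results.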
@@ -82,3 +82,28 @@ async def foo():
    normal_var2 = False
    #? ['normal_var1', 'normal_var2']
    normal_var


class C:
    @classmethod
    async def async_for_classmethod(cls) -> "C":
        return

    async def async_for_method(cls) -> int:
        return


async def f():
    c = await C.async_for_method()
    #? int()
    c
    d = await C().async_for_method()
    #? int()
    d

    e = await C.async_for_classmethod()
    #? C()
    e
    f = await C().async_for_classmethod()
    #? C()
    f
@@ -18,10 +18,12 @@ int(str)
str..
#? []
a(0):.
#? 2 ['and', 'or', 'if', 'is', 'in', 'not']
#? 2 []
0x0
#? ['and', 'or', 'if', 'is', 'in', 'not']
#? []
1j
#? ['and', 'or', 'if', 'is', 'in', 'not']
1j
x = None()
#?
x
@@ -79,6 +81,11 @@ for a3, b3 in (1,""), (1,""), (1,""):
    a3
    #? str()
    b3
for (a3, b3) in (1,""), (1,""), (1,""):
    #? int()
    a3
    #? str()
    b3

for a4, (b4, c4) in (1,("", list)), (1,("", list)):
    #? int()
@@ -148,6 +155,7 @@ ret()[0]
# -----------------

def global_define():
    #? int()
    global global_var_in_func
    global_var_in_func = 3

@@ -163,6 +171,7 @@ def funct1():
    global global_dict_var
    global_dict_var = dict()
def funct2():
    #! ['global_dict_var', 'global_dict_var']
    global global_dict_var
    #? dict()
    global_dict_var
@@ -295,6 +304,11 @@ __file__
#? ['__file__']
__file__

#? str()
math.__file__
# Should not lead to errors
#?
math()

# -----------------
# with statements
@@ -304,7 +318,7 @@ with open('') as f:
    #? ['closed']
    f.closed
    for line in f:
        #? str()
        #? str() bytes()
        line

with open('') as f1, open('') as f2:

@@ -36,6 +36,7 @@ class TestClass(object):
        self2.var_inst = first_param
        self2.second = second_param
        self2.first = first_param
        self2.first.var_on_argument = 5
        a = 3

    def var_func(self):
@@ -57,6 +58,8 @@ class TestClass(object):
        # should not know any class functions!
        #? []
        values
        #?
        values
        #? ['return']
        ret
        return a1
@@ -276,7 +279,7 @@ V(1).c()
V(1).d()
# Only keywords should be possible to complete.
#? ['is', 'in', 'not', 'and', 'or', 'if']
V(1).d()
V(1).d()


# -----------------
@@ -576,3 +579,26 @@ class Foo(object):

#? int()
Foo().b

# -----------------
# default arguments
# -----------------

default = ''
class DefaultArg():
    default = 3
    def x(self, arg=default):
        #? str()
        default
        return arg
    def y(self):
        return default

#? int()
DefaultArg().x()
#? str()
DefaultArg().y()
#? int()
DefaultArg.x()
#? str()
DefaultArg.y()

@@ -12,3 +12,26 @@ def asdfy():
xorz = getattr(asdfy()(), 'asdf')
#? time
xorz


def args_returner(*args):
    return args


#? tuple()
args_returner(1)[:]
#? int()
args_returner(1)[:][0]


def kwargs_returner(**kwargs):
    return kwargs


# TODO This is not really correct, needs correction probably at some point, but
# at least it doesn't raise an error.
#? int()
kwargs_returner(a=1)[:]
#?
kwargs_returner(b=1)[:][0]

@@ -27,6 +27,9 @@ a[0]
a = [a for a,b in [(1,'')]]
#? int()
a[0]
a = [a for (a,b) in [(1,'')]]
#? int()
a[0]

arr = [1,'']
a = [a for a in arr]
@@ -223,3 +226,33 @@ next(iter({a for a in range(10)}))
[int(str(x.value) for x in list

def reset_missing_bracket(): pass


# -----------------
# function calls
# -----------------

def foo(arg):
    return arg


x = foo(x for x in [1])

#? int()
next(x)
#?
x[0]

# While it's illegal to have more than one argument, when a generator
# expression is involved, it's still a valid parse tree and Jedi should still
# work (and especially not raise Exceptions). It's debatable whether inferring
# values for invalid statements is a good idea, but not failing is a must.

#? int()
next(foo(x for x in [1], 1))

def bar(x, y):
    return y

#? str()
next(bar(x for x in [1], x for x in ['']))

@@ -16,8 +16,18 @@ class Y(X):
    #? ['func']
    def f

    #? ['__doc__']
    __doc__
    #? []
    def __doc__

    # This might or might not be what we wanted, currently properties are also
    # used like this. IMO this is not wanted ~dave.
    #? ['__class__']
    def __class__
    #? []
    __class__


    #? ['__repr__']
    def __repr__

@@ -310,11 +310,14 @@ follow_statement(1)
# class decorators should just be ignored
@should_ignore
class A():
    x = 3
    def ret(self):
        return 1

#? int()
A().ret()
#? int()
A().x


# -----------------
Some files were not shown because too many files have changed in this diff.