Forked from VimPlug/jedi.
Commit: "Removed py25 related code (fixes #206)"
This commit is contained in:
Diff stats for the first changed file: +2 additions, −90 deletions.
@@ -1,10 +1,7 @@
"""
To ensure compatibility from Python ``2.5`` - ``3.2``, a module has been
To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
created. Clearly there is huge need to use conforming syntax. But many changes
(e.g. ``property``, ``hasattr`` in ``2.5``) can be rewritten in pure python.

Most of the code here is necessary to support Python 2.5. Once this dependency
will be dropped, we'll get rid of most code.
"""
import sys
import imp
@@ -16,7 +13,6 @@ except:

is_py3k = sys.hexversion >= 0x03000000
is_py33 = sys.hexversion >= 0x03030000
is_py25 = sys.hexversion < 0x02060000


def find_module_py33(string, path=None):
@@ -91,34 +87,6 @@ except NameError:
    else:
        return default

# ast module was defined in python 2.6
try:
    from ast import literal_eval
except ImportError:
    literal_eval = eval


# properties in 2.5
try:
    property.setter
except AttributeError:
    class property(property):
        def __init__(self, fget, *args, **kwargs):
            self.__doc__ = fget.__doc__
            super(property, self).__init__(fget, *args, **kwargs)

        def setter(self, fset):
            cls_ns = sys._getframe(1).f_locals
            for k, v in cls_ns.iteritems():
                if v == self:
                    propname = k
                    break
            cls_ns[propname] = property(self.fget, fset,
                                        self.fdel, self.__doc__)
            return cls_ns[propname]
else:
    property = property

# unicode function
try:
    unicode = unicode
@@ -203,66 +171,10 @@ def use_metaclass(meta, *bases):
    return meta("HackClass", bases, {})

try:
    from inspect import cleandoc
except ImportError:
    # python 2.5 doesn't have this method
    import string

    def cleandoc(doc):
        """Clean up indentation from docstrings.

        Any whitespace that can be uniformly removed from the second line
        onwards is removed."""
        try:
            lines = string.split(string.expandtabs(doc), '\n')
        except UnicodeError:
            return None
        else:
            # Find minimum indentation of any non-blank lines after first line.
            margin = sys.maxint
            for line in lines[1:]:
                content = len(string.lstrip(line))
                if content:
                    indent = len(line) - content
                    margin = min(margin, indent)
            # Remove indentation.
            if lines:
                lines[0] = lines[0].lstrip()
            if margin < sys.maxint:
                for i in range(1, len(lines)):
                    lines[i] = lines[i][margin:]
            # Remove any trailing or leading blank lines.
            while lines and not lines[-1]:
                lines.pop()
            while lines and not lines[0]:
                lines.pop(0)
            return string.join(lines, '\n')

if is_py25:
    # adds the `itertools.chain.from_iterable` constructor
    import itertools

    class chain(itertools.chain):
        @staticmethod
        def from_iterable(iterables):
            # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
            for it in iterables:
                for element in it:
                    yield element
    itertools.chain = chain
    del chain

try:
    from functools import reduce
    from functools import reduce  # Python 3
except ImportError:
    reduce = reduce

try:
    import json
except ImportError:
    # python 2.5
    import simplejson as json

try:
    encoding = sys.stdout.encoding
except AttributeError:

+1 −1
@@ -21,6 +21,7 @@ from __future__ import with_statement
import time
import os
import sys
import json
import hashlib
try:
    import cPickle as pickle
@@ -28,7 +29,6 @@ except:
    import pickle
import shutil

from jedi._compatibility import json
from jedi import settings
from jedi import common
from jedi import debug

@@ -14,7 +14,7 @@ from __future__ import with_statement
import copy
import itertools

from jedi._compatibility import property, use_metaclass, next, hasattr
from jedi._compatibility import use_metaclass, next, hasattr
from jedi import parsing_representation as pr
from jedi import cache
from jedi import helpers

+2 −2
@@ -6,7 +6,7 @@ finished (and still not working as I want), I won't document it any further.
import re
import operator

from jedi._compatibility import use_metaclass, reduce, property
from jedi._compatibility import use_metaclass, reduce
from jedi import settings
from jedi import parsing
from jedi import parsing_representation as pr
@@ -225,7 +225,7 @@ class FastParser(use_metaclass(CachedFastParser)):
        if settings.fast_parser_always_reparse:
            self.parsers[:] = []

        # dict comprehensions are not available in py2.5/2.6 :-(
        # dict comprehensions are not available in 2.6 :-(
        hashes = dict((p.hash, p) for p in self.parsers)

        line_offset = 0

+1 −5
@@ -11,11 +11,7 @@ try:
    from pydoc_data import topics as pydoc_topics
except ImportError:
    # Python 2.6
    try:
        import pydoc_topics
    except ImportError:
        # Python 2.5
        pydoc_topics = None
    import pydoc_topics

if is_py3k:
    keys = keyword.kwlist

+3 −3
@@ -19,8 +19,9 @@ import re
import tokenizer as tokenize
import sys
import os
from ast import literal_eval

from jedi._compatibility import exec_function, unicode, is_py25, literal_eval
from jedi._compatibility import exec_function, unicode
from jedi import cache
from jedi import parsing
from jedi import parsing_representation as pr
@@ -383,8 +384,7 @@ def source_to_unicode(source, encoding=None):
    http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
    declarations
    """
    byte_mark = '\xef\xbb\xbf' if is_py25 else \
        literal_eval(r"b'\xef\xbb\xbf'")
    byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
    if source.startswith(byte_mark):
        # UTF-8 byte-order mark
        return 'utf-8'

@@ -39,9 +39,10 @@ from __future__ import with_statement
import os
import re
import tokenizer as tokenize
from inspect import cleandoc
from ast import literal_eval

from jedi._compatibility import next, literal_eval, cleandoc, Python3Method, \
    encoding, property, unicode, is_py3k
from jedi._compatibility import next, Python3Method, encoding, unicode, is_py3k
from jedi import common
from jedi import debug

Reference in New Issue
Block a user