Changed _compatibility.utf8 -> 'u' and removed a lot of the issues with the now-enforced unicode source input of the parser

Dave Halter
2014-02-23 11:29:00 +01:00
parent 5478e50f8b
commit c5fcebde82
10 changed files with 41 additions and 36 deletions
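
The diff relies on two helpers from jedi._compatibility that the excerpt itself never shows: unicode and the new u(). As a rough idea of what such a Python 2/3 shim usually looks like (an illustrative sketch, not jedi's actual implementation):

import sys

is_py3 = sys.version_info[0] >= 3

if is_py3:
    # On Python 3 every str is already text, so both helpers are effectively no-ops.
    unicode = str

    def u(string):
        return string
else:
    # On Python 2 the builtin unicode type already exists; u() promotes byte strings to it.
    def u(string):
        if isinstance(string, str):
            return string.decode('utf-8')
        return string

With a helper along these lines, call sites such as u('') and unicode('') below return a text object on both interpreter versions, which is what the parser's unicode-only input contract requires.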


@@ -5,7 +5,7 @@ finished (and still not working as I want), I won't document it any further.
"""
import re
-from jedi._compatibility import use_metaclass
+from jedi._compatibility import use_metaclass, unicode
from jedi import settings
from jedi import common
from jedi.parser import Parser
@@ -275,7 +275,7 @@ class FastParser(use_metaclass(CachedFastParser)):
def _parse(self, code):
""" :type code: str """
def empty_parser():
-new, temp = self._get_parser('', '', 0, [], False)
+new, temp = self._get_parser(unicode(''), unicode(''), 0, [], False)
return new
parts = self._split_parts(code)
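
Wrapping the empty strings in unicode() only matters on Python 2, where a bare '' literal is a byte string and would break the parser's new unicode-only contract; on Python 3 the call is a harmless no-op. A quick illustration of the distinction (standard library only, runs on either version):

import sys

# The builtin text type: str on Python 3, unicode on Python 2.
text_type = str if sys.version_info[0] >= 3 else unicode

print(isinstance('', text_type))             # False on Python 2, True on Python 3
print(isinstance(text_type(''), text_type))  # True on both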


@@ -8,7 +8,7 @@ found that a flat object with slots is the best.
from inspect import cleandoc
from ast import literal_eval
-from jedi._compatibility import utf8, unicode
+from jedi._compatibility import u, unicode
class Token(object):
@@ -37,7 +37,7 @@ class Token(object):
4
>>> Token.from_tuple((6, 5, (4, 3)))
<Token: (6, 5, (4, 3))>
->>> unicode(Token(1, utf8("😷"), 1 ,1)) + "p" == utf8("😷p")
+>>> unicode(Token(1, u("😷"), 1 ,1)) + "p" == u("😷p")
True
"""
__slots__ = ("_token_type", "_token", "_start_pos_line", "_start_pos_col")
@@ -166,4 +166,4 @@ class TokenDocstring(TokenNoCompat):
def as_string(self):
"""Returns a literal cleaned version of the token"""
-return cleandoc(literal_eval(self.token))
+return unicode(cleandoc(literal_eval(self.token)))
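
The extra unicode() around the cleaned docstring follows the same rule: on Python 2, ast.literal_eval of a plain (non u-prefixed) string literal returns a byte string, so converting it keeps as_string() in line with the now unicode-only token handling; on Python 3 the conversion changes nothing. A standalone illustration with a made-up docstring token (my reading of the change, not code from jedi):

import sys
from ast import literal_eval
from inspect import cleandoc

raw_token = '"""  Example docstring\n    with some indentation.  """'
cleaned = cleandoc(literal_eval(raw_token))

text_type = str if sys.version_info[0] >= 3 else unicode
print(type(cleaned).__name__)             # 'str' on both, but that means bytes on Python 2
print(type(text_type(cleaned)).__name__)  # 'unicode' on Python 2, 'str' on Python 3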


@@ -12,7 +12,8 @@ from __future__ import absolute_import
import string
import re
from io import StringIO
-from token import *
+from token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, NAME, OP,
+                   ERRORTOKEN, NEWLINE)
import collections
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
@@ -23,9 +24,8 @@ namechars = string.ascii_letters + '_'
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
-ENCODING = N_TOKENS + 2
+ENCODING = N_TOKENS + 1
tok_name[ENCODING] = 'ENCODING'
-N_TOKENS += 3
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end')):
@@ -153,7 +153,6 @@ def generate_tokens(readline, line_offset=0):
Modified to not care about dedents.
"""
lnum = line_offset
-continued = False
numchars = '0123456789'
contstr = ''
contline = None
@@ -161,7 +160,7 @@ def generate_tokens(readline, line_offset=0):
line = readline() # readline returns empty if it's finished. See StringIO
if not line:
if contstr:
-yield TokenInfo(ERRORTOKEN, contstr, strstart, (lnum, pos))
+yield TokenInfo(ERRORTOKEN, contstr, contstr_start, (lnum, pos))
break
lnum += 1
@@ -171,7 +170,7 @@ def generate_tokens(readline, line_offset=0):
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
-yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end))
+yield TokenInfo(STRING, contstr + line[:end], contstr_start, (lnum, end))
contstr = ''
contline = None
else:
@@ -207,7 +206,7 @@ def generate_tokens(readline, line_offset=0):
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos))
else:
-strstart = (lnum, start)  # multiple lines
+contstr_start = (lnum, start)  # multiple lines
contstr = line[start:]
contline = line
break
@@ -215,7 +214,7 @@ def generate_tokens(readline, line_offset=0):
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
-strstart = (lnum, start)
+contstr_start = lnum, start
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr = line[start:]


@@ -3,6 +3,7 @@ import os
from jedi import cache
from jedi.parser import tokenize
+from jedi._compatibility import u
from jedi.parser.fast import FastParser
from jedi.parser import representation
from jedi import debug
@@ -70,7 +71,7 @@ class UserContext(object):
for token_type, tok, start, end in gen:
if is_first:
if start != (1, 0): # whitespace is not a path
-return '', start_cursor
+return u(''), start_cursor
is_first = False
# print 'tok', token_type, tok, force_point
@@ -167,14 +168,14 @@ class UserContext(object):
self._line_cache = self.source.splitlines()
if self.source:
if self.source[-1] == '\n':
-self._line_cache.append('')
+self._line_cache.append(u(''))
else: # ''.splitlines() == []
-self._line_cache = ['']
+self._line_cache = [u('')]
if line_nr == 0:
# This is a fix for the zeroth line. We need a newline there, for
# the backwards parser.
-return ''
+return u('')
if line_nr < 0:
raise StopIteration()
try:
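
The hunks in this file keep the cached line list unicode-only as well: the synthetic empty entries appended for a trailing newline, for empty source, and for the zeroth line all go through u() now. A minimal sketch of that line-cache pattern (illustrative names, not jedi's code):

def build_line_cache(source):
    # source is expected to be unicode already; the sentinel entries stay unicode too.
    lines = source.splitlines()
    if source and source.endswith(u'\n'):
        lines.append(u'')    # splitlines() drops the trailing empty line
    elif not source:
        lines = [u'']        # ''.splitlines() == []
    return lines

print(build_line_cache(u'a\nb\n'))   # ['a', 'b', '']
print(build_line_cache(u''))         # ['']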