Merge branch 'unicode_tokenize_fix2' of https://github.com/hatamov/jedi into dev

This commit is contained in:
Dave Halter
2015-03-06 11:44:03 +01:00
3 changed files with 22 additions and 2 deletions

View File

@@ -68,7 +68,7 @@ double = r'[^"\\]*(?:\\.[^"\\]*)*"'
single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
+triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
# Because of leftmost-then-longest match semantics, be sure to put the
@@ -126,7 +126,7 @@ single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
-"u'", 'u""', "U'", 'U"',
+"u'", 'u"', "U'", 'U"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"'):
single_quoted[t] = t