diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index 762f630c..b3849046 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -272,7 +272,7 @@ def generate_tokens(readline):
                             indents.append(indent)
                             break
                 yield NAME, token, spos, prefix
-            elif initial == '\\' and line[start:] == '\\\n':  # continued stmt
+            elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'):  # continued stmt
                 additional_prefix += prefix + line[start:]
                 break
             else:
diff --git a/test/test_parser/test_parser.py b/test/test_parser/test_parser.py
index a2c896a7..31c92691 100644
--- a/test/test_parser/test_parser.py
+++ b/test/test_parser/test_parser.py
@@ -207,3 +207,9 @@ def test_param_splitting():
 def test_unicode_string():
     s = pt.String(None, u('bö'), (0, 0))
     assert repr(s)  # Should not raise an Error!
+
+
+def test_backslash_dos_style():
+    grammar = load_grammar()
+    m = Parser(grammar, u('\\\r\n')).module
+    assert m
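
Notes: the tokenizer previously recognized only a Unix-style continuation,
i.e. a backslash whose remaining line content was exactly '\\\n'. With a
DOS-style (CRLF) file the remainder is '\\\r\n', so the backslash fell
through to the generic 'else' branch and leaked into the token stream as
a stray operator token instead of being folded into the prefix of the
next token. The sketch below exercises the fixed path end to end; it
assumes the same 'Parser', 'load_grammar' and 'u' helpers the test module
already uses (their import locations may differ between jedi versions):

    # A minimal sketch, assuming the import paths used by jedi's own
    # test suite at the time of this change (they may vary by version).
    from jedi._compatibility import u
    from jedi.parser import Parser, load_grammar

    # A continuation backslash followed by a DOS-style line ending.
    # Before this fix, line[start:] == '\\\r\n' did not match '\\\n',
    # so the backslash was emitted as a stray token; now it is absorbed
    # into additional_prefix and the statement continues on line two.
    source = u('a = 1 + \\\r\n    2\r\n')

    module = Parser(load_grammar(), source).module
    assert module is not None  # parses as one assignment, no error token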