diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index f26a6719..48d0da51 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -303,8 +303,14 @@ def generate_tokens(readline, use_exact_op_types=False):
             elif token in ')]}':
                 paren_level -= 1
 
+            try:
+                # This check is needed in any case to check if it's a valid
+                # operator or just some random unicode character.
+                exact_type = opmap[token]
+            except KeyError:
+                exact_type = typ = ERRORTOKEN
             if use_exact_op_types:
-                typ = opmap[token]
+                typ = exact_type
             else:
                 typ = OP
             yield TokenInfo(typ, token, spos, prefix)
diff --git a/test/completion/basic.py b/test/completion/basic.py
index 8337b8dd..b3b07ee9 100644
--- a/test/completion/basic.py
+++ b/test/completion/basic.py
@@ -215,6 +215,9 @@ if 1:
 #? str()
 xyz
 
+#?
+¹.
+
 # -----------------
 # exceptions
 # -----------------