From b4259b2b4f9d1780afb959970e3f2b59963398fd Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Fri, 26 May 2017 11:32:00 -0400
Subject: [PATCH] Change the default of use_exact_op_types in the tokenizer.

---
 parso/grammar.py     | 2 +-
 parso/pgen2/pgen.py  | 2 +-
 parso/python/diff.py | 2 +-
 parso/tokenize.py    | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/parso/grammar.py b/parso/grammar.py
index 3b34d53..d54b726 100644
--- a/parso/grammar.py
+++ b/parso/grammar.py
@@ -118,7 +118,7 @@ class Grammar(object):
             tokenize_lines[-1] += '\n'
             tokenize_lines.append('')
 
-        tokens = self._tokenizer(tokenize_lines, use_exact_op_types=True)
+        tokens = self._tokenizer(tokenize_lines)
         p = self._parser(self._pgen_grammar, error_recovery=error_recovery,
                          start_symbol=start_symbol)
         root_node = p.parse(tokens=tokens)
diff --git a/parso/pgen2/pgen.py b/parso/pgen2/pgen.py
index 0620999..34f7d6c 100644
--- a/parso/pgen2/pgen.py
+++ b/parso/pgen2/pgen.py
@@ -13,7 +13,7 @@ from parso import tokenize
 class ParserGenerator(object):
     def __init__(self, bnf_text):
         self._bnf_text = bnf_text
-        self.generator = tokenize.source_tokens(bnf_text)
+        self.generator = tokenize.source_tokens(bnf_text, use_exact_op_types=False)
         self._gettoken()  # Initialize lookahead
         self.dfas, self.startsymbol = self._parse()
         self.first = {}  # map from symbol name to set of tokens
diff --git a/parso/python/diff.py b/parso/python/diff.py
index 1c40e5d..30d42d6 100644
--- a/parso/python/diff.py
+++ b/parso/python/diff.py
@@ -308,7 +308,7 @@ class DiffParser(object):
         is_first_token = True
         omitted_first_indent = False
         indents = []
-        tokens = generate_tokens(lines, use_exact_op_types=True)
+        tokens = generate_tokens(lines)
         stack = self._active_parser.pgen_parser.stack
         for typ, string, start_pos, prefix in tokens:
             start_pos = start_pos[0] + line_offset, start_pos[1]
diff --git a/parso/tokenize.py b/parso/tokenize.py
index 702be2a..96a7c0a 100644
--- a/parso/tokenize.py
+++ b/parso/tokenize.py
@@ -204,13 +204,13 @@ class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
         return self.start_pos[0], self.start_pos[1] + len(self.string)
 
 
-def source_tokens(source, use_exact_op_types=False):
+def source_tokens(source, use_exact_op_types=True):
     """Generate tokens from a the source code (string)."""
     lines = splitlines(source, keepends=True)
     return generate_tokens(lines, use_exact_op_types)
 
 
-def generate_tokens(lines, use_exact_op_types=False):
+def generate_tokens(lines, use_exact_op_types=True):
     """
     A heavily modified Python standard library tokenizer.
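
For downstream callers, the effect of the new default is that source_tokens()
and generate_tokens() now emit exact operator token types unless told
otherwise, while the pgen2 grammar parser above opts out to keep the old
behavior. A minimal sketch of both call styles, assuming the parso.tokenize
module layout from this patch and assuming use_exact_op_types toggles exact
per-operator token types versus a generic OP type, as the name suggests:

    from parso import tokenize

    # New default: operators such as '+' are reported with their exact
    # token types rather than a generic OP type.  Each token is a
    # TokenInfo namedtuple: (type, string, start_pos, prefix).
    for token in tokenize.source_tokens('1 + 2\n'):
        print(token.type, repr(token.string))

    # Callers that still want the generic OP behavior, like pgen.py
    # above, must now opt out explicitly:
    tokens = tokenize.source_tokens('1 + 2\n', use_exact_op_types=False)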