diff --git a/Parser/pgen/pgen.py b/Parser/pgen/pgen.py
index cd473b30d8..bf3472d717 100644
--- a/Parser/pgen/pgen.py
+++ b/Parser/pgen/pgen.py
@@ -10,15 +10,11 @@ import importlib.machinery
 CURRENT_FOLDER_LOCATION = os.path.dirname(os.path.realpath(__file__))
 LIB_LOCATION = os.path.realpath(os.path.join(CURRENT_FOLDER_LOCATION, '..', '..', 'Lib'))
 TOKEN_LOCATION = os.path.join(LIB_LOCATION, 'token.py')
-TOKENIZE_LOCATION = os.path.join(LIB_LOCATION, 'tokenize.py')
 
 token = importlib.machinery.SourceFileLoader('token',
                                              TOKEN_LOCATION).load_module()
-# Add token to the module cache so tokenize.py uses that excact one instead of
-# the one in the stdlib of the interpreter executing this file.
-sys.modules['token'] = token
-tokenize = importlib.machinery.SourceFileLoader('tokenize',
-                                                TOKENIZE_LOCATION).load_module()
+
+import tokenize  # from stdlib
 
 from . import grammar
 
@@ -184,16 +180,16 @@ class ParserGenerator(object):
         dfas = collections.OrderedDict()
         startsymbol = None
         # MSTART: (NEWLINE | RULE)* ENDMARKER
-        while self.type != self.tokens.ENDMARKER:
-            while self.type == self.tokens.NEWLINE:
+        while self.type != tokenize.ENDMARKER:
+            while self.type == tokenize.NEWLINE:
                 self.gettoken()
             # RULE: NAME ':' RHS NEWLINE
-            name = self.expect(self.tokens.NAME)
+            name = self.expect(tokenize.NAME)
             if self.verbose:
                 print("Processing rule {dfa_name}".format(dfa_name=name))
-            self.expect(self.tokens.OP, ":")
+            self.expect(tokenize.OP, ":")
             a, z = self.parse_rhs()
-            self.expect(self.tokens.NEWLINE)
+            self.expect(tokenize.NEWLINE)
             if self.verbose:
                 self.dump_nfa(name, a, z)
             dfa = self.make_dfa(a, z)
@@ -309,7 +305,7 @@ class ParserGenerator(object):
         # ALT: ITEM+
         a, b = self.parse_item()
         while (self.value in ("(", "[") or
-               self.type in (self.tokens.NAME, self.tokens.STRING)):
+               self.type in (tokenize.NAME, tokenize.STRING)):
             c, d = self.parse_item()
             b.addarc(c)
             b = d
@@ -320,7 +316,7 @@ class ParserGenerator(object):
         if self.value == "[":
             self.gettoken()
             a, z = self.parse_rhs()
-            self.expect(self.tokens.OP, "]")
+            self.expect(tokenize.OP, "]")
             a.addarc(z)
             return a, z
         else:
@@ -340,9 +336,9 @@ class ParserGenerator(object):
         if self.value == "(":
             self.gettoken()
             a, z = self.parse_rhs()
-            self.expect(self.tokens.OP, ")")
+            self.expect(tokenize.OP, ")")
             return a, z
-        elif self.type in (self.tokens.NAME, self.tokens.STRING):
+        elif self.type in (tokenize.NAME, tokenize.STRING):
             a = NFAState()
             z = NFAState()
             a.addarc(z, self.value)
@@ -365,7 +361,7 @@ class ParserGenerator(object):
         while tup[0] in (tokenize.COMMENT, tokenize.NL):
             tup = next(self.generator)
         self.type, self.value, self.begin, self.end, self.line = tup
-        #print self.tokens['tok_name'][self.type], repr(self.value)
+        # print(tokenize.tok_name[self.type], repr(self.value))
 
     def raise_error(self, msg, *args):
         if args: