The following 50 code examples, extracted from open-source Python projects, illustrate how to use ply.lex.token().
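Before the extracted examples, here is a minimal, self-contained sketch of the basic pattern most of them build on: construct a lexer, feed it input, and call token() until it returns None. The token names and rules below are invented purely for illustration and are not part of any of the projects quoted later.

import ply.lex as lex

# Throwaway token set, for illustration only.
tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)   # skip characters the lexer does not recognize

lexer = lex.lex()
lexer.input('1 + 22 + 333')

# token() returns the next LexToken, or None when the input is exhausted.
while True:
    tok = lexer.token()
    if tok is None:
        break
    print(tok.type, tok.value)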
def trigraph(input):
    return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]], input)

# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
#    .name      - Macro name (string)
#    .value     - Macro value (a list of tokens)
#    .arglist   - List of argument names
#    .variadic  - Boolean indicating whether or not variadic macro
#    .vararg    - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
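The comment above lists the attributes a Macro object carries. As a rough sketch only (the constructor defaults and the vararg handling shown here are assumptions, and the real class in the quoted project may differ), a container holding those attributes could look like this:

class Macro(object):
    # Sketch of a macro container based on the attribute list above;
    # the default arguments and vararg handling are assumptions.
    def __init__(self, name, value, arglist=None, variadic=False):
        self.name = name            # Macro name (string)
        self.value = value          # Macro value (a list of tokens)
        self.arglist = arglist      # List of argument names
        self.variadic = variadic    # Whether or not this is a variadic macro
        if variadic:
            self.vararg = arglist[-1]   # Name of the variadic parameter
        else:
            self.vararg = None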
def main(fp_in, fp_out):
    lex.lex()
    lex.input(fp_in.read())
    tokens = iter(lex.token, None)
    instructions = list(assemble(parse(tokens)))
    allocate_names()
    inst_stream = emit_inst_bytes(substitute_names(instructions))
    data_stream = emit_data_bytes()
    byte_stream = itertools.chain(inst_stream, data_stream)
    write(byte_stream, fp_out)
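main() above drives the module-level lexer with the two-argument form of iter(): iter(lex.token, None) calls lex.token() repeatedly and stops at the None sentinel that marks end of input, so the token stream can be consumed like any other iterator. A compact, self-contained sketch of that pattern (the single WORD token and the sample input are made up for illustration):

import ply.lex as lex

tokens = ('WORD',)
t_WORD = r'\S+'
t_ignore = ' \t\n'

def t_error(t):
    t.lexer.skip(1)

lex.lex()
lex.input('mov r1, r2')

# iter(callable, sentinel) yields tokens until lex.token() returns None.
for tok in iter(lex.token, None):
    print(tok.type, tok.value)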
def tokenize(self, text):
    tokens = []
    self.lexer.input(text)
    while True:
        tok = self.lexer.token()
        if not tok:
            break
        tokens.append(tok)
    return tokens

# ----------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self, file, line, msg):
    print("%s:%d %s" % (file, line, msg))

# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def group_lines(self, input):
    lex = self.lexer.clone()
    lines = [x.rstrip() for x in input.splitlines()]
    for i in xrange(len(lines)):
        j = i + 1
        while lines[i].endswith('\\') and (j < len(lines)):
            lines[i] = lines[i][:-1] + lines[j]
            lines[j] = ""
            j += 1

    input = "\n".join(lines)
    lex.input(input)
    lex.lineno = 1

    current_line = []
    while True:
        tok = lex.token()
        if not tok:
            break
        current_line.append(tok)
        if tok.type in self.t_WS and '\n' in tok.value:
            yield current_line
            current_line = []

    if current_line:
        yield current_line

# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
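group_lines() above joins backslash-continued source lines, re-lexes the result, and yields one list of tokens per logical line, splitting whenever a whitespace token contains a newline. A self-contained sketch of the same idea, using a made-up two-token lexer in which whitespace is kept as a real token rather than ignored:

import ply.lex as lex

# Tiny lexer where whitespace (including newlines) is a real token,
# so the grouping loop below can detect line boundaries.
tokens = ('ID', 'WS')
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
t_WS = r'\s+'

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()

def logical_lines(text):
    # Join backslash-continued lines before lexing, as group_lines() does.
    lines = [l.rstrip() for l in text.splitlines()]
    for i in range(len(lines)):
        j = i + 1
        while lines[i].endswith('\\') and j < len(lines):
            lines[i] = lines[i][:-1] + lines[j]
            lines[j] = ""
            j += 1
    lexer.input("\n".join(lines))
    current = []
    while True:
        tok = lexer.token()
        if not tok:
            break
        current.append(tok)
        if tok.type == 'WS' and '\n' in tok.value:
            yield current
            current = []
    if current:
        yield current

for line in logical_lines("foo \\\nbar\nbaz\n"):
    print([t.value for t in line])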
def parse(self, input, source=None, ignore={}):
    self.ignore = ignore
    self.parser = self.parsegen(input, source)

# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
    try:
        while True:
            tok = next(self.parser)
            if tok.type not in self.ignore:
                return tok
    except StopIteration:
        self.parser = None
        return None
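parse() and token() above form a pull-style interface over the parsegen() generator: each call to token() advances the generator, skips ignored token types, and returns None once the generator is exhausted. A standalone sketch of the same pattern, with an invented ignore set and token stream (plain tuples stand in for LexToken objects):

class TokenPuller(object):
    # Pull tokens one at a time from a generator, skipping ignored types.
    def __init__(self, gen, ignore=()):
        self.gen = gen
        self.ignore = set(ignore)

    def token(self):
        if self.gen is None:
            return None
        try:
            while True:
                tok = next(self.gen)
                if tok[0] not in self.ignore:
                    return tok
        except StopIteration:
            self.gen = None
            return None

# Example: skip whitespace tokens from a hypothetical stream.
stream = iter([('ID', 'x'), ('WS', ' '), ('NUMBER', '42')])
puller = TokenPuller(stream, ignore={'WS'})
while True:
    tok = puller.token()
    if tok is None:
        break
    print(tok)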