The following code examples, drawn from open-source Python projects, illustrate how to use ply.lex.lex().
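For orientation before the examples: the typical workflow is to declare a tokens tuple, define t_* rules, call lex.lex() to build the lexer, and then feed it input. A minimal self-contained sketch of that pattern (the token names and sample input here are illustrative, not taken from any example below):

import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()        # builds the lexer from the rules in this module
lexer.input("1 + 2")
for tok in lexer:        # a PLY lexer is iterable; iteration stops at end of input
    print(tok.type, tok.value)
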
def __init__(self, lexer=None):
    if lexer is None:
        lexer = lex.lexer
    self.lexer = lexer
    self.macros = {}
    self.path = []
    self.temp_path = []

    # Probe the lexer for selected tokens
    self.lexprobe()

    tm = time.localtime()
    self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y", tm))
    self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S", tm))
    self.parser = None

# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------

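The tokenize() banner above introduces a helper whose body is not part of this extract. A plausible minimal version of such a utility, assuming a PLY lexer built on demand (a sketch, not the original implementation):

def tokenize(text, lexer=None):
    tokens = []
    if lexer is None:
        lexer = lex.lex()
    lexer.input(text)
    while True:
        tok = lexer.token()   # returns None at end of input
        if not tok:
            break
        tokens.append(tok)
    return tokens
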
def __init__(self, **kw):
    self.debug = kw.get('debug', 0)
    self.start = kw.get('start', 'topLevelStatementsOpt')
    self.lexer = lex.lex(module=self, debug=self.debug)
    self.parser = yacc.yacc(module=self,
                            debug=self.debug,
                            write_tables=0,
                            start=self.start)
    self.si = units.Si()
    self.connections = Connections()
    self.scopeStack = []
    self.tempCount = 0
    self.enumerations = {}
    self.encapsulationStack = [Encapsulation()]
    self.timeUnitFromEncapName = {}
    self.clearEnvironment()

def get_script_lexer(module=None):
    t_LAYER0_MARK = r'\:'
    t_LAYER1_MARK = r'\.'
    t_LAYER2_MARK = r'\-'
    t_LAYER3_MARK = r'[\'\’]'
    t_LAYER4_MARK = r'\,'
    t_LAYER5_MARK = r'\_'
    t_LAYER6_MARK = r'\;'
    t_PRIMITIVE = r'[EUASBT]'
    t_REMARKABLE_ADDITION = r'[OMFI]'
    t_REMARKABLE_MULTIPLICATION = r'wo|wa|y|o|e|wu|we|u|a|i|j|g|s|b|t|h|c|k|m|n|p|x|d|f|l'
    t_PLUS = r'\+'

    t_ignore = ' \t\n'

    # Error handling rule
    def t_error(t):
        logger.log(logging.ERROR, "Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    return lxr.lex(module=module, errorlog=logging)

def get_lexer(module=None):
    t_TERM = TERM_REGEX
    t_PLUS = r'\+'
    t_TIMES = r'\*'
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_L_CURLY_BRACKET = r'\{'
    t_R_CURLY_BRACKET = r'\}'
    t_SLASH = r'\/'
    t_LITERAL = r'\<(\\\>|[^\>])+\>'
    # t_USL_TAG = r'([A-Za-z0-9 _\./\\-]+)'

    t_ignore = ' \t\n'

    # Error handling rule
    def t_error(t):
        logger.log(logging.ERROR, "Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    return lxr.lex(module=module, errorlog=logging)

def parse(path, module_name=None, lexer=None, parser=None):
    if lexer is None:
        lexer = lex.lex()
    if parser is None:
        parser = yacc.yacc(debug=False, write_tables=0)

    with open(path) as f:
        data = f.read()

    if module_name is None:
        basename = os.path.basename(path)
        module_name = os.path.splitext(basename)[0]

    thrift = types.ModuleType(module_name)
    setattr(thrift, '__thrift_file__', path)
    thrift_stack.append(thrift)
    lexer.lineno = 1
    parser.parse(data)
    thrift_stack.pop()
    return thrift

def prueba(data):
    global resultado_lexema
    analizador = lex.lex()
    analizador.input(data)
    resultado_lexema.clear()
    while True:
        tok = analizador.token()
        if not tok:
            break
        # print("lexeme of " + tok.type + " value " + tok.value + " line " + str(tok.lineno))
        estado = "Linea {:4} Tipo {:16} Valor {:16} Posicion {:4}".format(
            str(tok.lineno), str(tok.type), str(tok.value), str(tok.lexpos))
        resultado_lexema.append(estado)
    return resultado_lexema

# instantiate the lexical analyzer

def __init__(self, **kw):
    super(PlyParser, self).__init__()
    self.debug = kw.get('debug', 0)
    self.names = {}
    try:
        modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
    except:
        modname = "parser" + "_" + self.__class__.__name__
    self.debugfile = modname + ".dbg"
    self.tabmodule = modname + "_" + "parsetab"
    # print self.debugfile, self.tabmodule

    # Builds the lexer and parser
    lex.lex(module=self, debug=self.debug)
    yacc.yacc(module=self,
              debug=self.debug,
              debugfile=self.debugfile,
              tabmodule=self.tabmodule)

def reset(self):
    if debug.logger & debug.flagLexer:
        logger = debug.logger.getCurrentLogger()
    else:
        logger = lex.NullLogger()

    if debug.logger & debug.flagGrammar:
        debuglogger = debug.logger.getCurrentLogger()
    else:
        debuglogger = None

    self.lexer = lex.lex(module=self,
                         reflags=re.DOTALL,
                         outputdir=self._tempdir,
                         debuglog=debuglogger,
                         errorlog=logger)

def __init__(self):
    super(GenericIntegerLexer, self).__init__()
    self.tokens = (
        'NUMBER',
        'PLUS',
        'MINUS',
        'TIMES',
        'DIVIDE',
        'POWER',
        'LPAREN',
        'RPAREN',
        'SCALAR',
    )
    self.t_NUMBER = r'\d+'
    self.t_PLUS = r'\+'
    self.t_MINUS = r'-'
    self.t_TIMES = r'\*'
    self.t_DIVIDE = r'/'
    self.t_POWER = r'\^'
    self.t_LPAREN = r'\('
    self.t_RPAREN = r'\)'
    self.t_ignore = " \t"
    self.lexer = lex.lex(object=self)

def init(outputdir=None):
    outputdir = outputdir or os.path.dirname(__file__)  # os.getcwd()
    current_module = sys.modules[__name__]
    # print(outputdir, current_module)
    debug = 0
    optimize = 0
    lexer = lex.lex(optimize=0, debug=debug)
    # lexer.input('on init\n declare shared parameter cutoff')
    # while True:
    #     tok = lexer.token()
    #     if tok is None:
    #         break
    #     print(tok)
    return yacc.yacc(method="LALR",
                     optimize=optimize,
                     debug=debug,
                     write_tables=0,
                     module=current_module,
                     start='script',
                     outputdir=outputdir,
                     tabmodule='ksp_parser_tab')

def main(fp_in, fp_out):
    lex.lex()
    lex.input(fp_in.read())
    tokens = iter(lex.token, None)
    instructions = list(assemble(parse(tokens)))
    allocate_names()
    inst_stream = emit_inst_bytes(substitute_names(instructions))
    data_stream = emit_data_bytes()
    byte_stream = itertools.chain(inst_stream, data_stream)
    write(byte_stream, fp_out)

def build(self, **kwargs):
    self.lexer = lex.lex(object=self, debug=False)

def lex_function(text):
    return _function_lexer.lex(text)

def build(self, **kwargs):
    """
    Builds the lexer from the specification. Must be called after the
    lexer object is created.

    This method exists separately because the PLY manual warns against
    calling lex.lex inside __init__.
    """
    self.lexer = lex.lex(object=self, **kwargs)

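A class-based lexer built this way is driven in two steps: instantiate the class, then call build() before feeding input. A usage sketch (the class name MyLexer and the input string are hypothetical):

m = MyLexer()
m.build(debug=False)        # kwargs are forwarded to lex.lex()
m.lexer.input("some input")
while True:
    tok = m.lexer.token()
    if not tok:
        break
    print(tok)
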
def group_lines(self, input):
    lex = self.lexer.clone()
    lines = [x.rstrip() for x in input.splitlines()]
    for i in xrange(len(lines)):
        j = i + 1
        while lines[i].endswith('\\') and (j < len(lines)):
            lines[i] = lines[i][:-1] + lines[j]
            lines[j] = ""
            j += 1

    input = "\n".join(lines)
    lex.input(input)
    lex.lineno = 1

    current_line = []
    while True:
        tok = lex.token()
        if not tok:
            break
        current_line.append(tok)
        if tok.type in self.t_WS and '\n' in tok.value:
            yield current_line
            current_line = []

    if current_line:
        yield current_line

# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------

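As with tokenize() earlier, the tokenstrip() banner introduces a helper whose body is not shown in this extract. One way such a function is commonly written, assuming self.t_WS is the set of whitespace token types used in group_lines above (a sketch, not the original implementation):

def tokenstrip(self, tokens):
    # drop leading whitespace tokens
    i = 0
    while i < len(tokens) and tokens[i].type in self.t_WS:
        i += 1
    del tokens[:i]
    # drop trailing whitespace tokens
    i = len(tokens) - 1
    while i >= 0 and tokens[i].type in self.t_WS:
        i -= 1
    del tokens[i+1:]
    return tokens
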
def __compile(self, code):
    lex.lex()
    parser = yacc.yacc(start='statements')
    statements = parser.parse(code)
    # self.__dump(statements)
    return statements

def __init__(self, **kwargs):
    super(Lexer, self).__init__()
    self.lexer = lex.lex(object=self, **kwargs)