/**
 * Parses the remaining whitespace-separated arguments on the current config
 * line into an array.
 *
 * <p>Consumes word and double-quoted tokens until end of line or end of file;
 * the tokenizer is expected to be positioned on the first argument token.
 *
 * @param tokens tokenizer positioned at the first argument token
 * @return the parsed arguments, possibly empty
 * @throws IOException if reading the underlying stream fails
 * @throws ConfigFormatException if a token of an unexpected type is seen
 */
String[] parseBenchArgs(StreamTokenizer tokens)
        throws IOException, ConfigFormatException {
    // Generic Vector replaces the raw type; the raw Vector forced an
    // unchecked conversion at the toArray call.
    Vector<String> vec = new Vector<String>();
    for (;;) {
        switch (tokens.ttype) {
            case StreamTokenizer.TT_EOF:
            case StreamTokenizer.TT_EOL:
                return vec.toArray(new String[vec.size()]);
            case StreamTokenizer.TT_WORD:
            case '"':
                vec.add(tokens.sval);
                tokens.nextToken();
                break;
            default:
                throw new ConfigFormatException("unrecognized arg token "
                        + "on line " + tokens.lineno());
        }
    }
}
/**
 * Reads whitespace-separated, optionally quoted tokens from the file
 * {@code name} and appends each to {@code args}. '#' starts a comment that
 * runs to end of line.
 *
 * @param name path of the command file to read (platform default charset —
 *             NOTE(review): consider an explicit charset; confirm callers)
 * @param args buffer the parsed tokens are appended to
 * @throws IOException if the file cannot be opened or read
 */
private static void loadCmdFile(String name, ListBuffer<String> args)
        throws IOException {
    // try-with-resources closes the reader even when nextToken() throws;
    // the original leaked it on that path.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);
        st.whitespaceChars(0, ' ');
        st.commentChar('#');
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.append(st.sval);
        }
    }
}
/**
 * Reads whitespace-separated, optionally quoted tokens from the file
 * {@code name} and adds each to {@code args}. '#' starts a comment that
 * runs to end of line.
 *
 * @param name path of the command file to read
 * @param args list the parsed tokens are appended to
 * @throws IOException if the file cannot be opened or read
 */
private static void loadCmdFile(String name, List<String> args)
        throws IOException {
    // try-with-resources closes the reader even when nextToken() throws;
    // the original leaked it on that path.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);
        st.whitespaceChars(0, ' ');
        st.commentChar('#');
        st.quoteChar('"');
        st.quoteChar('\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.add(st.sval);
        }
    }
}
/**
 * Renders this token as a human-readable description suitable for error
 * messages, based on the {@code ttype}/{@code sval} fields.
 */
public String toMessage() {
    if (ttype == StreamTokenizer.TT_EOL) {
        return "\"EOL\"";
    }
    if (ttype == StreamTokenizer.TT_EOF) {
        return "\"EOF\"";
    }
    if (ttype == StreamTokenizer.TT_NUMBER) {
        return "NUMBER";
    }
    if (ttype == StreamTokenizer.TT_WORD) {
        return (sval == null) ? "IDENTIFIER" : "IDENTIFIER " + sval;
    }
    if (ttype == (int) '"') {
        return (sval == null) ? "QUOTED STRING" : "QUOTED STRING \"" + sval + "\"";
    }
    // Any other ttype is the ordinary character itself.
    return "CHARACTER \'" + (char) ttype + "\'";
}
/**
 * Reads whitespace-separated, optionally quoted tokens from the file
 * {@code name} and adds each to {@code args}. '#' starts a comment that
 * runs to end of line.
 *
 * @param name path of the command file to read
 * @param args list the parsed tokens are appended to (raw type preserved
 *             for source compatibility with existing callers)
 * @throws IOException if the file cannot be opened or read
 */
@SuppressWarnings("unchecked")
private static void loadCmdFile(String name, List args) throws IOException {
    // try-with-resources closes the reader even when nextToken() throws;
    // the original leaked it on that path.
    try (Reader r = new BufferedReader(new FileReader(name))) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.resetSyntax();
        st.wordChars(' ', 255);
        st.whitespaceChars(0, ' ');
        st.commentChar('#');
        st.quoteChar('"');
        st.quoteChar('\'');
        // TT_EOF is a static constant; the original accessed it through the
        // instance (st.TT_EOF), which misleads readers.
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            args.add(st.sval);
        }
    }
}
/**
 * Reads and classifies the next token from the underlying tokenizer.
 * Returns {@code null} at end of line or end of input.
 *
 * @throws IOException if an unexpected token type is encountered or the
 *         underlying read fails
 */
Token next() throws IOException {
    final int type = tok.nextToken();
    // The original split punctuation into a nested switch; one flat switch
    // covers every case.
    switch (type) {
        case StreamTokenizer.TT_EOF:
        case StreamTokenizer.TT_EOL:
            return null;
        case StreamTokenizer.TT_NUMBER:
            return new NumToken(tok.nval);
        case StreamTokenizer.TT_WORD:
            return new StrToken(TType.IDENT, tok.sval);
        case '"':
            return new StrToken(TType.QUOT, tok.sval);
        case ',':
            return new Token(TType.COMMA);
        case '(':
            return new Token(TType.LPAREN);
        case ')':
            return new Token(TType.RPAREN);
        default:
            throw new IOException("Unexpected: " + type);
    }
}
/**
 * Parses a JAAS configuration value into its login module entries.
 *
 * @param loginContextName name of the login context this config belongs to
 * @param jaasConfigParams JAAS config text of the form
 *        {@code LoginModuleClass controlFlag (key=value)* ;} repeated
 * @throws IllegalArgumentException if no login module is specified
 * @throws KafkaException if tokenization fails unexpectedly
 */
public JaasConfig(String loginContextName, String jaasConfigParams) {
    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(jaasConfigParams));
    tokenizer.slashSlashComments(true);
    tokenizer.slashStarComments(true);
    // Permit these characters inside unquoted words (class names, option keys).
    tokenizer.wordChars('-', '-');
    tokenizer.wordChars('_', '_');
    tokenizer.wordChars('$', '$');
    try {
        configEntries = new ArrayList<>();
        while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
            configEntries.add(parseAppConfigurationEntry(tokenizer));
        }
        if (configEntries.isEmpty())
            throw new IllegalArgumentException("Login module not specified in JAAS config");
        this.loginContextName = loginContextName;
    } catch (IOException e) {
        // Preserve the cause; the original dropped it, hiding the real failure.
        throw new KafkaException("Unexpected exception while parsing JAAS config", e);
    }
}
/**
 * Parses one JAAS configuration entry of the form
 * {@code LoginModuleClass controlFlag (key=value)* ;}.
 *
 * <p>The tokenizer must be positioned ON the login module class token when
 * this is called (the constructor's nextToken() loop guarantees that).
 *
 * @param tokenizer tokenizer positioned on the login module class name
 * @return the parsed entry
 * @throws IOException if the underlying read fails
 * @throws IllegalArgumentException if the entry is malformed
 */
private AppConfigurationEntry parseAppConfigurationEntry(StreamTokenizer tokenizer) throws IOException {
    // Current token: the login module class name.
    String loginModule = tokenizer.sval;
    if (tokenizer.nextToken() == StreamTokenizer.TT_EOF)
        throw new IllegalArgumentException("Login module control flag not specified in JAAS config");
    LoginModuleControlFlag controlFlag = loginModuleControlFlag(tokenizer.sval);
    Map<String, String> options = new HashMap<>();
    // Collect key=value pairs until ';' or EOF. The inner condition relies on
    // short-circuit order: consume '=', then consume the value token, then
    // require that value to be word/quoted (sval non-null).
    while (tokenizer.nextToken() != StreamTokenizer.TT_EOF && tokenizer.ttype != ';') {
        String key = tokenizer.sval;
        if (tokenizer.nextToken() != '=' || tokenizer.nextToken() == StreamTokenizer.TT_EOF || tokenizer.sval == null)
            throw new IllegalArgumentException("Value not specified for key '" + key + "' in JAAS config");
        String value = tokenizer.sval;
        options.put(key, value);
    }
    // If the loop ended on EOF rather than ';', the entry is unterminated.
    if (tokenizer.ttype != ';')
        throw new IllegalArgumentException("JAAS config entry not terminated by semi-colon");
    return new AppConfigurationEntry(loginModule, controlFlag, options);
}
/**
 * Parses the benchmark weight from the current token and advances the
 * tokenizer past it.
 *
 * @param tokens tokenizer positioned on the weight token
 * @return the parsed weight
 * @throws IOException if the underlying read fails
 * @throws ConfigFormatException if the token is missing or not a float
 */
float parseBenchWeight(StreamTokenizer tokens)
        throws IOException, ConfigFormatException {
    // Guard clause replaces the original switch: anything other than a word
    // or quoted string cannot carry a weight.
    if (tokens.ttype != StreamTokenizer.TT_WORD && tokens.ttype != '"') {
        throw new ConfigFormatException("missing weight value on line "
                + tokens.lineno());
    }
    final float weight;
    try {
        weight = Float.parseFloat(tokens.sval);
    } catch (NumberFormatException e) {
        throw new ConfigFormatException("illegal weight value \""
                + tokens.sval + "\" on line " + tokens.lineno());
    }
    tokens.nextToken();
    return weight;
}
/**
 * Parses the benchmark name from the current token and advances the
 * tokenizer past it.
 *
 * @param tokens tokenizer positioned on the name token
 * @return the benchmark name
 * @throws IOException if the underlying read fails
 * @throws ConfigFormatException if the name token is missing
 */
String parseBenchName(StreamTokenizer tokens)
        throws IOException, ConfigFormatException {
    // Guard clause replaces the original switch.
    if (tokens.ttype != StreamTokenizer.TT_WORD && tokens.ttype != '"') {
        throw new ConfigFormatException("missing benchmark name on "
                + "line " + tokens.lineno());
    }
    final String name = tokens.sval;
    tokens.nextToken();
    return name;
}
/**
 * Loads and instantiates the benchmark class named by the current token,
 * then advances the tokenizer past it.
 *
 * @param tokens tokenizer positioned on the class name token
 * @return a new instance of the named benchmark
 * @throws IOException if the underlying read fails
 * @throws ConfigFormatException if the token is missing or the class cannot
 *         be loaded/instantiated
 */
Benchmark parseBenchClass(StreamTokenizer tokens)
        throws IOException, ConfigFormatException {
    switch (tokens.ttype) {
        case StreamTokenizer.TT_WORD:
        case '"':
            Benchmark bench;
            try {
                Class<?> cls = Class.forName(tokens.sval);
                // getDeclaredConstructor().newInstance() replaces the
                // deprecated Class.newInstance(), which silently rethrew
                // checked exceptions from the constructor. Everything it
                // throws is still covered by the catch below.
                bench = (Benchmark) cls.getDeclaredConstructor().newInstance();
            } catch (Exception e) {
                throw new ConfigFormatException("unable to instantiate "
                        + "benchmark \"" + tokens.sval + "\" on line "
                        + tokens.lineno());
            }
            tokens.nextToken();
            return bench;
        default:
            throw new ConfigFormatException("missing benchmark class "
                    + "name on line " + tokens.lineno());
    }
}
private Void parse(Reader reader) throws ParseException, IOException { StreamTokenizer st = new StreamTokenizer(reader); st.eolIsSignificant(true); st.wordChars((int) '_', (int) '_'); st.parseNumbers(); st.quoteChar((int) '"'); // These calls caused comments to be discarded st.slashSlashComments(true); st.slashStarComments(true); // Parse the file ParserState currentState = this.getBeginningOfLineState(); while (currentState != null) { currentState = currentState.parse(st); } return null; }
/**
 * Skips end-of-line tokens until a meaningful token (or EOF) arrives, then
 * selects the successor state based on what that token is. Returns
 * {@code null} when the input is exhausted.
 */
@Override
public ParserState parse(StreamTokenizer st) throws IOException, ParseException {
    while (st.ttype != StreamTokenizer.TT_EOF) {
        final int token = st.nextToken();
        if (token == StreamTokenizer.TT_EOL || token == StreamTokenizer.TT_EOF) {
            // EOL: keep scanning; EOF: the loop condition ends the scan.
            continue;
        }
        if (nextValueIs(ExpectedResultsParser.SECTION_IDENTIFIER, st, token)) {
            return this.getParser().getSectionReaderState();
        }
        if (nextValueIs(ExpectedResultsParser.METADATA_IDENTIFIER, st, token)) {
            return this.getParser().getMetadataReaderState();
        }
        return this.getParser().getDataReaderState();
    }
    return null;
}
/**
 * Splits {@code line} into a command and its argument list and stores both
 * on the given job template. Tokens may be bare words, numbers, or strings
 * quoted with double or single quotes; the first token is the command.
 *
 * @param jt job template to receive the command and args
 * @param line command line to tokenize
 * @throws IOException if tokenization fails
 * @throws DrmaaException if the template rejects the values
 */
private static void setJobTemplateCommand(JobTemplate jt, String line)
        throws IOException, DrmaaException {
    Reader r = new StringReader(line);
    StreamTokenizer tokenizer = new StreamTokenizer(r);
    tokenizer.quoteChar('"');
    tokenizer.quoteChar('\'');
    String cmd = null;
    List<String> args = new ArrayList<String>();
    for (int tok = tokenizer.nextToken(); tok != StreamTokenizer.TT_EOF;
            tok = tokenizer.nextToken()) {
        String value;
        if (tok == StreamTokenizer.TT_WORD || tok == '"' || tok == '\'') {
            // Words and quoted strings carry their text in sval. The
            // original dropped quoted tokens entirely even though it
            // registered both quote characters.
            value = tokenizer.sval;
        } else if (tok == StreamTokenizer.TT_NUMBER) {
            // sval is null for numbers, so render nval instead; the original
            // appended null here. Integral values print without ".0".
            double n = tokenizer.nval;
            value = (n == Math.rint(n) && !Double.isInfinite(n))
                    ? String.valueOf((long) n)
                    : String.valueOf(n);
        } else {
            continue; // Skip ordinary punctuation characters.
        }
        if (cmd == null) {
            cmd = value;
        } else {
            args.add(value);
        }
    }
    jt.setRemoteCommand(cmd);
    jt.setArgs(args);
}
/** * Parses a option line likes * -genkaypair -dname "CN=Me" * and add the results into a list * @param list the list to fill into * @param s the line */ private static void parseArgsLine(List<String> list, String s) throws IOException, PropertyExpander.ExpandException { StreamTokenizer st = new StreamTokenizer(new StringReader(s)); st.resetSyntax(); st.whitespaceChars(0x00, 0x20); st.wordChars(0x21, 0xFF); // Everything is a word char except for quotation and apostrophe st.quoteChar('"'); st.quoteChar('\''); while (true) { if (st.nextToken() == StreamTokenizer.TT_EOF) { break; } list.add(PropertyExpander.expand(st.sval)); } }
/** * createTokenizer - build up StreamTokenizer for the command script * @param script command script to parsed * @return StreamTokenizer for command script */ private static StreamTokenizer createTokenizer(final String script) { final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(script)); tokenizer.resetSyntax(); // Default all characters to word. tokenizer.wordChars(0, 255); // Spaces and special characters are white spaces. tokenizer.whitespaceChars(0, ' '); // Ignore # comments. tokenizer.commentChar('#'); // Handle double and single quote strings. tokenizer.quoteChar('"'); tokenizer.quoteChar('\''); // Need to recognize the end of a command. tokenizer.eolIsSignificant(true); // Command separator. tokenizer.ordinaryChar(';'); // Pipe separator. tokenizer.ordinaryChar('|'); return tokenizer; }
/**
 * Sets up the stream tokenizer over this object's character stream.
 * (NOTE(review): {@code this} is passed as the tokenizer's source —
 * presumably this class extends {@link java.io.Reader}; confirm against the
 * class declaration.)
 *
 * <p>Call order matters: resetSyntax() clears everything first, and the
 * broad wordChars range below subsumes the narrower ones before the
 * comment/whitespace carve-outs are applied.
 */
private void setup() {
    st = new StreamTokenizer(this);
    st.resetSyntax();
    st.eolIsSignificant(false);
    st.lowerCaseMode(true);
    // Parse numbers as words
    st.wordChars('0', '9');
    st.wordChars('-', '.');
    // Characters as words (covers the whole Latin-1 range)
    st.wordChars('\u0000', '\u00FF');
    // Skip comments
    st.commentChar('%');
    // Skip whitespace and newlines
    st.whitespaceChars(' ', ' ');
    st.whitespaceChars('\u0009', '\u000e');
}
/**
 * Maps the token just read to a predicate/function name string, handling
 * multi-character operators ("\+", "\=", "\==") whose characters arrive as
 * separate tokens. (":-" support is present but commented out.)
 *
 * @param tokenRead the token type returned by the tokenizer
 * @return the predicate or function name
 * @throws ParsingException if the token cannot start a predicate name
 */
private String getPredicateOrFunctionName(int tokenRead) throws ParsingException {
    switch (tokenRead) { // If changed, check out checkForPredicateNamesThatAreCharacters (for cases where a single-char string is returned).
        case StreamTokenizer.TT_WORD:
            return tokenizer.sval();
        // case ':': if (tokenizer.prevToken() == '-') { return ":-"; } // Support ':-' as a predicate.
        case '-':
            return "-";
        case '+':
            // "\+" arrives as '\\' followed by '+'.
            if (tokenizer.prevToken() == '\\') { return "\\+"; }
            return "+";
        case '=':
            // Starting from '=', only "\==" is legal; a bare '=' breaks out
            // to the exception below.
            if (tokenizer.prevToken() == '\\') {
                if (checkAndConsume('=')) { return "\\=="; }
            }
            break;
        case '\\':
            // Look ahead for "\+", "\=" and "\==".
            if (checkAndConsume('+')) { return "\\+"; }
            if (checkAndConsume('=')) {
                if (checkAndConsume('=')) { return "\\=="; }
                return "\\=";
            }
    }
    throw new ParsingException("Expecting a predicate name but read: '" + reportLastItemRead() + "'.");
}
/**
 * Reads an integer token, allowing a leading '-' for negation and '@name'
 * indirection through the string handler's parameter settings.
 *
 * @return the parsed integer value
 * @throws ParsingException if the next token(s) do not form an integer; the
 *         offending tokens are pushed back first so the caller can recover
 * @throws IOException if the underlying read fails
 */
private int readInteger() throws ParsingException, IOException {
    int tokenRead = getNextToken();
    boolean negated = false;
    if (tokenRead == '-') {
        negated = true;
        tokenRead = getNextToken();
    }
    if (tokenRead == '@') { // A leading '@' indicates the value needs to be looked up in the list of set parameters.
        tokenRead = getNextToken();
        String wordRead = tokenizer.sval();
        String setting = stringHandler.getParameterSetting(wordRead);
        if (setting == null) {
            Utils.error(" Read '@" + wordRead + "', but '" + wordRead + "' has not been set.");
        }
        Integer setToInteger = Integer.parseInt(setting);
        // NOTE(review): Integer.parseInt never returns null (it throws
        // NumberFormatException), so this branch is dead code.
        if (setToInteger == null) {
            Utils.error(" Read '@" + wordRead + "', but '" + wordRead + "' has been set to '" + setting + "', rather than an integer.");
        }
        return setToInteger;
    }
    if (tokenRead != StreamTokenizer.TT_WORD || !isAllDigits(tokenizer.sval())) {
        String lastItem = reportLastItemRead();
        // Push back the digit token — and the '-' too if one was consumed.
        tokenizer.pushBack();
        if (negated) {
            tokenizer.pushBack();
        } // Get back to state when readInteger() called in case the caller wants to field the exception.
        throw new ParsingException("Expecting an integer but got: '" + lastItem + "'.");
    }
    int value = Integer.parseInt(tokenizer.sval());
    if (negated) {
        return -value;
    }
    return value;
}
/**
 * Initializes the stream tokenizer for field parsing: control characters
 * become whitespace, printable characters become word characters, the
 * configured field separator is treated as whitespace, and each configured
 * enclosure character becomes a quote character.
 *
 * @param tokenizer the tokenizer to initialize
 * @throws IllegalArgumentException if any configured enclosure is not
 *         exactly one character long
 */
private void initTokenizer(StreamTokenizer tokenizer) {
    tokenizer.resetSyntax();
    tokenizer.whitespaceChars(0, (' ' - 1));
    tokenizer.wordChars(' ', '\u00FF');
    // The field separator splits tokens, so register it as whitespace.
    tokenizer.whitespaceChars(m_FieldSeparator.charAt(0), m_FieldSeparator.charAt(0));
    // tokenizer.commentChar('%');
    // m_Enclosures is a comma-separated list of single quote characters.
    String[] parts = m_Enclosures.split(",");
    for (String e : parts) {
        if (e.length() > 1 || e.length() == 0) {
            throw new IllegalArgumentException(
                "Enclosures can only be single characters");
        }
        tokenizer.quoteChar(e.charAt(0));
    }
    // Rows end at newlines, so EOL must surface as a token.
    tokenizer.eolIsSignificant(true);
}
/**
 * Parses a {@code List<ElementType>;} schema declaration. The tokenizer is
 * positioned just after the type name when called.
 *
 * @param typeName name of the type being declared
 * @param tokenizer tokenizer positioned after the 'List' type name
 * @return the parsed list schema
 * @throws IOException on malformed syntax
 */
private static HollowListSchema parseListSchema(String typeName, StreamTokenizer tokenizer) throws IOException {
    int tok = tokenizer.nextToken();
    if (tok != '<')
        throw new IOException("Invalid Syntax: Expected '<' after 'List' for type " + typeName);
    tok = tokenizer.nextToken();
    // The original only logged a warning here and went on to build a schema
    // with a null element type; throw like every other syntax check does.
    if (tok != StreamTokenizer.TT_WORD)
        throw new IOException("Invalid Syntax: Expected element type declaration: " + typeName);
    String elementType = tokenizer.sval;
    tok = tokenizer.nextToken();
    if (tok != '>')
        throw new IOException("Invalid Syntax: Expected '>' element type declaration: " + typeName);
    tok = tokenizer.nextToken();
    if (tok != ';')
        throw new IOException("Invalid Syntax: Expected semicolon after List schema declaration: " + typeName);
    return new HollowListSchema(typeName, elementType);
}
/**
 * Parses a {@code Set<ElementType> ... ;} schema declaration, including an
 * optional hash key specification. The tokenizer is positioned just after
 * the type name when called.
 *
 * @param typeName name of the type being declared
 * @param tokenizer tokenizer positioned after the 'Set' type name
 * @return the parsed set schema
 * @throws IOException on malformed syntax
 */
private static HollowSetSchema parseSetSchema(String typeName, StreamTokenizer tokenizer) throws IOException {
    int tok = tokenizer.nextToken();
    if (tok != '<')
        throw new IOException("Invalid Syntax: Expected '<' after 'Set' for type " + typeName);
    tok = tokenizer.nextToken();
    // The original only logged a warning here and went on to build a schema
    // with a null element type; throw like every other syntax check does.
    if (tok != StreamTokenizer.TT_WORD)
        throw new IOException("Invalid Syntax: Expected element type declaration: " + typeName);
    String elementType = tokenizer.sval;
    tok = tokenizer.nextToken();
    if (tok != '>')
        throw new IOException("Invalid Syntax: Expected '>' element type declaration: " + typeName);
    tokenizer.nextToken();
    // parseHashKey presumably consumes the optional hash key clause and
    // leaves the tokenizer on the trailing token — confirm against its
    // definition.
    String[] hashKeyPaths = parseHashKey(tokenizer);
    if (tokenizer.ttype != ';')
        throw new IOException("Invalid Syntax: Expected semicolon after Set schema declaration: " + typeName);
    return new HollowSetSchema(typeName, elementType, hashKeyPaths);
}
/**
 * Parses a material (MTL) stream into a map from appearance name to
 * {@link Appearance}. Word tokens start or extend a material definition;
 * end-of-line tokens are ignored; anything else is a format error.
 */
private static Map<String, Appearance> parseMaterialStream(Reader reader,
        URL baseUrl, Boolean useCaches) throws IOException {
    final Map<String, Appearance> appearances = new HashMap<String, Appearance>();
    Appearance currentAppearance = null;
    final StreamTokenizer tokenizer = createTokenizer(reader);
    for (int token = tokenizer.nextToken(); token != StreamTokenizer.TT_EOF;
            token = tokenizer.nextToken()) {
        if (token == StreamTokenizer.TT_WORD) {
            currentAppearance = parseMaterialLine(tokenizer,
                appearances, currentAppearance, baseUrl, useCaches);
        } else if (token != StreamTokenizer.TT_EOL) {
            throw new IncorrectFormatException(
                "Unexpected token " + tokenizer.sval
                + " at row " + tokenizer.lineno());
        }
    }
    return appearances;
}
/**
 * Returns the unquoted form of {@code str}. A literal "null" (or null)
 * yields null; a string the tokenizer recognizes as double-quoted yields
 * its content; anything else is delegated to {@code unquote}.
 *
 * @param str string to unquote, possibly null
 * @param escapeHtml passed through to the fallback unquoter
 */
public static String unquote2(String str, boolean escapeHtml) {
    if (str == null || str.equals("null")) {
        return null;
    }
    final StreamTokenizer parser = new StreamTokenizer(new StringReader(str));
    try {
        parser.nextToken();
        if (parser.ttype == '"') {
            return parser.sval;
        }
    } catch (IOException ignored) {
        // Tokenization failed; fall through to the manual unquoter.
    }
    return unquote(str, escapeHtml);
}
/** Returns a sorted map of delimiters, based on their entropy of next character measure.
 *@return The {@link SortedMap} of Delimiters, where each delimiter is matched to its entropy measure.
 */
public SortedMap getDelimiters() {
    // If extracted then return a copy
    if (smDelims != null)
        return new TreeMap(smDelims);
    // Else extract candidate delimiters from the overall graph's data string.
    smDelims = identifyCandidateDelimiters(sgOverallGraph.getDataString(), 1);
    int iImportant = determineImportantDelimiters(smDelims);
    // Walk the key iterator past the (size - iImportant) lowest keys so the
    // tailMap below keeps only the iImportant highest-entropy entries.
    Iterator iIter = smDelims.keySet().iterator();
    int iCnt = 0;
    while (iIter.hasNext() && (iCnt++ < smDelims.size() - iImportant))
        iIter.next();
    smDelims = smDelims.tailMap(iIter.next());
    // NOTE(review): containsValue autoboxes TT_EOF (an int, -1) to an
    // Integer, but the values stored below are Strings — this test can
    // never be true, so the EOF entry is added on every extraction.
    // Confirm whether the intent was containsValue of the one-char string.
    if (!smDelims.containsValue(StreamTokenizer.TT_EOF)) {
        smDelims.put((Double)smDelims.lastKey() + 0.1, new StringBuffer().append((char)StreamTokenizer.TT_EOF).toString()); // Add EOF char
    }
    // Return copy of delims
    return new TreeMap(smDelims);
}