Example source code for the Java class org.apache.commons.io.input.CharSequenceReader
Project: r01fb
File: StringPersistenceUtils.java
/**
 * Stores a character sequence into a file, normalizing every line ending to CRLF.
 * The file is overwritten if it already exists.
 *
 * @param theString the string to persist
 * @param f the destination file
 * @throws IOException if the sequence cannot be read or the file cannot be written
 */
public static void save(final CharSequence theString,final File f) throws IOException {
    // try-with-resources guarantees both streams are closed even when an
    // IOException is thrown mid-copy (the original leaked them on error).
    // NOTE(review): FileWriter uses the platform-default charset; kept for
    // backward compatibility — confirm whether an explicit charset is wanted.
    try (BufferedReader reader = new BufferedReader(new CharSequenceReader(theString));
         BufferedWriter writer = new BufferedWriter(new FileWriter(f, false))) {
        String line = reader.readLine();
        while (line != null) {
            writer.write(line + "\r\n");
            line = reader.readLine();
        }
        writer.flush();
    }
}
Project: GeneralUtils
File: GuavaReader2FileTest.java
@Test
public void givenUsingCommonsIO_whenWritingReaderContentsToFile_thenCorrect() throws IOException {
    File targetFile = new File("src/test/resources/targetFile.txt");
    FileUtils.touch(targetFile);
    // try-with-resources closes the reader even if toByteArray throws
    // (the original only closed it on the success path)
    try (Reader initialReader = new CharSequenceReader("CharSequenceReader extends Reader")) {
        // specify the charset explicitly: the charset-less overload uses the
        // platform default and is deprecated in recent commons-io releases
        byte[] buffer = IOUtils.toByteArray(initialReader, java.nio.charset.StandardCharsets.UTF_8);
        FileUtils.writeByteArrayToFile(targetFile, buffer);
    }
}
Project: GeneralUtils
File: GuavaReader2FileTest.java
@Test
public void givenUsingCommonsIO_whenConvertingFileIntoReader_thenCorrect() throws IOException {
    File initialFile = new File("src/test/resources/initialFile.txt");
    FileUtils.touch(initialFile);
    // write and decode with an explicit charset: the charset-less
    // FileUtils.write overload and new String(byte[]) both fall back to the
    // platform default, making the test environment-dependent
    FileUtils.write(initialFile, "With Commons IO", java.nio.charset.StandardCharsets.UTF_8);
    byte[] buffer = FileUtils.readFileToByteArray(initialFile);
    // try-with-resources instead of a manual close()
    try (Reader targetReader = new CharSequenceReader(new String(buffer, java.nio.charset.StandardCharsets.UTF_8))) {
        // the reader is intentionally unused beyond construction: the test
        // only verifies that file contents can be wrapped as a Reader
    }
}
Project: SimpleRename
File: SimpleRename.java
/**
 * Reads the given stream fully into memory and exposes its contents as a
 * {@link Reader}. The input stream is NOT closed; that remains the caller's
 * responsibility.
 *
 * @param initialStream the stream to read; consumed to end-of-stream
 * @return a reader over the decoded contents of the stream
 * @throws IOException if reading the stream fails
 */
public Reader getReaderFromStream(InputStream initialStream)
        throws IOException {
    byte[] buffer = IOUtils.toByteArray(initialStream);
    // decode with an explicit charset: new String(byte[]) uses the platform
    // default, which makes the result environment-dependent
    return new CharSequenceReader(new String(buffer, java.nio.charset.StandardCharsets.UTF_8));
}
Project: querqy
File: AnalyzingQuerqyParser.java
/**
 * Adds one clause to the given {@link DisjunctionMaxQuery} for every synonym
 * token that {@code optSynonymAnalyzer} emits for the original term.
 *
 * @param dmq
 *            {@link DisjunctionMaxQuery} that receives the synonym clauses.
 * @param original
 *            Original term to determine synonyms for.
 * @throws IOException if the token stream cannot be consumed.
 */
private void addSynonyms(DisjunctionMaxQuery dmq, CharSequence original) throws IOException {
// try-with-resources closes the stream even when consumption fails;
// reset() must precede incrementToken() per the TokenStream contract
try (TokenStream synonymTokens = optSynonymAnalyzer.tokenStream("querqy", new CharSequenceReader(original))) {
synonymTokens.reset();
CharTermAttribute generated = synonymTokens.addAttribute(CharTermAttribute.class);
while (synonymTokens.incrementToken()) {
// We need to copy "generated" per toString() here, because
// "generated" is transient: the attribute's buffer is reused
// for every token the stream produces.
dmq.addClause(new Term(dmq, generated.toString(), true));
}
synonymTokens.end();
}
}
Project: solr-bmax-queryparser
File: Terms.java
/**
 * Runs {@code input} through the given {@link Analyzer} (-chain) and collects
 * every emitted token as a string.
 *
 * @param input
 *            Input string; must not be null.
 * @param analyzer
 *            Analyzer producing the token stream; must not be null.
 * @return The distinct terms produced by analyzing the input.
 */
public static Set<CharSequence> collect(CharSequence input, Analyzer analyzer) {
    checkNotNull(input, "Pre-condition violated: input must not be null.");
    checkNotNull(analyzer, "Pre-condition violated: analyzer must not be null.");
    Set<CharSequence> terms = new HashSet<>();
    TokenStream stream = null;
    try {
        stream = analyzer.tokenStream("bmax", new CharSequenceReader(input));
        stream.reset();
        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
        while (stream.incrementToken()) {
            // Copy to a String: the attribute's internal buffer is reused per
            // token and flushed on stream.end().
            terms.add(termAtt.toString());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        TokenStreams.endQuietly(stream);
        TokenStreams.resetQuietly(stream);
        TokenStreams.closeQuietly(stream);
    }
    return terms;
}
Project: solr-bmax-queryparser
File: Terms.java
/**
 * Collects terms from the given analyzer relying on {@linkplain BytesRef}s and not strings.
 *
 * @param input
 *            Input string; must not be null.
 * @param analyzer
 *            Analyzer producing the token stream; must not be null.
 * @param field
 *            Field name for the created terms; must not be null.
 * @return The distinct terms produced by analyzing the input.
 */
public static Set<Term> collectTerms(CharSequence input, Analyzer analyzer, String field) {
    checkNotNull(input, "Pre-condition violated: input must not be null.");
    checkNotNull(analyzer, "Pre-condition violated: analyzer must not be null.");
    checkNotNull(field, "Pre-condition violated: field must not be null.");
    Set<Term> result = new HashSet<>();
    TokenStream tokenStream = null;
    try {
        tokenStream = analyzer.tokenStream(field, new CharSequenceReader(input));
        tokenStream.reset();
        TermToBytesRefAttribute termAttribute = tokenStream.addAttribute(TermToBytesRefAttribute.class);
        while (tokenStream.incrementToken()) {
            // Deep-copy the byte ref: the attribute's bytes are reused per
            // token and flushed on tokenStream.end().
            // (A redundant bare getBytesRef() call whose result was discarded
            // has been removed; the call inside deepCopyOf still executes.)
            result.add(new Term(field, BytesRef.deepCopyOf(termAttribute.getBytesRef())));
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        TokenStreams.endQuietly(tokenStream);
        TokenStreams.resetQuietly(tokenStream);
        TokenStreams.closeQuietly(tokenStream);
    }
    return result;
}
Project: BUbiNG
File: SpamTextProcessor.java
/**
 * Appends the whole character sequence by wrapping it in a
 * {@link CharSequenceReader}, handing it to {@code fbr} (presumably a
 * reusable buffered reader field — confirm against the class), and
 * processing it immediately.
 */
@Override
public Appendable append(CharSequence csq) throws IOException {
fbr.setReader(new CharSequenceReader(csq));
process();
return this;
}
Project: BUbiNG
File: SpamTextProcessor.java
/**
 * Appends the [start, end) slice of the character sequence: the slice is
 * wrapped in a {@link CharSequenceReader}, handed to {@code fbr}, and
 * processed immediately. Index semantics follow
 * {@link CharSequence#subSequence(int, int)}.
 */
@Override
public Appendable append(CharSequence csq, int start, int end) throws IOException {
fbr.setReader(new CharSequenceReader(csq.subSequence(start, end)));
process();
return this;
}
Project: webapp
File: ApplicationOpenedEventRestController.java
/**
 * Accepts an uploaded file of pipe-delimited event lines and stores an
 * {@link ApplicationOpenedEvent} for every line not already persisted
 * (matched on device, time and package name).
 *
 * @param request the incoming HTTP request (query string is logged)
 * @param multipartFile the uploaded event file; ignored when empty
 * @return a JSON string of the form {"result":"success"}
 */
@RequestMapping("/create")
public String create(
        HttpServletRequest request,
        // TODO: checksum,
        @RequestParam MultipartFile multipartFile
) {
    logger.info("create");
    logger.info("request.getQueryString(): " + request.getQueryString());
    if (!multipartFile.isEmpty()) {
        try {
            byte[] bytes = multipartFile.getBytes();
            // try-with-resources: the original leaked the reader when
            // readLines threw. Decode with an explicit charset instead of the
            // platform default.
            List<String> lines;
            try (Reader reader = new CharSequenceReader(new String(bytes, java.nio.charset.StandardCharsets.UTF_8))) {
                lines = IOUtils.readLines(reader);
            }
            logger.info("lines.size(): " + lines.size());
            for (String eventLine : lines) {
                logger.info("eventLine: " + eventLine);
                // Expected format: id:163|deviceId:2312aff4939750ea|time:1496843219926|packageName:ai.elimu.nyaqd|studentId:2312aff4939750ea_4
                String deviceId = EventLineHelper.getDeviceId(eventLine);
                Device device = deviceDao.read(deviceId);
                logger.info("device: " + device);
                Calendar timeOfEvent = EventLineHelper.getTime(eventLine);
                String packageName = EventLineHelper.getPackageName(eventLine);
                ApplicationOpenedEvent existingApplicationOpenedEvent = applicationOpenedEventDao.read(device, timeOfEvent, packageName);
                logger.info("existingApplicationOpenedEvent: " + existingApplicationOpenedEvent);
                if (existingApplicationOpenedEvent == null) {
                    // only persist events we have not seen before
                    ApplicationOpenedEvent applicationOpenedEvent = new ApplicationOpenedEvent();
                    applicationOpenedEvent.setDevice(device);
                    applicationOpenedEvent.setCalendar(timeOfEvent);
                    applicationOpenedEvent.setPackageName(packageName);
                    applicationOpenedEventDao.create(applicationOpenedEvent);
                }
            }
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    JSONObject jsonObject = new JSONObject();
    jsonObject.put("result", "success");
    // TODO: handle error
    logger.info("jsonObject: " + jsonObject);
    return jsonObject.toString();
}
Project: querqy
File: TermSubQueryBuilder.java
/**
 * Builds a {@link TermSubQueryFactory} for the given source term by running it
 * through the field's analyzer, consulting the term query cache first.
 *
 * @param fieldname the field the term is analyzed and queried against
 * @param sourceTerm the term to analyze
 * @param boost the field boost applied via the created factory
 * @return the factory, or null if the term yields no query
 * @throws IOException if the token stream cannot be consumed or closed
 */
public TermSubQueryFactory termToFactory(String fieldname, Term sourceTerm, FieldBoost boost) throws IOException {
    CacheKey cacheKey = null;
    if (termQueryCache != null) {
        cacheKey = new CacheKey(fieldname, sourceTerm);
        TermQueryCacheValue cacheValue = termQueryCache.get(cacheKey);
        if (cacheValue != null) {
            // The cache references factories with pre-analyzed terms, or cache entries without a
            // query factory if the term does not exist in the index. cacheValue.hasQuery() returns
            // true/false correspondingly.
            // Cache entries don't have a boost factor, it is only added later via the queryFactory.
            return (cacheValue.hasQuery()) ? new TermSubQueryFactory(cacheValue, boost) : null;
        }
    }
    LuceneQueryFactoryAndPRMSQuery root;
    // try-with-resources replaces the original manual finally block, whose
    // empty catch silently swallowed close() failures; close() errors now
    // propagate as the IOException this method already declares
    try (TokenStream ts = analyzer.tokenStream(fieldname, new CharSequenceReader(sourceTerm))) {
        CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
        PositionIncrementAttribute posIncAttr = ts.addAttribute(PositionIncrementAttribute.class);
        ts.reset();
        PositionSequence<org.apache.lucene.index.Term> sequence = new PositionSequence<>();
        while (ts.incrementToken()) {
            int inc = posIncAttr.getPositionIncrement();
            if (inc > 0 || sequence.isEmpty()) {
                // a positive increment starts a new position; tokens with
                // increment 0 stack onto the current position
                sequence.nextPosition();
            }
            sequence.addElement(new org.apache.lucene.index.Term(fieldname, new BytesRef(termAttr)));
        }
        root = positionSequenceToQueryFactoryAndPRMS(sequence);
    }
    putQueryFactoryAndPRMSQueryIntoCache(cacheKey, root);
    return root == null ? null : new TermSubQueryFactory(root, boost);
}