Java class com.fasterxml.jackson.databind.MappingIterator example source code
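Before the project examples, a minimal sketch of the typical MappingIterator workflow (Person is a hypothetical POJO invented for illustration; assumes Jackson 2.x on the classpath). The iterator binds one root-level value at a time instead of materializing the whole collection up front:

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MappingIteratorSketch {
    // Hypothetical POJO for illustration only.
    public static class Person {
        public String name;
        public int age;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // A stream of root-level JSON values; readValues binds them one by one.
        String json = "{\"name\":\"Ada\",\"age\":36} {\"name\":\"Alan\",\"age\":41}";
        try (MappingIterator<Person> it = mapper.readerFor(Person.class).readValues(json)) {
            while (it.hasNextValue()) {
                Person p = it.nextValue();
                System.out.println(p.name + " is " + p.age);
            }
        }
    }
}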
Project: chart-parser
File: ChartParser.java
/**
 * Uses a {@link CsvMapper} to read a String containing the CSV representation of a PDF
 * Chart, returning a list of {@link ChartCharacter}s
 */
static List<ChartCharacter> readChartCsv(String csvChart) throws ChartParserException {
    CsvSchema schema = CsvSchema.emptySchema()
            .withHeader()
            .withColumnSeparator('|')
            .withoutQuoteChar();
    try {
        MappingIterator<ChartCharacter> mappingIterator =
                getCsvMapper().readerFor(ChartCharacter.class)
                        .with(schema)
                        .readValues(csvChart);
        return mappingIterator.readAll();
    } catch (Exception e) {
        throw new ChartParserException("Error deserializing the Chart CSV data", e);
    }
}
Project: registry
File: TruckEventsCsvConverter.java
private MappingIterator<TruckEvent> readTruckEventsFromCsv(InputStream csvStream) throws IOException {
    CsvSchema bootstrap = CsvSchema.builder()
            // driverId,truckId,eventTime,eventType,longitude,latitude,eventKey,correlationId,driverName,routeId,routeName,eventDate
            .addColumn("driverId", CsvSchema.ColumnType.NUMBER)
            .addColumn("truckId", CsvSchema.ColumnType.NUMBER)
            .addColumn("eventTime", CsvSchema.ColumnType.STRING)
            .addColumn("eventType", CsvSchema.ColumnType.STRING)
            .addColumn("longitude", CsvSchema.ColumnType.NUMBER)
            .addColumn("latitude", CsvSchema.ColumnType.NUMBER)
            .addColumn("eventKey", CsvSchema.ColumnType.STRING)
            .addColumn("correlationId", CsvSchema.ColumnType.NUMBER)
            .addColumn("driverName", CsvSchema.ColumnType.STRING)
            .addColumn("routeId", CsvSchema.ColumnType.NUMBER)
            .addColumn("routeName", CsvSchema.ColumnType.STRING)
            .addColumn("eventDate", CsvSchema.ColumnType.STRING)
            // .addColumn("miles", CsvSchema.ColumnType.NUMBER)
            .build().withHeader();
    CsvMapper csvMapper = new CsvMapper();
    return csvMapper.readerFor(TruckEvent.class).with(bootstrap).readValues(csvStream);
}
Project: spring-cloud-skipper
File: PackageMetadataService.java
protected List<PackageMetadata> deserializeFromIndexFiles(List<File> indexFiles) {
    List<PackageMetadata> packageMetadataList = new ArrayList<>();
    YAMLMapper yamlMapper = new YAMLMapper();
    yamlMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    for (File indexFile : indexFiles) {
        try {
            MappingIterator<PackageMetadata> it = yamlMapper.readerFor(PackageMetadata.class).readValues(indexFile);
            while (it.hasNextValue()) {
                PackageMetadata packageMetadata = it.next();
                packageMetadataList.add(packageMetadata);
            }
        }
        catch (IOException e) {
            throw new IllegalArgumentException("Can't parse Release manifest YAML", e);
        }
    }
    return packageMetadataList;
}
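The reader above iterates over multiple root-level documents in a single YAML index file, which is exactly the case MappingIterator is designed for. A minimal, self-contained sketch of the same pattern (assuming jackson-dataformat-yaml on the classpath); note that hasNextValue()/nextValue() surface IOException directly, while the Iterator-conformant hasNext()/next() must wrap it in an unchecked exception:

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
import java.io.IOException;
import java.util.Map;

public class YamlDocsSketch {
    public static void main(String[] args) throws IOException {
        YAMLMapper yaml = new YAMLMapper();
        // Two YAML documents in one stream; the iterator yields one Map per document.
        String docs = "name: one\n---\nname: two\n";
        try (MappingIterator<Map<String, Object>> it = yaml.readerFor(Map.class).readValues(docs)) {
            while (it.hasNextValue()) {   // declares IOException, no wrapping needed
                Map<String, Object> doc = it.nextValue();
                System.out.println(doc.get("name"));
            }
        }
    }
}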
Project: xm-uaa
File: CaptchaService.java
public boolean checkCaptcha(String captcha) {
    if (isBlank(captcha)) {
        throw new ErrorCheckCaptcha("error.captcha.required", "Captcha is blank");
    }
    String url = getReCaptcha().getUrl() + "?secret=" + getReCaptcha().getSecretKey() + "&response=" + captcha;
    log.debug("Check captcha by url {}", url);
    final ObjectReader reader = new ObjectMapper().readerFor(Map.class);
    try {
        final MappingIterator<Map<String, Object>> result = reader.readValues(new URL(url));
        Map<String, Object> resultMap = result.nextValue();
        log.info("Captcha result map {}", resultMap);
        Boolean success = (Boolean) resultMap.get("success");
        return success;
    } catch (IOException e) {
        throw new ErrorCheckCaptcha(e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
private void processForkFile(File f, Set<String> users, Set<String> projects) {
    logger.info("Processing " + f);
    try (InputStream in = new FileInputStream(f)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<GitHubForkEvent> it = mapper.readerFor(GitHubForkEvent.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            GitHubForkEvent cde = it.next();
            converterService.mapUserForkEvent(cde, users, projects);
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + f, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
private void processCreateDeleteEntity(File f, Set<String> users, Set<String> projects) {
    logger.info("Processing " + f);
    try (InputStream in = new FileInputStream(f)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<GitHubCreateDeleteEvent> it = mapper.readerFor(GitHubCreateDeleteEvent.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            GitHubCreateDeleteEvent cde = it.next();
            converterService.mapUserCreateDeleteEvent(cde, users, projects);
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + f, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
private void processPushEvents(File f, Set<String> users, Set<String> projects) {
    try (InputStream in = new FileInputStream(f)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        long rows = 0;
        MappingIterator<GitHubPushEvent> it = mapper.readerFor(GitHubPushEvent.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            GitHubPushEvent pe = it.next();
            if (pe.getShas() != null && pe.getShas().length() > 0) {
                String[] shas = pe.getShas().split(";");
                converterService.mapPushEvent(pe, users, projects, commit_shas, shas);
            }
            rows += 1;
            if (rows % 10000 == 0) {
                logger.info("....read " + rows + " rows of push_events.csv");
            }
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + f, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
private void processCommitCommentEvents(File f, Set<String> users, Set<String> projects, Map<String, Long> commit_shas) {
    logger.info("Processing " + f + ", first for Commit messages... ");
    try (InputStream in = new FileInputStream(f)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<GitHubCommitCommentEvent> it = mapper.readerFor(GitHubCommitCommentEvent.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            GitHubCommitCommentEvent cde = it.next();
            converterService.mapCommitCommentEvent(cde,
                    users, projects, commit_shas.get(cde.getProject() + "#" + cde.getSha()));
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + f, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
/**
 * Parses a pypi_versions.csv file and calls converterService to process it.
 *
 * Example header plus one row:
 *
 * project_owner,project_name,pypi_name,pypi_rawname,version,upload_time,python_version,filename
 * skwashd,python-acquia-cloud,acapi,acapi,0.4.1,2015-11-21 09:30:17,source,acapi-0.4.1.tar.gz
 *
 * @param file the file to process
 */
private void processVersionHistoryFile(File file) {
    logger.info("Processing " + file);
    try (InputStream in = new FileInputStream(file)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<RevisionEvent> it = mapper.readerFor(RevisionEvent.class).with(schema).readValues(in);
        boolean first = true;
        while (it.hasNextValue()) {
            RevisionEvent revision = it.next();
            //logger.info("Version: " + revision.getProjectFullName() + ", " + revision.getPypiName() + "/" + revision.getVersion() + " " + revision.getUploadTime());
            converterService.mapVersionInfo(
                    revision.getProjectFullName(),
                    revision.getPypiName(),
                    revision.getVersion(),
                    revision.getFilename() + "?" + revision.getPythonVersion(),
                    revision.getUploadTime());
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + file, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
/**
 * Parses a CSV file listing who watched what project when,
 * binds its contents to GitHubWatchEvent objects,
 * and passes them on to the DiscourseDB converter
 *
 * File format example:
 *
 * actor,project,created_at
 * F21,danielstjules/Stringy,2015-01-01T00:01:53Z
 * radlws,tomchristie/django-rest-framework,2015-01-01T00:05:29Z
 *
 * @param file a dataset file to process
 */
private void processWatchEvent(File file, Set<String> users, Set<String> projects) {
    logger.info("Processing " + file);
    try (InputStream in = new FileInputStream(file)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<GitHubWatchEvent> it = mapper.readerFor(GitHubWatchEvent.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            GitHubWatchEvent gwl = it.next();
            converterService.mapUserRepoEvent(
                    gwl.getActor(), gwl.getProject(), gwl.getCreatedAt(),
                    DiscoursePartInteractionTypes.WATCH,
                    users, projects);
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + file, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
/**
 * Parses a CSV file listing which pull requests contained which
 * commits (by SHA),
 * and passes it on to the DiscourseDB converter
 *
 * File format example:
 *
 * (fix me)
 *
 * @param file a dataset file to process
 */
private void processPullShasFile(File file, Set<String> users, Set<String> projects, Map<String, Long> commit_shas) {
    logger.info("Processing " + file);
    try (InputStream in = new FileInputStream(file)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
        MappingIterator<GitHubPullReqCommits> it = mapper.readerFor(GitHubPullReqCommits.class).with(schema).readValues(in);
        long row = 0;
        while (it.hasNextValue()) {
            GitHubPullReqCommits prc = it.next();
            converterService.mapPullRequestCommits(prc, users, projects, commit_shas);
            row += 1;
            if (row % 10000 == 0) {
                logger.info("pullShasFile row " + row + " out of about 46,000,000");
            }
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + file, e);
    }
}
Project: discoursedb-core
File: GithubConverter.java
/**
 * Parses a dataset file, binds its contents to a POJO and passes it on to the DiscourseDB converter
 *
 * @param file a dataset file to process
 */
private void reprocessForumFileForRelationships(File file) {
    // project_owner,project_name,outside_forum_id,unique_message_id,date,author_email,author_name,
    // title,body,response_to_message_id,thread_path,message_path
    logger.info("Processing " + file);
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaWithHeader().withNullValue("None");
    try (InputStream in = new FileInputStream(file)) {
        MappingIterator<MailingListComment> it = mapper.readerFor(MailingListComment.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            MailingListComment currentPost = it.next();
            converterService.mapForumPostRelation(currentPost, "GOOGLE_GROUPS");
        }
    } catch (Exception e) {
        logger.error("Could not parse data file " + file, e);
    }
}
Project: discoursedb-core
File: CourseraConverter.java
private Iterable<Map<String, String>> csvIteratorExistingHeaders(String filename, char escapeChar) throws JsonProcessingException, IOException {
    //InputStream in = new FileInputStream(filename, "UTF-8");
    InputStreamReader in = new InputStreamReader(new FileInputStream(filename), "ISO-8859-1");
    MappingIterator<Map<String, String>> iterator = new CsvMapper()
            .readerFor(Map.class)
            .with(CsvSchema.emptySchema().withColumnSeparator(',').withHeader().withEscapeChar(escapeChar))
            .readValues(in);
    List<Map<String, String>> sortable = iterator.readAll();
    if (sortable.get(0).containsKey("discussion_answer_created_ts")) {
        sortable.sort(new Comparator<Map<String, String>>() {
            @Override
            public int compare(Map<String, String> lhs, Map<String, String> rhs) {
                return lhs.get("discussion_answer_created_ts").compareTo(rhs.get("discussion_answer_created_ts"));
            }
        });
    }
    return () -> sortable.iterator();
}
Project: robotframework-filelibrary
File: CsvUtil.java
public static List<Map<String, Object>> loadValuesFromFile(String filePath) {
    List<Map<String, Object>> result = new ArrayList<>();
    File csvFile = new File(filePath);
    if (!csvFile.exists()) {
        throw new FileLibraryException("Cannot find file " + csvFile.getAbsolutePath());
    }
    try {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = CsvSchema.emptySchema().withHeader();
        MappingIterator<Map<String, Object>> it = mapper.readerFor(Map.class).with(schema).readValues(csvFile);
        while (it.hasNext()) {
            result.add(it.next());
        }
        return result;
    } catch (Exception e) {
        throw new FileLibraryException(e);
    }
}
Project: mapping-benchmark
File: JacksonCsvParserBenchmark.java
public static void main(String[] args) throws IOException {
    CsvParam csvParam = new CsvParam();
    csvParam.setUp();
    CsvMapper csvMapper = new CsvMapper();
    csvMapper.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true);
    CsvSchema bootstrapSchema = CsvSchema.emptySchema().withHeader();
    try (Reader reader = csvParam.getReader()) {
        MappingIterator<City> iterator = csvMapper.readerFor(City.class).with(bootstrapSchema).readValues(reader);
        while (iterator.hasNext()) {
            System.out.println(iterator.next());
        }
    }
}
Project: rpg
File: MessagesReader.java
public Messages getMessages() {
    Messages messages = null;
    InputStream is = res.getResourceAsStream("json/messages.json");
    try {
        JsonFactory jsonFactory = new JsonFactory();
        JsonParser jsonParser = jsonFactory.createParser(is);
        ObjectMapper mapper = new ObjectMapper();
        MappingIterator<Messages> messagesMappingIterator = mapper.readValues(jsonParser, Messages.class);
        messages = messagesMappingIterator.next();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return messages;
}
Project: MedicamentDB
File: MedicamentService.java
private void linkPresentations(File dir, List<Medicament> medicaments) throws IOException {
    LOG.info("liste présentations");
    CsvMapper csvMapper = new CsvMapper();
    CsvSchema schema = csvMapper.schemaFor(PresentationCSV.class).withArrayElementSeparator(';').withColumnSeparator('\t');
    MappingIterator<PresentationCSV> it = csvMapper.readerFor(PresentationCSV.class).with(schema).readValues(new File(dir, Constants.CIS_CIP_BDPM_FILE));
    Medicament medicament;
    while (it.hasNext()) {
        PresentationCSV presentation = it.next();
        LOG.debug("référencement presentation médicament - CodeCIS : " + presentation.getCodeCIS());
        medicament = medicaments.stream()
                .filter(x -> x.getCodeCIS().equals(presentation.getCodeCIS()))
                .findFirst().orElse(null);
        if (medicament != null) {
            medicament.getPresentations().add(mapper.toPresentationES(presentation));
        }
    }
}
Project: MedicamentDB
File: MedicamentService.java
private void linkAvisSMR(File dir, List<Medicament> medicaments, Map<String, String> urlsHAS) throws IOException {
    LOG.info("liste avis SMR");
    CsvMapper csvMapper = new CsvMapper();
    CsvSchema schema = csvMapper.schemaFor(AvisSMRCSV.class).withArrayElementSeparator(';').withColumnSeparator('\t');
    MappingIterator<AvisSMRCSV> it = csvMapper.readerFor(AvisSMRCSV.class).with(schema).readValues(new File(dir, Constants.CIS_HAS_SMR_BDPM_FILE));
    Medicament medicament;
    AvisSMR avisSMR;
    while (it.hasNext()) {
        AvisSMRCSV csv = it.next();
        LOG.debug("référencement SMR médicament - CodeCIS : " + csv.getCodeCIS());
        medicament = medicaments.stream()
                .filter(x -> x.getCodeCIS().equals(csv.getCodeCIS()))
                .findFirst().orElse(null);
        if (medicament != null) {
            avisSMR = mapper.toAvisSMRES(csv);
            avisSMR.setUrlHAS(urlsHAS.get(avisSMR.getCodeDossierHAS()));
            medicament.getAvisSMR().add(avisSMR);
        }
    }
}
Project: MedicamentDB
File: MedicamentService.java
private void linkAvisASMR(File dir, List<Medicament> medicaments, Map<String, String> urlsHAS) throws IOException {
    LOG.info("liste avis ASMR");
    CsvMapper csvMapper = new CsvMapper();
    CsvSchema schema = csvMapper.schemaFor(AvisASMRCSV.class).withArrayElementSeparator(';').withColumnSeparator('\t');
    MappingIterator<AvisASMRCSV> it = csvMapper.readerFor(AvisASMRCSV.class).with(schema).readValues(new File(dir, Constants.CIS_HAS_ASMR_BDPM_FILE));
    Medicament medicament;
    AvisASMR avisASMR;
    while (it.hasNext()) {
        AvisASMRCSV csv = it.next();
        LOG.debug("référencement ASMR médicament - CodeCIS : " + csv.getCodeCIS());
        medicament = medicaments.stream()
                .filter(x -> x.getCodeCIS().equals(csv.getCodeCIS()))
                .findFirst().orElse(null);
        if (medicament != null) {
            avisASMR = mapper.toAvisASMRES(csv);
            avisASMR.setUrlHAS(urlsHAS.get(avisASMR.getCodeDossierHAS()));
            medicament.getAvisASMR().add(avisASMR);
        }
    }
}
Project: datacollector
File: FilePipelineStateStore.java
@Override
public List<PipelineState> getHistory(String pipelineName, String rev, boolean fromBeginning) throws PipelineStoreException {
    if (!pipelineDirExists(pipelineName, rev) || !pipelineStateHistoryFileExists(pipelineName, rev)) {
        return Collections.emptyList();
    }
    try (Reader reader = new FileReader(getPipelineStateHistoryFile(pipelineName, rev))) {
        ObjectMapper objectMapper = ObjectMapperFactory.get();
        JsonParser jsonParser = objectMapper.getFactory().createParser(reader);
        MappingIterator<PipelineStateJson> pipelineStateMappingIterator =
                objectMapper.readValues(jsonParser, PipelineStateJson.class);
        List<PipelineStateJson> pipelineStateJsons = pipelineStateMappingIterator.readAll();
        Collections.reverse(pipelineStateJsons);
        if (fromBeginning) {
            return BeanHelper.unwrapPipelineStatesNewAPI(pipelineStateJsons);
        } else {
            int toIndex = pipelineStateJsons.size() > 100 ? 100 : pipelineStateJsons.size();
            return BeanHelper.unwrapPipelineStatesNewAPI(pipelineStateJsons.subList(0, toIndex));
        }
    } catch (IOException e) {
        throw new PipelineStoreException(ContainerError.CONTAINER_0115, pipelineName, rev, e.toString(), e);
    }
}
Project: orcid-update-java
File: DOIPrefixMapper.java
private ImmutableMultimap<String, String> loadPublisherMap(String file) {
    // todo make sortedsetmultimap
    Multimap<String, String> temp = LinkedHashMultimap.create();
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    try {
        MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(
                getClass().getResourceAsStream(file));
        while (it.hasNext()) {
            Object[] row = it.next();
            if (row.length > 1 && (row[0] != null && row[1] != null)
                    && (!row[0].toString().isEmpty() && !row[1].toString().isEmpty())) {
                temp.put(row[1].toString().trim(), row[0].toString().trim());
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return ImmutableMultimap.copyOf(temp);
}
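The WRAP_AS_ARRAY feature enabled above is what lets untyped CSV rows bind to Object[] (or String[]): each row is exposed as a JSON array rather than an object keyed by header names. A minimal sketch of the same idea, assuming jackson-dataformat-csv on the classpath (the CSV literal is invented for illustration):

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;
import java.util.Arrays;

public class WrapAsArraySketch {
    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); // rows become JSON arrays
        String csv = "10.1000,Example Publisher\n10.2000,Another Publisher\n";
        try (MappingIterator<String[]> it =
                     mapper.readerFor(String[].class).readValues(csv)) {
            while (it.hasNextValue()) {
                System.out.println(Arrays.toString(it.nextValue()));
            }
        }
    }
}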
Project: storm-data-contracts
File: TestDataProvider.java
private static Iterator<Object[]> csvToContracts(Class<?> inputClass, Class<?> outputClass, MappingIterator<Object> csvIterator) {
    final ContractFactory<?> outputFactory = new ContractFactory(outputClass);
    final ContractFactory<?> inputFactory = new ContractFactory(inputClass);
    return Iterators.transform(csvIterator, new Function<Object, Object[]>() {
        @Override
        public Object[] apply(Object o) {
            final ObjectNode inputNode = JsonNodeFactory.instance.objectNode();
            final ObjectNode outputNode = JsonNodeFactory.instance.objectNode();
            for (Map.Entry<String, Object> entry : ((Map<String, Object>) o).entrySet()) {
                String value = (String) entry.getValue();
                if (entry.getKey().startsWith("input.")) {
                    inputNode.put(entry.getKey().substring("input.".length()), value);
                } else if (entry.getKey().startsWith("output.")) {
                    outputNode.put(entry.getKey().substring("output.".length()), value);
                } else {
                    Preconditions.checkArgument(false, "CSV header " + entry.getKey() + " must start with " + "input. or output.");
                }
            }
            convertNullsOrJson(inputNode);
            convertNullsOrJson(outputNode);
            final Object input = createAndValidateContract(inputNode, inputFactory);
            final Object output = createAndValidateContract(outputNode, outputFactory);
            return new Object[]{input, output};
        }
    });
}
Project: Llunatic
File: TCSV.java
public void test() throws IOException {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    CsvSchema schema = CsvSchema.emptySchema();
    // schema = schema.withHeader();
    // schema = schema.withQuoteChar('\'');
    // File csvFile = new File("/Temp/llunatic/doctors/10k/doctor.csv");
    File csvFile = new File("/Users/donatello/Temp/chaseBench-workspace/LUBM/data/01k/src-emailAddress.csv");
    long start = new Date().getTime();
    MappingIterator<String[]> it = mapper.readerFor(String[].class).with(schema).readValues(csvFile);
    String[] row = it.next();
    System.out.println(Arrays.asList(row));
    long end = new Date().getTime();
    System.out.println("**** " + (end - start) + " ms");
    // while (it.hasNext()) {
    //     String[] row = it.next();
    //     System.out.println(Arrays.asList(row));
    // }
}
Project: para
File: GitHubAuthFilter.java
private String fetchUserEmail(Integer githubId, String accessToken) throws IOException {
    HttpGet emailsGet = new HttpGet(PROFILE_URL + "/emails");
    emailsGet.setHeader(HttpHeaders.AUTHORIZATION, "Bearer " + accessToken);
    emailsGet.setHeader(HttpHeaders.ACCEPT, "application/json");
    CloseableHttpResponse resp = httpclient.execute(emailsGet);
    HttpEntity respEntity = resp.getEntity();
    String ctype = resp.getFirstHeader(HttpHeaders.CONTENT_TYPE).getValue();
    if (respEntity != null && Utils.isJsonType(ctype)) {
        MappingIterator<Map<String, Object>> emails = jreader.readValues(respEntity.getContent());
        if (emails != null) {
            String email = null;
            while (emails.hasNext()) {
                Map<String, Object> next = emails.next();
                email = (String) next.get("email");
                if (next.containsKey("primary") && (Boolean) next.get("primary")) {
                    break;
                }
            }
            return email;
        }
    }
    return githubId + "@github.com";
}
Project: ABRAID-MP
File: ParseUtils.java
/**
 * Reads a CSV file into a list of the specified type.
 * @param csv The CSV file.
 * @param responseClass The type of the returned list's elements.
 * @param schema A schema representing the CSV file's structure.
 * @param <T> The type of the returned list's elements.
 * @return A list of parsed elements.
 * @throws IOException if parsing failed.
 */
public static <T> List<T> readFromCsv(String csv, Class<T> responseClass, CsvSchema schema) throws IOException {
    if (csv == null) {
        // Protect against NullPointerException
        csv = "";
    }
    ObjectReader reader = new CsvMapper().reader(responseClass).with(schema);
    MappingIterator<T> iterator = reader.readValues(csv);
    ArrayList<T> results = new ArrayList<>();
    try {
        while (iterator.hasNext()) {
            results.add(iterator.next());
        }
    } catch (RuntimeException e) {
        // ObjectReader throws (subclasses of) IOException, but MappingIterator wraps them in a RuntimeException.
        // We unwrap them for consistency.
        Throwable cause = e.getCause();
        if (cause != null && cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw e;
    }
    return results;
}
Project: cdk
File: ReadJsonBuilder.java
@Override
protected boolean doProcess(Record inputRecord, InputStream in) throws IOException {
    MappingIterator iter = reader.readValues(in);
    try {
        while (iter.hasNextValue()) {
            Object rootNode = iter.nextValue();
            incrementNumRecords();
            LOG.debug("jsonObject: {}", rootNode);
            Record outputRecord = inputRecord.copy();
            removeAttachments(outputRecord);
            outputRecord.put(Fields.ATTACHMENT_BODY, rootNode);
            outputRecord.put(Fields.ATTACHMENT_MIME_TYPE, MIME_TYPE);
            // pass record to next command in chain:
            if (!getChild().process(outputRecord)) {
                return false;
            }
        }
        return true;
    } finally {
        iter.close();
    }
}
Project: libraft
File: LocalStore.java
/**
 * Flushes all {@code key=>value} pairs and replaces them with
 * the values read from the given {@code snapshotInputStream}.
 * This implementation expects the {@code InputStream} to contain
 * a sequence of JSON {@link KeyValue} instances.
 *
 * @param lastAppliedIndex log index >= 0 of the last change contained in this snapshot
 * @param snapshotInputStream {@code InputStream} from which all {@code key=>value} pairs should be read
 * @throws IOException if valid {@code key=>value} pairs cannot be read from {@code snapshotInputStream}.
 *     Since no internal state is modified until the {@code snapshotInputStream} is completely read and
 *     parsed, {@code LocalStore} can still be used safely after an {@code IOException} is thrown
 * @throws RuntimeException if the {@code key=>value} pairs in {@code snapshotInputStream} cannot be converted to JSON
 */
synchronized void loadState(long lastAppliedIndex, InputStream snapshotInputStream) throws IOException {
    try {
        // read values from the snapshot into a temporary map
        Map<String, String> snapshotEntries = Maps.newHashMap();
        ObjectMapper mapper = new ObjectMapper();
        ObjectReader reader = mapper.reader(KeyValue.class);
        MappingIterator<KeyValue> it = reader.readValues(snapshotInputStream);
        while (it.hasNext()) {
            KeyValue keyValue = it.next();
            snapshotEntries.put(keyValue.getKey(), keyValue.getValue()); // values are read into this temporary map!
        }
        // update the index
        // NOTE: AFAIU, we should never get a snapshot which contains fewer entries than we've applied
        updateLastAppliedIndex(lastAppliedIndex);
        // replace the old entries map with the new one generated from the snapshot
        entries = snapshotEntries;
    } finally {
        Closeables.close(snapshotInputStream, true);
    }
}
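The snapshot stream loadState() consumes is just a sequence of root-level JSON values, which is what Jackson's SequenceWriter produces. A hedged sketch of the writing side (the KeyValue class here is a stand-in for the project's type, invented for illustration):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SequenceWriter;
import java.io.StringWriter;

public class SnapshotWriteSketch {
    // Stand-in for the project's KeyValue type, for illustration only.
    public static class KeyValue {
        public String key;
        public String value;
        public KeyValue(String key, String value) { this.key = key; this.value = value; }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        StringWriter out = new StringWriter();
        // SequenceWriter emits a sequence of root-level JSON values --
        // the shape loadState() reads back with a MappingIterator.
        try (SequenceWriter seq = mapper.writer().writeValues(out)) {
            seq.write(new KeyValue("a", "1"));
            seq.write(new KeyValue("b", "2"));
        }
        System.out.println(out); // {"key":"a","value":"1"} {"key":"b","value":"2"}
    }
}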
Project: quality
File: GenerateLODCategories.java
public static void main(String[] args) throws IOException {
    System.out.println("Reading Datasets and Categories...");
    URL file = Resources.getResource("datasetsAndCategories.tsv");
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    File csvFile = new File(file.getFile());
    CsvSchema schema = CsvSchema.emptySchema().withColumnSeparator('\t');
    MappingIterator<String[]> it = mapper.reader(String[].class).with(schema).readValues(csvFile);
    while (it.hasNext()) {
        String[] arr = it.next();
        m.add(m.createStatement(m.createResource(arr[0]), m.createProperty(":hasDomain"), m.createTypedLiteral(arr[1])));
        m.add(m.createStatement(m.createResource(arr[0]), m.createProperty(":getFromLOV"), m.createTypedLiteral(true)));
    }
    System.out.println("Writing dump...");
    File f = new File("categories.ttl");
    f.createNewFile();
    FileOutputStream fos = new FileOutputStream(f);
    m.write(fos, "TURTLE");
}
Project: logging-log4j2
File: SocketAppenderTest.java
@Override
public void run() {
    try {
        try (final Socket socket = serverSocket.accept()) {
            if (socket != null) {
                final InputStream is = socket.getInputStream();
                while (!shutdown) {
                    final MappingIterator<LogEvent> mappingIterator = objectMapper.readerFor(Log4jLogEvent.class).readValues(is);
                    while (mappingIterator.hasNextValue()) {
                        queue.add(mappingIterator.nextValue());
                        ++count;
                    }
                }
            }
        }
    } catch (final EOFException eof) {
        // Socket is closed.
    } catch (final Exception e) {
        if (!shutdown) {
            Throwables.rethrow(e);
        }
    }
}
Project: synthea_java
File: SimpleCSV.java
/**
 * Parse the data from the given CSV file into a List of Maps, where the key is the
 * column name. Uses a LinkedHashMap specifically to ensure the order of columns is preserved in
 * the resulting maps.
 *
 * @param csvData
 *     Raw CSV data
 * @return parsed data
 * @throws IOException
 *     if any exception occurs while parsing the data
 */
public static List<LinkedHashMap<String, String>> parse(String csvData) throws IOException {
    // Read schema from the first line; start with bootstrap instance
    // to enable reading of schema from the first line
    // NOTE: reads schema and uses it for binding
    CsvMapper mapper = new CsvMapper();
    // use first row as header; otherwise defaults are fine
    CsvSchema schema = CsvSchema.emptySchema().withHeader();
    MappingIterator<LinkedHashMap<String, String>> it = mapper.readerFor(LinkedHashMap.class)
            .with(schema).readValues(csvData);
    return it.readAll();
}
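A brief usage sketch of the helper above (the call site and CSV literal are invented for illustration):

import java.util.LinkedHashMap;
import java.util.List;

public class SimpleCsvUsageSketch {
    public static void main(String[] args) throws Exception {
        String csv = "name,dose\naspirin,100mg\nibuprofen,200mg\n";
        // Each row becomes an insertion-ordered map keyed by column name.
        List<LinkedHashMap<String, String>> rows = SimpleCSV.parse(csv);
        System.out.println(rows.get(1).get("dose")); // prints 200mg
    }
}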
Project: chart-parser
File: TrackRepository.java
public List<Track> findAll() {
    CsvSchema schema = CsvSchema.emptySchema().withColumnSeparator(';').withHeader();
    try {
        try (InputStream tracks = getClass().getClassLoader().getResourceAsStream(FILENAME)) {
            MappingIterator<Track> mappingIterator = csvMapper.readerFor(Track.class)
                    .with(schema).readValues(tracks);
            return mappingIterator.readAll();
        }
    } catch (IOException e) {
        throw new RuntimeException(String.format("Unable to read %s as CSV", FILENAME), e);
    }
}
Project: s3-inventory-usage-examples
File: InventoryReportLineMapper.java
/**
 * Map each line of the inventory report into a POJO
 * @return List<InventoryReportLine> which is a list of POJOs
 * @throws IOException when mapping with schema fails
 */
public List<InventoryReportLine> mapInventoryReportLine(List<String> inventoryReportLine) throws IOException {
    CsvMapper mapper = new CsvMapper();
    List<InventoryReportLine> inventoryReportLines = new ArrayList<>();
    for (String eachLine : inventoryReportLine) {
        MappingIterator<InventoryReportLine> iterator =
                mapper.readerFor(InventoryReportLine.class).with(schema).readValues(eachLine);
        List<InventoryReportLine> rowValue = iterator.readAll();
        inventoryReportLines.add(rowValue.get(0));
    }
    return inventoryReportLines;
}
Project: nudge-elasticstack-connector
File: GeoFreeGeoIpImpl.java
private GeoLocation parseGeoLocation(String geoLocationString) throws NudgeESConnectorException {
    GeoLocation geoLocation = new GeoLocation();
    if (geoLocationString != null && !geoLocationString.equals("")) {
        ObjectMapper mapper = new ObjectMapper();
        try (MappingIterator<GeoLocation> objectMappingIterator =
                     mapper.reader().forType(GeoLocation.class).readValues(geoLocationString)) {
            geoLocation = objectMappingIterator.next();
        } catch (IOException e) {
            throw new NudgeESConnectorException("Failed to parse geo data service response", e);
        }
    }
    return geoLocation;
}
Project: fabric8-forge
File: Models.java
/**
 * Loads a list of values of the given type from the JSON file
 */
public static <T> List<T> loadJsonValues(File json, Class<T> clazz) throws IOException {
    List<T> answer = new ArrayList<>();
    if (json.exists() && json.isFile()) {
        MappingIterator<T> iter = objectMapper.readerFor(clazz).readValues(json);
        while (iter.hasNext()) {
            answer.add(iter.next());
        }
    }
    return answer;
}
Project: fabric8-forge
File: NodeDtos.java
protected static <T> List<T> toList(MappingIterator<T> iter) throws java.io.IOException {
    List<T> answer = new ArrayList<>();
    while (iter != null && iter.hasNextValue()) {
        T value = iter.nextValue();
        answer.add(value);
    }
    return answer;
}
Project: styx
File: CliMain.java
private void workflowCreate() throws IOException, ExecutionException, InterruptedException {
    final String component = namespace.getString(parser.workflowCreateComponentId.getDest());
    final File file = namespace.get(parser.workflowCreateFile.getDest());
    final ObjectReader workflowReader = Json.YAML_MAPPER.reader()
            .forType(WorkflowConfiguration.class);
    final MappingIterator<WorkflowConfiguration> iterator;
    if (file == null || file.getName().equals("-")) {
        iterator = workflowReader.readValues(System.in);
    } else {
        iterator = workflowReader.readValues(file);
    }
    final List<WorkflowConfiguration> configurations = iterator.readAll();
    // TODO: validate workflows locally before creating them
    final List<CompletionStage<Workflow>> futures = configurations.stream()
            .map(configuration -> styxClient.createOrUpdateWorkflow(component, configuration))
            .collect(toList());
    for (CompletionStage<Workflow> future : futures) {
        final Workflow created = future.toCompletableFuture().get();
        cliOutput.printMessage("Workflow " + created.workflowId() + " in component "
                + created.componentId() + " created.");
    }
}
Project: funktion-connectors
File: Funktions.java
static <T> List<T> parseYamlValues(File file, Class<T> clazz) throws IOException {
    ObjectMapper mapper = createObjectMapper();
    MappingIterator<T> iter = mapper.readerFor(clazz).readValues(file);
    List<T> answer = new ArrayList<>();
    while (iter.hasNext()) {
        answer.add(iter.next());
    }
    return answer;
}
Project: registry
File: TruckEventsCsvConverter.java
public List<String> convertToJsonRecords(InputStream payloadStream, int limit) throws Exception {
    MappingIterator<TruckEvent> csvTruckEvents = readTruckEventsFromCsv(payloadStream);
    List<String> jsons = new ArrayList<>();
    int ct = 0;
    for (TruckEvent truckEvent : csvTruckEvents.readAll()) {
        truckEvent.setMiles((long) new Random().nextInt(500));
        String json = new ObjectMapper().writeValueAsString(truckEvent);
        jsons.add(json);
        if (++ct == limit) {
            break;
        }
    }
    return jsons;
}
Project: registry
File: TruckEventsCsvConverter.java
public void convertToJsonRecords(String payloadFile, String outputFileName) throws IOException {
    try (InputStream csvStream = new FileInputStream(payloadFile);
            FileWriter fos = new FileWriter(outputFileName)) {
        MappingIterator<TruckEvent> csvTruckEvents = readTruckEventsFromCsv(csvStream);
        for (TruckEvent truckEvent : csvTruckEvents.readAll()) {
            truckEvent.setMiles((long) new Random().nextInt(500));
            String output = new ObjectMapper().writeValueAsString(truckEvent);
            fos.write(output);
            fos.write(System.lineSeparator());
        }
    }
}
Project: discoursedb-core
File: ContributionBinaryLabelImporter.java
private List<BinaryLabeledContributionInterchange> fromCsv(String inputFileName) throws IOException {
    List<BinaryLabeledContributionInterchange> itemList = new ArrayList<>();
    try (InputStream in = new FileInputStream(inputFileName)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaFor(BinaryLabeledContributionInterchange.class);
        MappingIterator<BinaryLabeledContributionInterchange> it = mapper.readerFor(BinaryLabeledContributionInterchange.class).with(schema).readValues(in);
        while (it.hasNextValue()) {
            itemList.add(it.next());
        }
    }
    return itemList;
}
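For POJO-driven schemas like the one above, CsvMapper.schemaFor(...) derives the column layout from the class's properties (typically pinned down with @JsonPropertyOrder). A minimal round-trip sketch with a hypothetical Row POJO, assuming jackson-dataformat-csv on the classpath:

import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

public class SchemaForSketch {
    // Hypothetical POJO; @JsonPropertyOrder fixes the CSV column order.
    @JsonPropertyOrder({ "label", "text" })
    public static class Row {
        public String label;
        public String text;
    }

    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaFor(Row.class); // columns: label, text
        try (MappingIterator<Row> it =
                     mapper.readerFor(Row.class).with(schema).readValues("pos,hello\nneg,bye\n")) {
            while (it.hasNextValue()) {
                Row r = it.nextValue();
                System.out.println(r.label + " -> " + r.text);
            }
        }
    }
}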