public static void main(String[] args) throws IOException {
    // SequenceInputStream(InputStream s1, InputStream s2)
    // Goal: copy the contents of ByteArrayStreamDemo.java and DataStreamDemo.java into Copy.java
    InputStream s1 = new FileInputStream("ByteArrayStreamDemo.java");
    InputStream s2 = new FileInputStream("DataStreamDemo.java");
    SequenceInputStream sis = new SequenceInputStream(s1, s2);
    BufferedOutputStream bos = new BufferedOutputStream(
            new FileOutputStream("Copy.java"));

    // Reading and writing works exactly the same as with a single stream
    byte[] bys = new byte[1024];
    int len = 0;
    while ((len = sis.read(bys)) != -1) {
        bos.write(bys, 0, len);
    }

    bos.close();
    sis.close();
}
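A minimal sketch of the same copy written with try-with-resources, so both sources and the output are closed even if the copy fails partway; the file names are just the placeholders carried over from the example above.

import java.io.*;

public class SequenceCopy {
    public static void main(String[] args) throws IOException {
        // Closing the SequenceInputStream closes both underlying streams.
        try (InputStream sis = new SequenceInputStream(
                     new FileInputStream("ByteArrayStreamDemo.java"),
                     new FileInputStream("DataStreamDemo.java"));
             OutputStream bos = new BufferedOutputStream(
                     new FileOutputStream("Copy.java"))) {
            byte[] buffer = new byte[1024];
            int len;
            while ((len = sis.read(buffer)) != -1) {
                bos.write(buffer, 0, len);
            }
        }
    }
}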
/**
 * Gets the current contents of this byte stream as an InputStream. The
 * returned stream is backed by the buffers of <code>this</code> stream,
 * avoiding memory allocation and copying, thus saving space and time.<br>
 *
 * @return the current contents of this output stream.
 * @see java.io.ByteArrayOutputStream#toByteArray()
 * @see #reset()
 * @since 2.0
 */
private InputStream toBufferedInputStream() {
    int remaining = count;
    if (remaining == 0) {
        return new ClosedInputStream();
    }
    List<ByteArrayInputStream> list = new ArrayList<ByteArrayInputStream>(buffers.size());
    for (byte[] buf : buffers) {
        int c = Math.min(buf.length, remaining);
        list.add(new ByteArrayInputStream(buf, 0, c));
        remaining -= c;
        if (remaining == 0) {
            break;
        }
    }
    return new SequenceInputStream(Collections.enumeration(list));
}
@Override
public Optional<InputStream> getContent(final IRI identifier, final List<Range<Integer>> ranges) {
    requireNonNull(ranges, "Byte ranges may not be null");
    return getFileFromIdentifier(identifier).map(file -> {
        try {
            if (ranges.isEmpty()) {
                return new FileInputStream(file);
            } else {
                final List<InputStream> iss = new ArrayList<>();
                for (final Range<Integer> r : ranges) {
                    final InputStream input = new FileInputStream(file);
                    final long skipped = input.skip(r.getMinimum());
                    LOGGER.debug("Skipped {} bytes", skipped);
                    iss.add(new BoundedInputStream(input, r.getMaximum() - r.getMinimum()));
                }
                return new SequenceInputStream(asEnumeration(iss.iterator()));
            }
        } catch (final IOException ex) {
            throw new UncheckedIOException(ex);
        }
    });
}
/**
 * Upload a file of quotes in add mode.
 *
 * @param subscription
 *            The subscription identifier, will be used to filter the locations
 *            from the associated provider.
 * @param uploadedFile
 *            Instance entries files to import. Currently supports only CSV
 *            format.
 * @param columns
 *            the CSV header names.
 * @param term
 *            The default {@link ProvInstancePriceTerm} used when none is
 *            defined in the CSV line.
 * @param ramMultiplier
 *            The multiplier for imported RAM values. Default is 1.
 * @param encoding
 *            CSV encoding. Default is UTF-8.
 * @throws IOException
 *             When the CSV stream cannot be written.
 */
@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Path("{subscription:\\d+}/upload")
public void upload(@PathParam("subscription") final int subscription,
        @Multipart(value = "csv-file") final InputStream uploadedFile,
        @Multipart(value = "columns", required = false) final String[] columns,
        @Multipart(value = "term", required = false) final String term,
        @Multipart(value = "memoryUnit", required = false) final Integer ramMultiplier,
        @Multipart(value = "encoding", required = false) final String encoding) throws IOException {
    subscriptionResource.checkVisibleSubscription(subscription).getNode().getId();

    // Check the validity of the column names
    final String[] sanitizeColumns = ArrayUtils.isEmpty(columns) ? DEFAULT_COLUMNS : columns;
    checkHeaders(ACCEPTED_COLUMNS, sanitizeColumns);

    // Build the CSV header from the column array
    final String csvHeaders = StringUtils.chop(ArrayUtils.toString(sanitizeColumns)).substring(1).replace(',', ';')
            + "\n";

    // Build entries
    final String safeEncoding = ObjectUtils.defaultIfNull(encoding, StandardCharsets.UTF_8.name());
    csvForBean
            .toBean(InstanceUpload.class, new InputStreamReader(
                    new SequenceInputStream(new ByteArrayInputStream(csvHeaders.getBytes(safeEncoding)),
                            uploadedFile),
                    safeEncoding))
            .stream().filter(Objects::nonNull).forEach(i -> persist(i, subscription, term, ramMultiplier));
}
@Override
public InputStream getInputStream() throws IOException {
    boolean hasseparator = (_separator != null);
    InputStream[] streams;

    if (hasseparator) {
        streams = new InputStream[(_connections.length * 2) - 1];
    } else {
        streams = new InputStream[_connections.length];
    }

    for (int i = 0, len = _connections.length, sublen = len - 1, streamCounter = 0; i < len; i++, streamCounter++) {
        streams[streamCounter] = _connections[i].getInputStream();

        // Add the separator if needed
        if (hasseparator && (i < sublen)) {
            streams[++streamCounter] = new SeparatorInputStream(_separator);
        }
    }
    return new SequenceInputStream(new ArrayEnumeration<InputStream>(streams));
}
public static Model mergeResourceWithPrefixes(InputStream inputStreamPrefixes, InputStream inputStreamData)
        throws IOException {
    final Resource mergedDataResource =
            new InputStreamResource(new SequenceInputStream(inputStreamPrefixes, inputStreamData));

    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    RDFWriter turtleWriter = Rio.createWriter(RDFFormat.TURTLE, byteArrayOutputStream);
    RDFParser trigParser = Rio.createParser(RDFFormat.TRIG);
    trigParser.setRDFHandler(turtleWriter);
    trigParser.parse(mergedDataResource.getInputStream(), "");

    Model result = Rio.parse(new ByteArrayInputStream(byteArrayOutputStream.toByteArray()), "", RDFFormat.TURTLE);

    byteArrayOutputStream.close();
    inputStreamData.close();

    return result;
}
private void validate(List<InputStream> configurationStreams) {
    if (!configurationStreams.isEmpty()) {
        try (InputStream stream = new SequenceInputStream(Collections.enumeration(configurationStreams))) {
            final ValidationReport report = shaclValidator.validate(RdfModelTransformer.getModel(stream),
                    RdfModelTransformer.getModel(elmoShapes.getInputStream()));
            if (!report.isValid()) {
                throw new ShaclValidationException(report.printReport());
            }
        } catch (IOException ex) {
            throw new ShaclValidationException("Configuration files could not be read.", ex);
        }
    } else {
        LOG.error("Found no configuration files");
    }
}
/**
 * Get Swagger UI main index page.
 *
 * @return 200 OK
 * @throws IOException if unable to get index resource
 */
@GET
@Path("index.html")
public Response getIndex() throws IOException {
    InputStream stream = getClass().getClassLoader().getResourceAsStream(DOCS + "index.html");
    nullIsNotFound(stream, "index.html not found");

    String index = new String(toByteArray(stream));

    int p1s = split(index, 0, INJECT_START);
    int p1e = split(index, p1s, INJECT_END);
    int p2s = split(index, p1e, null);

    StreamEnumeration streams = new StreamEnumeration(of(stream(index, 0, p1s),
                                                         includeOptions(get(ApiDocService.class)),
                                                         stream(index, p1e, p2s)));

    return ok(new SequenceInputStream(streams))
            .header(CONTENT_TYPE, TEXT_HTML).build();
}
@GET
@Produces(SCRIPT)
public Response getMainModule() throws IOException {
    UiExtensionService service = get(UiExtensionService.class);
    InputStream jsTemplate = getClass().getClassLoader().getResourceAsStream(MAIN_JS);
    String js = new String(toByteArray(jsTemplate));

    int p1s = split(js, 0, INJECT_VIEW_IDS_START) - INJECT_VIEW_IDS_START.length();
    int p1e = split(js, 0, INJECT_VIEW_IDS_END);
    int p2s = split(js, p1e, null);

    StreamEnumeration streams = new StreamEnumeration(of(stream(js, 0, p1s),
                                                         includeViewIds(service),
                                                         stream(js, p1e, p2s)));

    return Response.ok(new SequenceInputStream(streams)).build();
}
@GET
@Produces(MediaType.TEXT_HTML)
public Response getNavigation() throws IOException {
    UiExtensionService service = get(UiExtensionService.class);
    InputStream navTemplate = getClass().getClassLoader().getResourceAsStream(NAV_HTML);
    String html = new String(toByteArray(navTemplate));

    int p1s = split(html, 0, INJECT_VIEW_ITEMS_START);
    int p1e = split(html, 0, INJECT_VIEW_ITEMS_END);
    int p2s = split(html, p1e, null);

    StreamEnumeration streams = new StreamEnumeration(of(stream(html, 0, p1s),
                                                         includeNavItems(service),
                                                         stream(html, p1e, p2s)));

    return Response.ok(new SequenceInputStream(streams)).build();
}
private void buildTestData() {
    List<InputStream> testDataInputStreams = new ArrayList<InputStream>();
    for (String testFile : testFiles) {
        /** A leading / is required for the getResourceAsStream method, otherwise the file is not loaded. **/
        testFile = "/" + testFile;
        testDataInputStreams.add(TestData.class.getResourceAsStream(testFile));
        /** A newline character is added, otherwise two files are not merged properly and the YAML parser reports an error. **/
        testDataInputStreams.add(new ByteArrayInputStream("\n".getBytes()));
    }

    SequenceInputStream sequenceInputStream = new SequenceInputStream(Collections.enumeration(testDataInputStreams));
    //System.out.print(convertToString(sequenceInputStream));

    try {
        Map testData = yamlMapper.readValue(sequenceInputStream, Map.class);
        this.dataJsonPath = JsonPath.given(jsonMapper.writeValueAsString(testData));
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Take a non-XML section and make it XML simply by adding an opening and
 * closing Root element.
 *
 * @param stream
 *            InputStream to encapsulate in XML
 * @return InputStream in XML format
 */
public InputStream inputStreamXmlEncapsulate(InputStream stream) {
    String frontNode = "<Root>";
    String endNode = "</Root>";
    String sectionData = "";
    try {
        sectionData = IOUtils.toString(stream, "UTF-8");
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    String escapedXml = StringEscapeUtils.escapeXml(sectionData);
    List<InputStream> xmlStreamList = Arrays.asList(
            new ByteArrayInputStream(frontNode.getBytes()),
            (InputStream) new ByteArrayInputStream(escapedXml.getBytes()),
            new ByteArrayInputStream(endNode.getBytes()));
    InputStream endStream = new SequenceInputStream(Collections.enumeration(xmlStreamList));
    return endStream;
}
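The same technique, escaped content wrapped between a prologue and an epilogue via a SequenceInputStream, in a self-contained sketch using only the JDK; the manual escaping below stands in for StringEscapeUtils.escapeXml and is an assumption, not the original code.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class XmlEncapsulateSketch {
    static InputStream encapsulate(String sectionData) {
        // Minimal escaping in place of StringEscapeUtils.escapeXml (assumption).
        String escaped = sectionData.replace("&", "&amp;")
                                    .replace("<", "&lt;")
                                    .replace(">", "&gt;");
        List<InputStream> parts = Arrays.asList(
                new ByteArrayInputStream("<Root>".getBytes(StandardCharsets.UTF_8)),
                new ByteArrayInputStream(escaped.getBytes(StandardCharsets.UTF_8)),
                new ByteArrayInputStream("</Root>".getBytes(StandardCharsets.UTF_8)));
        return new SequenceInputStream(Collections.enumeration(parts));
    }

    public static void main(String[] args) throws IOException {
        try (InputStream xml = encapsulate("1 < 2 & 3 > 2")) {
            System.out.println(new String(xml.readAllBytes(), StandardCharsets.UTF_8));
            // Prints: <Root>1 &lt; 2 &amp; 3 &gt; 2</Root>
        }
    }
}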
/**
 * Gets the current contents of this byte stream as an InputStream. The
 * returned stream is backed by the buffers of <code>this</code> stream,
 * avoiding memory allocation and copying, thus saving space and time.<br>
 *
 * @return the current contents of this output stream.
 * @see java.io.ByteArrayOutputStream#toByteArray()
 * @see #reset()
 * @since 2.5
 */
public synchronized InputStream toInputStream() {
    int remaining = count;
    if (remaining == 0) {
        return new ClosedInputStream();
    }
    final List<ByteArrayInputStream> list = new ArrayList<ByteArrayInputStream>(buffers.size());
    for (final byte[] buf : buffers) {
        final int c = Math.min(buf.length, remaining);
        list.add(new ByteArrayInputStream(buf, 0, c));
        remaining -= c;
        if (remaining == 0) {
            break;
        }
    }
    reuseBuffers = false;
    return new SequenceInputStream(Collections.enumeration(list));
}
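The method above appears to come from Commons IO's org.apache.commons.io.output.ByteArrayOutputStream, where toInputStream() is public since 2.5. Assuming that, a usage sketch might look like this:

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;

public class ToInputStreamDemo {
    public static void main(String[] args) throws Exception {
        try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
            out.write("hello ".getBytes(StandardCharsets.UTF_8));
            out.write("world".getBytes(StandardCharsets.UTF_8));

            // No defensive copy of the internal buffers is made; the returned
            // stream is a SequenceInputStream over them.
            try (InputStream in = out.toInputStream()) {
                System.out.println(IOUtils.toString(in, StandardCharsets.UTF_8)); // hello world
            }
        }
    }
}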
public static void main(String[] args) throws IOException {
    // Goal: copy the contents of the following three files into Copy.java:
    // ByteArrayStreamDemo.java, CopyFileDemo.java, DataStreamDemo.java

    // SequenceInputStream(Enumeration e)
    // A quick review reminds us that Enumeration is the return type of a method on Vector:
    // Enumeration<E> elements()
    Vector<InputStream> v = new Vector<InputStream>();
    InputStream s1 = new FileInputStream("ByteArrayStreamDemo.java");
    InputStream s2 = new FileInputStream("CopyFileDemo.java");
    InputStream s3 = new FileInputStream("DataStreamDemo.java");
    v.add(s1);
    v.add(s2);
    v.add(s3);
    Enumeration<InputStream> en = v.elements();
    SequenceInputStream sis = new SequenceInputStream(en);

    BufferedOutputStream bos = new BufferedOutputStream(
            new FileOutputStream("Copy.java"));

    // Reading and writing works exactly the same as with a single stream
    byte[] bys = new byte[1024];
    int len = 0;
    while ((len = sis.read(bys)) != -1) {
        bos.write(bys, 0, len);
    }

    bos.close();
    sis.close();
}
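The Vector is only needed here for its elements() method; a minimal sketch of the same copy using a List and Collections.enumeration, again with the placeholder file names from the example above:

import java.io.*;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SequenceCopyList {
    public static void main(String[] args) throws IOException {
        List<InputStream> sources = Arrays.asList(
                new FileInputStream("ByteArrayStreamDemo.java"),
                new FileInputStream("CopyFileDemo.java"),
                new FileInputStream("DataStreamDemo.java"));

        try (InputStream sis = new SequenceInputStream(Collections.enumeration(sources));
             OutputStream bos = new BufferedOutputStream(new FileOutputStream("Copy.java"))) {
            byte[] buffer = new byte[1024];
            int len;
            while ((len = sis.read(buffer)) != -1) {
                bos.write(buffer, 0, len);
            }
        }
    }
}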
private HashMap<String, ByteArrayOutputStream> populate(Storage s, String context) throws Exception {
    HashMap<String, ByteArrayOutputStream> appendData = new HashMap<>();
    byte[] extraData = new byte[1024];
    for (int segmentId = 0; segmentId < SEGMENT_COUNT; segmentId++) {
        String segmentName = getSegmentName(segmentId, context);
        createSegment(segmentName, s);
        val writeHandle = s.openWrite(segmentName).join();
        ByteArrayOutputStream writeStream = new ByteArrayOutputStream();
        appendData.put(segmentName, writeStream);

        long offset = 0;
        for (int j = 0; j < APPENDS_PER_SEGMENT; j++) {
            byte[] writeData = String.format(APPEND_FORMAT, segmentName, j).getBytes();

            // Append some garbage at the end to make sure we only write as much as instructed,
            // and not the whole InputStream.
            val dataStream = new SequenceInputStream(new ByteArrayInputStream(writeData),
                    new ByteArrayInputStream(extraData));
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            writeStream.write(writeData);
            offset += writeData.length;
        }
    }

    return appendData;
}
private void setUpXMLParser(ReadableByteChannel channel, byte[] lookAhead) throws IOException {
    try {
        // We use Woodstox because the StAX implementation provided by OpenJDK reports
        // character locations incorrectly. Note that Woodstox still currently reports *byte*
        // locations incorrectly when parsing documents that contain multi-byte characters.
        XMLInputFactory2 xmlInputFactory = (XMLInputFactory2) XMLInputFactory.newInstance();
        this.parser = xmlInputFactory.createXMLStreamReader(
                new SequenceInputStream(
                        new ByteArrayInputStream(lookAhead), Channels.newInputStream(channel)),
                getCurrentSource().configuration.getCharset());

        // Current offset should be the offset before reading the record element.
        while (true) {
            int event = parser.next();
            if (event == XMLStreamConstants.START_ELEMENT) {
                String localName = parser.getLocalName();
                if (localName.equals(getCurrentSource().configuration.getRecordElement())) {
                    break;
                }
            }
        }
    } catch (FactoryConfigurationError | XMLStreamException e) {
        throw new IOException(e);
    }
}
@Override
public InputStream asStream() {
    return new SequenceInputStream(new Enumeration<InputStream>() {

        /**
         * Iterator over all resources associated with this
         * SequenceResource.
         */
        private final Iterator<Resource> resourceIterator = resources.iterator();

        @Override
        public boolean hasMoreElements() {
            return resourceIterator.hasNext();
        }

        @Override
        public InputStream nextElement() {
            return resourceIterator.next().asStream();
        }
    });
}
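The same Iterator-to-Enumeration adaptation can be written generically; a minimal sketch, where the helper name concat is an invention for illustration:

import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.Enumeration;
import java.util.Iterator;

public final class Streams {
    // Hypothetical helper: adapts any Iterator of InputStreams to the
    // Enumeration that SequenceInputStream expects.
    static SequenceInputStream concat(Iterator<? extends InputStream> it) {
        return new SequenceInputStream(new Enumeration<InputStream>() {
            @Override
            public boolean hasMoreElements() {
                return it.hasNext();
            }

            @Override
            public InputStream nextElement() {
                return it.next();
            }
        });
    }
}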
/**
 * Merges a list of source files. Creates missing parent directories if
 * needed.
 *
 * @param mergedFile output file resulting from the merge step
 * @throws IOException when the merge step fails
 */
protected void merge(File mergedFile) throws IOException {
    mergedFile.getParentFile().mkdirs();

    try (InputStream sequence = new SequenceInputStream(new SourceFilesEnumeration(log, files, verbose, charset));
         OutputStream out = new FileOutputStream(mergedFile);
         InputStreamReader sequenceReader = new InputStreamReader(sequence, charset);
         OutputStreamWriter outWriter = new OutputStreamWriter(out, charset)) {
        log.info("Creating the merged file ["
                + ((verbose) ? mergedFile.getPath() : mergedFile.getName()) + "].");
        IOUtil.copy(sequenceReader, outWriter, bufferSize);
    } catch (IOException e) {
        log.error("Failed to concatenate files.", e);
        throw e;
    }
}
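SourceFilesEnumeration is project-specific, but the idea it represents, an Enumeration that opens each file only when SequenceInputStream asks for it so that a single FileInputStream is open at a time, can be sketched as follows; the class name here is a hypothetical stand-in, not the original implementation.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;

// Hypothetical stand-in for SourceFilesEnumeration: opens one file at a time,
// lazily, as the previous stream is exhausted.
class LazyFileEnumeration implements Enumeration<InputStream> {
    private final Iterator<File> files;

    LazyFileEnumeration(List<File> files) {
        this.files = files.iterator();
    }

    @Override
    public boolean hasMoreElements() {
        return files.hasNext();
    }

    @Override
    public InputStream nextElement() {
        try {
            return new FileInputStream(files.next());
        } catch (FileNotFoundException e) {
            throw new UncheckedIOException(e);
        }
    }
}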
protected PageHeader readPageHeader() throws IOException {
    PageHeader pageHeader;
    int initialPos = this.pos;
    try {
        pageHeader = Util.readPageHeader(this);
    } catch (IOException e) {
        // This works around a bug where the compressedLength of the chunk is missing the size
        // of the header of the dictionary. To allow reading older files (using a dictionary)
        // we need this; usually 13 to 19 bytes are missing. If the last page is smaller than
        // this, the page header itself is truncated in the buffer.
        this.pos = initialPos; // resetting the buffer to the position before we got the error
        LOG.info("completing the column chunk to read the page header");
        // Try again from the buffer plus the remainder of the stream.
        pageHeader = Util.readPageHeader(new SequenceInputStream(this, f));
    }
    return pageHeader;
}