private static int[] computeTrailerSizeByVersion() {
  int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION; version <= MAX_FORMAT_VERSION; ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
private static int[] computeTrailerSizeByVersion() {
  int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION; version <= MAX_FORMAT_VERSION; ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
private static int[] computeTrailerSizeByVersion() {
  int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
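All three variants above rely on the same measuring trick: DataOutputStream keeps a running count of the bytes written through it, so serializing into a DataOutputStream wrapped around a NullOutputStream yields the serialized size without buffering any data. A minimal self-contained sketch of the pattern (the values written here are arbitrary stand-ins for the trailer fields):

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public final class SerializedSizeSketch {

  // Stand-in for NullOutputStream: accepts and discards all writes.
  static final OutputStream NULL_OUT = new OutputStream() {
    @Override public void write(int b) { /* discard */ }
    @Override public void write(byte[] b, int off, int len) { /* discard */ }
  };

  public static void main(String[] args) throws IOException {
    DataOutputStream dos = new DataOutputStream(NULL_OUT);
    dos.writeInt(42);          // 4 bytes
    dos.writeUTF("trailer");   // 2-byte length prefix + 7 bytes
    // DataOutputStream.size() returns the number of bytes written so far.
    System.out.println("serialized size = " + dos.size()); // prints 13
  }
}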
/**
 * Find the size of compressed data assuming that the buffer will be
 * compressed using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from the codec
 * @param inputBuffer array to be compressed
 * @param offset offset to the beginning of the data
 * @param length length of the data to be compressed
 * @return size of the compressed data in bytes
 * @throws IOException if writing to the compression stream fails
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(new NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream =
      algo.createCompressionStream(compressedStream, compressor, 0);
  compressingStream.write(inputBuffer, offset, length);
  compressingStream.flush();
  compressingStream.close();
  return compressedStream.size();
}
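Here the compressed bytes themselves are thrown away; only the byte count survives, so the memory cost stays constant regardless of input size. A hypothetical call site might look like the following fragment, run inside a method that may throw IOException (Compression.Algorithm.GZ, getCompressor(), and returnCompressor() are assumptions based on HBase's compression API, and data is a placeholder input):

// Hypothetical usage sketch; the GZ algorithm handle and the
// compressor pooling calls are assumptions based on HBase's API.
byte[] data = new byte[64 * 1024]; // placeholder input
Compression.Algorithm algo = Compression.Algorithm.GZ;
Compressor compressor = algo.getCompressor();
try {
  int compressedSize = getCompressedSize(algo, compressor, data, 0, data.length);
  System.out.println("would compress to " + compressedSize + " bytes");
} finally {
  algo.returnCompressor(compressor); // hand the pooled compressor back
}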
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short) 1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  assertEquals(5, peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short) 1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  DFSClient client = ((DistributedFileSystem) fs).dfs;
  assertEquals(5, client.peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, client.peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
@Test
public void testResultRowNumWhenSelectingOnPartitionedTable() throws Exception {
  try (TajoCli cli2 = new TajoCli(cluster.getConfiguration(), new String[]{}, null,
      System.in, new NullOutputStream(), new NullOutputStream())) {
    cli2.executeScript("create table region_part (r_regionkey int8, r_name text) " +
        "partition by column (r_comment text) as select * from region");

    setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
    tajoCli.executeScript("select r_comment from region_part where r_comment = 'hs use ironic, even requests. s'");

    String consoleResult = new String(out.toByteArray());
    assertOutputResult(consoleResult);
  } finally {
    tajoCli.executeScript("drop table region_part purge");
  }
}
public OutputRedirector(InputStream inputStream, OutputStream outputStream) {
  this.inputStream = new BufferedInputStream(inputStream);
  if (outputStream != null) {
    this.outputStream = outputStream;
  } else {
    // No destination was given, so silently discard the redirected output.
    this.outputStream = new NullOutputStream();
  }
}
public OutputStream getOutputStream() {
  // Callers get a sink that discards everything written to it.
  return new NullOutputStream();
}
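For reference, a NullOutputStream like the one used throughout these examples (Commons IO ships one as org.apache.commons.io.output.NullOutputStream) takes only a few lines to write; a sketch:

import java.io.OutputStream;

// Sketch of a NullOutputStream equivalent: every write succeeds and the
// data goes nowhere, which is what makes it useful as a sink for code that
// insists on an OutputStream (byte counting, draining streams, silencing
// console output in tests, as in the examples above).
public class NullOutputStream extends OutputStream {
  @Override
  public void write(int b) {
    // discard the byte
  }

  @Override
  public void write(byte[] b, int off, int len) {
    // discard the whole buffer without looping per byte
  }
}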