/**
 * Tests the read/seek/getPos/skip operations of the input stream.
 */
private void testChecker(ChecksumFileSystem fileSys, boolean readCS)
    throws Exception {
  Path file = new Path("try.dat");
  if (readCS) {
    writeFile(fileSys, file);
  } else {
    writeFile(fileSys.getRawFileSystem(), file);
  }
  stm = fileSys.open(file);

  checkReadAndGetPos();
  checkSeek();
  checkSkip();
  // checkMark
  assertFalse(stm.markSupported());
  stm.close();
  cleanupFile(fileSys, file);
}
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
  if (fileSys instanceof ChecksumFileSystem) {
    fileSys = ((ChecksumFileSystem) fileSys).getRawFileSystem();
  }
  // Make the buffer size small to trigger code for HADOOP-922.
  FSDataInputStream stmRaw = fileSys.open(name, 1);
  byte[] expected = new byte[ONEMB];
  Random rand = new Random(seed);
  rand.nextBytes(expected);

  // Issue a simple read first.
  byte[] actual = new byte[128];
  stmRaw.seek(100000);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, 100000, expected, "First Small Read Test");

  // Now do a small seek of 4 bytes, within the same block.
  int newpos1 = 100000 + 128 + 4;
  stmRaw.seek(newpos1);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");

  // Seek another 256 bytes this time.
  int newpos2 = newpos1 + 256;
  stmRaw.seek(newpos2);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");

  // All done.
  stmRaw.close();
}
public void testFSInputChecker() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
  conf.set("fs.hdfs.impl",
           "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
  rand.nextBytes(expected);

  // test DFS
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  ChecksumFileSystem fileSys = (ChecksumFileSystem) cluster.getFileSystem();
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  // test Local FS
  fileSys = FileSystem.getLocal(conf);
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testFileCorruption((LocalFileSystem) fileSys);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
  }
}
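For reference, a minimal sketch of how the fs.<scheme>.impl key set above drives FileSystem resolution; the hdfs URI and port here are illustrative assumptions, not values taken from the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// FileSystem.get() instantiates whatever class is registered under
// fs.<scheme>.impl for the URI's scheme, so with the override above an
// hdfs:// URI resolves to ChecksumDistributedFileSystem.
Configuration conf = new Configuration();
conf.set("fs.hdfs.impl",
         "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
FileSystem fs =
    FileSystem.get(URI.create("hdfs://localhost:9000/"), conf); // URI is illustrative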
private void testSeekAndRead(ChecksumFileSystem fileSys) throws IOException {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);
  stm = fileSys.open(file,
      fileSys.getConf().getInt("io.file.buffer.size", 4096));
  checkSeekAndRead();
  stm.close();
  cleanupFile(fileSys, file);
}
private FSDataOutputStream create(String filename, boolean noChecksum)
    throws IOException {
  Path filePath = qualifiedPath(filename);
  // Even though it was qualified using the default FS, it may not be in it.
  FileSystem fs = filePath.getFileSystem(getConf());
  if (noChecksum && fs instanceof ChecksumFileSystem) {
    fs = ((ChecksumFileSystem) fs).getRawFileSystem();
  }
  return fs.create(filePath, true /* overwrite */);
}
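As an aside, the raw view returned by getRawFileSystem() bypasses the CRC side files that ChecksumFileSystem maintains, which is what makes the noChecksum path above (and the readCS=false case in testChecker) work. A minimal sketch, assuming Hadoop common on the classpath; the path and file contents are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

// LocalFileSystem is a ChecksumFileSystem, so writes through it create a
// ".<name>.crc" companion file; writes through the raw view do not.
Configuration conf = new Configuration();
LocalFileSystem checksumFs = FileSystem.getLocal(conf);
FileSystem rawFs = checksumFs.getRawFileSystem();

Path file = new Path("/tmp/raw-sketch.dat"); // illustrative path
FSDataOutputStream out = rawFs.create(file, true /* overwrite */);
out.writeBytes("written without a checksum side file");
out.close();

// getChecksumFile() derives the companion's name; after a raw write it
// should not exist, so a later checksummed read skips verification.
Path crc = checksumFs.getChecksumFile(file);
System.out.println("crc exists? " + checksumFs.exists(crc)); // expect false
rawFs.delete(file, false);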