Example source code for the Java class org.apache.hadoop.fs.FileSystem
Project: hadoop
File: TestWebHDFS.java
static void verifyPread(FileSystem fs, Path p, long offset, long length,
byte[] buf, byte[] expected) throws IOException {
long remaining = length - offset;
long checked = 0;
LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);
final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d",
offset, remaining);
final FSDataInputStream in = fs.open(p, 64 << 10);
for(; remaining > 0; ) {
t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
final int n = (int)Math.min(remaining, buf.length);
in.readFully(offset, buf, 0, n);
checkData(offset, remaining, n, buf, expected);
offset += n;
remaining -= n;
checked += n;
}
in.close();
t.end(checked);
}
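A minimal usage sketch (not part of the original test): write a file of known random bytes, then verify positional reads over the whole range. The sizes and the seed are illustrative; fs and p are assumed to come from the surrounding test fixture.
byte[] expected = new byte[1 << 20];                  // 1 MB of known test data
new java.util.Random(42).nextBytes(expected);
try (FSDataOutputStream out = fs.create(p, true)) {   // overwrite any previous file
  out.write(expected);
}
byte[] buf = new byte[64 << 10];                      // 64 KB scratch buffer, refilled each loop iteration
verifyPread(fs, p, 0, expected.length, buf, expected);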
Project: hadoop
File: TestRead.java
/**
* Regression test for HDFS-7045.
* If a deadlock happens, the test will time out.
* @throws Exception
*/
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).format(true).build();
try {
FileSystem fs = cluster.getFileSystem();
fs.open(new Path("/.reserved/.inodes/file"));
Assert.fail("Opening a non-existent file should fail.");
} catch (FileNotFoundException e) {
// Expected
} finally {
cluster.shutdown();
}
}
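For comparison, a hedged sketch of the same regression check written with JUnit 4's expected attribute (assuming the same imports and MiniDFSCluster setup as above; the method name is made up):
@Test(timeout = 60000, expected = FileNotFoundException.class)
public void testReadReservedPathExpectedException() throws Exception {
  // Opening a non-existent /.reserved/.inodes path must fail fast with
  // FileNotFoundException instead of deadlocking (HDFS-7045).
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).format(true).build();
  try {
    cluster.getFileSystem().open(new Path("/.reserved/.inodes/file"));
  } finally {
    cluster.shutdown();
  }
}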
Project: aliyun-maxcompute-data-collectors
File: TestSplittableBufferedWriter.java
/** Create the directory where we'll write our test files, and
* make sure it has no files in it.
*/
private void ensureEmptyWriteDir() throws IOException {
FileSystem fs = FileSystem.getLocal(getConf());
Path writeDir = getWritePath();
fs.mkdirs(writeDir);
FileStatus [] stats = fs.listStatus(writeDir);
for (FileStatus stat : stats) {
if (stat.isDir()) {
fail("setUp(): Write directory " + writeDir
+ " contains subdirectories");
}
LOG.debug("setUp(): Removing " + stat.getPath());
if (!fs.delete(stat.getPath(), false)) {
fail("setUp(): Could not delete residual file " + stat.getPath());
}
}
if (!fs.exists(writeDir)) {
fail("setUp: Could not create " + writeDir);
}
}
Project: hadoop
File: FileOutputCommitter.java
/**
* Create a file output committer
* @param outputPath the job's output path, or null if you want the output
* committer to act as a noop.
* @param context the task's context
* @throws IOException
*/
@Private
public FileOutputCommitter(Path outputPath,
JobContext context) throws IOException {
Configuration conf = context.getConfiguration();
algorithmVersion =
conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
if (algorithmVersion != 1 && algorithmVersion != 2) {
throw new IOException("Only 1 or 2 algorithm version is supported");
}
if (outputPath != null) {
FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
this.outputPath = fs.makeQualified(outputPath);
}
}
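A hedged configuration sketch: the algorithm version checked above comes from the job configuration, so a client can opt into version 2 (which moves task output into the final directory at task-commit time) before submitting the job. The job variable is assumed; the string below is the property key behind FILEOUTPUTCOMMITTER_ALGORITHM_VERSION.
// Any value other than 1 or 2 makes the constructor above throw IOException.
Configuration jobConf = job.getConfiguration();
jobConf.setInt("mapreduce.fileoutputcommitter.algorithm.version", 2);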
Project: ditb
File: HBaseTestCase.java
/**
* Note that this method must be called after the mini hdfs cluster has
* started or we end up with a local file system.
*/
@Override
protected void setUp() throws Exception {
super.setUp();
localfs =
(conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);
if (fs == null) {
this.fs = FileSystem.get(conf);
}
try {
if (localfs) {
this.testDir = getUnitTestdir(getName());
if (fs.exists(testDir)) {
fs.delete(testDir, true);
}
} else {
this.testDir = FSUtils.getRootDir(conf);
}
} catch (Exception e) {
LOG.fatal("error during setup", e);
throw e;
}
}
Project: hadoop
File: TestTokenCache.java
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
URISyntaxException {
Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data", "test/build/data"));
// ick, but need fq path minus file:/
String binaryTokenFile =
FileSystem.getLocal(conf)
.makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
.getPath();
MockFileSystem fs1 = createFileSystemForServiceName("service1");
Credentials creds = new Credentials();
Token<?> token1 = fs1.getDelegationToken(renewer);
creds.addToken(token1.getService(), token1);
// must be set first, else the obtain tokens call below will fail with FNF
conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
String fs_addr = fs1.getCanonicalServiceName();
Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
assertNotNull("Token for nn is null", nnt);
}
Project: dremio-oss
File: BackupRestoreUtil.java
private static void validateFileWithChecksum(FileSystem fs, Path filePath, BackupFileInfo backupFileInfo) throws IOException {
final CheckedInputStream cin = new CheckedInputStream(fs.open(filePath), new CRC32());
final BufferedReader reader = new BufferedReader(new InputStreamReader(cin));
final ObjectMapper objectMapper = new ObjectMapper();
String line;
long records = 0;
// parse records just to make sure formatting is correct
while ((line = reader.readLine()) != null) {
objectMapper.readValue(line, BackupRecord.class);
++records;
}
cin.close();
long found = cin.getChecksum().getValue();
if (backupFileInfo.getChecksum() != found) {
throw new IOException(format("Corrupt backup data file %s. Expected checksum %x, found %x", filePath, backupFileInfo.getChecksum(), found));
}
if (backupFileInfo.getRecords() != records) {
throw new IOException(format("Corrupt backup data file %s. Expected records %x, found %x", filePath, backupFileInfo.getRecords(), records));
}
}
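A hedged mirror of the write side (illustrative only, not Dremio's actual dump code): wrapping the output in a CheckedOutputStream so the CRC32 and record count that end up in BackupFileInfo match what validateFileWithChecksum recomputes; jsonLines stands for an assumed Iterable<String> of serialized BackupRecords.
final CheckedOutputStream cout = new CheckedOutputStream(fs.create(filePath, true), new CRC32());
final BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(cout));
long records = 0;
for (String jsonLine : jsonLines) {   // one JSON document per line, as read back above
  writer.write(jsonLine);
  writer.newLine();
  ++records;
}
writer.close();                       // flushes through to the checksum stream
final long checksum = cout.getChecksum().getValue();
// checksum and records would then be recorded in the BackupFileInfo that the
// validation method compares against.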
Project: hadoop
File: NameNode.java
/**
* @return address of file system
*/
public static InetSocketAddress getAddress(URI filesystemURI) {
String authority = filesystemURI.getAuthority();
if (authority == null) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s has no authority.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
}
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
filesystemURI.getScheme())) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
HdfsConstants.HDFS_URI_SCHEME));
}
return getAddress(authority);
}
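A hedged usage sketch (host and port are illustrative): a well-formed hdfs:// URI resolves to a socket address, while a URI without an authority or with a different scheme hits the IllegalArgumentException branches above.
URI fsUri = URI.create("hdfs://namenode.example.com:8020");      // hypothetical NameNode URI
InetSocketAddress addr = NameNode.getAddress(fsUri);
System.out.println(addr.getHostName() + ":" + addr.getPort());   // namenode.example.com:8020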
Project: circus-train
File: PathToPathMetadata.java
@Override
public PathMetadata apply(@Nonnull Path location) {
try {
FileSystem fs = location.getFileSystem(conf);
FileStatus fileStatus = fs.getFileStatus(location);
FileChecksum checksum = null;
if (fileStatus.isFile()) {
checksum = fs.getFileChecksum(location);
}
List<PathMetadata> childPathDescriptors = new ArrayList<>();
if (fileStatus.isDirectory()) {
FileStatus[] childStatuses = fs.listStatus(location);
for (FileStatus childStatus : childStatuses) {
childPathDescriptors.add(apply(childStatus.getPath()));
}
}
return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);
} catch (IOException e) {
throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
}
}
Project: dremio-oss
File: BackupRestoreUtil.java
public static BackupStats createBackup(FileSystem fs, Path backupRootDir, LocalKVStoreProvider localKVStoreProvider, HomeFileConfig homeFileStore) throws IOException, NamespaceException {
final Date now = new Date();
final BackupStats backupStats = new BackupStats();
final Path backupDir = new Path(backupRootDir, format("%s%s", BACKUP_DIR_PREFIX, DATE_FORMAT.format(now)));
fs.mkdirs(backupDir, DEFAULT_PERMISSIONS);
backupStats.backupPath = backupDir.toUri().getPath();
for (Map.Entry<StoreBuilderConfig, CoreKVStore<?, ?>> entry : localKVStoreProvider.getStores().entrySet()) {
final StoreBuilderConfig storeBuilderConfig = entry.getKey();
if (TokenUtils.TOKENS_TABLE_NAME.equals(storeBuilderConfig.getName())) {
// Skip creating a backup of tokens table
// TODO: In the future, if there are other tables that should not be backed up, this could be part of
// StoreBuilderConfig interface
continue;
}
final BackupFileInfo backupFileInfo = new BackupFileInfo().setKvstoreInfo(DataStoreUtils.toInfo(storeBuilderConfig));
dumpTable(fs, backupDir, backupFileInfo, entry.getValue());
++backupStats.tables;
}
backupUploadedFiles(fs, backupDir, homeFileStore, backupStats);
return backupStats;
}
Project: hadoop-oss
File: TestCredentialProviderFactory.java
public void checkPermissionRetention(Configuration conf, String ourUrl,
Path path) throws Exception {
CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
// let's add a new credential and flush and check that permissions are still set to 777
char[] cred = new char[32];
for(int i =0; i < cred.length; ++i) {
cred[i] = (char) i;
}
// create a new key
try {
provider.createCredentialEntry("key5", cred);
} catch (Exception e) {
e.printStackTrace();
throw e;
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = CredentialProviderFactory.getProviders(conf).get(0);
assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue("Permissions should have been retained from the preexisting " +
"keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
Project: hadoop
File: TestFileCreation.java
/**
* Test that server default values can be retrieved on the client side
*/
@Test
public void testServerDefaults() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
FsServerDefaults serverDefaults = fs.getServerDefaults();
assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
} finally {
fs.close();
cluster.shutdown();
}
}
Project: ditb
File: HFileOutputFormat2.java
/**
* Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
* <code>splitPoints</code>. Cleans up the partitions file after the job exits.
*/
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
throws IOException {
Configuration conf = job.getConfiguration();
// create the partitions file
FileSystem fs = FileSystem.get(conf);
String hbaseTmpFsDir =
conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
fs.makeQualified(partitionsPath);
writePartitions(conf, partitionsPath, splitPoints);
fs.deleteOnExit(partitionsPath);
// configure job to use it
job.setPartitionerClass(TotalOrderPartitioner.class);
TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
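A hedged follow-up sketch: once configurePartitioner has run, the partition file location can be read back from the job configuration (the LOG call is illustrative).
Path partitionFile =
    new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration()));
LOG.info("TotalOrderPartitioner will read " + splitPoints.size()
    + " split points from " + partitionFile);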
Project: hadoop
File: TestFileInputFormat.java
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
FileSystem localFs) throws IOException {
Path base1 = new Path(TEST_ROOT_DIR, "input1");
Path base2 = new Path(TEST_ROOT_DIR, "input2");
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
conf.setBoolean(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
true);
localFs.mkdirs(base1);
Path inFile1 = new Path(base1, "file1");
Path inFile2 = new Path(base1, "file2");
localFs.createNewFile(inFile1);
localFs.createNewFile(inFile2);
List<Path> expectedPaths = Lists.newArrayList();
return expectedPaths;
}
Project: hadoop
File: TestLocalJobSubmission.java
/**
* test the local job submission options of
* -jt local -libjars
* @throws IOException
*/
@Test
public void testLocalJobLibjarsOption() throws IOException {
Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));
Configuration conf = new Configuration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
final String[] args = {
"-jt" , "local", "-libjars", jarPath.toString(),
"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
};
int res = -1;
try {
res = ToolRunner.run(conf, new SleepJob(), args);
} catch (Exception e) {
System.out.println("Job failed with " + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0:", 0, res);
}
Project: ditb
File: TestFSTableDescriptors.java
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
"testCreateTableDescriptorUpdatesIfThereExistsAlready"));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDir(htd.getTableName());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
FileStatus[] statuses = fs.listStatus(tmpTableDir);
assertTrue(statuses.length == 0);
assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
Project: ditb
File: FSUtils.java
/**
* Runs through the HBase rootdir and creates a reverse lookup map for
* table StoreFile names to the full Path.
* <br>
* Example...<br>
* Key = 3944417774205889744 <br>
* Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
*
* @param fs The file system to use.
* @param hbaseRootDir The root directory to scan.
* @param errors ErrorReporter instance or null
* @return Map keyed by StoreFile name with a value of the full Path.
* @throws IOException When scanning the directory fails.
*/
public static Map<String, Path> getTableStoreFilePathMap(
final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
throws IOException {
Map<String, Path> map = new HashMap<String, Path>();
// if this method looks similar to 'getTableFragmentation' that is because
// it was borrowed from it.
// only include the directory paths to tables
for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
getTableStoreFilePathMap(map, fs, hbaseRootDir,
FSUtils.getTableName(tableDir), errors);
}
return map;
}
Project: hadoop
File: TestCodec.java
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
CompressionType type, int records) throws Exception {
FileSystem fs = FileSystem.get(conf);
LOG.info("Creating MapFiles with " + records +
" records using codec " + clazz.getSimpleName());
Path path = new Path(new Path(
System.getProperty("test.build.data", "/tmp")),
clazz.getSimpleName() + "-" + type + "-" + records);
LOG.info("Writing " + path);
createMapFile(conf, fs, path, clazz.newInstance(), type, records);
MapFile.Reader reader = new MapFile.Reader(path, conf);
Text key1 = new Text("002");
assertNotNull(reader.get(key1, new Text()));
Text key2 = new Text("004");
assertNotNull(reader.get(key2, new Text()));
}
Project: hadoop
File: TestKeyProviderFactory.java
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// let's add a new key and flush and check that permissions are still set to 777
byte[] key = new byte[16];
for(int i =0; i < key.length; ++i) {
key[i] = (byte) i;
}
// create a new key
try {
provider.createKey("key5", key, KeyProvider.options(conf));
} catch (Exception e) {
e.printStackTrace();
throw e;
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = KeyProviderFactory.getProviders(conf).get(0);
assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
Project: hadoop
File: TestViewFileSystemWithAcls.java
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
Project: multiple-dimension-spread
File: MDSCombineSpreadReader.java
public MDSCombineSpreadReader( final CombineFileSplit split , final TaskAttemptContext context , final Integer index ) throws IOException{
Configuration config = context.getConfiguration();
Path path = split.getPath( index );
FileSystem fs = path.getFileSystem( config );
long fileLength = fs.getLength( path );
InputStream in = fs.open( path );
innerReader = new MDSSpreadReader();
innerReader.setStream( in , fileLength , 0 , fileLength );
}
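FileSystem.getLength(Path) is deprecated in the FileSystem API; a hedged equivalent of the length lookup above reads it from the FileStatus instead:
// Non-deprecated way to obtain the same file length.
long fileLength = fs.getFileStatus( path ).getLen();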
Project: hadoop
File: ViewFileSystem.java
@Override
public void setPermission(final Path f, final FsPermission permission)
throws AccessControlException, FileNotFoundException,
IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
res.targetFileSystem.setPermission(res.remainingPath, permission);
}
Project: hadoop-oss
File: TestGenericOptionsParser.java
@Override
protected void setUp() throws Exception {
super.setUp();
conf = new Configuration();
localFs = FileSystem.getLocal(conf);
testDir = new File(System.getProperty("test.build.data", "/tmp"), "generic");
if(testDir.exists())
localFs.delete(new Path(testDir.toString()), true);
}
Project: hadoop
File: TestWebHDFS.java
/**
* Test snapshot deletion through WebHdfs
*/
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
final Path spath = webHdfs.createSnapshot(foo, null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// delete the two snapshots
webHdfs.deleteSnapshot(foo, "s1");
Assert.assertFalse(webHdfs.exists(s1path));
webHdfs.deleteSnapshot(foo, spath.getName());
Assert.assertFalse(webHdfs.exists(spath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Project: hadoop
File: AbstractContractRootDirectoryTest.java
@Test
public void testMkDirDepth1() throws Throwable {
FileSystem fs = getFileSystem();
Path dir = new Path("/testmkdirdepth1");
assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir);
ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
assertPathExists("directory already exists", dir);
assertDeleted(dir, true);
}
Project: ditb
File: PerformanceEvaluation.java
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
// generate splits
List<InputSplit> splitList = new ArrayList<InputSplit>();
for (FileStatus file: listStatus(job)) {
if (file.isDirectory()) {
continue;
}
Path path = file.getPath();
FileSystem fs = path.getFileSystem(job.getConfiguration());
FSDataInputStream fileIn = fs.open(path);
LineReader in = new LineReader(fileIn, job.getConfiguration());
int lineLen = 0;
while(true) {
Text lineText = new Text();
lineLen = in.readLine(lineText);
if(lineLen <= 0) {
break;
}
Matcher m = LINE_PATTERN.matcher(lineText.toString());
if((m != null) && m.matches()) {
TableName tableName = TableName.valueOf(m.group(1));
int startRow = Integer.parseInt(m.group(2));
int rows = Integer.parseInt(m.group(3));
int totalRows = Integer.parseInt(m.group(4));
int clients = Integer.parseInt(m.group(5));
boolean flushCommits = Boolean.parseBoolean(m.group(6));
boolean writeToWAL = Boolean.parseBoolean(m.group(7));
boolean useTags = Boolean.parseBoolean(m.group(8));
int noOfTags = Integer.parseInt(m.group(9));
LOG.debug("tableName=" + tableName +
" split["+ splitList.size() + "] " +
" startRow=" + startRow +
" rows=" + rows +
" totalRows=" + totalRows +
" clients=" + clients +
" flushCommits=" + flushCommits +
" writeToWAL=" + writeToWAL +
" useTags=" + useTags +
" noOfTags=" + noOfTags);
PeInputSplit newSplit =
new PeInputSplit(tableName, startRow, rows, totalRows, clients,
flushCommits, writeToWAL, useTags, noOfTags);
splitList.add(newSplit);
}
}
in.close();
}
LOG.info("Total # of splits: " + splitList.size());
return splitList;
}
Project: hadoop
File: ViewFileSystem.java
@Override
public long getDefaultBlockSize(Path f) {
try {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
} catch (FileNotFoundException e) {
throw new NotInMountpointException(f, "getDefaultBlockSize");
}
}
Project: hadoop
File: TestTextInputFormat.java
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
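A hedged usage sketch: writing one plain and one gzip-compressed input file with the helper above. GzipCodec is created through ReflectionUtils so it picks up the Configuration; defaultConf, localFs and workDir stand for the test's assumed Configuration, local FileSystem and working directory.
CompressionCodec gzip = ReflectionUtils.newInstance(GzipCodec.class, defaultConf);
writeFile(localFs, new Path(workDir, "part1.txt"), null, "line one\nline two\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip, "line one\nline two\n");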
Project: hadoop-oss
File: RollingFileSystemSinkTestBase.java
/**
* Read the target log file and append its contents to the StringBuilder.
* @param fs the target FileSystem
* @param logFile the target file path
* @param metrics where to append the file contents
* @throws IOException thrown if the file cannot be read
*/
protected void readLogData(FileSystem fs, Path logFile, StringBuilder metrics)
throws IOException {
FSDataInputStream fsin = fs.open(logFile);
BufferedReader in = new BufferedReader(new InputStreamReader(fsin,
StandardCharsets.UTF_8));
String line = null;
while ((line = in.readLine()) != null) {
metrics.append(line).append("\n");
}
}
Project: angel
File: ModelLoader.java
/**
* Get model meta
*
* @param modelDir model save directory path
* @return model meta
*/
public static ModelFilesMeta getMeta(String modelDir, Configuration conf) throws IOException {
Path modelPath = new Path(modelDir);
Path meteFilePath = new Path(modelPath, ModelFilesConstent.modelMetaFileName);
ModelFilesMeta meta = new ModelFilesMeta();
FileSystem fs = meteFilePath.getFileSystem(conf);
if (!fs.exists(meteFilePath)) {
throw new IOException("matrix meta file does not exist ");
}
FSDataInputStream input = fs.open(meteFilePath);
meta.read(input);
input.close();
return meta;
}
Project: hadoop-oss
File: AbstractContractMkdirTest.java
@Test
public void testMkDirRmDir() throws Throwable {
FileSystem fs = getFileSystem();
Path dir = path("testMkDirRmDir");
assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir);
assertPathExists("mkdir failed", dir);
assertDeleted(dir, false);
}
Project: hadoop-oss
File: BloomMapFile.java
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
CompressionType compress, Progressable progress) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress),
progressable(progress));
}
Project: hadoop
File: MockFileSystem.java
/** Setup and return the underlying {@link FileSystem} mock */
static FileSystem setup() throws IOException {
if (mockFs == null) {
mockFs = mock(FileSystem.class);
}
reset(mockFs);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "mockfs:///");
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
when(mockFs.getConf()).thenReturn(conf);
return mockFs;
}
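A hedged usage sketch of the mock in a test (Mockito's when/any, matching the stubbing style already used for getConf() above):
FileSystem fs = MockFileSystem.setup();
// Make every existence check succeed for this test case.
when(fs.exists(any(Path.class))).thenReturn(true);
assertTrue(fs.exists(new Path("/made/up/path")));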
Project: hadoop
File: SwiftTestUtils.java
/**
* Assert that a file exists and that its {@link FileStatus} entry
* declares it to be a file and not a symlink or directory.
* @param fileSystem filesystem to resolve path against
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
public static void assertIsFile(FileSystem fileSystem, Path filename) throws
IOException {
assertPathExists(fileSystem, "Expected file", filename);
FileStatus status = fileSystem.getFileStatus(filename);
String fileInfo = filename + " " + status;
assertFalse("File claims to be a directory " + fileInfo,
status.isDirectory());
/* disabled for Hadoop v1 compatibility
assertFalse("File claims to be a symlink " + fileInfo,
status.isSymlink());
*/
}
Project: hadoop
File: TestMapRed.java
private static boolean isSequenceFile(FileSystem fs,
Path f) throws IOException {
DataInputStream in = fs.open(f);
byte[] seq = "SEQ".getBytes();
for(int i=0; i < seq.length; ++i) {
if (seq[i] != in.read()) {
return false;
}
}
return true;
}
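A hedged variant of the check above that also closes the stream (the original returns without closing the FSDataInputStream opened by fs.open):
private static boolean isSequenceFileClosing(FileSystem fs, Path f) throws IOException {
  byte[] seq = "SEQ".getBytes();
  try (DataInputStream in = fs.open(f)) {   // closed even on the early return
    for (int i = 0; i < seq.length; ++i) {
      if (seq[i] != in.read()) {
        return false;
      }
    }
  }
  return true;
}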
Project: hadoop
File: TestHistograms.java
/**
* @throws IOException
*
* There should be files in the directory named by
* ${test.build.data}/rumen/histogram-test .
*
* There will be pairs of files, inputXxx.json and goldXxx.json .
*
* We read the input file as a HistogramRawTestData in json. Then we
* create a Histogram using the data field, and then a
* LoggedDiscreteCDF using the percentiles and scale field. Finally,
* we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
* deepCompare them.
*/
@Test
public void testHistograms() throws IOException {
final Configuration conf = new Configuration();
final FileSystem lfs = FileSystem.getLocal(conf);
final Path rootInputDir = new Path(
System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
FileStatus[] tests = lfs.listStatus(rootInputFile);
for (int i = 0; i < tests.length; ++i) {
Path filePath = tests[i].getPath();
String fileName = filePath.getName();
if (fileName.startsWith("input")) {
String testName = fileName.substring("input".length());
Path goldFilePath = new Path(rootInputFile, "gold"+testName);
assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
System.out.println("Testing a Histogram for " + fileName);
FSDataInputStream goldStream = lfs.open(goldFilePath);
JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
goldStream, LoggedDiscreteCDF.class);
try {
LoggedDiscreteCDF dcdf = parser.getNext();
dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
} catch (DeepInequalityException e) {
fail(e.path.toString());
}
finally {
parser.close();
}
}
}
}
Project: ditb
File: TableNamespaceManager.java
private void create(Table table, NamespaceDescriptor ns) throws IOException {
if (get(table, ns.getName()) != null) {
throw new NamespaceExistException(ns.getName());
}
validateTableAndRegionCount(ns);
FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
fs.mkdirs(FSUtils.getNamespaceDir(
masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
upsert(table, ns);
if (this.masterServices.isInitialized()) {
this.masterServices.getMasterQuotaManager().setNamespaceQuota(ns);
}
}
Project: QDrill
File: TestImpersonationMetadata.java
private static Map<String , WorkspaceConfig> createTestWorkspaces() throws Exception {
// Create "/tmp" folder and set permissions to "777"
final Path tmpPath = new Path("/tmp");
fs.delete(tmpPath, true);
FileSystem.mkdirs(fs, tmpPath, new FsPermission((short)0777));
Map<String, WorkspaceConfig> workspaces = Maps.newHashMap();
// Create /drillTestGrp0_700 directory with permissions 700 (owned by user running the tests)
createAndAddWorkspace("drillTestGrp0_700", "/drillTestGrp0_700", (short)0700, processUser, group0, workspaces);
// Create /drillTestGrp0_750 directory with permissions 750 (owned by user running the tests)
createAndAddWorkspace("drillTestGrp0_750", "/drillTestGrp0_750", (short)0750, processUser, group0, workspaces);
// Create /drillTestGrp0_755 directory with permissions 755 (owned by user running the tests)
createAndAddWorkspace("drillTestGrp0_755", "/drillTestGrp0_755", (short)0755, processUser, group0, workspaces);
// Create /drillTestGrp0_770 directory with permissions 770 (owned by user running the tests)
createAndAddWorkspace("drillTestGrp0_770", "/drillTestGrp0_770", (short)0770, processUser, group0, workspaces);
// Create /drillTestGrp0_777 directory with permissions 777 (owned by user running the tests)
createAndAddWorkspace("drillTestGrp0_777", "/drillTestGrp0_777", (short)0777, processUser, group0, workspaces);
// Create /drillTestGrp1_700 directory with permissions 700 (owned by user1)
createAndAddWorkspace("drillTestGrp1_700", "/drillTestGrp1_700", (short)0700, user1, group1, workspaces);
// Create /user2_workspace1 directory with permissions 775 (owned by user2)
createAndAddWorkspace("user2_workspace1", "/user2_workspace1", (short)0775, user2, group1, workspaces);
// Create /user2_workspace2 directory with permissions 755 (owned by user2)
createAndAddWorkspace("user2_workspace2", "/user2_workspace2", (short)0755, user2, group1, workspaces);
return workspaces;
}
Project: hadoop
File: TestFileQueue.java
@AfterClass
public static void cleanup() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf).getRaw();
final Path p = new Path(System.getProperty("test.build.data", "/tmp"),
"testFileQueue").makeQualified(fs);
fs.delete(p, true);
}