Example source code for the Java class org.apache.hadoop.fs.permission.FsPermission
Project: hadoop-oss
File: TestCredentialProviderFactory.java
@Test
public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue(s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
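A side note on the snippet above: FsPermission accepts both an octal string (as in new FsPermission("777")) and a short. A minimal standalone sketch, not part of the test, showing that the two forms round-trip through the symbolic form printed by toString():

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDemo {
  public static void main(String[] args) {
    FsPermission fromString = new FsPermission("700");       // octal string form
    FsPermission fromShort = new FsPermission((short) 0700); // octal literal form
    System.out.println(fromString);                    // prints rwx------
    System.out.println(fromString.equals(fromShort));  // true
    System.out.println(fromString.toShort() == 0700);  // true
  }
}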
Project: hadoop-oss
File: SFTPFileSystem.java
/**
 * Convert the file information in an LsEntry to a {@link FileStatus} object.
 *
 * @param channel open SFTP channel, used to resolve symlinks
 * @param sftpFile file entry returned by the SFTP listing
 * @param parentPath parent directory of the entry
 * @return file status
 * @throws IOException if a symlink target cannot be resolved
 */
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
    Path parentPath) throws IOException {
  SftpATTRS attr = sftpFile.getAttrs();
  long length = attr.getSize();
  boolean isDir = attr.isDir();
  boolean isLink = attr.isLink();
  if (isLink) {
    String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
    try {
      link = channel.realpath(link);
      Path linkParent = new Path("/", link);
      FileStatus fstat = getFileStatus(channel, linkParent);
      isDir = fstat.isDirectory();
      length = fstat.getLen();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  int blockReplication = 1;
  // Use the default block size, since the SFTP channel offers no way to
  // discover block sizes on the server. This assumption may be less than ideal.
  long blockSize = DEFAULT_BLOCK_SIZE;
  long modTime = attr.getMTime() * 1000; // convert to milliseconds
  long accessTime = 0;
  FsPermission permission = getPermissions(sftpFile);
  // The real user/group names are not available over SFTP, so use the
  // numeric user and group ids instead.
  String user = Integer.toString(attr.getUId());
  String group = Integer.toString(attr.getGId());
  Path filePath = new Path(parentPath, sftpFile.getFilename());
  return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
      accessTime, permission, user, group, filePath.makeQualified(
      this.getUri(), this.getWorkingDirectory()));
}
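The getPermissions helper referenced above is not shown; a plausible minimal sketch (an assumption, not verbatim SFTPFileSystem source) maps the raw POSIX mode bits from SftpATTRS onto an FsPermission:

private FsPermission getPermissions(LsEntry sftpFile) {
  // getPermissions() returns the full st_mode word; the FsPermission(short)
  // constructor only reads the low permission and sticky bits, so any
  // file-type bits are ignored by the cast.
  return new FsPermission((short) sftpFile.getAttrs().getPermissions());
}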
Project: ditb
File: ExportSnapshot.java
/**
 * Create the output folder and optionally set ownership.
 */
private void createOutputPath(final Path path) throws IOException {
  if (filesUser == null && filesGroup == null) {
    outputFs.mkdirs(path);
  } else {
    Path parent = path.getParent();
    if (!outputFs.exists(parent) && !parent.isRoot()) {
      createOutputPath(parent);
    }
    outputFs.mkdirs(path);
    if (filesUser != null || filesGroup != null) {
      // override the owner when a non-null user/group is specified
      outputFs.setOwner(path, filesUser, filesGroup);
    }
    if (filesMode > 0) {
      outputFs.setPermission(path, new FsPermission(filesMode));
    }
  }
}
Project: hadoop
File: S3FileSystem.java
/**
 * @param permission Currently ignored.
 */
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(path);
  List<Path> paths = new ArrayList<Path>();
  do {
    paths.add(0, absolutePath);
    absolutePath = absolutePath.getParent();
  } while (absolutePath != null);

  boolean result = true;
  for (Path p : paths) {
    result &= mkdir(p);
  }
  return result;
}
Project: hadoop
File: ChecksumFileSystem.java
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
                              Path file,
                              boolean overwrite,
                              int bufferSize,
                              short replication,
                              long blockSize,
                              Progressable progress,
                              FsPermission permission)
    throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
      bufferSize, replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
      permission, true, sumBufferSize, replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Project: hadoop-oss
File: FileContextMainOperationsBaseTest.java
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2) };
  if (!exists(fc, testDirs[0])) {
    for (Path path : testDirs) {
      fc.mkdir(path, FsPermission.getDefault(), true);
    }
  }

  // should return nothing
  FileStatus[] paths = fc.util().globStatus(
      getTestRootPath(fc, "test/hadoop/?"));
  Assert.assertEquals(0, paths.length);
}
Project: hadoop
File: FileContextPermissionBase.java
@Test
public void testSetPermission() throws IOException {
  if (Path.WINDOWS) {
    System.out.println("Cannot run test for Windows");
    return;
  }

  String filename = "foo";
  Path f = fileContextTestHelper.getTestRootPath(fc, filename);
  createFile(fc, f);

  try {
    // create files and manipulate them.
    FsPermission all = new FsPermission((short) 0777);
    FsPermission none = new FsPermission((short) 0);

    fc.setPermission(f, none);
    doFilePermissionCheck(none, fc.getFileStatus(f).getPermission());

    fc.setPermission(f, all);
    doFilePermissionCheck(all, fc.getFileStatus(f).getPermission());
  } finally {
    cleanupFile(fc, f);
  }
}
Project: hadoop
File: FSAclBaseTest.java
@Test
public void testRemoveAclEntriesOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, USER, "bar", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_WRITE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bar", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_WRITE) }, returned);
  assertPermission((short) 010760);
  assertAclFeature(true);
}
Project: hadoop-oss
File: FileSystem.java
/**
 * This version of the mkdirs method assumes that the permission is absolute.
 * It has been added to support the FileContext, which processes the permission
 * with the umask before calling this method.
 * This is a temporary method added to support the transition from FileSystem
 * to FileContext for user applications.
 */
@Deprecated
protected void primitiveMkdir(Path f, FsPermission absolutePermission,
    boolean createParent)
    throws IOException {

  if (!createParent) { // parent must exist.
    // since this.mkdirs makes parent dirs automatically,
    // we must throw an exception if the parent does not exist.
    final FileStatus stat = getFileStatus(f.getParent());
    if (stat == null) {
      throw new FileNotFoundException("Missing parent: " + f);
    }
    if (!stat.isDirectory()) {
      throw new ParentNotDirectoryException("parent is not a dir");
    }
    // parent does exist - go ahead with mkdir of leaf
  }
  // The default implementation assumes that permissions do not matter,
  // so calling the regular mkdirs is good enough.
  // File systems that implement permissions should override this.
  if (!this.mkdirs(f, absolutePermission)) {
    throw new IOException("mkdir of " + f + " failed");
  }
}
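For context, a short sketch of how a caller such as FileContext can derive the absolute permission this method expects, using the configured umask (conf is an illustrative Configuration, and the variable names are hypothetical):

// Resolve the effective directory permission before calling primitiveMkdir.
FsPermission umask = FsPermission.getUMask(conf);            // e.g. 022
FsPermission absolutePermission =
    FsPermission.getDirDefault().applyUMask(umask);          // 0777 & ~022 = 0755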
Project: hadoop-oss
File: FileSystem.java
/**
 * This method provides the default implementation of
 * {@link #access(Path, FsAction)}.
 *
 * @param stat FileStatus to check
 * @param mode type of access to check
 * @throws IOException for any error
 */
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
    throws IOException {
  FsPermission perm = stat.getPermission();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String user = ugi.getShortUserName();
  List<String> groups = Arrays.asList(ugi.getGroupNames());
  if (user.equals(stat.getOwner())) {
    if (perm.getUserAction().implies(mode)) {
      return;
    }
  } else if (groups.contains(stat.getGroup())) {
    if (perm.getGroupAction().implies(mode)) {
      return;
    }
  } else {
    if (perm.getOtherAction().implies(mode)) {
      return;
    }
  }
  throw new AccessControlException(String.format(
      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s",
      user, stat.getPath(), stat.getOwner(), stat.getGroup(),
      stat.isDirectory() ? "d" : "-", perm));
}
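A minimal caller-side sketch of the access check implemented above (the path is hypothetical):

try {
  // Throws AccessControlException when the current user lacks the requested
  // access under the user/group/other evaluation above.
  fs.access(new Path("/data/report"), FsAction.READ_WRITE);
} catch (AccessControlException ace) {
  // fall back, log, or surface the denial to the caller
}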
Project: dremio-oss
File: TestPseudoDistributedFileSystem.java
@Before
public void setUpLocalFS() throws IOException {
  final FileStatus rootStatus = new FileStatus(4096, true, 0, 0, 37, 42,
      FsPermission.createImmutable((short) 0555), "root", "wheel",
      new Path("sabot://10.0.0.1:1234/"));
  final FileStatus fooStatus = new FileStatus(38214, true, 0, 0, 45, 67,
      FsPermission.createImmutable((short) 0755), "root", "wheel",
      new Path("sabot://10.0.0.1:1234/foo"));
  final FileStatus fooBarStatus = new FileStatus(67128, true, 1, 4096, 69, 68,
      FsPermission.createImmutable((short) 0644), "root", "wheel",
      new Path("sabot://10.0.0.1:1234/foo/bar"));
  final FileStatus fooBarDirStatus = new FileStatus(47, true, 0, 0, 1234, 3645,
      FsPermission.createImmutable((short) 0755), "admin", "admin",
      new Path("sabot://10.0.0.1:1234/foo/bar/dir"));
  final FileStatus fooBarFile1Status = new FileStatus(1024, false, 1, 4096, 37, 42,
      FsPermission.createImmutable((short) 0644), "root", "wheel",
      new Path("sabot://10.0.0.1:1234/foo/bar/file1"));
  final FileStatus fooBarFile2Status = new FileStatus(2048, false, 1, 4096, 37, 42,
      FsPermission.createImmutable((short) 0644), "root", "wheel",
      new Path("sabot://10.0.0.1:1234/foo/bar/file2"));

  doReturn(rootStatus).when(mockLocalFS).getFileStatus(new Path("/"));
  doThrow(new FileNotFoundException()).when(mockLocalFS).getFileStatus(any(Path.class));
  doReturn(fooBarFile2Status).when(mockLocalFS).getFileStatus(new Path("/foo/bar/file2"));
  doReturn(fooBarFile1Status).when(mockLocalFS).getFileStatus(new Path("/foo/bar/file1"));
  doReturn(fooBarDirStatus).when(mockLocalFS).getFileStatus(new Path("/foo/bar/dir"));
  doReturn(fooBarStatus).when(mockLocalFS).getFileStatus(new Path("/foo/bar"));
  doReturn(fooStatus).when(mockLocalFS).getFileStatus(new Path("/foo"));
  doReturn(rootStatus).when(mockLocalFS).getFileStatus(new Path("/"));

  doThrow(new FileNotFoundException()).when(mockLocalFS).listStatus(any(Path.class));
  doReturn(new FileStatus[] { fooBarDirStatus, fooBarFile1Status, fooBarFile2Status })
      .when(mockLocalFS).listStatus(new Path("/foo/bar"));
  doReturn(new FileStatus[] { fooBarStatus }).when(mockLocalFS).listStatus(new Path("/foo"));
  doReturn(new FileStatus[] { fooStatus }).when(mockLocalFS).listStatus(new Path("/"));
}
Project: hadoop
File: AclCommands.java
@Override
protected void processPath(PathData item) throws IOException {
  out.println("# file: " + item);
  out.println("# owner: " + item.stat.getOwner());
  out.println("# group: " + item.stat.getGroup());
  FsPermission perm = item.stat.getPermission();
  if (perm.getStickyBit()) {
    out.println("# flags: --" +
        (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
  }

  AclStatus aclStatus = item.fs.getAclStatus(item.path);
  List<AclEntry> entries = perm.getAclBit() ? aclStatus.getEntries()
      : Collections.<AclEntry> emptyList();
  ScopedAclEntries scopedEntries = new ScopedAclEntries(
      AclUtil.getAclFromPermAndEntries(perm, entries));
  printAclEntriesForSingleScope(aclStatus, perm,
      scopedEntries.getAccessEntries());
  printAclEntriesForSingleScope(aclStatus, perm,
      scopedEntries.getDefaultEntries());
  out.println();
}
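The two permission flags this command consults can also be read directly; a small sketch (status is an arbitrary FileStatus, assumed for illustration):

FsPermission perm = status.getPermission();
boolean sticky = perm.getStickyBit(); // rendered as the 't'/'T' flag above
boolean hasAcl = perm.getAclBit();    // true when extended ACL entries exist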
Project: hadoop
File: FSAclBaseTest.java
@Test
public void testSetPermission() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  fs.setPermission(path, FsPermission.createImmutable((short) 0700));
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010700);
  assertAclFeature(true);
}
Project: hadoop
File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
              version, new byte[suite.getAlgorithmBlockSize()],
              new byte[suite.getAlgorithmBlockSize()],
              "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Project: hadoop-oss
File: JavaKeyStoreProvider.java
private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
    throws NoSuchAlgorithmException, CertificateException, IOException {
  FsPermission perm = null;
  try {
    perm = loadFromPath(pathToLoad, password);
    renameOrFail(pathToLoad, path);
    if (LOG.isDebugEnabled()) {
      LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
          pathToLoad));
    }
    if (fs.exists(pathToDelete)) {
      fs.delete(pathToDelete, true);
    }
  } catch (IOException e) {
    // Check for a password issue: we don't want to trash the file
    // because of a wrong password.
    if (isBadorWrongPassword(e)) {
      throw e;
    }
  }
  return perm;
}
Project: hadoop
File: TestDFSPermission.java
@Test
public void testAccessOthers() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p3 = new Path("/p3");
  rootFs.mkdirs(p3);
  rootFs.setPermission(p3, new FsPermission((short) 0774));

  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });

  fs.access(p3, FsAction.READ);
  try {
    fs.access(p3, FsAction.READ_WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(
            p3.getParent().toUri().getPath()));
  }
}
Project: hadoop
File: TestAuditLogs.java
/** Test that denied access via WebHDFS puts a proper entry in the audit log. */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
        userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
Project: hadoop
File: TestDistCacheEmulation.java
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load.
 */
@Test(timeout = 2000)
public void testDistCacheEmulationConfigurability() throws IOException {
  Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
  Path ioPath = new Path("testDistCacheEmulationConfigurability")
      .makeQualified(GridmixTestUtils.dfs.getUri(),
          GridmixTestUtils.dfs.getWorkingDirectory());
  FileSystem fs = FileSystem.get(jobConf);
  FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));

  // default config
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertTrue("Default configuration of "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " is wrong.", dce.shouldEmulateDistCacheLoad());

  // config property set to false
  jobConf.setBoolean(
      DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertFalse("Disabling of emulation of distributed cache load by setting "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
Project: hadoop
File: TestEncryptionZones.java
@Test(timeout = 120000)
public void testCreateEZWithNoProvider() throws Exception {
  // Unset the key provider and make sure EZ ops don't work
  final Configuration clusterConf = cluster.getConfiguration(0);
  clusterConf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
  cluster.restartNameNode(true);
  cluster.waitActive();
  final Path zone1 = new Path("/zone1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  try {
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
    fail("expected exception");
  } catch (IOException e) {
    assertExceptionContains("since no key provider is available", e);
  }
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  clusterConf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  // Try listing EZs as well
  assertNumZones(0);
}
Project: hadoop
File: TestFavoredNodesEndToEnd.java
@Test(timeout = 180000)
public void testWhenFavoredNodesNotPresent() throws Exception {
  // When we ask for favored nodes but the nodes are not there, we should
  // get some other nodes. In other words, the write to HDFS should not fail,
  // and getBlockLocations on the file should show one block location with
  // three hosts for it.
  InetSocketAddress[] arbitraryAddrs = new InetSocketAddress[3];
  for (int i = 0; i < 3; i++) {
    arbitraryAddrs[i] = getArbitraryLocalHostAddr();
  }
  Path p = new Path("/filename-foo-bar");
  FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
      4096, (short) 3, 4096L, null, arbitraryAddrs);
  out.write(SOME_BYTES);
  out.close();
  getBlockLocations(p);
}
Project: hadoop
File: FSAclBaseTest.java
@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(ACCESS, MASK),
      aclEntry(DEFAULT, USER, "foo"),
      aclEntry(DEFAULT, MASK));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
Project: hadoop
File: TestDFSPermission.java
private void create(OpType op, Path name, short umask,
    FsPermission permission) throws IOException {
  // set umask in configuration, converting to padded octal
  conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));

  // create the file/directory
  switch (op) {
    case CREATE:
      FSDataOutputStream out = fs.create(name, permission, true,
          conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
          fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
      out.close();
      break;
    case MKDIRS:
      fs.mkdirs(name, permission);
      break;
    default:
      throw new IOException("Unsupported operation: " + op);
  }
}
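The umask key written above, spelled out: FsPermission.UMASK_LABEL is "fs.permissions.umask-mode", and the configured value combines with a requested mode as in this sketch (0666 is an arbitrary example mode):

conf.set(FsPermission.UMASK_LABEL, "022"); // same key the test sets in octal
FsPermission effective = new FsPermission((short) 0666)
    .applyUMask(FsPermission.getUMask(conf)); // yields 0644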
Project: hadoop
File: WindowsSecureContainerExecutor.java
@Override
protected OutputStream createOutputStreamWithMode(Path f, boolean append,
    FsPermission permission) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("EFS:createOutputStreamWithMode: %s %b %s", f,
        append, permission));
  }
  boolean success = false;
  OutputStream os = Native.Elevated.create(f, append);
  try {
    setPermission(f, permission);
    success = true;
    return os;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, os);
    }
  }
}
Project: hadoop
File: TestAuditLogs.java
/** Test that a stat via WebHDFS puts a proper entry in the audit log. */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short) 0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus st = webfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Project: hadoop
File: FSAclBaseTest.java
@Test
public void testRemoveAclEntriesMinimal() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0760));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_WRITE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(ACCESS, MASK));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] { }, returned);
  assertPermission((short) 0760);
  assertAclFeature(false);
}
Project: hadoop
File: RawLocalFileSystem.java
private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission)
    throws IOException {
  if (f == null) {
    throw new IllegalArgumentException("mkdirs path arg is null");
  }
  Path parent = f.getParent();
  File p2f = pathToFile(f);
  File parent2f = null;
  if (parent != null) {
    parent2f = pathToFile(parent);
    if (parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
      throw new ParentNotDirectoryException("Parent path is not a directory: "
          + parent);
    }
  }
  if (p2f.exists() && !p2f.isDirectory()) {
    throw new FileNotFoundException("Destination exists" +
        " and is not a directory: " + p2f.getCanonicalPath());
  }
  return (parent == null || parent2f.exists() || mkdirs(parent)) &&
      (mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory());
}
Project: hadoop
File: TestJobHistoryUtils.java
private Path createPath(FileContext fc, Path root, int year, int month,
    int day, String id) throws IOException {
  Path path = new Path(root, year + Path.SEPARATOR + month + Path.SEPARATOR +
      day + Path.SEPARATOR + id);
  fc.mkdir(path, FsPermission.getDirDefault(), true);
  return path;
}
Project: hadoop-oss
File: FsShellPermissions.java
@Override
protected void processPath(PathData item) throws IOException {
  short newperms = pp.applyNewPermission(item.stat);
  if (item.stat.getPermission().toShort() != newperms) {
    try {
      item.fs.setPermission(item.path, new FsPermission(newperms));
    } catch (IOException e) {
      LOG.debug("Error changing permissions of " + item, e);
      throw new IOException(
          "changing permissions of '" + item + "': " + e.getMessage());
    }
  }
}
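A tiny sketch of the short round trip this chmod handler relies on:

FsPermission before = new FsPermission((short) 0644);
short bits = before.toShort();               // 0644 as a short
FsPermission after = new FsPermission(bits); // equal to 'before'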
Project: hadoop
File: SwiftFileStatus.java
public SwiftFileStatus(long length,
    boolean isdir,
    int block_replication,
    long blocksize,
    long modification_time,
    long access_time,
    FsPermission permission,
    String owner, String group, Path path) {
  super(length, isdir, block_replication, blocksize, modification_time,
      access_time, permission, owner, group, path);
}
Project: hadoop
File: FileContextTestWrapper.java
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, IOException {
  fc.mkdir(dir, permission, createParent);
}
Project: hadoop
File: HftpFileSystem.java
@Override
public void startElement(String ns, String localname, String qname,
    Attributes attrs) throws SAXException {
  if ("listing".equals(qname)) {
    return;
  }
  if (!"file".equals(qname) && !"directory".equals(qname)) {
    if (RemoteException.class.getSimpleName().equals(qname)) {
      throw new SAXException(RemoteException.valueOf(attrs));
    }
    throw new SAXException("Unrecognized entry: " + qname);
  }

  long modif;
  long atime = 0;
  try {
    final SimpleDateFormat ldf = df.get();
    modif = ldf.parse(attrs.getValue("modified")).getTime();
    String astr = attrs.getValue("accesstime");
    if (astr != null) {
      atime = ldf.parse(astr).getTime();
    }
  } catch (ParseException e) {
    throw new SAXException(e);
  }

  FileStatus fs = "file".equals(qname)
      ? new FileStatus(
          Long.parseLong(attrs.getValue("size")), false,
          Short.valueOf(attrs.getValue("replication")).shortValue(),
          Long.parseLong(attrs.getValue("blocksize")),
          modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
          attrs.getValue("owner"), attrs.getValue("group"),
          HftpFileSystem.this.makeQualified(
              new Path(getUri().toString(), attrs.getValue("path"))))
      : new FileStatus(0L, true, 0, 0L,
          modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
          attrs.getValue("owner"), attrs.getValue("group"),
          HftpFileSystem.this.makeQualified(
              new Path(getUri().toString(), attrs.getValue("path"))));
  fslist.add(fs);
}
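FsPermission.valueOf, used for the "permission" attribute above, parses ls-style mode strings; a minimal sketch:

// valueOf expects the 10-character symbolic form, including the type column.
FsPermission p = FsPermission.valueOf("-rwxr-xr--");
// p.toShort() == 0754; p.toString() == "rwxr-xr--"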
Project: hadoop
File: ViewFs.java
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);
  res.targetFileSystem.setPermission(res.remainingPath, permission);
}
Project: dremio-oss
File: TestPseudoDistributedFileSystem.java
@Test
public void testMkdirsRemoteFile() throws IOException {
  doReturn(true).when(mockLocalFS).mkdirs(
      new Path("/foo/bar/dir2"),
      FsPermission.getFileDefault());
  doReturn(true).when(mockRemoteFS).mkdirs(
      new Path("/foo/bar/dir2"),
      FsPermission.getFileDefault());

  Path path = new Path("/foo/bar/dir2");
  assertTrue(fs.mkdirs(path, FsPermission.getFileDefault()));
}
Project: hadoop
File: TestEncryptionZones.java
/**
 * Test running fsck on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
  final int len = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{ "/" });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  String result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));

  // Run fsck directly on the encryption zone instead of root
  errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{ zoneParent.toString() });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));
}
Project: hadoop
File: WindowsSecureContainerExecutor.java
@Override
public Path localizeClasspathJar(Path classPathJar, Path pwd, String owner)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("localizeClasspathJar: %s %s o:%s",
        classPathJar, pwd, owner));
  }
  createDir(pwd, new FsPermission(DIR_PERM), true, owner);
  String fileName = classPathJar.getName();
  Path dst = new Path(pwd, fileName);
  Native.Elevated.move(classPathJar, dst, true);
  Native.Elevated.chown(dst, owner, nodeManagerGroup);
  return dst;
}
Project: hadoop-oss
File: ViewFileSystemBaseTest.java
private void testRootReadableExecutableInternal(boolean located)
    throws IOException {
  // verify executable permission on root: cd /
  Assert.assertFalse("In root before cd",
      fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd",
      fsView.getWorkingDirectory().isRoot());

  // verify readable
  verifyRootChildren(listStatusInternal(located,
      fsView.getWorkingDirectory()));

  // verify permissions
  final FileStatus rootStatus =
      fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission perms = rootStatus.getPermission();

  Assert.assertTrue("User-executable permission not set!",
      perms.getUserAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!",
      perms.getUserAction().implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!",
      perms.getGroupAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!",
      perms.getGroupAction().implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!",
      perms.getOtherAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!",
      perms.getOtherAction().implies(FsAction.READ));
}
Project: hadoop
File: TestSpeculativeExecution.java
private Path createTempFile(String filename, String contents)
    throws IOException {
  Path path = new Path(TEST_ROOT_DIR, filename);
  FSDataOutputStream os = localFs.create(path);
  os.writeBytes(contents);
  os.close();
  localFs.setPermission(path, new FsPermission("700"));
  return path;
}
Project: hadoop-oss
File: ChecksumFileSystem.java
private FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, boolean createParent, int bufferSize,
    short replication, long blockSize,
    Progressable progress) throws IOException {
  Path parent = f.getParent();
  if (parent != null) {
    if (!createParent && !exists(parent)) {
      throw new FileNotFoundException("Parent directory doesn't exist: "
          + parent);
    } else if (!mkdirs(parent)) {
      throw new IOException("Mkdirs failed to create " + parent
          + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
          + ")");
    }
  }
  final FSDataOutputStream out;
  if (writeChecksum) {
    out = new FSDataOutputStream(
        new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
            blockSize, progress, permission), null);
  } else {
    out = fs.create(f, permission, overwrite, bufferSize, replication,
        blockSize, progress);
    // remove the checksum file since we aren't writing one
    Path checkFile = getChecksumFile(f);
    if (fs.exists(checkFile)) {
      fs.delete(checkFile, true);
    }
  }
  return out;
}
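Closing usage sketch: LocalFileSystem extends ChecksumFileSystem, so a create() call routed through the method above also writes the hidden checksum sidecar (the path and mode here are illustrative):

LocalFileSystem local = FileSystem.getLocal(new Configuration());
FSDataOutputStream out = local.create(new Path("/tmp/demo.txt"),
    new FsPermission((short) 0644), true, 4096, (short) 1, 4096L, null);
out.writeBytes("hello");
out.close(); // also produces /tmp/.demo.txt.crc holding the CRC32 sums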