Source code examples for the Java class org.apache.hadoop.fs.permission.PermissionStatus
Project: hadoop
File: FSNamesystem.java
/**
 * Create a symbolic link.
 */
@SuppressWarnings("deprecation")
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
    throws IOException {
  if (!FileSystem.areSymlinksEnabled()) {
    throw new UnsupportedOperationException("Symlinks not supported");
  }
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create symlink " + link);
    auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
        createParent, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "createSymlink", link, target, auditStat);
}
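All of the NameNode entry points in this listing expect the caller to supply a PermissionStatus (owner, group, and mode) for the new namespace object. As a quick orientation, here is a minimal, self-contained sketch of constructing one and reading it back; the user and group names are made-up placeholders, not values taken from the snippets above.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusDemo {
  public static void main(String[] args) {
    // Owner, group, and a 0755 mode bundled into one value object.
    PermissionStatus perm = new PermissionStatus(
        "alice", "staff", new FsPermission((short) 0755));
    System.out.println(perm.getUserName());   // alice
    System.out.println(perm.getGroupName());  // staff
    System.out.println(perm.getPermission()); // rwxr-xr-x
  }
}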
Project: hadoop
File: FSNamesystem.java
/**
 * Create a new file entry in the namespace.
 *
 * For a description of parameters and exceptions thrown see
 * {@link ClientProtocol#create}, except that it returns a valid file status
 * upon success.
 */
HdfsFileStatus startFile(String src, PermissionStatus permissions,
    String holder, String clientMachine, EnumSet<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
    throws AccessControlException, SafeModeException,
    FileAlreadyExistsException, UnresolvedLinkException,
    FileNotFoundException, ParentNotDirectoryException, IOException {
  HdfsFileStatus status = null;
  try {
    status = startFileInt(src, permissions, holder, clientMachine, flag,
        createParent, replication, blockSize, supportedVersions,
        logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "create", src);
    throw e;
  }
  return status;
}
Project: hadoop
File: FSNamesystem.java
/**
 * Create all the necessary directories
 */
boolean mkdirs(String src, PermissionStatus permissions,
    boolean createParent) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create directory " + src);
    auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
  } catch (AccessControlException e) {
    logAuditEvent(false, "mkdirs", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "mkdirs", src, null, auditStat);
  return true;
}
Project: hadoop
File: FSDirMkdirOp.java
/**
 * For a given absolute path, create all ancestors as directories along the
 * path. All ancestors inherit their parent's permission plus an implicit
 * u+wx permission. This is used by create() and addSymlink() for
 * implicitly creating all directories along the path.
 *
 * For example, for path="/foo/bar/spam" where "/foo" is an existing
 * directory and "/foo/bar" does not exist yet, the function will create
 * directory "bar".
 *
 * @return a tuple which contains both the new INodesInPath (with all the
 * existing and newly created directories) and the last component in the
 * relative path, or null if there are errors.
 */
static Map.Entry<INodesInPath, String> createAncestorDirectories(
    FSDirectory fsd, INodesInPath iip, PermissionStatus permission)
    throws IOException {
  final String last = new String(iip.getLastLocalName(), Charsets.UTF_8);
  INodesInPath existing = iip.getExistingINodes();
  List<String> children = iip.getPath(existing.length(),
      iip.length() - existing.length());
  int size = children.size();
  if (size > 1) { // otherwise all ancestors have been created
    List<String> directories = children.subList(0, size - 1);
    INode parentINode = existing.getLastINode();
    // Ensure that the user can traverse the path by adding implicit
    // u+wx permission to all ancestor directories.
    existing = createChildrenDirectories(fsd, existing, directories,
        addImplicitUwx(parentINode.getPermissionStatus(), permission));
    if (existing == null) {
      return null;
    }
  }
  return new AbstractMap.SimpleImmutableEntry<>(existing, last);
}
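The Javadoc above says that implicitly created ancestors inherit the parent's permission plus u+wx, which is done by the addImplicitUwx helper that this listing does not show. The following is a hypothetical sketch of such a helper, built only from the public FsPermission/FsAction API; the actual FSDirMkdirOp implementation may differ.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

class ImplicitUwxSketch {
  // Keep the requested user and group, but widen the parent's user bits with
  // write+execute so every implicitly created ancestor stays traversable.
  static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
      PermissionStatus perm) {
    FsPermission parent = parentPerm.getPermission();
    FsPermission ancestorPerm = new FsPermission(
        parent.getUserAction().or(FsAction.WRITE_EXECUTE),
        parent.getGroupAction(),
        parent.getOtherAction());
    return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
        ancestorPerm);
  }
}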
Project: hadoop
File: FSDirMkdirOp.java
private static INodesInPath createSingleDirectory(FSDirectory fsd,
    INodesInPath existing, String localName, PermissionStatus perm)
    throws IOException {
  assert fsd.hasWriteLock();
  existing = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), existing,
      localName.getBytes(Charsets.UTF_8), perm, null, now());
  if (existing == null) {
    return null;
  }
  final INode newNode = existing.getLastINode();
  // Directory creation also counts towards FilesCreated
  // to match the count of the FilesDeleted metric.
  NameNode.getNameNodeMetrics().incrFilesCreated();
  String cur = existing.getPath();
  fsd.getEditLog().logMkDir(cur, newNode);
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("mkdirs: created directory " + cur);
  }
  return existing;
}
Project: hadoop
File: FSDirMkdirOp.java
/**
 * Create a directory at the path specified by the parent.
 */
private static INodesInPath unprotectedMkdir(FSDirectory fsd, long inodeId,
    INodesInPath parent, byte[] name, PermissionStatus permission,
    List<AclEntry> aclEntries, long timestamp)
    throws QuotaExceededException, AclException, FileAlreadyExistsException {
  assert fsd.hasWriteLock();
  assert parent.getLastINode() != null;
  if (!parent.getLastINode().isDirectory()) {
    throw new FileAlreadyExistsException("Parent path is not a directory: " +
        parent.getPath() + " " + DFSUtil.bytes2String(name));
  }
  final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
      timestamp);
  INodesInPath iip = fsd.addLastINode(parent, dir, true);
  if (iip != null && aclEntries != null) {
    AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
  }
  return iip;
}
Project: hadoop
File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();
  return new INodeFileAttributes.SnapshotCopy(name, permissions, null,
      modificationTime, accessTime, replication, preferredBlockSize,
      (byte) 0, null);
}
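The loader above pulls the owner, group, and mode out of the image stream with the static PermissionStatus.read(DataInput). A small round-trip sketch, assuming the instance write(DataOutput) method that pairs with that read (PermissionStatus is a Writable), illustrates the symmetric serialization; the values are arbitrary.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusRoundTrip {
  public static void main(String[] args) throws Exception {
    PermissionStatus original = new PermissionStatus(
        "alice", "staff", new FsPermission((short) 0644));

    // Serialize through the Writable contract...
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // ...and read it back the same way the fsimage loader does.
    PermissionStatus copy = PermissionStatus.read(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy); // alice:staff:rw-r--r--
  }
}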
Project: hadoop
File: FSImageFormat.java
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in the protobuf-based FsImagePBINode class for newer
  // fsimages. Tools using this class, such as the legacy mode of the offline
  // image viewer, should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();
  return nsQuota == -1L && dsQuota == -1L
      ? new INodeDirectoryAttributes.SnapshotCopy(
          name, permissions, null, modificationTime, null)
      : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
          null, modificationTime, nsQuota, dsQuota, null, null);
}
Project: hadoop
File: FSEditLog.java
/**
 * Add create directory record to edit log
 */
public void logMkDir(String path, INode newNode) {
  PermissionStatus permissions = newNode.getPermissionStatus();
  MkdirOp op = MkdirOp.getInstance(cache.get())
      .setInodeId(newNode.getId())
      .setPath(path)
      .setTimestamp(newNode.getModificationTime())
      .setPermissionStatus(permissions);
  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }
  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logEdit(op);
}
Project: hadoop
File: FSImageLoader.java
private PermissionStatus getPermissionStatus(String path) throws IOException {
  long id = lookup(path);
  FsImageProto.INodeSection.INode inode = fromINodeId(id);
  switch (inode.getType()) {
    case FILE: {
      FsImageProto.INodeSection.INodeFile f = inode.getFile();
      return FSImageFormatPBINode.Loader.loadPermission(
          f.getPermission(), stringTable);
    }
    case DIRECTORY: {
      FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
      return FSImageFormatPBINode.Loader.loadPermission(
          d.getPermission(), stringTable);
    }
    case SYMLINK: {
      FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
      return FSImageFormatPBINode.Loader.loadPermission(
          s.getPermission(), stringTable);
    }
    default: {
      return null;
    }
  }
}
Project: hadoop
File: TestINodeFile.java
/**
 * For a given path, build a tree of INodes and return the leaf node.
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  byte[][] components = INode.getPathComponents(path);
  FsPermission perm = FsPermission.createImmutable((short)0755);
  PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
  long id = 0;
  INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory dir = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    dir = new INodeDirectory(++id, component, permstatus, 0);
    prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
    prev = dir;
  }
  return dir; // Last Inode in the chain
}
Project: hadoop
File: TestGetBlockLocations.java
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);
  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
Project: hadoop
File: TestDefaultBlockPlacementPolicy.java
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
Project: hadoop
File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();
  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");
    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
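This worker thread obtains its PermissionStatus from FSNamesystem.createFsOwnerPermissions rather than building one by hand. That factory is not shown in this listing; conceptually it pairs the NameNode's filesystem owner and configured supergroup with the mode passed in. A hedged sketch (not the actual FSNamesystem code, and with the owner and supergroup passed in explicitly to keep it self-contained):

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.security.UserGroupInformation;

class FsOwnerPermissionsSketch {
  // fsOwner: the UGI the NameNode runs as; supergroup: the value of
  // dfs.permissions.superusergroup. Both are parameters here only so the
  // sketch compiles on its own.
  static PermissionStatus createFsOwnerPermissions(
      UserGroupInformation fsOwner, String supergroup,
      FsPermission permission) {
    return new PermissionStatus(fsOwner.getShortUserName(), supergroup,
        permission);
  }
}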
Project: hadoop
File: FSImageTestUtil.java
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();
  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser",
      "fakegroup", FsPermission.createImmutable((short)0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
Project: hadoop
File: NativeAzureFileSystem.java
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission()
      .equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
Project: hadoop
File: NativeAzureFileSystem.java
@Override
public void setOwner(Path p, String username, String groupname)
    throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  PermissionStatus newPermissionStatus = new PermissionStatus(
      username == null ?
          metadata.getPermissionStatus().getUserName() : username,
      groupname == null ?
          metadata.getPermissionStatus().getGroupName() : groupname,
      metadata.getPermissionStatus().getPermission());
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, newPermissionStatus);
  } else {
    store.changePermissionStatus(key, newPermissionStatus);
  }
}
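A consequence of the null-preserving logic above is that a caller can change just the owner or just the group of a blob. A hedged usage sketch through the generic FileSystem API; the account, container, and path below are made up.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetOwnerUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        URI.create("wasb://container@account.blob.core.windows.net/"), conf);
    // Passing null for the group leaves the blob's existing group untouched;
    // only the owner is replaced.
    fs.setOwner(new Path("/data/report.csv"), "alice", null);
  }
}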
Project: big-c
File: FSEditLog.java
/**
 * Add create directory record to edit log
 */
public void logMkDir(String path, INode newNode) {
  PermissionStatus permissions = newNode.getPermissionStatus();
  MkdirOp op = MkdirOp.getInstance(cache.get())
      .setInodeId(newNode.getId())
      .setPath(path)
      .setTimestamp(newNode.getModificationTime())
      .setPermissionStatus(permissions);
  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }
  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logEdit(op);
}
Project: big-c
File: FSImageLoader.java
private PermissionStatus getPermissionStatus(String path) throws IOException {
  long id = lookup(path);
  FsImageProto.INodeSection.INode inode = fromINodeId(id);
  switch (inode.getType()) {
    case FILE: {
      FsImageProto.INodeSection.INodeFile f = inode.getFile();
      return FSImageFormatPBINode.Loader.loadPermission(
          f.getPermission(), stringTable);
    }
    case DIRECTORY: {
      FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
      return FSImageFormatPBINode.Loader.loadPermission(
          d.getPermission(), stringTable);
    }
    case SYMLINK: {
      FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
      return FSImageFormatPBINode.Loader.loadPermission(
          s.getPermission(), stringTable);
    }
    default: {
      return null;
    }
  }
}
Project: big-c
File: FSNamesystem.java
/**
 * Create a symbolic link.
 */
@SuppressWarnings("deprecation")
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
    throws IOException {
  if (!FileSystem.areSymlinksEnabled()) {
    throw new UnsupportedOperationException("Symlinks not supported");
  }
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create symlink " + link);
    auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
        createParent, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "createSymlink", link, target, auditStat);
}
Project: big-c
File: TestDefaultBlockPlacementPolicy.java
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
Project: aliyun-oss-hadoop-fs
File: FSImageFormat.java
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in the protobuf-based FsImagePBINode class for newer
  // fsimages. Tools using this class, such as the legacy mode of the offline
  // image viewer, should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();
  return nsQuota == -1L && dsQuota == -1L
      ? new INodeDirectoryAttributes.SnapshotCopy(
          name, permissions, null, modificationTime, null)
      : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
          null, modificationTime, nsQuota, dsQuota, null, null);
}
Project: big-c
File: NativeAzureFileSystem.java
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission()
      .equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
Project: aliyun-oss-hadoop-fs
File: TestTruncateQuotaUpdate.java
private INodeFile createMockFile(long size, short replication) {
  ArrayList<BlockInfo> blocks = new ArrayList<>();
  long createdSize = 0;
  while (createdSize < size) {
    long blockSize = Math.min(BLOCKSIZE, size - createdSize);
    BlockInfo bi = newBlock(blockSize, replication);
    blocks.add(bi);
    createdSize += BLOCKSIZE;
  }
  PermissionStatus perm = new PermissionStatus("foo", "bar", FsPermission
      .createImmutable((short) 0x1ff));
  return new INodeFile(
      ++nextMockINodeId, new byte[0], perm, 0, 0,
      blocks.toArray(new BlockInfo[blocks.size()]), replication,
      BLOCKSIZE);
}
Project: big-c
File: FSNamesystem.java
/**
 * Create all the necessary directories
 */
boolean mkdirs(String src, PermissionStatus permissions,
    boolean createParent) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create directory " + src);
    auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
  } catch (AccessControlException e) {
    logAuditEvent(false, "mkdirs", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "mkdirs", src, null, auditStat);
  return true;
}
Project: aliyun-oss-hadoop-fs
File: TestINodeFile.java
/**
 * For a given path, build a tree of INodes and return the leaf node.
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  byte[][] components = INode.getPathComponents(path);
  FsPermission perm = FsPermission.createImmutable((short)0755);
  PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
  long id = 0;
  INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory dir = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    dir = new INodeDirectory(++id, component, permstatus, 0);
    prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
    prev = dir;
  }
  return dir; // Last Inode in the chain
}
Project: aliyun-oss-hadoop-fs
File: TestDefaultBlockPlacementPolicy.java
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
Project: aliyun-oss-hadoop-fs
File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();
  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");
    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Project: aliyun-oss-hadoop-fs
File: FSImageTestUtil.java
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser",
      "fakegroup", FsPermission.createImmutable((short)0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
Project: aliyun-oss-hadoop-fs
File: NativeAzureFileSystem.java
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission()
      .equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
Project: aliyun-oss-hadoop-fs
File: NativeAzureFileSystem.java
@Override
public void setOwner(Path p, String username, String groupname)
    throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  PermissionStatus newPermissionStatus = new PermissionStatus(
      username == null ?
          metadata.getPermissionStatus().getUserName() : username,
      groupname == null ?
          metadata.getPermissionStatus().getGroupName() : groupname,
      metadata.getPermissionStatus().getPermission());
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, newPermissionStatus);
  } else {
    store.changePermissionStatus(key, newPermissionStatus);
  }
}
Project: big-c
File: NativeAzureFileSystem.java
@Override
public void setOwner(Path p, String username, String groupname)
    throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  PermissionStatus newPermissionStatus = new PermissionStatus(
      username == null ?
          metadata.getPermissionStatus().getUserName() : username,
      groupname == null ?
          metadata.getPermissionStatus().getGroupName() : groupname,
      metadata.getPermissionStatus().getPermission());
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, newPermissionStatus);
  } else {
    store.changePermissionStatus(key, newPermissionStatus);
  }
}
Project: big-c
File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();
  return new INodeFileAttributes.SnapshotCopy(name, permissions, null,
      modificationTime, accessTime, replication, preferredBlockSize,
      (byte) 0, null);
}
Project: big-c
File: FSDirMkdirOp.java
/**
 * Create a directory at the path specified by the parent.
 */
private static INodesInPath unprotectedMkdir(FSDirectory fsd, long inodeId,
    INodesInPath parent, byte[] name, PermissionStatus permission,
    List<AclEntry> aclEntries, long timestamp)
    throws QuotaExceededException, AclException, FileAlreadyExistsException {
  assert fsd.hasWriteLock();
  assert parent.getLastINode() != null;
  if (!parent.getLastINode().isDirectory()) {
    throw new FileAlreadyExistsException("Parent path is not a directory: " +
        parent.getPath() + " " + DFSUtil.bytes2String(name));
  }
  final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
      timestamp);
  INodesInPath iip = fsd.addLastINode(parent, dir, true);
  if (iip != null && aclEntries != null) {
    AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
  }
  return iip;
}
Project: hadoop
File: INodeFileAttributes.java
public SnapshotCopy(byte[] name, PermissionStatus permissions,
    AclFeature aclFeature, long modificationTime, long accessTime,
    short replication, long preferredBlockSize, byte storagePolicyID,
    XAttrFeature xAttrsFeature) {
  super(name, permissions, aclFeature, modificationTime, accessTime,
      xAttrsFeature);
  header = HeaderFormat.toLong(preferredBlockSize, replication,
      storagePolicyID);
}
Project: hadoop
File: INodeDirectoryAttributes.java
public CopyWithQuota(byte[] name, PermissionStatus permissions,
    AclFeature aclFeature, long modificationTime, long nsQuota,
    long dsQuota, EnumCounters<StorageType> typeQuotas,
    XAttrFeature xAttrsFeature) {
  super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
  this.quota = new QuotaCounts.Builder().nameSpace(nsQuota).
      storageSpace(dsQuota).typeSpaces(typeQuotas).build();
}
Project: hadoop
File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 4) {
      throw new IOException("Incorrect data format. "
          + "symlink operation.");
    }
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when the editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  this.value = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
  this.permissionStatus = PermissionStatus.read(in);
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: hadoop
File: FSEditLogOp.java
public static void permissionStatusToXml(ContentHandler contentHandler,
    PermissionStatus perm) throws SAXException {
  contentHandler.startElement("", "", "PERMISSION_STATUS",
      new AttributesImpl());
  XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName());
  XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName());
  fsPermissionToXml(contentHandler, perm.getPermission());
  contentHandler.endElement("", "", "PERMISSION_STATUS");
}
Project: hadoop
File: FSEditLogOp.java
public static PermissionStatus permissionStatusFromXml(Stanza st)
    throws InvalidXmlException {
  Stanza status = st.getChildren("PERMISSION_STATUS").get(0);
  String username = status.getValue("USERNAME");
  String groupname = status.getValue("GROUPNAME");
  FsPermission mode = fsPermissionFromXml(status);
  return new PermissionStatus(username, groupname, mode);
}
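Taken together, permissionStatusToXml and permissionStatusFromXml exchange a PERMISSION_STATUS stanza whose children are USERNAME, GROUPNAME, and whatever element fsPermissionToXml writes for the mode. Assuming that element is MODE holding the numeric mode value (the element name is an assumption, not shown in this listing), a stanza would look roughly like the string below.

class PermissionStatusXmlShape {
  // Illustrative only: a PERMISSION_STATUS stanza as these helpers would
  // produce or consume it. 420 is the decimal value of octal 0644.
  static final String EXAMPLE =
      "<PERMISSION_STATUS>"
          + "<USERNAME>alice</USERNAME>"
          + "<GROUPNAME>staff</GROUPNAME>"
          + "<MODE>420</MODE>"
          + "</PERMISSION_STATUS>";
}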
Project: hadoop
File: FSDirSymlinkOp.java
static INodeSymlink unprotectedAddSymlink(FSDirectory fsd, INodesInPath iip,
    byte[] localName, long id, String target, long mtime, long atime,
    PermissionStatus perm)
    throws UnresolvedLinkException, QuotaExceededException {
  assert fsd.hasWriteLock();
  final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
      target);
  symlink.setLocalName(localName);
  return fsd.addINode(iip, symlink) != null ? symlink : null;
}