Example source code for the Java class org.apache.hadoop.util.ChunkedArrayList
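ChunkedArrayList stores its elements as a list of bounded-size chunks rather than one contiguous backing array, so a very large list never needs a single huge allocation and growing it never copies the existing elements. That makes it a good fit for the append-then-iterate pattern in the snippets below, where the NameNode accumulates potentially huge lists of removed INodes or blocks and then walks or clears them. Before the project snippets, here is a minimal standalone sketch of that usage pattern; the demo class and data are illustrative and not taken from any of the projects.

import java.util.List;

import org.apache.hadoop.util.ChunkedArrayList;

public class ChunkedArrayListDemo {
  public static void main(String[] args) {
    // Appends land in bounded-size chunks rather than one growing backing array.
    List<String> removedPaths = new ChunkedArrayList<>();
    for (int i = 0; i < 100_000; i++) {
      removedPaths.add("/tmp/file-" + i);
    }

    // Iteration walks the chunks in insertion order.
    long totalChars = 0;
    for (String path : removedPaths) {
      totalChars += path.length();
    }
    System.out.println(removedPaths.size() + " entries, " + totalChars + " characters");

    // clear() releases every chunk, mirroring the removedINodes.clear()
    // calls in the snippets below.
    removedPaths.clear();
  }
}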
Project: hadoop
File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: aliyun-oss-hadoop-fs
File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  List<Long> removedUCFiles = new ChunkedArrayList<>();
  INode.ReclaimContext context = new INode.ReclaimContext(
      bsps, collectedBlocks, removedINodes, removedUCFiles);
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(context);
    filesDeleted = true;
  } else {
    oldDstChild.cleanSubtree(context, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId());
    filesDeleted = context.quotaDelta().getNsDelta() >= 0;
  }
  fsd.updateReplicationFactor(context.collectedBlocks()
      .toUpdateReplicationInfo());
  fsd.getFSNamesystem().removeLeasesAndINodes(
      removedUCFiles, removedINodes, false);
  return filesDeleted;
}
Project: big-c
File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: hadoop
File: FSDirSnapshotOp.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws IOException
 */
static INode.BlocksMapUpdateInfo deleteSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }
  INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
  ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<INode>();
  fsd.writeLock();
  try {
    snapshotManager.deleteSnapshot(iip, snapshotName, collectedBlocks,
        removedINodes);
    fsd.removeFromInodeMap(removedINodes);
  } finally {
    fsd.writeUnlock();
  }
  removedINodes.clear();
  fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
      logRetryCache);
  return collectedBlocks;
}
Project: aliyun-oss-hadoop-fs
File: FSDirSnapshotOp.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws IOException
 */
static INode.BlocksMapUpdateInfo deleteSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }
  INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
  ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<>();
  INode.ReclaimContext context = new INode.ReclaimContext(
      fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null);
  fsd.writeLock();
  try {
    snapshotManager.deleteSnapshot(iip, snapshotName, context);
    fsd.updateCount(iip, context.quotaDelta(), false);
    fsd.removeFromInodeMap(removedINodes);
    fsd.updateReplicationFactor(context.collectedBlocks()
        .toUpdateReplicationInfo());
  } finally {
    fsd.writeUnlock();
  }
  removedINodes.clear();
  fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
      logRetryCache);
  return collectedBlocks;
}
Project: big-c
File: FSDirSnapshotOp.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws IOException
 */
static INode.BlocksMapUpdateInfo deleteSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache)
    throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }
  INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
  ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<INode>();
  fsd.writeLock();
  try {
    snapshotManager.deleteSnapshot(iip, snapshotName, collectedBlocks,
        removedINodes);
    fsd.removeFromInodeMap(removedINodes);
  } finally {
    fsd.writeUnlock();
  }
  removedINodes.clear();
  fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
      logRetryCache);
  return collectedBlocks;
}
Project: hadoop
File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<Block>();
}
Project: aliyun-oss-hadoop-fs
File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<>();
  toUpdateReplicationInfo = new ChunkedArrayList<>();
}
Project: big-c
File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<Block>();
}
Project: hadoop-2.6.0-cdh5.4.3
File: FSNamesystem.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under
 * the directory are collected and deleted a small number at a time holding
 * the {@link FSNamesystem} lock.
 * <p>
 * For small directory or file the deletion is done in one shot.
 *
 * @see ClientProtocol#delete(String, boolean) for description of exceptions
 */
private boolean deleteInternal(String src, boolean recursive,
    boolean enforcePermission, boolean logRetryCache)
    throws AccessControlException, SafeModeException, UnresolvedLinkException,
    IOException {
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  List<INode> removedINodes = new ChunkedArrayList<INode>();
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.WRITE);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  boolean ret = false;
  waitForLoadingFSImage();
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete " + src);
    src = resolvePath(src, pathComponents);
    if (!recursive && dir.isNonEmptyDirectory(src)) {
      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
    }
    if (enforcePermission && isPermissionEnabled) {
      checkPermission(pc, src, false, null, FsAction.WRITE, null,
          FsAction.ALL, true, false);
    }
    long mtime = now();
    // Unlink the target directory from directory tree
    long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
        mtime);
    if (filesRemoved < 0) {
      return false;
    }
    getEditLog().logDelete(src, mtime, logRetryCache);
    incrDeletedFileCount(filesRemoved);
    // Blocks/INodes will be handled later
    removePathAndBlocks(src, null, removedINodes, true);
    ret = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  removeBlocks(collectedBlocks); // Incremental deletion of blocks
  collectedBlocks.clear();
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
        + src + " is removed");
  }
  return ret;
}
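The javadoc above is the main reason the collected blocks and removed INodes go into ChunkedArrayLists: under a large directory they can number in the millions, and they are only appended to and then iterated in small batches while the write lock is re-taken per batch. Below is a minimal, hypothetical sketch of that batching idea as a class fragment; writeLock()/writeUnlock() mirror the snippet above, while removeBlocksInBatches, removeBlockFromBlocksMap, and batchSize are illustrative placeholders, not the actual FSNamesystem API.

// Hypothetical sketch of the incremental deletion described in the javadoc:
// blocks are collected first, then dropped a small batch at a time so the
// namesystem write lock is never held for the entire deletion.
private void removeBlocksInBatches(List<Block> collectedBlocks, int batchSize) {
  Iterator<Block> iter = collectedBlocks.iterator();
  while (iter.hasNext()) {
    writeLock();
    try {
      for (int n = 0; n < batchSize && iter.hasNext(); n++) {
        removeBlockFromBlocksMap(iter.next());  // placeholder per-block cleanup
      }
    } finally {
      writeUnlock();
    }
  }
}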
Project: hadoop-2.6.0-cdh5.4.3
File: FSNamesystem.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws SafeModeException
 * @throws IOException
 */
void deleteSnapshot(String snapshotRoot, String snapshotName)
    throws SafeModeException, IOException {
  checkOperation(OperationCategory.WRITE);
  final FSPermissionChecker pc = getPermissionChecker();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
    if (isPermissionEnabled) {
      checkOwner(pc, snapshotRoot);
    }
    List<INode> removedINodes = new ChunkedArrayList<INode>();
    dir.writeLock();
    try {
      snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
          collectedBlocks, removedINodes);
      dir.removeFromInodeMap(removedINodes);
    } finally {
      dir.writeUnlock();
    }
    removedINodes.clear();
    getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
    success = true;
  } finally {
    writeUnlock();
    RetryCache.setState(cacheEntry, success);
  }
  getEditLog().logSync();
  removeBlocks(collectedBlocks);
  collectedBlocks.clear();
  if (auditLog.isInfoEnabled() && isExternalInvocation()) {
    String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
    logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<Block>();
}
Project: hadoop
File: DecommissionManager.java
/**
 * Returns a list of blocks on a datanode that are insufficiently
 * replicated, i.e. are under-replicated enough to prevent decommission.
 * <p/>
 * As part of this, it also schedules replication work for
 * any under-replicated blocks.
 *
 * @param datanode
 * @return List of insufficiently replicated blocks
 */
private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
    final DatanodeDescriptor datanode) {
  AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
  processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
      insufficient, false);
  return insufficient;
}
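On a dense datanode the returned list can hold a large share of the node's blocks, which is why a ChunkedArrayList is used even though the caller only needs an AbstractList it can append to and later iterate. The fragment below is a rough, hypothetical sketch of the kind of scan the javadoc describes for processBlocksForDecomInternal; isSufficientlyReplicated and scheduleReplicationWork are placeholders, not the actual BlockManager/DecommissionManager API.

// Hypothetical sketch: walk the datanode's blocks, queue replication work for
// under-replicated ones, and accumulate them in the (chunked) result list.
private void scanBlocksForDecommission(Iterator<BlockInfoContiguous> blocks,
    AbstractList<BlockInfoContiguous> insufficient) {
  while (blocks.hasNext()) {
    BlockInfoContiguous block = blocks.next();
    if (isSufficientlyReplicated(block)) {   // placeholder replication check
      continue;
    }
    scheduleReplicationWork(block);          // placeholder: queue re-replication
    insufficient.add(block);                 // appends stay cheap on ChunkedArrayList
  }
}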
Project: aliyun-oss-hadoop-fs
File: DecommissionManager.java
/**
 * Returns a list of blocks on a datanode that are insufficiently replicated
 * or require recovery, i.e. requiring recovery and should prevent
 * decommission.
 * <p/>
 * As part of this, it also schedules replication/recovery work.
 *
 * @return List of blocks requiring recovery
 */
private AbstractList<BlockInfo> handleInsufficientlyStored(
    final DatanodeDescriptor datanode) {
  AbstractList<BlockInfo> insufficient = new ChunkedArrayList<>();
  processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
      insufficient, false);
  return insufficient;
}
Project: big-c
File: DecommissionManager.java
/**
 * Returns a list of blocks on a datanode that are insufficiently
 * replicated, i.e. are under-replicated enough to prevent decommission.
 * <p/>
 * As part of this, it also schedules replication work for
 * any under-replicated blocks.
 *
 * @param datanode
 * @return List of insufficiently replicated blocks
 */
private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
    final DatanodeDescriptor datanode) {
  AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
  processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
      insufficient, false);
  return insufficient;
}