Java class org.apache.hadoop.hdfs.protocol.DatanodeInfo code examples
Project: hadoop
File: BlockPlacementPolicyDefault.java
@Override
public BlockPlacementStatus verifyBlockPlacement(String srcPath,
LocatedBlock lBlk, int numberOfReplicas) {
DatanodeInfo[] locs = lBlk.getLocations();
if (locs == null)
locs = DatanodeDescriptor.EMPTY_ARRAY;
int numRacks = clusterMap.getNumOfRacks();
if(numRacks <= 1) // only one rack
return new BlockPlacementStatusDefault(
Math.min(numRacks, numberOfReplicas), numRacks);
int minRacks = Math.min(2, numberOfReplicas);
// 1. Check that all locations are different.
// 2. Count locations on different racks.
Set<String> racks = new TreeSet<String>();
for (DatanodeInfo dn : locs)
racks.add(dn.getNetworkLocation());
return new BlockPlacementStatusDefault(racks.size(), minRacks);
}
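The rack check above reduces to counting distinct network locations among the replicas and comparing that count with min(2, numberOfReplicas). A minimal standalone sketch of the same idea, using only plain java.util collections; the helper name, sample locations, and replication factor are assumptions for illustration:
// Sketch only: mirrors the rack-counting step with plain strings instead of
// DatanodeInfo objects (requires java.util.Set / java.util.TreeSet).
static boolean placementLooksSatisfied(String[] networkLocations, int numberOfReplicas) {
  java.util.Set<String> racks = new java.util.TreeSet<>();
  for (String location : networkLocations) {
    racks.add(location);
  }
  return racks.size() >= Math.min(2, numberOfReplicas);
}
// Example: two replicas on /rack1 and one on /rack2 satisfy min(2, 3) = 2 racks:
// placementLooksSatisfied(new String[] {"/rack1", "/rack1", "/rack2"}, 3) == true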
Project: hadoop
File: DFSInputStream.java
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
byte[] buf, int offset,
Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
throws IOException {
block = getBlockAt(block.getStartOffset());
while (true) {
DNAddrPair addressPair = chooseDataNode(block, null);
try {
actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
corruptedBlockMap);
return;
} catch (IOException e) {
// Ignore. Already processed inside the function.
// Loop through to try the next node.
}
}
}
Project: hadoop
File: DFSInputStream.java
/**
* Seek to the given position on a node other than the current node.
* Returns true if a node other than the current node was found;
* returns false if no other node could be found.
*/
@Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
boolean markedDead = deadNodes.containsKey(currentNode);
addToDeadNodes(currentNode);
DatanodeInfo oldNode = currentNode;
DatanodeInfo newNode = blockSeekTo(targetPos);
if (!markedDead) {
/* remove it from deadNodes. blockSeekTo could have cleared
* deadNodes and added currentNode again. That's ok. */
deadNodes.remove(oldNode);
}
if (!oldNode.getDatanodeUuid().equals(newNode.getDatanodeUuid())) {
currentNode = newNode;
return true;
} else {
return false;
}
}
Project: hadoop
File: InvalidateBlocks.java
/**
* Add a block to the block collection
* which will be invalidated on the specified datanode.
*/
synchronized void add(final Block block, final DatanodeInfo datanode,
final boolean log) {
LightWeightHashSet<Block> set = node2blocks.get(datanode);
if (set == null) {
set = new LightWeightHashSet<Block>();
node2blocks.put(datanode, set);
}
if (set.add(block)) {
numBlocks++;
if (log) {
NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
getClass().getSimpleName(), block, datanode);
}
}
}
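The get-or-create step on node2blocks is a common map idiom. Assuming node2blocks is a java.util.Map and the code runs on Java 8 or later, the same lookup could be written with computeIfAbsent; a sketch (logging omitted):
// Sketch only: equivalent get-or-create using Map#computeIfAbsent (Java 8+).
LightWeightHashSet<Block> set =
    node2blocks.computeIfAbsent(datanode, k -> new LightWeightHashSet<Block>());
if (set.add(block)) {
  numBlocks++;
}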
Project: hadoop
File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
final int size = node2blocks.values().size();
out.println("Metasave: Blocks " + numBlocks
+ " waiting deletion from " + size + " datanodes.");
if (size == 0) {
return;
}
for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
final LightWeightHashSet<Block> blocks = entry.getValue();
if (blocks.size() > 0) {
out.println(entry.getKey());
out.println(blocks);
}
}
}
Project: hadoop
File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
DatanodeInfo[] dnInfos = {
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
AdminStates.DECOMMISSION_INPROGRESS),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
AdminStates.DECOMMISSIONED),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
AdminStates.NORMAL)
};
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
lb.setBlockToken(new Token<BlockTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
new Text("service")));
return lb;
}
Project: hadoop
File: TestPBHelper.java
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
assertEquals(dn1.getBlockPoolUsedPercent(),
dn2.getBlockPoolUsedPercent(), DELTA);
assertEquals(dn1.getCapacity(), dn2.getCapacity());
assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
assertEquals(dn1.getDfsUsedPercent(), dn2.getDfsUsedPercent(), DELTA);
assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
assertEquals(dn1.getHostName(), dn2.getHostName());
assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
assertEquals(dn1.getLevel(), dn2.getLevel());
assertEquals(dn1.getNetworkLocation(), dn2.getNetworkLocation());
}
Project: hadoop
File: DFSClient.java
/**
* Connect to the given datanode's data transfer port, and return
* the resulting IOStreamPair. This includes encryption wrapping, etc.
*/
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
LocatedBlock lb) throws IOException {
boolean success = false;
Socket sock = null;
try {
sock = socketFactory.createSocket();
String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
sock.setSoTimeout(timeout);
OutputStream unbufOut = NetUtils.getOutputStream(sock);
InputStream unbufIn = NetUtils.getInputStream(sock);
IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
lb.getBlockToken(), dn);
success = true;
return ret;
} finally {
if (!success) {
IOUtils.closeSocket(sock);
}
}
}
Project: hadoop
File: ReportBadBlockAction.java
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
if (bpRegistration == null) {
return;
}
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
String[] uuids = { storageUuid };
StorageType[] types = { storageType };
LocatedBlock[] locatedBlock = { new LocatedBlock(block,
dnArr, uuids, types) };
try {
bpNamenode.reportBadBlocks(locatedBlock);
} catch (RemoteException re) {
DataNode.LOG.info("reportBadBlock encountered RemoteException for "
+ "block: " + block , re);
} catch (IOException e) {
throw new BPServiceActorActionException("Failed to report bad block "
+ block + " to namenode: ");
}
}
Project: hadoop
File: DfsClientShmManager.java
public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer,
MutableBoolean usedPeer, ExtendedBlockId blockId,
String clientName) throws IOException {
lock.lock();
try {
if (closed) {
LOG.trace(this + ": the DfsClientShmManager isclosed.");
return null;
}
EndpointShmManager shmManager = datanodes.get(datanode);
if (shmManager == null) {
shmManager = new EndpointShmManager(datanode);
datanodes.put(datanode, shmManager);
}
return shmManager.allocSlot(peer, usedPeer, clientName, blockId);
} finally {
lock.unlock();
}
}
Project: hadoop
File: TestDFSClientRetries.java
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock goodLocatedBlock = goodBlockList.get(0);
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
},
goodLocatedBlock.getStartOffset(),
false);
List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
badBlocks.add(badLocatedBlock);
return new LocatedBlocks(goodBlockList.getFileLength(), false,
badBlocks, null, true,
null);
}
Project: hadoop
File: FSNamesystem.java
DatanodeInfo[] datanodeReport(final DatanodeReportType type
) throws AccessControlException, StandbyException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
readLock();
try {
checkOperation(OperationCategory.UNCHECKED);
final DatanodeManager dm = getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
DatanodeInfo[] arr = new DatanodeInfo[results.size()];
for (int i=0; i<arr.length; i++) {
arr[i] = new DatanodeInfo(results.get(i));
}
return arr;
} finally {
readUnlock();
}
}
Project: hadoop
File: FSNamesystem.java
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
) throws AccessControlException, StandbyException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
readLock();
try {
checkOperation(OperationCategory.UNCHECKED);
final DatanodeManager dm = getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
for (int i = 0; i < reports.length; i++) {
final DatanodeDescriptor d = datanodes.get(i);
reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
d.getStorageReports());
}
return reports;
} finally {
readUnlock();
}
}
Project: hadoop
File: NameNodeRpcServer.java
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
String[] favoredNodes)
throws IOException {
checkNNStartup();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
+ " fileId=" + fileId + " for " + clientName);
}
Set<Node> excludedNodesSet = null;
if (excludedNodes != null) {
excludedNodesSet = new HashSet<Node>(excludedNodes.length);
for (Node node : excludedNodes) {
excludedNodesSet.add(node);
}
}
List<String> favoredNodesList = (favoredNodes == null) ? null
: Arrays.asList(favoredNodes);
LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
clientName, previous, excludedNodesSet, favoredNodesList);
if (locatedBlock != null)
metrics.incrAddBlockOps();
return locatedBlock;
}
Project: hadoop
File: Dispatcher.java
private boolean shouldIgnore(DatanodeInfo dn) {
// ignore decommissioned nodes
final boolean decommissioned = dn.isDecommissioned();
// ignore decommissioning nodes
final boolean decommissioning = dn.isDecommissionInProgress();
// ignore nodes in exclude list
final boolean excluded = Util.isExcluded(excludedNodes, dn);
// ignore nodes not in the include list (if include list is not empty)
final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
if (decommissioned || decommissioning || excluded || notIncluded) {
if (LOG.isTraceEnabled()) {
LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
+ decommissioning + ", " + excluded + ", " + notIncluded);
}
return true;
}
return false;
}
Project: hadoop
File: NNThroughputBenchmark.java
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
ExtendedBlock prevBlock = null;
for(int jdx = 0; jdx < blocksPerFile; jdx++) {
LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) {
int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
loc.getBlock().getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
.getBlock().getBlockPoolId(), report);
}
}
return prevBlock;
}
Project: hadoop
File: BlockManager.java
/**
* Schedule blocks for deletion at datanodes
* @param nodesToProcess number of datanodes to schedule deletion work for
* @return total number of blocks scheduled for deletion
*/
int computeInvalidateWork(int nodesToProcess) {
final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes();
Collections.shuffle(nodes);
nodesToProcess = Math.min(nodes.size(), nodesToProcess);
int blockCnt = 0;
for (DatanodeInfo dnInfo : nodes) {
int blocks = invalidateWorkForOneNode(dnInfo);
if (blocks > 0) {
blockCnt += blocks;
if (--nodesToProcess == 0) {
break;
}
}
}
return blockCnt;
}
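The method shuffles the candidate datanodes and stops once at most nodesToProcess of them have yielded work. The same shuffle-then-limit pattern in isolation, with plain strings standing in for DatanodeInfo (all names and values below are illustrative assumptions; requires java.util imports):
// Sketch only: shuffle the candidates, then stop after the requested number
// of nodes has produced work.
List<String> candidates = new ArrayList<>(Arrays.asList("dn1", "dn2", "dn3", "dn4"));
Collections.shuffle(candidates);
int remaining = Math.min(candidates.size(), 2); // nodesToProcess = 2 assumed
int totalWork = 0;
for (String dn : candidates) {
  int work = 1; // stand-in for invalidateWorkForOneNode(dn)
  if (work > 0) {
    totalWork += work;
    if (--remaining == 0) {
      break;
    }
  }
}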
Project: hadoop-oss
File: NuCypherExtUtilClient.java
/**
* Indicate that a block replica on the specified datanode is corrupted
*/
public void addCorruptedBlock(ExtendedBlock blk, DatanodeInfo node) {
Set<DatanodeInfo> dnSet = corruptionMap.get(blk);
if (dnSet == null) {
dnSet = new HashSet<>();
corruptionMap.put(blk, dnSet);
}
if (!dnSet.contains(node)) {
dnSet.add(node);
}
}
Project: hadoop
File: TestDataTransferProtocol.java
void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
long newGS, DataChecksum checksum) throws IOException {
sender.writeBlock(block, StorageType.DEFAULT,
BlockTokenSecretManager.DUMMY_TOKEN, "cl",
new DatanodeInfo[1], new StorageType[1], null, stage,
0, block.getNumBytes(), block.getNumBytes(), newGS,
checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
}
Project: hadoop
File: TestDFSUtil.java
/**
* Test conversion of LocatedBlock to BlockLocation
*/
@Test
public void testLocatedBlocks2Locations() {
DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds = new DatanodeInfo[1];
ds[0] = d;
// ok
ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
// corrupt
ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
List<LocatedBlock> ls = Arrays.asList(l1, l2);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
assertTrue("expected 2 blocks but got " + bs.length,
bs.length == 2);
int corruptCount = 0;
for (BlockLocation b: bs) {
if (b.isCorrupt()) {
corruptCount++;
}
}
assertTrue("expected 1 corrupt files but got " + corruptCount,
corruptCount == 1);
// test an empty location
bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0, bs.length);
}
Project: hadoop
File: DFSOutputStream.java
@VisibleForTesting
public synchronized DatanodeInfo[] getPipeline() {
if (streamer == null) {
return null;
}
DatanodeInfo[] currentNodes = streamer.getNodes();
if (currentNodes == null) {
return null;
}
DatanodeInfo[] value = new DatanodeInfo[currentNodes.length];
for (int i = 0; i < currentNodes.length; i++) {
value[i] = currentNodes[i];
}
return value;
}
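The manual loop produces a defensive copy so callers cannot mutate the streamer's internal array. On any modern JDK the same copy can be expressed with java.util.Arrays.copyOf; a sketch of the equivalent return statement:
// Equivalent defensive copy (sketch only):
return Arrays.copyOf(currentNodes, currentNodes.length);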
Project: hadoop
File: DFSOutputStream.java
/**
* Note that this is not a public API;
* use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
*
* @return the number of valid replicas of the current block
*/
public synchronized int getCurrentBlockReplication() throws IOException {
dfsClient.checkOpen();
checkClosed();
if (streamer == null) {
return blockReplication; // no pipeline, return repl factor of file
}
DatanodeInfo[] currentNodes = streamer.getNodes();
if (currentNodes == null) {
return blockReplication; // no pipeline, return repl factor of file
}
return currentNodes.length;
}
Project: hadoop
File: DFSUtil.java
@Override
public int compare(DatanodeInfo a, DatanodeInfo b) {
// Decommissioned nodes will still be moved to the end of the list
if (a.isDecommissioned()) {
return b.isDecommissioned() ? 0 : 1;
} else if (b.isDecommissioned()) {
return -1;
}
// Stale nodes will be moved behind the normal nodes
boolean aStale = a.isStale(staleInterval);
boolean bStale = b.isStale(staleInterval);
return aStale == bStale ? 0 : (aStale ? 1 : -1);
}
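The hand-written ordering pushes decommissioned nodes to the very end and stale nodes behind healthy ones. On Java 8+ a roughly equivalent comparator can be composed declaratively; this is only a sketch, assuming the same staleInterval field is in scope and accepting one difference (it additionally orders decommissioned nodes among themselves by staleness, which the original treats as equal):
// Sketch only: false sorts before true, so decommissioned and stale nodes sink
// toward the end of the list (requires java.util.Comparator).
Comparator<DatanodeInfo> byDecommission =
    Comparator.comparing(DatanodeInfo::isDecommissioned);
Comparator<DatanodeInfo> byStaleness =
    Comparator.comparing((DatanodeInfo dn) -> dn.isStale(staleInterval));
Comparator<DatanodeInfo> order = byDecommission.thenComparing(byStaleness);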
Project: hadoop
File: PBHelper.java
public static DatanodeInfoProto.AdminState convert(
final DatanodeInfo.AdminStates inAs) {
switch (inAs) {
case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
case DECOMMISSION_INPROGRESS:
return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
default: return DatanodeInfoProto.AdminState.NORMAL;
}
}
Project: hadoop
File: TestDecommissioningStatus.java
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
FileSystem localFileSys, int nodeIndex) throws IOException {
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
String nodename = info[nodeIndex].getXferAddr();
decommissionNode(namesystem, localFileSys, nodename);
return nodename;
}
Project: hadoop
File: DFSInputStream.java
/**
* Get the best node from which to stream the data.
* @param block LocatedBlock, containing nodes in priority order.
* @param ignoredNodes Do not choose nodes in this array (may be null)
* @return The DNAddrPair of the best node.
* @throws IOException
*/
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
Collection<DatanodeInfo> ignoredNodes) throws IOException {
DatanodeInfo[] nodes = block.getLocations();
StorageType[] storageTypes = block.getStorageTypes();
DatanodeInfo chosenNode = null;
StorageType storageType = null;
if (nodes != null) {
for (int i = 0; i < nodes.length; i++) {
if (!deadNodes.containsKey(nodes[i])
&& (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
chosenNode = nodes[i];
// Storage types are ordered to correspond with nodes, so use the same
// index to get storage type.
if (storageTypes != null && i < storageTypes.length) {
storageType = storageTypes[i];
}
break;
}
}
}
if (chosenNode == null) {
throw new IOException("No live nodes contain block " + block.getBlock() +
" after checking nodes = " + Arrays.toString(nodes) +
", ignoredNodes = " + ignoredNodes);
}
final String dnAddr =
chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
if (DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
}
InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
return new DNAddrPair(chosenNode, targetAddr, storageType);
}
Project: hadoop
File: TestSetTimes.java
private void printDatanodeReport(DatanodeInfo[] info) {
System.out.println("-------------------------------------------------");
for (int i = 0; i < info.length; i++) {
System.out.println(info[i].getDatanodeReport());
System.out.println();
}
}
Project: hadoop
File: BlockManager.java
/**
* Get blocks to invalidate for <i>nodeId</i>
* in {@link #invalidateBlocks}.
*
* @return number of blocks scheduled for removal during this iteration.
*/
private int invalidateWorkForOneNode(DatanodeInfo dn) {
final List<Block> toInvalidate;
namesystem.writeLock();
try {
// blocks should not be replicated or removed if safe mode is on
if (namesystem.isInSafeMode()) {
LOG.debug("In safemode, not computing replication work");
return 0;
}
try {
DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
if (dnDescriptor == null) {
LOG.warn("DataNode " + dn + " cannot be found with UUID " +
dn.getDatanodeUuid() + ", removing block invalidation work.");
invalidateBlocks.remove(dn);
return 0;
}
toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);
if (toInvalidate == null) {
return 0;
}
} catch(UnregisteredNodeException une) {
return 0;
}
} finally {
namesystem.writeUnlock();
}
blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
dn, toInvalidate);
return toInvalidate.size();
}
Project: hadoop
File: PBHelper.java
static public DatanodeInfo convert(DatanodeInfoProto di) {
if (di == null) return null;
return new DatanodeInfo(
PBHelper.convert(di.getId()),
di.hasLocation() ? di.getLocation() : null ,
di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(),
di.getLastUpdate(), di.getLastUpdateMonotonic(),
di.getXceiverCount(), PBHelper.convert(di.getAdminState()));
}
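A hedged usage sketch of round-tripping a DatanodeInfo through its protobuf representation, assuming the companion PBHelper.convert(DatanodeInfo) overload and an existing DatanodeInfo instance named info:
// Sketch only: DatanodeInfo -> DatanodeInfoProto -> DatanodeInfo round trip.
DatanodeInfoProto proto = PBHelper.convert(info);  // reverse overload (assumed available)
DatanodeInfo restored = PBHelper.convert(proto);   // the method shown above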
Project: hadoop
File: ClientNamenodeProtocolTranslatorPB.java
@Override
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto
.newBuilder()
.setType(PBHelper.convert(type)).build();
try {
return PBHelper.convert(
rpcProxy.getDatanodeReport(null, req).getDiList());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
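Client code normally reaches this RPC indirectly; for example, DistributedFileSystem exposes getDataNodeStats(...), which returns the same DatanodeInfo[]. A usage sketch; the namenode URI is an assumption, and the usual Configuration/FileSystem/HdfsConstants imports are assumed:
// Sketch only: list live datanodes through DistributedFileSystem.
Configuration conf = new Configuration();
DistributedFileSystem dfs = (DistributedFileSystem)
    FileSystem.get(URI.create("hdfs://namenode:8020"), conf); // URI assumed
DatanodeInfo[] live = dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE);
for (DatanodeInfo dn : live) {
  System.out.println(dn.getHostName() + " " + dn.getNetworkLocation());
}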
Project: hadoop
File: Receiver.java
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
TraceScope traceScope = continueTraceSpan(proto.getHeader(),
proto.getClass().getSimpleName());
try {
writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
PBHelper.convertStorageType(proto.getStorageType()),
PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
proto.getHeader().getClientName(),
targets,
PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
PBHelper.convert(proto.getSource()),
fromProto(proto.getStage()),
proto.getPipelineSize(),
proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
proto.getLatestGenerationStamp(),
fromProto(proto.getRequestedChecksum()),
(proto.hasCachingStrategy() ?
getCachingStrategy(proto.getCachingStrategy()) :
CachingStrategy.newDefaultStrategy()),
(proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
(proto.hasPinning() ? proto.getPinning(): false),
(PBHelper.convertBooleanList(proto.getTargetPinningsList())));
} finally {
if (traceScope != null) traceScope.close();
}
}
Project: hadoop
File: DatanodeInfoWithStorage.java
public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
StorageType storageType) {
super(from);
this.storageID = storageID;
this.storageType = storageType;
setSoftwareVersion(from.getSoftwareVersion());
setDependentHostNames(from.getDependentHostNames());
setLevel(from.getLevel());
setParent(from.getParent());
}
Project: hadoop
File: DataNode.java
private static void logRecoverBlock(String who, RecoveringBlock rb) {
ExtendedBlock block = rb.getBlock();
DatanodeInfo[] targets = rb.getLocations();
LOG.info(who + " calls recoverBlock(" + block
+ ", targets=[" + Joiner.on(", ").join(targets) + "]"
+ ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
}
Project: hadoop
File: PBHelper.java
public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
for (int i = 0; i < infos.length; i++) {
infos[i] = PBHelper.convert(proto.get(i));
}
return infos;
}
Project: hadoop
File: DfsClientShmManager.java
@VisibleForTesting
public void visit(Visitor visitor) throws IOException {
lock.lock();
try {
HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info =
new HashMap<DatanodeInfo, PerDatanodeVisitorInfo>();
for (Entry<DatanodeInfo, EndpointShmManager> entry :
datanodes.entrySet()) {
info.put(entry.getKey(), entry.getValue().getVisitorInfo());
}
visitor.visit(info);
} finally {
lock.unlock();
}
}
Project: hadoop
File: DFSAdmin.java
/**
* Display each rack and the nodes assigned to that rack, as determined
* by the NameNode, in a hierarchical manner. The nodes and racks are
* sorted alphabetically.
*
* @throws IOException If an error occurs while getting the datanode report
*/
public int printTopology() throws IOException {
DistributedFileSystem dfs = getDFS();
final DatanodeInfo[] report = dfs.getDataNodeStats();
// Build a map of rack -> nodes from the datanode report
HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
for(DatanodeInfo dni : report) {
String location = dni.getNetworkLocation();
String name = dni.getName();
if(!tree.containsKey(location)) {
tree.put(location, new TreeSet<String>());
}
tree.get(location).add(name);
}
// Sort the racks (and nodes) alphabetically, display in order
ArrayList<String> racks = new ArrayList<String>(tree.keySet());
Collections.sort(racks);
for(String r : racks) {
System.out.println("Rack: " + r);
TreeSet<String> nodes = tree.get(r);
for(String n : nodes) {
System.out.print(" " + n);
String hostname = NetUtils.getHostNameOfIP(n);
if(hostname != null)
System.out.print(" (" + hostname + ")");
System.out.println();
}
System.out.println();
}
return 0;
}
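This method backs the dfsadmin topology listing: running hdfs dfsadmin -printTopology against a cluster prints each rack followed by its datanodes, one per line, in the sorted order built above.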
Project: hadoop
File: TestShortCircuitLocalRead.java
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
LocatedBlocks lb = cluster.getNameNode().getRpcServer()
.getBlockLocations("/tmp/x", 0, 16);
// Create a new block object, because the block inside LocatedBlock at
// namenode is of type BlockInfo.
ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
ClientDatanodeProtocol proxy =
DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
try {
proxy.getBlockLocalPathInfo(blk, token);
Assert.fail("The call should have failed as this user "
+ " is not allowed to call getBlockLocalPathInfo");
} catch (IOException ex) {
Assert.assertTrue(ex.getMessage().contains(
"not allowed to call getBlockLocalPathInfo"));
}
} finally {
fs.close();
cluster.shutdown();
}
}
Project: ditb
File: RestartRandomDataNodeAction.java
public ServerName[] getDataNodes() throws IOException {
DistributedFileSystem fs = (DistributedFileSystem) FSUtils.getRootDir(getConf())
.getFileSystem(getConf());
DFSClient dfsClient = fs.getClient();
List<ServerName> hosts = new LinkedList<ServerName>();
for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
}
return hosts.toArray(new ServerName[hosts.size()]);
}
Project: hadoop
File: NamenodeFsck.java
private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
TreeSet<DatanodeInfo> deadNodes) throws IOException {
if ((nodes == null) ||
(nodes.length - deadNodes.size() < 1)) {
throw new IOException("No live nodes contain current block");
}
DatanodeInfo chosenNode;
do {
chosenNode = nodes[DFSUtil.getRandom().nextInt(nodes.length)];
} while (deadNodes.contains(chosenNode));
return chosenNode;
}
Project: hadoop
File: PBHelper.java
/**
* Copy the entries of {@code dnInfos}, starting at {@code startIdx},
* into a list of protobuf messages.
*/
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos, int startIdx) {
if (dnInfos == null)
return null;
ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
.newArrayListWithCapacity(dnInfos.length);
for (int i = startIdx; i < dnInfos.length; i++) {
protos.add(convert(dnInfos[i]));
}
return protos;
}