Example usage of the Java class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics

Project: aliyun-oss-hadoop-fs — File: TestDecommission.java
@Test
public void testUsedCapacity() throws Exception {
  final int numNamenodes = 1;
  final int numDatanodes = 2;

  startCluster(numNamenodes, numDatanodes, conf);
  cluster.waitActive();

  FSNamesystem ns = cluster.getNamesystem(0);
  BlockManager blockManager = ns.getBlockManager();
  DatanodeStatistics stats =
      blockManager.getDatanodeManager().getDatanodeStatistics();

  // Snapshot the aggregate cluster statistics before decommissioning.
  long usedBefore = stats.getCapacityUsed();
  long totalBefore = stats.getCapacityTotal();
  long blockPoolUsedBefore = stats.getBlockPoolUsed();

  // Decommission a single datanode under namenode 0 and wait for it to
  // reach the DECOMMISSIONED admin state.
  ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
  namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
  ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
  DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
      AdminStates.DECOMMISSIONED);
  decommissionedNodes.add(decomNode);

  // Snapshot again; each aggregate should have moved now that one of the
  // two datanodes no longer contributes to the totals.
  long usedAfter = stats.getCapacityUsed();
  long totalAfter = stats.getCapacityTotal();
  long blockPoolUsedAfter = stats.getBlockPoolUsed();

  assertTrue("DfsUsedCapacity should not be the same after a node has " +
      "been decommissioned!", usedBefore != usedAfter);
  assertTrue("TotalCapacity should not be the same after a node has " +
      "been decommissioned!", totalBefore != totalAfter);
  assertTrue("BlockPoolUsed should not be the same after a node has " +
      "been decommissioned!", blockPoolUsedBefore != blockPoolUsedAfter);
}