Java Class org.apache.hadoop.hdfs.server.protocol.BlockCommand Example Source Code
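
The snippets below, collected from the hadoop trunk and a number of forks (aliyun-oss-hadoop-fs, big-c, hadoop-EAR, hops, and others), show the three common roles of BlockCommand: a datanode acting on one during asynchronous block deletion, PBHelper translating one to and from its protobuf wire form, and benchmarks or tests consuming one from a heartbeat reply. As a minimal, hypothetical construction sketch (the pool id is a placeholder; the four-argument constructor is the one used by the hadoop-plus/hops PBHelper converters further below):

// Hypothetical sketch: order one block transferred to a (here empty) target list.
Block[] blocks = { new Block(1L, 0L, 1L) };          // blockId, numBytes, generationStamp
DatanodeInfo[][] targets = { new DatanodeInfo[0] };  // one target array per block
BlockCommand cmd = new BlockCommand(
    DatanodeProtocol.DNA_TRANSFER, "example-pool", blocks, targets);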

Project: hadoop    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
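
Note the NO_ACK check above: when the namenode removes a block together with its file, it sets the block length to BlockCommand.NO_ACK so that the datanode skips the deletion notification. A hypothetical namenode-side sketch of that convention, assuming the plain Block.setNumBytes setter (the hops fork's BlockManager.removeBlock, shown further below, uses setNumBytesNoPersistance instead):

// Suppress the deletion ACK: removing the file already purged the block map.
block.setNumBytes(BlockCommand.NO_ACK);
addToInvalidates(block);  // queue the replicas for deletion on the datanodes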
Project: hadoop    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hadoop    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
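
transferBlocks consumes three parallel arrays from the command: index i of each array describes one replication task. A hypothetical sketch of how they line up (the loop body is a placeholder):

Block[] blocks = bcmd.getBlocks();
DatanodeInfo[][] targets = bcmd.getTargets();
String[][] storageIDs = bcmd.getTargetStorageIDs();
for (int i = 0; i < blocks.length; i++) {
  for (int j = 0; j < targets[i].length; j++) {
    // send a copy of blocks[i] to targets[i][j], onto storage storageIDs[i][j]
  }
}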
Project: aliyun-oss-hadoop-fs    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  final long blockLength = blockFile.length();
  final long metaLength = metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
    volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
Project: aliyun-oss-hadoop-fs    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: big-c    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
Project: big-c    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: big-c    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  IOUtils.cleanup(null, volumeRef);
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: hadoop-EAR    File: TestDatanodeDescriptor.java
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(MAX_LIMIT, bc.getBlocks().length);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.getBlocks().length);
}
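
getInvalidateBlocks(maxLimit) hands out at most maxLimit queued blocks per call, so repeated calls page through the queue, as the two assertions above show. A hypothetical draining loop, assuming the method returns null once the queue is empty:

BlockCommand bc;
while ((bc = dd.getInvalidateBlocks(MAX_LIMIT)) != null) {
  // each command carries at most MAX_LIMIT blocks
  handle(bc.getBlocks());  // handle() is a placeholder
}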
Project: hadoop-EAR    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: hadoop-EAR    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
    // send a heartbeat to the name-node
    DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
            DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
    if (cmds != null) {
        for (DatanodeCommand cmd : cmds) {
            if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
                // Send a copy of a block to another datanode
                BlockCommand bcmd = (BlockCommand) cmd;
                return transferBlocks(bcmd.getBlocks(),
                        bcmd.getTargets());
            }
        }
    }
    return 0;
}
Project: hadoop-EAR    File: AvatarNode.java
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }

  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    // standby should send only DNA_RETRY
    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    // only the primary can send DNA_FINALIZE
    return super.blockReport(nodeReg, rep);
  }
}
Project: hadoop-plus    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block "
        + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Project: hadoop-plus    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: hadoop-plus    File: PBHelper.java
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
Project: hadoop-plus    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
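
The two converters above are near-inverses: command actions map one-to-one onto proto actions, while an unrecognized proto action falls through to DNA_UNKNOWN. A hypothetical round-trip sketch (cmd as constructed in the intro sketch at the top):

BlockCommandProto proto = PBHelper.convert(cmd);  // command -> wire form
BlockCommand back = PBHelper.convert(proto);      // wire form -> command
assert back.getAction() == cmd.getAction();
assert back.getBlocks().length == cmd.getBlocks().length;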
Project: hadoop-plus    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: PDHC    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Project: FlexMap    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Project: FlexMap    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: FlexMap    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: hops    File: BlockManager.java
public void removeBlock(Block block)
    throws StorageException, TransactionContextException {
  // No need to ACK blocks that are being removed entirely
  // from the namespace, since the removal of the associated
  // file already removes them from the block map below.
  block.setNumBytesNoPersistance(BlockCommand.NO_ACK);
  addToInvalidates(block);
  corruptReplicas.removeFromCorruptReplicasMap(getBlockInfo(block));
  BlockInfo storedBlock = getBlockInfo(block);
  blocksMap.removeBlock(block);
  // Remove the block from pendingReplications
  pendingReplications.remove(storedBlock);
  if (postponedMisreplicatedBlocks.remove(block)) {
    postponedMisreplicatedBlocksCount.decrementAndGet();
  }
}
Project: hops    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block " +
        block.getBlockPoolId() + " " + block.getLocalBlock() + " at file " +
        blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info(
        "Deleted " + block.getBlockPoolId() + " " + block.getLocalBlock() +
            " file " + blockFile);
  }
}
Project: hops    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
  }
  return null;
}
Project: hops    File: PBHelper.java
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder =
      BlockCommandProto.newBuilder().setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      builder.setAction(BlockCommandProto.Action.TRANSFER);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      builder.setAction(BlockCommandProto.Action.INVALIDATE);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setAction(BlockCommandProto.Action.SHUTDOWN);
      break;
    default:
      throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (Block block : blocks) {
    builder.addBlocks(PBHelper.convert(block));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
Project: hops    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: hops    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
// keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: hadoop-TCP    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block "
        + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Project: hadoop-TCP    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: hadoop-TCP    File: PBHelper.java
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
Project: hadoop-TCP    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: hadoop-on-lustre    File: TestDatanodeDescriptor.java
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(MAX_LIMIT, bc.getBlocks().length);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.getBlocks().length);
}
Project: hadoop-on-lustre    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: hardfs    File: FsDatasetAsyncDiskService.java
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block "
        + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Project: hardfs    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: hardfs    File: PBHelper.java
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}