Java class org.apache.hadoop.hdfs.util.LightWeightHashSet example source code
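The snippets below all exercise the same small API of org.apache.hadoop.hdfs.util.LightWeightHashSet: add(), remove(), contains(), getElement(), isEmpty(), size(), pollN(), and plain iteration. As a warm-up, the following minimal sketch (not taken from any of the projects below) runs a few of those calls against illustrative Block values; it assumes hadoop-hdfs is on the classpath.

import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class LightWeightHashSetDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Block> set = new LightWeightHashSet<Block>();

    // add() returns true only when the element was not already present.
    Block b = new Block(1L, 0L, 1001L);
    System.out.println("first add:  " + set.add(b));   // true
    System.out.println("second add: " + set.add(b));   // false, already present

    // Standard set queries.
    System.out.println("contains: " + set.contains(b));
    System.out.println("size:     " + set.size());

    // pollN() removes and returns up to N elements in a single call.
    List<Block> drained = set.pollN(10);
    System.out.println("drained " + drained.size()
        + ", now empty = " + set.isEmpty());
  }
}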

Project: hadoop    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Project: hadoop    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * If a block is removed from blocksMap, remove it from excessReplicateMap.
 */
private void removeFromExcessReplicateMap(BlockInfo block) {
  for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
    String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
    LightWeightHashSet<BlockInfo> excessReplicas =
        excessReplicateMap.get(uuid);
    if (excessReplicas != null) {
      if (excessReplicas.remove(block)) {
        excessBlocksCount.decrementAndGet();
        if (excessReplicas.isEmpty()) {
          excessReplicateMap.remove(uuid);
        }
      }
    }
  }
}
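The two methods above show the recurring map-of-sets pattern in this page: add() lazily creates the per-datanode LightWeightHashSet and bumps a counter only when add() reports a new element, while the removal path decrements only on a successful remove() and drops the set from the outer map once it is empty. A hypothetical, self-contained distillation of both halves follows, keyed by a plain String instead of DatanodeInfo/DatanodeStorageInfo; it is a sketch of the pattern, not code from any of the projects listed here.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

class PendingInvalidates {
  private final Map<String, LightWeightHashSet<Block>> node2blocks =
      new HashMap<String, LightWeightHashSet<Block>>();
  private long numBlocks = 0;

  // Lazily create the per-node set; count only blocks that were actually new.
  synchronized void add(String node, Block block) {
    LightWeightHashSet<Block> set = node2blocks.get(node);
    if (set == null) {
      set = new LightWeightHashSet<Block>();
      node2blocks.put(node, set);
    }
    if (set.add(block)) {
      numBlocks++;
    }
  }

  // Decrement only on a successful remove(); drop the set once it is empty.
  synchronized void remove(String node, Block block) {
    LightWeightHashSet<Block> set = node2blocks.get(node);
    if (set != null && set.remove(block)) {
      numBlocks--;
      if (set.isEmpty()) {
        node2blocks.remove(node);
      }
    }
  }

  synchronized long size() {
    return numBlocks;
  }
}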
Project: aliyun-oss-hadoop-fs    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.debug("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Project: big-c    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Project: big-c    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Get all valid locations of the block & add the block to results.
 * Return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter invalidate replicas
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
      machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Adds block to list of blocks which will be invalidated on
 * specified datanode
 *
 * @param b block
 * @param n datanode
 */
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // We are the standby avatar and we don't want to add blocks to the
  // invalidates list.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }

  LightWeightHashSet<Block> invalidateSet = recentInvalidateSets.get(n
      .getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  if(!ackRequired){
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  if (invalidateSet.add(b)) {
    pendingDeletionBlocksCount++;
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * dumps the contents of excessReplicateMap
 */
void dumpExcessReplicasSets(PrintWriter out) {
  int size = excessReplicateMap.values().size();
  out.println("Metasave: Excess blocks " + excessBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : excessReplicateMap
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName());
      blocks.printDetails(out);
    }
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * dumps the contents of recentInvalidateSets
 */
private void dumpRecentInvalidateSets(PrintWriter out) {
  int size = recentInvalidateSets.values().size();
  out.println("Metasave: Blocks " + pendingDeletionBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : recentInvalidateSets
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName() + blocks);
    }
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Clear replication queues. This is used by standby avatar to reclaim memory.
 */
void clearReplicationQueues() {
  writeLock();
  try {
    synchronized (neededReplications) {
      neededReplications.clear();
    }
    underReplicatedBlocksCount = 0;

    corruptReplicas.clear();
    corruptReplicaBlocksCount = 0;

    overReplicatedBlocks.clear();
    raidEncodingTasks.clear();

    excessReplicateMap = new HashMap<String, LightWeightHashSet<Block>>();
    excessBlocksCount = 0;
  } finally {
    writeUnlock();
  }
}
Project: hadoop-EAR    File: DataBlockScanner.java
void scanNamespace() {
  startNewPeriod();
  // Create a new processedBlocks structure
  processedBlocks = new LightWeightHashSet<Long>();
  if (verificationLog != null) {
    try {
      verificationLog.openCurFile();
    } catch (FileNotFoundException ex) {
      LOG.warn("Could not open current file");
    }
  }
  if (!assignInitialVerificationTimes()) {
    return;
  }
  // Start scanning
  scan();
}
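Here processedBlocks holds plain block IDs (Long) rather than Block objects, so the set acts as a memory-light "already scanned" filter for the block scanner. A minimal sketch of that dedup use with made-up IDs; assumes hadoop-hdfs on the classpath.

import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class ProcessedBlocksDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Long> processedBlocks = new LightWeightHashSet<Long>();
    long[] scanOrder = {101L, 102L, 101L, 103L, 102L};  // duplicates on purpose

    for (long blockId : scanOrder) {
      // add() returns false for IDs that were already recorded.
      if (processedBlocks.add(blockId)) {
        System.out.println("scanning block " + blockId);
      } else {
        System.out.println("skipping already-scanned block " + blockId);
      }
    }
  }
}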
Project: hadoop-EAR    File: FSDataset.java
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) throws IOException { 
  if (rbwDir == null) {
    return;
  }

  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }
  String[] blockFileNames = getFileNames(blockFiles);  
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      Block block = FSDataset.getBlockFromNames(blockFiles, blockFileNames, i);
      if (block != null) {
        // add this block to block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }            
      }
    }
  }
}
Project: hadoop-EAR    File: FSDataset.java
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 * @throws IOException 
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);

  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(block);
    }
  }
}
Project: hadoop-EAR    File: FSDataset.java
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 * @throws IOException 
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);      
  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
Project: hadoop-plus    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getStorageID());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getStorageID(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Project: hadoop-plus    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
Project: hadoop-plus    File: InvalidateBlocks.java
private synchronized List<Block> invalidateWork(
    final String storageId, final DatanodeDescriptor dn) {
  final LightWeightHashSet<Block> set = node2blocks.get(storageId);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = datanodeManager.blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(storageId);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
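invalidateWork() drains at most blockInvalidateLimit blocks per call through pollN(), which removes and returns up to N elements in one step. A standalone sketch of that batched drain follows; the limit constant is a stand-in for datanodeManager.blockInvalidateLimit and the block values are illustrative.

import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class DrainDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Block> set = new LightWeightHashSet<Block>();
    for (long id = 1; id <= 25; id++) {
      set.add(new Block(id, 0L, 1000L + id));
    }

    final int limit = 10;  // stand-in for datanodeManager.blockInvalidateLimit
    while (!set.isEmpty()) {
      // pollN() removes and returns up to `limit` elements per call.
      List<Block> batch = set.pollN(limit);
      System.out.println("sent batch of " + batch.size()
          + ", remaining " + set.size());
    }
  }
}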
Project: FlexMap    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Project: FlexMap    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Project: hadoop-TCP    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getStorageID());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getStorageID(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Project: hadoop-TCP    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
Project: hadoop-TCP    File: InvalidateBlocks.java
private synchronized List<Block> invalidateWork(
    final String storageId, final DatanodeDescriptor dn) {
  final LightWeightHashSet<Block> set = node2blocks.get(storageId);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = datanodeManager.blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(storageId);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
Project: hardfs    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getStorageID());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getStorageID(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Project: hardfs    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
Project: hardfs    File: InvalidateBlocks.java
private synchronized List<Block> invalidateWork(
    final String storageId, final DatanodeDescriptor dn) {
  final LightWeightHashSet<Block> set = node2blocks.get(storageId);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = datanodeManager.blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(storageId);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
Project: hadoop-on-lustre2    File: InvalidateBlocks.java
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getDatanodeUuid());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getDatanodeUuid(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Project: hadoop-on-lustre2    File: InvalidateBlocks.java
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
Project: hadoop-on-lustre2    File: InvalidateBlocks.java
private synchronized List<Block> invalidateWork(
    final String storageId, final DatanodeDescriptor dn) {
  final LightWeightHashSet<Block> set = node2blocks.get(storageId);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = datanodeManager.blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(storageId);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
Project: RDFS    File: FSNamesystem.java
/**
 * Get all valid locations of the block & add the block to results.
 * Return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter invalidate replicas
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
      machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: RDFS    File: FSNamesystem.java
/**
 * Adds block to list of blocks which will be invalidated on
 * specified datanode
 *
 * @param b block
 * @param n datanode
 */
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // We are the standby avatar and we don't want to add blocks to the
  // invalidates list.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }

  LightWeightHashSet<Block> invalidateSet = recentInvalidateSets.get(n
      .getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  if(!ackRequired){
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  if (invalidateSet.add(b)) {
    pendingDeletionBlocksCount++;
  }
}
Project: RDFS    File: FSNamesystem.java
/**
 * dumps the contents of recentInvalidateSets
 */
private void dumpRecentInvalidateSets(PrintWriter out) {
  int size = recentInvalidateSets.values().size();
  out.println("Metasave: Blocks " + pendingDeletionBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : recentInvalidateSets
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName() + blocks);
    }
  }
}
Project: RDFS    File: DataBlockScanner.java
void scanNamespace() {
  startNewPeriod();
  // Create a new processedBlocks structure
  processedBlocks = new LightWeightHashSet<Long>();
  if (verificationLog != null) {
    try {
      verificationLog.openCurFile();
    } catch (FileNotFoundException ex) {
      LOG.warn("Could not open current file");
    }
  }
  if (!assignInitialVerificationTimes()) {
    return;
  }
  // Start scanning
  scan();
}
Project: RDFS    File: FSDataset.java
/**
 * Recover blocks that were being written when the datanode
 * was earlier shut down. These blocks get re-inserted into
 * ongoingCreates. Also, send a blockreceived message to the NN
 * for each of these blocks because these are not part of a 
 * block report.
 */
private void recoverBlocksBeingWritten(File bbw) throws IOException {
  FSDir fsd = new FSDir(namespaceId, bbw, this.volume);
  LightWeightHashSet<BlockAndFile> blockSet = new LightWeightHashSet<BlockAndFile>();
  fsd.getBlockAndFileInfo(blockSet);
  for (BlockAndFile b : blockSet) {
    File f = b.pathfile;  // full path name of block file
    lock.writeLock().lock();
    try {
      volumeMap.add(namespaceId, b.block, new DatanodeBlockInfo(volume, f,
          DatanodeBlockInfo.UNFINALIZED));
      volumeMap.addOngoingCreates(namespaceId, b.block, ActiveFile.createStartupRecoveryFile(f));
    } finally {
      lock.writeLock().unlock();
    }
    if (DataNode.LOG.isDebugEnabled()) {
      DataNode.LOG.debug("recoverBlocksBeingWritten for block " + b.block + ", namespaceId: " + namespaceId);
    }
  }
}
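The recovery code above fills a LightWeightHashSet through an out-parameter (fsd.getBlockAndFileInfo(blockSet)) and then walks it with a plain for-each loop. A minimal sketch of that populate-then-iterate flow; populate() is a hypothetical stand-in for the FSDataset helpers and the block values are illustrative.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class OutParamDemo {
  // Hypothetical stand-in for getBlockInfo()/getBlockAndFileInfo(): the callee
  // adds whatever it finds into the set supplied by the caller.
  static void populate(LightWeightHashSet<Block> blockSet) {
    blockSet.add(new Block(7L, 0L, 1007L));
    blockSet.add(new Block(8L, 0L, 1008L));
  }

  public static void main(String[] args) {
    LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
    populate(blockSet);
    for (Block b : blockSet) {  // the set is iterable like any collection
      System.out.println("recovered block " + b);
    }
  }
}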
Project: RDFS    File: FSDataset.java
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);

  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
Project: RDFS    File: FSDataset.java
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);      
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
Project: hadoop    File: InvalidateBlocks.java
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation stamp,
 * returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = node2blocks.get(dn);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null &&
      block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
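As the javadoc above notes, the check must compare generation stamps explicitly: the set lookup itself matches on the block ID, so contains() fetches the stored instance with getElement() and compares stamps. A small sketch of that distinction with illustrative IDs and stamps; assumes hadoop-hdfs on the classpath.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class GenStampDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Block> set = new LightWeightHashSet<Block>();
    set.add(new Block(42L, 0L, 1001L));       // stored with generation stamp 1001

    Block probe = new Block(42L, 0L, 2002L);  // same block ID, newer stamp
    Block stored = set.getElement(probe);     // lookup matches on the block ID

    boolean sameStamp = stored != null
        && stored.getGenerationStamp() == probe.getGenerationStamp();
    System.out.println("found by ID:    " + (stored != null));  // true
    System.out.println("same gen stamp: " + sameStamp);         // false
  }
}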