Example source code for the Java class org.apache.hadoop.hdfs.server.common.HdfsServerConstants

Project: hadoop    File: NNStorage.java
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK
      .matches(startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
Project: aliyun-oss-hadoop-fs    File: NNStorage.java
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK
      .matches(startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsServerConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
Project: aliyun-oss-hadoop-fs    File: EditLogFileInputStream.java
/**
 * @param file          File being scanned and validated.
 * @param maxTxIdToScan Maximum Tx ID to try to scan.
 *                      The scan returns after reading this or a higher
 *                      ID. The file portion beyond this ID is
 *                      potentially being updated.
 * @return Result of the validation
 * @throws IOException
 */
static FSEditLogLoader.EditLogValidation scanEditLog(File file,
    long maxTxIdToScan, boolean verifyVersion)
    throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
    // read the header and initialize the input stream, but do not check the
    // layout version
    in.getVersion(verifyVersion);
  } catch (LogHeaderCorruptException e) {
    LOG.warn("Log file " + file + " has no valid header", e);
    return new FSEditLogLoader.EditLogValidation(0,
        HdfsServerConstants.INVALID_TXID, true);
  }

  try {
    return FSEditLogLoader.scanEditLog(in, maxTxIdToScan);
  } finally {
    IOUtils.closeStream(in);
  }
}
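
As a usage note, callers of this scan typically treat HdfsServerConstants.INVALID_TXID as a "no valid transactions" sentinel. A minimal caller sketch, assuming EditLogValidation exposes getEndTxId() and hasCorruptHeader() accessors and that the calling code lives in the same package (the scan method is package-private):

// Hypothetical helper; file handling and naming are illustrative only.
static long lastGoodTxId(java.io.File logFile) throws java.io.IOException {
  FSEditLogLoader.EditLogValidation validation =
      EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
  if (validation.hasCorruptHeader()
      || validation.getEndTxId() == HdfsServerConstants.INVALID_TXID) {
    return HdfsServerConstants.INVALID_TXID; // nothing usable in this segment
  }
  return validation.getEndTxId();
}
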
Project: aliyun-oss-hadoop-fs    File: EditLogFileInputStream.java
/**
 * Read the header of the edit log.
 * @param in edit log input stream
 * @return the edit log version number
 * @throws IOException if error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsServerConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
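
A side note on the comparison above: HDFS layout versions are negative integers that become more negative as new on-disk features are added, so "newer" means numerically smaller. The following self-contained sketch repeats the same bounds check with hypothetical constant values standing in for HdfsServerConstants.NAMENODE_LAYOUT_VERSION and Storage.LAST_UPGRADABLE_LAYOUT_VERSION:

// Hypothetical values: the current (newest, most negative) layout version and
// the oldest layout version that is still readable/upgradable.
static final int CURRENT_LAYOUT_VERSION = -63;
static final int LAST_UPGRADABLE_VERSION = -16;

static boolean isSupportedLogVersion(int logVersion) {
  if (logVersion < CURRENT_LAYOUT_VERSION) {
    return false; // more negative than current => written by a future release
  }
  if (logVersion > LAST_UPGRADABLE_VERSION) {
    return false; // less negative than the last upgradable version => too old
  }
  return true;
}
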
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
/**
 * Same as {@link #renameReservedPathsOnUpgrade}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component =
          DFSUtil.string2Bytes(renameReservedMap
              .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
Project: aliyun-oss-hadoop-fs    File: DatanodeDescriptor.java
/**
 * Find whether the datanode contains a good storage of the given type to
 * place a block of size <code>blockSize</code>.
 *
 * <p>Currently the datanode only cares about the storage type; in this
 * method, the first storage of the given type that we see is returned.
 *
 * @param t requested storage type
 * @param blockSize requested block size
 */
public DatanodeStorageInfo chooseStorage4Block(StorageType t,
    long blockSize) {
  final long requiredSize =
      blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
  final long scheduledSize = blockSize * getBlocksScheduled(t);
  long remaining = 0;
  DatanodeStorageInfo storage = null;
  for (DatanodeStorageInfo s : getStorageInfos()) {
    if (s.getState() == State.NORMAL && s.getStorageType() == t) {
      if (storage == null) {
        storage = s;
      }
      long r = s.getRemaining();
      if (r >= requiredSize) {
        remaining += r;
      }
    }
  }
  if (requiredSize > remaining - scheduledSize) {
    return null;
  }
  return storage;
}
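
The space check in chooseStorage4Block can be summarized as: free space summed over matching storages (counting only storages that could hold the required chunk on their own), minus space already promised to scheduled blocks, must cover MIN_BLOCKS_FOR_WRITE blocks of the requested size. A standalone sketch of that accounting, with minBlocksForWrite passed in rather than read from HdfsServerConstants:

// Illustrative only: mirrors the arithmetic in chooseStorage4Block above.
static boolean hasRoomForBlock(long blockSize, long[] remainingPerStorage,
    int blocksAlreadyScheduled, int minBlocksForWrite) {
  final long requiredSize = blockSize * minBlocksForWrite;
  final long scheduledSize = blockSize * blocksAlreadyScheduled;
  long remaining = 0;
  for (long r : remainingPerStorage) {
    if (r >= requiredSize) { // only count storages that could fit the write
      remaining += r;
    }
  }
  return requiredSize <= remaining - scheduledSize;
}
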
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
Project: aliyun-oss-hadoop-fs    File: Journal.java
/**
 * Scan the local storage directory, and return the segment containing
 * the highest transaction.
 * @return the EditLogFile with the highest transaction ID, or null
 * if no files exist.
 */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
  if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
    return null;
  }

  LOG.info("Scanning storage " + fjm);
  List<EditLogFile> files = fjm.getLogFiles(0);

  while (!files.isEmpty()) {
    EditLogFile latestLog = files.remove(files.size() - 1);
    latestLog.scanLog(Long.MAX_VALUE, false);
    LOG.info("Latest log is " + latestLog);
    if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
      // the log contains no transactions
      LOG.warn("Latest log " + latestLog + " has no transactions. " +
          "moving it aside and looking for previous log");
      latestLog.moveAsideEmptyFile();
    } else {
      return latestLog;
    }
  }

  LOG.info("No files in " + fjm);
  return null;
}
Project: aliyun-oss-hadoop-fs    File: Journal.java
/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId)
    throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog(Long.MAX_VALUE, false);
  }
  if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret));
  return ret;
}
Project: aliyun-oss-hadoop-fs    File: TestAddStripedBlocks.java
private void checkStripedBlockUC(BlockInfoStriped block,
    boolean checkReplica) {
  assertEquals(0, block.numNodes());
  Assert.assertFalse(block.isComplete());
  Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
  Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
      block.getParityBlockNum());
  Assert.assertEquals(0,
      block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);

  Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      block.getBlockUCState());
  if (checkReplica) {
    Assert.assertEquals(GROUP_SIZE,
        block.getUnderConstructionFeature().getNumExpectedLocations());
    DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
        .getExpectedStorageLocations();
    for (DataNode dn : cluster.getDataNodes()) {
      Assert.assertTrue(includeDataNode(dn.getDatanodeId(), storages));
    }
  }
}
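
The assertEquals(0, block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK) check relies on striped block IDs reserving their low bits for the index of an internal block within its group, so the group's own ID has those bits cleared. A hedged sketch of that ID arithmetic; the 0xF mask below is an assumed value, not taken from the snippet:

// Assumed mask for the low bits that encode the index within a block group.
static final long GROUP_INDEX_MASK = 0xF;

// The i-th internal block shares the group ID's high bits; this assumes the
// group ID already has its low index bits cleared.
static long internalBlockId(long groupId, int indexInGroup) {
  return groupId | (indexInGroup & GROUP_INDEX_MASK);
}

static int indexInGroup(long blockId) {
  return (int) (blockId & GROUP_INDEX_MASK);
}
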
Project: aliyun-oss-hadoop-fs    File: TestEditLogFileInputStream.java
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicyWithNodeGroup.java
/**
 * Test the re-replication policy in a boundary case.
 * Rack 2 has only one node group, and the node in that node group is chosen.
 * Rack 1 has two node groups, and one of them is chosen.
 * The replica policy should choose a node from a node group of Rack 1, but not
 * from the same node group as the already-chosen nodes.
 */
@Test
public void testRereplicateOnBoundaryTopology() throws Exception {
  for(int i=0; i<dataNodesInBoundaryCase.length; i++) {
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
  chosenNodes.add(storagesInBoundaryCase[0]);
  chosenNodes.add(storagesInBoundaryCase[5]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], targets[0]));
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicyWithNodeGroup.java
/**
 * In this test case, 2 favored nodes are passed:
 * dataNodes[0] (good node) and dataNodes[3] (bad node).
 * The 1st replica should be placed on the good favored node, dataNodes[0].
 * The 2nd replica should be placed in the bad favored node's node group, on
 * dataNodes[4].
 * @throws Exception
 */
@Test
public void testChooseFavoredNodesNodeGroup() throws Exception {
  updateHeartbeatWithUsage(dataNodes[3],
      2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
      0L, 0L, 0, 0); // no space

  DatanodeStorageInfo[] targets;
  List<DatanodeDescriptor> expectedTargets =
      new ArrayList<DatanodeDescriptor>();
  expectedTargets.add(dataNodes[0]);
  expectedTargets.add(dataNodes[4]);
  List<DatanodeDescriptor> favouredNodes =
      new ArrayList<DatanodeDescriptor>();
  favouredNodes.add(dataNodes[3]);
  favouredNodes.add(dataNodes[0]);
  targets = chooseTarget(2, dataNodes[7], null, favouredNodes);
  assertTrue("1st Replica is incorrect",
    expectedTargets.contains(targets[0].getDatanodeDescriptor()));
  assertTrue("2nd Replica is incorrect",
    expectedTargets.contains(targets[1].getDatanodeDescriptor()));
}
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicy.java
/**
 * Test whether the remaining space per storage is individually
 * considered.
 */
@Test
public void testChooseNodeWithMultipleStorages1() throws Exception {
  updateHeartbeatWithUsage(dataNodes[5],
      2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
      0L, 0L, 0, 0);

  updateHeartbeatForExtraStorage(
      2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget (1, dataNodes[5],
      new ArrayList<DatanodeStorageInfo>(), null);
  assertEquals(1, targets.length);
  assertEquals(storages[4], targets[0]);

  resetHeartbeatForStorages();
}
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicy.java
/**
 * Test whether all storages on the datanode are considered while
 * choosing target to place block.
 */
@Test
public void testChooseNodeWithMultipleStorages2() throws Exception {
  updateHeartbeatWithUsage(dataNodes[5],
      2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE)/3, 0L,
      0L, 0L, 0, 0);

  updateHeartbeatForExtraStorage(
      2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);

  DatanodeStorageInfo[] targets;
  targets = chooseTarget (1, dataNodes[5],
      new ArrayList<DatanodeStorageInfo>(), null);
  assertEquals(1, targets.length);
  assertEquals(dataNodes[5], targets[0].getDatanodeDescriptor());

  resetHeartbeatForStorages();
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeRegister.java
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();

  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();

  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();

  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();

  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeRegister.java
@Test
public void testDifferentLayoutVersions() throws Exception {
  // We expect no exceptions to be thrown when the layout versions match.
  assertEquals(HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
      actor.retrieveNamespaceInfo().getLayoutVersion());

  // Retrieving the namespace info should still succeed when the NN reports a
  // layout version different from that of the DN.
  doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
      .getLayoutVersion();
  try {
    actor.retrieveNamespaceInfo();
  } catch (IOException e) {
    fail("Should not fail to retrieve NS info from DN with different layout version");
  }
}
Project: hadoop    File: LeaseManager.java
/** Check leases periodically. */
@Override
public void run() {
  for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
    boolean needSync = false;
    try {
      fsnamesystem.writeLockInterruptibly();
      try {
        if (!fsnamesystem.isInSafeMode()) {
          needSync = checkLeases();
        }
      } finally {
        fsnamesystem.writeUnlock();
        // lease reassignments should be sync'ed.
        if (needSync) {
          fsnamesystem.getEditLog().logSync();
        }
      }

      Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
    } catch(InterruptedException ie) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(name + " is interrupted", ie);
      }
    }
  }
}
Project: hadoop    File: BackupImage.java
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read the VERSION and fstime files if they exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shut down.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if(curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch(IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
Project: hadoop    File: TestCommitBlockSynchronization.java
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  final DatanodeStorageInfo[] targets = {};

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  namesystem.setImageLoaded(true);

  // set the file's parent as root and put the file into the inodeMap, so
  // FSNamesystem's isFileDeleted() method will return false on this file
  if (file.getParent() == null) {
    INodeDirectory mparent = mock(INodeDirectory.class);
    INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
        mparent.getPermissionStatus(), mparent.getAccessTime());
    parent.setLocalName(new byte[0]);
    parent.addChild(file);
    file.setParent(parent);
  }
  namesystem.dir.getINodeMap().put(file);

  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn(blockInfo).when(file).getLastBlock();
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfoContiguous.class));
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
Project: hadoop    File: TestBlockReplacement.java
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
Project: hadoop    File: TestPipelines.java
/**
 * Creates and closes a file of a certain length.
 * Calls append() to allow the next write() operation to add to the end of it.
 * After the write() invocation, calls hflush() to make sure that data has sunk
 * through the pipeline, and checks the state of the last block's replica.
 * It is supposed to be in the RBW state.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath = new Path("/" + METHOD_NAME + ".dat");

  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  if(LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  FSDataOutputStream ofs = fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream) ofs.getWrappedStream()).hflush();

  List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
    filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();

  String bpid = cluster.getNamesystem().getBlockPoolId();
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
        .getBlock().getBlockId());

    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
    assertEquals("Should be RBW replica on " + dn
        + " after sequence of calls append()/write()/hflush()",
        HdfsServerConstants.ReplicaState.RBW, r.getState());
  }
  ofs.close();
}
Project: hadoop    File: TestLeaseRecovery2.java
static void checkLease(String f, int size) {
  final String holder = NameNodeAdapter.getLeaseHolderForPath(
      cluster.getNameNode(), f); 
  if (size == 0) {
    assertEquals("lease holder should null, file is closed", null, holder);
  } else {
    assertEquals("lease holder should now be the NN",
        HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
  }

}
Project: aliyun-oss-hadoop-fs    File: DFSUtil.java
/**
 * Returns whether the component is reserved.
 * 
 * <p>
 * Note that some components are only reserved under certain directories, e.g.
 * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
 * @return true, if the component is reserved
 */
public static boolean isReservedPathComponent(String component) {
  for (String reserved : HdfsServerConstants.RESERVED_PATH_COMPONENTS) {
    if (component.equals(reserved)) {
      return true;
    }
  }
  return false;
}
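
A short usage sketch of this check; ".snapshot" is an example component expected to be in HdfsServerConstants.RESERVED_PATH_COMPONENTS, but the exact contents of that array should be verified against the release in use:

// Illustrative only: test each component of a path against the reserved list.
String path = "/user/alice/.snapshot";
for (String component : path.split("/")) {
  if (!component.isEmpty() && DFSUtil.isReservedPathComponent(component)) {
    System.out.println(component + " is a reserved path component");
  }
}
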
Project: aliyun-oss-hadoop-fs    File: FSEditLogLoader.java
/**
 * Throw appropriate exception during upgrade from 203, when editlog loading
 * could fail due to opcode conflicts.
 */
private void check203UpgradeFailure(int logVersion, Throwable e)
    throws IOException {
  // The 0.20.203 release has opcodes that conflict with those of later releases.
  // The editlog must be emptied by restarting the namenode, before proceeding
  // with the upgrade.
  if (Storage.is203LayoutVersion(logVersion)
      && logVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) {
    String msg = "During upgrade failed to load the editlog version "
        + logVersion + " from release 0.20.203. Please go back to the old "
        + " release and restart the namenode. This empties the editlog "
        + " and saves the namespace. Resume the upgrade after this step.";
    throw new IOException(msg, e);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogLoader.java
/**
 * Find the last valid transaction ID in the stream.
 * If there are invalid or corrupt transactions in the middle of the stream,
 * scanEditLog will skip over them.
 * This reads through the stream but does not close it.
 *
 * @param maxTxIdToScan Maximum Tx ID to try to scan.
 *                      The scan returns after reading this or a higher ID.
 *                      The file portion beyond this ID is potentially being
 *                      updated.
 */
static EditLogValidation scanEditLog(EditLogInputStream in,
    long maxTxIdToScan) {
  long lastPos;
  long lastTxId = HdfsServerConstants.INVALID_TXID;
  long numValid = 0;
  while (true) {
    long txid;
    lastPos = in.getPosition();
    try {
      if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) {
        break;
      }
    } catch (Throwable t) {
      FSImage.LOG.warn("Caught exception after scanning through "
          + numValid + " ops from " + in
          + " while determining its valid length. Position was "
          + lastPos, t);
      in.resync();
      FSImage.LOG.warn("After resync, position is " + in.getPosition());
      continue;
    }
    if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) {
      lastTxId = txid;
    }
    if (lastTxId >= maxTxIdToScan) {
      break;
    }
    numValid++;
  }
  return new EditLogValidation(lastPos, lastTxId, false);
}
Project: aliyun-oss-hadoop-fs    File: NNStorage.java
public void format() throws IOException {
  this.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
  for (Iterator<StorageDirectory> it =
                         dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    format(sd);
  }
}
Project: big-c    File: TestBlockReplacement.java
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
public long scanOp() throws IOException {
  // Edit logs of this age don't have any length prefix, so we just have
  // to read the entire Op.
  FSEditLogOp op = decodeOp();
  return op == null ?
      HdfsServerConstants.INVALID_TXID : op.getTransactionId();
}
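
HdfsServerConstants.INVALID_TXID serves as the "no transaction" sentinel returned here. A hypothetical consumer loop (reader and handle() are placeholders, not part of FSEditLogOp):

// Hypothetical: drain transaction IDs until the sentinel comes back.
long txid;
while ((txid = reader.scanOp()) != HdfsServerConstants.INVALID_TXID) {
  handle(txid); // placeholder for real per-op processing
}
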
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
public FSEditLogOp decodeOp() throws IOException {
  limiter.setLimit(maxOpSize);
  in.mark(maxOpSize);
  byte opCodeByte;
  try {
    opCodeByte = in.readByte();
  } catch (EOFException eof) {
    // EOF at an opcode boundary is expected.
    return null;
  }
  FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
  if (opCode == OP_INVALID) {
    verifyTerminator();
    return null;
  }
  FSEditLogOp op = cache.get(opCode);
  if (op == null) {
    throw new IOException("Read invalid opcode " + opCode);
  }
  if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
    op.setTransactionId(in.readLong());
  } else {
    op.setTransactionId(HdfsServerConstants.INVALID_TXID);
  }
  op.readFields(in, logVersion);
  return op;
}
Project: aliyun-oss-hadoop-fs    File: FSDirectory.java
/** Verify if the inode name is legal. */
void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
  if (Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
    String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
    if (!namesystem.isImageLoaded()) {
      s += "  Please rename it before upgrade.";
    }
    throw new HadoopIllegalArgumentException(s);
  }
}
Project: aliyun-oss-hadoop-fs    File: LeaseManager.java
/** Check leases periodically. */
@Override
public void run() {
  for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
    boolean needSync = false;
    try {
      fsnamesystem.writeLockInterruptibly();
      try {
        if (!fsnamesystem.isInSafeMode()) {
          needSync = checkLeases();
        }
      } finally {
        fsnamesystem.writeUnlock();
        // lease reassignments should be sync'ed.
        if (needSync) {
          fsnamesystem.getEditLog().logSync();
        }
      }

      Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
    } catch(InterruptedException ie) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(name + " is interrupted", ie);
      }
    } catch(Throwable e) {
      LOG.warn("Unexpected throwable: ", e);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);

  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);

    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
Project: big-c    File: TestCommitBlockSynchronization.java
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  final DatanodeStorageInfo[] targets = {};

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  namesystem.setImageLoaded(true);

  // set the file's parent as root and put the file into the inodeMap, so
  // FSNamesystem's isFileDeleted() method will return false on this file
  if (file.getParent() == null) {
    INodeDirectory mparent = mock(INodeDirectory.class);
    INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
        mparent.getPermissionStatus(), mparent.getAccessTime());
    parent.setLocalName(new byte[0]);
    parent.addChild(file);
    file.setParent(parent);
  }
  namesystem.dir.getINodeMap().put(file);

  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn(blockInfo).when(file).getLastBlock();
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfoContiguous.class));
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
/**
 * Use the default key-value pairs that will be used to determine how to
 * rename reserved paths on upgrade.
 */
@VisibleForTesting
public static void useDefaultRenameReservedPairs() {
  renameReservedMap.clear();
  for (String key: HdfsServerConstants.RESERVED_PATH_COMPONENTS) {
    renameReservedMap.put(
        key,
        key + "." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "."
            + "UPGRADE_RENAMED");
  }
}
Project: aliyun-oss-hadoop-fs    File: RedundantEditLogInputStream.java
RedundantEditLogInputStream(Collection<EditLogInputStream> streams,
    long startTxId) {
  this.curIdx = 0;
  this.prevTxId = (startTxId == HdfsServerConstants.INVALID_TXID) ?
    HdfsServerConstants.INVALID_TXID : (startTxId - 1);
  this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL;
  this.prevException = null;
  // EditLogInputStreams in a RedundantEditLogInputStream must be finalized,
  // and can't be pre-transactional.
  EditLogInputStream first = null;
  for (EditLogInputStream s : streams) {
    Preconditions.checkArgument(s.getFirstTxId() !=
        HdfsServerConstants.INVALID_TXID, "invalid first txid in stream: %s", s);
    Preconditions.checkArgument(s.getLastTxId() !=
        HdfsServerConstants.INVALID_TXID, "invalid last txid in stream: %s", s);
    if (first == null) {
      first = s;
    } else {
      Preconditions.checkArgument(s.getFirstTxId() == first.getFirstTxId(),
        "All streams in the RedundantEditLogInputStream must have the same " +
        "start transaction ID!  " + first + " had start txId " +
        first.getFirstTxId() + ", but " + s + " had start txId " +
        s.getFirstTxId());
    }
  }

  this.streams = streams.toArray(new EditLogInputStream[0]);

  // We sort the streams here so that the streams that end later come first.
  Arrays.sort(this.streams, new Comparator<EditLogInputStream>() {
    @Override
    public int compare(EditLogInputStream a, EditLogInputStream b) {
      return Longs.compare(b.getLastTxId(), a.getLastTxId());
    }
  });
}
Project: aliyun-oss-hadoop-fs    File: NamespaceInfo.java
public NamespaceInfo(int nsID, String clusterID, String bpID,
    long cT, String buildVersion, String softwareVersion,
    long capabilities) {
  super(HdfsServerConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
      NodeType.NAME_NODE);
  blockPoolID = bpID;
  this.buildVersion = buildVersion;
  this.softwareVersion = softwareVersion;
  this.capabilities = capabilities;
}
Project: aliyun-oss-hadoop-fs    File: RemoteEditLog.java
@Override
public Long apply(RemoteEditLog log) {
  if (null == log) {
    return HdfsServerConstants.INVALID_TXID;
  }
  return log.getStartTxId();
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Returns true if the Namenode was started with a rollback option.
 *
 * @param option - StartupOption
 * @return boolean
 */
private boolean isInRollBackMode(HdfsServerConstants.StartupOption option) {
  if (option == HdfsServerConstants.StartupOption.ROLLBACK) {
    return true;
  }
  if ((option == HdfsServerConstants.StartupOption.ROLLINGUPGRADE) &&
      (option.getRollingUpgradeStartupOption() ==
          HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK)) {
    return true;
  }
  return false;
}
Project: aliyun-oss-hadoop-fs    File: ReplicaUnderConstruction.java
ReplicaUnderConstruction(Block block,
    DatanodeStorageInfo target,
    HdfsServerConstants.ReplicaState state) {
  super(block);
  this.expectedLocation = target;
  this.state = state;
  this.chosenAsPrimary = false;
}