Java Class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption: Example Source Code

Project: hadoop-EAR    File: MiniDFSCluster.java
public MiniDFSCluster(int nameNodePort, 
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities,
                      boolean waitSafeMode,
                      boolean setupHostsFile,
                      int numNameNodes,
                      boolean federation) throws IOException {
  this(nameNodePort, conf, numDataNodes, format, manageNameDfsDirs,
      manageDataDfsDirs, operation, racks, hosts, simulatedCapacities,
      waitSafeMode, setupHostsFile, numNameNodes, federation, true);
}
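
For orientation, a hypothetical call to this overload might look as follows; every argument value below is illustrative, not taken from the project:

// Illustrative only: one NameNode on an ephemeral port, two DataNodes, no federation.
MiniDFSCluster cluster = new MiniDFSCluster(
    0,                      // nameNodePort (0 = pick a free port)
    new Configuration(),    // conf
    2,                      // numDataNodes
    true,                   // format
    true, true,             // manageNameDfsDirs, manageDataDfsDirs
    StartupOption.REGULAR,  // operation
    null, null,             // racks, hosts
    null,                   // simulatedCapacities
    true, false,            // waitSafeMode, setupHostsFile
    1, false);              // numNameNodes, federation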
Project: hadoop-EAR    File: MiniDFSCluster.java
private void createFederatedNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean manageNameDfsDirs, boolean format,
    StartupOption operation, String nameserviceId)
    throws IOException {
  conf.set(FSConstants.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
  NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
      format, operation, nameserviceId);
  DFSUtil.setGenericConf(conf, nameserviceId, 
      NameNode.NAMESERVICE_SPECIFIC_KEYS);
  conf.set(DFSUtil.getNameServiceIdKey(
      FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NetUtils.
      toIpPort(nn.getHttpAddress()));
  conf.set(DFSUtil.getNameServiceIdKey(
      NameNode.DATANODE_PROTOCOL_ADDRESS, nameserviceId), NetUtils.
      toIpPort(nn.getNameNodeDNAddress()));
  nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
}
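
The per-nameservice keys written above rely on DFSUtil.getNameServiceIdKey to suffix a generic key with the nameservice ID, so that several NameNodes can share one Configuration. A minimal sketch of that assumed behavior (not copied from DFSUtil):

// Assumed behavior of DFSUtil.getNameServiceIdKey: append the nameservice ID.
static String getNameServiceIdKey(String key, String nameserviceId) {
  return key + "." + nameserviceId;  // e.g. "dfs.http.address.ns1"
}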
Project: hadoop-EAR    File: TestStartup.java
/**
 * Start with the -importCheckpoint option and verify that the files are in separate directories and of the right length.
 * @throws IOException
 */
private void checkNameNodeFiles() throws IOException {

  // start namenode with import option
  LOG.info("-- about to start DFS cluster");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster(0, config, 1, false, false, false,  StartupOption.IMPORT, null, null, null);
    cluster.waitActive();
    LOG.info("--NN started with checkpoint option");
    NameNode nn = cluster.getNameNode();
    assertNotNull(nn);  
    // Verify that image file sizes did not change.
    FSImage image = nn.getFSImage();
    verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
  } finally {
    if(cluster != null)
      cluster.shutdown();
  }
}
Project: hadoop-EAR    File: AvatarDataNode.java
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
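
setStartupOption is not shown in this listing; in upstream Hadoop the DataNode records the parsed option in the Configuration roughly as below (the key name follows upstream Hadoop and may differ in this fork):

// Assumed companion helpers, modeled on upstream Hadoop's DataNode:
static void setStartupOption(Configuration conf, StartupOption opt) {
  conf.set("dfs.datanode.startup", opt.toString());
}
static StartupOption getStartupOption(Configuration conf) {
  return StartupOption.valueOf(conf.get("dfs.datanode.startup",
                                        StartupOption.REGULAR.toString()));
}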
Project: hadoop-EAR    File: TestAvatarQJMUpgrade.java
/**
 * This test simulates the scenario where the upgrade fails after saving image
 * and ensures that the recovery on the journal nodes work correctly.
 */
@Test
public void testUpgradeFailureAfterSaveImage() throws Exception {
  h.failAfterSaveImage = true;

  long[] checksums = getChecksums();
  // Upgrade the cluster.
  MiniJournalCluster journalCluster = cluster.getJournalCluster();

  // This upgrade will fail after saving the image.
  try {
    cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
        .format(false).startOpt(StartupOption.UPGRADE)
        .setJournalCluster(journalCluster).instantionRetries(1).build();
    fail("Upgrade did not throw exception");
  } catch (IOException ie) {
    // ignore.
  }

  // This will correctly recover the upgrade directories.
  cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1).format(false)
      .setJournalCluster(cluster.getJournalCluster()).build();

  verifyUpgrade(checksums, true);
}
Project: hadoop-EAR    File: TestAvatarQJMUpgrade.java
/**
 * This test verifies that we can rollback the upgrade for journal nodes.
 */
@Test
public void testRollback() throws Exception {
  // Upgrade the namenode.
  long[] checksums = doUpgrade(false);

  cluster.shutDownAvatarNodes();
  cluster.shutDownDataNodes();

  // Now rollback the cluster.
  cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1).format(false)
      .startOpt(StartupOption.ROLLBACK)
      .setJournalCluster(cluster.getJournalCluster()).build();

  assertNotNull(h.checksumsAfterRollback);
  verifyRollback(checksums, true, h.checksumsAfterRollback);
}
Project: hadoop-EAR    File: NNStorage.java
/**
 * Verify that the distributed upgrade state is valid.
 * @param startOpt the option the namenode was started with.
 */
void verifyDistributedUpgradeProgress(StartupOption startOpt
                                      ) throws IOException {
  if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
    return;

  assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
  if(startOpt != StartupOption.UPGRADE) {
    if(upgradeManager.getUpgradeState())
      throw new IOException(
                  "\n   Previous distributed upgrade was not completed. "
                + "\n   Please restart NameNode with -upgrade option.");
    if(upgradeManager.getDistributedUpgrades() != null)
      throw new IOException("\n   Distributed upgrade for NameNode version "
                            + upgradeManager.getUpgradeVersion()
                            + " to current LV " + layoutVersion
                            + " is required.\n   Please restart NameNode"
                            + " with -upgrade option.");
  }
}
Project: hadoop-EAR    File: FSImage.java
/**
 * For each storage directory, performs recovery of incomplete transitions
 * (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
 * state into the dataDirStates map.
 * @param dataDirStates output of storage directory states
 * @return true if there is at least one valid formatted storage directory
 */
private boolean recoverStorageDirs(StartupOption startOpt,
    Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
  boolean isFormatted = false;
  for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(startOpt);
      isFormatted |= NNStorage.recoverDirectory(sd, startOpt, curState, true);
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
    dataDirStates.put(sd, curState);
  }
  return isFormatted;
}
Project: hadoop-EAR    File: Journal.java
public void transitionJournal(NamespaceInfo nsInfo,
    Transition transition, StartupOption startOpt) throws IOException {
  switch (transition) {
  case UPGRADE:
    doUpgradeJournal(nsInfo);
    break;
  case COMPLETE_UPGRADE:
    completeUpgradeJournal(nsInfo);
    break;
  case RECOVER:
    recoverJournal(startOpt);
    break;
  case FORMAT:
    formatJournal(nsInfo);
    break;
  case ROLLBACK:
    rollbackJournal(nsInfo);
    break;
  case FINALIZE:
    finalizeJournal();
    break;
  }
}
Project: hadoop-EAR    File: Journal.java
public void transitionImage(NamespaceInfo nsInfo,
    Transition transition, StartupOption startOpt) throws IOException {
  switch (transition) {
  case UPGRADE:
    doUpgradeImage(nsInfo);
    break;
  case COMPLETE_UPGRADE:
    completeUpgradeImage(nsInfo);
    break;
  case RECOVER:
    recoverImage(startOpt);
    break;
  case FORMAT:
    formatImage(nsInfo);
    break;
  case ROLLBACK:
    rollbackImage(nsInfo);
    break;
  case FINALIZE:
    finalizeImage();
    break;
  }
}
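
Note that both transition methods silently ignore any Transition value not listed in the switch. A defensive variant would add a default branch inside each switch (a sketch; not present in the project):

default:
  throw new IOException("Unexpected transition: " + transition);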
Project: hadoop-EAR    File: JNStorage.java
void recover(StartupOption startOpt) throws IOException {
  LOG.info("Recovering journal " + sd + " with nsid: " + getNamespaceID());

  // Unlock the directory before formatting, because we will
  // re-analyze it after format(). The analyzeStorage() call
  // below is responsible for re-locking it. This is a no-op
  // if the storage is not currently locked.
  unlockAll();
  try {
    StorageState curState = sd.analyzeStorage(startOpt);
    NNStorage.recoverDirectory(sd, startOpt, curState, false);
  } catch (IOException ioe) {
    sd.unlock();
    throw ioe;
  }
}
Project: RDFS    File: TestStartup.java
/**
 * Start with the -importCheckpoint option and verify that the files are in separate directories and of the right length.
 * @throws IOException
 */
private void checkNameNodeFiles() throws IOException {

  // start namenode with import option
  LOG.info("-- about to start DFS cluster");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster(0, config, 1, false, false, false,  StartupOption.IMPORT, null, null, null);
    cluster.waitActive();
    LOG.info("--NN started with checkpoint option");
    NameNode nn = cluster.getNameNode();
    assertNotNull(nn);  
    // Verify that image file sizes did not change.
    FSImage image = nn.getFSImage();
    verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
  } finally {
    if(cluster != null)
      cluster.shutdown();
  }
}
Project: hadoop-on-lustre    File: TestDFSUpgradeFromImage.java
public void testUpgradeFromImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
      System.setProperty("test.build.data", "build/test/data");
    }
    conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
    cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
                                 StartupOption.UPGRADE, null);
    cluster.waitActive();
    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }

    verifyFileSystem(dfs);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop-on-lustre    File: NameNode.java
private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
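
Because each recognized flag simply overwrites startOpt, the last flag wins, and any unrecognized token aborts the whole parse. Illustrative calls:

// Illustrative: the last recognized flag wins; unknown flags yield null.
parseArguments(new String[] {"-upgrade"});               // StartupOption.UPGRADE
parseArguments(new String[] {"-upgrade", "-rollback"});  // StartupOption.ROLLBACK
parseArguments(new String[] {"-bogus"});                 // null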
Project: hadoop-on-lustre    File: FSImage.java
private void verifyDistributedUpgradeProgress(StartupOption startOpt
                                              ) throws IOException {
  if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
    return;
  UpgradeManager um = FSNamesystem.getFSNamesystem().upgradeManager;
  assert um != null : "FSNameSystem.upgradeManager is null.";
  if(startOpt != StartupOption.UPGRADE) {
    if(um.getUpgradeState())
      throw new IOException(
                  "\n   Previous distributed upgrade was not completed. "
                + "\n   Please restart NameNode with -upgrade option.");
    if(um.getDistributedUpgrades() != null)
      throw new IOException("\n   Distributed upgrade for NameNode version " 
        + um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION
        + " is required.\n   Please restart NameNode with -upgrade option.");
  }
}
Project: hadoop-on-lustre    File: DataNode.java
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[], 
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
Project: cumulus    File: NameNode.java
private static StartupOption parseArguments(String args[]) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FORMAT;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.BACKUP;
    } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.CHECKPOINT;
    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.FINALIZE;
    } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.IMPORT;
    } else
      return null;
  }
  return startOpt;
}
Project: RDFS    File: FSImage.java
private void verifyDistributedUpgradeProgress(StartupOption startOpt
                                              ) throws IOException {
  if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
    return;
  UpgradeManager um = getFSNamesystem().upgradeManager;
  assert um != null : "FSNameSystem.upgradeManager is null.";
  if(startOpt != StartupOption.UPGRADE) {
    if(um.getUpgradeState())
      throw new IOException(
                  "\n   Previous distributed upgrade was not completed. "
                + "\n   Please restart NameNode with -upgrade option.");
    if(um.getDistributedUpgrades() != null)
      throw new IOException("\n   Distributed upgrade for NameNode version " 
        + um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION
        + " is required.\n   Please restart NameNode with -upgrade option.");
  }
}
Project: cumulus    File: FSImage.java
private void verifyDistributedUpgradeProgress(StartupOption startOpt
                                              ) throws IOException {
  if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
    return;
  UpgradeManager um = getFSNamesystem().upgradeManager;
  assert um != null : "FSNameSystem.upgradeManager is null.";
  if(startOpt != StartupOption.UPGRADE) {
    if(um.getUpgradeState())
      throw new IOException(
                  "\n   Previous distributed upgrade was not completed. "
                + "\n   Please restart NameNode with -upgrade option.");
    if(um.getDistributedUpgrades() != null)
      throw new IOException("\n   Distributed upgrade for NameNode version " 
        + um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION
        + " is required.\n   Please restart NameNode with -upgrade option.");
  }
}
Project: cumulus    File: DataNode.java
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[], 
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
Project: RDFS    File: AvatarDataNode.java
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
private static boolean parseArguments(String args[],
                                      Configuration conf) {
  int argsLen = (args == null) ? 0 : args.length;
  StartupOption startOpt = StartupOption.REGULAR;
  for(int i=0; i < argsLen; i++) {
    String cmd = args[i];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      System.exit(-1);
    } else if ("-rollback".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if ("-regular".equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else
      return false;
  }
  setStartupOption(conf, startOpt);
  return true;
}
Project: cumulus    File: TestHDFSServerPorts.java
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  String dataDir = getTestingDir();
  // Set up testing environment directories
  hdfsDir = new File(dataDir, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");

  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");

  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
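
A test would typically pair this with a running NameNode and shut the BackupNode down when finished (a sketch; stop() is the upstream NameNode shutdown method and is assumed to be available here):

BackupNode backup = null;
try {
  backup = startBackupNode(conf);
  // ... exercise checkpointing against the active NameNode ...
} finally {
  if (backup != null) backup.stop();  // assumed shutdown method
}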
Project: RDFS    File: MiniDFSCluster.java
public MiniDFSCluster(int nameNodePort, 
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities,
                      boolean waitSafeMode,
                      boolean setupHostsFile,
                      int numNameNodes,
                      boolean federation) throws IOException {
  this(nameNodePort, conf, numDataNodes, format, manageNameDfsDirs,
      manageDataDfsDirs, operation, racks, hosts, simulatedCapacities,
      waitSafeMode, setupHostsFile, numNameNodes, federation, true);
}
Project: RDFS    File: MiniDFSCluster.java
private void createFederatedNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean manageNameDfsDirs, boolean format,
    StartupOption operation, String nameserviceId)
    throws IOException {
  conf.set(FSConstants.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
  NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
      format, operation, nameserviceId);
  DFSUtil.setGenericConf(conf, nameserviceId, 
      NameNode.NAMESERVICE_SPECIFIC_KEYS);
  conf.set(DFSUtil.getNameServiceIdKey(
      FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
      .getHostPortString(nn.getHttpAddress()));
  conf.set(DFSUtil.getNameServiceIdKey(
      NameNode.DATANODE_PROTOCOL_ADDRESS, nameserviceId), NameNode
      .getHostPortString(nn.getNameNodeDNAddress()));
  nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
}
Project: hadoop-EAR    File: TestDFSUpgrade.java
/**
 * Attempts to start a NameNode with the given operation.  Starting
 * the NameNode should throw an exception.
 */
void startNameNodeShouldFail(StartupOption operation) {
  try {
    cluster = new MiniDFSCluster(conf, 0, operation); // should fail
    throw new AssertionError("NameNode should have failed to start");
  } catch (Exception expected) {
    // expected
  }
}
Project: hadoop-EAR    File: TestDFSUpgrade.java
/**
 * Attempts to start a DataNode with the given operation.  Starting
 * the DataNode should throw an exception.
 */
void startDataNodeShouldFail(StartupOption operation) {
  try {
    cluster.startDataNodes(conf, 1, false, operation, null); // should fail
    throw new AssertionError("DataNode should have failed to start");
  } catch (Exception expected) {
    // expected
    assertFalse(cluster.isDataNodeUp());
  }
}
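
Tests use these helpers to assert that invalid transitions are rejected, for example (an illustrative fragment):

// Illustrative: rolling back without a previous directory must fail.
startNameNodeShouldFail(StartupOption.ROLLBACK);
// Illustrative: starting a DataNode against a mismatched layout must fail.
startDataNodeShouldFail(StartupOption.REGULAR);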
Project: hadoop-EAR    File: TestDFSUpgrade.java
public void testNonFederationClusterUpgradeAfterFederationVersion()
    throws Exception {
  File[] baseDirs;
  UpgradeUtilities.initialize();
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
          new StorageInfo(FSConstants.FEDERATION_VERSION,
                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                          UpgradeUtilities.getCurrentFsscTime(cluster)), 
          cluster.getNameNode().getNamespaceID());
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkResult(DATA_NODE, dataNodeDirs, 0, false);
    } finally {
      if (cluster != null) cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
Project: hadoop-EAR    File: TestDFSUpgrade.java
public void testFederationClusterUpgradeAfterFederationVersion()
    throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current", numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES, 
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
          new StorageInfo(FSConstants.FEDERATION_VERSION,
                          cluster.getNameNode(i).getNamespaceID(),
                          cluster.getNameNode(i).versionRequest().getCTime()),
          cluster.getNameNode(i).getNamespaceID());
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      for (int i = 0 ;i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }
    } finally {
      if (cluster != null) cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
Project: hadoop-EAR    File: TestDFSUpgrade.java
public void testFederationClusterUpgradeAfterFederationVersionWithCTimeChange()
    throws Exception {
  File[] baseDirs;
  Configuration baseConf = new Configuration();
  UpgradeUtilities.initialize(2, baseConf, true);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
    log("DataNode upgrade with federation layout version in current and ctime change",
        numDirs);
    UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
    conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES,
        baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
    try {
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs,
          "current");
      for (int i = 0; i < 2; i++) {
        UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
            new StorageInfo(FSConstants.FEDERATION_VERSION, cluster
                .getNameNode(i).getNamespaceID(), cluster.getNameNode(i)
                .versionRequest().getCTime() - 1), cluster.getNameNode(i)
                .getNamespaceID());
      }
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

      for (int i = 0; i < 2; i++) {
        checkResult(DATA_NODE, dataNodeDirs, i, false);
      }
    } finally {
      if (cluster != null)
        cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    }
  }
}
Project: hadoop-EAR    File: TestNameNodeUpgrade.java
/**
 * Start the NN in upgrade mode and verify the upgradeTime
 * @return the cluster started in upgrade mode
 * @throws IOException
 * @throws InterruptedException
 */
private MiniDFSCluster startNnInUpgrade() 
  throws IOException, InterruptedException {
  Configuration conf = new Configuration();

  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 1, true, true, 
      StartupOption.UPGRADE, null);
  Thread.sleep(1000 * 60);
  FSNamesystem ns = cluster.getNameNode().getNamesystem();
  assertTrue(ns.getUpgradeTime() >= 1);  

  return cluster;
}
Project: hadoop-EAR    File: MiniDFSCluster.java
public MiniDFSCluster(Configuration conf,
                      int numDataNodes,
                      StartupOption nameNodeOperation,
                      boolean manageDfsDirs,
                      int numNameNodes) throws IOException {
  this(0, conf, numDataNodes, false, manageDfsDirs,
      manageDfsDirs, nameNodeOperation, null, null, null, true, false, 
      numNameNodes, true);
}
Project: hadoop-EAR    File: MiniDFSCluster.java
private void createFederationNamenodes(Configuration conf,
    Collection<String> nameserviceIds, boolean manageNameDfsDirs,
    boolean format, StartupOption operation)
    throws IOException {
  // Create namenodes in the cluster
  int nnCounter = 0;
  for (String nameserviceId : nameserviceIds) {
    createFederatedNameNode(nnCounter++, conf, numDataNodes, manageNameDfsDirs,
        format, operation, nameserviceId);
  }
}
Project: hadoop-EAR    File: MiniDFSCluster.java
private NameNode createNameNode(int nnIndex, Configuration conf,
    int numDataNodes,
    boolean manageNameDfsDirs,
    boolean format,
    StartupOption operation) throws IOException {
  return createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
      format, operation, null); 
}
Project: hadoop-EAR    File: MiniDFSCluster.java
public void startDataNodes(Configuration conf, int numDataNodes, 
    boolean manageDfsDirs, StartupOption operation, 
    String[] racks, String[] hosts,
    long[] simulatedCapacities) throws IOException {
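  // Note: the hosts argument is dropped; null is forwarded to the delegate.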
  startDataNodes(conf, numDataNodes, manageDfsDirs, operation,
      racks, null, simulatedCapacities, false);
}
Project: hadoop-EAR    File: MiniDFSCluster.java
public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
                           boolean manageDfsDirs, StartupOption operation, 
                           String[] racks, String[] hosts,
                           long[] simulatedCapacities,
                           boolean setupHostsFile) throws IOException {
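  // Note: as above, the hosts argument is dropped; null is forwarded.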
  startDataNodes(conf, numDataNodes, manageDfsDirs, operation,
      racks, null, simulatedCapacities, setupHostsFile, true, null);
}
Project: hadoop-EAR    File: TestDFSStorageStateRecovery.java
/**
 * This test iterates over the testCases table and attempts
 * to startup the NameNode normally.
 */
public void testNNStorageStates() throws Exception {
  String[] baseDirs;

  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_NN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[5];
      boolean curAfterRecover = testCase[6];
      boolean prevAfterRecover = testCase[7];

      log("NAME_NODE recovery", numDirs, i, testCase);
      baseDirs = createStorageState(NAME_NODE, testCase);
      if (shouldRecover) {
        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
        checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
        cluster.shutdown();
      } else {
        try {
          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
          throw new AssertionError("NameNode should have failed to start");
        } catch (IOException expected) {
          // the exception is expected
          // check that the message says "not formatted" 
          // when storage directory is empty (case #5)
          if(!testCases[i][0] && !testCases[i][2] 
                && !testCases[i][1] && !testCases[i][3] && !testCases[i][4]) {
            assertTrue(expected.getLocalizedMessage().contains(
                "NameNode is not formatted"));
          }
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
Project: hadoop-EAR    File: TestDFSStorageStateRecovery.java
/**
 * This test iterates over the testCases table and attempts
 * to startup the DataNode normally.
 */
public void testDNStorageStates() throws Exception {
  String[] baseDirs;

  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[5];
      boolean curAfterRecover = testCase[6];
      boolean prevAfterRecover = testCase[7];

      log("DATA_NODE recovery", numDirs, i, testCase);
      createStorageState(NAME_NODE,
                         new boolean[] {true, true, false, false, false});
      cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
      baseDirs = createStorageState(DATA_NODE, testCase);
      if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
        // DataNode will create and format current if no directories exist
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          try {
            cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
            throw new AssertionError("DataNode should have failed to start");
          } catch (Exception expected) {
            // expected
          }
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
Project: hadoop-EAR    File: TestDFSFinalize.java
/**
 * This test attempts to finalize the NameNode and DataNode.
 */
public void testFinalize() throws Exception {
  UpgradeUtilities.initialize();

  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    /* This test requires that "current" directory not change after
     * the upgrade. Actually it is ok for those contents to change.
     * For now disabling block verification so that the contents are 
     * not changed.
     */
    conf = new Configuration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
    String[] dataNodeDirs = conf.getStrings("dfs.data.dir");

    log("Finalize with existing previous dir", numDirs);
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
    UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
    cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    log("Finalize without existing previous dir", numDirs);
    cluster.finalizeCluster(conf);
    checkResult(nameNodeDirs, dataNodeDirs);

    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  } // end numDir loop
}
Project: hadoop-EAR    File: TestDFSStartupVersions.java
/**
 * This test ensures the appropriate response (successful or failure) from 
 * a Datanode when the system is started with differing version combinations. 
 * <pre>
 * For each 3-tuple in the cross product
 *   ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 *    {currentNamespaceId,incorrectNamespaceId},
 *    {pastFsscTime,currentFsscTime,futureFsscTime})
 *      1. Startup Namenode with version file containing 
 *         (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 *      2. Attempt to startup Datanode with version file containing 
 *         this iterations version 3-tuple
 * </pre>
 */
public void testVersions() throws Exception {
  UpgradeUtilities.initialize();
  Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
                                                    new Configuration());
  StorageInfo[] versions = initializeVersions();
  UpgradeUtilities.createStorageDirs(
      NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
  cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
  StorageInfo nameNodeVersion = new StorageInfo(
      UpgradeUtilities.getCurrentLayoutVersion(),
      UpgradeUtilities.getCurrentNamespaceID(cluster),
      UpgradeUtilities.getCurrentFsscTime(cluster));
  log("NameNode version info", NAME_NODE, null, nameNodeVersion);
  int namespaceId = cluster.getNameNode().getNamespaceID();
  for (int i = 0; i < versions.length; i++) {
    File[] storage = UpgradeUtilities.createStorageDirs(
        DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
    log("DataNode version info", DATA_NODE, i, versions[i]);
    UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i], namespaceId);
    try {
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    } catch (Exception ignore) {
      // Ignore.  The asserts below will check for problems.
      // ignore.printStackTrace();
    }
    assertTrue(cluster.getNameNode() != null);
    assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
                 cluster.isDataNodeUp());
    cluster.shutdownDataNodes();
  }
}
Project: hadoop-EAR    File: TestGenericJournalConf.java
@Override
public void transitionJournal(StorageInfo nsInfo, Transition transition,
    StartupOption startOpt) throws IOException {
  if (Transition.FORMAT == transition) {
    formatCalled = true;
  }
}
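
Since the stub records only FORMAT transitions, a test can assert the flag after formatting the journal (illustrative):

// Illustrative assertion once the journal has been formatted:
assertTrue("format should be routed through transitionJournal", formatCalled);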