Java class org.apache.hadoop.conf.Configuration: example source code

Project: aliyun-maxcompute-data-collectors    File: SQLServerDBInputFormat.java
/** {@inheritDoc} */
@Override
protected RecordReader<LongWritable, T> createDBRecordReader(
    DBInputSplit split, Configuration conf) throws IOException {

  DBConfiguration dbConf = getDBConf();
  Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
  String dbProductName = getDBProductName();
  LOG.debug("Creating db record reader for db product: " + dbProductName);

  try {
    return new SQLServerDBRecordReader<T>(split, inputClass,
        conf, getConnection(), dbConf, dbConf.getInputConditions(),
        dbConf.getInputFieldNames(), dbConf.getInputTableName(),
        dbProductName);
  } catch (SQLException ex) {
    throw new IOException(ex);
  }
}
Project: hadoop    File: ConfiguredRMFailoverProxyProvider.java
@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
                  Class<T> protocol) {
  this.rmProxy = rmProxy;
  this.protocol = protocol;
  this.rmProxy.checkAllowedProtocols(this.protocol);
  this.conf = new YarnConfiguration(configuration);
  Collection<String> rmIds = HAUtil.getRMHAIds(conf);
  this.rmServiceIds = rmIds.toArray(new String[rmIds.size()]);
  conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES));

  conf.setInt(CommonConfigurationKeysPublic.
      IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS));
}
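
As a minimal standalone sketch of the getInt/setInt pattern above (the demo.* keys are hypothetical, not real YARN or IPC keys): a user-facing retry setting is read with a default and propagated onto a lower-level key.

import org.apache.hadoop.conf.Configuration;

public class FailoverRetryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Read the user-facing retry count, falling back to a default of 3.
    int retries = conf.getInt("demo.client.failover.retries", 3);
    // Propagate it onto the lower-level connection-retry key.
    conf.setInt("demo.ipc.connect.max.retries", retries);
    System.out.println(conf.getInt("demo.ipc.connect.max.retries", -1)); // prints 3
  }
}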
Project: Transwarp-Sample-Code    File: HDFSSequenceFile.java
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if(useRawLocalFileSystem) {
    if(hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile
          (dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
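
A minimal sketch of the boolean gate that decides between append and create; only the Configuration calls are exercised, and everything apart from the hdfs.append.support key is illustrative.

import org.apache.hadoop.conf.Configuration;

public class AppendFlagSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hdfs.append.support", true);
    // getBoolean returns the supplied default (false) when the key is absent.
    boolean append = conf.getBoolean("hdfs.append.support", false);
    System.out.println(append ? "would append" : "would create");
  }
}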
Project: big_data    File: TransformerOutputFormat.java
/**
 * Defines the output format for each record; the input is the data emitted by the reduce task on each call to write().
 */
@Override
public RecordWriter<BaseDimension, BaseStatsValueWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    Connection conn = null;
    IDimensionConverter converter = new DimensionConverterImpl();
    try {
        conn = JdbcManager.getConnection(conf, GlobalConstants.WAREHOUSE_OF_REPORT);
        conn.setAutoCommit(false);
    } catch (SQLException e) {
        logger.error("获取数据库连接失败", e);
        throw new IOException("获取数据库连接失败", e);
    }
    return new TransformerRecordWriter(conn, conf, converter);
}
Project: hadoop    File: DataNode.java
/**
 * See {@link DirectoryScanner}
 */
private synchronized void initDirectoryScanner(Configuration conf) {
  if (directoryScanner != null) {
    return;
  }
  String reason = null;
  if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 
                  DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
    reason = "verification is turned off by configuration";
  } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
    reason = "verifcation is not supported by SimulatedFSDataset";
  } 
  if (reason == null) {
    directoryScanner = new DirectoryScanner(this, data, conf);
    directoryScanner.start();
  } else {
    LOG.info("Periodic Directory Tree Verification scan is disabled because " +
                 reason);
  }
}
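
A minimal sketch of disabling a periodic task via a negative interval value, as initDirectoryScanner does; the key name and default here are hypothetical, not the real DFS ones.

import org.apache.hadoop.conf.Configuration;

public class ScanIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("demo.scan.interval.sec", -1);            // a negative value disables the scan
    int interval = conf.getInt("demo.scan.interval.sec", 21600);
    System.out.println(interval < 0 ? "scanner disabled" : "scan every " + interval + "s");
  }
}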
Project: hadoop-oss    File: TestFailoverController.java
@Test
public void testFailoverToNonExistantServiceFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = spy(new DummyHAService(null, svc2Addr));
  Mockito.doThrow(new IOException("Failed to connect"))
    .when(svc2).getProxy(Mockito.<Configuration>any(),
        Mockito.anyInt());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over to a non-existant standby");
  } catch (FailoverFailedException ffe) {
    // Expected
  }

  assertEquals(HAServiceState.ACTIVE, svc1.state);
}
Project: hadoop    File: TestHttpFSWithKerberos.java
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass)
  throws Exception {
  createHttpFSServer();
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", fileSystemClass.getName());
  conf.set("fs.hdfs.impl.disable.cache", "true");
  URI uri = new URI( "webhdfs://" +
                     TestJettyHelper.getJettyURL().toURI().getAuthority());
  FileSystem fs = FileSystem.get(uri, conf);
  Token<?> tokens[] = fs.addDelegationTokens("foo", null);
  fs.close();
  Assert.assertEquals(1, tokens.length);
  fs = FileSystem.get(uri, conf);
  ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
  fs.listStatus(new Path("/"));
  fs.close();
}
Project: hadoop    File: UpgradeUtilities.java
/**
 * Initialize {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and 
 * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} with the specified 
 * number of directory entries. Also initialize dfs.blockreport.intervalMsec.
 */
public static Configuration initializeStorageStateConf(int numDirs,
                                                       Configuration conf) {
  StringBuffer nameNodeDirs =
    new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
  StringBuffer dataNodeDirs =
    new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
  for (int i = 2; i <= numDirs; i++) {
    nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name"+i));
    dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
  }
  if (conf == null) {
    conf = new HdfsConfiguration();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameNodeDirs.toString());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000);
  return conf;
}
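
A small sketch showing that a comma-joined directory list written with set() can be read back as separate entries with getStrings(); the key name is hypothetical.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class DirListSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("demo.name.dirs", "/tmp/name1,/tmp/name2,/tmp/name3");
    // getStrings splits the stored value on commas.
    String[] dirs = conf.getStrings("demo.name.dirs");
    System.out.println(Arrays.toString(dirs)); // [/tmp/name1, /tmp/name2, /tmp/name3]
  }
}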
Project: hadoop    File: MockContainer.java
public MockContainer(ApplicationAttemptId appAttemptId,
    Dispatcher dispatcher, Configuration conf, String user,
    ApplicationId appId, int uniqId) throws IOException{

  this.user = user;
  this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  this.id = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId,
      uniqId);
  this.launchContext = recordFactory
      .newRecordInstance(ContainerLaunchContext.class);
  long currentTime = System.currentTimeMillis();
  this.containerTokenIdentifier =
      BuilderUtils.newContainerTokenIdentifier(BuilderUtils
        .newContainerToken(id, "127.0.0.1", 1234, user,
          BuilderUtils.newResource(1024, 1), currentTime + 10000, 123,
          "password".getBytes(), currentTime));
  this.state = ContainerState.NEW;
}
Project: ditb    File: IntegrationTestBigLinkedList.java
@Override public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("Usage: Clean <output dir>");
    return -1;
  }

  Path p = new Path(args[0]);
  Configuration conf = getConf();
  TableName tableName = getTableName(conf);
  try (FileSystem fs = HFileSystem.get(conf);
      Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }

    if (fs.exists(p)) {
      fs.delete(p, true);
    }
  }

  return 0;
}
Project: hadoop    File: TestServletFilter.java
/**
 * Similar to the above test case, except that it uses a different API to add
 * the filter. Regression test for HADOOP-8786.
 */
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
    throws Exception {
  Configuration conf = new Configuration();
  HttpServer2 http = createTestServer(conf);
  HttpServer2.defineFilter(http.webAppContext,
      "ErrorFilter", ErrorFilter.class.getName(),
      null, null);
  try {
    http.start();
    fail("expecting exception");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Unable to initialize WebAppContext", e);
  }
}
Project: hadoop    File: Task.java
public void setConf(Configuration conf) {
  if (conf instanceof JobConf) {
    this.conf = (JobConf) conf;
  } else {
    this.conf = new JobConf(conf);
  }
  this.mapOutputFile = ReflectionUtils.newInstance(
      conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
        MROutputFiles.class, MapOutputFile.class), conf);
  this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
  // add the static resolutions (this is required for the junit to
  // work on testcases that simulate multiple nodes on a single physical
  // node).
  String hostToResolved[] = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
  if (hostToResolved != null) {
    for (String str : hostToResolved) {
      String name = str.substring(0, str.indexOf('='));
      String resolvedName = str.substring(str.indexOf('=') + 1);
      NetUtils.addStaticResolution(name, resolvedName);
    }
  }
}
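
A small sketch of the "host=resolvedHost" parsing above, using setStrings/getStrings against a hypothetical key.

import org.apache.hadoop.conf.Configuration;

public class StaticResolutionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // setStrings stores the values as a single comma-separated property.
    conf.setStrings("demo.static.resolutions", "node1=127.0.0.1", "node2=127.0.0.2");
    String[] pairs = conf.getStrings("demo.static.resolutions");
    if (pairs != null) {
      for (String pair : pairs) {
        int eq = pair.indexOf('=');
        System.out.println(pair.substring(0, eq) + " -> " + pair.substring(eq + 1));
      }
    }
  }
}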
Project: ditb    File: TestAccessControlFilter.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  enableSecurity(conf);
  verifyConfiguration(conf);

  // We expect 0.98 scanning semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);

  READER = User.createUserForTesting(conf, "reader", new String[0]);
  LIMITED = User.createUserForTesting(conf, "limited", new String[0]);
  DENIED = User.createUserForTesting(conf, "denied", new String[0]);
}
Project: circus-train    File: DynamicInputFormat.java
/**
 * Package private, for testability.
 *
 * @param nMaps The number of maps requested for.
 * @param nRecords The number of records to be copied.
 * @param conf The configuration set by users.
 * @return The number of splits each map should handle, ideally.
 */
static int getSplitRatio(int nMaps, int nRecords, Configuration conf) {
  int maxChunksIdeal = getMaxChunksIdeal(conf);
  int minRecordsPerChunk = getMinRecordsPerChunk(conf);
  int splitRatio = getSplitRatio(conf);

  if (nMaps == 1) {
    LOG.warn("nMaps == 1. Why use DynamicInputFormat?");
    return 1;
  }

  if (nMaps > maxChunksIdeal) {
    return splitRatio;
  }

  int nPickups = (int) Math.ceil((float) maxChunksIdeal / nMaps);
  int nRecordsPerChunk = (int) Math.ceil((float) nRecords / (nMaps * nPickups));

  return nRecordsPerChunk < minRecordsPerChunk ? splitRatio : nPickups;
}
Project: ditb    File: TestHBaseConfiguration.java
@Test
public void testGetPassword() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH, "jceks://file"
      + new File(UTIL.getDataTestDir().toUri().getPath(), "foo.jks").getCanonicalPath());
  ReflectiveCredentialProviderClient client = new ReflectiveCredentialProviderClient();
  if (client.isHadoopCredentialProviderAvailable()) {
    char[] keyPass = { 'k', 'e', 'y', 'p', 'a', 's', 's' };
    char[] storePass = { 's', 't', 'o', 'r', 'e', 'p', 'a', 's', 's' };
    client.createEntry(conf, "ssl.keypass.alias", keyPass);
    client.createEntry(conf, "ssl.storepass.alias", storePass);

    String keypass = HBaseConfiguration.getPassword(conf, "ssl.keypass.alias", null);
    assertEquals(keypass, new String(keyPass));

    String storepass = HBaseConfiguration.getPassword(conf, "ssl.storepass.alias", null);
    assertEquals(storepass, new String(storePass));
  }
}
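
For comparison, Hadoop's own Configuration also exposes getPassword(); with no credential provider configured it normally falls back to the clear-text value stored under the key. A minimal sketch with a hypothetical alias:

import org.apache.hadoop.conf.Configuration;

public class GetPasswordSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("demo.ssl.keypass", "keypass");          // hypothetical alias, clear-text value
    char[] pw = conf.getPassword("demo.ssl.keypass"); // no provider configured, so this falls back to the config value
    System.out.println(pw == null ? "not found" : new String(pw));
  }
}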
Project: hadoop    File: LocalResourcesTrackerImpl.java
LocalResourcesTrackerImpl(String user, ApplicationId appId,
    Dispatcher dispatcher,
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc,
    boolean useLocalCacheDirectoryManager, Configuration conf,
    NMStateStoreService stateStore, LocalDirsHandlerService dirHandler) {
  this.appId = appId;
  this.user = user;
  this.dispatcher = dispatcher;
  this.localrsrc = localrsrc;
  this.useLocalCacheDirectoryManager = useLocalCacheDirectoryManager;
  if (this.useLocalCacheDirectoryManager) {
    directoryManagers =
        new ConcurrentHashMap<Path, LocalCacheDirectoryManager>();
    inProgressLocalResourcesMap =
        new ConcurrentHashMap<LocalResourceRequest, Path>();
  }
  this.conf = conf;
  this.stateStore = stateStore;
  this.dirsHandler = dirHandler;
}
Project: hadoop    File: TestClientToAMTokens.java
private void verifyNewVersionToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, MockRM rm) throws IOException,
    InterruptedException {
  UserGroupInformation ugi;
  ugi = UserGroupInformation.createRemoteUser("me");

  Token<ClientToAMTokenIdentifier> newToken = 
      new Token<ClientToAMTokenIdentifier>(
          new ClientToAMTokenIdentifierForTest(token.decodeIdentifier(), "message"),
          am.getClientToAMTokenSecretManager());
  newToken.setService(token.getService());

  ugi.addToken(newToken);

  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      CustomProtocol client =
          (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
            conf);
      client.ping();
      Assert.assertTrue(am.pinged);
      return null;
    }
  });
}
Project: ditb    File: TableLockManager.java
/**
 * Creates and returns a TableLockManager according to the configuration
 */
public static TableLockManager createTableLockManager(Configuration conf,
    ZooKeeperWatcher zkWatcher, ServerName serverName) {
  // Initialize table level lock manager for schema changes, if enabled.
  if (conf.getBoolean(TABLE_LOCK_ENABLE,
      DEFAULT_TABLE_LOCK_ENABLE)) {
    long writeLockTimeoutMs = conf.getLong(TABLE_WRITE_LOCK_TIMEOUT_MS,
        DEFAULT_TABLE_WRITE_LOCK_TIMEOUT_MS);
    long readLockTimeoutMs = conf.getLong(TABLE_READ_LOCK_TIMEOUT_MS,
        DEFAULT_TABLE_READ_LOCK_TIMEOUT_MS);
    long lockExpireTimeoutMs = conf.getLong(TABLE_LOCK_EXPIRE_TIMEOUT,
        DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS);

    return new ZKTableLockManager(zkWatcher, serverName, writeLockTimeoutMs, readLockTimeoutMs, lockExpireTimeoutMs);
  }

  return new NullTableLockManager();
}
Project: ditb    File: JvmPauseMonitor.java
/**
 * Simple 'main' to facilitate manual testing of the pause monitor.
 * 
 * This main function just leaks memory into a list. Running this class
 * with a 1GB heap will very quickly go into "GC hell" and result in
 * log messages about the GC pauses.
 */
public static void main(String []args) throws Exception {
  new JvmPauseMonitor(new Configuration()).start();
  List<String> list = Lists.newArrayList();
  int i = 0;
  while (true) {
    list.add(String.valueOf(i++));
  }
}
Project: hadoop-oss    File: FileUtil.java
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, FileStatus srcStatus,
                           FileSystem dstFS, Path dst,
                           boolean deleteSource,
                           boolean overwrite,
                           Configuration conf) throws IOException {
  Path src = srcStatus.getPath();
  dst = checkDest(src.getName(), dstFS, dst, overwrite);
  if (srcStatus.isDirectory()) {
    checkDependencies(srcFS, src, dstFS, dst);
    if (!dstFS.mkdirs(dst)) {
      return false;
    }
    FileStatus contents[] = srcFS.listStatus(src);
    for (int i = 0; i < contents.length; i++) {
      copy(srcFS, contents[i], dstFS,
           new Path(dst, contents[i].getPath().getName()),
           deleteSource, overwrite, conf);
    }
  } else {
    InputStream in=null;
    OutputStream out = null;
    try {
      in = srcFS.open(src);
      out = dstFS.create(dst, overwrite);
      IOUtils.copyBytes(in, out, conf, true);
    } catch (IOException e) {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      throw e;
    }
  }
  if (deleteSource) {
    return srcFS.delete(src, true);
  } else {
    return true;
  }

}
Project: ditb    File: StoreFile.java
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 *
 * @param fs          The current file system to use.
 * @param fileInfo    The store file information.
 * @param conf        The current configuration.
 * @param cacheConf   The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *                    configuration. This may or may not be the same as the Bloom filter type actually
 *                    present in the HFile, because column family configuration might change. If this is
 *                    {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;

  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType="
        + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
}
Project: hadoop-oss    File: TestFsShellReturnCode.java
private void testChownUserAndGroupValidity(boolean enableWarning) {
  Configuration conf = new Configuration();
  conf.setBoolean(
      HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, enableWarning);
  FsCommand chown = new FakeChown();
  chown.setConf(conf);

  // The following are valid (no exception expected).
  chown.run("user", "/path");
  chown.run("user:group", "/path");
  chown.run(":group", "/path");

  // The following are valid only on Windows.
  assertValidArgumentsOnWindows(chown, "User With Spaces", "/path");
  assertValidArgumentsOnWindows(chown, "User With Spaces:group", "/path");
  assertValidArgumentsOnWindows(chown, "User With Spaces:Group With Spaces",
    "/path");
  assertValidArgumentsOnWindows(chown, "user:Group With Spaces", "/path");
  assertValidArgumentsOnWindows(chown, ":Group With Spaces", "/path");

  // The following are invalid (exception expected).
  assertIllegalArguments(chown, "us!er", "/path");
  assertIllegalArguments(chown, "us^er", "/path");
  assertIllegalArguments(chown, "user:gr#oup", "/path");
  assertIllegalArguments(chown, "user:gr%oup", "/path");
  assertIllegalArguments(chown, ":gr#oup", "/path");
  assertIllegalArguments(chown, ":gr%oup", "/path");
}
Project: aliyun-maxcompute-data-collectors    File: OracleDBRecordReader.java
public OracleDBRecordReader(DBInputFormat.DBInputSplit split,
    Class<T> inputClass, Configuration conf, Connection conn,
    DBConfiguration dbConfig, String cond, String [] fields,
    String table) throws SQLException {
  super(split, inputClass, conf, conn, dbConfig, cond, fields, table);
  setSessionTimeZone(conf, conn);
}
Project: ditb    File: HConnectionTestingUtility.java
public static ClusterConnection getSpiedClusterConnection(final Configuration conf)
throws IOException {
  HConnectionKey connectionKey = new HConnectionKey(conf);
  synchronized (ConnectionManager.CONNECTION_INSTANCES) {
    HConnectionImplementation connection =
        ConnectionManager.CONNECTION_INSTANCES.get(connectionKey);
    if (connection == null) {
      connection = Mockito.spy(new HConnectionImplementation(conf, true));
      ConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection);
    }
    return connection;
  }
}
Project: hadoop    File: TestScriptBasedMapping.java
@Test
public void testNoArgsMeansNoResult() {
  Configuration conf = new Configuration();
  conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
              ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
  conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
  ScriptBasedMapping mapping = createMapping(conf);
  List<String> names = new ArrayList<String>();
  names.add("some.machine.name");
  names.add("other.machine.name");
  List<String> result = mapping.resolve(names);
  assertNull("Expected an empty list", result);
}
Project: ditb    File: RestoreSnapshotHelper.java
public RestoreSnapshotHelper(final Configuration conf,
    final FileSystem fs,
    final SnapshotManifest manifest,
    final HTableDescriptor tableDescriptor,
    final Path rootDir,
    final ForeignExceptionDispatcher monitor,
    final MonitoredTask status,
    final boolean createBackRefs)
{
  this.fs = fs;
  this.conf = conf;
  this.snapshotManifest = manifest;
  this.snapshotDesc = manifest.getSnapshotDescription();
  this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
  this.createBackRefs = createBackRefs;
}
Project: hadoop    File: ViewFsBaseTest.java
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();

  // Make user and data dirs - we create links to them in the mount table
  fcTarget.mkdir(new Path(targetTestRoot,"user"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"data"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir2"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir3"),
      FileContext.DEFAULT_PERM, true);
  FileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));


  // Now we use the mount fs to set links to user and dir
  // in the test root

  // Set up the defaultMT in the config with our mount point links
  conf = new Configuration();
  ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
  ConfigUtil.addLink(conf, "/user",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/user2",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/data",
      new Path(targetTestRoot,"data").toUri());
  ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
      new Path(targetTestRoot,"dir2").toUri());
  ConfigUtil.addLink(conf, "/internalDir/internalDir2/linkToDir3",
      new Path(targetTestRoot,"dir3").toUri());
  ConfigUtil.addLink(conf, "/danglingLink",
      new Path(targetTestRoot,"missingTarget").toUri());
  ConfigUtil.addLink(conf, "/linkToAFile",
      new Path(targetTestRoot,"aFile").toUri());

  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  // Also try viewfs://default/    - note authority is name of mount table
}
Project: hadoop    File: TestSnapshotListing.java
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  hdfs.mkdirs(dir);
}
Project: hadoop    File: TestServer.java
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoLogDir() throws Exception {
  File homeDir = new File(TestDirHelper.getTestDir(), "home");
  assertTrue(homeDir.mkdir());
  assertTrue(new File(homeDir, "conf").mkdir());
  assertTrue(new File(homeDir, "temp").mkdir());
  Configuration conf = new Configuration(false);
  conf.set("server.services", TestService.class.getName());
  Server server = new Server("server", homeDir.getAbsolutePath(), conf);
  server.init();
}
Project: aliyun-maxcompute-data-collectors    File: CombineFileRecordReader.java
/**
 * Get the record reader for the next chunk in this CombineFileSplit.
 */
protected boolean initNextRecordReader() throws IOException {

  if (curReader != null) {
    curReader.close();
    curReader = null;
    if (idx > 0) {
      progress += split.getLength(idx-1);    // done processing so far
    }
  }

  // if all chunks have been processed, nothing more to do.
  if (idx == split.getNumPaths()) {
    return false;
  }

  // get a record reader for the idx-th chunk
  try {
    Configuration conf = context.getConfiguration();
    // setup some helper config variables.
    conf.set("map.input.file", split.getPath(idx).toString());
    conf.setLong("map.input.start", split.getOffset(idx));
    conf.setLong("map.input.length", split.getLength(idx));

    curReader =  rrConstructor.newInstance(new Object []
                          {split, context, Integer.valueOf(idx)});

    if (idx > 0) {
      // initialize() for the first RecordReader will be called by MapTask;
      // we're responsible for initializing subsequent RecordReaders.
      curReader.initialize(split, context);
    }
  } catch (Exception e) {
    throw new RuntimeException (e);
  }
  idx++;
  return true;
}
Project: mmsns    File: CrunchMapReduce.java
@Override
public void startMapReduce(String taskName) {
    String outputDirectory = propertyConfig.getProperty("sqoop.task." + taskName + ".toJobConfig.outputDirectory");
    String hadoopAddress = propertyConfig.getProperty("sqoop.task." + taskName + ".tolink.linkConfig.uri");
    Pipeline pipeline = new MRPipeline(CrunchMapReduce.class, new Configuration());
    Class<AvroParquetFileSourceTarget> avroParquetFileSourceTargetClass = AvroParquetFileSourceTarget.class;
}
Project: hadoop    File: TestGroupsService.java
@Test(expected = RuntimeException.class)
@TestDir
public void invalidGroupsMapping() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  Configuration conf = new Configuration(false);
  conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
  conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
}
Project: hadoop    File: TestFileSystemAccessService.java
private void createHadoopConf(Configuration hadoopConf) throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  File hdfsSite = new File(dir, "hdfs-site.xml");
  OutputStream os = new FileOutputStream(hdfsSite);
  hadoopConf.writeXml(os);
  os.close();
}
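
A minimal sketch of the writeXml round trip this helper relies on: dump a Configuration to an XML file, then load it back with addResource(); the temp-file path and key are illustrative.

import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class WriteXmlSketch {
  public static void main(String[] args) throws Exception {
    Configuration out = new Configuration(false);
    out.set("demo.key", "demo-value");

    // Write the configuration as a Hadoop-style XML resource file.
    File xml = new File(System.getProperty("java.io.tmpdir"), "demo-site.xml");
    try (OutputStream os = new FileOutputStream(xml)) {
      out.writeXml(os);
    }

    // Load it back into a fresh Configuration from the local filesystem.
    Configuration in = new Configuration(false);
    in.addResource(new Path(xml.getAbsolutePath()));
    System.out.println(in.get("demo.key")); // demo-value
  }
}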
Project: hadoop    File: HadoopArchives.java
public void setConf(Configuration conf) {
  if (conf instanceof JobConf) {
    this.conf = (JobConf) conf;
  } else {
    this.conf = new JobConf(conf, HadoopArchives.class);
  }

  // This is for test purposes: since MR2, unlike Streaming, it is not
  // possible to add a JAR to the classpath that the tool will use when
  // running the mapreduce job.
  String testJar = System.getProperty(TEST_HADOOP_ARCHIVES_JAR_PATH, null);
  if (testJar != null) {
    this.conf.setJar(testJar);
  }
}
Project: hadoop-oss    File: TestCompositeService.java
@Test
public void testAddServiceInInit() throws Throwable {
  BreakableService child = new BreakableService();
  assertInState(STATE.NOTINITED, child);
  CompositeServiceAddingAChild composite =
    new CompositeServiceAddingAChild(child);
  composite.init(new Configuration());
  assertInState(STATE.INITED, child);
}
Project: hadoop    File: TestGroupFallback.java
@Test
public void testGroupShell() throws Exception {
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

  Groups groups = new Groups(conf);

  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);

  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
Project: angel    File: MasterService.java
private int getYarnNMWebPort(Configuration conf) {
  String nmWebAddr = conf.get(YarnConfiguration.NM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS);
  String [] addrItems = nmWebAddr.split(":");
  if(addrItems.length == 2) {
    try {
      return Integer.valueOf(addrItems[1]);
    } catch (Throwable x) {
      LOG.error("can not get nm web port from " + nmWebAddr + ", just return default 8080");
      return 8080;
    }
  } else {
    return 8080;
  }
}
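
A minimal standalone sketch of the host:port parsing above, against a hypothetical demo.webapp.address key.

import org.apache.hadoop.conf.Configuration;

public class WebPortSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("demo.webapp.address", "0.0.0.0:8042");
    String addr = conf.get("demo.webapp.address", "0.0.0.0:8080");
    String[] parts = addr.split(":");
    int port = 8080;                          // fallback, as in the snippet above
    if (parts.length == 2) {
      try {
        port = Integer.parseInt(parts[1]);
      } catch (NumberFormatException e) {
        // keep the fallback port
      }
    }
    System.out.println(port);                 // 8042
  }
}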
Project: hadoop    File: BenchmarkThroughput.java
private void readLocalFile(Path path,
                                  String name,
                                  Configuration conf) throws IOException {
  System.out.print("Reading " + name);
  resetMeasurements();
  InputStream in = new FileInputStream(new File(path.toString()));
  byte[] data = new byte[BUFFER_SIZE];
  long size = 0;
  while (size >= 0) {
    size = in.read(data);
  }
  in.close();
  printMeasurements();
}
Project: hadoop    File: RegistryCli.java
public RegistryCli(PrintStream sysout, PrintStream syserr) {
  Configuration conf = new Configuration();
  super.setConf(conf);
  registry = RegistryOperationsFactory.createInstance(conf);
  registry.start();
  this.sysout = sysout;
  this.syserr = syserr;
}
Project: angel    File: AngelClientFactory.java
public AngelClient doGet(Configuration conf) {
  String mode = conf.get(AngelConf.ANGEL_DEPLOY_MODE, AngelConf.DEFAULT_ANGEL_DEPLOY_MODE);

  if (mode.equals(AngelDeployMode.LOCAL.toString())) {
    return new AngelLocalClient(conf);
  } else {
    return new AngelYarnClient(conf);
  }
}