Java 类org.apache.hadoop.hdfs.client.HdfsAdmin 实例源码

项目:hadoop    文件:TestReservedRawPaths.java   
/**
 * Spins up a single-DataNode MiniDFSCluster backed by a file-based Java
 * keystore (JKS) key provider, and wires up the test's filesystem
 * wrappers, HdfsAdmin handle, and encryption test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  // Point the encryption key provider at a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:hadoop    文件:TestEncryptionZones.java   
/**
 * Starts a single-DataNode MiniDFSCluster wired to the key provider
 * returned by getKeyProviderURI(), lowers the encryption-zone listing
 * batch size to exercise paging, and creates the shared test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:aliyun-oss-hadoop-fs    文件:TestReservedRawPaths.java   
/**
 * Spins up a single-DataNode MiniDFSCluster backed by a file-based Java
 * keystore (JKS) key provider, and wires up the test's filesystem
 * wrappers, HdfsAdmin handle, and encryption test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  // Point the encryption key provider at a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:aliyun-oss-hadoop-fs    文件:TestEncryptionZones.java   
/**
 * Starts a single-DataNode MiniDFSCluster wired to the key provider
 * returned by getKeyProviderURI(), lowers the encryption-zone listing
 * batch size to exercise paging, and creates the shared test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:aliyun-oss-hadoop-fs    文件:TestEncryptionZones.java   
/**
 * Verifies trash behaviour for encryption zones: an encrypted file
 * deleted via the shell lands in a trash directory inside its zone,
 * and a deleted zone lands in the user's home-directory trash.
 */
@Test(timeout = 120000)
public void testEncryptionZoneWithTrash() throws Exception {
  // Admin handle against the default NameNode URI.
  final HdfsAdmin hdfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final Path zonePath = new Path("/zone1");
  fs.mkdirs(zonePath);
  hdfsAdmin.createEncryptionZone(zonePath, TEST_KEY);

  // Place one encrypted file inside the zone.
  final Path encryptedFile = new Path(zonePath, "encFile1");
  final int fileLen = 8192;
  DFSTestUtil.createFile(fs, encryptedFile, fileLen, (short) 1, 0xFEED);

  // Client-side config copy with trash enabled (1-minute interval).
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  FsShell shell = new FsShell(clientConf);

  // Shell-deleting the encrypted file must move it to the trash
  // location kept within the encryption zone.
  verifyShellDeleteWithTrash(shell, encryptedFile);

  // Shell-deleting the zone itself must move it to the trash location
  // in the user's home directory.
  verifyShellDeleteWithTrash(shell, zonePath);
}
项目:big-c    文件:TestReservedRawPaths.java   
/**
 * Spins up a single-DataNode MiniDFSCluster backed by a file-based Java
 * keystore (JKS) key provider, and wires up the test's filesystem
 * wrappers, HdfsAdmin handle, and encryption test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  // Point the encryption key provider at a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:big-c    文件:TestEncryptionZones.java   
/**
 * Starts a single-DataNode MiniDFSCluster wired to the key provider
 * returned by getKeyProviderURI(), lowers the encryption-zone listing
 * batch size to exercise paging, and creates the shared test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestReservedRawPaths.java   
/**
 * Spins up a single-DataNode MiniDFSCluster backed by a file-based Java
 * keystore (JKS) key provider, and wires up the test's filesystem
 * wrappers, HdfsAdmin handle, and encryption test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  // Point the encryption key provider at a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestEncryptionZones.java   
/**
 * Starts a single-DataNode MiniDFSCluster wired to the key provider
 * returned by getKeyProviderURI(), lowers the encryption-zone listing
 * batch size to exercise paging, and creates the shared test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:FlexMap    文件:TestReservedRawPaths.java   
/**
 * Spins up a single-DataNode MiniDFSCluster backed by a file-based Java
 * keystore (JKS) key provider, and wires up the test's filesystem
 * wrappers, HdfsAdmin handle, and encryption test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  // Point the encryption key provider at a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  // NOTE(review): this branch writes the DFSClient's provider field
  // directly; other branches in this listing use setKeyProvider() --
  // presumably this fork predates the setter. Confirm against the
  // branch's DFSClient before changing.
  fs.getClient().provider = cluster.getNameNode().getNamesystem()
      .getProvider();
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:FlexMap    文件:TestEncryptionZones.java   
/**
 * Starts a single-DataNode MiniDFSCluster wired to the key provider
 * returned by getKeyProviderURI(), lowers the encryption-zone listing
 * batch size to exercise paging, and creates the shared test key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // TRACE-level logging of encryption-zone operations aids debugging.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
项目:kafka-connect-fs    文件:HdfsFileWatcherPolicy.java   
/**
 * Builds the per-filesystem event-stream map: every configured
 * filesystem whose working directory matches URI_PREFIX gets an
 * HdfsAdmin handle and an EventStreamThread registered in fsEvenStream.
 */
@Override
protected void configPolicy(Map<String, Object> customConfigs) {
    this.fsEvenStream = new HashMap<>();
    for (FileSystem fileSystem : fileSystems) {
        // Only HDFS-backed filesystems support inotify event streams.
        if (!fileSystem.getWorkingDirectory().toString().startsWith(URI_PREFIX)) {
            continue;
        }
        try {
            HdfsAdmin admin = new HdfsAdmin(fileSystem.getWorkingDirectory().toUri(), fileSystem.getConf());
            fsEvenStream.put(fileSystem, new EventStreamThread(fileSystem, admin));
        } catch (IOException ioe) {
            throw new ConnectException("Error creating admin for notifications", ioe);
        }
    }
}
项目:hadoop    文件:TestEncryptionZonesWithHA.java   
/**
 * Brings up an HA MiniDFSCluster (two NameNodes, one DataNode) with a
 * file-backed JKS key provider, creates the test key on both NameNodes,
 * and points the client at nn0's KeyProvider.
 */
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Fast edit-log tailing keeps the standby close to the active NN.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Key provider is a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  // Failover-aware client filesystem spanning both NameNodes.
  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key against each NameNode's provider.
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // Share nn0's provider with the client so JKS updates are seen by both.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
项目:hadoop    文件:TestEncryptionZones.java   
/**
 * Test listing encryption zones as a non super user.
 *
 * listEncryptionZones returns a lazy batched iterator: the RPC (and
 * hence the superuser check) only fires when the iterator is consumed.
 * The test therefore calls hasNext() to force the first RPC, and fails
 * explicitly if no AccessControlException surfaces — otherwise it would
 * pass vacuously.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  // Zone readable only by the superuser.
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  // Zone readable by everyone; listing must still be superuser-only.
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        // hasNext() triggers the first batched RPC to the NameNode.
        userAdmin.listEncryptionZones().hasNext();
        throw new AssertionError(
            "listEncryptionZones should be rejected for a non-superuser");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
项目:hadoop    文件:TestEncryptionZones.java   
/**
 * End-to-end read/write check for an encryption zone: data written into
 * the zone must read back identical to an unencrypted baseline, both
 * before and after the zone key is rolled, while per-file EDEKs and
 * key-version names must differ across the roll.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Existing file must still decrypt after the key roll.
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
项目:hadoop    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  // try-with-resources closes the FileSystem on every exit path,
  // replacing the manual null-checked finally block.
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report "unset" (-1).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Space quota and namespace quota are independent knobs.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other untouched.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestEncryptionZonesWithHA.java   
/**
 * Brings up an HA MiniDFSCluster (two NameNodes, one DataNode) with a
 * file-backed JKS key provider, creates the test key on both NameNodes,
 * and points the client at nn0's KeyProvider.
 */
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Fast edit-log tailing keeps the standby close to the active NN.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Key provider is a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  // Failover-aware client filesystem spanning both NameNodes.
  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key against each NameNode's provider.
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // Share nn0's provider with the client so JKS updates are seen by both.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
项目:aliyun-oss-hadoop-fs    文件:TestEncryptionZones.java   
/**
 * Test listing encryption zones as a non super user.
 *
 * listEncryptionZones returns a lazy batched iterator: the RPC (and
 * hence the superuser check) only fires when the iterator is consumed.
 * The test therefore calls hasNext() to force the first RPC, and fails
 * explicitly if no AccessControlException surfaces — otherwise it would
 * pass vacuously.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  // Zone readable only by the superuser.
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  // Zone readable by everyone; listing must still be superuser-only.
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        // hasNext() triggers the first batched RPC to the NameNode.
        userAdmin.listEncryptionZones().hasNext();
        throw new AssertionError(
            "listEncryptionZones should be rejected for a non-superuser");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
项目:aliyun-oss-hadoop-fs    文件:TestEncryptionZones.java   
/**
 * End-to-end read/write check for an encryption zone: data written into
 * the zone must read back identical to an unencrypted baseline, both
 * before and after the zone key is rolled, while per-file EDEKs and
 * key-version names must differ across the roll.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Existing file must still decrypt after the key roll.
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
项目:aliyun-oss-hadoop-fs    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  // try-with-resources closes the FileSystem on every exit path,
  // replacing the manual null-checked finally block.
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report "unset" (-1).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Space quota and namespace quota are independent knobs.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other untouched.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:big-c    文件:TestEncryptionZonesWithHA.java   
/**
 * Brings up an HA MiniDFSCluster (two NameNodes, one DataNode) with a
 * file-backed JKS key provider, creates the test key on both NameNodes,
 * and points the client at nn0's KeyProvider.
 */
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Fast edit-log tailing keeps the standby close to the active NN.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Key provider is a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  // Failover-aware client filesystem spanning both NameNodes.
  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key against each NameNode's provider.
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // Share nn0's provider with the client so JKS updates are seen by both.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
项目:big-c    文件:TestEncryptionZones.java   
/**
 * Test listing encryption zones as a non super user.
 *
 * listEncryptionZones returns a lazy batched iterator: the RPC (and
 * hence the superuser check) only fires when the iterator is consumed.
 * The test therefore calls hasNext() to force the first RPC, and fails
 * explicitly if no AccessControlException surfaces — otherwise it would
 * pass vacuously.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  // Zone readable only by the superuser.
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  // Zone readable by everyone; listing must still be superuser-only.
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        // hasNext() triggers the first batched RPC to the NameNode.
        userAdmin.listEncryptionZones().hasNext();
        throw new AssertionError(
            "listEncryptionZones should be rejected for a non-superuser");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
项目:big-c    文件:TestEncryptionZones.java   
/**
 * End-to-end read/write check for an encryption zone: data written into
 * the zone must read back identical to an unencrypted baseline, both
 * before and after the zone key is rolled, while per-file EDEKs and
 * key-version names must differ across the roll.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Existing file must still decrypt after the key roll.
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
项目:big-c    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  // try-with-resources closes the FileSystem on every exit path,
  // replacing the manual null-checked finally block.
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report "unset" (-1).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Space quota and namespace quota are independent knobs.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other untouched.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestEncryptionZonesWithHA.java   
/**
 * Brings up an HA MiniDFSCluster (two NameNodes, one DataNode) with a
 * file-backed JKS key provider, creates the test key on both NameNodes,
 * and points the client at nn0's KeyProvider.
 */
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Fast edit-log tailing keeps the standby close to the active NN.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Key provider is a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  // Failover-aware client filesystem spanning both NameNodes.
  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key against each NameNode's provider.
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // Share nn0's provider with the client so JKS updates are seen by both.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestEncryptionZones.java   
/**
 * Test listing encryption zones as a non super user.
 *
 * listEncryptionZones returns a lazy batched iterator: the RPC (and
 * hence the superuser check) only fires when the iterator is consumed.
 * The test therefore calls hasNext() to force the first RPC, and fails
 * explicitly if no AccessControlException surfaces — otherwise it would
 * pass vacuously.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  // Zone readable only by the superuser.
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  // Zone readable by everyone; listing must still be superuser-only.
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        // hasNext() triggers the first batched RPC to the NameNode.
        userAdmin.listEncryptionZones().hasNext();
        throw new AssertionError(
            "listEncryptionZones should be rejected for a non-superuser");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestEncryptionZones.java   
/**
 * End-to-end read/write check for an encryption zone: data written into
 * the zone must read back identical to an unencrypted baseline, both
 * before and after the zone key is rolled, while per-file EDEKs and
 * key-version names must differ across the roll.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Existing file must still decrypt after the key roll.
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hadoop-plus    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:FlexMap    文件:TestEncryptionZonesWithHA.java   
/**
 * Spins up an HA mini cluster (two NameNodes, one DataNode) backed by a
 * Java keystore key provider, creates the test key on both NameNodes,
 * and wires the DFS client to NN0's key provider.
 */
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  // Tail edits every second so the standby stays close to the active.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Keystore-backed provider; SCHEME + "://file" + path forms the
  // jceks-style provider URI pointing at test.jks under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key on each NameNode (index 0 and 1) separately.
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  // Share NN0's provider with the client so JKS key updates are visible
  // without a flush; this version assigns the field directly rather
  // than via a setter.
  fs.getClient().provider = nn0Provider;
}
项目:FlexMap    文件:TestEncryptionZones.java   
/**
 * Test listing encryption zones as a non super user.
 *
 * A non-superuser must be rejected with an AccessControlException even
 * for zones whose directories are world-accessible.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  // Zone only the superuser can traverse (0700).
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  // World-accessible zone (0707); listing must still be superuser-only.
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        // FIX: listEncryptionZones() returns a lazy batched iterator, so
        // the server-side superuser check only fires once we pull from
        // it; previously the test passed vacuously because neither the
        // RPC nor a fail() ever ran.
        userAdmin.listEncryptionZones().hasNext();
        throw new AssertionError(
            "listEncryptionZones should fail for a non super user");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
项目:FlexMap    文件:TestEncryptionZones.java   
/**
 * End-to-end read/write inside an encryption zone: encrypted data must
 * read back identical to a plaintext reference file, and rolling the
 * zone key must yield fresh EDEKs / key versions for new files while
 * old files remain readable.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Read them back in and compare byte-by-byte; the old file must still
  // decrypt with its original key version after the roll
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different: each file gets its own EDEK, and the
  // second file should be encrypted under the rolled key version
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
项目:FlexMap    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hops    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hadoop-TCP    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hardfs    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:hadoop-on-lustre2    文件:TestHdfsAdmin.java   
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 *
 * Uses try-with-resources so the FileSystem is closed even when an
 * assertion fails (FileSystem implements Closeable), replacing the
 * manual null-checked try/finally.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  try (FileSystem fs = FileSystem.get(conf)) {
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: both quotas report -1 (unset).
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Setting the space quota must not touch the namespace quota.
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    // Clearing one quota must leave the other in place.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  }
}
项目:caching_benchmarking    文件:CacheTool.java   
/**
 * CLI entry point for the cache tool. Dispatches the sub-commands
 * (removeAll | cache &lt;bytes&gt; | locations &lt;path&gt;) against the default
 * filesystem; prints usage and exits non-zero on bad arguments.
 */
public static void main(String[] args) throws Exception {

    if (args.length == 0) {
      usage();
      System.exit(-1);
    }

    conf = new Configuration();
    URI uri = FileSystem.getDefaultUri(conf);
    DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(uri, conf);
    admin = new HdfsAdmin(uri, conf);

    final String command = args[0];
    switch (command) {
      case "removeAll":
        removeAll();
        break;
      case "cache":
        // cache takes exactly one argument: the number of bytes needed.
        if (args.length != 2) {
          usage();
          System.exit(1);
        }
        cache(Long.parseLong(args[1]));
        break;
      case "locations":
        // locations takes exactly one argument: the path to inspect.
        if (args.length != 2) {
          usage();
          System.exit(1);
        }
        printLocations(fs, args[1]);
        break;
      default:
        usage();
        System.exit(-2);
    }

    System.out.println("Done!");
    System.exit(0);
  }
项目:kafka-connect-fs    文件:HdfsFileWatcherPolicy.java   
/**
 * Stores the filesystem and HdfsAdmin handles this thread works with.
 * NOTE(review): given the enclosing class name, {@code admin} is
 * presumably used to open the HDFS inotify event stream — confirm
 * against the thread's run() method.
 */
protected EventStreamThread(FileSystem fs, HdfsAdmin admin) {
    this.fs = fs;
    this.admin = admin;
}
项目:hadoop    文件:TestRpcProgramNfs3.java   
/**
 * One-time fixture for the NFS3 RPC tests: configures proxy-user
 * impersonation for the current user, sets up a JKS key provider,
 * starts a single-DataNode mini cluster plus an in-process NFS3
 * gateway on ephemeral ports, creates the test encryption key, and
 * mocks privileged/unprivileged security handlers.
 */
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");

  // Tighten default permissions, and allow the current user to proxy
  // any group from any host (DefaultImpersonationProvider wildcards).
  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  // Apply the proxy-user settings configured above.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();

  // Share the NN's key provider with the client so JKS key updates are
  // visible without a provider flush, then create the test key.
  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);

  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);

  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}