Java class org.apache.hadoop.fs.CommonConfigurationKeys: example source code
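All the snippets below share one pattern: CommonConfigurationKeys (together with its parent class CommonConfigurationKeysPublic) holds only String constants naming Hadoop configuration properties, plus their defaults; the values themselves are read and written through a Configuration object. As a minimal, self-contained sketch of that pattern (the class name and the 8192 value are illustrative, not taken from any project below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class CommonConfigurationKeysExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Write a value under the property name held by the constant
    // ("io.file.buffer.size").
    conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 8192);
    // Read it back, supplying a fallback default for when the key is unset.
    int bufferSize = conf.getInt(
        CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
    System.out.println("io.file.buffer.size = " + bufferSize);
  }
}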
Project: hadoop
File: TestZKFailoverControllerStress.java
/**
 * Have the services fail their health checks half the time,
 * causing the master role to bounce back and forth in the
 * cluster. Meanwhile, cause ZK to disconnect clients every
 * 50ms, to trigger the retry code and failures to become active.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomHealthAndDisconnects() throws Exception {
  long runFor = STRESS_RUNTIME_SECS * 1000;
  Mockito.doAnswer(new RandomlyThrow(0))
      .when(cluster.getService(0).proxy).monitorHealth();
  Mockito.doAnswer(new RandomlyThrow(1))
      .when(cluster.getService(1).proxy).monitorHealth();
  conf.setInt(CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY, 100);
  // Don't start until after the above mocking. Otherwise we can get
  // Mockito errors if the HM calls the proxy in the middle of
  // setting up the mock.
  cluster.start();
  long st = Time.now();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    serverFactory.closeAll();
    Thread.sleep(50);
  }
}
Project: hadoop
File: TestZlibCompressorDecompressor.java
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    byte[] rawData;
    int tryNumber = 5;
    int BYTE_SIZE = 10 * 1024;
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    rawData = generate(BYTE_SIZE);
    try {
      for (int i = 0; i < tryNumber; i++)
        compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
            (ZlibDecompressor) zlibDecompressor);
      zlibCompressor.reinit(conf);
    } catch (Exception ex) {
      fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
    }
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
Project: hadoop
File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();
  // For a datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: hadoop
File: TestGroupFallback.java
@Test
public void testNetgroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
  Groups groups = new Groups(conf);
  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);
  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
Project: hadoop
File: TestHealthMonitor.java
@Before
public void setupHM() throws InterruptedException, IOException {
  Configuration conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
  svc = createDummyHAService();
  hm = new HealthMonitor(conf, svc) {
    @Override
    protected HAServiceProtocol createProxy() throws IOException {
      createProxyCount.incrementAndGet();
      if (throwOOMEOnCreate) {
        throw new OutOfMemoryError("oome");
      }
      return super.createProxy();
    }
  };
  LOG.info("Starting health monitor");
  hm.start();
  LOG.info("Waiting for HEALTHY signal");
  waitForState(hm, HealthMonitor.State.SERVICE_HEALTHY);
}
Project: hadoop-oss
File: TestGroupFallback.java
@Test
public void testNetgroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
  Groups groups = new Groups(conf);
  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);
  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
Project: hadoop-oss
File: TestGroupsCaching.java
@Test
public void testCachePreventsImplRequest() throws Exception {
  // Disable negative cache.
  conf.setLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
  Groups groups = new Groups(conf);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  assertEquals(0, FakeGroupMapping.getRequestCount());
  // First call hits the wire
  assertTrue(groups.getGroups("me").size() == 2);
  assertEquals(1, FakeGroupMapping.getRequestCount());
  // Second call hits the cache
  assertTrue(groups.getGroups("me").size() == 2);
  assertEquals(1, FakeGroupMapping.getRequestCount());
}
Project: hadoop
File: TestProtoBufRpc.java
@Before
public void setUp() throws IOException { // Setup server for both protocols
  conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
  // Create server side implementation
  PBServerImpl serverImpl = new PBServerImpl();
  BlockingService service = TestProtobufRpcProto
      .newReflectiveBlockingService(serverImpl);
  // Get RPC server for server side implementation
  server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
  addr = NetUtils.getConnectAddress(server);
  // now the second protocol
  PBServer2Impl server2Impl = new PBServer2Impl();
  BlockingService service2 = TestProtobufRpc2Proto
      .newReflectiveBlockingService(server2Impl);
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
      service2);
  server.start();
}
Project: hadoop-oss
File: TestServiceAuthorization.java
@Test
public void testDefaultAcl() {
  ServiceAuthorizationManager serviceAuthorizationManager =
      new ServiceAuthorizationManager();
  Configuration conf = new Configuration();
  // test without setting a default acl
  conf.set(ACL_CONFIG, "user1 group1");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  AccessControlList acl =
      serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());
  // test with a default acl
  conf.set(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
      "user2 group2");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals("user2 group2", acl.getAclString());
}
Project: hadoop
File: TestDatanodeProtocolRetryPolicy.java
/**
 * Starts an instance of DataNode
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
}
Project: hadoop
File: TestGroupsCaching.java
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  // We make an entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();
  timer.advance(20 * 1000);
  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
Project: hadoop-oss
File: MiniZKFCCluster.java
public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
  this.conf = conf;
  // Fast check interval so tests run faster
  conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
  svcs = new ArrayList<DummyHAService>(2);
  // remove any existing instances we are keeping track of
  DummyHAService.instances.clear();
  for (int i = 0; i < 2; i++) {
    addSvcs(svcs, i);
  }
  this.ctx = new TestContext();
  this.zks = zks;
}
Project: hadoop
File: TestWebHDFS.java
@Test
public void testDTInInsecureClusterWithFallback()
    throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  conf.setBoolean(CommonConfigurationKeys
      .IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    Assert.assertNull(webHdfs.getDelegationToken(null));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop
File: TestCodec.java
@Test
public void testLz4Codec() throws IOException {
  if (NativeCodeLoader.isNativeCodeLoaded()) {
    if (Lz4Codec.isNativeCodeLoaded()) {
      conf.setBoolean(
          CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
          false);
      codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
      codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
      conf.setBoolean(
          CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
          true);
      codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
      codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
    } else {
      Assert.fail("Native hadoop library available but lz4 not");
    }
  }
}
Project: hadoop
File: TestCodec.java
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor",
      c2 != CodecPool.getCompressor(gzc));
}
Project: hadoop
File: Server.java
ConnectionManager() {
  this.idleScanTimer = new Timer(
      "IPC Server idle connection scanner for port " + getPort(), true);
  this.idleScanThreshold = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
  this.idleScanInterval = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);
  this.maxIdleTime = 2 * conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
  this.maxIdleToClose = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
  this.maxConnections = conf.getInt(
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_KEY,
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_DEFAULT);
  // create a set with concurrency -and- a thread-safe iterator, add 2
  // for listener and idle closer threads
  this.connections = Collections.newSetFromMap(
      new ConcurrentHashMap<Connection,Boolean>(
          maxQueueSize, 0.75f, readThreads + 2));
}
Project: hadoop-oss
File: TestHttpServer.java
@Test
public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf = new Configuration();
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
  // requires admin access to instrumentation, FALSE by default
  Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
  // requires admin access to instrumentation, TRUE
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
}
Project: hadoop-oss
File: TestIdentityProviders.java
@Test
public void testPluggableIdentityProvider() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      "org.apache.hadoop.ipc.UserIdentityProvider");
  List<IdentityProvider> providers = conf.getInstances(
      CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      IdentityProvider.class);
  assertTrue(providers.size() == 1);
  IdentityProvider ip = providers.get(0);
  assertNotNull(ip);
  assertEquals(ip.getClass(), UserIdentityProvider.class);
}
Project: hadoop
File: TestDFSPermission.java
private void create(OpType op, Path name, short umask,
    FsPermission permission) throws IOException {
  // set umask in configuration, converting to padded octal
  conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));
  // create the file/directory
  switch (op) {
  case CREATE:
    FSDataOutputStream out = fs.create(name, permission, true,
        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
    out.close();
    break;
  case MKDIRS:
    fs.mkdirs(name, permission);
    break;
  default:
    throw new IOException("Unsupported operation: " + op);
  }
}
Project: hadoop-oss
File: TestSaslRPC.java
@Before
public void setup() {
  LOG.info("---------------------------------");
  LOG.info("Testing QOP:" + getQOPNames(qop));
  LOG.info("---------------------------------");
  conf = new Configuration();
  // The Kerberos-specific tests enable Kerberos themselves; forcing it
  // for all tests would cause failures if the user has a TGT.
  conf.set(HADOOP_SECURITY_AUTHENTICATION, SIMPLE.toString());
  conf.set(HADOOP_RPC_PROTECTION, getQOPNames(qop));
  if (saslPropertiesResolver != null) {
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
        saslPropertiesResolver);
  }
  UserGroupInformation.setConfiguration(conf);
  enableSecretManager = null;
  forceSecretManager = null;
  clientFallBackToSimpleAllowed = true;
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
}
Project: hadoop-oss
File: TestRPC.java
@Test
public void testAuthorization() throws Exception {
  Configuration myConf = new Configuration();
  myConf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);
  // Expect to succeed
  myConf.set(ACL_CONFIG, "*");
  doRPCs(myConf, false);
  // Reset authorization to expect failure
  myConf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(myConf, true);
  myConf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  // Expect to succeed
  myConf.set(ACL_CONFIG, "*");
  doRPCs(myConf, false);
  // Reset authorization to expect failure
  myConf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(myConf, true);
}
Project: hadoop
File: TestGroupFallback.java
@Test
public void testGroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
  Groups groups = new Groups(conf);
  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);
  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
Project: hadoop
File: NfsExports.java
/**
 * Constructor.
 * @param cacheSize The size of the access privilege cache.
 * @param expirationPeriodNano The expiration period of cache entries, in
 *          nanoseconds.
 * @param matchHosts A string specifying one or multiple matchers.
 */
NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) {
  this.cacheExpirationPeriod = expirationPeriodNano;
  accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
      cacheSize, cacheSize, expirationPeriodNano, 0);
  String[] matchStrings = matchHosts.split(
      CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR);
  mMatches = new ArrayList<Match>(matchStrings.length);
  for (String mStr : matchStrings) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Processing match string '" + mStr + "'");
    }
    mStr = mStr.trim();
    if (!mStr.isEmpty()) {
      mMatches.add(getMatch(mStr));
    }
  }
}
Project: hadoop-oss
File: Groups.java
public Groups(Configuration conf, final Timer timer) {
  impl =
      ReflectionUtils.newInstance(
          conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
              ShellBasedUnixGroupsMapping.class,
              GroupMappingServiceProvider.class),
          conf);
  cacheTimeout =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs =
      conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
          CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  parseStaticMapping(conf);
  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
      .build(new GroupCacheLoader());
  if (negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
        .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
        .ticker(new TimerToTickerAdapter(timer))
        .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }
  if (LOG.isDebugEnabled())
    LOG.debug("Group mapping impl=" + impl.getClass().getName() +
        "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
        warningDeltaMs);
}
Project: hadoop
File: TestFileAppend3.java
@BeforeClass
public static void setUp() throws java.lang.Exception {
  AppendTestUtil.LOG.info("setUp()");
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  fs = cluster.getFileSystem();
}
Project: hadoop-oss
File: ZKFailoverController.java
private void initZK() throws HadoopIllegalArgumentException, IOException,
    KeeperException {
  zkQuorum = conf.get(ZK_QUORUM_KEY);
  int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
      ZK_SESSION_TIMEOUT_DEFAULT);
  // Parse ACLs from configuration.
  String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT);
  zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
  List<ACL> zkAcls = ZKUtil.parseACLs(zkAclConf);
  if (zkAcls.isEmpty()) {
    zkAcls = Ids.CREATOR_ALL_ACL;
  }
  // Parse authentication from configuration.
  String zkAuthConf = conf.get(ZK_AUTH_KEY);
  zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf);
  List<ZKAuthInfo> zkAuths;
  if (zkAuthConf != null) {
    zkAuths = ZKUtil.parseAuth(zkAuthConf);
  } else {
    zkAuths = Collections.emptyList();
  }
  // Sanity check configuration.
  Preconditions.checkArgument(zkQuorum != null,
      "Missing required configuration '%s' for ZooKeeper quorum",
      ZK_QUORUM_KEY);
  Preconditions.checkArgument(zkTimeout > 0,
      "Invalid ZK session timeout %s", zkTimeout);
  int maxRetryNum = conf.getInt(
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY,
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
  elector = new ActiveStandbyElector(zkQuorum,
      zkTimeout, getParentZnode(), zkAcls, zkAuths,
      new ElectorCallbacks(), maxRetryNum);
}
Project: hadoop-oss
File: HAAdmin.java
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf != null) {
    rpcTimeoutForChecks = conf.getInt(
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
  }
}
Project: hadoop
File: TestHttpServer.java
@Test
public void testHasAdministratorAccess() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  Mockito.when(request.getRemoteUser()).thenReturn(null);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
  // authorization OFF
  Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
  // authorization ON & user NULL
  response = Mockito.mock(HttpServletResponse.class);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
  Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), Mockito.anyString());
  // authorization ON & user NOT NULL & ACLs NULL
  response = Mockito.mock(HttpServletResponse.class);
  Mockito.when(request.getRemoteUser()).thenReturn("foo");
  Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
  // authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
  response = Mockito.mock(HttpServletResponse.class);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
  Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), Mockito.anyString());
  // authorization ON & user NOT NULL & ACLs NOT NULL & user in ACLs
  response = Mockito.mock(HttpServletResponse.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
}
Project: hadoop
File: TestCodec.java
@Test
public void testGzipCodecRead() throws IOException {
  // Create a gzipped file and try to read it back, using a decompressor
  // from the CodecPool.
  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  // Ensure that the CodecPool has a BuiltInZlibInflater in it.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Now create a GZip text file.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final String msg = "This is the message in the file!";
  bw.write(msg);
  bw.close();
  // Now read it back, using the CodecPool to establish the
  // decompressor to use.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
}
Project: ditb
File: HBaseKerberosUtils.java
public static void setSecuredConfiguration(Configuration conf) {
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true);
  conf.set(KRB_KEYTAB_FILE, System.getProperty(KRB_KEYTAB_FILE));
  conf.set(KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL));
  conf.set(MASTER_KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL));
}
Project: hadoop
File: TestDecommissioningStatus.java
private void writeFile(FileSystem fileSys, Path name, short repl)
    throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
      blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
Project: hadoop
File: TestCheckpoint.java
static void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[TestCheckpoint.fileSize];
  Random rand = new Random(TestCheckpoint.seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
Project: hadoop
File: RPCCallBenchmark.java
private Server startServer(MyOptions opts) throws IOException {
  if (opts.serverThreads <= 0) {
    return null;
  }
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
      opts.serverReaderThreads);
  RPC.Server server;
  // Get RPC server for server side implementation
  if (opts.rpcEngine == ProtobufRpcEngine.class) {
    // Create server side implementation
    PBServerImpl serverImpl = new PBServerImpl();
    BlockingService service = TestProtobufRpcProto
        .newReflectiveBlockingService(serverImpl);
    server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
        .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
        .setNumHandlers(opts.serverThreads).setVerbose(false).build();
  } else if (opts.rpcEngine == WritableRpcEngine.class) {
    server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
        .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
        .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
        .setVerbose(false).build();
  } else {
    throw new RuntimeException("Bad engine: " + opts.rpcEngine);
  }
  server.start();
  return server;
}
Project: hadoop
File: TestHttpServer.java
/**
 * Verify access to the /logs, /stacks, /conf, /logLevel and /metrics
 * servlets when authentication filters are set but authorization is not
 * enabled.
 * @throws Exception
 */
@Test
public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  // Authorization is disabled by default
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      DummyFilterInitializer.class.getName());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();
  String serverURL = "http://"
      + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] { "conf", "logs", "stacks",
      "logLevel", "metrics" }) {
    for (String user : new String[] { "userA", "userB" }) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
          + servlet, user));
    }
  }
  myServer.stop();
}
Project: hadoop-oss
File: Client.java
/** Construct an IPC client whose values are of the given {@link Writable}
 * class. */
public Client(Class<? extends Writable> valueClass, Configuration conf,
    SocketFactory factory) {
  this.valueClass = valueClass;
  this.conf = conf;
  this.socketFactory = factory;
  this.connectionTimeout = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
  this.fallbackAllowed = conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  this.clientId = ClientId.getClientId();
  this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
}
Project: hadoop
File: TestJMXGet.java
private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
Project: hadoop
File: TestGroupFallback.java
@Test
public void testNetgroupShell() throws Exception {
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
  Groups groups = new Groups(conf);
  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);
  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
Project: hadoop-oss
File: DecayRpcScheduler.java
private IdentityProvider parseIdentityProvider(String ns, Configuration conf) {
  List<IdentityProvider> providers = conf.getInstances(
      ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      IdentityProvider.class);
  if (providers.size() < 1) {
    LOG.info("IdentityProvider not specified, " +
        "defaulting to UserIdentityProvider");
    return new UserIdentityProvider();
  }
  return providers.get(0); // use the first
}
Project: hadoop-oss
File: Lz4Codec.java
/**
 * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
 *
 * @return a new compressor for use by this codec
 */
@Override
public Compressor createCompressor() {
  if (!isNativeCodeLoaded()) {
    throw new RuntimeException("native lz4 library not available");
  }
  int bufferSize = conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
  boolean useLz4HC = conf.getBoolean(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
  return new Lz4Compressor(bufferSize, useLz4HC);
}