/**
 * All initialization needed before we go register with Master.
 * <p>
 * Any failure here aborts the region server: the process would otherwise
 * linger forever because it has already put up non-daemon threads.
 */
private void preRegistrationInitialization() {
  try {
    setupClusterConnection();
    // Health checker thread. Started only when a health-check script is configured.
    if (isHealthCheckerConfigured()) {
      int sleepTime = this.conf
          .getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
      healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
    }

    // Monitor long JVM pauses (e.g. GC stalls) for the lifetime of the server.
    this.pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();

    initializeZooKeeper();
    // ZooKeeper initialization may have stopped or aborted us; only start the
    // worker threads when the server is still healthy.
    if (!isStopped() && !isAborted()) {
      initializeThreads();
    }
  } catch (Throwable t) {
    // Call stop if error or process will stick around for ever since server
    // puts up non-daemon threads.
    this.rpcServices.stop();
    abort("Initialization of RS failed.  Hence aborting RS.", t);
  }
}
/**
 * All initialization needed before we go register with Master.
 * <p>
 * Any failure here aborts the region server: the process would otherwise
 * linger forever because it has already put up non-daemon threads.
 */
private void preRegistrationInitialization() {
  try {
    setupClusterConnection(); // 1. initialize the HConnection; 2. initialize the MetaTableLocator
    // Health checker thread.
    if (isHealthCheckerConfigured()) { // health-check chore, only when configured
      int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
        HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
      healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
    }

    this.pauseMonitor = new JvmPauseMonitor(conf); // JVM pause (e.g. GC stall) detection
    pauseMonitor.start();

    initializeZooKeeper(); // ZooKeeper-related initialization
    // ZK init may have stopped/aborted us; only start worker threads when still healthy.
    if (!isStopped() && !isAborted()) {
      initializeThreads();
    }
  } catch (Throwable t) {
    // Call stop if error or process will stick around for ever since server
    // puts up non-daemon threads.
    this.rpcServices.stop();
    abort("Initialization of RS failed.  Hence aborting RS.", t);
  }
}
@Test
public void testPauseMonitor() {
  // Configure the monitor with an INFO threshold of 1s and a WARN threshold of 10s.
  Configuration conf = new Configuration();
  conf.setLong(JvmPauseMonitor.INFO_THRESHOLD_KEY, 1000L);
  conf.setLong(JvmPauseMonitor.WARN_THRESHOLD_KEY, 10000L);
  JvmPauseMonitor monitor = new JvmPauseMonitor(conf, serverSource);

  // A 1.5s pause without GC exceeds only the INFO threshold and is counted
  // under the "without GC" histogram.
  monitor.updateMetrics(1500, false);
  HELPER.assertCounter("pauseInfoThresholdExceeded", 1, serverSource);
  HELPER.assertCounter("pauseWarnThresholdExceeded", 0, serverSource);
  HELPER.assertCounter("pauseTimeWithoutGc_num_ops", 1, serverSource);
  HELPER.assertCounter("pauseTimeWithGc_num_ops", 0, serverSource);

  // A 15s pause with GC exceeds the WARN threshold and is counted under the
  // "with GC" histogram; the earlier INFO counter stays at 1.
  monitor.updateMetrics(15000, true);
  HELPER.assertCounter("pauseInfoThresholdExceeded", 1, serverSource);
  HELPER.assertCounter("pauseWarnThresholdExceeded", 1, serverSource);
  HELPER.assertCounter("pauseTimeWithoutGc_num_ops", 1, serverSource);
  HELPER.assertCounter("pauseTimeWithGc_num_ops", 1, serverSource);
}
/**
 * Constructor with existing configuration.
 *
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException if resolving the login user or registering filters fails
 */
RESTServlet(final Configuration conf, final UserProvider userProvider)
    throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  // Connection cache: sweep for idle connections every cleanupPeriod ms and
  // evict any that have been idle longer than idleLimit ms.
  int cleanupPeriod = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int idleLimit = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(conf, userProvider, cleanupPeriod, idleLimit);

  if (supportsProxyuser()) {
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }

  metrics = new MetricsREST();

  // Report long JVM pauses (e.g. GC stalls) into the REST metrics source.
  pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
  pauseMonitor.start();
}
private void initializeThreads() throws IOException { // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); // Compaction thread this.compactSplitThread = new CompactSplitThread(this); // Background thread to check for compactions; needed if region has not gotten updates // in a while. It will take care of not checking too frequently on store-by-store basis. this.compactionChecker = new CompactionChecker(this, this.threadWakeFrequency, this); this.periodicFlusher = new PeriodicMemstoreFlusher(this.threadWakeFrequency, this); // Health checker thread. int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); if (isHealthCheckerConfigured()) { healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration()); } this.leases = new Leases(this.threadWakeFrequency); // Create the thread to clean the moved regions list movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this); if (this.nonceManager != null) { // Create the chore that cleans up nonces. nonceManagerChore = this.nonceManager.createCleanupChore(this); } // Setup RPC client for master communication rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress( this.isa.getAddress(), 0)); this.pauseMonitor = new JvmPauseMonitor(conf); pauseMonitor.start(); }
private void initializeThreads() throws IOException { // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); // Compaction thread this.compactSplitThread = new CompactSplitThread(this); // Background thread to check for compactions; needed if region has not gotten updates // in a while. It will take care of not checking too frequently on store-by-store basis. this.compactionChecker = new CompactionChecker(this, this.threadWakeFrequency, this); this.periodicFlusher = new PeriodicMemstoreFlusher(this.threadWakeFrequency, this); // Health checker thread. int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); if (isHealthCheckerConfigured()) { healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration()); } this.leases = new Leases(this.threadWakeFrequency); // Create the thread to clean the moved regions list movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this); if (this.nonceManager != null) { // Create the chore that cleans up nonces. nonceManagerChore = this.nonceManager.createCleanupChore(this); } // Setup RPC client for master communication rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress( rpcServices.isa.getAddress(), 0)); this.pauseMonitor = new JvmPauseMonitor(conf); pauseMonitor.start(); }
private void initializeThreads() throws IOException { // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); // Compaction thread this.compactSplitThread = new CompactSplitThread(this); // Background thread to check for compactions; needed if region has not gotten updates // in a while. It will take care of not checking too frequently on store-by-store basis. this.compactionChecker = new CompactionChecker(this, this.threadWakeFrequency, this); this.periodicFlusher = new PeriodicMemstoreFlusher(this.threadWakeFrequency, this); // Health checker thread. int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); if (isHealthCheckerConfigured()) { healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration()); } this.leases = new Leases(this.threadWakeFrequency); // Create the thread to clean the moved regions list movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this); // Setup RPC client for master communication rpcClient = new RpcClient(conf, clusterId, new InetSocketAddress( this.isa.getAddress(), 0)); this.pauseMonitor = new JvmPauseMonitor(conf); pauseMonitor.start(); }
public ThriftServerRunner(Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); // login the server principal (if using secure Hadoop) securityEnabled = userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled(); if (securityEnabled) { host = Strings.domainNamePointerToHostName(DNS.getDefaultHost( conf.get("hbase.thrift.dns.interface", "default"), conf.get("hbase.thrift.dns.nameserver", "default"))); userProvider.login("hbase.thrift.keytab.file", "hbase.thrift.kerberos.principal", host); } this.conf = HBaseConfiguration.create(conf); this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT); this.metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE); this.pauseMonitor = new JvmPauseMonitor(conf, this.metrics.getSource()); this.hbaseHandler = new HBaseHandler(conf, userProvider); this.hbaseHandler.initMetrics(metrics); this.handler = HbaseHandlerMetricsProxy.newInstance( hbaseHandler, metrics, conf); this.realUser = userProvider.getCurrent().getUGI(); String strQop = conf.get(THRIFT_QOP_KEY); if (strQop != null) { this.qop = SaslUtil.getQop(strQop); } doAsEnabled = conf.getBoolean(THRIFT_SUPPORT_PROXYUSER, false); if (doAsEnabled) { if (!conf.getBoolean(USE_HTTP_CONF_KEY, false)) { LOG.warn("Fail to enable the doAs feature. hbase.regionserver.thrift.http is not " + "configured "); } } if (qop != null) { if (qop != QualityOfProtection.AUTHENTICATION && qop != QualityOfProtection.INTEGRITY && qop != QualityOfProtection.PRIVACY) { throw new IOException(String.format("Invalide %s: It must be one of %s, %s, or %s.", THRIFT_QOP_KEY, QualityOfProtection.AUTHENTICATION.name(), QualityOfProtection.INTEGRITY.name(), QualityOfProtection.PRIVACY.name())); } checkHttpSecurity(qop, conf); if (!securityEnabled) { throw new IOException("Thrift server must" + " run in secure mode to support authentication"); } } }