/**
 * This method is the base split method that splits HLog files matching a filter.
 * Callers should pass the appropriate filter for meta and non-meta HLogs.
 * @param serverNames servers whose HLog files will be split
 * @param filter only HLog files matching this PathFilter are split
 * @throws IOException
 */
public void splitLog(final List<ServerName> serverNames, PathFilter filter)
    throws IOException {
  long splitTime = 0, splitLogSize = 0;
  List<Path> logDirs = getLogDirs(serverNames);

  if (logDirs.isEmpty()) {
    LOG.info("No logs to split");
    return;
  }

  boolean lockAcquired = false;
  if (distributedLogSplitting) {
    try {
      if (!this.services.isServerShutdownHandlerEnabled()) {
        // Process one log splitting task at a time before SSH is enabled,
        // because ROOT SSH and HMaster#assignMeta could both split the logs
        // of the same server.
        this.splitLogLock.lock();
        lockAcquired = true;
      }
      splitLogManager.handleDeadWorkers(serverNames);
      splitTime = EnvironmentEdgeManager.currentTimeMillis();
      splitLogSize = splitLogManager.splitLogDistributed(logDirs, filter);
      splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;
    } finally {
      if (lockAcquired) {
        this.splitLogLock.unlock();
      }
    }
  } else {
    for (Path logDir : logDirs) {
      // splitLogLock ensures that dead region servers' logs are processed
      // one at a time
      this.splitLogLock.lock();
      try {
        HLogSplitter splitter = HLogSplitter.createLogSplitter(
          conf, rootdir, logDir, oldLogDir, this.fs);
        try {
          // If FS is in safe mode, just wait till out of it.
          FSUtils.waitOnSafeMode(conf,
            conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 1000));
          splitter.splitLog();
        } catch (OrphanHLogAfterSplitException e) {
          LOG.warn("Retrying splitting because of:", e);
          // An HLogSplitter instance can only be used once. Get a new instance.
          splitter = HLogSplitter.createLogSplitter(conf, rootdir, logDir,
            oldLogDir, this.fs);
          splitter.splitLog();
        }
        splitTime = splitter.getTime();
        splitLogSize = splitter.getSize();
      } finally {
        this.splitLogLock.unlock();
      }
    }
  }

  if (this.metrics != null) {
    this.metrics.addSplit(splitTime, splitLogSize);
  }
}
/**
 * This method is the base split method that splits HLog files matching a filter.
 * Callers should pass the appropriate filter for meta and non-meta HLogs.
 * @param serverNames servers whose HLog files will be split
 * @param filter only HLog files matching this PathFilter are split
 * @throws IOException
 */
public void splitLog(final List<ServerName> serverNames, PathFilter filter)
    throws IOException {
  long splitTime = 0, splitLogSize = 0;
  List<Path> logDirs = getLogDirs(serverNames);

  if (logDirs.isEmpty()) {
    LOG.info("No logs to split");
    return;
  }

  if (distributedLogSplitting) {
    splitLogManager.handleDeadWorkers(serverNames);
    splitTime = EnvironmentEdgeManager.currentTimeMillis();
    splitLogSize = splitLogManager.splitLogDistributed(logDirs, filter);
    splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;
  } else {
    for (Path logDir : logDirs) {
      // splitLogLock ensures that dead region servers' logs are processed
      // one at a time
      this.splitLogLock.lock();
      try {
        HLogSplitter splitter = HLogSplitter.createLogSplitter(
          conf, rootdir, logDir, oldLogDir, this.fs);
        try {
          // If FS is in safe mode, just wait till out of it.
          FSUtils.waitOnSafeMode(conf,
            conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 1000));
          splitter.splitLog();
        } catch (OrphanHLogAfterSplitException e) {
          LOG.warn("Retrying splitting because of:", e);
          // An HLogSplitter instance can only be used once. Get a new instance.
          splitter = HLogSplitter.createLogSplitter(conf, rootdir, logDir,
            oldLogDir, this.fs);
          splitter.splitLog();
        }
        splitTime = splitter.getTime();
        splitLogSize = splitter.getSize();
      } finally {
        this.splitLogLock.unlock();
      }
    }
  }

  if (this.metricsMaster != null) {
    this.metricsMaster.addSplit(splitTime, splitLogSize);
  }
}
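As a caller-side sketch of how the filter argument might be supplied: the class and filter names below (SplitLogFilters, META_FILTER, NON_META_FILTER) and the ".meta" filename-suffix check are assumptions for illustration only; the only API used as-is is Hadoop's PathFilter interface.

// Hypothetical caller-side filters for splitLog(serverNames, filter).
// The ".meta" suffix check and the names below are illustrative assumptions,
// not the actual HBase definitions.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

class SplitLogFilters {

  // Accepts only HLogs assumed to belong to meta regions (".meta" suffix).
  static final PathFilter META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return p.getName().endsWith(".meta");
    }
  };

  // Accepts every HLog that is not a meta HLog.
  static final PathFilter NON_META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return !p.getName().endsWith(".meta");
    }
  };
}

A caller could then invoke, for example, splitLog(serverNames, SplitLogFilters.META_FILTER) to split only meta HLogs, and pass NON_META_FILTER for everything else.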