/**
 * The caller will block until all the log files of the given region server
 * have been processed - successfully split or an error is encountered - by an
 * available worker region server. This method must only be called after the
 * region servers have been brought online.
 *
 * @param logDirs List of log dirs to split
 * @return cumulative size of the logfiles split
 * @throws IOException If there was an error while splitting any log file
 */
public long splitLogDistributed(final List<Path> logDirs) throws IOException {
  if (logDirs.isEmpty()) {
    return 0;
  }
  Set<ServerName> serverNames = new HashSet<ServerName>();
  for (Path logDir : logDirs) {
    try {
      ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logDir);
      if (serverName != null) {
        serverNames.add(serverName);
      }
    } catch (IllegalArgumentException e) {
      // ignore invalid format error.
      LOG.warn("Cannot parse server name from " + logDir);
    }
  }
  return splitLogDistributed(serverNames, logDirs, null);
}
@Override
public Void call() throws IOException {
  NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(this.fs, regiondir);
  if (files == null || files.size() == 0) return null;

  // Copy over each file. This is really inefficient (it could be trivially
  // parallelized), but it is really simple to reason about.
  for (Path source : files) {
    // check to see if the file is zero length, in which case we can skip it
    FileStatus stat = fs.getFileStatus(source);
    if (stat.getLen() <= 0) continue;

    // it's not zero length, so copy over the file
    Path out = new Path(outputDir, source.getName());
    LOG.debug("Copying " + source + " to " + out);
    FileUtil.copy(fs, source, fs, out, true, fs.getConf());

    // check the running operation for errors after each file
    this.rethrowException();
  }
  return null;
}
/**
 * Set up the WAL and replication if enabled.
 * Replication setup is done in here because it wants to be hooked up to the WAL.
 * @return A WAL instance.
 * @throws IOException
 */
private HLog setupWALAndReplication() throws IOException {
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  final String logName = HLogUtil.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException("Region server has already " +
      "created directory at " + this.serverNameFromMasterPOV.toString());
  }
  // Instantiate replication manager if replication enabled. Pass it the
  // log directories.
  createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
  return instantiateHLog(rootDir, logName);
}
private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) {
    assertTrue(regionStatus.getPath().getName().startsWith(tableName.getNameAsString()));
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionStatus.getPath());
    byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName());
    assertFalse(regionsMap.containsKey(regionName));
    for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
      HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
      try {
        HLog.Entry entry;
        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          assertEquals(tableName, key.getTablename());
          assertArrayEquals(regionName, key.getEncodedRegionName());
        }
      } finally {
        reader.close();
      }
    }
  }
}
/**
 * Check that we don't get an exception if there is no recovered edits directory to copy.
 * @throws Exception on failure
 */
@Test
public void testNoEditsDir() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
        snapshotRegionDir);
    task.call();
  } finally {
    // cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}
/**
 * Set up the WAL and replication if enabled.
 * Replication setup is done in here because it wants to be hooked up to the WAL.
 * @return A WAL instance.
 * @throws IOException
 */
private HLog setupWALAndReplication() throws IOException {
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  final String logName = HLogUtil.getHLogDirectoryName(this.serverName.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException("Region server has already " +
      "created directory at " + this.serverName.toString());
  }
  // Instantiate replication manager if replication enabled. Pass it the
  // log directories.
  createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
  return instantiateHLog(rootDir, logName);
}
private List<Path> getLogDirs(final List<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  for (ServerName serverName: serverNames) {
    Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
    Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
    // Rename the directory so a rogue RS doesn't create more HLogs
    if (fs.exists(logDir)) {
      if (!this.fs.rename(logDir, splitDir)) {
        throw new IOException("Failed fs.rename for log split: " + logDir);
      }
      logDir = splitDir;
      LOG.debug("Renamed region directory: " + splitDir);
    } else if (!fs.exists(splitDir)) {
      LOG.info("Log dir for server " + serverName + " does not exist");
      continue;
    }
    logDirs.add(splitDir);
  }
  return logDirs;
}
@Override
public void map(HLogKey key, WALEdit value, Context context) throws IOException {
  try {
    // skip all other tables
    if (Bytes.equals(table, key.getTablename())) {
      for (KeyValue kv : value.getKeyValues()) {
        if (HLogUtil.isMetaFamily(kv.getFamily())) continue;
        context.write(new ImmutableBytesWritable(kv.getRow()), kv);
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
/**
 * Test the findMemstoresWithEditsEqualOrOlderThan method.
 * @throws IOException
 */
@Test
public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
  Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
  for (int i = 0; i < 10; i++) {
    Long l = Long.valueOf(i);
    regionsToSeqids.put(l.toString().getBytes(), l);
  }
  byte [][] regions = HLogUtil.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
  assertEquals(2, regions.length);
  assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
      Bytes.equals(regions[0], "1".getBytes()));
  regions = HLogUtil.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
  int count = 4;
  assertEquals(count, regions.length);
  // Regions returned are not ordered.
  for (int i = 0; i < count; i++) {
    assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
  }
}
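The test above only pins down the contract: given a map from region name to last sequence id, findMemstoresWithEditsEqualOrOlderThan returns, in no particular order, the region names whose sequence id is at or below the cut-off. A minimal standalone sketch of that filtering logic follows, using String keys instead of HBase's byte[] region names; it is not the actual HLogUtil implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class OldEditsFilter {
  // Return the keys whose recorded sequence id is <= cutoffSeqId; order is unspecified.
  static List<String> findEditsEqualOrOlderThan(long cutoffSeqId,
      Map<String, Long> regionsToSeqIds) {
    List<String> result = new ArrayList<String>();
    for (Map.Entry<String, Long> entry : regionsToSeqIds.entrySet()) {
      if (entry.getValue().longValue() <= cutoffSeqId) {
        result.add(entry.getKey());
      }
    }
    return result;
  }
}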
@Test
public void testGetServerNameFromHLogDirectoryName() throws IOException {
  String hl = conf.get(HConstants.HBASE_DIR) + "/" +
      HLogUtil.getHLogDirectoryName(new ServerName("hn", 450, 1398).toString());

  // Must not throw exception
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
      conf.get(HConstants.HBASE_DIR) + "/"));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, ""));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, " "));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
  Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));

  Assert.assertNotNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
      conf.get(HConstants.HBASE_DIR) +
      "/.logs/localhost,32984,1343316388997/localhost%2C32984%2C1343316388997.1343316390417"));
  Assert.assertNotNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf"));
}
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {
  ServerName sn = HLogUtil.getServerNameFromHLogDirectoryName(conf, src);
  if (sn == null) {
    // It's not an HLOG
    return;
  }

  // Ok, so it's an HLog
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src + " is an HLog file, so reordering blocks, last hostname will be:" + hostName);
  }

  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one at the last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
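The core of reorderBlocks is the in-place rotation that moves the replica hosted on the WAL writer's own datanode to the end of the location array, presumably so that clients prefer the other replicas. A small self-contained sketch of that rotation on plain strings; moveHostToLast is a hypothetical helper for illustration, not part of the HBase or HDFS API.

import java.util.Arrays;

public class MoveToLastDemo {
  // Shift the first entry matching 'host' to the end, preserving the relative order of the rest.
  static void moveHostToLast(String[] hosts, String host) {
    for (int i = 0; i < hosts.length - 1; i++) {
      if (host.equals(hosts[i])) {
        String toLast = hosts[i];
        // advance the remaining entries by one and put the match in the last slot
        System.arraycopy(hosts, i + 1, hosts, i, hosts.length - i - 1);
        hosts[hosts.length - 1] = toLast;
        return;
      }
    }
  }

  public static void main(String[] args) {
    String[] hosts = { "dn1", "dn2", "dn3" };
    moveHostToLast(hosts, "dn1");
    System.out.println(Arrays.toString(hosts)); // prints [dn2, dn3, dn1]
  }
}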
private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  boolean needReleaseLock = false;
  if (!this.services.isInitialized()) {
    // during master initialization, multiple places could be splitting the same WAL
    this.splitLogLock.lock();
    needReleaseLock = true;
  }
  try {
    for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
      // Rename the directory so a rogue RS doesn't create more HLogs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }
  } finally {
    if (needReleaseLock) {
      this.splitLogLock.unlock();
    }
  }
  return logDirs;
}
/**
 * Verify that one region's recovered.edits files in the snapshot match the original
 * directory at the surface level (file names and lengths).
 * @param fs filesystem on which the snapshot had been taken
 * @param rootDir full path to the root hbase directory
 * @param regionInfo info for the region
 * @param snapshot description of the snapshot that was taken
 * @throws IOException if there is an unexpected error talking to the filesystem
 */
public static void verifyRecoveredEdits(FileSystem fs, Path rootDir, HRegionInfo regionInfo,
    SnapshotDescription snapshot) throws IOException {
  Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
  Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
  Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(snapshot, rootDir,
      regionInfo.getEncodedName());
  Path snapshotEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);

  FileStatus[] edits = FSUtils.listStatus(fs, editsDir);
  FileStatus[] snapshotEdits = FSUtils.listStatus(fs, snapshotEditsDir);
  if (edits == null) {
    assertNull(snapshot, "Snapshot has edits but table doesn't", snapshotEdits);
    return;
  }
  assertNotNull(snapshot, "Table has edits, but snapshot doesn't", snapshotEdits);

  // check each of the files
  assertEquals(snapshot, "Not same number of edits in snapshot as table", edits.length,
      snapshotEdits.length);

  // make sure we have a file with the same name as the original
  // it would be really expensive to verify the content matches the original
  for (FileStatus edit : edits) {
    boolean found = false;
    for (FileStatus sEdit : snapshotEdits) {
      // compare file names only; the parent directories necessarily differ
      if (sEdit.getPath().getName().equals(edit.getPath().getName())) {
        assertEquals(snapshot, "Snapshot file " + sEdit.getPath() +
            " length not equal to the original: " + edit.getPath(), edit.getLen(),
            sEdit.getLen());
        found = true;
        break;
      }
    }
    assertTrue(snapshot, "No edit in snapshot with name: " + edit.getPath(), found);
  }
}
/**
 * Create a LogWriter for the specified region if not already created.
 */
private LogWriter getOrCreateWriter(final byte[] regionName, long seqId) throws IOException {
  LogWriter writer = regionLogWriters.get(regionName);
  if (writer == null) {
    Path regionDir = HRegion.getRegionDir(tableDir, Bytes.toString(regionName));
    Path dir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    fs.mkdirs(dir);

    writer = new LogWriter(conf, fs, dir, seqId);
    regionLogWriters.put(regionName, writer);
  }
  return writer;
}
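One detail worth noting in the writer cache above: the map is keyed by byte[], which only behaves as a cache if regionLogWriters compares key contents (for example a TreeMap built with a byte-array comparator, as HBase code typically does with Bytes.BYTES_COMPARATOR) rather than relying on array identity the way a plain HashMap would. A minimal generic sketch of such a content-compared cache, not taken from the HBase code:

import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;

public class ByteArrayKeyedCache<V> {
  // Unsigned lexicographic comparison, so keys with equal contents map to the same entry.
  private static final Comparator<byte[]> BYTES_COMPARATOR = new Comparator<byte[]>() {
    @Override
    public int compare(byte[] a, byte[] b) {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int diff = (a[i] & 0xff) - (b[i] & 0xff);
        if (diff != 0) return diff;
      }
      return a.length - b.length;
    }
  };

  private final Map<byte[], V> cache = new TreeMap<byte[], V>(BYTES_COMPARATOR);

  public V get(byte[] key) { return cache.get(key); }
  public void put(byte[] key, V value) { cache.put(key, value); }
}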
/**
 * @param snapshot Snapshot being taken
 * @param monitor error monitor for the snapshot
 * @param fs {@link FileSystem} where the snapshot is being taken
 * @param regionDir directory for the region to examine for edits
 * @param snapshotRegionDir directory for the region in the snapshot
 */
public CopyRecoveredEditsTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor,
    FileSystem fs, Path regionDir, Path snapshotRegionDir) {
  super(snapshot, monitor);
  this.fs = fs;
  this.regiondir = regionDir;
  this.outputDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
}
/**
 * Iterate over the recovered.edits of the specified region.
 *
 * @param fs {@link FileSystem}
 * @param regionDir {@link Path} to the Region directory
 * @param visitor callback object to get the recovered.edits files
 * @throws IOException if an error occurred while scanning the directory
 */
public static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir,
    final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
  NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regionDir);
  if (files == null || files.size() == 0) return;

  for (Path source: files) {
    // check to see if the file is zero length, in which case we can skip it
    FileStatus stat = fs.getFileStatus(source);
    if (stat.getLen() <= 0) continue;

    visitor.recoveredEdits(regionDir.getName(), source.getName());
  }
}
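A typical caller hands visitRegionRecoveredEdits an anonymous visitor that just records the file names. A usage sketch follows, assuming the method above lives on org.apache.hadoop.hbase.util.FSVisitor and that RecoveredEditsVisitor declares the single recoveredEdits(String region, String logfile) callback invoked above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSVisitor;

public class ListRecoveredEdits {
  // Collect "region/logfile" entries for every non-empty recovered.edits file under regionDir.
  static List<String> listRecoveredEdits(FileSystem fs, Path regionDir) throws IOException {
    final List<String> editFiles = new ArrayList<String>();
    FSVisitor.visitRegionRecoveredEdits(fs, regionDir, new FSVisitor.RecoveredEditsVisitor() {
      @Override
      public void recoveredEdits(String region, String logfile) throws IOException {
        editFiles.add(region + "/" + logfile);
      }
    });
    return editFiles;
  }
}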
private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta != null) return this.hlogForMeta;
  final String logName = HLogUtil.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
      this.conf, getMetaWALActionListeners(), this.serverNameFromMasterPOV.toString());
  return this.hlogForMeta;
}
private void createRecoverEdits(final Path tableDir, final Set<String> tableRegions,
    final Set<String> recoverEdits) throws IOException {
  for (String region: tableRegions) {
    Path regionEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(new Path(tableDir, region));
    long seqId = System.currentTimeMillis();
    for (int i = 0; i < 3; ++i) {
      String editName = String.format("%019d", seqId + i);
      recoverEdits.add(editName);
      FSDataOutputStream stream = fs.create(new Path(regionEditsDir, editName));
      stream.write(Bytes.toBytes("test"));
      stream.close();
    }
  }
}
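The %019d format above zero-pads the sequence id to a fixed width, presumably so that plain lexicographic ordering of the recovered.edits file names agrees with numeric sequence-id order, which is what a name-sorted listing such as getSplitEditFilesSorted would rely on. A tiny illustration of that property:

public class EditNameOrderDemo {
  public static void main(String[] args) {
    String a = String.format("%019d", 9L);
    String b = String.format("%019d", 10L);
    System.out.println(a);                  // 0000000000000000009
    System.out.println(b);                  // 0000000000000000010
    // with fixed-width zero padding, String ordering agrees with numeric ordering
    System.out.println(a.compareTo(b) < 0); // true
  }
}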
private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta != null) return this.hlogForMeta;
  final String logName = HLogUtil.getHLogDirectoryName(this.serverName.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
      this.conf, getMetaWALActionListeners(), this.serverName.toString());
  return this.hlogForMeta;
}
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {
  ServerName sn = HLogUtil.getServerNameFromHLogDirectoryName(conf, src);
  if (sn == null) {
    // It's not an HLOG
    return;
  }

  // Ok, so it's an HLog
  String hostName = sn.getHostname();
  LOG.debug(src + " is an HLog file, so reordering blocks, last hostname will be:" + hostName);

  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one at the last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta == null) {
    final String logName = HLogUtil.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
    Path logdir = new Path(rootDir, logName);
    if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
    this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
        this.conf, getMetaWALActionListeners(), this.serverNameFromMasterPOV.toString());
  }
  return this.hlogForMeta;
}