/**
 * Returns a StoreFileInfo from the given path. Secondary replicas refer to the
 * files of the primary region, so an HFileLink is used to construct the StoreFileInfo.
 * This ensures that the secondary will be able to continue reading the store files even
 * if they are moved to the archive after compaction.
 * @throws IOException
 */
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
    HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, Path path)
    throws IOException {
  // if this is a primary region, just return the StoreFileInfo constructed from path
  if (regionInfo.equals(regionInfoForFs)) {
    return new StoreFileInfo(conf, fs, path);
  }

  // else create a store file link. The link file does not exist on the filesystem though.
  HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(),
    regionInfoForFs.getEncodedName(), familyName, path.getName());
  if (StoreFileInfo.isReference(path)) {
    Reference reference = Reference.read(fs, path);
    return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference);
  }
  return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link);
}
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
    final HTableDescriptor htdClone) throws IOException {
  List<String> files = SnapshotTestingUtils.listHFileNames(fs,
    FSUtils.getTableDir(rootDir, htdClone.getTableName()));
  assertEquals(12, files.size());
  for (int i = 0; i < files.size(); i += 2) {
    String linkFile = files.get(i);
    String refFile = files.get(i + 1);
    assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
    assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
    assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
    Path refPath = getReferredToFile(refFile);
    LOG.debug("get reference name for file " + refFile + " = " + refPath);
    assertTrue(refPath.getName() + " should be a HFileLink",
      HFileLink.isHFileLink(refPath.getName()));
    assertEquals(linkFile, refPath.getName());
  }
}
/**
 * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the
 * files of the primary region, so an HFileLink is used to construct the StoreFileInfo.
 * This ensures that the secondary will be able to continue reading the store files even
 * if they are moved to the archive after compaction.
 * @throws IOException
 */
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
    HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName,
    FileStatus status) throws IOException {
  // if this is a primary region, just return the StoreFileInfo constructed from the status
  if (regionInfo.equals(regionInfoForFs)) {
    return new StoreFileInfo(conf, fs, status);
  }

  // else create a store file link. The link file does not exist on the filesystem though.
  HFileLink link = new HFileLink(conf, HFileLink.createPath(regionInfoForFs.getTable(),
    regionInfoForFs.getEncodedName(), familyName, status.getPath().getName()));
  return new StoreFileInfo(conf, fs, status, link);
}
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
    final HTableDescriptor htdClone) throws IOException {
  String[] files = SnapshotTestingUtils.listHFileNames(fs,
    FSUtils.getTableDir(rootDir, htdClone.getTableName()));
  assertEquals(12, files.length);
  for (int i = 0; i < files.length; i += 2) {
    String linkFile = files[i];
    String refFile = files[i + 1];
    assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
    assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
    assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
    Path refPath = getReferredToFile(refFile);
    LOG.debug("get reference name for file " + refFile + " = " + refPath);
    assertTrue(refPath.getName() + " should be a HFileLink",
      HFileLink.isHFileLink(refPath.getName()));
    assertEquals(linkFile, refPath.getName());
  }
}
/**
 * Returns the location where the inputPath will be copied.
 *  - hfiles are encoded as hfile links hfile-region-table
 *  - logs are encoded as serverName/logName
 */
private Path getOutputPath(final Path inputPath) throws IOException {
  Path path;
  if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) {
    String family = inputPath.getParent().getName();
    TableName table = HFileLink.getReferencedTableName(inputPath.getName());
    String region = HFileLink.getReferencedRegionName(inputPath.getName());
    String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
    path = new Path(FSUtils.getTableDir(new Path("./"), table),
      new Path(region, new Path(family, hfile)));
  } else if (isHLogLinkPath(inputPath)) {
    String logName = inputPath.getName();
    path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
  } else {
    path = inputPath;
  }
  return new Path(outputArchive, path);
}
/**
 * Try to open the "source" file.
 * Throws an IOException if the communication with the inputFs fails or
 * if the file is not found.
 */
private FSDataInputStream openSourceFile(Context context, final Path path) throws IOException {
  try {
    if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) {
      return new HFileLink(inputRoot, inputArchive, path).open(inputFs);
    } else if (isHLogLinkPath(path)) {
      String serverName = path.getParent().getName();
      String logName = path.getName();
      return new HLogLink(inputRoot, serverName, logName).open(inputFs);
    }
    return inputFs.open(path);
  } catch (IOException e) {
    context.getCounter(Counter.MISSING_FILES).increment(1);
    LOG.error("Unable to open source file=" + path, e);
    throw e;
  }
}
/**
 * Create reference file(s) of merging regions under the merges directory
 * @param env MasterProcedureEnv
 * @param regionFs region file system
 * @param mergedDir the temp directory of the merged region
 * @throws IOException
 */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs,
    final Path mergedDir) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
  for (String family : regionFs.getFamilies()) {
    final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family));
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        // Create reference file(s) of the region in mergedDir
        regionFs.mergeStoreFile(mergedRegion, family,
          new HStoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf,
            hcd.getBloomFilterType(), true),
          mergedDir);
      }
    }
  }
}
private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object familyData,
    Collection<StoreFileInfo> storeFiles, boolean isMob) throws IOException {
  final String fileType = isMob ? "mob file" : "hfile";
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Adding snapshot references for %s %ss", storeFiles, fileType));
  }
  int i = 0;
  int sz = storeFiles.size();
  for (StoreFileInfo storeFile : storeFiles) {
    monitor.rethrowException();
    LOG.debug(String.format("Adding reference for %s (%d/%d): %s",
      fileType, ++i, sz, storeFile.getPath()));
    // create "reference" to this store file.
    visitor.storeFile(regionData, familyData, storeFile);
  }
}
/**
 * Returns a StoreFileInfo from the given path. Secondary replicas refer to the
 * files of the primary region, so an HFileLink is used to construct the StoreFileInfo.
 * This ensures that the secondary will be able to continue reading the store files even
 * if they are moved to the archive after compaction.
 * @throws IOException
 */
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
    RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path)
    throws IOException {
  // if this is a primary region, just return the StoreFileInfo constructed from path
  if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) {
    return new StoreFileInfo(conf, fs, path);
  }

  // else create a store file link. The link file does not exist on the filesystem though.
  HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(),
    regionInfoForFs.getEncodedName(), familyName, path.getName());
  if (StoreFileInfo.isReference(path)) {
    Reference reference = Reference.read(fs, path);
    return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference);
  }
  return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link);
}
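// A minimal usage sketch (not part of the original snippets) showing how the getStoreFileInfo
// helper above might be driven: list a primary region's family directory and wrap each store
// file for a secondary replica. The names "buildReplicaStoreFileInfos", "replicaRegion",
// "primaryRegion" and "familyDir" are hypothetical placeholders used only for illustration.
private static List<StoreFileInfo> buildReplicaStoreFileInfos(Configuration conf, FileSystem fs,
    RegionInfo replicaRegion, RegionInfo primaryRegion, String familyName, Path familyDir)
    throws IOException {
  List<StoreFileInfo> infos = new ArrayList<>();
  FileStatus[] statuses = fs.listStatus(familyDir);
  if (statuses == null) {
    return infos;
  }
  for (FileStatus status : statuses) {
    // for a secondary replica this yields HFileLink-backed (or Reference-backed) StoreFileInfos
    infos.add(getStoreFileInfo(conf, fs, replicaRegion, primaryRegion, familyName,
      status.getPath()));
  }
  return infos;
}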
/**
 * Gets the number of files in the mob path.
 * @param tableName the current table name
 * @param isMobFile true to count mob files, false to count del files
 * @param familyName the family name
 * @return the number of files
 */
private int countFiles(TableName tableName, boolean isMobFile, String familyName)
    throws IOException {
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
  int count = 0;
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = fs.listStatus(mobDirPath);
    for (FileStatus file : files) {
      if (isMobFile) {
        if (!StoreFileInfo.isDelFile(file.getPath())) {
          count++;
        }
      } else {
        if (StoreFileInfo.isDelFile(file.getPath())) {
          count++;
        }
      }
    }
  }
  return count;
}
private void verifyRestore(final Path rootDir, final TableDescriptor sourceHtd,
    final TableDescriptor htdClone) throws IOException {
  List<String> files = SnapshotTestingUtils.listHFileNames(fs,
    FSUtils.getTableDir(rootDir, htdClone.getTableName()));
  assertEquals(12, files.size());
  for (int i = 0; i < files.size(); i += 2) {
    String linkFile = files.get(i);
    String refFile = files.get(i + 1);
    assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
    assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
    assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
    Path refPath = getReferredToFile(refFile);
    LOG.debug("get reference name for file " + refFile + " = " + refPath);
    assertTrue(refPath.getName() + " should be a HFileLink",
      HFileLink.isHFileLink(refPath.getName()));
    assertEquals(linkFile, refPath.getName());
  }
}
@Test
public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception {
  // this tests that reference files that are new, but have older timestamps for the files
  // they reference, still will get compacted.
  TableName table = TableName.valueOf("TestMajorCompactor");
  TableDescriptor htd = UTILITY.createTableDescriptor(table, Bytes.toBytes(FAMILY));
  RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  HRegion region =
    HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, UTILITY.getConfiguration(), htd);

  Configuration configuration = mock(Configuration.class);
  // the reference file timestamp is newer
  List<StoreFileInfo> storeFiles = mockStoreFiles(regionStoreDir, 4, 101);
  List<Path> paths = storeFiles.stream().map(StoreFileInfo::getPath).collect(Collectors.toList());
  // the files that are referenced are older, thus we still compact.
  HRegionFileSystem fileSystem = mockFileSystem(region.getRegionInfo(), true, storeFiles, 50);
  MajorCompactionRequest majorCompactionRequest = spy(new MajorCompactionRequest(configuration,
    region.getRegionInfo(), Sets.newHashSet(FAMILY), 100));
  doReturn(mock(Connection.class)).when(majorCompactionRequest).getConnection(eq(configuration));
  doReturn(paths).when(majorCompactionRequest).getReferenceFilePaths(any(FileSystem.class),
    any(Path.class));
  doReturn(fileSystem).when(majorCompactionRequest).getFileSystem(any(Connection.class));
  Set<String> result = majorCompactionRequest.getStoresRequiringCompaction(Sets.newHashSet("a"));
  assertEquals(FAMILY, Iterables.getOnlyElement(result));
}
private HRegionFileSystem mockFileSystem(RegionInfo info, boolean hasReferenceFiles,
    List<StoreFileInfo> storeFiles, long referenceFileTimestamp) throws IOException {
  FileSystem fileSystem = mock(FileSystem.class);
  if (hasReferenceFiles) {
    FileStatus fileStatus = mock(FileStatus.class);
    doReturn(referenceFileTimestamp).when(fileStatus).getModificationTime();
    doReturn(fileStatus).when(fileSystem).getFileLinkStatus(isA(Path.class));
  }
  HRegionFileSystem mockSystem = mock(HRegionFileSystem.class);
  doReturn(info).when(mockSystem).getRegionInfo();
  doReturn(regionStoreDir).when(mockSystem).getStoreDir(FAMILY);
  doReturn(hasReferenceFiles).when(mockSystem).hasReferences(anyString());
  doReturn(storeFiles).when(mockSystem).getStoreFiles(anyString());
  doReturn(fileSystem).when(mockSystem).getFileSystem();
  return mockSystem;
}
private FileStatus getFileStatus(final FileSystem fs, final Path path) {
  try {
    if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) {
      HFileLink link = new HFileLink(inputRoot, inputArchive, path);
      return link.getFileStatus(fs);
    } else if (isHLogLinkPath(path)) {
      String serverName = path.getParent().getName();
      String logName = path.getName();
      return new HLogLink(inputRoot, serverName, logName).getFileStatus(fs);
    }
    return fs.getFileStatus(path);
  } catch (IOException e) {
    LOG.warn("Unable to get the status for file=" + path);
    return null;
  }
}
@Override
protected boolean validate(Path file) {
  if (HFileLink.isBackReferencesDir(file) || HFileLink.isBackReferencesDir(file.getParent())) {
    return true;
  }
  return StoreFileInfo.validateStoreFileName(file.getName());
}
@Override
public boolean accept(Path rd) {
  try {
    // only files
    return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isHFile(rd);
  } catch (IOException ioe) {
    // Maybe the file was moved or the fs was disconnected.
    LOG.warn("Skipping file " + rd + " due to IOException", ioe);
    return false;
  }
}
@Override
public boolean accept(Path rd) {
  try {
    // only files can be references.
    return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isReference(rd);
  } catch (IOException ioe) {
    // Maybe the file was moved or the fs was disconnected.
    LOG.warn("Skipping file " + rd + " due to IOException", ioe);
    return false;
  }
}
@Test
public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testRestoreSnapshotDoesNotCreateBackRefLinks");
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);

    Path tmpTableDir = UTIL.getRandomDir();
    testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir);

    Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
    for (Path regionDir : FSUtils.getRegionDirs(fs, FSUtils.getTableDir(rootDir, tableName))) {
      for (Path storeDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        for (FileStatus status : fs.listStatus(storeDir)) {
          System.out.println(status.getPath());
          if (StoreFileInfo.isValid(status)) {
            Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
              tableName, regionDir.getName(), storeDir.getName());

            Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName());
            // assert back references directory is empty
            assertFalse("There is a back reference in " + path, fs.exists(path));

            path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName());
            // assert back references directory is empty
            assertFalse("There is a back reference in " + path, fs.exists(path));
          }
        }
      }
    }
  } finally {
    UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo,
    final String family, final String fileName) throws IOException {
  Path refPath = null;
  if (StoreFileInfo.isReference(fileName)) {
    // If this is a reference file, check if the parent file is present in the snapshot
    Path snapshotHFilePath = new Path(new Path(
      new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
    refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath);
    if (!fs.exists(refPath)) {
      throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
    }
  }

  Path linkPath;
  if (refPath != null && HFileLink.isHFileLink(refPath)) {
    linkPath = new Path(family, refPath.getName());
  } else if (HFileLink.isHFileLink(fileName)) {
    linkPath = new Path(family, fileName);
  } else {
    linkPath = new Path(family, HFileLink.createHFileLinkName(
      tableName, regionInfo.getEncodedName(), fileName));
  }

  // check if the linked file exists (in the archive, or in the table dir)
  HFileLink link = new HFileLink(services.getConfiguration(), linkPath);
  if (!link.exists(fs)) {
    throw new CorruptedSnapshotException("Can't find hfile: " + fileName
      + " in the real (" + link.getOriginPath()
      + ") or archive (" + link.getArchivePath()
      + ") directory for the primary table.", snapshot);
  }
}
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
    final HTableDescriptor htdClone) throws IOException {
  String[] files = getHFiles(FSUtils.getTableDir(rootDir, htdClone.getTableName()));
  assertEquals(2, files.length);
  assertTrue(files[0] + " should be a HFileLink", HFileLink.isHFileLink(files[0]));
  assertTrue(files[1] + " should be a Reference", StoreFileInfo.isReference(files[1]));
  assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(files[0]));
  assertEquals(TEST_HFILE, HFileLink.getReferencedHFileName(files[0]));
  Path refPath = getReferredToFile(files[1]);
  assertTrue(refPath.getName() + " should be a HFileLink",
    HFileLink.isHFileLink(refPath.getName()));
  assertEquals(files[0], refPath.getName());
}
@VisibleForTesting
protected void addMobRegion(RegionInfo regionInfo, RegionVisitor visitor) throws IOException {
  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing mob region '" + regionInfo + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(regionInfo);
  monitor.rethrowException();

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for mob files");

  Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    // 2.1. build the snapshot reference for the store if it's a mob store
    if (!hcd.isMobEnabled()) {
      continue;
    }
    Object familyData = visitor.familyOpen(regionData, hcd.getName());
    monitor.rethrowException();

    Path storePath = MobUtils.getMobFamilyPath(mobRegionPath, hcd.getNameAsString());
    List<StoreFileInfo> storeFiles = getStoreFiles(storePath);
    if (storeFiles == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("No mob files under family: " + hcd.getNameAsString());
      }
      continue;
    }

    addReferenceFiles(visitor, regionData, familyData, storeFiles, true);

    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
private List<StoreFileInfo> getStoreFiles(Path storeDir) throws IOException {
  FileStatus[] stats = FSUtils.listStatus(fs, storeDir);
  if (stats == null) {
    return null;
  }

  ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(stats.length);
  for (int i = 0; i < stats.length; ++i) {
    storeFiles.add(new StoreFileInfo(conf, fs, stats[i]));
  }
  return storeFiles;
}
Set<String> getStoresRequiringCompaction(Set<String> requestedStores) throws IOException {
  try (Connection connection = getConnection(configuration)) {
    HRegionFileSystem fileSystem = getFileSystem(connection);
    Set<String> familiesToCompact = Sets.newHashSet();
    for (String family : requestedStores) {
      // do we have any store files?
      Collection<StoreFileInfo> storeFiles = fileSystem.getStoreFiles(family);
      if (storeFiles == null) {
        LOG.info("Excluding store: " + family + " for compaction for region: "
          + fileSystem.getRegionInfo().getEncodedName() + ", has no store files");
        continue;
      }
      // check for reference files
      if (fileSystem.hasReferences(family) && familyHasReferenceFile(fileSystem, family)) {
        familiesToCompact.add(family);
        LOG.info("Including store: " + family + " with: " + storeFiles.size()
          + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName());
        continue;
      }
      // check store file timestamps
      boolean includeStore = false;
      for (StoreFileInfo storeFile : storeFiles) {
        if (storeFile.getModificationTime() < timestamp) {
          LOG.info("Including store: " + family + " with: " + storeFiles.size()
            + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName());
          familiesToCompact.add(family);
          includeStore = true;
          break;
        }
      }
      if (!includeStore) {
        LOG.info("Excluding store: " + family + " for compaction for region: "
          + fileSystem.getRegionInfo().getEncodedName() + ", already compacted");
      }
    }
    return familiesToCompact;
  }
}
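// A brief, hypothetical calling sketch for getStoresRequiringCompaction above: "request" stands
// in for an already-constructed MajorCompactionRequest and is not defined in the original
// snippets; the family names are placeholders.
Set<String> candidates = request.getStoresRequiringCompaction(Sets.newHashSet("cf1", "cf2"));
for (String family : candidates) {
  LOG.info("Will submit major compaction for store: " + family);
}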
@Override
protected boolean accept(Path p, @CheckForNull Boolean isDir) {
  if (!StoreFileInfo.isHFile(p)) {
    return false;
  }
  try {
    return isFile(fs, isDir, p);
  } catch (IOException ioe) {
    // Maybe the file was moved or the fs was disconnected.
    LOG.warn("Skipping file " + p + " due to IOException", ioe);
    return false;
  }
}
@Override
protected boolean accept(Path p, @CheckForNull Boolean isDir) {
  if (!StoreFileInfo.isReference(p)) {
    return false;
  }
  try {
    // only files can be references.
    return isFile(fs, isDir, p);
  } catch (IOException ioe) {
    // Maybe the file was moved or the fs was disconnected.
    LOG.warn("Skipping file " + p + " due to IOException", ioe);
    return false;
  }
}
/**
 * Gets the number of files larger than the given size.
 * @param size the size threshold in bytes
 * @param tableName the current table name
 * @param familyName the family name
 * @return the number of files larger than the size
 */
private int countLargeFiles(int size, TableName tableName, String familyName) throws IOException {
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
  int count = 0;
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = fs.listStatus(mobDirPath);
    for (FileStatus file : files) {
      // ignore the del files in the mob path
      if ((!StoreFileInfo.isDelFile(file.getPath())) && (file.getLen() > size)) {
        count++;
      }
    }
  }
  return count;
}
private void assertRefFileNameEqual(String familyName) throws IOException {
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes(familyName));
  // Do not retrieve the mob data when scanning
  scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
  ResultScanner results = table.getScanner(scan);
  Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),
    tableName, familyName);
  List<Path> actualFilePaths = new ArrayList<>();
  List<Path> expectFilePaths = new ArrayList<>();
  for (Result res : results) {
    for (Cell cell : res.listCells()) {
      byte[] referenceValue = CellUtil.cloneValue(cell);
      String fileName = Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
        referenceValue.length - Bytes.SIZEOF_INT);
      Path targetPath = new Path(mobFamilyPath, fileName);
      if (!actualFilePaths.contains(targetPath)) {
        actualFilePaths.add(targetPath);
      }
    }
  }
  results.close();
  if (fs.exists(mobFamilyPath)) {
    FileStatus[] files = fs.listStatus(mobFamilyPath);
    for (FileStatus file : files) {
      if (!StoreFileInfo.isDelFile(file.getPath())) {
        expectFilePaths.add(file.getPath());
      }
    }
  }
  Collections.sort(actualFilePaths);
  Collections.sort(expectFilePaths);
  assertEquals(expectFilePaths, actualFilePaths);
}
@Test
public void testStoresNeedingCompaction() throws Exception {
  // store files older than timestamp
  List<StoreFileInfo> storeFiles = mockStoreFiles(regionStoreDir, 5, 10);
  MajorCompactionRequest request = makeMockRequest(100, storeFiles, false);
  Optional<MajorCompactionRequest> result =
    request.createRequest(mock(Configuration.class), Sets.newHashSet(FAMILY));
  assertTrue(result.isPresent());

  // store files newer than timestamp
  storeFiles = mockStoreFiles(regionStoreDir, 5, 101);
  request = makeMockRequest(100, storeFiles, false);
  result = request.createRequest(mock(Configuration.class), Sets.newHashSet(FAMILY));
  assertFalse(result.isPresent());
}
private List<StoreFileInfo> mockStoreFiles(Path regionStoreDir, int howMany, long timestamp)
    throws IOException {
  List<StoreFileInfo> infos = Lists.newArrayList();
  int i = 0;
  while (i < howMany) {
    StoreFileInfo storeFileInfo = mock(StoreFileInfo.class);
    doReturn(timestamp).doReturn(timestamp).when(storeFileInfo).getModificationTime();
    doReturn(new Path(regionStoreDir, RandomStringUtils.randomAlphabetic(10)))
      .when(storeFileInfo).getPath();
    infos.add(storeFileInfo);
    i++;
  }
  return infos;
}
private MajorCompactionRequest makeMockRequest(long timestamp, List<StoreFileInfo> storeFiles,
    boolean references) throws IOException {
  Configuration configuration = mock(Configuration.class);
  RegionInfo regionInfo = mock(RegionInfo.class);
  when(regionInfo.getEncodedName()).thenReturn("HBase");
  when(regionInfo.getTable()).thenReturn(TableName.valueOf("foo"));
  MajorCompactionRequest request =
    new MajorCompactionRequest(configuration, regionInfo, Sets.newHashSet("a"), timestamp);
  MajorCompactionRequest spy = spy(request);
  HRegionFileSystem fileSystem = mockFileSystem(regionInfo, references, storeFiles);
  doReturn(fileSystem).when(spy).getFileSystem(isA(Connection.class));
  doReturn(mock(Connection.class)).when(spy).getConnection(eq(configuration));
  return spy;
}
@Test
public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testRestoreSnapshotDoesNotCreateBackRefLinks");
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);

    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir);

    Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
    for (Path regionDir : FSUtils.getRegionDirs(fs, FSUtils.getTableDir(rootDir, tableName))) {
      for (Path storeDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        for (FileStatus status : fs.listStatus(storeDir)) {
          System.out.println(status.getPath());
          if (StoreFileInfo.isValid(status)) {
            Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
              tableName, regionDir.getName(), storeDir.getName());

            Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName());
            // assert back references directory is empty
            assertFalse("There is a back reference in " + path, fs.exists(path));

            path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName());
            // assert back references directory is empty
            assertFalse("There is a back reference in " + path, fs.exists(path));
          }
        }
      }
    }
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}