private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
          fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
private SortedMap<String, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
          fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
          fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
/**
 * Suggest fixes for each table
 */
private void suggestFixes(
    SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
/**
 * Suggest fixes for each table
 */
private void suggestFixes(SortedMap<String, TableInfo> tablesInfo) throws IOException {
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
/**
 * Suggest fixes for each table
 */
private void suggestFixes(
    SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName, TableInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
WorkItemOverlapMerge(Collection<HbckInfo> overlapgroup,
    TableIntegrityErrorHandler handler) {
  this.handler = handler;
  this.overlapgroup = overlapgroup;
}
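// A minimal sketch (not part of the source above) of how a work item like this
// is typically shaped: a hypothetical Callable that hands its overlap group to
// the TableIntegrityErrorHandler so overlap groups can be processed in parallel.
// The Callable shape and the handleOverlapGroup call are assumptions for
// illustration, not confirmed by the snippet itself.
static class WorkItemOverlapMergeSketch implements java.util.concurrent.Callable<Void> {
  private final TableIntegrityErrorHandler handler;
  private final Collection<HbckInfo> overlapgroup;

  WorkItemOverlapMergeSketch(Collection<HbckInfo> overlapgroup,
      TableIntegrityErrorHandler handler) {
    this.handler = handler;
    this.overlapgroup = overlapgroup;
  }

  @Override
  public Void call() throws Exception {
    // Delegate the whole overlap group to the handler, which decides whether
    // to merge, sideline, or merely report the overlapping regions.
    handler.handleOverlapGroup(overlapgroup);
    return null;
  }
}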
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<String, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<String, TableInfo>();
  List<HbckInfo> noHDFSRegionInfos = new ArrayList<HbckInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      noHDFSRegionInfos.add(hbi);
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    String tableName = hbi.metaEntry.getTableNameAsString();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName, TableInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName, TableInfo>();
  List<HbckInfo> noHDFSRegionInfos = new ArrayList<HbckInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      noHDFSRegionInfos.add(hbi);
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }
      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.isEmpty()) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}