Java class org.apache.hadoop.hbase.client.HConnection example source code
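All of the snippets below assume a live HConnection. As a point of reference, here is a minimal lifecycle sketch for obtaining one (assuming an HBase 0.96-1.x era client, where HConnectionManager and HConnection are still available though deprecated); the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class HConnectionLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // HConnection is a heavyweight, shared resource: create it once, reuse it, close it when done.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      System.out.println("master running: " + connection.isMasterRunning());
    } finally {
      connection.close();
    }
  }
}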
Project: ditb
File: WALSplitter.java
/**
* Get a writer and path for a log starting at the given entry. This function is threadsafe so
* long as multiple threads are always acting on different regions.
* @return null if this region shouldn't output any logs
*/
private RegionServerWriter getRegionServerWriter(String loc) throws IOException {
RegionServerWriter ret = writers.get(loc);
if (ret != null) {
return ret;
}
TableName tableName = getTableFromLocationStr(loc);
if(tableName == null){
throw new IOException("Invalid location string:" + loc + " found. Replay aborted.");
}
HConnection hconn = getConnectionByTableName(tableName);
synchronized (writers) {
ret = writers.get(loc);
if (ret == null) {
ret = new RegionServerWriter(conf, tableName, hconn);
writers.put(loc, ret);
}
}
return ret;
}
Project: ditb
File: HBaseFsck.java
/**
* Load the list of disabled tables in ZK into local set.
* @throws ZooKeeperConnectionException
* @throws IOException
*/
private void loadDisabledTables()
throws ZooKeeperConnectionException, IOException {
HConnectionManager.execute(new HConnectable<Void>(getConf()) {
@Override
public Void connect(HConnection connection) throws IOException {
ZooKeeperWatcher zkw = createZooKeeperWatcher();
try {
for (TableName tableName :
ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
disabledTables.add(tableName);
}
} catch (KeeperException ke) {
throw new IOException(ke);
} catch (InterruptedException e) {
throw new InterruptedIOException();
} finally {
zkw.close();
}
return null;
}
});
}
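HConnectionManager.execute wraps a short-lived connection around a single callback, as above; the same pattern works for any one-off call (a minimal sketch, assuming conf is an HBase Configuration; the HMerge.merge example further down uses the same idiom):

boolean masterUp = HConnectionManager.execute(new HConnectable<Boolean>(conf) {
  @Override
  public Boolean connect(HConnection connection) throws IOException {
    // Any per-connection work can go here; the connection lifecycle is managed for us.
    return connection.isMasterRunning();
  }
});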
Project: ditb
File: TestMetaMigrationConvertingToPB.java
/**
* Verify that every hbase:meta row is updated
*/
void verifyMetaRowsAreUpdated(HConnection hConnection)
throws IOException {
List<Result> results = MetaTableAccessor.fullScan(hConnection);
assertTrue(results.size() >= REGION_COUNT);
for (Result result : results) {
byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER);
assertTrue(hriBytes != null && hriBytes.length > 0);
assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITA_QUALIFIER);
if (splitA != null && splitA.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
}
byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITB_QUALIFIER);
if (splitB != null && splitB.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
}
}
}
Project: ditb
File: TestLoadIncrementalHFilesSplitRecovery.java
@SuppressWarnings("deprecation")
private HConnection getMockedConnection(final Configuration conf)
throws IOException, ServiceException {
HConnection c = Mockito.mock(HConnection.class);
Mockito.when(c.getConfiguration()).thenReturn(conf);
Mockito.doNothing().when(c).close();
// Make it so we return a particular location when asked.
final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
ServerName.valueOf("example.org", 1234, 0));
Mockito.when(c.getRegionLocation((TableName) Mockito.any(),
(byte[]) Mockito.any(), Mockito.anyBoolean())).
thenReturn(loc);
Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).
thenReturn(loc);
ClientProtos.ClientService.BlockingInterface hri =
Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
Mockito.when(hri.bulkLoadHFile((RpcController)Mockito.any(), (BulkLoadHFileRequest)Mockito.any())).
thenThrow(new ServiceException(new IOException("injecting bulk load error")));
Mockito.when(c.getClient(Mockito.any(ServerName.class))).
thenReturn(hri);
return c;
}
Project: ditb
File: TestHBaseFsck.java
/**
* Get region info from local cluster.
*/
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
for (ServerName hsi : regionServers) {
AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
// list all online regions from this region server
List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
List<String> regionNames = new ArrayList<String>();
for (HRegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
}
return mm;
}
Project: hadooparchitecturebook
File: RunLocalTest.java
private static void populateUserProfileData(HConnection connection) throws Exception {
UserProfile up1 = new UserProfile();
up1.userId = "101";
up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
up1.historicAvg90PercentSingleDaySpend = 90.0;
up1.historicAvgSingleDaySpend = 50.0;
up1.todayMaxSpend = 0.0;
up1.todayNumOfPurchases = 0l;
HBaseUtils.populateUserProfile(connection, up1);
up1.userId = "102";
up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
up1.historicAvg90PercentSingleDaySpend = 90.0;
up1.historicAvgSingleDaySpend = 50.0;
up1.todayMaxSpend = 0.0;
up1.todayNumOfPurchases = 0l;
HBaseUtils.populateUserProfile(connection, up1);
up1.userId = "103";
up1.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
up1.historicAvg90PercentSingleDaySpend = 90.0;
up1.historicAvgSingleDaySpend = 50.0;
up1.todayMaxSpend = 0.0;
up1.todayNumOfPurchases = 0l;
HBaseUtils.populateUserProfile(connection, up1);
}
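The three blocks above differ only in the userId; the same test data can be written in a loop (a sketch reusing the fields and the HBaseUtils helper shown above):

private static void populateUserProfileData(HConnection connection) throws Exception {
  for (String userId : new String[] { "101", "102", "103" }) {
    UserProfile up = new UserProfile();
    up.userId = userId;
    up.lastUpdatedTimeStamp = System.currentTimeMillis() - 1000;
    up.historicAvg90PercentSingleDaySpend = 90.0;
    up.historicAvgSingleDaySpend = 50.0;
    up.todayMaxSpend = 0.0;
    up.todayNumOfPurchases = 0L;
    // Each profile is persisted with the project's HBaseUtils helper.
    HBaseUtils.populateUserProfile(connection, up);
  }
}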
Project: HIndex
File: DistributedHBaseCluster.java
@Override
public ServerName getServerHoldingRegion(byte[] regionName) throws IOException {
HConnection connection = admin.getConnection();
HRegionLocation regionLoc = connection.locateRegion(regionName);
if (regionLoc == null) {
LOG.warn("Cannot find region server holding region " + Bytes.toString(regionName)
+ " for table " + HRegionInfo.getTableName(regionName) + ", start key [" +
Bytes.toString(HRegionInfo.getStartKey(regionName)) + "]");
return null;
}
AdminProtos.AdminService.BlockingInterface client =
connection.getAdmin(regionLoc.getServerName());
ServerInfo info = ProtobufUtil.getServerInfo(client);
return ProtobufUtil.toServerName(info.getServerName());
}
Project: LCIndex-HBase-0.94.16
File: TestCatalogTracker.java
/**
* Test get of root region fails properly if nothing to connect to.
* @throws IOException
* @throws InterruptedException
* @throws KeeperException
*/
@Test
public void testVerifyRootRegionLocationFails()
throws IOException, InterruptedException, KeeperException {
HConnection connection = Mockito.mock(HConnection.class);
ConnectException connectException =
new ConnectException("Connection refused");
final HRegionInterface implementation =
Mockito.mock(HRegionInterface.class);
Mockito.when(implementation.getRegionInfo((byte [])Mockito.any())).
thenThrow(connectException);
Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
Mockito.anyInt(), Mockito.anyBoolean())).
thenReturn(implementation);
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
RootLocationEditor.setRootLocation(this.watcher,
new ServerName("example.com", 1234, System.currentTimeMillis()));
Assert.assertFalse(ct.verifyRootRegionLocation(100));
}
Project: LCIndex-HBase-0.94.16
File: TestCatalogTracker.java
/**
* Test waiting on root w/ no timeout specified.
* @throws IOException
* @throws InterruptedException
* @throws KeeperException
*/
@Test public void testNoTimeoutWaitForRoot()
throws IOException, InterruptedException, KeeperException {
HConnection connection = Mockito.mock(HConnection.class);
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
ServerName hsa = ct.getRootLocation();
Assert.assertNull(hsa);
// Now test waiting on root location getting set.
Thread t = new WaitOnMetaThread(ct);
startWaitAliveThenWaitItLives(t, 1000);
// Set a root location.
hsa = setRootLocation();
// Join the thread... should exit shortly.
t.join();
// Now root is available.
Assert.assertTrue(ct.getRootLocation().equals(hsa));
}
Project: LCIndex-HBase-0.94.16
File: TestCatalogTracker.java
/**
* @param implementation An {@link HRegionInterface} instance; you'll likely
* want to pass a mocked HRS; can be null.
* @return Mock up a connection that returns a {@link org.apache.hadoop.conf.Configuration} when
* {@link HConnection#getConfiguration()} is called, a 'location' when
* {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
* and that returns the passed {@link HRegionInterface} instance when
* {@link HConnection#getHRegionConnection(String, int)}
* is called. (Be sure to call
* {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
* when done with this mocked Connection.)
* @throws IOException
*/
private HConnection mockConnection(final HRegionInterface implementation)
throws IOException {
HConnection connection =
HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
Mockito.doNothing().when(connection).close();
// Make it so we return any old location when asked.
final HRegionLocation anyLocation =
new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN.getHostname(),
SN.getPort());
Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
(byte[]) Mockito.any(), Mockito.anyBoolean())).
thenReturn(anyLocation);
Mockito.when(connection.locateRegion((byte[]) Mockito.any(),
(byte[]) Mockito.any())).
thenReturn(anyLocation);
if (implementation != null) {
// If a call to getHRegionConnection, return this implementation.
Mockito.when(connection.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())).
thenReturn(implementation);
}
return connection;
}
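Per the javadoc reminder above, a test that uses this helper should delete the cached connection when it is finished (a minimal sketch; the surrounding test body is elided):

HConnection connection = mockConnection(implementation);
try {
  // ... exercise the CatalogTracker against the mocked connection ...
} finally {
  // Remove the connection cached for this Configuration once the test is done.
  HConnectionManager.deleteConnection(connection.getConfiguration());
}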
Project: HIndex
File: TestCatalogTracker.java
/**
* Test get of meta region fails properly if nothing to connect to.
* @throws IOException
* @throws InterruptedException
* @throws KeeperException
* @throws ServiceException
*/
@Test
public void testVerifyMetaRegionLocationFails()
throws IOException, InterruptedException, KeeperException, ServiceException {
HConnection connection = Mockito.mock(HConnection.class);
ServiceException connectException =
new ServiceException(new ConnectException("Connection refused"));
final AdminProtos.AdminService.BlockingInterface implementation =
Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
(GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
thenReturn(implementation);
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
MetaRegionTracker.setMetaLocation(this.watcher,
ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
Assert.assertFalse(ct.verifyMetaRegionLocation(100));
}
Project: LCIndex-HBase-0.94.16
File: TestLoadIncrementalHFilesSplitRecovery.java
private HConnection getMockedConnection(final Configuration conf)
throws IOException {
HConnection c = Mockito.mock(HConnection.class);
Mockito.when(c.getConfiguration()).thenReturn(conf);
Mockito.doNothing().when(c).close();
// Make it so we return a particular location when asked.
final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
"example.org", 1234);
Mockito.when(c.getRegionLocation((byte[]) Mockito.any(),
(byte[]) Mockito.any(), Mockito.anyBoolean())).
thenReturn(loc);
Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())).
thenReturn(loc);
HRegionInterface hri = Mockito.mock(HRegionInterface.class);
Mockito.when(hri.bulkLoadHFiles(Mockito.anyList(), (byte [])Mockito.any(),
Mockito.anyBoolean())).thenThrow(new IOException("injecting bulk load error"));
Mockito.when(c.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())).
thenReturn(hri);
return c;
}
Project: LCIndex-HBase-0.94.16
File: TestHBaseFsck.java
/**
* Get region info from local cluster.
*/
Map<ServerName, List<String>> getDeployedHRIs(HBaseAdmin admin)
throws IOException {
ClusterStatus status = admin.getMaster().getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
HConnection connection = admin.getConnection();
for (ServerName hsi : regionServers) {
HRegionInterface server =
connection.getHRegionConnection(hsi.getHostname(), hsi.getPort());
// list all online regions from this region server
List<HRegionInfo> regions = server.getOnlineRegions();
List<String> regionNames = new ArrayList<String>();
for (HRegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
}
return mm;
}
Project: HIndex
File: TestHBaseFsck.java
/**
* Get region info from local cluster.
*/
Map<ServerName, List<String>> getDeployedHRIs(
final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
HConnection connection = admin.getConnection();
for (ServerName hsi : regionServers) {
AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
// list all online regions from this region server
List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
List<String> regionNames = new ArrayList<String>();
for (HRegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
}
return mm;
}
Project: pbase
File: WALSplitter.java
/**
* Get a writer and path for a log starting at the given entry. This function is threadsafe so
* long as multiple threads are always acting on different regions.
* @return null if this region shouldn't output any logs
*/
private RegionServerWriter getRegionServerWriter(String loc) throws IOException {
RegionServerWriter ret = writers.get(loc);
if (ret != null) {
return ret;
}
TableName tableName = getTableFromLocationStr(loc);
if(tableName == null){
throw new IOException("Invalid location string:" + loc + " found. Replay aborted.");
}
HConnection hconn = getConnectionByTableName(tableName);
synchronized (writers) {
ret = writers.get(loc);
if (ret == null) {
ret = new RegionServerWriter(conf, tableName, hconn);
writers.put(loc, ret);
}
}
return ret;
}
Project: pbase
File: ConnectionCache.java
/**
* Get the cached connection for the current user.
* If none or timed out, create a new one.
*/
ConnectionInfo getCurrentConnection() throws IOException {
String userName = getEffectiveUser();
ConnectionInfo connInfo = connections.get(userName);
if (connInfo == null || !connInfo.updateAccessTime()) {
Lock lock = locker.acquireLock(userName);
try {
connInfo = connections.get(userName);
if (connInfo == null) {
UserGroupInformation ugi = realUser;
if (!userName.equals(realUserName)) {
ugi = UserGroupInformation.createProxyUser(userName, realUser);
}
User user = userProvider.create(ugi);
HConnection conn = HConnectionManager.createConnection(conf, user);
connInfo = new ConnectionInfo(conn, userName);
connections.put(userName, connInfo);
}
} finally {
lock.unlock();
}
}
return connInfo;
}
Project: pbase
File: SplitTransaction.java
private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
ServerName serverName, List<Mutation> metaEntries) throws IOException {
List<Mutation> mutations = metaEntries;
HRegionInfo copyOfParent = new HRegionInfo(parent);
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
//Put for parent
Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
mutations.add(putParent);
//Puts for daughters
Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
addLocation(putB, serverName, 1);
mutations.add(putA);
mutations.add(putB);
MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
Project: pbase
File: TestMetaMigrationConvertingToPB.java
/**
* Verify that every hbase:meta row is updated
*/
void verifyMetaRowsAreUpdated(HConnection hConnection)
throws IOException {
List<Result> results = MetaTableAccessor.fullScan(hConnection);
assertTrue(results.size() >= REGION_COUNT);
for (Result result : results) {
byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER);
assertTrue(hriBytes != null && hriBytes.length > 0);
assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITA_QUALIFIER);
if (splitA != null && splitA.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
}
byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SPLITB_QUALIFIER);
if (splitB != null && splitB.length > 0) {
assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
}
}
}
Project: pbase
File: TestHBaseFsck.java
/**
* Get region info from local cluster.
*/
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
for (ServerName hsi : regionServers) {
AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
// list all online regions from this region server
List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
List<String> regionNames = new ArrayList<String>();
for (HRegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
}
return mm;
}
Project: pbase
File: TestEndToEndSplitTransaction.java
/**
* attempt to locate the region and perform a get and scan
* @return True if successful, False otherwise.
*/
private boolean test(HConnection con, TableName tableName, byte[] row,
HRegionServer server) {
// not using HTable to avoid timeouts and retries
try {
byte[] regionName = con.relocateRegion(tableName, row).getRegionInfo()
.getRegionName();
// get and scan should now succeed without exception
ClientProtos.GetRequest request =
RequestConverter.buildGetRequest(regionName, new Get(row));
server.getRSRpcServices().get(null, request);
ScanRequest scanRequest = RequestConverter.buildScanRequest(
regionName, new Scan(row), 1, true);
try {
server.getRSRpcServices().scan(
new PayloadCarryingRpcController(), scanRequest);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
} catch (IOException x) {
return false;
} catch (ServiceException e) {
return false;
}
return true;
}
Project: HIndex
File: HBaseFsck.java
/**
* Load the list of disabled tables in ZK into local set.
* @throws ZooKeeperConnectionException
* @throws IOException
*/
private void loadDisabledTables()
throws ZooKeeperConnectionException, IOException {
HConnectionManager.execute(new HConnectable<Void>(getConf()) {
@Override
public Void connect(HConnection connection) throws IOException {
ZooKeeperWatcher zkw = createZooKeeperWatcher();
try {
for (TableName tableName :
ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) {
disabledTables.add(tableName);
}
} catch (KeeperException ke) {
throw new IOException(ke);
} finally {
zkw.close();
}
return null;
}
});
}
Project: ditb
File: MetaMigrationConvertingToPB.java
/**
* @param hConnection connection to be used
* @return True if the meta table has been migrated.
* @throws IOException
*/
static boolean isMetaTableUpdated(final HConnection hConnection) throws IOException {
List<Result> results = MetaTableAccessor.fullScanOfMeta(hConnection);
if (results == null || results.isEmpty()) {
LOG.info("hbase:meta doesn't have any entries to update.");
return true;
}
for (Result r : results) {
byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
if (!isMigrated(value)) {
return false;
}
}
return true;
}
Project: ditb
File: ReplicationSinkManager.java
/**
* Instantiate for a single replication peer cluster.
* @param conn connection to the peer cluster
* @param peerClusterId identifier of the peer cluster
* @param endpoint replication endpoint for inter cluster replication
* @param conf HBase configuration, used for determining replication source ratio and bad peer
* threshold
*/
public ReplicationSinkManager(HConnection conn, String peerClusterId,
HBaseReplicationEndpoint endpoint, Configuration conf) {
this.conn = conn;
this.peerClusterId = peerClusterId;
this.endpoint = endpoint;
this.badReportCounts = Maps.newHashMap();
this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO);
this.badSinkThreshold = conf.getInt("replication.bad.sink.threshold",
DEFAULT_BAD_SINK_THRESHOLD);
this.random = new Random();
}
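The two knobs read by this constructor can be tuned through the ordinary Configuration (an illustrative sketch; the values are arbitrary, not recommendations, and conn, peerClusterId and endpoint stand for objects the caller already has):

Configuration conf = HBaseConfiguration.create();
conf.setFloat("replication.source.ratio", 0.25f); // fraction of peer region servers considered as sinks
conf.setInt("replication.bad.sink.threshold", 5); // bad reports tolerated before a sink is dropped
ReplicationSinkManager sinkManager =
    new ReplicationSinkManager(conn, peerClusterId, endpoint, conf);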
Project: ditb
File: WALSplitter.java
private HConnection getConnectionByTableName(final TableName tableName) throws IOException {
HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
if (hconn == null) {
synchronized (this.tableNameToHConnectionMap) {
hconn = this.tableNameToHConnectionMap.get(tableName);
if (hconn == null) {
hconn = HConnectionManager.getConnection(conf);
this.tableNameToHConnectionMap.put(tableName, hconn);
}
}
}
return hconn;
}
Project: ditb
File: LoadIncrementalHFiles.java
/**
* @deprecated As of release 0.96
* (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
* This will be removed in HBase 2.0.0.
* Use {@link #tryAtomicRegionLoad(Connection, TableName, byte[], Collection)}.
*/
@Deprecated
protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
final byte [] tableName, final byte[] first, Collection<LoadQueueItem> lqis)
throws IOException {
return tryAtomicRegionLoad(conn, TableName.valueOf(tableName), first, lqis);
}
Project: ditb
File: HFileArchiveManager.java
public HFileArchiveManager(HConnection connection, Configuration conf)
throws ZooKeeperConnectionException, IOException {
this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
connection);
this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(),
this.zooKeeper);
}
Project: ditb
File: HMerge.java
/**
* Scans the table and merges two adjacent regions if they are small. This
* only happens when a lot of rows are deleted.
*
* When merging the hbase:meta region, the HBase instance must be offline.
* When merging a normal table, the HBase instance must be online, but the
* table must be disabled.
*
* @param conf - configuration object for HBase
* @param fs - FileSystem where regions reside
* @param tableName - Table to be compacted
* @param testMasterRunning True if we are to verify master is down before
* running merge
* @throws IOException
*/
public static void merge(Configuration conf, FileSystem fs,
final TableName tableName, final boolean testMasterRunning)
throws IOException {
boolean masterIsRunning = false;
if (testMasterRunning) {
masterIsRunning = HConnectionManager
.execute(new HConnectable<Boolean>(conf) {
@Override
public Boolean connect(HConnection connection) throws IOException {
return connection.isMasterRunning();
}
});
}
if (tableName.equals(TableName.META_TABLE_NAME)) {
if (masterIsRunning) {
throw new IllegalStateException(
"Can not compact hbase:meta table if instance is on-line");
}
// TODO reenable new OfflineMerger(conf, fs).process();
} else {
if(!masterIsRunning) {
throw new IllegalStateException(
"HBase instance must be running to merge a normal table");
}
Admin admin = new HBaseAdmin(conf);
try {
if (!admin.isTableDisabled(tableName)) {
throw new TableNotDisabledException(tableName);
}
} finally {
admin.close();
}
new OnlineMerger(conf, fs, tableName).process();
}
}
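As the javadoc states, a user table must be disabled (and the master running) before it can be merged; a hedged caller sketch, with the table name purely illustrative:

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
TableName table = TableName.valueOf("my_table"); // illustrative table name
Admin admin = new HBaseAdmin(conf);
try {
  if (!admin.isTableDisabled(table)) {
    admin.disableTable(table); // merge() refuses to touch an enabled user table
  }
} finally {
  admin.close();
}
HMerge.merge(conf, fs, table, true); // true: verify the master is running first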
Project: ditb
File: MultiHConnection.java
/**
* Create multiple HConnection instances and initialize a thread pool executor
* @param conf configuration
* @param noOfConnections total no of HConnections to create
* @throws IOException
*/
public MultiHConnection(Configuration conf, int noOfConnections)
throws IOException {
this.noOfConnections = noOfConnections;
synchronized (this.hConnectionsLock) {
hConnections = new HConnection[noOfConnections];
for (int i = 0; i < noOfConnections; i++) {
HConnection conn = HConnectionManager.createConnection(conf);
hConnections[i] = conn;
}
}
createBatchPool(conf);
}
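A pool like this is typically consumed round-robin; a hypothetical accessor (the AtomicInteger counter is an assumption added for illustration and is not part of the class shown):

private final java.util.concurrent.atomic.AtomicInteger nextIndex =
    new java.util.concurrent.atomic.AtomicInteger();

HConnection nextConnection() {
  // Spread callers evenly across the connections created in the constructor above.
  int i = (nextIndex.getAndIncrement() & Integer.MAX_VALUE) % noOfConnections;
  return hConnections[i];
}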
Project: ditb
File: HBaseFsck.java
WorkItemRegion(HBaseFsck hbck, ServerName info,
ErrorReporter errors, HConnection connection) {
this.hbck = hbck;
this.rsinfo = info;
this.errors = errors;
this.connection = connection;
}
Project: ditb
File: HBaseFsckRepair.java
/**
* Fix multiple assignment by doing silent closes on each RS hosting the region
* and then force ZK unassigned node to OFFLINE to trigger assignment by
* master.
*
* @param connection HBase connection to the cluster
* @param region Region to undeploy
* @param servers list of Servers to undeploy from
*/
public static void fixMultiAssignment(HConnection connection, HRegionInfo region,
List<ServerName> servers)
throws IOException, KeeperException, InterruptedException {
HRegionInfo actualRegion = new HRegionInfo(region);
// Close region on the servers silently
for(ServerName server : servers) {
closeRegionSilentlyAndWait(connection, server, actualRegion);
}
// Force ZK node to OFFLINE so master assigns
forceOfflineInZK(connection.getAdmin(), actualRegion);
}
Project: ditb
File: HBaseFsckRepair.java
/**
* Contacts a region server and waits up to hbase.hbck.close.timeout ms
* (default 120s) to close the region. This bypasses the active hmaster.
*/
@SuppressWarnings("deprecation")
public static void closeRegionSilentlyAndWait(HConnection connection,
ServerName server, HRegionInfo region) throws IOException, InterruptedException {
long timeout = connection.getConfiguration()
.getLong("hbase.hbck.close.timeout", 120000);
ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
region, timeout);
}
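The 120 s default read above comes straight from the connection's Configuration, so it can be raised for clusters where region close is slow (an illustrative override; the value is arbitrary):

Configuration conf = HBaseConfiguration.create();
conf.setLong("hbase.hbck.close.timeout", 300000L); // wait up to 5 minutes for the region to close
HConnection connection = HConnectionManager.createConnection(conf);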
Project: ditb
File: SplitTransactionImpl.java
private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
ServerName serverName, List<Mutation> metaEntries, int regionReplication)
throws IOException {
List<Mutation> mutations = metaEntries;
HRegionInfo copyOfParent = new HRegionInfo(parent);
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
//Put for parent
Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
mutations.add(putParent);
//Puts for daughters
Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
addLocation(putB, serverName, 1);
mutations.add(putA);
mutations.add(putB);
// Add empty locations for region replicas of daughters so that number of replicas can be
// cached whenever the primary region is looked up from meta
for (int i = 1; i < regionReplication; i++) {
addEmptyLocation(putA, i);
addEmptyLocation(putB, i);
}
MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
Project: ditb
File: WALEditsReplaySink.java
/**
* Create a sink for WAL log entries replay
* @param conf
* @param tableName
* @param conn
* @throws IOException
*/
public WALEditsReplaySink(Configuration conf, TableName tableName, HConnection conn)
throws IOException {
this.conf = conf;
this.metrics = new MetricsWALEditsReplay();
this.conn = conn;
this.tableName = tableName;
this.skipErrors = conf.getBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS,
HConstants.DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS);
// timeout for a single replay operation; the default is 60 seconds
this.replayTimeout = conf.getInt("hbase.regionserver.logreplay.timeout", 60000);
this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
}
Project: ditb
File: WALEditsReplaySink.java
ReplayServerCallable(final HConnection connection, final TableName tableName,
final HRegionLocation regionLoc, final HRegionInfo regionInfo,
final List<Entry> entries) {
super(connection, tableName, null);
this.entries = entries;
this.regionInfo = regionInfo;
setLocation(regionLoc);
}
Project: ditb
File: RegionMergeTransactionImpl.java
private void mergeRegionsAndPutMetaEntries(HConnection hConnection,
HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
ServerName serverName, List<Mutation> metaEntries,
int regionReplication) throws IOException {
prepareMutationsForMerge(mergedRegion, regionA, regionB, serverName, metaEntries,
regionReplication);
MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
}
Project: ditb
File: TestReplicationSinkManager.java
@Before
public void setUp() {
replicationPeers = mock(ReplicationPeers.class);
replicationEndpoint = mock(HBaseReplicationEndpoint.class);
sinkManager = new ReplicationSinkManager(mock(HConnection.class),
PEER_CLUSTER_ID, replicationEndpoint, new Configuration());
}
Project: ditb
File: TestMetaMigrationConvertingToPB.java
@Test
public void testMetaMigration() throws Exception {
LOG.info("Starting testMetaMigration");
final byte [] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration"));
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
byte[][] regionNames = new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")};
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(),
regionNames);
HConnection masterHConnection =
TEST_UTIL.getMiniHBaseCluster().getMaster().getConnection();
// Erase the current version of root meta for this test.
undoVersionInRoot();
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testMetaMigration");
long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
TEST_UTIL.getHBaseCluster().getMaster());
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
// Should be one entry only and it should be for the table we just added.
assertEquals(regionNames.length, numMigratedRows);
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertEquals(true, metaUpdated);
verifyMetaRowsAreUpdated(masterHConnection);
}
Project: ditb
File: TestMetaMigrationConvertingToPB.java
/**
* This test assumes a master crash/failure during the meta migration process
* and attempts to continue the meta migration process when a new master takes over.
* When a master dies during the meta migration we will have some rows of
* META.CatalogFamily updated with PB serialization and some
* still hanging with writable serialization. When the backup master/ or
* fresh start of master attempts the migration it will encounter some rows of META
* already updated with new HRI and some still legacy. This test will simulate this
* scenario and validates that the migration process can safely skip the updated
* rows and migrate any pending rows at startup.
* @throws Exception
*/
@Test
public void testMasterCrashDuringMetaMigration() throws Exception {
final byte[] FAMILY = Bytes.toBytes("family");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf
("testMasterCrashDuringMetaMigration"));
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
Configuration conf = TEST_UTIL.getConfiguration();
// Create 10 New regions.
createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10);
// Create 10 Legacy regions.
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(), 10);
HConnection masterHConnection =
TEST_UTIL.getMiniHBaseCluster().getMaster().getConnection();
// Erase the current version of root meta for this test.
undoVersionInRoot();
MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
long numMigratedRows =
MetaMigrationConvertingToPB.updateMetaIfNecessary(
TEST_UTIL.getHBaseCluster().getMaster());
assertEquals(numMigratedRows, 10);
// Assert that the flag in ROOT is updated to reflect the correct status
boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertEquals(true, metaUpdated);
verifyMetaRowsAreUpdated(masterHConnection);
LOG.info("END testMasterCrashDuringMetaMigration");
}
Project: ditb
File: TestHBaseFsck.java
/**
* This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master.
*/
private void undeployRegion(Connection conn, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException {
try {
HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) conn, sn, hri);
if (!hri.isMetaTable()) {
admin.offline(hri.getRegionName());
}
} catch (IOException ioe) {
LOG.warn("Got exception when attempting to offline region "
+ Bytes.toString(hri.getRegionName()), ioe);
}
}
Project: hadooparchitecturebook
File: RunLocalTest.java
private static void populateValidationRules(HConnection connection) throws Exception {
HashSet<String> banndedVandors = new HashSet<String>();
banndedVandors.add("badVendor");
ValidationRules rules = new ValidationRules(banndedVandors, 2.0);
HBaseUtils.populateValidationRules(connection, rules);
}