public static void main(String[] args) {
  try {
    Configuration conf = HBaseConfiguration.create();
    // create a new HBase connection object (singleton)
    conn = HConnectionManager.createConnection(conf);
    QueryAll("t");
  } catch (IOException e) {
    e.printStackTrace();
  }
  // createTable("wujintao");
  // insertData("wujintao");
  // QueryByCondition1("wujintao");
  // QueryByCondition2("wujintao");
  // QueryByCondition3("wujintao");
  // deleteRow("wujintao", "abcdef");
  // deleteByCondition("wujintao", "abcdef");
}
@Override
public void init(Context context) throws IOException {
  super.init(context);
  this.conf = HBaseConfiguration.create(ctx.getConfiguration());
  decorateConf();
  this.maxRetriesMultiplier =
      this.conf.getInt("replication.source.maxretriesmultiplier", 300);
  this.socketTimeoutMultiplier =
      this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = HConnectionManager.createConnection(this.conf);
  this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
  this.metrics = context.getMetrics();
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
  // per sink thread pool
  this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
      HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
  this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS,
      new LinkedBlockingQueue<Runnable>());
  this.exec.allowCoreThreadTimeOut(true);
}
/**
 * Load the list of disabled tables in ZK into local set.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
private void loadDisabledTables() throws ZooKeeperConnectionException, IOException {
  HConnectionManager.execute(new HConnectable<Void>(getConf()) {
    @Override
    public Void connect(HConnection connection) throws IOException {
      ZooKeeperWatcher zkw = createZooKeeperWatcher();
      try {
        for (TableName tableName :
            ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
          disabledTables.add(tableName);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      } catch (InterruptedException e) {
        throw new InterruptedIOException();
      } finally {
        zkw.close();
      }
      return null;
    }
  });
}
@Test
public void testMultipleZK()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  Table localMeta =
      new HTable(new Configuration(TEST_UTIL.getConfiguration()), TableName.META_TABLE_NAME);
  Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
  otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
  Table ipMeta = new HTable(otherConf, TableName.META_TABLE_NAME);

  // dummy, just to open the connection
  final byte[] row = new byte[] { 'r' };
  localMeta.exists(new Get(row));
  ipMeta.exists(new Get(row));

  // make sure they aren't the same
  ZooKeeperWatcher z1 =
      getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration()));
  ZooKeeperWatcher z2 = getZooKeeperWatcher(HConnectionManager.getConnection(otherConf));
  assertFalse(z1 == z2);
  assertFalse(z1.getQuorum().equals(z2.getQuorum()));

  localMeta.close();
  ipMeta.close();
}
@Override
public void initialize(Configuration conf) throws IOException {
  this.conf = conf;
  this.hBaseAdmin = new HBaseAdmin(conf);
  this.connection = HConnectionManager.createConnection(conf);

  final TableName stateTable =
      TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  LOG.info("Initializing plugin with state table {}:{}", stateTable.getNamespaceAsString(),
      stateTable.getNameAsString());
  createPruneTable(stateTable);
  this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(stateTable);
    }
  });
}
@Before
public void beforeTest() throws Exception {
  pruneStateTable =
      TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  HTable table = createTable(pruneStateTable.getName(), new byte[][] { DataJanitorState.FAMILY }, false,
      // Prune state table is a non-transactional table, hence no transaction co-processor
      Collections.<String>emptyList());
  table.close();
  connection = HConnectionManager.createConnection(conf);
  dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(pruneStateTable);
    }
  });
}
/**
 * Sets up common resources required by all clients.
 */
public void init() throws IOException {
  Injector injector = Guice.createInjector(
      new ConfigModule(conf),
      new ZKModule(),
      new DiscoveryModules().getDistributedModules(),
      new TransactionModules().getDistributedModules(),
      new TransactionClientModule()
  );
  zkClient = injector.getInstance(ZKClientService.class);
  zkClient.startAndWait();
  txClient = injector.getInstance(TransactionServiceClient.class);
  createTableIfNotExists(conf, TABLE, new byte[][] { FAMILY });
  conn = HConnectionManager.createConnection(conf);
}
/**
 * Load the list of disabled tables in ZK into local set.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
private void loadDisabledTables() throws ZooKeeperConnectionException, IOException {
  HConnectionManager.execute(new HConnectable<Void>(getConf()) {
    @Override
    public Void connect(HConnection connection) throws IOException {
      ZooKeeperWatcher zkw = connection.getZooKeeperWatcher();
      try {
        for (String tableName : ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) {
          disabledTables.add(Bytes.toBytes(tableName));
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      return null;
    }
  });
}
@Test
public void testMultipleZK() {
  try {
    HTable localMeta =
        new HTable(new Configuration(TEST_UTIL.getConfiguration()), HConstants.META_TABLE_NAME);
    Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
    otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME);

    // dummy, just to open the connection
    localMeta.exists(new Get(HConstants.LAST_ROW));
    ipMeta.exists(new Get(HConstants.LAST_ROW));

    // make sure they aren't the same
    assertFalse(HConnectionManager.getConnection(localMeta.getConfiguration()).getZooKeeperWatcher()
        == HConnectionManager.getConnection(otherConf).getZooKeeperWatcher());
    assertFalse(HConnectionManager.getConnection(localMeta.getConfiguration())
        .getZooKeeperWatcher().getQuorum().equals(HConnectionManager
        .getConnection(otherConf).getZooKeeperWatcher().getQuorum()));

    localMeta.close();
    ipMeta.close();
  } catch (Exception e) {
    e.printStackTrace();
    fail();
  }
}
public synchronized static void waitForConnection(long timeout, TimeUnit timeoutUnit) {
  long before = System.currentTimeMillis();
  long after;
  long timeoutMS = TimeUnit.MILLISECONDS.convert(timeout, timeoutUnit);
  do {
    try {
      HConnection hc = HConnectionManager.createConnection(HBaseConfiguration.create());
      hc.close();
      after = System.currentTimeMillis();
      log.info("HBase server started after about {} ms", after - before);
      return;
    } catch (IOException e) {
      log.info("Exception caught while waiting for the HBase server to start", e);
    }
    after = System.currentTimeMillis();
  } while (timeoutMS > after - before);
  after = System.currentTimeMillis();
  log.warn("HBase server did not start in {} ms", after - before);
}
@Override
public void init(Context context) throws IOException {
  super.init(context);
  this.conf = HBaseConfiguration.create(ctx.getConfiguration());
  decorateConf();
  this.maxRetriesMultiplier =
      this.conf.getInt("replication.source.maxretriesmultiplier", 300);
  this.socketTimeoutMultiplier =
      this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = HConnectionManager.createConnection(this.conf);
  this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
  this.metrics = context.getMetrics();
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
}
/**
 * Get the cached connection for the current user.
 * If none or timed out, create a new one.
 */
ConnectionInfo getCurrentConnection() throws IOException {
  String userName = getEffectiveUser();
  ConnectionInfo connInfo = connections.get(userName);
  if (connInfo == null || !connInfo.updateAccessTime()) {
    Lock lock = locker.acquireLock(userName);
    try {
      connInfo = connections.get(userName);
      if (connInfo == null) {
        UserGroupInformation ugi = realUser;
        if (!userName.equals(realUserName)) {
          ugi = UserGroupInformation.createProxyUser(userName, realUser);
        }
        User user = userProvider.create(ugi);
        HConnection conn = HConnectionManager.createConnection(conf, user);
        connInfo = new ConnectionInfo(conn, userName);
        connections.put(userName, connInfo);
      }
    } finally {
      lock.unlock();
    }
  }
  return connInfo;
}
public static ImageInfo getImage(Configuration conf, Long id) throws DataException {
  HConnection connection = null;
  HTable table = null;
  try {
    connection = HConnectionManager.createConnection(conf);
    table = new HTable(conf, IMG_TABLE);
    Get get = new Get(Bytes.toBytes(id));
    Result res = table.get(get);
    return getImageFromResult(res);
  } catch (IOException e) {
    throw new DataException("Error reading image: " + e.getMessage());
  } finally {
    close(connection, table);
  }
}
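The snippet above opens an HConnection but then builds a standalone HTable directly from the Configuration, so the table handle does not actually go through the connection that was just created. A minimal sketch of the same lookup routed through HConnection#getTable, reusing the IMG_TABLE constant and getImageFromResult(...) helper from the snippet; the method name getImageViaConnection and the inline close handling are illustrative assumptions:

public static ImageInfo getImageViaConnection(Configuration conf, Long id) throws DataException {
  HConnection connection = null;
  HTableInterface table = null;
  try {
    connection = HConnectionManager.createConnection(conf);
    // obtain the table from the connection so both share the same underlying resources
    table = connection.getTable(IMG_TABLE);
    Result res = table.get(new Get(Bytes.toBytes(id)));
    return getImageFromResult(res);
  } catch (IOException e) {
    throw new DataException("Error reading image: " + e.getMessage());
  } finally {
    // best-effort cleanup; close() declares IOException, so swallow it here
    if (table != null) {
      try { table.close(); } catch (IOException ignored) { }
    }
    if (connection != null) {
      try { connection.close(); } catch (IOException ignored) { }
    }
  }
}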
private ConnectionInfo getCurrentConnection() throws IOException {
  String userName = effectiveUser.get().getUserName();
  ConnectionInfo connInfo = connections.get(userName);
  if (connInfo == null || !connInfo.updateAccessTime()) {
    Lock lock = locker.acquireLock(userName);
    try {
      connInfo = connections.get(userName);
      if (connInfo == null) {
        User user = userProvider.create(effectiveUser.get());
        HConnection conn = HConnectionManager.createConnection(conf, user);
        connInfo = new ConnectionInfo(conn, userName);
        connections.put(userName, connInfo);
      }
    } finally {
      lock.unlock();
    }
  }
  return connInfo;
}
/**
 * Load the list of disabled tables in ZK into local set.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
private void loadDisabledTables() throws ZooKeeperConnectionException, IOException {
  HConnectionManager.execute(new HConnectable<Void>(getConf()) {
    @Override
    public Void connect(HConnection connection) throws IOException {
      ZooKeeperWatcher zkw = createZooKeeperWatcher();
      try {
        for (TableName tableName : ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) {
          disabledTables.add(tableName);
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      } finally {
        zkw.close();
      }
      return null;
    }
  });
}
@Test
public void testMultipleZK()
    throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException {
  HTable localMeta =
      new HTable(new Configuration(TEST_UTIL.getConfiguration()), TableName.META_TABLE_NAME);
  Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
  otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
  HTable ipMeta = new HTable(otherConf, TableName.META_TABLE_NAME);

  // dummy, just to open the connection
  final byte[] row = new byte[] { 'r' };
  localMeta.exists(new Get(row));
  ipMeta.exists(new Get(row));

  // make sure they aren't the same
  ZooKeeperWatcher z1 =
      getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration()));
  ZooKeeperWatcher z2 = getZooKeeperWatcher(HConnectionManager.getConnection(otherConf));
  assertFalse(z1 == z2);
  assertFalse(z1.getQuorum().equals(z2.getQuorum()));

  localMeta.close();
  ipMeta.close();
}
protected void map(NullWritable key, PeInputSplit value, final Context context)
    throws IOException, InterruptedException {
  Status status = new Status() {
    public void setStatus(String msg) {
      context.setStatus(msg);
    }
  };

  // Evaluation task
  pe.tableName = value.getTableName();
  long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), value.getRows(),
      value.getTotalRows(), value.isFlushCommits(), value.isWriteToWAL(),
      value.isUseTags(), value.getNoOfTags(),
      HConnectionManager.createConnection(context.getConfiguration()), status);

  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
  context.getCounter(Counter.ROWS).increment(value.rows);
  context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime));
  context.progress();
}
/**
 * Constructor that creates a connection to the local ZooKeeper ensemble.
 * @param conf Configuration to use
 * @throws IOException if an internal replication error occurs
 * @throws RuntimeException if replication isn't enabled.
 */
public ReplicationAdmin(Configuration conf) throws IOException {
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT)) {
    throw new RuntimeException("hbase.replication isn't true, please "
        + "enable it in order to use replication");
  }
  this.connection = HConnectionManager.getConnection(conf);
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  try {
    this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection);
    this.replicationPeers.init();
    this.replicationQueuesClient =
        ReplicationFactory.getReplicationQueuesClient(zkw, conf, this.connection);
    this.replicationQueuesClient.init();
  } catch (ReplicationException e) {
    throw new IOException("Error initializing the replication admin client.", e);
  }
}
@Override
public boolean initializeAdapter(Map<String, Object> config) {
  // Initialize HBase table
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", _quorum);
  conf.set("hbase.zookeeper.property.clientPort", _port);
  try {
    LOGGER.debug("=======Connecting to HBASE===========");
    LOGGER.debug("=======ZOOKEEPER = {}", conf.get("hbase.zookeeper.quorum"));
    HConnection connection = HConnectionManager.createConnection(conf);
    table = connection.getTable(_tableName);
    return true;
  } catch (IOException e) {
    LOGGER.debug("=======Unable to Connect to HBASE===========");
    e.printStackTrace();
  }
  return false;
}
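The adapter above discards its reference to the HConnection after calling getTable(), which leaves nothing to close on shutdown. A hedged sketch of a matching cleanup method, assuming the connection is kept in a field; the field and method names below are illustrative, not part of the original adapter:

private HConnection connection;   // assumed to be assigned in initializeAdapter()
private HTableInterface table;

public void cleanupAdapter() {
  try {
    if (table != null) {
      table.close();
    }
    if (connection != null) {
      // createConnection() returns a heavyweight connection the caller is expected to close
      connection.close();
    }
  } catch (IOException e) {
    LOGGER.error("Error closing HBase table or connection", e);
  }
}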
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
  this.collector = collector;
  try {
    this.connection = HConnectionManager.createConnection(constructConfiguration());
    this.dangerousEventsTable = connection.getTable(DANGEROUS_EVENTS_TABLE_NAME);
    this.eventsCountTable = connection.getTable(EVENTS_COUNT_TABLE_NAME);
    this.eventsTable = connection.getTable(EVENTS_TABLE_NAME);
  } catch (Exception e) {
    String errMsg = "Error retrieving connection and access to dangerousEventsTable";
    LOG.error(errMsg, e);
    throw new RuntimeException(errMsg, e);
  }
}
public void restart(byte[] firstRow) throws IOException {
  if (connection == null) {
    connection = HConnectionManager.createConnection(conf);
  }
  currentScan = new Scan(scan);
  currentScan.setStartRow(firstRow);
  currentScan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
  if (this.scanner != null) {
    if (logScannerActivity) {
      LOG.info("Closing the previously opened scanner object.");
    }
    this.scanner.close();
  }
  // TODO: should we reuse the timestamp from when the Transaction was first constructed?
  this.transaction = new Transaction(connection);
  this.scanner = transaction.getScanner(tableName, new ThemisScan(currentScan));
  if (logScannerActivity) {
    LOG.info("Current scan=" + currentScan.toString());
    timestamp = System.currentTimeMillis();
    rowcount = 0;
    totalRowCount = 0;
  }
}
/**
 * Insert the given list of Puts into the specified table.
 *
 * @param tableName
 * @param alists
 */
public static void insert(String tableName, ArrayList<Put> alists) {
  try {
    HConnection connection = HConnectionManager.createConnection(conf);
    HTableInterface table = connection.getTable(tableName);
    try {
      if (connection.isTableAvailable(TableName.valueOf(tableName))) {
        table.put(alists);
        log.info("add [" + tableName + "] success!");
      } else {
        log.info(tableName + " table does not exist!");
      }
    } catch (Exception e) {
      e.printStackTrace();
      log.error("insert of Put list has error -> " + e.getMessage());
    } finally {
      table.close();
      connection.close();
    }
  } catch (Exception ex) {
    ex.printStackTrace();
    log.error("insert of Put list has error -> " + ex.getMessage());
  }
}
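Since both HConnection and HTableInterface implement Closeable, the same insert path can be written with try-with-resources on Java 7+. A compact sketch under that assumption, reusing the static conf and log fields from the snippet above; the method name insertWithResources is illustrative:

public static void insertWithResources(String tableName, ArrayList<Put> puts) {
  try (HConnection connection = HConnectionManager.createConnection(conf);
       HTableInterface table = connection.getTable(tableName)) {
    if (connection.isTableAvailable(TableName.valueOf(tableName))) {
      // both handles are closed automatically, even if put() throws
      table.put(puts);
      log.info("put into [" + tableName + "] succeeded");
    } else {
      log.info(tableName + " table does not exist!");
    }
  } catch (IOException e) {
    log.error("insert into [" + tableName + "] failed -> " + e.getMessage());
  }
}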
/**
 * Load the list of disabled tables in ZK into local set.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
private void loadDisabledTables() throws ZooKeeperConnectionException, IOException {
  HConnectionManager.execute(new HConnectable<Void>(conf) {
    @Override
    public Void connect(HConnection connection) throws IOException {
      ZooKeeperWatcher zkw = connection.getZooKeeperWatcher();
      try {
        for (String tableName : ZKTable.getDisabledOrDisablingTables(zkw)) {
          disabledTables.add(Bytes.toBytes(tableName));
        }
      } catch (KeeperException ke) {
        throw new IOException(ke);
      }
      return null;
    }
  });
}
public static HConnection get(String url) {
  // find configuration
  Configuration conf = ConfigCache.get(url);
  if (conf == null) {
    conf = HadoopUtil.newHBaseConfiguration(url);
    ConfigCache.put(url, conf);
  }

  HConnection connection = ConnPool.get(url);
  try {
    // No double-checked locking here, since recreating a connection is not a big issue.
    if (connection == null) {
      connection = HConnectionManager.createConnection(conf);
      ConnPool.put(url, connection);
    }
  } catch (Throwable t) {
    throw new StorageException("Error when opening connection " + url, t);
  }
  return connection;
}
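A hypothetical caller of the pooled lookup above: table handles obtained from the shared HConnection are closed per use, while the connection itself stays cached in ConnPool. The method name, URL, table name, and row key below are illustrative placeholders:

public static Result readOneRow() throws IOException {
  HConnection conn = get("hbase-cluster-url");            // connection comes from / goes back to ConnPool
  HTableInterface table = conn.getTable("example_table"); // illustrative table name
  try {
    return table.get(new Get(Bytes.toBytes("row-1")));    // illustrative row key
  } finally {
    table.close();   // close only the table; the shared pooled connection stays open
  }
}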
@Override
public boolean initializeAdapter() {
  // Initialize HBase table
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", _quorum);
  conf.set("hbase.zookeeper.property.clientPort", _port);
  try {
    LOGGER.debug("=======Connecting to HBASE===========");
    LOGGER.debug("=======ZOOKEEPER = " + conf.get("hbase.zookeeper.quorum"));
    HConnection connection = HConnectionManager.createConnection(conf);
    table = connection.getTable(_tableName);
    return true;
  } catch (IOException e) {
    LOGGER.debug("=======Unable to Connect to HBASE===========");
    e.printStackTrace();
  }
  return false;
}
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
  this.collector = collector;
  try {
    this.hbaseConnection = HConnectionManager.createConnection(constructConfiguration());
    this.eventsCountTable = hbaseConnection.getTable(EVENTS_COUNT_TABLE_NAME);
    this.phoenixConnection = DriverManager.getConnection(phoenixConnectionUrl);
    this.phoenixConnection.setAutoCommit(true);
  } catch (Exception e) {
    String errMsg = "Error retrieving connection and access to eventsCountTable";
    LOG.error(errMsg, e);
    throw new RuntimeException(errMsg, e);
  }
}
private ConnectionInfo getCurrentConnection() throws IOException {
  String userName = effectiveUser.get();
  ConnectionInfo connInfo = connections.get(userName);
  if (connInfo == null || !connInfo.updateAccessTime()) {
    Lock lock = locker.acquireLock(userName);
    try {
      connInfo = connections.get(userName);
      if (connInfo == null) {
        UserGroupInformation ugi = realUser;
        if (!userName.equals(NULL_USERNAME)) {
          ugi = UserGroupInformation.createProxyUser(userName, realUser);
        }
        User user = userProvider.create(ugi);
        HConnection conn = HConnectionManager.createConnection(conf, user);
        connInfo = new ConnectionInfo(conn, userName);
        connections.put(userName, connInfo);
      }
    } finally {
      lock.unlock();
    }
  }
  return connInfo;
}