Java class org.apache.hadoop.hbase.client.HTablePool example source code
Project: SparkDemo
File: MyClass.java
public static void QueryByCondition1(String tableName) {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    try {
        Get get = new Get("abcdef".getBytes()); // look up a single row by rowkey
        Result r = table.get(get);
        System.out.println("Got rowkey: " + new String(r.getRow()));
        for (KeyValue keyValue : r.raw()) {
            System.out.println("column: " + new String(keyValue.getFamily())
                    + "====value: " + new String(keyValue.getValue()));
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
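Note that QueryByCondition1 never returns the checked-out table to the pool. A minimal cleanup sketch, assuming HBase 0.92 or later, where closing a pooled HTableInterface hands it back to the pool rather than destroying it (earlier releases used pool.putTable(table)); configuration and tableName are the same names used in the snippet above:
HTablePool pool = new HTablePool(configuration, 1000);
HTableInterface table = pool.getTable(tableName);
try {
    Result r = table.get(new Get(Bytes.toBytes("abcdef")));
    // ... work with the result ...
} finally {
    table.close(); // returns the table to the pool
}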
Project: SparkDemo
File: MyClass.java
public static void QueryByCondition2(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa")); // select rows where column1 equals "aaa"
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("Got rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + "====value: " + new String(keyValue.getValue()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Project: maker
File: KNNQuery.java
public static void main(String[] args) throws IOException {
    if (args.length != 3) {
        System.out.println(usage);
        System.exit(0);
    }

    double lon = Double.parseDouble(args[0]);
    double lat = Double.parseDouble(args[1]);
    int n = Integer.parseInt(args[2]);

    HTablePool pool = new HTablePool();
    KNNQuery q = new KNNQuery(pool);
    Queue<QueryMatch> ret = q.queryKNN(lat, lon, n);
    QueryMatch m;
    while ((m = ret.poll()) != null) {
        System.out.println(m);
    }
    pool.close();
}
Project: tddl5
File: HbFactory.java
private void initConfiguration() {
    if (clusterConfig.get(HbaseConf.cluster_name) == null || "".equals(clusterConfig.get(HbaseConf.cluster_name))) {
        throw new IllegalArgumentException("cluster name can not be null or ''!");
    }
    clusterName = clusterConfig.get(HbaseConf.cluster_name);
    Configuration conf = HBaseConfiguration.create();
    conf.set(HbaseConf.hbase_quorum, clusterConfig.get(HbaseConf.hbase_quorum));
    conf.set(HbaseConf.hbase_clientPort, clusterConfig.get(HbaseConf.hbase_clientPort));
    if (null != clusterConfig.get(HbaseConf.hbase_znode_parent)) {
        conf.set(HbaseConf.hbase_znode_parent, clusterConfig.get(HbaseConf.hbase_znode_parent));
    }
    conf.set("hbase.client.retries.number", "5");
    conf.set("hbase.client.pause", "200");
    conf.set("ipc.ping.interval", "3000");
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
    if (this.checkConfiguration(clusterConfig.get(HbaseConf.cluster_name), conf)) {
        configuration = conf;
        tablePool = new HTablePool(conf, 100);
    }
}
Project: apple-data
File: HBaseDataSource.java
/**
 * Init dataSource.
 */
public void init() {
    try {
        System.setProperty("javax.xml.parsers.DocumentBuilderFactory",
                "com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl");
        System.setProperty("javax.xml.parsers.SAXParserFactory",
                "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");

        initHbaseConfiguration();

        tablePool = new HTablePool(hbaseConfiguration, tablePoolMaxSize);
        tableFactory = new PooledHTableFactory(tablePool);

        log.info(this);
    } catch (Exception e) {
        log.error(e);
        throw new SimpleHBaseException(e);
    }
}
Project: oceandata
File: Pool.java
/**
 * @param args
 * @creatTime 1:57:57 PM
 * @author XuYi
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    Configuration config = HBaseConfiguration.create();
    config.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
    pool = new HTablePool(config, 10);
    // HTable table = (HTable) pool.getTable(Bytes.toBytes("manageLog"));
    // execute(table);
    // pool.putTable(table);
    // HTable table2 = (HTable) pool.getTable(Bytes.toBytes("manageLog"));
    // execute(table2);
    // pool.putTable(table2);
    for (int i = 0; i < 30; i++) {
        new Thread(new TestThread()).start();
    }
}
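TestThread is referenced above but not shown. A hypothetical worker, reusing the static pool field and the "manageLog" table from the commented-out code; the class body and row key are illustrative guesses, not the original source:
// Hypothetical worker; each thread checks a table out of the shared pool,
// uses it, and returns it.
static class TestThread implements Runnable {
    @Override
    public void run() {
        HTableInterface table = pool.getTable("manageLog");
        try {
            table.get(new Get(Bytes.toBytes("someRow"))); // "someRow" is illustrative
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                table.close(); // hands the table back to the shared pool
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}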
Project: oceandata
File: PoolTest0921.java
public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
    HTablePool pool = new HTablePool(conf, 1, new TableFactory2());

    HTableInterface table = pool.getTable(Bytes.toBytes("test3"));
    Get get1 = new Get(Bytes.toBytes("1"));
    table.get(get1);
    System.out.println(table);
    table.close();

    HTableInterface table2 = pool.getTable(Bytes.toBytes("test3"));
    table2.get(get1);
    System.out.println(table2);
    table2.close();
}
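TableFactory2 is not shown either. HTableInterfaceFactory is the real HBase 0.94 extension point such a class would implement; the body below is an illustrative sketch, not the original TableFactory2:
// Hypothetical custom factory; the logging is illustrative.
public class TableFactory2 implements HTableInterfaceFactory {

    @Override
    public HTableInterface createHTableInterface(Configuration config, byte[] tableName) {
        try {
            System.out.println("creating HTable for " + Bytes.toString(tableName));
            return new HTable(config, tableName);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void releaseHTableInterface(HTableInterface table) throws IOException {
        System.out.println("releasing HTable " + Bytes.toString(table.getTableName()));
        table.close();
    }
}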
Project: Tales
File: HBaseDataRepository.java
public HBaseDataRepository( Configuration theConfiguration, int theTablePoolsSize ) {
    Preconditions.checkNotNull( theConfiguration, "need configuration" );
    Preconditions.checkArgument( theTablePoolsSize > 0, "need a pool size greater than 0" );

    configuration = theConfiguration;
    tablePool = new HTablePool( configuration, theTablePoolsSize );
    facilityManager = new SimpleFacilityManager();

    StorageTypeFacility storageTypeFacility = new StorageTypeFacility( );
    NameValidator typeNameValidator = new NopNameValidator( );
    NameValidator memberNameValidator = new LowerCaseEntityNameValidator( );
    JsonTranslationFacility jsonFacility = new JsonTranslationFacility(
            new StorageTypeSource( storageTypeFacility ),
            Readability.MACHINE,
            typeNameValidator,
            memberNameValidator );
    HBaseTranslationFacility mapFacility = new HBaseTranslationFacility( storageTypeFacility, jsonFacility );

    facilityManager.addFacility( StorageTypeFacility.class, storageTypeFacility );
    facilityManager.addFacility( HBaseTranslationFacility.class, mapFacility );
    facilityManager.addFacility( DefinitionFacility.class, new HBaseDefinitionFacility( ) );
    facilityManager.addFacility( LifecycleFacility.class, new StandardLifecycleFacility<HBaseDataRepository, HBaseDataContext>( ) );
}
Project: cdk
File: UserProfileExample.java
/**
 * The constructor will start by registering the schemas with the meta store
 * table in HBase, and create the required tables to run.
 */
public UserProfileExample() {
    Configuration conf = HBaseConfiguration.create();
    HTablePool pool = new HTablePool(conf, 10);
    SchemaManager schemaManager = new DefaultSchemaManager(pool);

    registerSchemas(conf, schemaManager);

    userProfileDao = new SpecificAvroDao<UserProfileModel>(pool,
            "cdk_example_user_profiles", "UserProfileModel", schemaManager);
    userActionsDao = new SpecificAvroDao<UserActionsModel>(pool,
            "cdk_example_user_profiles", "UserActionsModel", schemaManager);
    userProfileActionsDao = SpecificAvroDao.buildCompositeDaoWithEntityManager(
            pool, "cdk_example_user_profiles", UserProfileActionsModel.class,
            schemaManager);
}
Project: cdk
File: SpecificAvroDao.java
/**
 * Create a CompositeDao, which will return SpecificRecord instances
 * in a Map container.
 *
 * @param tablePool
 *          An HTablePool instance to use for connecting to HBase
 * @param tableName
 *          The table name this dao will read from and write to
 * @param subEntitySchemaStrings
 *          The Avro schema strings of the subentities that make up the
 *          composite.
 * @return The CompositeDao instance.
 * @throws SchemaNotFoundException
 * @throws SchemaValidationException
 */
@SuppressWarnings("unchecked")
public static <K extends SpecificRecord, S extends SpecificRecord> Dao<Map<String, S>> buildCompositeDao(
        HTablePool tablePool, String tableName,
        List<String> subEntitySchemaStrings) {
    List<EntityMapper<S>> entityMappers = new ArrayList<EntityMapper<S>>();
    for (String subEntitySchemaString : subEntitySchemaStrings) {
        AvroEntitySchema subEntitySchema = parser
                .parseEntitySchema(subEntitySchemaString);
        Class<S> subEntityClass;
        try {
            subEntityClass = (Class<S>) Class.forName(subEntitySchema
                    .getAvroSchema().getFullName());
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
        entityMappers.add(SpecificAvroDao.<S> buildEntityMapper(
                subEntitySchemaString, subEntitySchemaString,
                subEntityClass));
    }
    return new SpecificMapCompositeAvroDao<S>(tablePool, tableName, entityMappers);
}
Project: cdk
File: SpecificAvroDao.java
/**
 * Create a CompositeDao, which will return SpecificRecord instances
 * in a Map container.
 *
 * @param tablePool
 *          An HTablePool instance to use for connecting to HBase.
 * @param tableName
 *          The table name of the managed schema.
 * @param subEntityClasses
 *          The classes that make up the subentities.
 * @param schemaManager
 *          The SchemaManager that will be used to create the entity mappers
 *          that power this dao.
 * @return The CompositeDao instance.
 * @throws SchemaNotFoundException
 */
public static <K extends SpecificRecord, S extends SpecificRecord> Dao<Map<String, S>> buildCompositeDaoWithEntityManager(
        HTablePool tablePool, String tableName, List<Class<S>> subEntityClasses,
        SchemaManager schemaManager) {
    List<EntityMapper<S>> entityMappers = new ArrayList<EntityMapper<S>>();
    for (Class<S> subEntityClass : subEntityClasses) {
        String entityName = getSchemaFromEntityClass(subEntityClass).getName();
        entityMappers.add(new VersionedAvroEntityMapper.Builder()
                .setSchemaManager(schemaManager).setTableName(tableName)
                .setEntityName(entityName).setSpecific(true)
                .<S> build());
    }
    return new SpecificMapCompositeAvroDao<S>(tablePool, tableName,
            entityMappers);
}
Project: cdk
File: SpecificAvroDao.java
public SpecificCompositeAvroDao(HTablePool tablePool, String tableName,
        List<EntityMapper<S>> entityMappers, Class<E> entityClass) {
    super(tablePool, tableName, entityMappers);
    this.entityClass = entityClass;
    try {
        entityConstructor = entityClass.getConstructor();
        entitySchema = (Schema) entityClass.getDeclaredField("SCHEMA$").get(null);
    } catch (Throwable e) {
        LOG.error("Error getting constructor or schema field for entity of type: "
                + entityClass.getName(), e);
        throw new DatasetException(e);
    }
}
Project: cdk
File: BaseEntityBatch.java
/**
 * Checks an HTable out of the HTablePool and modifies it to take advantage of
 * batch puts. This is very useful when performing many consecutive puts.
 *
 * @param clientTemplate
 *          The client template to use
 * @param entityMapper
 *          The EntityMapper to use for mapping
 * @param pool
 *          The HBase table pool
 * @param tableName
 *          The name of the HBase table
 * @param writeBufferSize
 *          The batch buffer size in bytes.
 */
public BaseEntityBatch(HBaseClientTemplate clientTemplate,
        EntityMapper<E> entityMapper, HTablePool pool, String tableName,
        long writeBufferSize) {
    this.table = pool.getTable(tableName);
    this.table.setAutoFlush(false);
    this.clientTemplate = clientTemplate;
    this.entityMapper = entityMapper;
    this.state = ReaderWriterState.NEW;

    /*
     * If the writeBufferSize is less than the currentBufferSize, then the
     * buffer will get flushed automatically by HBase. This should never
     * happen, since we're getting a fresh table out of the pool, and the
     * writeBuffer should be empty.
     */
    try {
        table.setWriteBufferSize(writeBufferSize);
    } catch (IOException e) {
        throw new DatasetIOException("Error flushing commits for table ["
                + table + "]", e);
    }
}
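The pattern BaseEntityBatch sets up is plain HBase client behavior, independent of cdk. A standalone sketch of the same write-buffer technique against a pooled table; the table name, column family, and buffer size are illustrative:
HTableInterface table = pool.getTable("my_table");
table.setAutoFlush(false);                 // buffer puts client-side instead of one RPC per put
table.setWriteBufferSize(2 * 1024 * 1024); // flush automatically at roughly 2 MB of buffered data
try {
    for (int i = 0; i < 10000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i));
        table.put(put); // buffered, not sent immediately
    }
    table.flushCommits(); // push whatever is still buffered
} finally {
    table.close(); // returns the table to the pool
}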
Project: SparkDemo
File: MyClass.java
public static void insertData(String tableName) {
    System.out.println("start insert data ......");
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    // Each Put represents one row; a new Put would be a new row. Every row has
    // a unique rowkey, passed to the Put constructor.
    Put put = new Put("112233bbbcccc".getBytes());
    put.add("column1".getBytes(), null, "aaa".getBytes()); // first column of this row
    put.add("column2".getBytes(), null, "bbb".getBytes()); // second column of this row
    put.add("column3".getBytes(), null, "ccc".getBytes()); // third column of this row
    try {
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("end insert data ......");
}
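insertData passes null as the qualifier, so each value lands in the empty qualifier of its own column family. A variant writing several qualified columns under a single family; the family and qualifier names are illustrative:
Put put = new Put(Bytes.toBytes("112233bbbcccc"));
put.add(Bytes.toBytes("f"), Bytes.toBytes("column1"), Bytes.toBytes("aaa"));
put.add(Bytes.toBytes("f"), Bytes.toBytes("column2"), Bytes.toBytes("bbb"));
put.add(Bytes.toBytes("f"), Bytes.toBytes("column3"), Bytes.toBytes("ccc"));
table.put(put); // one row, three columns in family "f"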
Project: LCIndex-HBase-0.94.16
File: RowResultGenerator.java
public RowResultGenerator(final String tableName, final RowSpec rowspec,
        final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
        Get get = new Get(rowspec.getRow());
        if (rowspec.hasColumns()) {
            for (byte[] col : rowspec.getColumns()) {
                byte[][] split = KeyValue.parseColumn(col);
                if (split.length == 2 && split[1].length != 0) {
                    get.addColumn(split[0], split[1]);
                } else {
                    get.addFamily(split[0]);
                }
            }
        }
        get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
        get.setMaxVersions(rowspec.getMaxVersions());
        if (filter != null) {
            get.setFilter(filter);
        }
        Result result = table.get(get);
        if (result != null && !result.isEmpty()) {
            valuesI = result.list().iterator();
        }
    } catch (DoNotRetryIOException e) {
        // Warn here because Stargate will return 404 in the case where multiple
        // column families were specified but one did not exist -- currently
        // HBase will fail the whole Get.
        // Specifying multiple columns in a URI should be uncommon usage, but
        // this helps avoid confusion by leaving a record of what happened here
        // in the log.
        LOG.warn(StringUtils.stringifyException(e));
    } finally {
        table.close();
    }
}
Project: LCIndex-HBase-0.94.16
File: RESTServlet.java
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @throws IOException
 */
RESTServlet(Configuration conf) throws IOException {
    this.conf = conf;
    int maxSize = conf.getInt("hbase.rest.htablepool.size", 10);
    this.pool = new HTablePool(conf, maxSize);
    this.admin = new HBaseAdmin(conf);
}
Project: LCIndex-HBase-0.94.16
File: SchemaResource.java
private HTableDescriptor getTableSchema() throws IOException,
        TableNotFoundException {
    HTablePool pool = servlet.getTablePool();
    HTableInterface table = pool.getTable(tableResource.getName());
    try {
        return table.getTableDescriptor();
    } finally {
        table.close();
    }
}
Project: LCIndex-HBase-0.94.16
File: ScannerResultGenerator.java
public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
        final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
        Scan scan;
        if (rowspec.hasEndRow()) {
            scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
        } else {
            scan = new Scan(rowspec.getStartRow());
        }
        if (rowspec.hasColumns()) {
            byte[][] columns = rowspec.getColumns();
            for (byte[] column : columns) {
                byte[][] split = KeyValue.parseColumn(column);
                if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
                    scan.addColumn(split[0], split[1]);
                } else {
                    scan.addFamily(split[0]);
                }
            }
        }
        scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
        scan.setMaxVersions(rowspec.getMaxVersions());
        if (filter != null) {
            scan.setFilter(filter);
        }
        // always disable block caching on the cluster when scanning
        scan.setCacheBlocks(false);
        scanner = table.getScanner(scan);
        cached = null;
        id = Long.toString(System.currentTimeMillis()) +
                Integer.toHexString(scanner.hashCode());
    } finally {
        table.close();
    }
}
Project: maker
File: WithinQuery.java
public static void main(String[] args)
        throws IOException, ParseException {
    if (args.length != 2 || (!"local".equals(args[0]) && !"remote".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    WKTReader reader = new WKTReader();
    Geometry query = reader.read(args[1]);
    HTablePool pool = new HTablePool();
    WithinQuery q = new WithinQuery(pool);
    Set<QueryMatch> results;
    if ("local".equals(args[0])) {
        results = q.query(query);
    } else {
        results = q.queryWithFilter(query);
    }

    System.out.println("Query matched " + results.size() + " points.");
    for (QueryMatch result : results) {
        System.out.println(result);
    }
    pool.close();
}
Project: openyu-commons
File: HTablePoolTest.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    Configuration configuration = createConfiguration();
    htablePool = new HTablePool(configuration, 10,
            PoolMap.PoolType.ThreadLocal);
}
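The third constructor argument selects the pool's internal storage strategy. Both values below are real PoolMap.PoolType constants in HBase 0.94; the surrounding setup is illustrative:
// ThreadLocal: each thread checks out and reuses its own table instance.
HTablePool perThreadPool = new HTablePool(configuration, 10, PoolMap.PoolType.ThreadLocal);
// Reusable (the default): closed tables return to a shared pool and may be
// handed to whichever thread asks for that table next.
HTablePool sharedPool = new HTablePool(configuration, 10, PoolMap.PoolType.Reusable);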
Project: IRIndex
File: RowResultGenerator.java
public RowResultGenerator(final String tableName, final RowSpec rowspec,
        final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
        Get get = new Get(rowspec.getRow());
        if (rowspec.hasColumns()) {
            for (byte[] col : rowspec.getColumns()) {
                byte[][] split = KeyValue.parseColumn(col);
                if (split.length == 2 && split[1].length != 0) {
                    get.addColumn(split[0], split[1]);
                } else {
                    get.addFamily(split[0]);
                }
            }
        }
        get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
        get.setMaxVersions(rowspec.getMaxVersions());
        if (filter != null) {
            get.setFilter(filter);
        }
        Result result = table.get(get);
        if (result != null && !result.isEmpty()) {
            valuesI = result.list().iterator();
        }
    } catch (DoNotRetryIOException e) {
        // Warn here because Stargate will return 404 in the case where multiple
        // column families were specified but one did not exist -- currently
        // HBase will fail the whole Get.
        // Specifying multiple columns in a URI should be uncommon usage, but
        // this helps avoid confusion by leaving a record of what happened here
        // in the log.
        LOG.warn(StringUtils.stringifyException(e));
    } finally {
        table.close();
    }
}
Project: IRIndex
File: RESTServlet.java
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @throws IOException
 */
RESTServlet(Configuration conf) throws IOException {
    this.conf = conf;
    int maxSize = conf.getInt("hbase.rest.htablepool.size", 10);
    this.pool = new HTablePool(conf, maxSize);
    this.admin = new HBaseAdmin(conf);
}
Project: IRIndex
File: SchemaResource.java
private HTableDescriptor getTableSchema() throws IOException,
        TableNotFoundException {
    HTablePool pool = servlet.getTablePool();
    HTableInterface table = pool.getTable(tableResource.getName());
    try {
        return table.getTableDescriptor();
    } finally {
        table.close();
    }
}
Project: IRIndex
File: ScannerResultGenerator.java
public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
        final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
        Scan scan;
        if (rowspec.hasEndRow()) {
            scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
        } else {
            scan = new Scan(rowspec.getStartRow());
        }
        if (rowspec.hasColumns()) {
            byte[][] columns = rowspec.getColumns();
            for (byte[] column : columns) {
                byte[][] split = KeyValue.parseColumn(column);
                if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
                    scan.addColumn(split[0], split[1]);
                } else {
                    scan.addFamily(split[0]);
                }
            }
        }
        scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
        scan.setMaxVersions(rowspec.getMaxVersions());
        if (filter != null) {
            scan.setFilter(filter);
        }
        // always disable block caching on the cluster when scanning
        scan.setCacheBlocks(false);
        scanner = table.getScanner(scan);
        cached = null;
        id = Long.toString(System.currentTimeMillis()) +
                Integer.toHexString(scanner.hashCode());
    } finally {
        table.close();
    }
}
Project: RStore
File: ReplicationSink.java
/**
 * Create a sink for replication
 *
 * @param conf conf object
 * @param stopper the Stoppable used to tell this sink to stop
 * @throws IOException thrown when HDFS goes bad or bad file name
 */
public ReplicationSink(Configuration conf, Stoppable stopper)
        throws IOException {
    this.conf = conf;
    this.pool = new HTablePool(this.conf,
            conf.getInt("replication.sink.htablepool.capacity", 10));
    this.stopper = stopper;
    this.metrics = new ReplicationSinkMetrics();
}
Project: RStore
File: RegionsResource.java
private Map<HRegionInfo, HServerAddress> getTableRegions()
        throws IOException {
    HTablePool pool = servlet.getTablePool();
    HTableInterface table = pool.getTable(tableResource.getName());
    try {
        return ((HTable) table).getRegionsInfo();
    } finally {
        pool.putTable(table);
    }
}
Project: RStore
File: SchemaResource.java
private HTableDescriptor getTableSchema() throws IOException,
        TableNotFoundException {
    HTablePool pool = servlet.getTablePool();
    HTableInterface table = pool.getTable(tableResource.getName());
    try {
        return table.getTableDescriptor();
    } finally {
        pool.putTable(table);
    }
}
Project: oceandata
File: HbaseDataSource.java
/**
 * Constructor
 *
 * @creatTime 3:57:06 PM
 * @author XuYi
 */
private HbaseDataSource() {
    // initialize the Configuration
    config = HBaseConfiguration.create();
    config.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
    // initialize the HTablePool
    pool = new HTablePool(config, maxSize, new TableFactory());
    initJMX();
}
Project: Tales
File: HBaseDataRepository.java
public HBaseDataRepository( Configuration theConfiguration, int theTablePoolsSize, HBaseTranslationFacility theTranslationFacility ) {
    Preconditions.checkNotNull( theConfiguration, "need configuration" );
    Preconditions.checkArgument( theTablePoolsSize > 0, "need a pool size greater than 0" );
    Preconditions.checkNotNull( theTranslationFacility, "need a translation facility" );

    configuration = theConfiguration;
    tablePool = new HTablePool( configuration, theTablePoolsSize );
    facilityManager = new SimpleFacilityManager();

    facilityManager.addFacility( StorageTypeFacility.class, theTranslationFacility.getStorageTypeFacility() );
    facilityManager.addFacility( HBaseTranslationFacility.class, theTranslationFacility );
    facilityManager.addFacility( DefinitionFacility.class, new HBaseDefinitionFacility( ) );
    facilityManager.addFacility( LifecycleFacility.class, new StandardLifecycleFacility<HBaseDataRepository, HBaseDataContext>( ) );
}
Project: foxtrot
File: HbaseTableConnection.java
@Override
public void start() throws Exception {
    logger.info("Starting HBase Connection");
    Configuration configuration = HBaseUtil.create(hbaseConfig);
    this.tablePool = new HTablePool(configuration, 10, PoolMap.PoolType.Reusable);
    this.hBaseAdmin = new HBaseAdmin(configuration);
    logger.info("Started HBase Connection");
}
Project: HGraph
File: Graph.java
/**
 * @param pool the HTablePool the graph reads vertex and edge tables from
 * @param conf the graph configuration
 */
protected Graph(HTablePool pool, Configuration conf) {
    super();
    this.POOL = pool;
    this.CONF = conf;

    String vertexTableName = this.CONF.get(HBaseGraphConstants.HBASE_GRAPH_TABLE_VERTEX_NAME_KEY);
    Validate.notEmpty(vertexTableName, HBaseGraphConstants.HBASE_GRAPH_TABLE_VERTEX_NAME_KEY
            + " shall not be null or empty");
    this.VERTEX_TABLE_NAME = vertexTableName;

    String edgeTableName = this.CONF.get(HBaseGraphConstants.HBASE_GRAPH_TABLE_EDGE_NAME_KEY);
    Validate.notEmpty(edgeTableName, HBaseGraphConstants.HBASE_GRAPH_TABLE_EDGE_NAME_KEY
            + " shall not be null or empty");
    this.EDGE_TABLE_NAME = edgeTableName;
}
Project: haeinsa
File: HaeinsaTestingCluster.java
private HaeinsaTestingCluster() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility utility = new HBaseTestingUtility(conf);
    utility.cleanupTestDir();
    cluster = utility.startMiniCluster();
    configuration = cluster.getConfiguration();
    threadPool = Executors.newCachedThreadPool();
    haeinsaTablePool = TestingUtility.createHaeinsaTablePool(configuration, threadPool);
    hbaseTablePool = new HTablePool(configuration, 128, PoolType.Reusable);
    transactionManager = new HaeinsaTransactionManager(haeinsaTablePool);
    createdTableNames = Sets.newHashSet();
}
Project: James
File: TablePool.java
/**
 * Use getInstance to get an instance of the {@link HTablePool}.
 *
 * On the first call, you can pass a specific {@link Configuration} to suit your needs.
 *
 * @param configuration
 * @return An instance of {@link TablePool}
 * @throws IOException
 */
public static synchronized TablePool getInstance(Configuration configuration) throws IOException {
    if (hbaseSchema == null) {
        TablePool.configuration = configuration;
        TablePool.hbaseSchema = new TablePool();
        TablePool.htablePool = new HTablePool(configuration, 100);
        ensureTable(HDomainList.TABLE_NAME, HDomainList.COLUMN_FAMILY_NAME);
        ensureTable(HRecipientRewriteTable.TABLE_NAME, HRecipientRewriteTable.COLUMN_FAMILY_NAME);
        ensureTable(HUsersRepository.TABLE_NAME, HUsersRepository.COLUMN_FAMILY_NAME);
    }
    return hbaseSchema;
}
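A minimal caller sketch; per the code above, only the first call wires up the HTablePool and creates the schema tables, and later calls return the same cached instance regardless of the configuration argument:
Configuration conf = HBaseConfiguration.create();
TablePool pool = TablePool.getInstance(conf); // first call: builds the pool, ensures tables
TablePool same = TablePool.getInstance(conf); // later calls: returns the cached instance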
Project: cdk
File: SpecificAvroDao.java
public SpecificMapCompositeAvroDao(HTablePool tablePool, String tableName,
        List<EntityMapper<S>> entityMappers) {
    super(tablePool, tableName, entityMappers);
    subEntitySchemas = Lists.newArrayList();
    for (EntityMapper<S> entityMapper : entityMappers) {
        subEntitySchemas.add(parser.parseEntitySchema(
                entityMapper.getEntitySchema().getRawSchema()).getAvroSchema());
    }
}
Project: cdk
File: ManagedDaoTest.java
@Before
public void before() throws Exception {
    tablePool = new HTablePool(HBaseTestUtils.getConf(), 10);
    SchemaTool tool = new SchemaTool(new HBaseAdmin(HBaseTestUtils.getConf()),
            new DefaultSchemaManager(tablePool));
    tool.createOrMigrateSchema(tableName, testRecord, true);
    tool.createOrMigrateSchema(tableName, testRecordv2, true);
    tool.createOrMigrateSchema(compositeTableName, compositeSubrecord1, true);
    tool.createOrMigrateSchema(compositeTableName, compositeSubrecord2, true);
    tool.createOrMigrateSchema(incrementTableName, testIncrement, true);
}
Project: cdk
File: HBaseTestUtils.java
public static SchemaManager initializeSchemaManager(
        HTablePool tablePool, String directory) throws Exception {
    SchemaManager entityManager = new DefaultSchemaManager(tablePool);
    SchemaTool schemaTool = new SchemaTool(new HBaseAdmin(getConf()),
            entityManager);
    schemaTool.createOrMigrateSchemaDirectory(directory, true);
    return entityManager;
}
Project: HBase-Research
File: RowResultGenerator.java
public RowResultGenerator(final String tableName, final RowSpec rowspec,
        final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
        Get get = new Get(rowspec.getRow());
        if (rowspec.hasColumns()) {
            for (byte[] col : rowspec.getColumns()) {
                byte[][] split = KeyValue.parseColumn(col);
                if (split.length == 2 && split[1].length != 0) {
                    get.addColumn(split[0], split[1]);
                } else {
                    get.addFamily(split[0]);
                }
            }
        }
        get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
        get.setMaxVersions(rowspec.getMaxVersions());
        if (filter != null) {
            get.setFilter(filter);
        }
        Result result = table.get(get);
        if (result != null && !result.isEmpty()) {
            valuesI = result.list().iterator();
        }
    } catch (DoNotRetryIOException e) {
        // Warn here because Stargate will return 404 in the case where multiple
        // column families were specified but one did not exist -- currently
        // HBase will fail the whole Get.
        // Specifying multiple columns in a URI should be uncommon usage, but
        // this helps avoid confusion by leaving a record of what happened here
        // in the log.
        LOG.warn(StringUtils.stringifyException(e));
    } finally {
        table.close();
    }
}
Project: HBase-Research
File: RESTServlet.java
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @throws IOException
 */
RESTServlet(Configuration conf) throws IOException {
    this.conf = conf;
    int maxSize = conf.getInt("hbase.rest.htablepool.size", 10);
    this.pool = new HTablePool(conf, maxSize);
    this.admin = new HBaseAdmin(conf);
}