Java class org.apache.hadoop.hbase.client.Put: example source code
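This page collects example usages of org.apache.hadoop.hbase.client.Put drawn from a number of open-source projects. For orientation, here is a minimal sketch of the typical write path; it assumes a reachable cluster and an illustrative table "my_table" with column family "cf", none of which come from the projects listed below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  // Minimal sketch: build a Put keyed on a row, add one cell, and write it.
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("my_table"))) {
      Put put = new Put(Bytes.toBytes("row-1"));        // row key
      put.addColumn(Bytes.toBytes("cf"),                // column family
                    Bytes.toBytes("q"),                 // column qualifier
                    Bytes.toBytes("value"));            // cell value
      table.put(put);                                   // single write
    }
  }
}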
Project: stroom-stats
File: HBaseUniqueIdReverseMapTable.java
@Override
public boolean checkAndPutName(final byte[] bNewUid, final byte[] name) {
if (LOGGER.isTraceEnabled()) {
final String rowKeyStr = ByteArrayUtils.byteArrayToHex(bNewUid);
final String valueStr = Bytes.toString(name);
LOGGER.trace("checkAndPutName - Key: [" + rowKeyStr + "], Value: [" + valueStr + "]");
}
final Put put = new Put(bNewUid);
put.addColumn(NAME_FAMILY, NAME_COL_QUALIFIER, name);
boolean result;
// pass null as the expected value to ensure we only put if it didn't
// exist before
result = doCheckAndPut(bNewUid, NAME_FAMILY, NAME_COL_QUALIFIER, null, put);
return result;
}
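The comment in checkAndPutName relies on HBase's check-and-put semantics: passing null as the expected value makes the operation succeed only when the checked cell does not yet exist. A hedged sketch of that put-if-absent pattern written directly against the pre-2.0 Table API is shown below; the family and qualifier names are illustrative assumptions, not taken from the snippet above.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Put-if-absent sketch: the Put is applied only if the checked cell is currently absent.
static boolean putIfAbsent(Table table, byte[] rowKey, byte[] name) throws IOException {
    final byte[] family = Bytes.toBytes("n");         // illustrative column family
    final byte[] qualifier = Bytes.toBytes("name");   // illustrative qualifier
    Put put = new Put(rowKey);
    put.addColumn(family, qualifier, name);
    // null expected value: the server applies the Put only when no value exists yet
    return table.checkAndPut(rowKey, family, qualifier, null, put);
}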
Project: ditb
File: TestRemoteTable.java
@Test
public void testCheckAndDelete() throws IOException {
Get get = new Get(ROW_1);
Result result = remoteTable.get(get);
byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
assertNotNull(value1);
assertTrue(Bytes.equals(VALUE_1, value1));
assertNull(value2);
assertTrue(remoteTable.exists(get));
assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);
Delete delete = new Delete(ROW_1);
remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
assertFalse(remoteTable.exists(get));
Put put = new Put(ROW_1);
put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
remoteTable.put(put);
assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1,
put));
assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2,
put));
}
Project: ditb
File: MultiThreadedWriterWithACL.java
private void recordFailure(final Table table, final Put put, final long keyBase,
final long start, IOException e) {
failedKeySet.add(keyBase);
String exceptionInfo;
if (e instanceof RetriesExhaustedWithDetailsException) {
RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
exceptionInfo = aggEx.getExhaustiveDescription();
} else {
StringWriter stackWriter = new StringWriter();
PrintWriter pw = new PrintWriter(stackWriter);
e.printStackTrace(pw);
pw.flush();
exceptionInfo = StringUtils.stringifyException(e);
}
LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
+ "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
+ exceptionInfo);
}
Project: flume-release-1.7.0
File: SimpleHbaseEventSerializer.java
@Override
public List<Row> getActions() throws FlumeException {
List<Row> actions = new LinkedList<Row>();
if (plCol != null) {
byte[] rowKey;
try {
if (keyType == KeyType.TS) {
rowKey = SimpleRowKeyGenerator.getTimestampKey(rowPrefix);
} else if (keyType == KeyType.RANDOM) {
rowKey = SimpleRowKeyGenerator.getRandomKey(rowPrefix);
} else if (keyType == KeyType.TSNANO) {
rowKey = SimpleRowKeyGenerator.getNanoTimestampKey(rowPrefix);
} else {
rowKey = SimpleRowKeyGenerator.getUUIDKey(rowPrefix);
}
Put put = new Put(rowKey);
put.add(cf, plCol, payload);
actions.add(put);
} catch (Exception e) {
throw new FlumeException("Could not get row key!", e);
}
}
return actions;
}
Project: ditb
File: TestCompactionState.java
private static void loadData(final Table ht, final byte[][] families,
final int rows, final int flushes) throws IOException {
List<Put> puts = new ArrayList<Put>(rows);
byte[] qualifier = Bytes.toBytes("val");
for (int i = 0; i < flushes; i++) {
for (int k = 0; k < rows; k++) {
byte[] row = Bytes.toBytes(random.nextLong());
Put p = new Put(row);
for (int j = 0; j < families.length; ++j) {
p.add(families[j], qualifier, row);
}
puts.add(p);
}
ht.put(puts);
TEST_UTIL.flush();
puts.clear();
}
}
Project: ditb
File: TestHRegion.java
@Test
public void testWriteRequestsCounter() throws IOException {
byte[] fam = Bytes.toBytes("info");
byte[][] families = { fam };
this.region = initHRegion(tableName, method, CONF, families);
Assert.assertEquals(0L, region.getWriteRequestsCount());
Put put = new Put(row);
put.add(fam, fam, fam);
Assert.assertEquals(0L, region.getWriteRequestsCount());
region.put(put);
Assert.assertEquals(1L, region.getWriteRequestsCount());
region.put(put);
Assert.assertEquals(2L, region.getWriteRequestsCount());
region.put(put);
Assert.assertEquals(3L, region.getWriteRequestsCount());
region.delete(new Delete(row));
Assert.assertEquals(4L, region.getWriteRequestsCount());
HRegion.closeHRegion(this.region);
this.region = null;
}
Project: ditb
File: PerformanceEvaluation.java
@Override
void testRow(final int i) throws IOException {
byte[] row = getRandomRow(this.rand, this.totalRows);
Put put = new Put(row);
byte[] value = generateData(this.rand, ROW_LENGTH);
if (useTags) {
byte[] tag = generateData(this.rand, TAG_LENGTH);
Tag[] tags = new Tag[noOfTags];
for (int n = 0; n < noOfTags; n++) {
Tag t = new Tag((byte) n, tag);
tags[n] = t;
}
KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
value, tags);
put.add(kv);
} else {
put.add(FAMILY_NAME, QUALIFIER_NAME, value);
}
put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
mutator.mutate(put);
}
Project: ditb
File: TestVisibilityLabelsWithACL.java
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
throws Exception {
Table table = null;
try {
table = TEST_UTIL.createTable(tableName, fam);
int i = 1;
List<Put> puts = new ArrayList<Put>();
for (String labelExp : labelExps) {
Put put = new Put(Bytes.toBytes("row" + i));
put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
put.setCellVisibility(new CellVisibility(labelExp));
puts.add(put);
i++;
}
table.put(puts);
} finally {
if (table != null) {
table.close();
}
}
return table;
}
Project: ditb
File: TestHRegion.java
@Test
public void testFlushedFileWithNoTags() throws Exception {
String method = "testFlushedFileWithNoTags";
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor(fam1));
region = initHRegion(Bytes.toBytes(method), method, TEST_UTIL.getConfiguration(), fam1);
Put put = new Put(Bytes.toBytes("a-b-0-0"));
put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
region.put(put);
region.flush(true);
Store store = region.getStore(fam1);
Collection<StoreFile> storefiles = store.getStorefiles();
for (StoreFile sf : storefiles) {
assertFalse("Tags should not be present "
,sf.getReader().getHFileReader().getFileContext().isIncludesTags());
}
}
Project: ignite-hbase
File: HBaseCacheStoreTest.java
@Test
public void testManualHBaseInsertion() throws ServiceException, IOException {
IgniteConfiguration cfg = prepareConfig(false);
IgniteConfiguration cfg2 = new IgniteConfiguration(cfg);
cfg.setGridName("first");
cfg2.setGridName("second");
String cacheName = "myCache";
try (Ignite ignite = Ignition.getOrStart(cfg); Ignite ignite2 = Ignition.getOrStart(cfg2)) {
IgniteCache<String, String> cache = ignite.getOrCreateCache(cacheName);
cache.remove("Hello");
assertNull(cache.get("Hello"));
try (Connection conn = getHBaseConnection()) {
TableName tableName = TableName.valueOf(TABLE_NAME);
Table table = conn.getTable(tableName);
Serializer<Object> serializer = ObjectSerializer.INSTANCE;
Put put = new Put(serializer.serialize("Hello"));
put.addColumn(cacheName.getBytes(), QUALIFIER, serializer.serialize("World"));
table.put(put);
}
assertEquals("World", cache.get("Hello"));
}
}
Project: ditb
File: LMDIndexWriter.java
/**
* Parse the put and add the corresponding index record into mdRecordList
*/
private void processPut(List<KeyValue> mdRecordList, Put put) throws IOException {
if (put == null) return;
byte[] rawRowkey = put.getRow();
int[] arr = new int[dimensions];
int i = 0;
for (Map.Entry<byte[], TreeSet<byte[]>> entry : tableRelation.getIndexFamilyMap().entrySet()) {
for (byte[] qualifier : entry.getValue()) {
arr[i] = Bytes.toInt(put.get(entry.getKey(), qualifier).get(0).getValue());
++i;
}
}
byte[] mdKey = MDUtils.bitwiseZip(arr, dimensions);
KeyValue keyValue =
new KeyValue(mdKey, LMDIndexConstants.FAMILY, rawRowkey, put.getTimeStamp(), Type.Put,
LMDIndexConstants.VALUE);
mdRecordList.add(keyValue);
}
Project: ditb
File: TestServerCustomProtocol.java
@Before
public void before() throws Exception {
final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
Put puta = new Put( ROW_A );
puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
table.put(puta);
Put putb = new Put( ROW_B );
putb.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
table.put(putb);
Put putc = new Put( ROW_C );
putc.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
table.put(putc);
}
Project: ditb
File: MDIndex.java
/**
* @param dropIfExists whether to drop and recreate the tables if they already exist
*/
public void createTable(boolean dropIfExists) throws IOException {
if (admin.tableExists(secondaryTableName)) {
if (dropIfExists) {
admin.disableTable(bucketTableName);
admin.deleteTable(bucketTableName);
admin.disableTable(secondaryTableName);
admin.deleteTable(secondaryTableName);
} else {
secondaryTable = conn.getTable(secondaryTableName);
bucketTable = conn.getTable(bucketTableName);
return;
}
}
// secondary table
HTableDescriptor secondaryDesc = new HTableDescriptor(secondaryTableName);
secondaryDesc
.addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.SECONDARY_FAMILY));
admin.createTable(secondaryDesc);
secondaryTable = conn.getTable(secondaryTableName);
// bucket table
HTableDescriptor bucketDesc = new HTableDescriptor(bucketTableName);
bucketDesc.addFamily(IndexTableRelation.getDefaultColumnDescriptor(MDHBaseAdmin.BUCKET_FAMILY));
admin.createTable(bucketDesc);
bucketTable = conn.getTable(bucketTableName);
// initialize the first bucket row when the table is created
int[] starts = new int[dimensions];
Arrays.fill(starts, 0);
Put put = new Put(MDUtils.bitwiseZip(starts, dimensions));
put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER,
Bytes.toBytes(dimensions));
put.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER,
Bytes.toBytes(0L));
bucketTable.put(put);
}
Project: ditb
File: TestZooKeeper.java
/**
* Make sure we can use the cluster
* @throws Exception
*/
private void testSanity(final String testName) throws Exception{
String tableName = testName + "_" + System.currentTimeMillis();
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
HColumnDescriptor family = new HColumnDescriptor("fam");
desc.addFamily(family);
LOG.info("Creating table " + tableName);
Admin admin = TEST_UTIL.getHBaseAdmin();
try {
admin.createTable(desc);
} finally {
admin.close();
}
Table table =
new HTable(new Configuration(TEST_UTIL.getConfiguration()), desc.getTableName());
Put put = new Put(Bytes.toBytes("testrow"));
put.add(Bytes.toBytes("fam"),
Bytes.toBytes("col"), Bytes.toBytes("testdata"));
LOG.info("Putting table " + tableName);
table.put(put);
table.close();
}
Project: SkyEye
File: HbaseStore.java
@Override
public Map<String, List<Put>> store(String spanJson, Span span) {
// return all Puts to the upstream caller
Map<String, List<Put>> puts = new HashMap<String, List<Put>>();
if (span.getSample()) {
// separate out all of the annotations
Map<String, Annotation> annotationMap = this.distinguishAnnotation(span.getAnnotations());
Put spanPut = this.storeSpan(span, spanJson, annotationMap);
Put tracePut = this.storeTrace(span, annotationMap);
List<Put> annotationPuts = this.storeAnnotation(span, annotationMap);
puts.put(Constants.TABLE_TRACE, Lists.newArrayList(spanPut));
if (null != tracePut) {
puts.put(Constants.TABLE_TIME_CONSUME, Lists.newArrayList(tracePut));
}
if (null != annotationPuts) {
puts.put(Constants.TABLE_ANNOTATION, annotationPuts);
}
}
return puts;
}
Project: ditb
File: TestRegionObserverInterface.java
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
TableName tableName = TableName.valueOf("testHBASE14489");
HTable table = util.createTable(tableName, new byte[][] { A });
Put put = new Put(ROW);
put.addColumn(A, A, A);
table.put(put);
Scan s = new Scan();
s.setFilter(new FilterAllFilter());
ResultScanner scanner = table.getScanner(s);
try {
for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
}
} finally {
scanner.close();
}
verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
tableName, new Boolean[] { true });
util.deleteTable(tableName);
table.close();
}
Project: ditb
File: TestVisibilityLabelsWithDeletes.java
public static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
throws Exception {
Table table = null;
table = TEST_UTIL.createTable(tableName, fam);
int i = 1;
List<Put> puts = new ArrayList<Put>();
for (String labelExp : labelExps) {
Put put = new Put(Bytes.toBytes("row" + i));
put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
put.setCellVisibility(new CellVisibility(labelExp));
puts.add(put);
table.put(put);
i++;
}
// table.put(puts);
return table;
}
Project: ditb
File: RegionCoprocessorHost.java
/**
* @param row row to check
* @param family column family
* @param qualifier column qualifier
* @param compareOp the comparison operation
* @param comparator the comparator
* @param put data to put if check succeeds
* @return true or false to return to client if default processing should
* be bypassed, or null otherwise
* @throws IOException e
*/
public Boolean preCheckAndPutAfterRowLock(final byte[] row, final byte[] family,
final byte[] qualifier, final CompareOp compareOp, final ByteArrayComparable comparator,
final Put put) throws IOException {
return execOperationWithResult(true, false,
coprocessors.isEmpty() ? null : new RegionOperationWithResult<Boolean>() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
setResult(oserver.preCheckAndPutAfterRowLock(ctx, row, family, qualifier,
compareOp, comparator, put, getResult()));
}
});
}
Project: ditb
File: TestMasterReplication.java
private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
throws Exception {
Put put = new Put(row);
put.add(fam, row, row);
source.put(put);
wait(row, target, false);
}
Project: stroom-stats
File: HBaseTable.java
/**
* Gets a tableInterface, does the passed put on this table and then closes
* the tableInterface
*
* @param put
The HBase Put to apply
*/
public void doPut(final Put put) {
final Table tableInterface = getTable();
try {
doPut(tableInterface, put);
} finally {
closeTable(tableInterface);
}
}
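The javadoc above describes the acquire/put/close pattern that doPut wraps around doPut(tableInterface, put). Since the Table interface is Closeable, the same pattern can also be written with try-with-resources, as in the hedged sketch below; getTable() simply stands for whatever supplies the open Table and is an assumption here.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Sketch: try-with-resources closes the Table even if the put throws.
void doPutSketch(final Put put) throws IOException {
    try (Table table = getTable()) {   // getTable() is assumed to return an open Table
        table.put(put);
    }
}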
Project: ditb
File: TestVisibilityLabels.java
@Test
public void testLabelsWithIncrement() throws Throwable {
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
try (Table table = TEST_UTIL.createTable(tableName, fam)) {
byte[] row1 = Bytes.toBytes("row1");
byte[] val = Bytes.toBytes(1L);
Put put = new Put(row1);
put.add(fam, qual, HConstants.LATEST_TIMESTAMP, val);
put.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
table.put(put);
Get get = new Get(row1);
get.setAuthorizations(new Authorizations(SECRET));
Result result = table.get(get);
assertTrue(result.isEmpty());
table.incrementColumnValue(row1, fam, qual, 2L);
result = table.get(get);
assertTrue(result.isEmpty());
Increment increment = new Increment(row1);
increment.addColumn(fam, qual, 2L);
increment.setCellVisibility(new CellVisibility(SECRET));
table.increment(increment);
result = table.get(get);
assertTrue(!result.isEmpty());
}
}
Project: ditb
File: TestHRegionReplayEvents.java
@Test
public void testSeqIdsFromReplay() throws IOException {
// test the case where seqIds coming from replayed WALEdits are persisted with their
// original values and are made visible through the mvcc read point upon replay
String method = name.getMethodName();
byte[] tableName = Bytes.toBytes(method);
byte[] family = Bytes.toBytes("family");
HRegion region = initHRegion(tableName, method, family);
try {
// replay an entry that is bigger than current read point
long readPoint = region.getMVCC().getReadPoint();
long origSeqId = readPoint + 100;
Put put = new Put(row).add(family, row, row);
put.setDurability(Durability.SKIP_WAL); // we replay with skip wal
replay(region, put, origSeqId);
// read point should have advanced to this seqId
assertGet(region, family, row);
// region seqId should have advanced at least to this seqId
assertEquals(origSeqId, region.getSequenceId());
// replay an entry that is smaller than current read point
// caution: adding an entry below current read point might cause partial dirty reads. Normal
// replay does not allow reads while replay is going on.
put = new Put(row2).add(family, row2, row2);
put.setDurability(Durability.SKIP_WAL);
replay(region, put, origSeqId - 50);
assertGet(region, family, row2);
} finally {
region.close();
}
}
Project: ditb
File: TestConstraint.java
/**
* Test that constraints will fail properly
* @throws Exception
*/
@SuppressWarnings("unchecked")
@Test(timeout = 60000)
public void testConstraintFails() throws Exception {
// create the table
// it would be nice if this was also a method on the util
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : new byte[][] { dummy, test }) {
desc.addFamily(new HColumnDescriptor(family));
}
// add a constraint that is sure to fail
Constraints.add(desc, AllFailConstraint.class);
util.getHBaseAdmin().createTable(desc);
Table table = new HTable(util.getConfiguration(), tableName);
// test that we do fail on violation
Put put = new Put(row1);
put.add(dummy, new byte[0], "fail".getBytes());
LOG.warn("Doing put in table");
try {
table.put(put);
fail("This put should not have suceeded - AllFailConstraint was not run!");
} catch (RetriesExhaustedWithDetailsException e) {
List<Throwable> causes = e.getCauses();
assertEquals(
"More than one failure cause - should only be the failure constraint exception",
1, causes.size());
Throwable t = causes.get(0);
assertEquals(ConstraintException.class, t.getClass());
}
table.close();
}
Project: ditb
File: TestMergeTable.java
private HRegion createRegion(final HTableDescriptor desc,
byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
throws IOException {
HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
for(int i = firstRow; i < firstRow + nrows; i++) {
Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
put.setDurability(Durability.SKIP_WAL);
put.add(COLUMN_NAME, null, VALUE);
region.put(put);
if (i % 10000 == 0) {
LOG.info("Flushing write #" + i);
region.flush(true);
}
}
HRegion.closeHRegion(region);
return region;
}
Project: big_data
File: AnalyserLogDataRunner.java
@Override
public int run(String[] args) throws Exception {
Configuration conf = this.getConf();
this.processArgs(conf, args);
Job job = Job.getInstance(conf, "analyser_logdata");
// Code needed when submitting the job locally but running it on the cluster
// File jarFile = EJob.createTempJar("target/classes");
// ((JobConf) job.getConfiguration()).setJar(jarFile.toString());
// End of the code needed for local submission with cluster execution
job.setJarByClass(AnalyserLogDataRunner.class);
job.setMapperClass(AnalyserLogDataMapper.class);
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(Put.class);
// Configure the reducer
// 1. Run on the cluster from a packaged jar (requires addDependencyJars to be true, which is the default)
// TableMapReduceUtil.initTableReducerJob(EventLogConstants.HBASE_NAME_EVENT_LOGS,
// null, job);
// 2. Run locally (requires addDependencyJars to be false)
TableMapReduceUtil.initTableReducerJob(EventLogConstants.HBASE_NAME_EVENT_LOGS, null, job, null, null, null,
null, false);
job.setNumReduceTasks(0);
// Set the input paths
this.setJobInputPaths(job);
return job.waitForCompletion(true) ? 0 : -1;
}
Project: ditb
File: TestRegionObserverBypass.java
/**
* do a single put that is bypassed by a RegionObserver
* @throws Exception
*/
@Test
public void testSimple() throws Exception {
Table t = new HTable(util.getConfiguration(), tableName);
Put p = new Put(row1);
p.add(test,dummy,dummy);
// before HBASE-4331, this would throw an exception
t.put(p);
checkRowAndDelete(t,row1,0);
t.close();
}
Project: ditb
File: HBaseTestingUtility.java
/**
* Load table of multiple column families with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Array of Families to load
* @param value the values of the cells. If null is passed, the row key is used as value
* @return Count of rows loaded.
* @throws IOException
*/
public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
List<Put> puts = new ArrayList<>();
for (byte[] row : HBaseTestingUtility.ROWS) {
Put put = new Put(row);
put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
for (int i = 0; i < f.length; i++) {
put.add(f[i], null, value != null ? value : row);
}
puts.add(put);
}
t.put(puts);
return puts.size();
}
Project: ditb
File: TestForceCacheImportantBlocks.java
private void writeTestData(Region region) throws IOException {
for (int i = 0; i < NUM_ROWS; ++i) {
Put put = new Put(Bytes.toBytes("row" + i));
for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
Bytes.toBytes("value" + i + "_" + j + "_" + ts));
}
}
region.put(put);
if ((i + 1) % ROWS_PER_HFILE == 0) {
region.flush(true);
}
}
}
Project: ditb
File: IntegrationTestMTTR.java
@Override
protected boolean doAction() throws Exception {
Put p = new Put(Bytes.toBytes(RandomStringUtils.randomAlphanumeric(5)));
p.add(FAMILY, Bytes.toBytes("\0"), Bytes.toBytes(RandomStringUtils.randomAscii(5)));
table.put(p);
return true;
}
Project: hbase-client
File: BatchIt.java
@Test( description = "Test invalid batch parameter: batch size",
expectedExceptions = IllegalArgumentException.class )
public void testInvalidBatchSize() throws Exception {
Batch.newBuilder().withBatchSize(-20)
.withObjectCollection(Collections.emptyList())
.withMapper(o -> new Put(new byte[0]))
.withTable(testTable)
.build();
}
Project: hbase-client
File: BatchIt.java
@Test( description = "Test invalid batch parameter: empty object collection",
expectedExceptions = NullPointerException.class )
public void testInvalidObjectCollection() throws Exception {
Batch.newBuilder().withBatchSize(10)
.withObjectCollection(( Iterator<Object> ) null)
.withMapper(o -> new Put(new byte[0]))
.withTable(testTable)
.build();
}
Project: ditb
File: SnapshotTestingUtils.java
private static Put createPut(final byte[][] families, final byte[] key, final byte[] value) {
byte[] q = Bytes.toBytes("q");
Put put = new Put(key);
put.setDurability(Durability.SKIP_WAL);
for (byte[] family: families) {
put.add(family, q, value);
}
return put;
}
Project: hbase-client
File: BatchIt.java
@Test( description = "Create valid batch instance and call batch method",
dataProvider = "objCollection" )
public void testCreateValidBatchInstanceAndCallBatch( List<String> strCollection )
throws Exception {
final byte[] valQualifier = Bytes.toBytes("val");
Batch.<String>newBuilder()
.withBatchSize(3)
.withObjectCollection(strCollection)
.withMapper(string -> {
final byte[] key = Bytes.toBytes(string);
return new Put(key).addImmutable(TEST_CF_BYTES,
valQualifier,
key);
})
.withTable(testTable)
.build()
.call();
final ResultScanner scanner = testTable.getScanner(TEST_CF_BYTES, valQualifier);
final Integer rowCount
= StreamSupport.stream(scanner.spliterator(), false)
.reduce(0,
( curVal, hresult ) -> curVal + hresult.size(),
( val1, val2 ) -> val1 + val2);
assertThat(rowCount).isEqualTo(strCollection.size());
}
Project: hbase-client
File: BatchIt.java
@Test( description = "Create valid batch instance and call batch method on Iterator",
dataProvider = "objCollection" )
public void testCreateValidBatchInstanceAndCallBatchOnIterator( List<String> strCollection )
throws Exception {
final byte[] valQualifier = Bytes.toBytes("val");
final Batch<String> batch = Batch.<String>newBuilder()
.withBatchSize(3)
.withObjectCollection(strCollection.iterator())
.withMapper(string -> {
final byte[] key = Bytes.toBytes(string);
return new Put(key).addImmutable(TEST_CF_BYTES,
valQualifier,
key);
})
.withTable(testTable)
.build();
// a batch built on an iterator runs only once;
// subsequent calls must have no effect (and must not fail)
batch.call();
batch.call();
batch.call();
final ResultScanner scanner = testTable.getScanner(TEST_CF_BYTES, valQualifier);
final Integer rowCount
= StreamSupport.stream(scanner.spliterator(), false)
.reduce(0,
( curVal, hresult ) -> curVal + hresult.size(),
( val1, val2 ) -> val1 + val2);
assertThat(rowCount).isEqualTo(strCollection.size());
}
Project: ditb
File: TestHRegion.java
/**
* Test for HBASE-14229: Flushing canceled by coprocessor still leads to memstoreSize set down
*/
@Test
public void testMemstoreSizeWithFlushCanceling() throws IOException {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + "testMemstoreSizeWithFlushCanceling");
FSHLog hLog = new FSHLog(fs, rootDir, "testMemstoreSizeWithFlushCanceling", CONF);
HRegion region = initHRegion(tableName, null, null, name.getMethodName(),
CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES);
Store store = region.getStore(COLUMN_FAMILY_BYTES);
assertEquals(0, region.getMemstoreSize());
// Put some value and make sure flush could be completed normally
byte [] value = Bytes.toBytes(name.getMethodName());
Put put = new Put(value);
put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
region.put(put);
long onePutSize = region.getMemstoreSize();
assertTrue(onePutSize > 0);
region.flush(true);
assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
assertEquals("flushable size should be zero", 0, store.getFlushableSize());
// save normalCPHost and replaced by mockedCPHost, which will cancel flush requests
RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
when(mockedCPHost.preFlush(Mockito.isA(HStore.class), Mockito.isA(InternalScanner.class))).
thenReturn(null);
region.setCoprocessorHost(mockedCPHost);
region.put(put);
region.flush(true);
assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemstoreSize());
assertEquals("flushable size should NOT be zero", onePutSize, store.getFlushableSize());
// set normalCPHost and flush again, the snapshot will be flushed
region.setCoprocessorHost(normalCPHost);
region.flush(true);
assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
assertEquals("flushable size should be zero", 0, store.getFlushableSize());
HRegion.closeHRegion(region);
}
Project: ditb
File: TestSplitTransactionOnCluster.java
@Test
public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck()
throws Exception {
final TableName tableName =
TableName.valueOf("testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck");
try {
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("f"));
htd.addFamily(new HColumnDescriptor("i_f"));
htd.setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName());
admin.createTable(htd);
List<HRegion> regions = awaitTableRegions(tableName);
HRegion region = regions.get(0);
for(int i = 3;i<9;i++) {
Put p = new Put(Bytes.toBytes("row"+i));
p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
p.add(Bytes.toBytes("i_f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
region.put(p);
}
region.flush(true);
Store store = region.getStore(Bytes.toBytes("f"));
Collection<StoreFile> storefiles = store.getStorefiles();
assertEquals(storefiles.size(), 1);
assertFalse(region.hasReferences());
Path referencePath =
region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f",
storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
assertNull(referencePath);
referencePath =
region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f",
storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
assertNotNull(referencePath);
} finally {
TESTING_UTIL.deleteTable(tableName);
}
}
Project: ditb
File: TestRegionObserverStacking.java
@Override
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit,
final Durability durability)
throws IOException {
id = System.currentTimeMillis();
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
}
}
Project: ditb
File: TableOutputFormat.java
/**
* Writes a key/value pair into the table.
*
* @param key The key.
* @param value The value.
* @throws IOException When writing fails.
* @see RecordWriter#write(Object, Object)
*/
@Override
public void write(KEY key, Mutation value)
throws IOException {
if (!(value instanceof Put) && !(value instanceof Delete)) {
throw new IOException("Pass a Delete or a Put");
}
mutator.mutate(value);
}
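For reference, a hedged sketch of wiring a TableOutputFormat into a MapReduce job driver follows; the table name and job name are illustrative assumptions, and TableMapReduceUtil.initTableReducerJob (used in the big_data snippet above) performs essentially this wiring for you.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

// Sketch: point TableOutputFormat at a target table so that output values of
// type Put (or Delete) are written straight into HBase.
Job createHBaseOutputJob() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(TableOutputFormat.OUTPUT_TABLE, "event_logs");   // illustrative table name
    Job job = Job.getInstance(conf, "write-puts-to-hbase");   // illustrative job name
    job.setOutputFormatClass(TableOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Mutation.class);
    return job;
}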
Project: QDrill
File: TestTableGenerator.java
public static void generateHBaseDatasetBigIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILY_F));
if (numberRegions > 1) {
admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
} else {
admin.createTable(desc);
}
HTable table = new HTable(admin.getConfiguration(), tableName);
long startTime = (long)1438034423 * 1000;
for (long i = startTime; i <= startTime + 100; i ++) {
byte[] bytes = new byte[9];
org.apache.hadoop.hbase.util.PositionedByteRange br =
new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
org.apache.hadoop.hbase.util.Order.ASCENDING);
Put p = new Put(bytes);
p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
table.put(p);
}
table.flushCommits();
table.close();
admin.flush(tableName);
}
Project: QDrill
File: TestTableGenerator.java
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILY_F));
if (numberRegions > 1) {
admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
} else {
admin.createTable(desc);
}
HTable table = new HTable(admin.getConfiguration(), tableName);
long startTime = (long)1438034423 * 1000;
for (long i = startTime; i <= startTime + 100; i ++) {
byte[] bytes = new byte[9];
org.apache.hadoop.hbase.util.PositionedByteRange br =
new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
org.apache.hadoop.hbase.util.Order.DESCENDING);
Put p = new Put(bytes);
p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
table.put(p);
}
table.flushCommits();
table.close();
admin.flush(tableName);
}