@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new Tag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.add(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
/**
 * Writes an action (Put or Delete) to the specified table.
 *
 * @param tableName the table being updated.
 * @param action the update, either a put or a delete.
 * @throws IllegalArgumentException if the action is not a put or a delete.
 */
@Override
public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
  BufferedMutator mutator = getBufferedMutator(tableName);
  // The actions are not immutable, so we defensively copy them
  if (action instanceof Put) {
    Put put = new Put((Put) action);
    put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL : Durability.SKIP_WAL);
    mutator.mutate(put);
  } else if (action instanceof Delete) {
    Delete delete = new Delete((Delete) action);
    mutator.mutate(delete);
  } else {
    throw new IllegalArgumentException("action must be either Delete or Put");
  }
}
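A minimal usage sketch (not part of the source; the writer instance and names are hypothetical) showing that write() copies the incoming Put and replaces whatever durability the caller set, based on the useWriteAheadLogging flag:

// Hypothetical caller: the durability set here is overridden inside write() on the copied Put.
Put p = new Put(Bytes.toBytes("row-1"));
p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
p.setDurability(Durability.FSYNC_WAL); // ignored; write() sets SYNC_WAL or SKIP_WAL
writer.write(new ImmutableBytesWritable(Bytes.toBytes("my_table")), p);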
/**
 * Convert a protobuf Durability into a client Durability
 */
public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) {
  switch (proto) {
    case USE_DEFAULT: return Durability.USE_DEFAULT;
    case SKIP_WAL: return Durability.SKIP_WAL;
    case ASYNC_WAL: return Durability.ASYNC_WAL;
    case SYNC_WAL: return Durability.SYNC_WAL;
    case FSYNC_WAL: return Durability.FSYNC_WAL;
    default: return Durability.USE_DEFAULT;
  }
}
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new Tag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.add(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, opts.totalRows);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
int createMultiRegionsWithPBSerialization(final Configuration c, final TableName tableName,
    byte[][] startKeys) throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
    put.setDurability(Durability.SKIP_WAL);
    meta.put(put);
    LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
    newRegions.add(hri);
    count++;
  }
  meta.close();
  return count;
}
/**
 * @param tableName
 * @param startKey
 * @param stopKey
 * @param callingMethod
 * @param conf
 * @param isReadOnly
 * @param durability
 * @param wal
 * @param families
 * @throws IOException
 * @return A region on which you must call {@link HRegion#closeHRegion(HRegion)} when done.
 */
public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
    WAL wal, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setReadOnly(isReadOnly);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Keep all versions.
    hcd.setMaxVersions(Integer.MAX_VALUE);
    htd.addFamily(hcd);
  }
  htd.setDurability(durability);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  return createLocalHRegion(info, htd, wal);
}
private static int insertData(TableName tableName, String column, double prob)
    throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility(
        "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
/**
 * Convert a client Durability into a protobuf Durability
 */
public static ClientProtos.MutationProto.Durability toDurability(final Durability d) {
  switch (d) {
    case USE_DEFAULT: return ClientProtos.MutationProto.Durability.USE_DEFAULT;
    case SKIP_WAL: return ClientProtos.MutationProto.Durability.SKIP_WAL;
    case ASYNC_WAL: return ClientProtos.MutationProto.Durability.ASYNC_WAL;
    case SYNC_WAL: return ClientProtos.MutationProto.Durability.SYNC_WAL;
    case FSYNC_WAL: return ClientProtos.MutationProto.Durability.FSYNC_WAL;
    default: return ClientProtos.MutationProto.Durability.USE_DEFAULT;
  }
}
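The two converters above are symmetric and both fall back to USE_DEFAULT, so a quick round-trip check is possible. This is an illustrative sketch only, assuming both methods live in the same utility class (as they do in ProtobufUtil):

// Illustrative: every known client constant maps to the protobuf constant of the same
// name and back again.
for (Durability d : Durability.values()) {
  ClientProtos.MutationProto.Durability proto = toDurability(d);
  assert toDurability(proto) == d;
}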
static void writeTestDataBatch(Configuration conf, TableName tableName, int batchId)
    throws Exception {
  LOG.debug("Writing test data batch " + batchId);
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Put put = new Put(getRowKey(batchId, i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      put.add(CF_BYTES, getQualifier(j), getValue(batchId, i, j));
    }
    put.setDurability(Durability.SKIP_WAL);
    puts.add(put);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(tableName)) {
    table.put(puts);
  }
}
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = wals.getWAL(tableName);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  Increment inc1 = new Increment(row1);
  inc1.setReturnResults(false);
  inc1.addColumn(FAMILY, col1, 1);
  Result res = region.increment(inc1);
  assertNull(res);
}
private HRegion createHRegion(byte[] tableName, String callingMethod, WAL log,
    Durability durability) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setDurability(durability);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  if (FS.exists(path)) {
    if (!FS.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, CONF, htd, log);
}
private int addWideContent(HRegion region) throws IOException {
  int count = 0;
  for (char c = 'a'; c <= 'c'; c++) {
    byte[] row = Bytes.toBytes("ab" + c);
    int i, j;
    long ts = System.currentTimeMillis();
    for (i = 0; i < 100; i++) {
      byte[] b = Bytes.toBytes(String.format("%10d", i));
      for (j = 0; j < 100; j++) {
        Put put = new Put(row);
        put.setDurability(Durability.SKIP_WAL);
        put.add(COLUMNS[rng.nextInt(COLUMNS.length)], b, ++ts, b);
        region.put(put);
        count++;
      }
    }
  }
  return count;
}
private static Durability durabilityFromThrift(TDurability tDurability) {
  switch (tDurability.getValue()) {
    case 1: return Durability.SKIP_WAL;
    case 2: return Durability.ASYNC_WAL;
    case 3: return Durability.SYNC_WAL;
    case 4: return Durability.FSYNC_WAL;
    default: return null;
  }
}
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
    Durability durability) throws IOException {
  // check the put against the stored constraints
  for (Constraint c : constraints) {
    c.check(put);
  }
  // if we made it here, then the Put is valid
}
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, put);
  // Require WRITE permission to the table, CF, or top visible value, if any.
  // NOTE: We don't need to check the permissions for any earlier Puts
  // because we treat the ACLs in each Put as timestamped like any other
  // HBase value. A new ACL in a new Put applies to that Put. It doesn't
  // change the ACL of any previous Put. This allows simple evolution of
  // security policy over time without requiring expensive updates.
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<Cell>> families = put.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      put.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  // Add cell ACLs from the operation to the cells themselves
  byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, put.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }
}
private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
  WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getDataTestDir().toString(), conf);
  return (HRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf, false,
      Durability.SKIP_WAL, wal, INCREMENT_BYTES);
}
@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c, final Delete delete,
    final WALEdit edit, final Durability durability) throws IOException {
  // An ACL on a delete is useless, we shouldn't allow it
  if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
    throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString());
  }
  // Require WRITE permissions on all cells covered by the delete. Unlike
  // for Puts we need to check all visible prior versions, because a major
  // compaction could remove them. If the user doesn't have permission to
  // overwrite any of the visible versions ('visible' defined as not covered
  // by a tombstone already) then we have to disallow this operation.
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<Cell>> families = delete.getFamilyCellMap();
  User user = getActiveUser();
  AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      delete.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
}
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount * 2);
      inc.addColumn(fam2, qual3, amount * 3);
      inc.setDurability(Durability.ASYNC_WAL);
      region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
      // verify: Make sure we only see completed increments
      Get g = new Get(row);
      Result result = region.get(g);
      if (result != null) {
        assertTrue(result.getValue(fam1, qual1) != null);
        assertTrue(result.getValue(fam1, qual2) != null);
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2,
            Bytes.toLong(result.getValue(fam1, qual2)));
        assertTrue(result.getValue(fam2, qual3) != null);
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 3,
            Bytes.toLong(result.getValue(fam2, qual3)));
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
@Override
public Durability useDurability() {
  // Return the strongest durability requested by any of the mutations;
  // USE_DEFAULT has the lowest ordinal, so any explicit request wins.
  Durability durability = Durability.USE_DEFAULT;
  for (Mutation m : mutations) {
    if (m.getDurability().ordinal() > durability.ordinal()) {
      durability = m.getDurability();
    }
  }
  return durability;
}
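A small standalone illustration (not from the source) of the ordinal comparison above: USE_DEFAULT has the lowest ordinal, so any explicitly requested durability outranks it, and among explicit requests the stronger WAL guarantee has the higher ordinal.

// Illustrative only: a batch mixing SKIP_WAL and SYNC_WAL resolves to SYNC_WAL.
Durability resolved = Durability.USE_DEFAULT;
for (Durability requested : new Durability[] { Durability.SKIP_WAL, Durability.SYNC_WAL }) {
  if (requested.ordinal() > resolved.ordinal()) {
    resolved = requested;
  }
}
assert resolved == Durability.SYNC_WAL;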
public HTableDescriptor setValue(final ImmutableBytesWritable key,
    final ImmutableBytesWritable value) {
  if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
    boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
    LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
        "use " + DURABILITY + " instead");
    setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
    return this;
  }
  values.put(key, value);
  return this;
}
/**
 * @param put The Put object
 * @param edit The WALEdit object.
 * @param durability The durability used
 * @exception IOException Exception
 */
public void postPut(final Put put, final WALEdit edit, final Durability durability)
    throws IOException {
  execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      oserver.postPut(ctx, put, edit, durability);
    }
  });
}
/**
 * @param delete The Delete object
 * @param edit The WALEdit object.
 * @param durability The durability used
 * @return true if default processing should be bypassed
 * @exception IOException Exception
 */
public boolean preDelete(final Delete delete, final WALEdit edit, final Durability durability)
    throws IOException {
  return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      oserver.preDelete(ctx, delete, edit, durability);
    }
  });
}
/**
 * @param delete The Delete object
 * @param edit The WALEdit object.
 * @param durability The durability used
 * @exception IOException Exception
 */
public void postDelete(final Delete delete, final WALEdit edit, final Durability durability)
    throws IOException {
  execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      oserver.postDelete(ctx, delete, edit, durability);
    }
  });
}
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put m, WALEdit edit,
    Durability durability) throws IOException {
  byte[] attribute = m.getAttribute(NON_VISIBILITY);
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        // Prepend the non-visibility tag to the cell's existing tags and rewrite the cell.
        Tag tag = new Tag((byte) NON_VIS_TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        tagList.addAll(kv.getTags());
        byte[] fromList = Tag.fromList(tagList);
        TagRewriteCell newcell = new TagRewriteCell(kv, fromList);
        updatedCells.add(newcell);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
/**
 * Inserts multiple regions into hbase:meta using Writable serialization instead of PB
 */
public int createMultiRegionsWithWritableSerialization(final Configuration c,
    final TableName tableName, byte[][] startKeys) throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = new Put(hri.getRegionName());
    put.setDurability(Durability.SKIP_WAL);
    // this is the old Writable serialization
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, getBytes(hri));
    // also add the region as its daughters, again with the old Writable serialization
    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, getBytes(hri));
    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, getBytes(hri));
    meta.put(put);
    LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());
    newRegions.add(hri);
    count++;
  }
  meta.close();
  return count;
}
/**
 * Load region with rows from 'aaa' to 'zzz'.
 * @param r Region
 * @param f Family
 * @param flush flush the cache if true
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        // Skip the WAL for all of this bulk test data.
        put.setDurability(Durability.SKIP_WAL);
        put.add(f, null, k);
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            // Back off (doubling up to maxPause) and retry until the put succeeds.
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding)
    throws IOException {
  return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression,
      dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT);
}
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding,
    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
  hcd.setDataBlockEncoding(dataBlockEncoding);
  hcd.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
}
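A hedged usage sketch of the full overload above (table and family names are placeholders): creating a pre-split load-test table whose writes skip the WAL entirely.

// Illustrative call using the overload defined above; names are placeholders.
int regionCount = createPreSplitLoadTestTable(conf, TableName.valueOf("load_test"),
    Bytes.toBytes("test_cf"), Algorithm.NONE, DataBlockEncoding.NONE,
    DEFAULT_REGIONS_PER_SERVER, 1, Durability.SKIP_WAL);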
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte[] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
private static Put createPut(final byte[][] families, final byte[] key, final byte[] value) {
  byte[] q = Bytes.toBytes("q");
  Put put = new Put(key);
  put.setDurability(Durability.SKIP_WAL);
  for (byte[] family : families) {
    put.add(family, q, value);
  }
  return put;
}
private static int addToEachStartKey(final int expected) throws IOException {
  Table t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  Table meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte[] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
    Durability durability) throws IOException {
  Region region = e.getEnvironment().getRegion();
  if (!region.getRegionInfo().isMetaTable()
      && !region.getRegionInfo().getTable().isSystemTable()) {
    // The put carries the TTL attribute
    if (put.getTTL() != Long.MAX_VALUE) {
      return;
    }
    throw new IOException("Operation does not have TTL set");
  }
}
/**
 * Create a protocol buffer MutateRequest for a client increment
 *
 * @param regionName
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @param durability
 * @return a mutate request
 */
public static MutateRequest buildIncrementRequest(final byte[] regionName, final byte[] row,
    final byte[] family, final byte[] qualifier, final long amount,
    final Durability durability, long nonceGroup, long nonce) {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
  mutateBuilder.setRow(ByteStringer.wrap(row));
  mutateBuilder.setMutateType(MutationType.INCREMENT);
  mutateBuilder.setDurability(ProtobufUtil.toDurability(durability));
  ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
  columnBuilder.setFamily(ByteStringer.wrap(family));
  QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
  valueBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(amount)));
  valueBuilder.setQualifier(ByteStringer.wrap(qualifier));
  columnBuilder.addQualifierValue(valueBuilder.build());
  mutateBuilder.addColumnValue(columnBuilder.build());
  if (nonce != HConstants.NO_NONCE) {
    mutateBuilder.setNonce(nonce);
  }
  builder.setMutation(mutateBuilder.build());
  if (nonceGroup != HConstants.NO_NONCE) {
    builder.setNonceGroup(nonceGroup);
  }
  return builder.build();
}
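A brief hedged example of calling buildIncrementRequest() (the region name, row, family, and qualifier are placeholder values): increment a counter column by 1 without writing to the WAL and without a nonce.

// Illustrative only; regionName is a placeholder byte[] identifying the target region.
MutateRequest request = buildIncrementRequest(regionName, Bytes.toBytes("counter-row"),
    Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L, Durability.SKIP_WAL,
    HConstants.NO_NONCE, HConstants.NO_NONCE);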
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(
      Bytes.toBytes(tableName), null, null, tableName, conf, false, Durability.SYNC_WAL,
      null, Bytes.toBytes(family));
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  Table table = e.getEnvironment().getTable(otherTable);
  table.put(put);
  completed[0] = true;
  table.close();
}
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  Table table = e.getEnvironment().getTable(otherTable, getPool());
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  try {
    table.batch(Collections.singletonList(put));
  } catch (InterruptedException e1) {
    throw new IOException(e1);
  }
  completedWithPool[0] = true;
  table.close();
}
@SuppressWarnings("null")
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
    final WALEdit edit, final Durability durability) {
  String tableName =
      c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
  if (tableName.equals("observed_table")) {
    // Trigger a NPE to fail the coprocessor
    Integer i = null;
    i = i + 1;
  }
}