Java Class org.apache.hadoop.hbase.client.Consistency Example Source Code
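The examples below show how projects use org.apache.hadoop.hbase.client.Consistency, the client-side enum that selects between STRONG reads (the default, served only by the primary region replica) and TIMELINE reads (which may be served by secondary region replicas and can therefore return slightly stale data). Before the project examples, here is a minimal sketch of a TIMELINE read; the Connection argument and the table name "t1" are hypothetical placeholders, not from any of the projects below:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

static Result timelineGet(Connection connection, byte[] row) throws IOException {
  try (Table table = connection.getTable(TableName.valueOf("t1"))) { // "t1" is a placeholder
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE); // allow secondary replicas to answer
    Result result = table.get(get);
    if (result.isStale()) {
      // the read was answered by a secondary replica and may lag the primary
    }
    return result;
  }
}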
Project: ditb
File: HBaseTestingUtility.java
public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Get get = new Get(data);
    get.setReplicaId(replicaId);
    get.setConsistency(Consistency.TIMELINE);
    Result result = table.get(get);
    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength()));
  }
}
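A hedged usage sketch of the helper above, assuming HTU is the HBaseTestingUtility instance and family is a column family of the table; the 0..100 range and replica id 1 are illustrative: load rows into the primary, flush so the secondary replica can see the store files, then verify against that replica.

HTU.loadNumericRows(table, family, 0, 100);
// flush the primary region so the secondary replica can pick up the new store files
HTU.flush(table.getName());
HTU.verifyNumericRows(table, family, 0, 100, 1);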
Project: ditb
File: MultiThreadedReader.java
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
Project: ditb
File: TestRegionReplicas.java
@Test(timeout = 60000)
public void testGetOnTargetRegionReplica() throws Exception {
  try {
    // load some data to primary
    HTU.loadNumericRows(table, f, 0, 1000);
    // assert that we can read back from primary
    Assert.assertEquals(1000, HTU.countRows(table));
    // flush so that region replica can read
    Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    openRegion(HTU, getRS(), hriSecondary);
    // try a Get directly against the region replica
    byte[] row = Bytes.toBytes(String.valueOf(42));
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    Result result = table.get(get);
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}
Project: pbase
File: MultiThreadedReader.java
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
Project: pbase
File: TestRegionReplicas.java
@Test(timeout = 60000)
public void testGetOnTargetRegionReplica() throws Exception {
  try {
    // load some data to primary
    HTU.loadNumericRows(table, f, 0, 1000);
    // assert that we can read back from primary
    Assert.assertEquals(1000, HTU.countRows(table));
    // flush so that region replica can read
    getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache();
    openRegion(hriSecondary);
    // try a Get directly against the region replica
    byte[] row = Bytes.toBytes(String.valueOf(42));
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    Result result = table.get(get);
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(hriSecondary);
  }
}
Project: hbase
File: HBaseTestingUtility.java
public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Get get = new Get(data);
    get.setReplicaId(replicaId);
    get.setConsistency(Consistency.TIMELINE);
    Result result = table.get(get);
    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength()));
  }
}
Project: hbase
File: MultiThreadedReader.java
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
Project: hbase
File: TestRegionReplicas.java
@Test(timeout = 60000)
public void testGetOnTargetRegionReplica() throws Exception {
  try {
    // load some data to primary
    HTU.loadNumericRows(table, f, 0, 1000);
    // assert that we can read back from primary
    Assert.assertEquals(1000, HTU.countRows(table));
    // flush so that region replica can read
    HRegion region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    openRegion(HTU, getRS(), hriSecondary);
    // try a Get directly against the region replica
    byte[] row = Bytes.toBytes(String.valueOf(42));
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    Result result = table.get(get);
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}
Project: hbase
File: MetaTableAccessor.java
private static Scan getMetaScan(Connection connection, int rowUpperLimit) {
  Scan scan = new Scan();
  int scannerCaching = connection.getConfiguration()
      .getInt(HConstants.HBASE_META_SCANNER_CACHING,
          HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
  if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS,
      HConstants.DEFAULT_USE_META_REPLICAS)) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  if (rowUpperLimit > 0) {
    scan.setLimit(rowUpperLimit);
    scan.setReadType(Scan.ReadType.PREAD);
  }
  scan.setCaching(scannerCaching);
  return scan;
}
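getMetaScan only switches meta scans to TIMELINE when the client is configured to use hbase:meta replicas. A minimal sketch of that client-side toggle, assuming the cluster is already serving meta replicas:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.USE_META_REPLICAS, true); // key "hbase.meta.replicas.use"
try (Connection connection = ConnectionFactory.createConnection(conf)) {
  // meta scans built through getMetaScan(...) will now carry Consistency.TIMELINE
}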
Project: ditb
File: PerformanceEvaluation.java
RandomReadTest(Connection con, TestOptions options, Status status) {
  super(con, options, status);
  consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
  if (opts.multiGet > 0) {
    LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + ".");
    this.gets = new ArrayList<Get>(opts.multiGet);
  }
}
Project: ditb
File: TestRegionReplicaFailover.java
/**
 * Tests that for a newly created table with region replicas and no data, the secondary
 * region replicas are available to read immediately.
 */
@Test(timeout = 60000)
public void testSecondaryRegionWithEmptyRegion() throws IOException {
  // Create a new table with region replication, don't put any data. Test that the secondary
  // region replica is available to read.
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    table.get(get); // this should not block
  }
}
Project: ditb
File: TestRegionReplicaFailover.java
/**
 * Tests the case where we are creating a table with a lot of regions and replicas. Opening
 * region replicas should not block handlers on RS indefinitely.
 */
@Test(timeout = 120000)
public void testLotsOfRegionReplicas() throws IOException {
  int numRegions = NB_SERVERS * 20;
  int regionReplication = 10;
  String tableName = htd.getTableName().getNameAsString() + "2";
  htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  // don't care about the splits themselves too much
  byte[] startKey = Bytes.toBytes("aaa");
  byte[] endKey = Bytes.toBytes("zzz");
  byte[][] splits = HTU.getRegionSplitStartKeys(startKey, endKey, numRegions);
  HTU.getHBaseAdmin().createTable(htd, startKey, endKey, numRegions);
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {
    for (int i = 1; i < splits.length; i++) {
      for (int j = 0; j < regionReplication; j++) {
        Get get = new Get(splits[i]);
        get.setConsistency(Consistency.TIMELINE);
        get.setReplicaId(j);
        table.get(get); // this should not block. Regions should be coming online
      }
    }
  }
  HTU.deleteTableIfAny(TableName.valueOf(tableName));
}
Project: ditb
File: IntegrationTestBulkLoad.java
/**
 * After adding data to the table, start an MR job to check the bulk load.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
  LOG.info("Running check");
  Configuration conf = getConf();
  String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
  Path p = util.getDataTestDirOnTestFS(jobName);
  Job job = new Job(conf);
  job.setJarByClass(getClass());
  job.setJobName(jobName);
  job.setPartitionerClass(NaturalKeyPartitioner.class);
  job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
  job.setSortComparatorClass(CompositeKeyComparator.class);
  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  TableMapReduceUtil.initTableMapperJob(
      getTablename().getName(),
      scan,
      LinkedListCheckingMapper.class,
      LinkKey.class,
      LinkChain.class,
      job
  );
  job.setReducerClass(LinkedListCheckingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  FileOutputFormat.setOutputPath(job, p);
  assertEquals(true, job.waitForCompletion(true));
  // Delete the files.
  util.getTestFileSystem().delete(p, true);
}
Project: ditb
File: ProtobufUtil.java
public static Consistency toConsistency(ClientProtos.Consistency consistency) {
  switch (consistency) {
    case STRONG: return Consistency.STRONG;
    case TIMELINE: return Consistency.TIMELINE;
    default: return Consistency.STRONG;
  }
}
Project: ditb
File: ProtobufUtil.java
public static ClientProtos.Consistency toConsistency(Consistency consistency) {
  switch (consistency) {
    case STRONG: return ClientProtos.Consistency.STRONG;
    case TIMELINE: return ClientProtos.Consistency.TIMELINE;
    default: return ClientProtos.Consistency.STRONG;
  }
}
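The two converters are inverses of each other for STRONG and TIMELINE, and any unrecognized value falls back to STRONG. A quick round-trip sketch using only the two methods above; the package names assume the pre-2.0, non-shaded protobuf layout that ditb uses:

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

// client enum -> protobuf enum -> client enum
ClientProtos.Consistency proto = ProtobufUtil.toConsistency(Consistency.TIMELINE);
assert proto == ClientProtos.Consistency.TIMELINE;
assert ProtobufUtil.toConsistency(proto) == Consistency.TIMELINE; // round trip preserved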
Project: pbase
File: PerformanceEvaluation.java
RandomReadTest(Connection con, TestOptions options, Status status) {
  super(con, options, status);
  consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
  if (opts.multiGet > 0) {
    LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + ".");
    this.gets = new ArrayList<Get>(opts.multiGet);
  }
}
Project: pbase
File: ProtobufUtil.java
public static Consistency toConsistency(ClientProtos.Consistency consistency) {
  switch (consistency) {
    case STRONG: return Consistency.STRONG;
    case TIMELINE: return Consistency.TIMELINE;
    default: return Consistency.STRONG;
  }
}
Project: pbase
File: ProtobufUtil.java
public static ClientProtos.Consistency toConsistency(Consistency consistency) {
  switch (consistency) {
    case STRONG: return ClientProtos.Consistency.STRONG;
    case TIMELINE: return ClientProtos.Consistency.TIMELINE;
    default: return ClientProtos.Consistency.STRONG;
  }
}
Project: hbase
File: TestRegionReplicaFailover.java
/**
 * Tests that for a newly created table with region replicas and no data, the secondary
 * region replicas are available to read immediately.
 */
@Test(timeout = 60000)
public void testSecondaryRegionWithEmptyRegion() throws IOException {
  // Create a new table with region replication, don't put any data. Test that the secondary
  // region replica is available to read.
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    table.get(get); // this should not block
  }
}
Project: hbase
File: TestRegionReplicaFailover.java
/**
 * Tests the case where we are creating a table with a lot of regions and replicas. Opening
 * region replicas should not block handlers on RS indefinitely.
 */
@Test(timeout = 120000)
public void testLotsOfRegionReplicas() throws IOException {
  int numRegions = NB_SERVERS * 20;
  int regionReplication = 10;
  String tableName = htd.getTableName().getNameAsString() + "2";
  htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  // don't care about the splits themselves too much
  byte[] startKey = Bytes.toBytes("aaa");
  byte[] endKey = Bytes.toBytes("zzz");
  byte[][] splits = HTU.getRegionSplitStartKeys(startKey, endKey, numRegions);
  HTU.getAdmin().createTable(htd, startKey, endKey, numRegions);
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {
    for (int i = 1; i < splits.length; i++) {
      for (int j = 0; j < regionReplication; j++) {
        Get get = new Get(splits[i]);
        get.setConsistency(Consistency.TIMELINE);
        get.setReplicaId(j);
        table.get(get); // this should not block. Regions should be coming online
      }
    }
  }
  HTU.deleteTableIfAny(TableName.valueOf(tableName));
}
Project: hbase
File: IntegrationTestSparkBulkLoad.java
/**
 * After adding data to the table, start a Spark job to check the bulk load.
 */
public void runCheck() throws Exception {
  LOG.info("Running check");
  String jobName = IntegrationTestSparkBulkLoad.class.getSimpleName() + "_check" +
      EnvironmentEdgeManager.currentTime();
  SparkConf sparkConf = new SparkConf().setAppName(jobName).setMaster("local");
  Configuration hbaseConf = new Configuration(getConf());
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);
  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
  if (replicaCount != DEFAULT_NUM_REPLICA_COUNT) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  // 1. Use TableInputFormat to get data from the HBase table
  // 2. Mimic LinkedListCheckingMapper in mapreduce.IntegrationTestBulkLoad
  // 3. Sort LinkKey by its order ID
  // 4. Group LinkKeys that share a chainId, and repartition the RDD by NaturalKeyPartitioner
  // 5. Check the LinkList in each partition using LinkedListCheckingFlatMapFunc
  hbaseContext.hbaseRDD(getTablename(), scan).flatMapToPair(new LinkedListCheckingFlatMapFunc())
      .sortByKey()
      .combineByKey(new createCombinerFunc(), new mergeValueFunc(), new mergeCombinersFunc(),
          new NaturalKeyPartitioner(new SerializableWritable<>(hbaseConf)))
      .foreach(new LinkedListCheckingForeachFunc(new SerializableWritable<>(hbaseConf)));
  jsc.close();
}
Project: hbase
File: PerformanceEvaluation.java
AsyncRandomReadTest(AsyncConnection con, TestOptions options, Status status) {
  super(con, options, status);
  consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
  if (opts.multiGet > 0) {
    LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + ".");
    this.gets = new ArrayList<>(opts.multiGet);
  }
}
Project: hbase
File: PerformanceEvaluation.java
RandomReadTest(Connection con, TestOptions options, Status status) {
  super(con, options, status);
  consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
  if (opts.multiGet > 0) {
    LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + ".");
    this.gets = new ArrayList<>(opts.multiGet);
  }
}
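In these constructors, consistency stays null when the replica count equals the default, so reads keep HBase's default STRONG behavior; only when extra replicas are requested does the benchmark switch to TIMELINE. A hedged sketch of how such a field would then be applied per Get (the surrounding testRow method is not part of this listing, so the exact wiring may differ):

Get get = new Get(row);
if (consistency != null) { // null means: leave the default (STRONG) in place
  get.setConsistency(consistency);
}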
Project: hbase
File: AsyncMetaTableAccessor.java
private static Scan getMetaScan(AsyncTable<?> metaTable, int rowUpperLimit) {
  Scan scan = new Scan();
  int scannerCaching = metaTable.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING,
      HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
  if (metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS,
      HConstants.DEFAULT_USE_META_REPLICAS)) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  if (rowUpperLimit <= scannerCaching) {
    scan.setLimit(rowUpperLimit);
  }
  int rows = Math.min(rowUpperLimit, scannerCaching);
  scan.setCaching(rows);
  return scan;
}
Project: hbase
File: ProtobufUtil.java
public static Consistency toConsistency(ClientProtos.Consistency consistency) {
  switch (consistency) {
    case STRONG: return Consistency.STRONG;
    case TIMELINE: return Consistency.TIMELINE;
    default: return Consistency.STRONG;
  }
}
Project: hbase
File: ProtobufUtil.java
public static ClientProtos.Consistency toConsistency(Consistency consistency) {
  switch (consistency) {
    case STRONG: return ClientProtos.Consistency.STRONG;
    case TIMELINE: return ClientProtos.Consistency.TIMELINE;
    default: return ClientProtos.Consistency.STRONG;
  }
}
Project: hbase
File: ProtobufUtil.java
public static Consistency toConsistency(ClientProtos.Consistency consistency) {
  switch (consistency) {
    case STRONG: return Consistency.STRONG;
    case TIMELINE: return Consistency.TIMELINE;
    default: return Consistency.STRONG;
  }
}
Project: hbase
File: ProtobufUtil.java
public static ClientProtos.Consistency toConsistency(Consistency consistency) {
  switch (consistency) {
    case STRONG: return ClientProtos.Consistency.STRONG;
    case TIMELINE: return ClientProtos.Consistency.TIMELINE;
    default: return ClientProtos.Consistency.STRONG;
  }
}
Project: ditb
File: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@Override
protected Get createGet(long keyToRead) throws IOException {
  Get get = super.createGet(keyToRead);
  get.setConsistency(Consistency.TIMELINE);
  return get;
}
Project: ditb
File: ProtobufUtil.java
/**
 * Create a protocol buffer Get based on a client Get.
 *
 * @param get the client Get
 * @return a protocol buffer Get
 * @throws IOException
 */
public static ClientProtos.Get toGet(final Get get) throws IOException {
  ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
  builder.setRow(ByteStringer.wrap(get.getRow()));
  builder.setCacheBlocks(get.getCacheBlocks());
  builder.setMaxVersions(get.getMaxVersions());
  if (get.getFilter() != null) {
    builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
  }
  for (Entry<byte[], TimeRange> cftr : get.getColumnFamilyTimeRange().entrySet()) {
    HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
    b.setColumnFamily(ByteString.copyFrom(cftr.getKey()));
    b.setTimeRange(timeRangeToProto(cftr.getValue()));
    builder.addCfTimeRange(b);
  }
  TimeRange timeRange = get.getTimeRange();
  if (!timeRange.isAllTime()) {
    HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
    timeRangeBuilder.setFrom(timeRange.getMin());
    timeRangeBuilder.setTo(timeRange.getMax());
    builder.setTimeRange(timeRangeBuilder.build());
  }
  Map<String, byte[]> attributes = get.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  if (get.hasFamilies()) {
    Column.Builder columnBuilder = Column.newBuilder();
    Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
      NavigableSet<byte[]> qualifiers = family.getValue();
      columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
      columnBuilder.clearQualifier();
      if (qualifiers != null && qualifiers.size() > 0) {
        for (byte[] qualifier : qualifiers) {
          columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
        }
      }
      builder.addColumn(columnBuilder.build());
    }
  }
  if (get.getMaxResultsPerColumnFamily() >= 0) {
    builder.setStoreLimit(get.getMaxResultsPerColumnFamily());
  }
  if (get.getRowOffsetPerColumnFamily() > 0) {
    builder.setStoreOffset(get.getRowOffsetPerColumnFamily());
  }
  if (get.isCheckExistenceOnly()) {
    builder.setExistenceOnly(true);
  }
  if (get.isClosestRowBefore()) {
    builder.setClosestRowBefore(true);
  }
  if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
    builder.setConsistency(toConsistency(get.getConsistency()));
  }
  return builder.build();
}
Project: AbacusUtil
File: AnyScan.java
public Consistency getConsistency() {
  return scan.getConsistency();
}
Project: AbacusUtil
File: AnyScan.java
public AnyScan setConsistency(Consistency consistency) {
  scan.setConsistency(consistency);
  return this;
}
Project: AbacusUtil
File: AnyGet.java
public Consistency getConsistency() {
  return get.getConsistency();
}
Project: AbacusUtil
File: AnyGet.java
public AnyGet setConsistency(Consistency consistency) {
  get.setConsistency(consistency);
  return this;
}
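AnyGet and AnyScan are AbacusUtil's fluent wrappers around HBase's Get and Scan, so the consistency setter can be chained with other configuration calls. A usage sketch; the AnyGet.of(...) factory is a hypothetical creation method, so check the AbacusUtil API for the actual way to construct the wrapper:

AnyGet anyGet = AnyGet.of(Bytes.toBytes("row-1")) // hypothetical factory method
    .setConsistency(Consistency.TIMELINE);        // chained, returns the wrapper itself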
Project: pbase
File: ProtobufUtil.java
/**
 * Create a protocol buffer Get based on a client Get.
 *
 * @param get the client Get
 * @return a protocol buffer Get
 * @throws IOException
 */
public static ClientProtos.Get toGet(final Get get) throws IOException {
  ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
  builder.setRow(ByteStringer.wrap(get.getRow()));
  builder.setCacheBlocks(get.getCacheBlocks());
  builder.setMaxVersions(get.getMaxVersions());
  if (get.getFilter() != null) {
    builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
  }
  TimeRange timeRange = get.getTimeRange();
  if (!timeRange.isAllTime()) {
    HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
    timeRangeBuilder.setFrom(timeRange.getMin());
    timeRangeBuilder.setTo(timeRange.getMax());
    builder.setTimeRange(timeRangeBuilder.build());
  }
  Map<String, byte[]> attributes = get.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  if (get.hasFamilies()) {
    Column.Builder columnBuilder = Column.newBuilder();
    Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
      NavigableSet<byte[]> qualifiers = family.getValue();
      columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
      columnBuilder.clearQualifier();
      if (qualifiers != null && qualifiers.size() > 0) {
        for (byte[] qualifier : qualifiers) {
          columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
        }
      }
      builder.addColumn(columnBuilder.build());
    }
  }
  if (get.getMaxResultsPerColumnFamily() >= 0) {
    builder.setStoreLimit(get.getMaxResultsPerColumnFamily());
  }
  if (get.getRowOffsetPerColumnFamily() > 0) {
    builder.setStoreOffset(get.getRowOffsetPerColumnFamily());
  }
  if (get.isCheckExistenceOnly()) {
    builder.setExistenceOnly(true);
  }
  if (get.isClosestRowBefore()) {
    builder.setClosestRowBefore(true);
  }
  if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
    builder.setConsistency(toConsistency(get.getConsistency()));
  }
  return builder.build();
}
Project: hbase
File: IntegrationTestBulkLoad.java
/**
 * After adding data to the table, start an MR job to check the bulk load.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
  LOG.info("Running check");
  Configuration conf = getConf();
  String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
  Path p = util.getDataTestDirOnTestFS(jobName);
  Job job = new Job(conf);
  job.setJarByClass(getClass());
  job.setJobName(jobName);
  job.setPartitionerClass(NaturalKeyPartitioner.class);
  job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
  job.setSortComparatorClass(CompositeKeyComparator.class);
  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  TableMapReduceUtil.initTableMapperJob(
      getTablename().getName(),
      scan,
      LinkedListCheckingMapper.class,
      LinkKey.class,
      LinkChain.class,
      job
  );
  job.setReducerClass(LinkedListCheckingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  FileOutputFormat.setOutputPath(job, p);
  assertEquals(true, job.waitForCompletion(true));
  // Delete the files.
  util.getTestFileSystem().delete(p, true);
}
Project: hbase
File: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@Override
protected Get createGet(long keyToRead) throws IOException {
  Get get = super.createGet(keyToRead);
  get.setConsistency(Consistency.TIMELINE);
  return get;
}
Project: hbase
File: ProtobufUtil.java
/**
 * Create a protocol buffer Get based on a client Get.
 *
 * @param get the client Get
 * @return a protocol buffer Get
 * @throws IOException
 */
public static ClientProtos.Get toGet(final Get get) throws IOException {
  ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
  builder.setRow(ByteStringer.wrap(get.getRow()));
  builder.setCacheBlocks(get.getCacheBlocks());
  builder.setMaxVersions(get.getMaxVersions());
  if (get.getFilter() != null) {
    builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
  }
  for (Entry<byte[], TimeRange> cftr : get.getColumnFamilyTimeRange().entrySet()) {
    HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
    b.setColumnFamily(ByteStringer.wrap(cftr.getKey()));
    b.setTimeRange(timeRangeToProto(cftr.getValue()));
    builder.addCfTimeRange(b);
  }
  TimeRange timeRange = get.getTimeRange();
  if (!timeRange.isAllTime()) {
    HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
    timeRangeBuilder.setFrom(timeRange.getMin());
    timeRangeBuilder.setTo(timeRange.getMax());
    builder.setTimeRange(timeRangeBuilder.build());
  }
  Map<String, byte[]> attributes = get.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  if (get.hasFamilies()) {
    Column.Builder columnBuilder = Column.newBuilder();
    Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
      NavigableSet<byte[]> qualifiers = family.getValue();
      columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
      columnBuilder.clearQualifier();
      if (qualifiers != null && qualifiers.size() > 0) {
        for (byte[] qualifier : qualifiers) {
          columnBuilder.addQualifier(ByteStringer.wrap(qualifier));
        }
      }
      builder.addColumn(columnBuilder.build());
    }
  }
  if (get.getMaxResultsPerColumnFamily() >= 0) {
    builder.setStoreLimit(get.getMaxResultsPerColumnFamily());
  }
  if (get.getRowOffsetPerColumnFamily() > 0) {
    builder.setStoreOffset(get.getRowOffsetPerColumnFamily());
  }
  if (get.isCheckExistenceOnly()) {
    builder.setExistenceOnly(true);
  }
  if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
    builder.setConsistency(toConsistency(get.getConsistency()));
  }
  Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
  if (loadColumnFamiliesOnDemand != null) {
    builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
  }
  return builder.build();
}
Project: hbase
File: ProtobufUtil.java
/**
 * Create a protocol buffer Get based on a client Get.
 *
 * @param get the client Get
 * @return a protocol buffer Get
 * @throws IOException
 */
public static ClientProtos.Get toGet(final Get get) throws IOException {
  ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
  builder.setRow(UnsafeByteOperations.unsafeWrap(get.getRow()));
  builder.setCacheBlocks(get.getCacheBlocks());
  builder.setMaxVersions(get.getMaxVersions());
  if (get.getFilter() != null) {
    builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
  }
  for (Entry<byte[], TimeRange> cftr : get.getColumnFamilyTimeRange().entrySet()) {
    HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
    b.setColumnFamily(UnsafeByteOperations.unsafeWrap(cftr.getKey()));
    b.setTimeRange(timeRangeToProto(cftr.getValue()));
    builder.addCfTimeRange(b);
  }
  TimeRange timeRange = get.getTimeRange();
  if (!timeRange.isAllTime()) {
    HBaseProtos.TimeRange.Builder timeRangeBuilder = HBaseProtos.TimeRange.newBuilder();
    timeRangeBuilder.setFrom(timeRange.getMin());
    timeRangeBuilder.setTo(timeRange.getMax());
    builder.setTimeRange(timeRangeBuilder.build());
  }
  Map<String, byte[]> attributes = get.getAttributesMap();
  if (!attributes.isEmpty()) {
    NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
    for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
      attributeBuilder.setName(attribute.getKey());
      attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue()));
      builder.addAttribute(attributeBuilder.build());
    }
  }
  if (get.hasFamilies()) {
    Column.Builder columnBuilder = Column.newBuilder();
    Map<byte[], NavigableSet<byte[]>> families = get.getFamilyMap();
    for (Map.Entry<byte[], NavigableSet<byte[]>> family : families.entrySet()) {
      NavigableSet<byte[]> qualifiers = family.getValue();
      columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey()));
      columnBuilder.clearQualifier();
      if (qualifiers != null && qualifiers.size() > 0) {
        for (byte[] qualifier : qualifiers) {
          columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier));
        }
      }
      builder.addColumn(columnBuilder.build());
    }
  }
  if (get.getMaxResultsPerColumnFamily() >= 0) {
    builder.setStoreLimit(get.getMaxResultsPerColumnFamily());
  }
  if (get.getRowOffsetPerColumnFamily() > 0) {
    builder.setStoreOffset(get.getRowOffsetPerColumnFamily());
  }
  if (get.isCheckExistenceOnly()) {
    builder.setExistenceOnly(true);
  }
  if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
    builder.setConsistency(toConsistency(get.getConsistency()));
  }
  Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
  if (loadColumnFamiliesOnDemand != null) {
    builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand);
  }
  return builder.build();
}
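Note the guard in all of these toGet variants: the protobuf consistency field is only set when the client Get carries a non-default value, so a STRONG (default) Get leaves the field untouched while a TIMELINE Get sets it. A small sketch of that behavior, with package names assuming the shaded-protobuf layout that matches the UnsafeByteOperations variant above:

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

Get get = new Get(Bytes.toBytes("row-1"));
get.setConsistency(Consistency.TIMELINE);
ClientProtos.Get proto = ProtobufUtil.toGet(get);
// TIMELINE is non-default, so the field is present in the serialized request
assert proto.hasConsistency() && proto.getConsistency() == ClientProtos.Consistency.TIMELINE;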