Example source code for the Java class org.apache.hadoop.util.DataChecksum.Type
Project: hadoop
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
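The Type returned here is the enum org.apache.hadoop.util.DataChecksum.Type (NULL, CRC32, CRC32C, DEFAULT, MIXED). A minimal sketch of how a caller might turn an inferred type back into a usable verifier; the checksumFor helper and the 512 bytes-per-checksum value are illustrative assumptions, not part of the snippet above (HDFS takes the chunk size from configuration):

import org.apache.hadoop.util.DataChecksum;

public class InferredChecksumDemo {
  // Hypothetical helper: build a DataChecksum for an inferred type.
  // 512 is an illustrative bytes-per-checksum value.
  static DataChecksum checksumFor(DataChecksum.Type inferred) {
    // Type.NULL means "no checksum": the returned object verifies nothing.
    return DataChecksum.newDataChecksum(inferred, 512);
  }

  public static void main(String[] args) {
    DataChecksum sum = checksumFor(DataChecksum.Type.CRC32C);
    System.out.println(sum.getChecksumType() + ", "
        + sum.getBytesPerChecksum() + " bytes per checksum");
  }
}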
Project: aliyun-oss-hadoop-fs
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb);
  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, smallBufferSize));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " +
        dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanup(null, pair.in, pair.out);
  }
}
Project: big-c
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop
File: DFSOutputStream.java

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute a checksum when writing a single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
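The lazy-persist rule above is easy to exercise in isolation. A small sketch in which the hypothetical forCompute helper stands in for getChecksum4Compute, with the isLazyPersist(stat) and stat.getReplication() lookups replaced by plain arguments:

import org.apache.hadoop.util.DataChecksum;

public class Checksum4ComputeDemo {
  // Hypothetical stand-in for the lazy-persist test in getChecksum4Compute.
  static DataChecksum forCompute(DataChecksum configured,
      boolean lazyPersist, short replication) {
    if (lazyPersist && replication == 1) {
      // Single in-memory replica: skip checksum computation entirely.
      return DataChecksum.newDataChecksum(DataChecksum.Type.NULL,
          configured.getBytesPerChecksum());
    }
    return configured;
  }

  public static void main(String[] args) {
    DataChecksum crc = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32C, 512);
    System.out.println(forCompute(crc, true, (short) 1).getChecksumType());  // NULL
    System.out.println(forCompute(crc, false, (short) 3).getChecksumType()); // CRC32C
  }
}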
Project: hadoop
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
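A standalone sketch of the same lookup-and-fallback pattern. The literal key "dfs.checksum.type" and the default "CRC32C" are what DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY and DFS_CHECKSUM_TYPE_DEFAULT resolve to in recent Hadoop releases; if your version differs, substitute the constants themselves:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumTypeFromConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.checksum.type", "CRC32C"); // assumed value of DFS_CHECKSUM_TYPE_KEY

    String name = conf.get("dfs.checksum.type", "CRC32C");
    DataChecksum.Type type;
    try {
      type = DataChecksum.Type.valueOf(name);
    } catch (IllegalArgumentException iae) {
      // Mirrors the fallback above: an unknown name degrades to the default.
      type = DataChecksum.Type.valueOf("CRC32C");
    }
    System.out.println("checksum type = " + type);
  }
}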
Project: aliyun-oss-hadoop-fs
File: DFSOutputStream.java

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (DataStreamer.isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute a checksum when writing a single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
Project: big-c
File: DFSOutputStream.java

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute a checksum when writing a single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
Project: big-c
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: DFSOutputStream.java

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute a checksum when writing a single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
Project: hadoop-2.6.0-cdh5.4.3
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));

    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop-plus
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hadoop-plus
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param clientName the name of the DFSClient requesting the checksum
 * @param socketFactory the factory used to create the socket to the datanode
 * @param socketTimeout the socket timeout in milliseconds
 * @param lb the located block
 * @param dn the connected datanode
 * @param encryptionKey the key for encrypted data transfer, if any
 * @param connectToDnViaHostname whether to connect to the datanode by hostname
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private static Type inferChecksumTypeByReading(
    String clientName, SocketFactory socketFactory, int socketTimeout,
    LocatedBlock lb, DatanodeInfo dn,
    DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
    throws IOException {
  IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
      encryptionKey, dn, socketTimeout);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: FlexMap
File: DFSOutputStream.java

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute a checksum when writing a single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
Project: FlexMap
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: FlexMap
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hops
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default " +
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type
        .valueOf(DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hops
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb
 *     the located block
 * @param clientName
 *     the name of the DFSClient requesting the checksum
 * @param dn
 *     the connected datanode
 * @return the inferred checksum type
 * @throws IOException
 *     if an error occurs
 */
private static Type inferChecksumTypeByReading(String clientName,
    SocketFactory socketFactory, int socketTimeout, LocatedBlock lb,
    DatanodeInfo dn, DataEncryptionKey encryptionKey,
    boolean connectToDnViaHostname) throws IOException {
  IOStreamPair pair =
      connectToDN(socketFactory, connectToDnViaHostname, encryptionKey, dn,
          socketTimeout);
  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out)
        .readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException(
            "Bad response " + reply + " trying to read " + lb.getBlock() +
                " from datanode " + dn);
      }
    }
    return PBHelper
        .convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop-TCP
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hadoop-TCP
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param clientName the name of the DFSClient requesting the checksum
 * @param socketFactory the factory used to create the socket to the datanode
 * @param socketTimeout the socket timeout in milliseconds
 * @param lb the located block
 * @param dn the connected datanode
 * @param encryptionKey the key for encrypted data transfer, if any
 * @param connectToDnViaHostname whether to connect to the datanode by hostname
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private static Type inferChecksumTypeByReading(
    String clientName, SocketFactory socketFactory, int socketTimeout,
    LocatedBlock lb, DatanodeInfo dn,
    DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
    throws IOException {
  IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
      encryptionKey, dn, socketTimeout);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hardfs
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hardfs
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param clientName the name of the DFSClient requesting the checksum
 * @param socketFactory the factory used to create the socket to the datanode
 * @param socketTimeout the socket timeout in milliseconds
 * @param lb the located block
 * @param dn the connected datanode
 * @param encryptionKey the key for encrypted data transfer, if any
 * @param connectToDnViaHostname whether to connect to the datanode by hostname
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private static Type inferChecksumTypeByReading(
    String clientName, SocketFactory socketFactory, int socketTimeout,
    LocatedBlock lb, DatanodeInfo dn,
    DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
    throws IOException {
  IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
      encryptionKey, dn, socketTimeout);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop-on-lustre2
File: DFSClient.java

private DataChecksum.Type getChecksumType(Configuration conf) {
  final String checksum = conf.get(
      DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
      DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  try {
    return DataChecksum.Type.valueOf(checksum);
  } catch (IllegalArgumentException iae) {
    LOG.warn("Bad checksum type: " + checksum + ". Using default "
        + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
    return DataChecksum.Type.valueOf(
        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
  }
}
Project: hadoop-on-lustre2
File: DFSClient.java

/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param clientName the name of the DFSClient requesting the checksum
 * @param socketFactory the factory used to create the socket to the datanode
 * @param socketTimeout the socket timeout in milliseconds
 * @param lb the located block
 * @param dn the connected datanode
 * @param encryptionKey the key for encrypted data transfer, if any
 * @param connectToDnViaHostname whether to connect to the datanode by hostname
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private static Type inferChecksumTypeByReading(
    String clientName, SocketFactory socketFactory, int socketTimeout,
    LocatedBlock lb, DatanodeInfo dn,
    DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
    throws IOException {
  IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
      encryptionKey, dn, socketTimeout);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
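ChecksumOpt here is org.apache.hadoop.fs.Options.ChecksumOpt, a plain value pair of checksum type and bytes-per-checksum. A minimal sketch, assuming the two-argument constructor and getters in current Hadoop; 512 mirrors the usual dfs.bytes-per-checksum default:

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptDemo {
  public static void main(String[] args) {
    // 512 bytes per checksum is the customary HDFS default.
    ChecksumOpt opt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
    System.out.println(opt.getChecksumType() + " / "
        + opt.getBytesPerChecksum() + " bytes per checksum");
  }
}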
Project: big-c
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hadoop-2.6.0-cdh5.4.3
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hadoop-plus
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: FlexMap
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hops
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hadoop-TCP
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hardfs
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
Project: hadoop-on-lustre2
File: DFSClient.java

private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
  DataChecksum.Type type = getChecksumType(conf);
  int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
      DFS_BYTES_PER_CHECKSUM_DEFAULT);
  return new ChecksumOpt(type, bytesPerChecksum);
}
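Putting the pieces together: a ChecksumOpt like the one built above can be handed to the ChecksumOpt-accepting FileSystem.create overload (present in Hadoop 2 and later). A hedged end-to-end sketch; the path, buffer size, replication, and block size are illustrative values, not defaults taken from the snippets above:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;

public class CreateWithChecksumOpt {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Illustrative values; a real client derives these from configuration.
    ChecksumOpt opt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
    FSDataOutputStream out = fs.create(new Path("/tmp/demo.bin"),
        FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
        4096, (short) 3, 128 * 1024 * 1024, null /* progress */, opt);
    out.write("hello".getBytes("UTF-8"));
    out.close();
  }
}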