Java class org.apache.hadoop.io.file.tfile.BCFile.Reader.BlockReader example source code
Project: hadoop-oss
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
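This constructor is the public entry point for reading a TFile. The following is a minimal usage sketch, not taken from any of the listed projects: it assumes the TFile path arrives as the first program argument and obtains the file length from FileStatus, since, as the Javadoc notes, the length cannot be derived from the input stream itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileReaderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path(args[0]);                      // path to an existing TFile (assumption)
    FileSystem fs = path.getFileSystem(conf);
    long fileLength = fs.getFileStatus(path).getLen();  // length is not derivable from the stream

    try (FSDataInputStream fsdis = fs.open(path);
         TFile.Reader reader = new TFile.Reader(fsdis, fileLength, conf)) {
      // At this point the TFile meta block has been read and the comparator resolved.
      System.out.println("entries: " + reader.getEntryCount());
    }
  }
}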
Project: hadoop
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: aliyun-oss-hadoop-fs
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: big-c
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-EAR
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-plus
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hops
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-TCP
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-on-lustre
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hardfs
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-on-lustre2
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: RDFS
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-0.20
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hortonworks-extension
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-gpu
File: TFile.java
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-oss
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
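The method above uses a plain lazy-initialization guard: the index meta block is read only once, the first time index-dependent functionality is needed, and the synchronized modifier plus the null check keep it from being parsed twice. The sketch below is an illustrative caller, not code from the listed projects; it assumes a reader opened as in the earlier example, and relies on key-oriented calls such as getFirstKey() or createScannerByKey() being what pull in the index, since constructing the reader alone does not.

import java.io.IOException;
import org.apache.hadoop.io.file.tfile.RawComparable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileIndexExample {
  static void scanFromFirstKey(TFile.Reader reader) throws IOException {
    RawComparable firstKey = reader.getFirstKey();       // triggers the lazy index load
    // A null end key means "scan to the last entry of the TFile".
    try (TFile.Reader.Scanner scanner =
             reader.createScannerByKey(firstKey, null)) {
      while (!scanner.atEnd()) {
        TFile.Reader.Scanner.Entry entry = scanner.entry();
        System.out.println("key length: " + entry.getKeyLength());
        scanner.advance();
      }
    }
  }
}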
Project: hadoop
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: aliyun-oss-hadoop-fs
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: big-c
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-EAR
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-plus
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hops
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-TCP
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-on-lustre
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hardfs
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-on-lustre2
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: RDFS
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-0.20
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hortonworks-extension
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-gpu
File: TFile.java
/**
 * Lazily loading the TFile index.
 *
 * @throws IOException
 */
synchronized void checkTFileDataIndex() throws IOException {
  if (tfileIndex == null) {
    BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
    try {
      tfileIndex =
          new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
              .getComparator());
    } finally {
      brIndex.close();
    }
  }
}
Project: hadoop-oss
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}
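getBlockReader is package-private and hands back a BCFile.Reader.BlockReader, a DataInputStream positioned over the decompressed contents of one data block. The fragment below is a hypothetical in-package helper (the method name, block index, and byte-counting loop are illustrative assumptions, not the scanner's real parsing logic) showing the expected read-then-close discipline:

// Hypothetical helper, assumed to live in the same class as getBlockReader.
long decompressedSizeOfBlock(int blockIndex) throws IOException {
  BlockReader blkReader = getBlockReader(blockIndex);   // stream over one decompressed block
  try {
    long total = 0;
    byte[] buf = new byte[4096];
    int n;
    while ((n = blkReader.read(buf)) != -1) {
      total += n;                                       // count decompressed bytes
    }
    return total;
  } finally {
    blkReader.close();                                  // release the block's resources
  }
}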
Project: hadoop
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}
Project: aliyun-oss-hadoop-fs
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}
Project: big-c
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}
Project: hadoop-EAR
File: TFile.java
BlockReader getBlockReader(int blockIndex) throws IOException {
  return readerBCF.getDataBlock(blockIndex);
}