Java 类 org.apache.hadoop.mapred.LineRecordReader 实例源码
项目:hadoop
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:aliyun-oss-hadoop-fs
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:big-c
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-EAR
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-plus
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:StuJava
文件:SplitVideoFileInputFormat.java
/**
 * Creates a line-oriented record reader for the given split, honoring a
 * custom record delimiter when one is configured.
 *
 * @param split    the split to read; must be a {@link FileSplit}
 * @param job      job configuration; consulted for the delimiter setting
 * @param reporter progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and record values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException
{
  reporter.setStatus(split.toString());
  // "textinputformat.record.delimiter" overrides the default newline delimiter.
  String delimiter = job.get("textinputformat.record.delimiter");
  // NOTE(review): Charsets here presumably comes from Guava — confirm the import.
  byte[] recordDelimiterBytes =
      (delimiter != null) ? delimiter.getBytes(Charsets.UTF_8) : null;
  return new LineRecordReader(job, (FileSplit) split, recordDelimiterBytes);
}
项目:FlexMap
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:bigdata-tutorial
文件:Base64TextInputFormat.java
/**
 * Creates a record reader that decodes Base64-encoded lines from the split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration, also passed to the reader's configure hook
 * @param reporter     progress reporter; updated with the split description
 * @return a configured {@code Base64LineRecordReader} wrapping a line reader
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, BytesWritable> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(genericSplit.toString());
  LineRecordReader lineReader = new LineRecordReader(job, (FileSplit) genericSplit);
  Base64LineRecordReader base64Reader = new Base64LineRecordReader(lineReader);
  // Let the decorator pick up job-specific settings before first use.
  base64Reader.configure(job);
  return base64Reader;
}
项目:bigdata-tutorial
文件:MyDemoInputFormat.java
/**
 * Creates a record reader that wraps a plain line reader with
 * demo-specific record handling.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the underlying line reader
 * @param reporter     progress reporter; updated with the split description
 * @return a {@code MyDemoRecordReader} over the split's lines
 * @throws IOException if the underlying reader cannot be opened
 */
@Override
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(genericSplit.toString());
  return new MyDemoRecordReader(new LineRecordReader(job, (FileSplit) genericSplit));
}
项目:hops
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-TCP
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-on-lustre
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hardfs
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-on-lustre2
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:RDFS
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-0.20
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hanoi-hadoop-2.0.0-cdh
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:mapreduce-fork
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hortonworks-extension
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hortonworks-extension
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-gpu
文件:NLineInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param genericSplit the split to read; must be a {@link FileSplit}
 * @param job          job configuration for the reader
 * @param reporter     progress reporter; updated with the split description
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
  // Surface which split this task is working on before reading begins.
  reporter.setStatus(genericSplit.toString());
  FileSplit fileSplit = (FileSplit) genericSplit;
  return new LineRecordReader(job, fileSplit);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:hadoop-EAR
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:Acacia
文件:CSRConverter.java
/**
 * Wraps a line reader over the split and pre-allocates the key/value
 * instances it will populate.
 *
 * @param conf  job configuration used to open the line reader
 * @param split the file split to read
 * @throws IOException if the line reader cannot be opened
 */
public ParagraphRecordReader(JobConf conf, FileSplit split) throws IOException {
  this.lineRecord = new LineRecordReader(conf, split);
  // Reusable key/value buffers supplied by the underlying reader.
  this.lineKey = lineRecord.createKey();
  this.lineValue = lineRecord.createValue();
}
项目:Acacia
文件:NotInFinder.java
/**
 * Wraps a line reader over the split and pre-allocates the key/value
 * instances it will populate.
 *
 * @param conf  job configuration used to open the line reader
 * @param split the file split to read
 * @throws IOException if the line reader cannot be opened
 */
public ParagraphRecordReader(JobConf conf, FileSplit split) throws IOException {
  this.lineRecord = new LineRecordReader(conf, split);
  // Reusable key/value buffers supplied by the underlying reader.
  this.lineKey = lineRecord.createKey();
  this.lineValue = lineRecord.createValue();
}
项目:Acacia
文件:EdgeDistributor.java
/**
 * Wraps a line reader over the split and pre-allocates the key/value
 * instances it will populate.
 *
 * @param conf  job configuration used to open the line reader
 * @param split the file split to read
 * @throws IOException if the line reader cannot be opened
 */
public ParagraphRecordReader(JobConf conf, FileSplit split) throws IOException {
  this.lineRecord = new LineRecordReader(conf, split);
  // Reusable key/value buffers supplied by the underlying reader.
  this.lineKey = lineRecord.createKey();
  this.lineValue = lineRecord.createValue();
}
项目:bigdata-tutorial
文件:Base64TextInputFormat.java
/**
 * Decorates the given line reader; the value buffer is obtained from the
 * wrapped reader so the two stay type-compatible.
 *
 * @param reader the underlying line reader to decode from
 */
public Base64LineRecordReader(LineRecordReader reader) {
  this.reader = reader;
  this.text = reader.createValue();
}
项目:bigdata-tutorial
文件:MyDemoInputFormat.java
/**
 * Decorates the given line reader; the value buffer is obtained from the
 * wrapped reader so the two stay type-compatible.
 *
 * @param reader the underlying line reader to delegate to
 */
public MyDemoRecordReader(LineRecordReader reader) {
  this.reader = reader;
  this.text = reader.createValue();
}
项目:verteilteWebInf
文件:DataInputFormat.java
/**
 * Creates a line-oriented record reader for the given split.
 *
 * @param inputSplit the split to read; must be a {@link FileSplit}
 * @param jobConf    job configuration for the reader
 * @param reporter   progress reporter (unused here)
 * @return a reader producing byte-offset keys and line values
 * @throws IOException if the underlying reader cannot be opened
 */
@Override
public RecordReader<LongWritable, Text> getRecordReader(InputSplit inputSplit, JobConf jobConf,
    Reporter reporter) throws IOException {
  FileSplit fileSplit = (FileSplit) inputSplit;
  return new LineRecordReader(jobConf, fileSplit);
}
项目:mgfsm
文件:SequenceInputFormat.java
/**
 * Wraps a line reader over the split and pre-allocates the key/value
 * instances it will populate.
 *
 * @param job   job configuration used to open the line reader
 * @param split the file split to read
 * @throws IOException if the line reader cannot be opened
 */
public SequenceRecordReader(JobConf job, FileSplit split) throws IOException {
  this.lineReader = new LineRecordReader(job, split);
  // Reusable key/value buffers supplied by the underlying reader.
  this.lineKey = lineReader.createKey();
  this.lineValue = lineReader.createValue();
}
项目:hadoop-on-lustre
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:RDFS
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:hadoop-0.20
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:pmr-common
文件:JsonInputFormat.java
/**
 * Opens a line-oriented reader over the split; each line is expected to be
 * consumed downstream as a JSON record.
 *
 * @param conf  configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public SimpleJsonRecordReader(Configuration conf, FileSplit split) throws IOException {
  this.rdr = new LineRecordReader(conf, split);
}
项目:hadoop-book
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:hanoi-hadoop-2.0.0-cdh
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:hanoi-hadoop-2.0.0-cdh
文件:TeraInputFormat.java
/**
 * Opens a line-oriented reader over the given file split.
 *
 * @param job   configuration used to open the reader
 * @param split the file split to read
 * @throws IOException if the reader cannot be opened
 */
public TeraRecordReader(Configuration job, FileSplit split) throws IOException {
  this.in = new LineRecordReader(job, split);
}
项目:hadoop-extensions
文件:FilenameKeyLineRecordReader.java
/**
 * Wraps a LineRecordReader and remembers the split's file name so each
 * record can be keyed by the file it came from.
 *
 * @param job   configuration used to open the underlying line reader
 * @param split the file split to read; its path supplies the file name
 * @throws IOException if the underlying line reader cannot be opened
 */
public FilenameKeyLineRecordReader(Configuration job,
    FileSplit split) throws IOException {
  // split is already declared as FileSplit — the original's cast was redundant.
  lineRecordReader = new LineRecordReader(job, split);
  fileName = split.getPath().getName();
  longKey = new LongWritable();
}