Example source code for the Java class org.apache.hadoop.mapred.JobHistory
Project: hadoop-2.6.0-cdh5.4.3
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
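For context, a hedged sketch of how per-task attempt maps like this are typically collected, assuming a JobHistory.JobInfo that has already been populated (for example via DefaultJobHistoryParser.parseJobTasks, as in the JobHistoryHelper snippets further down) and assuming the pre-0.21 JobInfo#getAllTasks() accessor; the helper name is made up:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapred.JobHistory;

// Hypothetical helper: for every task in a parsed job, keep the value map of the
// attempt whose TASK_STATUS is "SUCCESS", keyed by task id.
static Map<String, Map<JobHistory.Keys, String>> successfulAttemptsByTask(JobHistory.JobInfo job) {
  Map<String, Map<JobHistory.Keys, String>> result =
      new HashMap<String, Map<JobHistory.Keys, String>>();
  for (Map.Entry<String, JobHistory.Task> e : job.getAllTasks().entrySet()) {
    for (JobHistory.TaskAttempt attempt : e.getValue().getTaskAttempts().values()) {
      // Same status check used by getLastSuccessfulTaskAttempt() above.
      if ("SUCCESS".equals(attempt.getValues().get(JobHistory.Keys.TASK_STATUS))) {
        result.put(e.getKey(), attempt.getValues());
      }
    }
  }
  return result;
}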
Project: hadoop-EAR
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hiped2
File: TaskThroughput.java
public static void printTasks(JobHistory.JobInfo job)
    throws ParseException, IllegalAccessException,
           InvocationTargetException, NoSuchMethodException {
  List<TaskMetrics> mapMetrics = JobHistoryHelper.getMapTaskMetrics(job);
  List<TaskMetrics> reduceMetrics = JobHistoryHelper.getReduceTaskMetrics(job);
  decorateHeader("MAP TASKS");
  dumpTasks(mapMetrics, "throughput",
      new TaskMetrics.OverallThroughputComparator(), "getOverallThroughputBytesPerSecond",
      false, false);
  decorateHeader("REDUCE TASKS");
  dumpTasks(reduceMetrics, "throughput",
      new TaskMetrics.OverallThroughputComparator(), "getOverallThroughputBytesPerSecond",
      false, true);
}
Project: hiped2
File: JobHistoryHelper.java
public static JobHistory.JobInfo getJobInfoFromHdfsOutputDir(String outputDir, Configuration conf)
    throws IOException {
  Path output = new Path(outputDir);
  Path historyLogDir = new Path(output, "_logs/history");
  FileSystem fs = output.getFileSystem(conf);
  if (!fs.exists(output)) {
    throw new IOException("History directory " + historyLogDir.toString()
        + " does not exist");
  }
  Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir,
      jobLogFileFilter));
  if (jobFiles.length == 0) {
    throw new IOException("Not a valid history directory "
        + historyLogDir.toString());
  }
  String[] jobDetails =
      JobHistory.JobInfo.decodeJobHistoryFileName(jobFiles[0].getName()).split("_");
  String jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
  JobHistory.JobInfo job = new JobHistory.JobInfo(jobId);
  DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs);
  return job;
}
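A hedged usage sketch for the helper above; the output directory path is made up, and the directory is expected to contain the _logs/history subdirectory that the helper looks for:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobHistory;

// Hypothetical call site: read back the history of a finished job from its output directory.
public class PrintJobId {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    JobHistory.JobInfo job =
        JobHistoryHelper.getJobInfoFromHdfsOutputDir("/user/alice/wordcount-output", conf);
    System.out.println("Job id: " + job.getValues().get(JobHistory.Keys.JOBID));
  }
}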
Project: hiped2
File: DataSkewGnuplot.java
public static void dumpTaskTimes(String... args)
    throws Exception {
  JobHistory.JobInfo job = JobHistoryHelper.getJobInfoFromCliArgs(args);
  List<TaskMetrics> mapMetrics = JobHistoryHelper.getMapTaskMetrics(job);
  List<TaskMetrics> reduceMetrics = JobHistoryHelper.getReduceTaskMetrics(job);
  System.out.println("# MAP-EXEC-TIME-SECS\tMAP_INPUT_BYTES");
  dumpTaskTimes(mapMetrics, new TaskMetrics.ExecTimeComparator());
  System.out.println();
  System.out.println("# REDUCE-EXEC-TIME-SECS\tREDUCE_INPUT_BYTES");
  dumpTaskTimes(reduceMetrics, new TaskMetrics.ExecTimeComparator());
}
Project: hiped2
File: ExtractJobMetrics.java
public static void printAllTaskAttempts(JobHistory.JobInfo job)
    throws ParseException {
  PaddedTable table = new PaddedTable();
  table
      .addColumnTitle("Type")
      .addColumnTitle("TaskId")
      .addColumnTitle("Status")
      .addColumnTitle("Host")
      .addColumnTitle("OverallTime(HH:MM:SS)")
      .addColumnTitle("ShuffleTime(HH:MM:SS)")
      .addColumnTitle("SortTime(HH:MM:SS)")
      .addColumnTitle("MapInputBytes")
      .addColumnTitle("MapOutputBytes")
      .addColumnTitle("InputRecords")
      .addColumnTitle("OutputRecords");
  printAllTaskAttempts(table, job, JobHistory.Values.MAP.name());
  printAllTaskAttempts(table, job, JobHistory.Values.REDUCE.name());
  printAllTaskAttempts(table, job, JobHistory.Values.SETUP.name());
  printAllTaskAttempts(table, job, JobHistory.Values.CLEANUP.name());
  System.out.println(table);
}
Project: spork-streaming
File: HadoopJobHistoryLoader.java
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (null != attempt && null != attempt.getValues()
        && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
        && attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: spork
File: HadoopJobHistoryLoader.java
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (null != attempt && null != attempt.getValues()
        && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
        && attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hadoop-on-lustre
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: RDFS
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hadoop-0.20
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hanoi-hadoop-2.0.0-cdh
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hortonworks-extension
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hadoop-gpu
File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: sedge
File: HadoopJobHistoryLoader.java
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Project: hadoop-2.6.0-cdh5.4.3
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
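One detail worth flagging in the COUNTERS branch above: String.concat returns a new string and does not modify its receiver, so `value.concat(",")` on its own has no effect; if the trailing comma matters to parseAndAddJobCounters, the intent was presumably `value = value.concat(",")`. A minimal demonstration:

// Minimal demonstration that String.concat does not modify the original string.
public class ConcatNoOp {
  public static void main(String[] args) {
    String value = "COUNTER_A:1";
    value.concat(",");              // return value discarded; 'value' is unchanged
    System.out.println(value);      // prints COUNTER_A:1
    value = value.concat(",");      // reassigning is what actually appends the comma
    System.out.println(value);      // prints COUNTER_A:1,
  }
}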
Project: hadoop-EAR
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: hiped2
File: DataSkewMetrics.java
public static void printTasks(JobHistory.JobInfo job)
    throws ParseException, IllegalAccessException,
           InvocationTargetException, NoSuchMethodException {
  List<TaskMetrics> mapMetrics = JobHistoryHelper.getMapTaskMetrics(job);
  List<TaskMetrics> reduceMetrics = JobHistoryHelper.getReduceTaskMetrics(job);
  decorateHeader("MAP TASKS");
  dumpTasks(mapMetrics, "execution time",
      new TaskMetrics.ExecTimeComparator(), "getOverallTimeMillis", true);
  dumpTasks(mapMetrics, "input records",
      new TaskMetrics.InputRecordsComparator(), "getInputRecords", false);
  dumpTasks(mapMetrics, "input bytes",
      new TaskMetrics.InputBytesComparator(), "getInputBytes", false);
  decorateHeader("REDUCE TASKS");
  dumpTasks(reduceMetrics, "execution time",
      new TaskMetrics.ExecTimeComparator(), "getOverallTimeMillis", true);
  dumpTasks(reduceMetrics, "input records",
      new TaskMetrics.InputRecordsComparator(), "getInputRecords", false);
  dumpTasks(reduceMetrics, "input bytes",
      new TaskMetrics.InputBytesComparator(), "getInputBytes", false);
}
Project: hiped2
File: JobHistoryHelper.java
public static JobHistory.JobInfo getJobInfoFromCliArgs(Configuration conf, String... args)
    throws IOException {
  String usage = "Expected 2 arguments, either --hdfsdir <dir> or --localfile <path>";
  if (args.length != 2) {
    throw new IOException(usage);
  }
  if ("--hdfsdir".equals(args[0])) {
    return getJobInfoFromHdfsOutputDir(args[1], conf);
  } else if ("--localfile".equals(args[0])) {
    return getJobInfoFromLocalFile(args[1], conf);
  }
  throw new IOException("Unexpected option '" + args[0] + "' \n" + usage);
}
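A hedged sketch of a small command-line entry point on top of this helper; the class name is illustrative and the printed key is just an example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobHistory;

// Hypothetical tool: run with "--hdfsdir <job output dir>" or "--localfile <history file>".
public class HistoryInfoTool {
  public static void main(String[] args) throws Exception {
    JobHistory.JobInfo job = JobHistoryHelper.getJobInfoFromCliArgs(new Configuration(), args);
    System.out.println("Job name: " + job.getValues().get(JobHistory.Keys.JOBNAME));
  }
}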
Project: hiped2
File: JobHistoryHelper.java
public static JobHistory.JobInfo getJobInfoFromLocalFile(String outputFile, Configuration conf)
    throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  Path outputFilePath = new Path(outputFile);
  String[] jobDetails =
      JobHistory.JobInfo.decodeJobHistoryFileName(outputFilePath.getName()).split("_");
  String jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
  JobHistory.JobInfo job = new JobHistory.JobInfo(jobId);
  DefaultJobHistoryParser.parseJobTasks(outputFile, job, fs);
  return job;
}
Project: hiped2
File: JobHistoryHelper.java
public static List<TaskMetrics> getMapTaskMetrics(JobHistory.JobInfo job)
    throws ParseException {
  List<TaskMetrics> metrics = new ArrayList<TaskMetrics>();
  addTask(metrics, job, JobHistory.Values.MAP.name());
  return metrics;
}
Project: hiped2
File: JobHistoryHelper.java
public static List<TaskMetrics> getReduceTaskMetrics(JobHistory.JobInfo job)
    throws ParseException {
  List<TaskMetrics> metrics = new ArrayList<TaskMetrics>();
  addTask(metrics, job, JobHistory.Values.REDUCE.name());
  return metrics;
}
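A hedged sketch of consuming these metric lists; it assumes TaskMetrics exposes the getOverallTimeMillis() getter that the reflection strings in the dumpTasks calls above refer to by name, and that it returns milliseconds:

import java.util.List;
import org.apache.hadoop.mapred.JobHistory;

// Hypothetical summary built on the two helpers above.
public class TaskTimeSummary {
  static void print(JobHistory.JobInfo job) throws Exception {
    List<TaskMetrics> maps = JobHistoryHelper.getMapTaskMetrics(job);
    List<TaskMetrics> reduces = JobHistoryHelper.getReduceTaskMetrics(job);
    long mapMillis = 0;
    for (TaskMetrics m : maps) {
      mapMillis += m.getOverallTimeMillis();   // assumed getter, taken from the reflection strings
    }
    System.out.println(maps.size() + " map tasks, " + mapMillis + " ms total execution time; "
        + reduces.size() + " reduce tasks");
  }
}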
Project: spork-streaming
File: HadoopJobHistoryLoader.java
private static void populateJob(Map<JobHistory.Keys, String> jobC, Map<String, String> job) {
  int size = jobC.size();
  Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
      case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
      case USER: job.put(JobKeys.USER.toString(), value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        LOG.debug("JobHistory.Keys." + key + " : NOT INCLUDED IN LOADER RETURN VALUE");
        break;
    }
  }
}
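A hedged sketch of reading the flat Map<String, String> this loader builds; it assumes the JobKeys constants use the default enum toString() (so the keys are literally "SUBMIT_TIME", "FINISH_TIME", "JOBID") and that the two timestamps are epoch milliseconds:

import java.util.Map;

// Hypothetical consumer of a map populated by populateJob() above.
static void printWallClock(Map<String, String> job) {
  long submitted = Long.parseLong(job.get("SUBMIT_TIME"));   // assumed epoch millis
  long finished = Long.parseLong(job.get("FINISH_TIME"));    // assumed epoch millis
  System.out.println(job.get("JOBID") + " ran for " + (finished - submitted) + " ms");
}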
Project: spork
File: HadoopJobHistoryLoader.java
private static void populateJob(Map<JobHistory.Keys, String> jobC, Map<String, String> job) {
  int size = jobC.size();
  Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
      case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
      case USER: job.put(JobKeys.USER.toString(), value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        LOG.debug("JobHistory.Keys." + key + " : NOT INCLUDED IN LOADER RETURN VALUE");
        break;
    }
  }
}
Project: hadoop-on-lustre
File: TraceBuilder.java
/**
 * @param fileName
 * @return the jobID String, parsed out of the file name. We return a valid
 *         String for either a history log file or a config file. Otherwise,
 *         [especially for .crc files] we return null.
 */
static String extractJobID(String fileName) {
  String jobId = applyParser(fileName, JobHistory.JOBHISTORY_FILENAME_REGEX);
  if (jobId == null) {
    // check if its a pre21 jobhistory file
    jobId = applyParser(fileName,
        Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX);
  }
  return jobId;
}
Project: hadoop-on-lustre
File: TraceBuilder.java
static boolean isJobConfXml(String fileName, InputStream input) {
  String jobId = applyParser(fileName, JobHistory.CONF_FILENAME_REGEX);
  if (jobId == null) {
    // check if its a pre21 jobhistory conf file
    jobId = applyParser(fileName,
        Pre21JobHistoryConstants.CONF_FILENAME_REGEX);
  }
  return jobId != null;
}
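A hedged sketch of how these two checks are typically combined when scanning a history directory; it assumes package-local access to the static helpers and relies on the Javadoc above, which says extractJobID matches both history logs and config files:

import java.io.InputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical scan: classify each file as a job conf XML or a history log, skipping
// anything (such as .crc files) for which no job id can be extracted.
static void classifyHistoryFiles(FileSystem fs, Path historyDir) throws java.io.IOException {
  for (FileStatus stat : fs.listStatus(historyDir)) {
    String name = stat.getPath().getName();
    String jobId = extractJobID(name);
    if (jobId == null) {
      continue;
    }
    InputStream in = fs.open(stat.getPath());
    try {
      System.out.println(jobId + ": " + (isJobConfXml(name, in) ? "job conf" : "history log"));
    } finally {
      in.close();
    }
  }
}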
Project: hadoop-on-lustre
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: fiware-cosmos-ambari
File: TestJobHistoryParsing.java
public void test(String workflowId, String workflowName, String workflowNodeName,
    Map<String, String[]> adjacencies) {
  Configuration conf = new Configuration();
  setProperties(conf, workflowId, workflowName, workflowNodeName, adjacencies);
  String log = log("JOB", new String[] {ID, NAME, NODE, ADJ},
      new String[] {conf.get(ID_PROP), conf.get(NAME_PROP), conf.get(NODE_PROP),
                    JobHistory.JobInfo.getWorkflowAdjacencies(conf)});
  ParsedLine line = new ParsedLine(log);
  JobID jobid = new JobID("id", 1);
  JobSubmittedEvent event = new JobSubmittedEvent(jobid, workflowName, "", 0l, "", null, "",
      line.get(ID), line.get(NAME), line.get(NODE), line.get(ADJ));
  WorkflowContext context = MapReduceJobHistoryUpdater.buildWorkflowContext(event);
  String resultingWorkflowId = workflowId;
  if (workflowId.isEmpty())
    resultingWorkflowId = jobid.toString().replace("job_", "mr_");
  assertEquals("Didn't recover workflowId", resultingWorkflowId, context.getWorkflowId());
  assertEquals("Didn't recover workflowName", workflowName, context.getWorkflowName());
  assertEquals("Didn't recover workflowNodeName", workflowNodeName, context.getWorkflowEntityName());
  Map<String, String[]> resultingAdjacencies = adjacencies;
  if (resultingAdjacencies.size() == 0) {
    resultingAdjacencies = new HashMap<String, String[]>();
    resultingAdjacencies.put(workflowNodeName, new String[] {});
  }
  assertEquals("Got incorrect number of adjacencies",
      resultingAdjacencies.size(), context.getWorkflowDag().getEntries().size());
  for (WorkflowDagEntry entry : context.getWorkflowDag().getEntries()) {
    String[] sTargets = resultingAdjacencies.get(entry.getSource());
    assertNotNull("No original targets for " + entry.getSource(), sTargets);
    List<String> dTargets = entry.getTargets();
    assertEquals("Got incorrect number of targets for " + entry.getSource(),
        sTargets.length, dTargets.size());
    for (int i = 0; i < sTargets.length; i++) {
      assertEquals("Got incorrect target for " + entry.getSource(), sTargets[i], dTargets.get(i));
    }
  }
}
Project: RDFS
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: hadoop-0.20
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      //case START_TIME: job.put(JobKeys., value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.out.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: hanoi-hadoop-2.0.0-cdh
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: hortonworks-extension
File: TraceBuilder.java
/**
 * @param fileName
 * @return the jobID String, parsed out of the file name. We return a valid
 *         String for either a history log file or a config file. Otherwise,
 *         [especially for .crc files] we return null.
 */
static String extractJobID(String fileName) {
  String jobId = applyParser(fileName, JobHistory.JOBHISTORY_FILENAME_REGEX);
  if (jobId == null) {
    // check if its a pre21 jobhistory file
    jobId = applyParser(fileName,
        Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX);
  }
  return jobId;
}
Project: hortonworks-extension
File: TraceBuilder.java
static boolean isJobConfXml(String fileName, InputStream input) {
  String jobId = applyParser(fileName, JobHistory.CONF_FILENAME_REGEX);
  if (jobId == null) {
    // check if its a pre21 jobhistory conf file
    jobId = applyParser(fileName,
        Pre21JobHistoryConstants.CONF_FILENAME_REGEX);
  }
  return jobId != null;
}
Project: hortonworks-extension
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: hadoop-gpu
File: JobStatistics.java
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size();
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
      //case START_TIME: job.put(JobKeys., value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
      case JOBID: job.put(JobKeys.JOBID, value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
      case USER: job.put(JobKeys.USER, value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        System.out.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
        break;
    }
  }
}
Project: sedge
File: HadoopJobHistoryLoader.java
private static void populateJob(Map<JobHistory.Keys, String> jobC, Map<String, String> job) {
  int size = jobC.size();
  Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
      case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;
      case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
      case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
      case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
      case USER: job.put(JobKeys.USER.toString(), value); break;
      case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
      case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
      case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
      case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
      case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
      case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
      case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
      case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
      case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
      case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
      case COUNTERS:
        value.concat(",");
        parseAndAddJobCounters(job, value);
        break;
      default:
        LOG.debug("JobHistory.Keys." + key + " : NOT INCLUDED IN LOADER RETURN VALUE");
        break;
    }
  }
}