Example source code for the Java class org.apache.hadoop.mapreduce.TaskID
Project: hadoop
File: TaskFailedEvent.java
public void setDatum(Object odatum) {
  this.datum = (TaskFailed)odatum;
  this.id =
      TaskID.forName(datum.taskid.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
          ? null
          : TaskAttemptID.forName(
              datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
}
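The snippet above round-trips a task ID through its string form with TaskID.forName. A minimal standalone sketch of just that parsing step, reusing the canonical ID string that also appears in the TestJobInfo snippet further down (the class name TaskIdParseDemo is ours):

import org.apache.hadoop.mapreduce.TaskID;

public class TaskIdParseDemo {
  public static void main(String[] args) {
    // Parse the canonical string form back into a TaskID, as setDatum() does.
    TaskID id = TaskID.forName("task_1014873536921_0006_m_000000");
    System.out.println(id.getJobID());    // job_1014873536921_0006
    System.out.println(id.getTaskType()); // MAP
    System.out.println(id.getId());       // 0
  }
}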
Project: circus-train
File: DynamicInputFormat.java
private List<InputSplit> createSplits(JobContext jobContext, List<DynamicInputChunk> chunks) throws IOException {
  int numMaps = getNumMapTasks(jobContext.getConfiguration());
  final int nSplits = Math.min(numMaps, chunks.size());
  List<InputSplit> splits = new ArrayList<>(nSplits);
  for (int i = 0; i < nSplits; ++i) {
    TaskID taskId = new TaskID(jobContext.getJobID(), TaskType.MAP, i);
    chunks.get(i).assignTo(taskId);
    splits.add(new FileSplit(chunks.get(i).getPath(), 0,
        // Setting non-zero length for FileSplit size, to avoid a possible
        // future when 0-sized file-splits are considered "empty" and skipped
        // over.
        getMinRecordsPerChunk(jobContext.getConfiguration()), null));
  }
  ConfigurationUtil.publish(jobContext.getConfiguration(), CONF_LABEL_NUM_SPLITS, splits.size());
  return splits;
}
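Each split is pinned to a map task by constructing the TaskID directly from the job ID, the task type, and the split index. A minimal sketch of that constructor and the IDs it produces (the job identifier and the class name SplitTaskIdDemo are made up for illustration):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class SplitTaskIdDemo {
  public static void main(String[] args) {
    JobID jobId = new JobID("201707281234", 42); // hypothetical job
    for (int i = 0; i < 3; i++) {
      // One map TaskID per chunk, mirroring the loop in createSplits().
      System.out.println(new TaskID(jobId, TaskType.MAP, i));
    }
    // Prints task_201707281234_0042_m_000000 through ..._000002
  }
}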
Project: hadoop
File: TestEvents.java
/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);
}
Project: hadoop
File: PartialFileOutputCommitter.java
@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
    throws IOException {
  // We double check this is never invoked from a non-preemptable subclass.
  // This should never happen, since the invoking code checks it too,
  // but it is safer to double check. Errors handling this would produce
  // inconsistent output.
  if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
    throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() " +
        "from non @Preemptable class");
  }
  FileSystem fs =
      fsFor(getTaskAttemptPath(context), context.getConfiguration());
  LOG.info("cleanUpPartialOutputForTask: removing everything belonging to " +
      context.getTaskAttemptID().getTaskID() + " in: " +
      getCommittedTaskPath(context).getParent());
  final TaskAttemptID taid = context.getTaskAttemptID();
  final TaskID tid = taid.getTaskID();
  Path pCommit = getCommittedTaskPath(context).getParent();
  // remove any committed output from earlier attempts of this task
  for (int i = 0; i < taid.getId(); ++i) {
    TaskAttemptID oldId = new TaskAttemptID(tid, i);
    Path pTask = new Path(pCommit, oldId.toString());
    if (fs.exists(pTask) && !fs.delete(pTask, true)) {
      throw new IOException("Failed to delete " + pTask);
    }
  }
}
Project: hadoop
File: FileOutputFormat.java
/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-[mrsct]-$id$extension
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
                                                String name,
                                                String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  StringBuilder result = new StringBuilder();
  result.append(name);
  result.append('-');
  result.append(
      TaskID.getRepresentingCharacter(taskId.getTaskType()));
  result.append('-');
  result.append(NUMBER_FORMAT.format(partition));
  result.append(extension);
  return result.toString();
}
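The [mrsct] character comes from TaskID.getRepresentingCharacter (m for map, r for reduce, and so on), and NUMBER_FORMAT in FileOutputFormat pads the partition number to five digits. A hedged standalone sketch of the resulting name, assuming that five-digit, no-grouping format (the class UniqueFileNameDemo is ours):

import java.text.NumberFormat;

public class UniqueFileNameDemo {
  public static void main(String[] args) {
    // Mirror FileOutputFormat's NUMBER_FORMAT: five digits, no grouping.
    NumberFormat fmt = NumberFormat.getInstance();
    fmt.setMinimumIntegerDigits(5);
    fmt.setGroupingUsed(false);
    // A reduce task with partition 2, base name "part", extension ".txt":
    System.out.println("part" + '-' + 'r' + '-' + fmt.format(2) + ".txt");
    // part-r-00002.txt
  }
}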
Project: hadoop
File: TaskFinishedEvent.java
public void setDatum(Object oDatum) {
  this.datum = (TaskFinished)oDatum;
  this.taskid = TaskID.forName(datum.taskid.toString());
  if (datum.successfulAttemptId != null) {
    this.successfulAttemptId = TaskAttemptID
        .forName(datum.successfulAttemptId.toString());
  }
  this.finishTime = datum.finishTime;
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.status = datum.status.toString();
  this.counters = EventReader.fromAvro(datum.counters);
}
Project: hadoop
File: HistoryViewer.java
private void printFailedAttempts(FilteredJob filteredJob) {
  Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
  StringBuffer attempts = new StringBuffer();
  if (badNodes.size() > 0) {
    attempts.append("\n").append(filteredJob.getFilter());
    attempts.append(" task attempts by nodes");
    attempts.append("\nHostname\tFailedTasks");
    attempts.append("\n===============================");
    System.out.println(attempts.toString());
    for (Map.Entry<String,
        Set<TaskID>> entry : badNodes.entrySet()) {
      String node = entry.getKey();
      Set<TaskID> failedTasks = entry.getValue();
      attempts.setLength(0);
      attempts.append(node).append("\t");
      for (TaskID t : failedTasks) {
        attempts.append(t).append(", ");
      }
      System.out.println(attempts.toString());
    }
  }
}
Project: hadoop
File: CompletedJob.java
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
Project: hadoop
File: TestJobHistoryParsing.java
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }
  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Project: hadoop
File: TestCompletedTask.java
/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop
File: Task20LineHistoryEventEmitter.java
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);
  String taskType = line.get("TASK_TYPE");
  String startTime = line.get("START_TIME");
  String splits = line.get("SPLITS");
  if (startTime != null && taskType != null) {
    Task20LineHistoryEventEmitter that =
        (Task20LineHistoryEventEmitter) thatg;
    that.originalStartTime = Long.parseLong(startTime);
    that.originalTaskType =
        Version20LogInterfaceUtils.get20TaskType(taskType);
    return new TaskStartedEvent(taskID, that.originalStartTime,
        that.originalTaskType, splits);
  }
  return null;
}
Project: hadoop
File: Task20LineHistoryEventEmitter.java
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);
  String finishTime = line.get("FINISH_TIME");
  if (finishTime != null) {
    return new TaskUpdatedEvent(taskID, Long.parseLong(finishTime));
  }
  return null;
}
Project: hadoop
File: ZombieJob.java
/**
 * Mask the job ID part in a {@link TaskAttemptID}.
 *
 * @param attemptId raw {@link TaskAttemptID} read from trace
 * @return masked {@link TaskAttemptID} with empty {@link JobID}.
 */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
  JobID jobId = new JobID();
  TaskType taskType = attemptId.getTaskType();
  TaskID taskId = attemptId.getTaskID();
  return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
      taskId.getId(), attemptId.getId());
}
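Masking keeps only the task-level fields and substitutes a default (empty) JobID. A rough standalone sketch of the same rebuild, using TaskAttemptID.forName on a sample attempt string (the class name and the sample ID are ours, and the printed form is an assumption):

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class MaskDemo {
  public static void main(String[] args) {
    TaskAttemptID raw =
        TaskAttemptID.forName("attempt_200904211745_0002_m_000003_0");
    // Rebuild with an empty job identifier and job number 0, keeping only
    // the task type, task number, and attempt number, as maskAttemptID() does.
    TaskAttemptID masked = new TaskAttemptID("", 0, raw.getTaskType(),
        raw.getTaskID().getId(), raw.getId());
    System.out.println(masked); // e.g. attempt__0000_m_000003_0
  }
}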
Project: aliyun-oss-hadoop-fs
File: TestEvents.java
/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);
}
Project: aliyun-oss-hadoop-fs
File: FileOutputFormat.java
/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-[mrsct]-$id$extension
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
                                                String name,
                                                String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  StringBuilder result = new StringBuilder();
  result.append(name);
  result.append('-');
  result.append(
      TaskID.getRepresentingCharacter(taskId.getTaskType()));
  result.append('-');
  result.append(NUMBER_FORMAT.format(partition));
  result.append(extension);
  return result.toString();
}
Project: big-c
File: CompletedJob.java
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
Project: big-c
File: TestCompletedTask.java
/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: aliyun-oss-hadoop-fs
File: TaskFailedEvent.java
public void setDatum(Object odatum) {
  this.datum = (TaskFailed)odatum;
  this.id =
      TaskID.forName(datum.getTaskid().toString());
  this.taskType =
      TaskType.valueOf(datum.getTaskType().toString());
  this.finishTime = datum.getFinishTime();
  this.error = datum.getError().toString();
  this.failedDueToAttempt =
      datum.getFailedDueToAttempt() == null
          ? null
          : TaskAttemptID.forName(
              datum.getFailedDueToAttempt().toString());
  this.status = datum.getStatus().toString();
  this.counters =
      EventReader.fromAvro(datum.getCounters());
}
Project: aliyun-oss-hadoop-fs
File: CompletedJob.java
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
Project: aliyun-oss-hadoop-fs
File: TestJobHistoryParsing.java
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }
  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Project: aliyun-oss-hadoop-fs
File: TestCompletedTask.java
/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: aliyun-oss-hadoop-fs
File: Task20LineHistoryEventEmitter.java
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);
  String taskType = line.get("TASK_TYPE");
  String startTime = line.get("START_TIME");
  String splits = line.get("SPLITS");
  if (startTime != null && taskType != null) {
    Task20LineHistoryEventEmitter that =
        (Task20LineHistoryEventEmitter) thatg;
    that.originalStartTime = Long.parseLong(startTime);
    that.originalTaskType =
        Version20LogInterfaceUtils.get20TaskType(taskType);
    return new TaskStartedEvent(taskID, that.originalStartTime,
        that.originalTaskType, splits);
  }
  return null;
}
Project: aliyun-oss-hadoop-fs
File: Task20LineHistoryEventEmitter.java
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);
  String finishTime = line.get("FINISH_TIME");
  if (finishTime != null) {
    return new TaskUpdatedEvent(taskID, Long.parseLong(finishTime));
  }
  return null;
}
Project: big-c
File: TestJobHistoryParsing.java
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }
  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Project: big-c
File: FileOutputFormat.java
/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-[mrsct]-$id$extension
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
                                                String name,
                                                String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  StringBuilder result = new StringBuilder();
  result.append(name);
  result.append('-');
  result.append(
      TaskID.getRepresentingCharacter(taskId.getTaskType()));
  result.append('-');
  result.append(NUMBER_FORMAT.format(partition));
  result.append(extension);
  return result.toString();
}
Project: big-c
File: Task20LineHistoryEventEmitter.java
HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);
  String finishTime = line.get("FINISH_TIME");
  if (finishTime != null) {
    return new TaskUpdatedEvent(taskID, Long.parseLong(finishTime));
  }
  return null;
}
Project: big-c
File: HistoryViewer.java
private void printFailedAttempts(FilteredJob filteredJob) {
  Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
  StringBuffer attempts = new StringBuffer();
  if (badNodes.size() > 0) {
    attempts.append("\n").append(filteredJob.getFilter());
    attempts.append(" task attempts by nodes");
    attempts.append("\nHostname\tFailedTasks");
    attempts.append("\n===============================");
    System.out.println(attempts.toString());
    for (Map.Entry<String,
        Set<TaskID>> entry : badNodes.entrySet()) {
      String node = entry.getKey();
      Set<TaskID> failedTasks = entry.getValue();
      attempts.setLength(0);
      attempts.append(node).append("\t");
      for (TaskID t : failedTasks) {
        attempts.append(t).append(", ");
      }
      System.out.println(attempts.toString());
    }
  }
}
Project: big-c
File: TaskFailedEvent.java
public void setDatum(Object odatum) {
  this.datum = (TaskFailed)odatum;
  this.id =
      TaskID.forName(datum.taskid.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
          ? null
          : TaskAttemptID.forName(
              datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
}
Project: circus-train
File: DynamicInputChunk.java
/**
 * Reassigns the chunk to a specified map task, for consumption.
 *
 * @param taskId The map task to which the chunk is to be reassigned.
 * @throws IOException Exception on failure to reassign.
 */
public void assignTo(TaskID taskId) throws IOException {
  Path newPath = new Path(chunkRootPath, taskId.toString());
  if (!fs.rename(chunkFilePath, newPath)) {
    LOG.warn("{} could not be assigned to {}", chunkFilePath, taskId);
  }
}
Project: hadoop
File: TestEvents.java
/**
 * Simple test of TaskUpdatedEvent and TaskUpdated.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskUpdated() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskUpdatedEvent test = new TaskUpdatedEvent(tid, 1234L);
  assertEquals(test.getTaskId().toString(), tid.toString());
  assertEquals(test.getFinishTime(), 1234L);
}
Project: hadoop
File: JobHistoryParser.java
/**
 * Create a job info object where job information will be stored
 * after a parse.
 */
public JobInfo() {
  submitTime = launchTime = finishTime = -1;
  totalMaps = totalReduces = failedMaps = failedReduces = 0;
  finishedMaps = finishedReduces = 0;
  username = jobname = jobConfPath = jobQueueName = "";
  tasksMap = new HashMap<TaskID, TaskInfo>();
  completedTaskAttemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
  jobACLs = new HashMap<JobACL, AccessControlList>();
  priority = JobPriority.NORMAL;
}
Project: hadoop
File: TaskFinishedEvent.java
/**
 * Create an event to record the successful completion of a task
 * @param id Task ID
 * @param attemptId Task Attempt ID of the successful attempt for this task
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param status Status string
 * @param counters Counters for the task
 */
public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
    TaskType taskType,
    String status, Counters counters) {
  this.taskid = id;
  this.successfulAttemptId = attemptId;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.status = status;
  this.counters = counters;
}
Project: hadoop
File: TaskStartedEvent.java
/**
 * Create an event to record start of a task
 * @param id Task Id
 * @param startTime Start time of the task
 * @param taskType Type of the task
 * @param splitLocations Split locations, applicable for map tasks
 */
public TaskStartedEvent(TaskID id, long startTime,
    TaskType taskType, String splitLocations) {
  datum.taskid = new Utf8(id.toString());
  datum.splitLocations = new Utf8(splitLocations);
  datum.startTime = startTime;
  datum.taskType = new Utf8(taskType.name());
}
Project: hadoop
File: HistoryViewer.java
private void printTasks(TaskType taskType, String status) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  StringBuffer header = new StringBuffer();
  header.append("\n").append(status).append(" ");
  header.append(taskType).append(" task list for ").append(jobId);
  header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
  if (TaskType.MAP.equals(taskType)) {
    header.append("\tInputSplits");
  }
  header.append("\n====================================================");
  StringBuffer taskList = new StringBuffer();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    if (taskType.equals(task.getTaskType()) &&
        (status.equals(task.getTaskStatus())
            || status.equalsIgnoreCase("ALL"))) {
      taskList.setLength(0);
      taskList.append(task.getTaskId());
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getStartTime(), 0));
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getFinishTime(),
          task.getStartTime()));
      taskList.append("\t").append(task.getError());
      if (TaskType.MAP.equals(taskType)) {
        taskList.append("\t").append(task.getSplitLocations());
      }
      // taskList can never be null here, so print unconditionally.
      System.out.println(header.toString());
      System.out.println(taskList.toString());
    }
  }
}
Project: hadoop
File: HistoryViewer.java
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {
  filter = status;
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();
        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
        if (set == null) {
          set = new TreeSet<TaskID>();
          set.add(id);
          badNodesToFilteredTasks.put(hostname, set);
        } else {
          set.add(id);
        }
      }
    }
  }
}
Project: hadoop
File: TaskFailedEvent.java
/**
 * Create an event to record task failure
 * @param id Task ID
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param error Error String
 * @param status Status
 * @param failedDueToAttempt The attempt id due to which the task failed
 * @param counters Counters for the task
 */
public TaskFailedEvent(TaskID id, long finishTime,
    TaskType taskType, String error, String status,
    TaskAttemptID failedDueToAttempt, Counters counters) {
  this.id = id;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.error = error;
  this.status = status;
  this.failedDueToAttempt = failedDueToAttempt;
  this.counters = counters;
}
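As a usage illustration, a hedged sketch that builds such a failure event from string-form IDs. The ID strings, error text, and status value are made up for the example; Counters here is org.apache.hadoop.mapreduce.Counters:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;

public class TaskFailedEventDemo {
  public static void main(String[] args) {
    TaskID failedTask = TaskID.forName("task_1014873536921_0006_m_000000");
    TaskAttemptID culprit =
        TaskAttemptID.forName("attempt_1014873536921_0006_m_000000_0");
    // Record the failure at the current time with empty counters.
    TaskFailedEvent event = new TaskFailedEvent(failedTask,
        System.currentTimeMillis(), TaskType.MAP, "disk full", "FAILED",
        culprit, new Counters());
    System.out.println(event.getTaskId());
  }
}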
Project: hadoop
File: ShuffleSchedulerImpl.java
public synchronized void tipFailed(TaskID taskId) {
  if (!finishedMaps[taskId.getId()]) {
    finishedMaps[taskId.getId()] = true;
    if (--remainingMaps == 0) {
      notifyAll();
    }
    updateStatus();
  }
}
Project: hadoop
File: TestJobInfo.java
@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
          State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
          new org.apache.hadoop.mapred.Counters());
  Assert
      .assertEquals(treport.getTaskId(), "task_1014873536921_0006_m_000000");
  Assert.assertEquals(treport.getTaskID().toString(),
      "task_1014873536921_0006_m_000000");
}
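The test exercises the bridge between the new-API org.apache.hadoop.mapreduce.TaskID and the old-API org.apache.hadoop.mapred.TaskID. A minimal sketch of that round trip, grounded in the same IDs the test uses (the class name DowngradeDemo is ours):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class DowngradeDemo {
  public static void main(String[] args) {
    TaskID newApi = new TaskID(new JobID("1014873536921", 6), TaskType.MAP, 0);
    // The old-API TaskID subclasses the new one; downgrade() re-wraps the fields.
    org.apache.hadoop.mapred.TaskID oldApi =
        org.apache.hadoop.mapred.TaskID.downgrade(newApi);
    System.out.println(oldApi); // task_1014873536921_0006_m_000000
  }
}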
Project: hadoop
File: TestShuffleScheduler.java
@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
  JobConf job = new JobConf();
  job.setNumMapTasks(2);
  TaskStatus status = new TaskStatus() {
    @Override
    public boolean getIsMap() {
      return false;
    }

    @Override
    public void addFetchFailedMap(TaskAttemptID mapTaskId) {
    }
  };
  Progress progress = new Progress();
  TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
      0, 0);
  ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
      reduceId, null, progress, null, null, null);
  JobID jobId = new JobID();
  TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
  scheduler.tipFailed(taskId1);
  Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
      0.0f);
  Assert.assertFalse(scheduler.waitUntilDone(1));
  TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
  scheduler.tipFailed(taskId0);
  Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
      0.0f);
  Assert.assertTrue(scheduler.waitUntilDone(1));
}
Project: hadoop
File: CompletedJob.java
@Override
public Task getTask(TaskId taskId) {
  if (tasksLoaded.get()) {
    return tasks.get(taskId);
  } else {
    TaskID oldTaskId = TypeConverter.fromYarn(taskId);
    CompletedTask completedTask =
        new CompletedTask(taskId, jobInfo.getAllTasks().get(oldTaskId));
    return completedTask;
  }
}