Example source code for the Java class org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent
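The snippets below, collected from hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hops, and hadoop-plus, show how TaskEvent instances are created, dispatched, and handled. Before those excerpts, here is a minimal, hypothetical sketch of building a TaskId and handing a TaskEvent to an EventHandler; the class TaskEventSketch and its inline handler are illustrative only and not part of Hadoop, while the TaskEvent constructor and TaskEventType values mirror the usage in the snippets that follow.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;

public class TaskEventSketch {
  public static void main(String[] args) {
    // Build a TaskId for the first map task of a hypothetical job.
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);

    // A TaskEvent pairs the target TaskId with a TaskEventType. In the MR
    // ApplicationMaster the AsyncDispatcher routes it to
    // TaskImpl.handle(TaskEvent); here a trivial handler stands in for it.
    EventHandler<TaskEvent> handler =
        event -> System.out.println("Handling " + event.getTaskID()
            + " of type " + event.getType());
    handler.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
  }
}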
Project: hadoop
File: TaskImpl.java
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }
  } finally {
    writeLock.unlock();
  }
}
Project: hadoop
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
  // The nextAttemptNumber is commit pending, decide on set the commitAttempt
  TaskAttemptId attemptID = ev.getTaskAttemptID();
  if (task.commitAttempt == null) {
    // TODO: validate attemptID
    task.commitAttempt = attemptID;
    LOG.info(attemptID + " given a go for committing the task output.");
  } else {
    // Don't think this can be a pluggable decision, so simply raise an
    // event for the TaskAttempt to delete its output.
    LOG.info(task.commitAttempt
        + " already given a go for committing the task output, so killing "
        + attemptID);
    task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
        SPECULATION + task.commitAttempt + " committed first!"));
  }
}
Project: hadoop
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        // TA_KILL message to an attempt that doesn't need one for
        // other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: hadoop
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    task.addAndScheduleAttempt(Avataar.VIRGIN);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: hadoop
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hadoop
File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
    throws IOException {
  TaskId taskId = request.getTaskId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task " + taskId + " received from " + callerUGI
      + " at " + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskEvent(taskId, TaskEventType.T_KILL));
  KillTaskResponse response =
      recordFactory.newRecordInstance(KillTaskResponse.class);
  return response;
}
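The killTask handler above is the server-side half of the kill RPC: it checks JobACL.MODIFY_JOB and then fires a TaskEvent of type T_KILL into the dispatcher. For completeness, here is a hedged sketch of the matching client-side call; KillTaskClientSketch is hypothetical, and it assumes an MRClientProtocol proxy to the MR ApplicationMaster has already been obtained elsewhere.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class KillTaskClientSketch {
  // 'proxy' is an MRClientProtocol proxy to the MR ApplicationMaster,
  // obtained elsewhere (e.g. via the job client); creating it is out of
  // scope for this sketch.
  public static void killTask(MRClientProtocol proxy, TaskId taskId)
      throws IOException {
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(new Configuration());
    KillTaskRequest request =
        recordFactory.newRecordInstance(KillTaskRequest.class);
    request.setTaskId(taskId);
    // On the AM side this ends up as
    // new TaskEvent(taskId, TaskEventType.T_KILL), as shown above.
    proxy.killTask(request);
  }
}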
Project: hadoop
File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  // send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
  // this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitionException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }
  } finally {
    writeLock.unlock();
  }
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
  // The nextAttemptNumber is commit pending, decide on set the commitAttempt
  TaskAttemptId attemptID = ev.getTaskAttemptID();
  if (task.commitAttempt == null) {
    // TODO: validate attemptID
    task.commitAttempt = attemptID;
    LOG.info(attemptID + " given a go for committing the task output.");
  } else {
    // Don't think this can be a pluggable decision, so simply raise an
    // event for the TaskAttempt to delete its output.
    LOG.info(task.commitAttempt
        + " already given a go for committing the task output, so killing "
        + attemptID);
    task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
        SPECULATION + task.commitAttempt + " committed first!"));
  }
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        // TA_KILL message to an attempt that doesn't need one for
        // other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    task.addAndScheduleAttempt(Avataar.VIRGIN);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: aliyun-oss-hadoop-fs
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: aliyun-oss-hadoop-fs
File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
    throws IOException {
  TaskId taskId = request.getTaskId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task " + taskId + " received from " + callerUGI
      + " at " + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskEvent(taskId, TaskEventType.T_KILL));
  KillTaskResponse response =
      recordFactory.newRecordInstance(KillTaskResponse.class);
  return response;
}
Project: aliyun-oss-hadoop-fs
File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  // send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
  // this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
Project: big-c
File: TaskImpl.java
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }
  } finally {
    writeLock.unlock();
  }
}
Project: big-c
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
  // The nextAttemptNumber is commit pending, decide on set the commitAttempt
  TaskAttemptId attemptID = ev.getTaskAttemptID();
  if (task.commitAttempt == null) {
    // TODO: validate attemptID
    task.commitAttempt = attemptID;
    LOG.info(attemptID + " given a go for committing the task output.");
  } else {
    // Don't think this can be a pluggable decision, so simply raise an
    // event for the TaskAttempt to delete its output.
    LOG.info(task.commitAttempt
        + " already given a go for committing the task output, so killing "
        + attemptID);
    task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
        SPECULATION + task.commitAttempt + " committed first!"));
  }
}
Project: big-c
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        // TA_KILL message to an attempt that doesn't need one for
        // other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: big-c
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    task.addAndScheduleAttempt(Avataar.VIRGIN);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: big-c
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: big-c
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: big-c
File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
    throws IOException {
  TaskId taskId = request.getTaskId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task " + taskId + " received from " + callerUGI
      + " at " + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskEvent(taskId, TaskEventType.T_KILL));
  KillTaskResponse response =
      recordFactory.newRecordInstance(KillTaskResponse.class);
  return response;
}
Project: big-c
File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  // send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
  // this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
Project: hops
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    boolean rescheduleNewAttempt = false;
    if (event instanceof TaskTAttemptKilledEvent) {
      rescheduleNewAttempt =
          ((TaskTAttemptKilledEvent) event).getRescheduleAttempt();
    }
    task.addAndScheduleAttempt(Avataar.VIRGIN, rescheduleNewAttempt);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }
  } finally {
    writeLock.unlock();
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
  // The nextAttemptNumber is commit pending, decide on set the commitAttempt
  TaskAttemptId attemptID = ev.getTaskAttemptID();
  if (task.commitAttempt == null) {
    // TODO: validate attemptID
    task.commitAttempt = attemptID;
    LOG.info(attemptID + " given a go for committing the task output.");
  } else {
    // Don't think this can be a pluggable decision, so simply raise an
    // event for the TaskAttempt to delete its output.
    LOG.info(task.commitAttempt
        + " already given a go for committing the task output, so killing "
        + attemptID);
    task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
        SPECULATION + task.commitAttempt + " committed first!"));
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        // TA_KILL message to an attempt that doesn't need one for
        // other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    task.addAndScheduleAttempt(Avataar.VIRGIN);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hadoop-2.6.0-cdh5.4.3
File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
    throws IOException {
  TaskId taskId = request.getTaskId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task " + taskId + " received from " + callerUGI
      + " at " + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskEvent(taskId, TaskEventType.T_KILL));
  KillTaskResponse response =
      recordFactory.newRecordInstance(KillTaskResponse.class);
  return response;
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  // send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
  // this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
Project: hadoop-plus
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId,
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        // TA_KILL message to an attempt that doesn't need one for
        // other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(
          new TaskAttemptEvent(attempt.getID(),
              TaskAttemptEventType.TA_KILL));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: hops
File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
    throws IOException {
  TaskId taskId = request.getTaskId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task " + taskId + " received from " + callerUGI
      + " at " + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskEvent(taskId, TaskEventType.T_KILL));
  KillTaskResponse response =
      recordFactory.newRecordInstance(KillTaskResponse.class);
  return response;
}
Project: hadoop-plus
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop-plus
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hadoop-plus
File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  // send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
  // this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
Project: hops
File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hops
File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
        " generated for task: " + task.getID());
  }
  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}