本文整理汇总了Java中org.apache.hadoop.mapred.TaskStatus.Phase类的典型用法代码示例。如果您正苦于以下问题:Java Phase类的具体用法?Java Phase怎么用?Java Phase使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Phase类属于org.apache.hadoop.mapred.TaskStatus包,在下文中一共展示了Phase类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: TaskInProgress
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
this.task = task;
this.launcher = launcher;
this.lastProgressReport = System.currentTimeMillis();
this.ttConf = conf;
localJobConf = null;
taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
0.0f,
task.getNumSlotsRequired(),
task.getState(),
diagnosticInfo.toString(),
"initializing",
getName(),
task.isTaskCleanupTask() ?
TaskStatus.Phase.CLEANUP :
task.isMapTask()? TaskStatus.Phase.MAP:
TaskStatus.Phase.SHUFFLE,
task.getCounters());
taskTimeout = (10 * 60 * 1000);
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:21,代码来源:TaskTracker.java
示例2: setTaskFailState
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Moves the task's run state to the appropriate failure/kill state.
 *
 * Attempts already in an UNCLEAN state are promoted straight to their final
 * state; map/reduce attempts not yet in CLEANUP go through an UNCLEAN state
 * first; everything else goes directly to FAILED/KILLED.
 *
 * @param wasFailure true for a failure, false for a kill
 */
private void setTaskFailState(boolean wasFailure) {
  TaskStatus.State current = taskStatus.getRunState();
  // FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED, unconditionally.
  if (current == TaskStatus.State.FAILED_UNCLEAN) {
    taskStatus.setRunState(TaskStatus.State.FAILED);
    return;
  }
  if (current == TaskStatus.State.KILLED_UNCLEAN) {
    taskStatus.setRunState(TaskStatus.State.KILLED);
    return;
  }
  // Real map/reduce work that has not reached CLEANUP still needs its
  // cleanup run, so park it in an UNCLEAN state first.
  boolean needsCleanup = task.isMapOrReduce()
      && taskStatus.getPhase() != TaskStatus.Phase.CLEANUP;
  if (needsCleanup) {
    taskStatus.setRunState(wasFailure
        ? TaskStatus.State.FAILED_UNCLEAN
        : TaskStatus.State.KILLED_UNCLEAN);
  } else {
    taskStatus.setRunState(wasFailure
        ? TaskStatus.State.FAILED
        : TaskStatus.State.KILLED);
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:23,代码来源:TaskTracker.java
示例3: processingRate
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Test helper: injects a status report that carries a processing-rate
 * counter update for the given task attempt, preserving the attempt's
 * recorded start/shuffle/sort times from its previous status.
 *
 * @param taskId       attempt whose status is being updated
 * @param counterName  counter that encodes the processing rate
 * @param counterValue new value for that counter
 * @param progress     task progress, expected in [0, 1]
 * @param p            phase the attempt is reported to be in
 */
public void processingRate(TaskAttemptID taskId, Task.Counter counterName,
long counterValue, float progress, Phase p) {
  TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId);
  Counters counters = tip.getCounters();
  // Sanity-check the phase against the task type (active only with -ea).
  if(tip.isMapTask()) {
    assert p == Phase.MAP : "Map task but phase is " + p;
  } else {
    // NOTE(review): this condition looks inverted — for a reduce task one
    // would expect the phase to BE one of SHUFFLE/SORT/REDUCE, yet the
    // assert rejects exactly those. Confirm intent against callers.
    assert ((p != Phase.SHUFFLE) &&
    (p != Phase.SORT) &&
    (p != Phase.REDUCE)) : "Reduce task, but phase is " + p;
  }
  // Build a fresh RUNNING status at the given progress and phase.
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
  progress, 1, TaskStatus.State.RUNNING, "", "",
  tip.machineWhereTaskRan(taskId), p , counters);
  //need to keep the time: carry timing over from the previous status so
  //this update does not reset the attempt's history
  TaskStatus oldStatus = tip.getTaskStatus(taskId);
  status.setStartTime(oldStatus.getStartTime());
  if(!tip.isMapTask()) {
    status.setShuffleFinishTime(oldStatus.getShuffleFinishTime());
    status.setSortFinishTime(oldStatus.getSortFinishTime());
  }
  tip.getCounters().findCounter(counterName).setValue(counterValue);
  updateTaskStatus(tip, status);
  LOG.info(tip.getCounters().toString());
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:26,代码来源:FakeObjectUtilities.java
示例4: TaskInProgress
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
this.task = task;
this.launcher = launcher;
this.lastProgressReport = System.currentTimeMillis();
this.defaultJobConf = conf;
localJobConf = null;
taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
0.0f,
task.getState(),
diagnosticInfo.toString(),
"initializing",
getName(),
task.isTaskCleanupTask() ?
TaskStatus.Phase.CLEANUP :
task.isMapTask()? TaskStatus.Phase.MAP:
TaskStatus.Phase.SHUFFLE,
task.getCounters());
taskTimeout = (10 * 60 * 1000);
}
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:20,代码来源:TaskTracker.java
示例5: TaskInProgress
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Creates the tracker-side record for a task attempt and seeds an
 * "initializing" TaskStatus. This GPU-aware variant additionally records
 * whether the task runs on a GPU.
 *
 * @param task     the task attempt to track
 * @param conf     the tracker's default job configuration
 * @param launcher the launcher that will start this task
 */
public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
  this.task = task;
  this.launcher = launcher;
  this.lastProgressReport = System.currentTimeMillis();
  this.defaultJobConf = conf;
  // Job-local conf is attached later, during localization.
  localJobConf = null;
  // Initial phase: CLEANUP for cleanup tasks, MAP for maps, SHUFFLE for
  // reduces.
  taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
      0.0f,
      task.getState(),
      diagnosticInfo.toString(),
      "initializing",
      getName(),
      task.isTaskCleanupTask() ?
          TaskStatus.Phase.CLEANUP :
          task.isMapTask()? TaskStatus.Phase.MAP:
          TaskStatus.Phase.SHUFFLE,
      task.runOnGPU(),
      task.getCounters());
  taskTimeout = (10 * 60 * 1000); // 10 minutes without progress
}
开发者ID:koichi626,项目名称:hadoop-gpu,代码行数:21,代码来源:TaskTracker.java
示例6: reducesInShuffle
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Scans all localized running jobs for reduce tasks still in the SHUFFLE
 * phase and returns one FetchStatus per such job, lazily creating and
 * registering a FetchStatus for jobs that do not have one yet.
 *
 * @return fetch-status objects for the jobs whose map-completion events
 *         still need to be queried from the JobTracker
 */
private List <FetchStatus> reducesInShuffle() {
  List <FetchStatus> fList = new ArrayList<FetchStatus>();
  for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) {
    RunningJob rjob = item.getValue();
    // A job that has not been localized yet cannot have shuffling reduces.
    if (!rjob.localized) {
      continue;
    }
    JobID jobId = item.getKey();
    FetchStatus f;
    // Lock the job while inspecting/registering its fetch status.
    synchronized (rjob) {
      f = rjob.getFetchStatus();
      for (TaskInProgress tip : rjob.tasks) {
        Task task = tip.getTask();
        if (!task.isMapTask()) {
          if (((ReduceTask)task).getPhase() ==
              TaskStatus.Phase.SHUFFLE) {
            if (rjob.getFetchStatus() == null) {
              //this is a new job; we start fetching its map events
              f = new FetchStatus(jobId,
                  ((ReduceTask)task).getNumMaps());
              rjob.setFetchStatus(f);
            }
            f = rjob.getFetchStatus();
            fList.add(f);
            // One shuffling reduce is enough to know this job needs event
            // fetching.
            break; //no need to check any more tasks belonging to this
          }
        }
      }
    }
  }
  //at this point, we have information about for which of
  //the running jobs do we need to query the jobtracker for map
  //outputs (actually map events).
  return fList;
}
示例7: finishTask
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/** Test helper: drives the given attempt to SUCCEEDED at 100% progress. */
public void finishTask(TaskAttemptID taskId) {
  TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId);
  Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  String host = tip.machineWhereTaskRan(taskId);
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      1.0f, 1, TaskStatus.State.SUCCEEDED, "", "", host, phase,
      new Counters());
  updateTaskStatus(tip, status);
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:9,代码来源:FakeObjectUtilities.java
示例8: makeRunning
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/** Test helper: registers the attempt on the tracker and marks it RUNNING. */
private void makeRunning(TaskAttemptID taskId, TaskInProgress tip,
    String taskTracker) {
  String host = JobInProgress.convertTrackerNameToHostName(taskTracker);
  addRunningTaskToTIP(tip, taskId, new TaskTrackerStatus(taskTracker, host),
      true);
  Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      0.0f, 1, TaskStatus.State.RUNNING, "", "", taskTracker, phase,
      new Counters());
  updateTaskStatus(tip, status);
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:10,代码来源:FakeObjectUtilities.java
示例9: progressMade
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/** Test helper: reports the given progress fraction for a RUNNING attempt. */
public void progressMade(TaskAttemptID taskId, float progress) {
  TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId);
  Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      progress, 1, TaskStatus.State.RUNNING, "", "",
      tip.machineWhereTaskRan(taskId), phase, new Counters());
  updateTaskStatus(tip, status);
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:9,代码来源:FakeObjectUtilities.java
示例10: getRate
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Returns the recorded processing rate for the given phase.
 *
 * @param p one of MAP, SHUFFLE, SORT or REDUCE
 * @return the rate recorded for that phase (SHUFFLE maps to the copy rate)
 * @throws RuntimeException if {@code p} is any other phase
 */
public double getRate(Phase p) {
  // Flat lookup; deliberately not a switch so that a null phase falls
  // through to the same RuntimeException as before instead of an NPE.
  if (p == Phase.MAP) {
    return this.mapRate;
  }
  if (p == Phase.SHUFFLE) {
    return this.copyRate;
  }
  if (p == Phase.SORT) {
    return this.sortRate;
  }
  if (p == Phase.REDUCE) {
    return this.reduceRate;
  }
  throw new RuntimeException("Invalid phase " + p);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:14,代码来源:TaskInProgress.java
示例11: getProcessingRate
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Get the processing rate for this task (e.g. bytes/ms in reduce).
 *
 * @param phase the phase to query; only MAP, SHUFFLE, SORT and REDUCE carry
 *              rate information
 * @return the recorded rate for {@code phase}, or 0 for phases (starting,
 *         cleanup) for which no rate is tracked
 */
public double getProcessingRate(TaskStatus.Phase phase) {
  // we don't have processing rate information for the starting and cleaning
  // up phase
  if (phase != TaskStatus.Phase.MAP &&
      phase != TaskStatus.Phase.SHUFFLE &&
      phase != TaskStatus.Phase.SORT &&
      phase != TaskStatus.Phase.REDUCE) {
    return 0;
  }
  // BUG FIX: the original validated `phase` but then queried
  // getRate(getProcessingPhase()), ignoring the parameter — the guard above
  // could pass while a rate for a different (possibly untracked) phase was
  // returned. Use the validated parameter instead.
  return processingRates.getRate(phase);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:15,代码来源:TaskInProgress.java
示例12: canBeSpeculatedUsingProcessingRate
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Decides whether this task should be speculated, based on its processing
 * rate compared to the statistics of its sibling tasks.
 *
 * For the map task, the bytes processed/sec is used as the processing rate.
 * For the reduce task a different rate is used per phase:
 *   copy:   bytes copied/sec
 *   sort:   the accumulated progress rate
 *   reduce: bytes processed/sec
 *
 * @param currentTime current time (unused in this check; presumably kept for
 *                    signature symmetry with other canBeSpeculated* methods
 *                    — TODO confirm)
 * @return true if this task's rate lags the siblings' mean by more than the
 *         allowed threshold
 */
boolean canBeSpeculatedUsingProcessingRate(long currentTime) {
  TaskStatus.Phase p = getProcessingPhase();
  // check if the task is on one of following four phases; the other phases
  // carry no rate information, so rate-based speculation is impossible
  if ((p != TaskStatus.Phase.MAP) &&
      (p != TaskStatus.Phase.SHUFFLE) &&
      (p != TaskStatus.Phase.SORT) &&
      (p != TaskStatus.Phase.REDUCE)) {
    return false;
  }
  DataStatistics taskStats = job.getRunningTaskStatistics(p);
  if (LOG.isDebugEnabled()) {
    LOG.debug("TaskID: " + this.id + "processing phase is " + p +
        " and processing rate for this phase is " +
        getProcessingRate(p));
  }
  // Find if task should be speculated based on standard deviation:
  // maxDiff is the max difference allowed between this task's progress rate
  // and the mean progress rate of sibling tasks.
  double maxDiff = (taskStats.std() == 0 ?
      taskStats.mean()/3 :
      job.getSlowTaskThreshold() * taskStats.std());
  // if stddev > mean - we are stuck. cap the max difference at a
  // more meaningful number.
  maxDiff = Math.min(maxDiff, taskStats.mean() * job.getStddevMeanRatioMax());
  return (taskStats.mean() - processingRates.getRate(p) > maxDiff);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:41,代码来源:TaskInProgress.java
示例13: makeRunning
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Test helper: registers the attempt on the tracker and marks it RUNNING
 * with the given start time.
 */
private void makeRunning(TaskAttemptID taskId, TaskInProgress tip,
    String taskTracker, long startTime) {
  addRunningTaskToTIP(tip, taskId, new TaskTrackerStatus(taskTracker,
      JobInProgress.convertTrackerNameToHostName(taskTracker)), true);
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      0.0f, 1, TaskStatus.State.RUNNING, "", "", taskTracker,
      tip.isMapTask() ? Phase.MAP : Phase.REDUCE, new Counters());
  status.setStartTime(startTime);
  updateTaskStatus(tip, status);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:13,代码来源:FakeObjectUtilities.java
示例14: failTask
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/** Test helper: drives the given attempt to FAILED at 100% progress. */
public void failTask(TaskAttemptID taskId) {
  TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId);
  Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  String host = tip.machineWhereTaskRan(taskId);
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      1.0f, 1, TaskStatus.State.FAILED, "", "", host, phase,
      new Counters());
  updateTaskStatus(tip, status);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:10,代码来源:FakeObjectUtilities.java
示例15: killTask
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/** Test helper: drives the given attempt to KILLED at 100% progress. */
public void killTask(TaskAttemptID taskId) {
  TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId);
  Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  String host = tip.machineWhereTaskRan(taskId);
  TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      1.0f, 1, TaskStatus.State.KILLED, "", "", host, phase,
      new Counters());
  updateTaskStatus(tip, status);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:9,代码来源:FakeObjectUtilities.java
示例16: parseMap
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Populates this load-info object from a parsed tracker-status map
 * (JSON-style: Boolean/Long/Double/String values, Object[] task list).
 *
 * Side effects: sets the tracker-level fields, appends one TaskInfo per
 * reported task to {@code localTasksInfo}, and counts map tasks that are
 * SUCCEEDED or RUNNING in {@code totalMapTasks}.
 *
 * @param trackerInfo parsed status map for one TaskTracker
 */
public void parseMap(Map<String, Object> trackerInfo) {
  active = (Boolean) trackerInfo.get("active");
  lastSeen = (Long) trackerInfo.get("last_seen");
  maxMapTasks = ((Long) trackerInfo.get("map_tasks_max")).intValue();
  maxReduceTasks = ((Long) trackerInfo.get("reduce_tasks_max")).intValue();
  Object[] tasks = (Object[]) trackerInfo.get("tasks");
  for (Object task : tasks) {
    // Upstream parser produces Map<String, Object> entries.
    @SuppressWarnings("unchecked")
    Map<String, Object> taskMap = (Map<String, Object>) task;
    int jobId = ((Long) taskMap.get("job_id")).intValue();
    int taskId = ((Long) taskMap.get("task_id")).intValue();
    int attempt = ((Long) taskMap.get("attempt")).intValue();
    boolean map = taskMap.get("type").equals("map");
    double taskProgress = (Double) taskMap.get("progress");
    long startTime = (Long) taskMap.get("start_time");
    long runningTime = (Long) taskMap.get("running_time");
    TaskStatus.State taskState =
        TaskStatus.State.valueOf(taskMap.get("state").toString());
    TaskStatus.Phase taskPhase =
        TaskStatus.Phase.valueOf(taskMap.get("phase").toString());
    TaskInfo taskInfo = new TaskInfo(jobId, taskId, attempt, map,
        startTime, runningTime, taskProgress, taskPhase, taskState);
    // BUG FIX: the original condition was `map && a || b`; since && binds
    // tighter than ||, any RUNNING task — including reduces — was counted
    // as a map task. Parenthesize so only map tasks are counted.
    if (map && (taskState == TaskStatus.State.SUCCEEDED
        || taskState == TaskStatus.State.RUNNING)) {
      totalMapTasks++;
    }
    localTasksInfo.add(taskInfo);
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:35,代码来源:TaskTrackerLoadInfo.java
示例17: TaskInfo
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Builds a snapshot of one task attempt's state on a tracker.
 *
 * @param jobId        numeric job id
 * @param taskId       numeric task id within the job
 * @param attempt      attempt number of this task
 * @param map          true for a map task, false for a reduce
 * @param startTime    attempt start time (ms)
 * @param runningTime  elapsed running time (ms)
 * @param taskProgress progress fraction as reported by the tracker
 * @param taskPhase    phase the attempt is in
 * @param taskState    run state of the attempt
 */
public TaskInfo(int jobId, int taskId, int attempt, boolean map,
    long startTime, long runningTime, double taskProgress,
    Phase taskPhase, State taskState) {
  // Identity of the attempt.
  this.jobId = jobId;
  this.taskId = taskId;
  this.attempt = attempt;
  this.map = map;
  // Timing and progress.
  this.startTime = startTime;
  this.runningTime = runningTime;
  this.taskProgress = taskProgress;
  // Reported phase and state.
  this.taskPhase = taskPhase;
  this.taskState = taskState;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:15,代码来源:TaskTrackerLoadInfo.java
示例18: handleKillTaskAction
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Kills a task attempt in the simulation: brings its progress up to date,
 * finalizes a cloned status as KILLED, and records the attempt id if a
 * completion event may already be in flight for it.
 *
 * @param action contains the task attempt to kill
 * @param now current simulation time
 * @return new events generated in response, empty
 */
private List<SimulatorEvent> handleKillTaskAction(KillTaskAction action, long now) {
  TaskAttemptID taskId = action.getTaskID();
  // we don't have a nice(r) toString() in Hadoop's TaskActions
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling kill task action, taskId=" + taskId + ", now=" + now);
  }
  SimulatorTaskInProgress tip = tasks.get(taskId);
  // Safety check: We might get a KillTaskAction even for completed reduces
  if (tip == null) {
    return SimulatorEngine.EMPTY_EVENTS;
  }
  progressTaskStatus(tip, now); // make progress up to date
  // Work on a clone so the stored status is not mutated while finalizing.
  TaskStatus finalStatus = (TaskStatus)tip.getTaskStatus().clone();
  finalStatus.setFinishTime(now);
  finalStatus.setRunState(State.KILLED);
  finishRunningTask(finalStatus, now);
  if (finalStatus.getIsMap() || finalStatus.getPhase() == Phase.REDUCE) {
    // if we have already created a task attempt completion event we remember
    // the task id, so that we can safely ignore the event when its delivered
    orphanTaskCompletions.add(taskId);
  }
  return SimulatorEngine.EMPTY_EVENTS;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:35,代码来源:SimulatorTaskTracker.java
示例19: handleAllMapsCompletedTaskAction
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Starts "running" the REDUCE phase of a reduce upon being notified that
 * all map tasks are (successfully) done: validates that the notified
 * attempt is a reduce still in SHUFFLE, advances it to REDUCE with shuffle
 * and sort finish times set to {@code now}, and schedules its completion.
 *
 * @param action contains the notification for one of the reduce tasks
 * @param now current simulation time
 * @return new events generated, a single TaskAttemptCompletionEvent for the
 *         reduce
 * @throws IllegalStateException if the notified attempt is a map
 * @throws IllegalArgumentException if the reduce was already notified
 *         (i.e. is no longer in the SHUFFLE phase)
 */
private List<SimulatorEvent> handleAllMapsCompletedTaskAction(
    AllMapsCompletedTaskAction action, long now) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling all maps completed task action " + action);
  }
  TaskAttemptID taskId = action.getTaskID();
  SimulatorTaskInProgress tip = tasks.get(taskId);
  // If tip is null here it is because the task attempt to be notified is
  // unknown to this TaskTracker.
  TaskStatus status = tip.getTaskStatus();
  if (status.getIsMap()) {
    throw new IllegalStateException(
        "Invalid AllMapsCompletedTaskAction, task attempt "
        + "to be notified is a map: " + taskId + " " + status);
  }
  if (status.getPhase() != Phase.SHUFFLE) {
    throw new IllegalArgumentException(
        "Reducer task attempt already notified: " + taskId + " " + status);
  }
  // Warning: setPhase() uses System.currentTimeMillis() internally to
  // set shuffle and sort times, but we overwrite that manually anyway
  status.setPhase(Phase.REDUCE);
  status.setShuffleFinishTime(now);
  status.setSortFinishTime(now);
  // Forecast the completion of this reduce
  TaskAttemptCompletionEvent e = createTaskAttemptCompletionEvent(tip, now);
  return Collections.<SimulatorEvent>singletonList(e);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:41,代码来源:SimulatorTaskTracker.java
示例20: expectMapTask
import org.apache.hadoop.mapred.TaskStatus.Phase; //导入依赖的package包/类
/**
 * Registers the expectation that the given map attempt completes
 * successfully: a SUCCEEDED completion event is expected to be scheduled at
 * {@code mapStart} with a finish time of {@code mapStart + mapRuntime}.
 *
 * @param taskTracker the simulated tracker running the map
 * @param taskId      the (new-API) attempt id of the map
 * @param mapStart    simulation time the map starts
 * @param mapRuntime  simulated duration of the map
 */
public void expectMapTask(SimulatorTaskTracker taskTracker,
                          TaskAttemptID taskId,
                          long mapStart, long mapRuntime) {
  // The status objects still use the old mapred API's attempt id.
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  MapTaskStatus status = new MapTaskStatus(taskIdOldApi, 1.0f, 1,
      State.SUCCEEDED, null, null, null, Phase.MAP, null);
  status.setFinishTime(mapStart + mapRuntime);
  addExpected(mapStart,
      new TaskAttemptCompletionEvent(taskTracker, status));
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:14,代码来源:CheckedEventQueue.java
注:本文中的org.apache.hadoop.mapred.TaskStatus.Phase类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论