Java class org.apache.hadoop.util.LinuxResourceCalculatorPlugin: example source code
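All of the snippets below, collected from several Hadoop forks, exercise the same three plugin getters behind an os.name guard. As a quick orientation before the per-project listings, here is a minimal, self-contained sketch of querying those getters; the ResourceProbe class name and the printed labels are illustrative and do not come from any of the projects below. The plugin reads its figures from /proc, so it only works on Linux.

import org.apache.hadoop.util.LinuxResourceCalculatorPlugin;

public class ResourceProbe {
  public static void main(String[] args) {
    // The plugin parses /proc (meminfo, cpuinfo, stat), so guard for Linux
    // exactly as the tests below do.
    if (!System.getProperty("os.name").startsWith("Linux")) {
      System.err.println("LinuxResourceCalculatorPlugin requires Linux.");
      return;
    }
    LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
    // The three getters used by every TestTTResourceReporting variant below:
    System.out.println("Total virtual memory (bytes):  " + plugin.getVirtualMemorySize());
    System.out.println("Total physical memory (bytes): " + plugin.getPhysicalMemorySize());
    System.out.println("Number of processors:          " + plugin.getNumProcessors());
  }
}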
Project: hadoop-2.6.0-cdh5.4.3
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
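For context: in the MR1-era code bases these tests come from, callers usually obtain a plugin through the static factory on the ResourceCalculatorPlugin base class rather than constructing LinuxResourceCalculatorPlugin directly, since the factory can fall back to null on unsupported platforms. A minimal sketch, assuming the MR1 ResourceCalculatorPlugin.getResourceCalculatorPlugin(Class, Configuration) signature; verify the exact API against the specific fork before relying on it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ResourceCalculatorPlugin;

public class PluginFactoryExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Passing null for the class lets the factory pick an OS-specific
    // implementation; on platforms it does not support, it returns null.
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    if (plugin == null) {
      System.err.println("No resource calculator available on this platform.");
      return;
    }
    System.out.println("Physical memory: " + plugin.getPhysicalMemorySize());
  }
}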
Project: hadoop-EAR
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
@Test(timeout=60000)
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
Project: hadoop-on-lustre
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
Project: RDFS
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
Project: hanoi-hadoop-2.0.0-cdh
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
Project: hortonworks-extension
File: TestTTResourceReporting.java
/**
 * Test that verifies that total memory values are calculated and reported
 * correctly.
 *
 * @throws Exception
 */
public void testResourceValuesOnLinux()
    throws Exception {
  if (!System.getProperty("os.name").startsWith("Linux")) {
    return;
  }
  JobConf conf = new JobConf();
  LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
  // In this case, we only check these three fields because they are static
  conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
  conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
  conf.setLong("numProcessors", plugin.getNumProcessors());
  try {
    setUpCluster(conf);
    runSleepJob(miniMRCluster.createJobConf());
    verifyTestResults(true);
  } finally {
    tearDownCluster();
  }
}
Project: hadoop-EAR
File: TestTaskTrackerMemoryManager.java
/**
 * Test for verifying that tasks causing cumulative usage of physical memory
 * to go beyond TT's limit get killed.
 *
 * @throws Exception
 */
public void testTasksCumulativelyExceedingTTPhysicalLimits()
    throws Exception {
  // Run the test only if memory management is enabled
  if (!isProcfsBasedTreeAvailable()) {
    return;
  }
  // Start cluster with proper configuration.
  JobConf fConf = new JobConf();
  // very small value, so that no task escapes to successful completion.
  fConf.set(TaskMemoryManagerThread.TT_MEMORY_MANAGER_MONITORING_INTERVAL,
      String.valueOf(300));
  // reserve all memory on TT so that the job will exceed memory limits
  LinuxResourceCalculatorPlugin memoryCalculatorPlugin =
      new LinuxResourceCalculatorPlugin();
  long totalPhysicalMemory = memoryCalculatorPlugin.getPhysicalMemorySize();
  long reservedPhysicalMemory = totalPhysicalMemory / (1024 * 1024) + 1;
  fConf.setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, 1024 * 1024L);
  fConf.setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, 1024 * 1024L);
  fConf.setLong(TaskMemoryManagerThread.TT_RESERVED_PHYSICAL_MEMORY_MB,
      reservedPhysicalMemory);
  long maxRssMemoryAllowedForAllTasks = totalPhysicalMemory -
      reservedPhysicalMemory * 1024 * 1024L;
  Pattern physicalMemoryOverLimitPattern = Pattern.compile(
      "Killing.*" + maxRssMemoryAllowedForAllTasks);
  TaskMemoryManagerThread.disableUpdateReservedPhysicalMemory();
  startCluster(fConf);
  Matcher mat = null;
  // Set up job.
  JobConf conf = new JobConf(miniMRCluster.createJobConf());
  JobClient jClient = new JobClient(conf);
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  // Start the job
  RunningJob job =
      jClient.submitJob(sleepJob.setupJobConf(1, 1, 5000, 1, 1000, 1));
  boolean TTOverFlowMsgPresent = false;
  while (true) {
    List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
    allTaskReports.addAll(Arrays.asList(jClient
        .getSetupTaskReports((org.apache.hadoop.mapred.JobID) job.getID())));
    allTaskReports.addAll(Arrays.asList(jClient
        .getMapTaskReports((org.apache.hadoop.mapred.JobID) job.getID())));
    for (TaskReport tr : allTaskReports) {
      String[] diag = tr.getDiagnostics();
      for (String str : diag) {
        mat = physicalMemoryOverLimitPattern.matcher(str);
        if (mat.find()) {
          TTOverFlowMsgPresent = true;
        }
      }
    }
    if (TTOverFlowMsgPresent) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // nothing
    }
  }
  // If it comes here without a test-timeout, it means there was a task that
  // was killed because of crossing cumulative TT limit.
  // Test succeeded, kill the job.
  job.killJob();
}
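The reservation arithmetic above is worth unpacking: dividing totalPhysicalMemory (in bytes) by 1024 * 1024 converts it to MB, and adding 1 reserves slightly more memory than the machine actually has, so maxRssMemoryAllowedForAllTasks comes out negative and every task immediately exceeds the cumulative limit. A small worked sketch, assuming an 8 GiB machine (the figure is illustrative only; the real value comes from getPhysicalMemorySize()):

public class ReservationArithmetic {
  public static void main(String[] args) {
    long totalPhysicalMemory = 8L * 1024 * 1024 * 1024; // assume 8 GiB, in bytes
    // Bytes-to-MB conversion plus one extra MB, as in the test above.
    long reservedPhysicalMemoryMb = totalPhysicalMemory / (1024 * 1024) + 1; // 8193
    long maxRssForAllTasks =
        totalPhysicalMemory - reservedPhysicalMemoryMb * 1024 * 1024L;
    // Prints -1048576: the allowed RSS is negative, so any running task
    // exceeds it at once and the TaskMemoryManagerThread kills it.
    System.out.println(maxRssForAllTasks);
  }
}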
Project: RDFS
File: TestTaskTrackerMemoryManager.java
/**
 * Test for verifying that tasks causing cumulative usage of physical memory
 * to go beyond TT's limit get killed.
 *
 * @throws Exception
 */
public void testTasksCumulativelyExceedingTTPhysicalLimits()
    throws Exception {
  // Run the test only if memory management is enabled
  if (!isProcfsBasedTreeAvailable()) {
    return;
  }
  // Start cluster with proper configuration.
  JobConf fConf = new JobConf();
  // very small value, so that no task escapes to successful completion.
  fConf.set(TaskMemoryManagerThread.TT_MEMORY_MANAGER_MONITORING_INTERVAL,
      String.valueOf(300));
  // reserve all memory on TT so that the job will exceed memory limits
  LinuxResourceCalculatorPlugin memoryCalculatorPlugin =
      new LinuxResourceCalculatorPlugin();
  long totalPhysicalMemory = memoryCalculatorPlugin.getPhysicalMemorySize();
  long reservedPhysicalMemory = totalPhysicalMemory / (1024 * 1024) + 1;
  fConf.setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, 1024 * 1024L);
  fConf.setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, 1024 * 1024L);
  fConf.setLong(TaskMemoryManagerThread.TT_RESERVED_PHYSICAL_MEMORY_MB,
      reservedPhysicalMemory);
  long maxRssMemoryAllowedForAllTasks = totalPhysicalMemory -
      reservedPhysicalMemory * 1024 * 1024L;
  Pattern physicalMemoryOverLimitPattern = Pattern.compile(
      "Killing.*" + maxRssMemoryAllowedForAllTasks);
  TaskMemoryManagerThread.disableUpdateReservedPhysicalMemory();
  startCluster(fConf);
  Matcher mat = null;
  // Set up job.
  JobConf conf = new JobConf(miniMRCluster.createJobConf());
  JobClient jClient = new JobClient(conf);
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  // Start the job
  RunningJob job =
      jClient.submitJob(sleepJob.setupJobConf(1, 1, 5000, 1, 1000, 1));
  boolean TTOverFlowMsgPresent = false;
  while (true) {
    List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
    allTaskReports.addAll(Arrays.asList(jClient
        .getSetupTaskReports((org.apache.hadoop.mapred.JobID) job.getID())));
    allTaskReports.addAll(Arrays.asList(jClient
        .getMapTaskReports((org.apache.hadoop.mapred.JobID) job.getID())));
    for (TaskReport tr : allTaskReports) {
      String[] diag = tr.getDiagnostics();
      for (String str : diag) {
        mat = physicalMemoryOverLimitPattern.matcher(str);
        if (mat.find()) {
          TTOverFlowMsgPresent = true;
        }
      }
    }
    if (TTOverFlowMsgPresent) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // nothing
    }
  }
  // If it comes here without a test-timeout, it means there was a task that
  // was killed because of crossing cumulative TT limit.
  // Test succeeded, kill the job.
  job.killJob();
}