Example source code for the Java class org.apache.hadoop.mapreduce.test.system.MRCluster
Project: hadoop-2.6.0-cdh5.4.3
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is not blacklisted.
* @param client tasktracker info
* @param conf modified configuration object
* @param cluster MRCluster instance
* @throws IOException thrown if verification fails
*/
public void verifyTTNotBlackListed(TTClient client, Configuration conf,
MRCluster cluster) throws IOException {
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
String defaultHealthScript = conf.get("mapred.healthChecker.script.path");
Assert.assertTrue("Task tracker is not healthy",
nodeHealthStatus(client, true) == true);
TaskTrackerStatus status = client.getStatus();
JTClient jclient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to healthy list",
jclient.getProxy().isBlackListed(status.getTrackerName()) == false);
Assert.assertTrue("Health script was not set",defaultHealthScript != null);
}
Project: hadoop-2.6.0-cdh5.4.3
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is blacklisted.
* @param conf modified Configuration object
* @param client tasktracker info
* @param errorMessage error message expected in the tracker's health report
* @param cluster MRCluster instance
* @throws IOException thrown when verification fails
*/
public void verifyTTBlackList(Configuration conf, TTClient client,
String errorMessage, MRCluster cluster) throws IOException{
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
//TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("Task tracker was never blacklisted ",
nodeHealthStatus(client, false) == true);
TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("The custom error message did not appear",
status.getHealthStatus().getHealthReport().trim().
equals(errorMessage));
JTClient jClient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to blacklisted list",
jClient.getProxy().isBlackListed(status.getTrackerName()) == true);
}
Project: hadoop-2.6.0-cdh5.4.3
File: HealthScriptHelper.java
/**
* Copies the error-inducing health script from the local node running the
* system tests to the node where the task tracker runs.
* @param scriptName name of the script to be copied
* @param hostname identifies the task tracker node
* @param remoteLocation location on the remote task tracker node
* @param cluster MRCluster instance
* @throws IOException thrown if the file copy fails
*/
public void copyFileToRemoteHost(String scriptName, String hostname,
String remoteLocation,MRCluster cluster) throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
String scriptDir = cluster.getConf().get(
HadoopDaemonRemoteCluster.CONF_SCRIPTDIR);
StringBuffer localFile = new StringBuffer();
localFile.append(scriptDir).append(File.separator).append(scriptName);
cmdArgs.add("scp");
cmdArgs.add(localFile.toString());
StringBuffer remoteFile = new StringBuffer();
remoteFile.append(hostname).append(":");
remoteFile.append(remoteLocation).append(File.separator).append(scriptName);
cmdArgs.add(remoteFile.toString());
executeCommand(cmdArgs);
}
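Taken together, the three helper methods above support a full blacklist round trip: copy a failing health script to a task tracker node, wait for the JobTracker to blacklist the tracker, then confirm it returns to the healthy list. The sketch below illustrates that flow and is not part of the listed sources; the script name, error message, and remotePath value are placeholders, and TTClient.getHostName() is assumed to be available on the Herriot system-test client.
public void runHealthScriptScenario(MRCluster cluster, Configuration conf,
    TTClient ttClient, String remotePath) throws IOException {
  HealthScriptHelper helper = new HealthScriptHelper();
  // Host of the task tracker under test; getHostName() is assumed here.
  String hostname = ttClient.getHostName();

  // Push the error-inducing script to the task tracker node over scp.
  helper.copyFileToRemoteHost("errorHealthScript.sh", hostname, remotePath,
      cluster);

  // conf is expected to already point mapred.healthChecker.script.path at
  // the pushed script; the tracker should then be blacklisted with the
  // script's message in its health report.
  helper.verifyTTBlackList(conf, ttClient,
      "ERROR task tracker is unhealthy", cluster);

  // (In the real tests the bad script is deleted from the remote node
  // before this recovery check.) The tracker should now be healthy again.
  helper.verifyTTNotBlackListed(ttClient, conf, cluster);
}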
Project: hadoop-2.6.0-cdh5.4.3
File: TestChildsKillingOfMemoryExceedsTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.cluster.max.map.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.map.memory.mb", 1024L);
prop.put("mapred.cluster.max.reduce.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.reduce.memory.mb", 1024L);
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
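The @BeforeClass snippets in this listing (before() and setUp()) rely on static fields declared elsewhere in each test class, and on a matching teardown. A rough sketch of that assumed fixture follows; the field types, initial values, and the @AfterClass body are assumptions rather than code from the listed sources, with tearDown() taken as the counterpart of the setUp() call used above.
// Assumed static fixture for the @BeforeClass snippets in this listing;
// names mirror the snippets, types and values are placeholders.
private static MRCluster cluster;
private static Configuration conf = new Configuration();
private static String confFile = "mapred-site.xml";
private static Path inputDir = new Path("input");

// A matching teardown; real tests may also restore the default
// configuration pushed by restartClusterWithNewConfig() here.
@AfterClass
public static void after() throws Exception {
  cluster.tearDown();
}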
Project: hadoop-2.6.0-cdh5.4.3
File: TestNodeDecommissioning.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
conf = cluster.getJTClient().getProxy().getDaemonConf();
String confFile = "mapred-site.xml";
Hashtable<String,String> prop = new Hashtable<String,String>();
prop.put("mapred.hosts.exclude", "/tmp/mapred.exclude");
prop.put("mapreduce.cluster.administrators", " gridadmin,hadoop,users");
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestChildsKillingOfSuspendTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts",1L);
prop.put("mapred.task.timeout",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException","org.apache.hadoop.metrics2.MetricsException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hadoop-on-lustre
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is not blacklisted.
* @param client tasktracker info
* @param conf modified configuration object
* @param cluster MRCluster instance
* @throws IOException thrown if verification fails
*/
public void verifyTTNotBlackListed(TTClient client, Configuration conf,
MRCluster cluster) throws IOException {
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
String defaultHealthScript = conf.get("mapred.healthChecker.script.path");
Assert.assertTrue("Task tracker is not healthy",
nodeHealthStatus(client, true) == true);
TaskTrackerStatus status = client.getStatus();
JTClient jclient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to healthy list",
jclient.getProxy().isBlackListed(status.getTrackerName()) == false);
Assert.assertTrue("Health script was not set",defaultHealthScript != null);
}
Project: hadoop-on-lustre
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is blacklisted.
* @param conf modified Configuration object
* @param client tasktracker info
* @param errorMessage error message expected in the tracker's health report
* @param cluster MRCluster instance
* @throws IOException thrown when verification fails
*/
public void verifyTTBlackList(Configuration conf, TTClient client,
String errorMessage, MRCluster cluster) throws IOException{
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
//TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("Task tracker was never blacklisted ",
nodeHealthStatus(client, false) == true);
TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("The custom error message did not appear",
status.getHealthStatus().getHealthReport().trim().
equals(errorMessage));
JTClient jClient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to blacklisted list",
jClient.getProxy().isBlackListed(status.getTrackerName()) == true);
}
Project: hadoop-on-lustre
File: HealthScriptHelper.java
/**
* Copies the error-inducing health script from the local node running the
* system tests to the node where the task tracker runs.
* @param scriptName name of the script to be copied
* @param hostname identifies the task tracker node
* @param remoteLocation location on the remote task tracker node
* @param cluster MRCluster instance
* @throws IOException thrown if the file copy fails
*/
public void copyFileToRemoteHost(String scriptName, String hostname,
String remoteLocation,MRCluster cluster) throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
String scriptDir = cluster.getConf().get(
HadoopDaemonRemoteCluster.CONF_SCRIPTDIR);
StringBuffer localFile = new StringBuffer();
localFile.append(scriptDir).append(File.separator).append(scriptName);
cmdArgs.add("scp");
cmdArgs.add(localFile.toString());
StringBuffer remoteFile = new StringBuffer();
remoteFile.append(hostname).append(":");
remoteFile.append(remoteLocation).append(File.separator).append(scriptName);
cmdArgs.add(remoteFile.toString());
executeCommand(cmdArgs);
}
Project: hadoop-on-lustre
File: TestChildsKillingOfMemoryExceedsTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.cluster.max.map.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.map.memory.mb", 1024L);
prop.put("mapred.cluster.max.reduce.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.reduce.memory.mb", 1024L);
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hadoop-on-lustre
File: TestNodeDecommissioning.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
conf = cluster.getJTClient().getProxy().getDaemonConf();
String confFile = "mapred-site.xml";
Hashtable<String,String> prop = new Hashtable<String,String>();
prop.put("mapred.hosts.exclude", "/tmp/mapred.exclude");
prop.put("mapreduce.cluster.administrators", " gridadmin,hadoop,users");
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
}
Project: hadoop-on-lustre
File: TestChildsKillingOfSuspendTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts",1L);
prop.put("mapred.task.timeout",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException","org.apache.hadoop.metrics2.MetricsException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hanoi-hadoop-2.0.0-cdh
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is not blacklisted.
* @param client tasktracker info
* @param conf modified configuration object
* @param cluster MRCluster instance
* @throws IOException thrown if verification fails
*/
public void verifyTTNotBlackListed(TTClient client, Configuration conf,
MRCluster cluster) throws IOException {
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
String defaultHealthScript = conf.get("mapred.healthChecker.script.path");
Assert.assertTrue("Task tracker is not healthy",
nodeHealthStatus(client, true) == true);
TaskTrackerStatus status = client.getStatus();
JTClient jclient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to healthy list",
jclient.getProxy().isBlackListed(status.getTrackerName()) == false);
Assert.assertTrue("Health script was not set",defaultHealthScript != null);
}
Project: hanoi-hadoop-2.0.0-cdh
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is blacklisted.
* @param conf modified Configuration object
* @param client tasktracker info
* @param errorMessage error message expected in the tracker's health report
* @param cluster MRCluster instance
* @throws IOException thrown when verification fails
*/
public void verifyTTBlackList(Configuration conf, TTClient client,
String errorMessage, MRCluster cluster) throws IOException{
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
//TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("Task tracker was never blacklisted ",
nodeHealthStatus(client, false) == true);
TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("The custom error message did not appear",
status.getHealthStatus().getHealthReport().trim().
equals(errorMessage));
JTClient jClient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to blacklisted list",
jClient.getProxy().isBlackListed(status.getTrackerName()) == true);
}
Project: hanoi-hadoop-2.0.0-cdh
File: HealthScriptHelper.java
/**
* Copies the error-inducing health script from the local node running the
* system tests to the node where the task tracker runs.
* @param scriptName name of the script to be copied
* @param hostname identifies the task tracker node
* @param remoteLocation location on the remote task tracker node
* @param cluster MRCluster instance
* @throws IOException thrown if the file copy fails
*/
public void copyFileToRemoteHost(String scriptName, String hostname,
String remoteLocation,MRCluster cluster) throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
String scriptDir = cluster.getConf().get(
HadoopDaemonRemoteCluster.CONF_SCRIPTDIR);
StringBuffer localFile = new StringBuffer();
localFile.append(scriptDir).append(File.separator).append(scriptName);
cmdArgs.add("scp");
cmdArgs.add(localFile.toString());
StringBuffer remoteFile = new StringBuffer();
remoteFile.append(hostname).append(":");
remoteFile.append(remoteLocation).append(File.separator).append(scriptName);
cmdArgs.add(remoteFile.toString());
executeCommand(cmdArgs);
}
Project: hanoi-hadoop-2.0.0-cdh
File: TestChildsKillingOfMemoryExceedsTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.cluster.max.map.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.map.memory.mb", 1024L);
prop.put("mapred.cluster.max.reduce.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.reduce.memory.mb", 1024L);
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hanoi-hadoop-2.0.0-cdh
File: TestNodeDecommissioning.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
conf = cluster.getJTClient().getProxy().getDaemonConf();
String confFile = "mapred-site.xml";
Hashtable<String,String> prop = new Hashtable<String,String>();
prop.put("mapred.hosts.exclude", "/tmp/mapred.exclude");
prop.put("mapreduce.cluster.administrators", " gridadmin,hadoop,users");
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
}
Project: hanoi-hadoop-2.0.0-cdh
File: TestChildsKillingOfSuspendTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts",1L);
prop.put("mapred.task.timeout",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException","org.apache.hadoop.metrics2.MetricsException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hortonworks-extension
File: TestNodeDecommissioning.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
conf = cluster.getJTClient().getProxy().getDaemonConf();
String confFile = "mapred-site.xml";
Hashtable<String,String> prop = new Hashtable<String,String>();
prop.put("mapred.hosts.exclude", "/tmp/mapred.exclude");
prop.put("mapreduce.cluster.administrators", " gridadmin,hadoop,users");
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
}
Project: hortonworks-extension
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is not blacklisted.
* @param client tasktracker info
* @param conf modified configuration object
* @param cluster MRCluster instance
* @throws IOException thrown if verification fails
*/
public void verifyTTNotBlackListed(TTClient client, Configuration conf,
MRCluster cluster) throws IOException {
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
String defaultHealthScript = conf.get("mapred.healthChecker.script.path");
Assert.assertTrue("Task tracker is not healthy",
nodeHealthStatus(client, true) == true);
TaskTrackerStatus status = client.getStatus();
JTClient jclient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to healthy list",
jclient.getProxy().isBlackListed(status.getTrackerName()) == false);
Assert.assertTrue("Health script was not set",defaultHealthScript != null);
}
Project: hortonworks-extension
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is blacklisted.
* @param conf modified Configuration object
* @param client tasktracker info
* @param errorMessage error message expected in the tracker's health report
* @param cluster MRCluster instance
* @throws IOException thrown when verification fails
*/
public void verifyTTBlackList(Configuration conf, TTClient client,
String errorMessage, MRCluster cluster) throws IOException{
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
//TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("Task tracker was never blacklisted ",
nodeHealthStatus(client, false) == true);
TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("The custom error message did not appear",
status.getHealthStatus().getHealthReport().trim().
equals(errorMessage));
JTClient jClient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to blacklisted list",
jClient.getProxy().isBlackListed(status.getTrackerName()) == true);
}
Project: hortonworks-extension
File: HealthScriptHelper.java
/**
* Copies the error-inducing health script from the local node running the
* system tests to the node where the task tracker runs.
* @param scriptName name of the script to be copied
* @param hostname identifies the task tracker node
* @param remoteLocation location on the remote task tracker node
* @param cluster MRCluster instance
* @throws IOException thrown if the file copy fails
*/
public void copyFileToRemoteHost(String scriptName, String hostname,
String remoteLocation,MRCluster cluster) throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
String scriptDir = cluster.getConf().get(
HadoopDaemonRemoteCluster.CONF_SCRIPTDIR);
StringBuffer localFile = new StringBuffer();
localFile.append(scriptDir).append(File.separator).append(scriptName);
cmdArgs.add("scp");
cmdArgs.add(localFile.toString());
StringBuffer remoteFile = new StringBuffer();
remoteFile.append(hostname).append(":");
remoteFile.append(remoteLocation).append(File.separator).append(scriptName);
cmdArgs.add(remoteFile.toString());
executeCommand(cmdArgs);
}
Project: hortonworks-extension
File: TestChildsKillingOfMemoryExceedsTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.cluster.max.map.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.map.memory.mb", 1024L);
prop.put("mapred.cluster.max.reduce.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.reduce.memory.mb", 1024L);
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hortonworks-extension
File: TestNodeDecommissioning.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
conf = cluster.getJTClient().getProxy().getDaemonConf();
String confFile = "mapred-site.xml";
Hashtable<String,String> prop = new Hashtable<String,String>();
prop.put("mapred.hosts.exclude", "/tmp/mapred.exclude");
prop.put("mapreduce.cluster.administrators", " gridadmin,hadoop,users");
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
}
Project: hortonworks-extension
File: TestChildsKillingOfSuspendTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts",1L);
prop.put("mapred.task.timeout",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException","org.apache.hadoop.metrics2.MetricsException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hortonworks-extension
File: HealthScriptHelper.java
/**
* Verifies that the given task tracker is blacklisted.
* @param conf modified Configuration object
* @param client tasktracker info
* @param errorMessage error message expected in the tracker's health report
* @param cluster MRCluster instance
* @throws IOException thrown when verification fails
*/
public void verifyTTBlackList(Configuration conf, TTClient client,
String errorMessage, MRCluster cluster) throws IOException{
int interval = conf.getInt("mapred.healthChecker.interval",0);
Assert.assertTrue("Interval cannot be zero.",interval != 0);
UtilsForTests.waitFor(interval+2000);
//TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("Task tracker was never blacklisted ",
nodeHealthStatus(client, false) == true);
TaskTrackerStatus status = client.getStatus();
Assert.assertTrue("The custom error message did not appear",
status.getHealthStatus().getHealthReport().trim().
equals(errorMessage));
JTClient jClient = cluster.getJTClient();
Assert.assertTrue("Failed to move task tracker to blacklisted list",
jClient.getProxy().isBlackListed(status.getTrackerName()) == true);
}
Project: hortonworks-extension
File: HealthScriptHelper.java
/**
* Copies the error-inducing health script from the local node running the
* system tests to the node where the task tracker runs.
* @param scriptName name of the script to be copied
* @param hostname identifies the task tracker node
* @param remoteLocation location on the remote task tracker node
* @param cluster MRCluster instance
* @throws IOException thrown if the file copy fails
*/
public void copyFileToRemoteHost(String scriptName, String hostname,
String remoteLocation,MRCluster cluster) throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
String scriptDir = cluster.getConf().get(
HadoopDaemonRemoteCluster.CONF_SCRIPTDIR);
StringBuffer localFile = new StringBuffer();
localFile.append(scriptDir).append(File.separator).append(scriptName);
cmdArgs.add("scp");
cmdArgs.add(localFile.toString());
StringBuffer remoteFile = new StringBuffer();
remoteFile.append(hostname).append(":");
remoteFile.append(remoteLocation).append(File.separator).append(scriptName);
cmdArgs.add(remoteFile.toString());
executeCommand(cmdArgs);
}
Project: hortonworks-extension
File: TestChildsKillingOfSuspendTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts",1L);
prop.put("mapred.task.timeout",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException","org.apache.hadoop.metrics2.MetricsException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hortonworks-extension
File: TestChildsKillingOfMemoryExceedsTask.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.cluster.max.map.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.map.memory.mb", 1024L);
prop.put("mapred.cluster.max.reduce.memory.mb", 2 * 1024L);
prop.put("mapred.cluster.reduce.memory.mb", 1024L);
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens", false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestJobHistoryLocation.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException", "org.apache.hadoop.metrics2.MetricsException"};
cluster.setExcludeExpList(expExcludeList);
conf = new Configuration(cluster.getConf());
cluster.setUp();
jobClient = cluster.getJTClient().getClient();
dfs = jobClient.getFs();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestJobSummary.java
@BeforeClass
public static void before() throws Exception {
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
createInput(inputDir, conf);
jtClient = cluster.getJTClient();
jobClient = jtClient.getClient();
remoteJTClient = cluster.getJTClient().getProxy();
conf = remoteJTClient.getDaemonConf();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestTaskTrackerInfoSuccessfulFailedJobs.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
cluster.setUp();
conf = new Configuration(cluster.getConf());
conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
remoteJTClient = cluster.getJTClient().getProxy();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestHiRamJobWithBlackListTT.java
@BeforeClass
public static void setUp() throws java.lang.Exception {
String [] expExcludeList = new String[2];
expExcludeList[0] = "java.net.ConnectException";
expExcludeList[1] = "java.io.IOException";
cluster = MRCluster.createCluster(new Configuration());
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
bListHelper = new HealthScriptHelper();
remotePath = cluster.getConf().get(TestHealthScriptError.remoteHSPath);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestCMExceptionDuringRunJob.java
@BeforeClass
public static void setUp() throws java.lang.Exception {
String [] expExcludeList = new String[2];
expExcludeList[0] = "java.net.ConnectException";
expExcludeList[1] = "java.io.IOException";
cluster = MRCluster.createCluster(new Configuration());
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestTaskChildsKilling.java
@BeforeClass
public static void before() throws Exception {
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.map.max.attempts", 1L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens",false);
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestPushConfig.java
@BeforeClass
public static void before() throws Exception {
String [] expExcludeList = new String[2];
expExcludeList[0] = "java.net.ConnectException";
expExcludeList[1] = "java.io.IOException";
cluster = MRCluster.createCluster(new Configuration());
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestLostTaskTracker.java
@BeforeClass
public static void before() throws Exception {
String [] expExcludeList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
Hashtable<String,Object> prop = new Hashtable<String,Object>();
prop.put("mapred.tasktracker.expiry.interval",30000L);
prop.put("mapreduce.job.complete.cancel.delegation.tokens",false);
cluster.restartClusterWithNewConfig(prop, confFile);
UtilsForTests.waitFor(1000);
conf = cluster.getJTClient().getProxy().getDaemonConf();
createInput(inputDir, conf);
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestHealthScriptPathError.java
@BeforeClass
public static void setUp() throws java.lang.Exception {
String [] expExcludeList = new String[2];
expExcludeList[0] = "java.net.ConnectException";
expExcludeList[1] = "java.io.IOException";
cluster = MRCluster.createCluster(new Configuration());
cluster.setExcludeExpList(expExcludeList);
cluster.setUp();
remotePath = cluster.getConf().get(remoteHSPath);
helper = new HealthScriptHelper();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestLinuxTaskControllerOtherUser.java
@BeforeClass
public static void before() throws Exception {
cluster = MRCluster.createCluster(conf);
cluster.setUp();
jtClient = cluster.getJTClient();
jobClient = jtClient.getClient();
remoteJTClient = cluster.getJTClient().getProxy();
conf = remoteJTClient.getDaemonConf();
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestJobKill.java
@BeforeClass
public static void setUp() throws Exception {
cluster = MRCluster.createCluster(new Configuration());
cluster.setUp();
fs = inDir.getFileSystem(cluster.getJTClient().getConf());
if(!fs.exists(inDir)){
fs.create(inDir);
}
if (fs.exists(outDir)) {
fs.delete(outDir,true);
}
}
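For reference, the lifecycle calls that recur in every snippet above (createCluster, setExcludeExpList, setUp, and the JTClient proxy) can be strung together into a minimal skeleton. The sketch below is illustrative only; the class name and test body are made up, and tearDown() is assumed to be the counterpart of the setUp() call used throughout the listing.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MRClusterLifecycleSketch {
  private static MRCluster cluster;

  @BeforeClass
  public static void setUp() throws Exception {
    cluster = MRCluster.createCluster(new Configuration());
    // Ignore transient connection noise while daemons come up, as the
    // snippets above do.
    cluster.setExcludeExpList(new String[] {
        "java.net.ConnectException", "java.io.IOException"});
    cluster.setUp();
  }

  @Test
  public void jobTrackerIsReachable() throws Exception {
    // The JobTracker proxy exposes the live daemon configuration.
    Configuration daemonConf =
        cluster.getJTClient().getProxy().getDaemonConf();
    Assert.assertNotNull("Could not read the JobTracker daemon configuration",
        daemonConf);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    cluster.tearDown();  // assumed counterpart to setUp()
  }
}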