Java 类org.apache.hadoop.util.ExitUtil 实例源码
项目:hadoop
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with a plain {@code Error}.
 *
 * <p>{@code ExitUtil.disableSystemExit()} is called first so that the
 * handler's {@code System.exit(-1)} path cannot terminate the test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithError()
    throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler handlerSpy = spy(exHandler);
  final java.lang.Error thrownError = new java.lang.Error("test-error");
  final Thread failingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared Error so the uncaught-exception hook fires.
      throw thrownError;
    }
  });
  failingThread.setUncaughtExceptionHandler(handlerSpy);
  assertSame(handlerSpy, failingThread.getUncaughtExceptionHandler());
  failingThread.start();
  failingThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(handlerSpy).uncaughtException(failingThread, thrownError);
}
项目:hadoop
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with an {@code OutOfMemoryError}.
 *
 * <p>{@code ExitUtil.disableSystemHalt()} is called first so that the
 * handler's {@code Runtime.getRuntime().halt(-1)} path cannot kill the
 * test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError()
    throws InterruptedException {
  ExitUtil.disableSystemHalt();
  final YarnUncaughtExceptionHandler oomHandlerSpy = spy(exHandler);
  final OutOfMemoryError simulatedOom =
      new OutOfMemoryError("out-of-memory-error");
  final Thread oomThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared OOM so the uncaught-exception hook fires.
      throw simulatedOom;
    }
  });
  oomThread.setUncaughtExceptionHandler(oomHandlerSpy);
  assertSame(oomHandlerSpy, oomThread.getUncaughtExceptionHandler());
  oomThread.start();
  oomThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(oomHandlerSpy).uncaughtException(oomThread, simulatedOom);
}
项目:hadoop
文件:NodeManager.java
/**
 * Stops this NodeManager on a dedicated thread so the caller is not
 * blocked by service shutdown.
 */
protected void shutDown() {
  Thread stopper = new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        // Terminate the JVM only when configured to exit on shutdown
        // events and no JVM shutdown hook is already tearing down.
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  };
  stopper.start();
}
项目:hadoop
文件:ApplicationHistoryServer.java
/**
 * Creates, initializes and starts an {@link ApplicationHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
      LOG);
  ApplicationHistoryServer server = null;
  try {
    server = new ApplicationHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return server;
}
项目:hadoop
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with no arguments and
 * fails the test if startup attempts to exit the JVM.
 */
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:hadoop
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with -D overrides on the
 * command line and verifies they end up in the server's configuration.
 */
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    String[] args = {
        "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS
            + "=4000",
        "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200" };
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000",
        conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:hadoop
文件:ApplicationMaster.java
/**
 * Entry point for the ApplicationMaster process.
 *
 * <p>Exit codes: 0 on success (or when {@code init} declines to run),
 * 1 on an unexpected error, 2 when the application finished
 * unsuccessfully.
 *
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean succeeded = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    if (!appMaster.init(args)) {
      // init() asked us not to run (e.g. help was printed): exit cleanly.
      System.exit(0);
    }
    appMaster.run();
    succeeded = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    // Flush log4j buffers before terminating the JVM.
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (succeeded) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
项目:hadoop
文件:JobHistoryServer.java
/**
 * Creates, initializes and starts a {@link JobHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer historyServer = null;
  try {
    historyServer = new JobHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(historyServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    historyServer.init(conf);
    historyServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return historyServer;
}
项目:hadoop
文件:TestClusterId.java
/**
 * Prepares a clean name directory and default format options before each
 * test, with JVM exit disabled so NameNode format errors surface as
 * {@code ExitException}s.
 *
 * @throws IOException if the previous test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  hdfsDir = new File(PathUtils.getTestDirName(getClass()), "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
  // Some tests flip these flags; restore the defaults before every test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
项目:scheduling-connector-for-hadoop
文件:HPCLogAggregateHandler.java
/**
 * Entry point for standalone log aggregation of a finished application.
 *
 * <p>Any failure (including missing arguments) is logged and terminates
 * the JVM with status 1 via {@code ExitUtil}.
 *
 * @param args [0] application id, [1] application user
 */
public static void main(String[] args) {
  try {
    Configuration conf = new YarnConfiguration();
    String applicationId = args[0];
    String applicationUser = args[1];
    HPCLogAggregateHandler handler =
        new HPCLogAggregateHandler(applicationId, applicationUser);
    initAndStartAggregation(conf, applicationUser, handler);
    // Stop the service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(handler),
        SHUTDOWN_HOOK_PRIORITY);
  } catch (Throwable t) {
    // Also covers missing arguments (ArrayIndexOutOfBoundsException).
    LOG.fatal("Error starting Log Aggregation", t);
    ExitUtil.terminate(1, t);
  }
}
项目:aliyun-oss-hadoop-fs
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with a plain {@code Error}.
 *
 * <p>{@code ExitUtil.disableSystemExit()} is called first so that the
 * handler's {@code System.exit(-1)} path cannot terminate the test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithError()
    throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler handlerSpy = spy(exHandler);
  final java.lang.Error thrownError = new java.lang.Error("test-error");
  final Thread failingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared Error so the uncaught-exception hook fires.
      throw thrownError;
    }
  });
  failingThread.setUncaughtExceptionHandler(handlerSpy);
  assertSame(handlerSpy, failingThread.getUncaughtExceptionHandler());
  failingThread.start();
  failingThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(handlerSpy).uncaughtException(failingThread, thrownError);
}
项目:aliyun-oss-hadoop-fs
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with an {@code OutOfMemoryError}.
 *
 * <p>{@code ExitUtil.disableSystemHalt()} is called first so that the
 * handler's {@code Runtime.getRuntime().halt(-1)} path cannot kill the
 * test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError()
    throws InterruptedException {
  ExitUtil.disableSystemHalt();
  final YarnUncaughtExceptionHandler oomHandlerSpy = spy(exHandler);
  final OutOfMemoryError simulatedOom =
      new OutOfMemoryError("out-of-memory-error");
  final Thread oomThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared OOM so the uncaught-exception hook fires.
      throw simulatedOom;
    }
  });
  oomThread.setUncaughtExceptionHandler(oomHandlerSpy);
  assertSame(oomHandlerSpy, oomThread.getUncaughtExceptionHandler());
  oomThread.start();
  oomThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(oomHandlerSpy).uncaughtException(oomThread, simulatedOom);
}
项目:aliyun-oss-hadoop-fs
文件:NodeManager.java
/**
 * Stops this NodeManager on a dedicated thread so the caller is not
 * blocked by service shutdown.
 */
protected void shutDown() {
  Thread stopper = new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        // Terminate the JVM only when configured to exit on shutdown
        // events and no JVM shutdown hook is already tearing down.
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  };
  stopper.start();
}
项目:aliyun-oss-hadoop-fs
文件:ApplicationHistoryServer.java
/**
 * Creates, initializes and starts an {@link ApplicationHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
      LOG);
  ApplicationHistoryServer server = null;
  try {
    server = new ApplicationHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return server;
}
项目:aliyun-oss-hadoop-fs
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with no arguments and
 * fails the test if startup attempts to exit the JVM.
 */
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:aliyun-oss-hadoop-fs
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with -D overrides on the
 * command line and verifies they end up in the server's configuration.
 */
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    String[] args = {
        "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS
            + "=4000",
        "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200" };
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000",
        conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:aliyun-oss-hadoop-fs
文件:ApplicationMaster.java
/**
 * Entry point for the ApplicationMaster process.
 *
 * <p>Exit codes: 0 on success (or when {@code init} declines to run),
 * 1 on an unexpected error, 2 when the application finished
 * unsuccessfully.
 *
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean succeeded = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    if (!appMaster.init(args)) {
      // init() asked us not to run (e.g. help was printed): exit cleanly.
      System.exit(0);
    }
    appMaster.run();
    succeeded = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    // Flush log4j buffers before terminating the JVM.
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (succeeded) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
项目:aliyun-oss-hadoop-fs
文件:MRAppMaster.java
/**
 * MRAppMaster exit method instrumented for both runtime and unit testing.
 *
 * <p>If the main thread has not been started, this method was called from
 * a test; in that case {@code ExitUtil} is configured not to exit the JVM.
 *
 * @param status integer indicating exit status
 * @param t throwable exception that could be null
 */
private void exitMRAppMaster(int status, Throwable t) {
  if (!mainStarted) {
    // Called from a test: route terminate() into ExitException instead
    // of an actual JVM exit.
    ExitUtil.disableSystemExit();
  }
  try {
    if (t == null) {
      ExitUtil.terminate(status);
    } else {
      ExitUtil.terminate(status, t);
    }
  } catch (ExitUtil.ExitException ignored) {
    // Thrown only when system exit is disabled (i.e. under test), even on
    // a clean exit. Swallow it so tests need no special handling.
  }
}
项目:aliyun-oss-hadoop-fs
文件:JobHistoryServer.java
/**
 * Creates, initializes and starts a {@link JobHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer historyServer = null;
  try {
    historyServer = new JobHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(historyServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    historyServer.init(conf);
    historyServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return historyServer;
}
项目:aliyun-oss-hadoop-fs
文件:DelegationTokenFetcher.java
/**
 * Prints fetchdt command-line usage to the given stream and then
 * terminates the JVM with status 1 via {@code ExitUtil} (so tests that
 * disabled system exit see an {@code ExitException} instead).
 *
 * @param err stream to print the usage text to
 */
private static void printUsage(PrintStream err) {
  err.println("fetchdt retrieves delegation tokens from the NameNode");
  err.println();
  err.println("fetchdt <opts> <token file>");
  err.println("Options:");
  err.println(" --webservice <url> Url to contact NN on (starts with " +
  "http:// or https://)");
  err.println(" --renewer <name> Name of the delegation token renewer");
  err.println(" --cancel Cancel the delegation token");
  err.println(" --renew Renew the delegation token. " +
  "Delegation " + "token must have been fetched using the --renewer" +
  " <name> option.");
  err.println(" --print Print the delegation token");
  err.println();
  // Append the shared generic-options help before exiting.
  GenericOptionsParser.printGenericCommandUsage(err);
  ExitUtil.terminate(1);
}
项目:aliyun-oss-hadoop-fs
文件:TestClusterId.java
/**
 * Prepares a clean name directory and default format options before each
 * test, with JVM exit disabled so NameNode format errors surface as
 * {@code ExitException}s.
 *
 * @throws IOException if the previous test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  hdfsDir = new File(PathUtils.getTestDirName(getClass()), "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
  // Some tests flip these flags; restore the defaults before every test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
项目:aliyun-oss-hadoop-fs
文件:TestStartup.java
/**
 * Builds a fresh HDFS test configuration rooted at a clean base directory
 * before each test. JVM exit is disabled (and the sticky first exit
 * exception cleared) so startup failures surface as {@code ExitException}s.
 *
 * @throws Exception if the previous hdfs directory cannot be deleted
 */
@Before
public void setUp() throws Exception {
  ExitUtil.disableSystemExit();
  // Clear any exit recorded by a previous test.
  ExitUtil.resetFirstExitException();
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
  fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
  new File(hdfsDir, "data").getPath());
  // Bind all daemon addresses to ephemeral ports to avoid conflicts.
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
  fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
  WILDCARD_HTTP_HOST + "0");
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
项目:big-c
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with a plain {@code Error}.
 *
 * <p>{@code ExitUtil.disableSystemExit()} is called first so that the
 * handler's {@code System.exit(-1)} path cannot terminate the test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithError()
    throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler handlerSpy = spy(exHandler);
  final java.lang.Error thrownError = new java.lang.Error("test-error");
  final Thread failingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared Error so the uncaught-exception hook fires.
      throw thrownError;
    }
  });
  failingThread.setUncaughtExceptionHandler(handlerSpy);
  assertSame(handlerSpy, failingThread.getUncaughtExceptionHandler());
  failingThread.start();
  failingThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(handlerSpy).uncaughtException(failingThread, thrownError);
}
项目:big-c
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with an {@code OutOfMemoryError}.
 *
 * <p>{@code ExitUtil.disableSystemHalt()} is called first so that the
 * handler's {@code Runtime.getRuntime().halt(-1)} path cannot kill the
 * test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError()
    throws InterruptedException {
  ExitUtil.disableSystemHalt();
  final YarnUncaughtExceptionHandler oomHandlerSpy = spy(exHandler);
  final OutOfMemoryError simulatedOom =
      new OutOfMemoryError("out-of-memory-error");
  final Thread oomThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared OOM so the uncaught-exception hook fires.
      throw simulatedOom;
    }
  });
  oomThread.setUncaughtExceptionHandler(oomHandlerSpy);
  assertSame(oomHandlerSpy, oomThread.getUncaughtExceptionHandler());
  oomThread.start();
  oomThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(oomHandlerSpy).uncaughtException(oomThread, simulatedOom);
}
项目:big-c
文件:NodeManager.java
/**
 * Stops this NodeManager on a dedicated thread so the caller is not
 * blocked by service shutdown.
 */
protected void shutDown() {
  Thread stopper = new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        // Terminate the JVM only when configured to exit on shutdown
        // events and no JVM shutdown hook is already tearing down.
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  };
  stopper.start();
}
项目:big-c
文件:ApplicationHistoryServer.java
/**
 * Creates, initializes and starts an {@link ApplicationHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
      LOG);
  ApplicationHistoryServer server = null;
  try {
    server = new ApplicationHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return server;
}
项目:big-c
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with no arguments and
 * fails the test if startup attempts to exit the JVM.
 */
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:big-c
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with -D overrides on the
 * command line and verifies they end up in the server's configuration.
 */
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    String[] args = {
        "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS
            + "=4000",
        "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200" };
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000",
        conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:big-c
文件:ApplicationMaster.java
/**
 * Entry point for the ApplicationMaster process.
 *
 * <p>Exit codes: 0 on success (or when {@code init} declines to run),
 * 1 on an unexpected error, 2 when the application finished
 * unsuccessfully.
 *
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean succeeded = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    if (!appMaster.init(args)) {
      // init() asked us not to run (e.g. help was printed): exit cleanly.
      System.exit(0);
    }
    appMaster.run();
    succeeded = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    // Flush log4j buffers before terminating the JVM.
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (succeeded) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
项目:big-c
文件:JobHistoryServer.java
/**
 * Creates, initializes and starts a {@link JobHistoryServer}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments, parsed for generic Hadoop options
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer historyServer = null;
  try {
    historyServer = new JobHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(historyServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    // Apply generic -D options from the command line onto conf.
    new GenericOptionsParser(conf, args);
    historyServer.init(conf);
    historyServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return historyServer;
}
项目:big-c
文件:TestClusterId.java
/**
 * Prepares a clean name directory and default format options before each
 * test, with JVM exit disabled so NameNode format errors surface as
 * {@code ExitException}s.
 *
 * @throws IOException if the previous test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  hdfsDir = new File(PathUtils.getTestDirName(getClass()), "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
  // Some tests flip these flags; restore the defaults before every test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with a plain {@code Error}.
 *
 * <p>{@code ExitUtil.disableSystemExit()} is called first so that the
 * handler's {@code System.exit(-1)} path cannot terminate the test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithError()
    throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler handlerSpy = spy(exHandler);
  final java.lang.Error thrownError = new java.lang.Error("test-error");
  final Thread failingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared Error so the uncaught-exception hook fires.
      throw thrownError;
    }
  });
  failingThread.setUncaughtExceptionHandler(handlerSpy);
  assertSame(handlerSpy, failingThread.getUncaughtExceptionHandler());
  failingThread.start();
  failingThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(handlerSpy).uncaughtException(failingThread, thrownError);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestYarnUncaughtExceptionHandler.java
/**
 * Verifies that a {@code YarnUncaughtExceptionHandler} installed on a
 * thread is invoked when that thread dies with an {@code OutOfMemoryError}.
 *
 * <p>{@code ExitUtil.disableSystemHalt()} is called first so that the
 * handler's {@code Runtime.getRuntime().halt(-1)} path cannot kill the
 * test JVM.
 *
 * @throws InterruptedException if waiting for the worker thread is
 *         interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError()
    throws InterruptedException {
  ExitUtil.disableSystemHalt();
  final YarnUncaughtExceptionHandler oomHandlerSpy = spy(exHandler);
  final OutOfMemoryError simulatedOom =
      new OutOfMemoryError("out-of-memory-error");
  final Thread oomThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // Die with the prepared OOM so the uncaught-exception hook fires.
      throw simulatedOom;
    }
  });
  oomThread.setUncaughtExceptionHandler(oomHandlerSpy);
  assertSame(oomHandlerSpy, oomThread.getUncaughtExceptionHandler());
  oomThread.start();
  oomThread.join();
  // The handler must have been notified with exactly this thread and error.
  verify(oomHandlerSpy).uncaughtException(oomThread, simulatedOom);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:ApplicationHistoryServer.java
/**
 * Creates, initializes and starts an {@link ApplicationHistoryServer}
 * using the default {@code YarnConfiguration}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments (used for the startup message only)
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
      LOG);
  ApplicationHistoryServer server = null;
  try {
    server = new ApplicationHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return server;
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestApplicationHistoryServer.java
/**
 * Launches an {@link ApplicationHistoryServer} with no arguments and
 * fails the test if startup attempts to exit the JVM.
 */
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  // Make ExitUtil.terminate() throw ExitException instead of exiting.
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this case cannot be customized; other test cases
    // have been adjusted to avoid conflicts with it.
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    // Startup tried to exit: check the status, clear the sticky first
    // exception, and fail the test.
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3
文件:ApplicationMaster.java
/**
 * Entry point for the ApplicationMaster process.
 *
 * <p>Exit codes: 0 on success (or when {@code init} declines to run),
 * 1 on an unexpected error, 2 when the application finished
 * unsuccessfully.
 *
 * @param args Command line args
 */
public static void main(String[] args) {
  boolean succeeded = false;
  try {
    ApplicationMaster appMaster = new ApplicationMaster();
    LOG.info("Initializing ApplicationMaster");
    if (!appMaster.init(args)) {
      // init() asked us not to run (e.g. help was printed): exit cleanly.
      System.exit(0);
    }
    appMaster.run();
    succeeded = appMaster.finish();
  } catch (Throwable t) {
    LOG.fatal("Error running ApplicationMaster", t);
    // Flush log4j buffers before terminating the JVM.
    LogManager.shutdown();
    ExitUtil.terminate(1, t);
  }
  if (succeeded) {
    LOG.info("Application Master completed successfully. exiting");
    System.exit(0);
  } else {
    LOG.info("Application Master failed. exiting");
    System.exit(2);
  }
}
项目:hadoop-2.6.0-cdh5.4.3
文件:JobHistoryServer.java
/**
 * Creates, initializes and starts a {@link JobHistoryServer} using the
 * default {@code YarnConfiguration} layered over a {@code JobConf}.
 *
 * <p>On any startup failure the error is logged and the JVM is terminated
 * with status -1 via {@code ExitUtil} (interceptable in tests).
 *
 * @param args command line arguments (used for the startup message only)
 * @return the launched server; possibly null or partially started if
 *         startup failed while JVM exit is disabled
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer historyServer = null;
  try {
    historyServer = new JobHistoryServer();
    // Stop the composite service cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(historyServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    historyServer.init(conf);
    historyServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return historyServer;
}
项目:hops
文件:TestClusterId.java
/**
 * Prepares a clean name directory and default format options before each
 * test, with JVM exit disabled so NameNode format errors surface as
 * {@code ExitException}s.
 *
 * @throws IOException if the previous test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  // Resolve the test data root (overridable via -Dtest.build.data).
  String baseDir = System.getProperty("test.build.data", "build/test/data");
  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException(
        "Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
  // Some tests flip these flags; restore the defaults before every test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  config = new Configuration();
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestClusterId.java
/**
 * Prepares a clean name directory and default format options before each
 * test, with JVM exit disabled so NameNode format errors surface as
 * {@code ExitException}s.
 *
 * @throws IOException if the previous test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();
  hdfsDir = new File(PathUtils.getTestDirName(getClass()), "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
  // Some tests flip these flags; restore the defaults before every test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestHAStress.java
/**
 * Boots a two-JobTracker HA mini cluster with one TaskTracker before each
 * test. JVM exit is disabled so failover code paths that call
 * {@code ExitUtil.terminate} surface as {@code ExitException}s.
 *
 * @throws Exception if the mini cluster fails to start
 */
@Before
public void setUp() throws Exception {
  ExitUtil.disableSystemExit();
  conf = new Configuration();
  // Use a fencer that always succeeds so failover never blocks the test.
  conf.set(HAUtil.MR_HA_FENCING_METHODS_KEY,
  AlwaysSucceedFencer.class.getName());
  conf.setLong(HAUtil.MR_HA_ACTIVE_CHECK_MILLIS, 1000);
  // Persist job status so the standby can recover it after failover.
  conf.setBoolean("mapred.job.tracker.persist.jobstatus.active", true);
  conf.setInt("mapred.job.tracker.persist.jobstatus.hours", 1);
  conf.set("mapred.job.tracker.persist.jobstatus.dir", "/tmp/jobtracker/jobsInfo");
  cluster = new MiniMRHACluster(conf);
  // Make the first JobTracker active, then bring up one TaskTracker.
  cluster.getJobTrackerHaDaemon(0).makeActive();
  cluster.startTaskTracker(0, 1);
  cluster.waitActive();
  jt1 = cluster.getJobTrackerHaDaemon(0);
  jt2 = cluster.getJobTrackerHaDaemon(1);
  target1 = new JobTrackerHAServiceTarget(jt1.getConf());
  target2 = new JobTrackerHAServiceTarget(jt2.getConf());
}