Example source code for the Java class org.apache.hadoop.util.StringUtils
Project: aliyun-maxcompute-data-collectors
File: MSSQLTestUtils.java
public void createTableFromSQL(String sql) throws SQLException {
Connection dbcon = this.getConnection();
System.out.println("SQL : " + sql);
this.dropTableIfExists("TPCH1M_LINEITEM");
try {
Statement st = dbcon.createStatement();
int res = st.executeUpdate(sql);
System.out.println("Result : " + res);
} catch (SQLException e) {
LOG.error("Got SQLException during creating table: " + StringUtils.stringifyException(e));
}
}
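Usage sketch (standalone, not from the project above): StringUtils.stringifyException renders an exception's full stack trace as a String, assuming hadoop-common is on the classpath.

import org.apache.hadoop.util.StringUtils;

public class StringifyDemo {
    public static void main(String[] args) {
        try {
            throw new IllegalStateException("demo failure");  // hypothetical error
        } catch (Exception e) {
            // Prints the same text e.printStackTrace() would, but as a String value.
            System.out.println(StringUtils.stringifyException(e));
        }
    }
}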
Project: ditb
File: MultiThreadedUpdaterWithACL.java
private void recordFailure(final Mutation m, final long keyBase,
final long start, IOException e) {
failedKeySet.add(keyBase);
String exceptionInfo;
if (e instanceof RetriesExhaustedWithDetailsException) {
RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
exceptionInfo = aggEx.getExhaustiveDescription();
} else {
// stringifyException already renders the full stack trace to a String,
// so no manual StringWriter/PrintWriter plumbing is needed here.
exceptionInfo = StringUtils.stringifyException(e);
}
LOG.error("Failed to mutate: " + keyBase + " after " + (System.currentTimeMillis() - start)
+ "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: "
+ exceptionInfo);
}
Project: hadoop-oss
File: AccessControlList.java
/**
* Build ACL from the given two Strings.
* The Strings contain comma separated values.
*
* @param userGroupStrings array of Strings to build the ACL from: users first, then groups
*/
private void buildACL(String[] userGroupStrings) {
users = new HashSet<String>();
groups = new HashSet<String>();
for (String aclPart : userGroupStrings) {
if (aclPart != null && isWildCardACLValue(aclPart)) {
allAllowed = true;
break;
}
}
if (!allAllowed) {
if (userGroupStrings.length >= 1 && userGroupStrings[0] != null) {
users = StringUtils.getTrimmedStringCollection(userGroupStrings[0]);
}
if (userGroupStrings.length == 2 && userGroupStrings[1] != null) {
groups = StringUtils.getTrimmedStringCollection(userGroupStrings[1]);
groupsMapping.cacheGroupsAdd(new LinkedList<String>(groups));
}
}
}
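Usage sketch (standalone; the input value is hypothetical): getTrimmedStringCollection splits a comma separated String and trims each token, which is why the users/groups Strings above need no manual cleanup.

import java.util.Collection;
import org.apache.hadoop.util.StringUtils;

public class TrimmedCollectionDemo {
    public static void main(String[] args) {
        Collection<String> users =
            StringUtils.getTrimmedStringCollection(" alice, bob ,carol ");
        System.out.println(users);  // e.g. [alice, bob, carol]
    }
}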
Project: aliyun-maxcompute-data-collectors
File: CreateHiveTableTool.java
@Override
/** {@inheritDoc} */
public int run(SqoopOptions options) {
if (!init(options)) {
return 1;
}
try {
HiveImport hiveImport = new HiveImport(options, manager,
options.getConf(), false);
hiveImport.importTable(options.getTableName(),
options.getHiveTableName(), true);
} catch (IOException ioe) {
LOG.error("Encountered IOException running create table job: "
+ StringUtils.stringifyException(ioe));
if (System.getProperty(Sqoop.SQOOP_RETHROW_PROPERTY) != null) {
throw new RuntimeException(ioe);
} else {
return 1;
}
} finally {
destroy(options);
}
return 0;
}
Project: aliyun-maxcompute-data-collectors
File: HsqldbMetaStore.java
/**
* Blocks the current thread until the server is shut down.
*/
public void waitForServer() {
while (true) {
int curState = server.getState();
if (curState == ServerConstants.SERVER_STATE_SHUTDOWN) {
LOG.info("Got shutdown notification");
break;
}
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
LOG.info("Interrupted while blocking for server:"
+ StringUtils.stringifyException(ie));
}
}
}
Project: hadoop
File: CacheAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when deleting a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + getShortUsage());
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.removeCachePool(name);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.out.println("Successfully removed cache pool " + name + ".");
return 0;
}
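Usage sketch (standalone; argument values are hypothetical): popFirstNonOption removes and returns the first list entry that does not look like an option flag, leaving the flags in place.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.util.StringUtils;

public class PopDemo {
    public static void main(String[] args) {
        List<String> argv = new LinkedList<>(Arrays.asList("-force", "pool1"));
        String name = StringUtils.popFirstNonOption(argv);
        System.out.println(name);  // pool1
        System.out.println(argv);  // [-force]
    }
}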
Project: hadoop
File: SharedCacheManager.java
public static void main(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(SharedCacheManager.class, args, LOG);
try {
Configuration conf = new YarnConfiguration();
SharedCacheManager sharedCacheManager = new SharedCacheManager();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(sharedCacheManager),
SHUTDOWN_HOOK_PRIORITY);
sharedCacheManager.init(conf);
sharedCacheManager.start();
} catch (Throwable t) {
LOG.fatal("Error starting SharedCacheManager", t);
System.exit(-1);
}
}
Project: hadoop
File: TestFileSystemAccessService.java
@Test
@TestDir
public void serviceHadoopConf() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
server.destroy();
}
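Usage sketch (standalone; values are hypothetical): this join overload takes a separator and an Iterable, so building the server.services value reduces to one call.

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

public class JoinDemo {
    public static void main(String[] args) {
        String csv = StringUtils.join(",", Arrays.asList("svcA", "svcB", "svcC"));
        System.out.println(csv);  // svcA,svcB,svcC
    }
}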
Project: hadoop
File: AccessControlList.java
/**
* Build ACL from the given two Strings.
* The Strings contain comma separated values.
*
* @param userGroupStrings array of Strings to build the ACL from: users first, then groups
*/
private void buildACL(String[] userGroupStrings) {
users = new HashSet<String>();
groups = new HashSet<String>();
for (String aclPart : userGroupStrings) {
if (aclPart != null && isWildCardACLValue(aclPart)) {
allAllowed = true;
break;
}
}
if (!allAllowed) {
if (userGroupStrings.length >= 1 && userGroupStrings[0] != null) {
users = StringUtils.getTrimmedStringCollection(userGroupStrings[0]);
}
if (userGroupStrings.length == 2 && userGroupStrings[1] != null) {
groups = StringUtils.getTrimmedStringCollection(userGroupStrings[1]);
groupsMapping.cacheGroupsAdd(new LinkedList<String>(groups));
}
}
}
Project: hadoop
File: Application.java
/**
* Abort the application and wait for it to finish.
* @param t the exception that signalled the problem
* @throws IOException A wrapper around the exception that was passed in
*/
void abort(Throwable t) throws IOException {
LOG.info("Aborting because of " + StringUtils.stringifyException(t));
try {
downlink.abort();
downlink.flush();
} catch (IOException e) {
// IGNORE cleanup problems
}
try {
handler.waitForFinish();
} catch (Throwable ignored) {
process.destroy();
}
IOException wrapper = new IOException("pipe child exception");
wrapper.initCause(t);
throw wrapper;
}
Project: aliyun-maxcompute-data-collectors
File: MSSQLTestUtils.java
private Connection getConnection() {
if (conn == null) {
try {
Connection con = DriverManager.getConnection(HOST_URL,
DATABASE_USER, DATABASE_PASSWORD);
conn = con;
return con;
} catch (SQLException e) {
LOG.error("Get SQLException during setting up connection: " + StringUtils.stringifyException(e));
return null;
}
}
return conn;
}
Project: hadoop
File: TestMRApps.java
@Test (timeout = 120000)
public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
Map<String, String> env = new HashMap<String, String>();
MRApps.setClasspath(env, conf);
String cp = env.get("CLASSPATH");
String appCp = env.get("APP_CLASSPATH");
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
+ " classpath!", cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
cp.contains("PWD"));
String expectedAppClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/job.jar",
"job.jar/classes/", "job.jar/lib/*",
ApplicationConstants.Environment.PWD.$$() + "/*"));
assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
+ " classpath!", expectedAppClasspath, appCp);
}
Project: hadoop
File: HamletGen.java
void genNewElementMethod(String className, Method method, int indent) {
String methodName = method.getName();
String retName = method.getReturnType().getSimpleName();
Class<?>[] params = method.getParameterTypes();
echo(indent, "\n",
"@Override\n",
"public ", retName, "<", className, topMode ? "> " : "<T>> ",
methodName, "(");
if (params.length == 0) {
puts(0, ") {");
puts(indent,
topMode ? "" : " closeAttrs();\n",
" return ", StringUtils.toLowerCase(retName), "_" + "(this, ",
isInline(className, retName), ");\n", "}");
} else if (params.length == 1) {
puts(0, "String selector) {");
puts(indent,
" return setSelector(", methodName, "(), selector);\n", "}");
} else {
throwUnhandled(className, method);
}
}
Project: hadoop
File: HistoryViewer.java
private void printJobDetails() {
StringBuffer jobDetails = new StringBuffer();
jobDetails.append("\nHadoop job: " ).append(job.getJobId());
jobDetails.append("\n=====================================");
jobDetails.append("\nUser: ").append(job.getUsername());
jobDetails.append("\nJobName: ").append(job.getJobname());
jobDetails.append("\nJobConf: ").append(job.getJobConfPath());
jobDetails.append("\nSubmitted At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getSubmitTime(), 0));
jobDetails.append("\nLaunched At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getLaunchTime(),
job.getSubmitTime()));
jobDetails.append("\nFinished At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getFinishTime(),
job.getLaunchTime()));
jobDetails.append("\nStatus: ").append(((job.getJobStatus() == null) ?
"Incomplete" :job.getJobStatus()));
printCounters(jobDetails, job.getTotalCounters(), job.getMapCounters(),
job.getReduceCounters());
jobDetails.append("\n");
jobDetails.append("\n=====================================");
System.out.println(jobDetails.toString());
}
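Usage sketch (standalone; timestamps are hypothetical): getFormattedTimeWithDiff formats the first timestamp and, when the second is non-zero, appends the elapsed time between them.

import java.text.SimpleDateFormat;
import org.apache.hadoop.util.StringUtils;

public class TimeDiffDemo {
    public static void main(String[] args) {
        SimpleDateFormat fmt = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
        long submitTime = System.currentTimeMillis();
        long launchTime = submitTime + 90_000L;  // pretend the job launched 90s later
        // Prints the launch time plus the delta, e.g. "... (1mins, 30sec)".
        System.out.println(StringUtils.getFormattedTimeWithDiff(fmt, launchTime, submitTime));
    }
}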
Project: ditb
File: LruBlockCache.java
public void logStats() {
// Log size
long totalSize = heapSize();
long freeSize = maxSize - totalSize;
LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " +
"freeSize=" + StringUtils.byteDesc(freeSize) + ", " +
"max=" + StringUtils.byteDesc(this.maxSize) + ", " +
"blockCount=" + getBlockCount() + ", " +
"accesses=" + stats.getRequestCount() + ", " +
"hits=" + stats.getHitCount() + ", " +
"hitRatio=" + (stats.getHitCount() == 0 ?
"0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " +
"cachingAccesses=" + stats.getRequestCachingCount() + ", " +
"cachingHits=" + stats.getHitCachingCount() + ", " +
"cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ?
"0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) +
"evictions=" + stats.getEvictionCount() + ", " +
"evicted=" + stats.getEvictedCount() + ", " +
"evictedPerRun=" + stats.evictedPerEviction());
}
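Usage sketch (standalone; values are hypothetical and exact rounding may vary by version): byteDesc and formatPercent are the two helpers doing the human-readable formatting in the log line above.

import org.apache.hadoop.util.StringUtils;

public class FormatDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.byteDesc(1536L * 1024 * 1024));  // e.g. "1.5 GB"
        System.out.println(StringUtils.formatPercent(0.9735, 2));       // e.g. "97.35%"
    }
}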
Project: hadoop
File: ShortCircuitCache.java
/**
* Trim the eviction lists.
*/
private void trimEvictionMaps() {
long now = Time.monotonicNow();
demoteOldEvictableMmaped(now);
while (true) {
long evictableSize = evictable.size();
long evictableMmappedSize = evictableMmapped.size();
if (evictableSize + evictableMmappedSize <= maxTotalSize) {
return;
}
ShortCircuitReplica replica;
if (evictableSize == 0) {
replica = evictableMmapped.firstEntry().getValue();
} else {
replica = evictable.firstEntry().getValue();
}
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": trimEvictionMaps is purging " + replica +
StringUtils.getStackTrace(Thread.currentThread()));
}
purge(replica);
}
}
Project: ditb
File: HStore.java
/**
* Similar to commit, but called in secondary region replicas for replaying the flush cache from
* primary region. Adds the new files to the store, and drops the snapshot depending on the
* dropMemstoreSnapshot argument.
*
* @param fileNames names of the flushed files
* @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
* @throws IOException
*/
@Override public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
throws IOException {
List<StoreFile> storeFiles = new ArrayList<StoreFile>(fileNames.size());
for (String file : fileNames) {
// open the file as a store file (hfile link, etc)
StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
storeFiles.add(storeFile);
HStore.this.storeSize += storeFile.getReader().length();
HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
if (LOG.isInfoEnabled()) {
LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + " added " + storeFile
+ ", entries=" + storeFile.getReader().getEntries() + ", sequenceid=" + +storeFile
.getReader().getSequenceID() + ", filesize=" + StringUtils
.humanReadableInt(storeFile.getReader().length()));
}
}
long snapshotId = -1; // -1 means do not drop
if (dropMemstoreSnapshot && snapshot != null) {
snapshotId = snapshot.getId();
}
HStore.this.updateStorefiles(storeFiles, snapshotId);
}
Project: hadoop
File: Command.java
/**
* Display an exception prefaced with the command name. Also increments
* the error count for the command, which will result in a non-zero exit
* code.
* @param e exception to display
*/
public void displayError(Exception e) {
// build up a list of exceptions that occurred
exceptions.add(e);
String errorMessage = e.getLocalizedMessage();
if (errorMessage == null) {
// this is an unexpected condition, so dump the whole exception since
// it's probably a nasty internal error where the backtrace would be
// useful
errorMessage = StringUtils.stringifyException(e);
LOG.debug(errorMessage);
} else {
errorMessage = errorMessage.split("\n", 2)[0];
}
displayError(errorMessage);
}
Project: aliyun-maxcompute-data-collectors
File: OracleExportTest.java
@Before
public void setUp() {
super.setUp();
SqoopOptions options = new SqoopOptions(OracleUtils.CONNECT_STRING,
getTableName());
OracleUtils.setOracleAuth(options);
this.manager = new OracleManager(options);
try {
this.conn = manager.getConnection();
this.conn.setAutoCommit(false);
} catch (SQLException sqlE) {
LOG.error(StringUtils.stringifyException(sqlE));
fail("Failed with sql exception in setup: " + sqlE);
}
}
Project: hadoop
File: QuorumJournalManager.java
private static List<InetSocketAddress> getLoggerAddresses(URI uri)
throws IOException {
String authority = uri.getAuthority();
Preconditions.checkArgument(authority != null && !authority.isEmpty(),
"URI has no authority: " + uri);
String[] parts = StringUtils.split(authority, ';');
for (int i = 0; i < parts.length; i++) {
parts[i] = parts[i].trim();
}
if (parts.length % 2 == 0) {
LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
"of Journal Nodes specified. This is not recommended!");
}
List<InetSocketAddress> addrs = Lists.newArrayList();
for (String addr : parts) {
addrs.add(NetUtils.createSocketAddr(
addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT));
}
return addrs;
}
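Usage sketch (standalone; the authority string is hypothetical): StringUtils.split with an explicit separator is what turns the shared-edits URI authority into per-JournalNode addresses.

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

public class SplitDemo {
    public static void main(String[] args) {
        String[] parts = StringUtils.split("jn1:8485;jn2:8485;jn3:8485", ';');
        System.out.println(Arrays.toString(parts));  // [jn1:8485, jn2:8485, jn3:8485]
    }
}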
Project: hadoop-oss
File: TimedOutTestsListener.java
static String buildThreadDump() {
StringBuilder dump = new StringBuilder();
Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
for (Map.Entry<Thread, StackTraceElement[]> e : stackTraces.entrySet()) {
Thread thread = e.getKey();
dump.append(String.format(
"\"%s\" %s prio=%d tid=%d %s\njava.lang.Thread.State: %s",
thread.getName(),
(thread.isDaemon() ? "daemon" : ""),
thread.getPriority(),
thread.getId(),
Thread.State.WAITING.equals(thread.getState()) ?
"in Object.wait()" :
StringUtils.toLowerCase(thread.getState().name()),
Thread.State.WAITING.equals(thread.getState()) ?
"WAITING (on object monitor)" : thread.getState()));
for (StackTraceElement stackTraceElement : e.getValue()) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n");
}
return dump.toString();
}
Project: hadoop
File: TestFileSystemAccessService.java
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H05.*")
@TestDir
public void NameNodeNotinWhitelists() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "NN");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NNx");
}
Project: hadoop-oss
File: TestUTF8.java
/**
* Test encoding and decoding of UTF8 outside the basic multilingual plane.
*
* This is a regression test for HADOOP-9103.
*/
@Test
public void testNonBasicMultilingualPlane() throws Exception {
// Test using the "CAT FACE" character (U+1F431)
// See http://www.fileformat.info/info/unicode/char/1f431/index.htm
String catFace = "\uD83D\uDC31";
// This encodes to 4 bytes in UTF-8:
byte[] encoded = catFace.getBytes("UTF-8");
assertEquals(4, encoded.length);
assertEquals("f09f90b1", StringUtils.byteToHexString(encoded));
// Decode back to String using our own decoder
String roundTrip = UTF8.fromBytes(encoded);
assertEquals(catFace, roundTrip);
}
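Usage sketch (standalone; input is hypothetical): byteToHexString is a convenient way to assert on raw encodings, as the test above does for the 4-byte cat-face sequence.

import org.apache.hadoop.util.StringUtils;

public class HexDemo {
    public static void main(String[] args) throws Exception {
        byte[] bytes = "hi".getBytes("UTF-8");
        System.out.println(StringUtils.byteToHexString(bytes));  // 6869
    }
}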
Project: hadoop
File: LeaseRenewer.java
private LeaseRenewer(Factory.Key factorykey) {
this.factorykey = factorykey;
unsyncSetGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
if (LOG.isTraceEnabled()) {
instantiationTrace = StringUtils.stringifyException(
new Throwable("TRACE"));
} else {
instantiationTrace = null;
}
}
Project: hadoop
File: TestEditLog.java
/**
* Construct FSEditLog with default configuration, taking editDirs from NNStorage
*
* @param storage Storage object used by namenode
*/
private static FSEditLog getFSEditLog(NNStorage storage) throws IOException {
Configuration conf = new Configuration();
// Make sure the edits dirs are set in the provided configuration object.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
StringUtils.join(",", storage.getEditsDirectories()));
FSEditLog log = new FSEditLog(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf));
return log;
}
Project: hadoop
File: TestParam.java
@Test
public void testConcatSourcesParam() {
final String[] strings = {"/", "/foo", "/bar"};
for(int n = 0; n < strings.length; n++) {
final String[] sub = new String[n];
final Path[] paths = new Path[n];
for(int i = 0; i < paths.length; i++) {
paths[i] = new Path(sub[i] = strings[i]);
}
final String expected = StringUtils.join(",", Arrays.asList(sub));
final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
Assert.assertEquals(expected, computed.getValue());
}
}
Project: hadoop
File: Dispatcher.java
/**
* Read a set of host names from a file.
*
* @return set of host names
*/
static Set<String> getHostListFromFile(String fileName, String type) {
Set<String> nodes = new HashSet<String>();
try {
HostsFileReader.readFileToSet(type, fileName, nodes);
return StringUtils.getTrimmedStrings(nodes);
} catch (IOException e) {
throw new IllegalArgumentException(
"Failed to read host list from file: " + fileName);
}
}
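Usage sketch (standalone; input is hypothetical): the String overload of getTrimmedStrings behaves analogously, splitting on commas and trimming each token.

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

public class TrimmedStringsDemo {
    public static void main(String[] args) {
        String[] hosts = StringUtils.getTrimmedStrings(" host1 , host2 ,host3 ");
        System.out.println(Arrays.toString(hosts));  // [host1, host2, host3]
    }
}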
Project: hadoop
File: XAttrHelper.java
/**
* Get name with prefix from <code>XAttr</code>
*/
public static String getPrefixName(XAttr xAttr) {
if (xAttr == null) {
return null;
}
String namespace = xAttr.getNameSpace().toString();
return StringUtils.toLowerCase(namespace) + "." + xAttr.getName();
}
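Usage sketch (standalone): StringUtils.toLowerCase is used here, and in several snippets below, instead of String.toLowerCase() because it lower-cases with a fixed English locale, keeping results stable regardless of the JVM's default locale (e.g. the Turkish dotless-i issue).

import org.apache.hadoop.util.StringUtils;

public class LowerCaseDemo {
    public static void main(String[] args) {
        // Same output under every JVM default locale.
        System.out.println(StringUtils.toLowerCase("RAW.MyXAttr"));  // raw.myxattr
    }
}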
Project: hadoop
File: TestShortCircuitLocalRead.java
static private void checkData(byte[] actual, int from, byte[] expected, int len,
String message) {
for (int idx = 0; idx < len; idx++) {
if (expected[from + idx] != actual[idx]) {
Assert.fail(message + " byte " + (from + idx) + " differs. expected "
+ expected[from + idx] + " actual " + actual[idx] +
"\nexpected: " + StringUtils.byteToHexString(expected, from, from + len) +
"\nactual: " + StringUtils.byteToHexString(actual, 0, len));
}
}
}
Project: hadoop-oss
File: SecurityUtil.java
/**
* Construct the service key for a token
* @param addr InetSocketAddress of remote connection with a token
* @return "ip:port" or "host:port" depending on the value of
* hadoop.security.token.service.use_ip
*/
public static Text buildTokenService(InetSocketAddress addr) {
String host = null;
if (useIpForTokenService) {
if (addr.isUnresolved()) { // host has no ip address
throw new IllegalArgumentException(
new UnknownHostException(addr.getHostName())
);
}
host = addr.getAddress().getHostAddress();
} else {
host = StringUtils.toLowerCase(addr.getHostName());
}
return new Text(host + ":" + addr.getPort());
}
Project: hadoop-oss
File: SecurityUtil.java
public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
try {
return Enum.valueOf(AuthenticationMethod.class,
StringUtils.toUpperCase(value));
} catch (IllegalArgumentException iae) {
throw new IllegalArgumentException("Invalid attribute value for " +
HADOOP_SECURITY_AUTHENTICATION + " of " + value);
}
}
Project: hadoop-oss
File: SecurityUtil.java
public static void setAuthenticationMethod(
AuthenticationMethod authenticationMethod, Configuration conf) {
if (authenticationMethod == null) {
authenticationMethod = AuthenticationMethod.SIMPLE;
}
conf.set(HADOOP_SECURITY_AUTHENTICATION,
StringUtils.toLowerCase(authenticationMethod.toString()));
}
Project: hadoop-oss
File: ActiveStandbyElector.java
/**
* If there is a breadcrumb node indicating that another node may need
* fencing, try to fence that node.
* @return the Stat of the breadcrumb node that was read, or null
* if no breadcrumb node existed
*/
private Stat fenceOldActive() throws InterruptedException, KeeperException {
final Stat stat = new Stat();
byte[] data;
LOG.info("Checking for any old active which needs to be fenced...");
try {
data = zkDoWithRetries(new ZKAction<byte[]>() {
@Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(zkBreadCrumbPath, false, stat);
}
});
} catch (KeeperException ke) {
if (isNodeDoesNotExist(ke.code())) {
LOG.info("No old node to fence");
return null;
}
// If we failed to read for any other reason, then likely we lost
// our session, or we don't have permissions, etc. In any case,
// we probably shouldn't become active, and failing the whole
// thing is the best bet.
throw ke;
}
LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
if (Arrays.equals(data, appData)) {
LOG.info("But old node has our own data, so don't need to fence it.");
} else {
appClient.fenceOldActive(data);
}
return stat;
}
Project: hadoop
File: TestFileSystemAccessService.java
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H09.*")
@TestDir
public void invalidSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "foo");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
Project: hadoop-oss
File: MetricsSystemImpl.java
private InitMode initMode() {
LOG.debug("from system property: "+ System.getProperty(MS_INIT_MODE_KEY));
LOG.debug("from environment variable: "+ System.getenv(MS_INIT_MODE_KEY));
String m = System.getProperty(MS_INIT_MODE_KEY);
String m2 = m == null ? System.getenv(MS_INIT_MODE_KEY) : m;
return InitMode.valueOf(
StringUtils.toUpperCase((m2 == null ? InitMode.NORMAL.name() : m2)));
}
Project: hadoop
File: DistCpUtils.java
/**
* Converts a FileStatus to a CopyListingFileStatus. If preserving ACLs,
* populates the CopyListingFileStatus with the ACLs. If preserving XAttrs,
* populates the CopyListingFileStatus with the XAttrs.
*
* @param fileSystem FileSystem containing the file
* @param fileStatus FileStatus of file
* @param preserveAcls boolean true if preserving ACLs
* @param preserveXAttrs boolean true if preserving XAttrs
* @param preserveRawXAttrs boolean true if preserving raw.* XAttrs
* @throws IOException if there is an I/O error
*/
public static CopyListingFileStatus toCopyListingFileStatus(
FileSystem fileSystem, FileStatus fileStatus, boolean preserveAcls,
boolean preserveXAttrs, boolean preserveRawXAttrs) throws IOException {
CopyListingFileStatus copyListingFileStatus =
new CopyListingFileStatus(fileStatus);
if (preserveAcls) {
FsPermission perm = fileStatus.getPermission();
if (perm.getAclBit()) {
List<AclEntry> aclEntries = fileSystem.getAclStatus(
fileStatus.getPath()).getEntries();
copyListingFileStatus.setAclEntries(aclEntries);
}
}
if (preserveXAttrs || preserveRawXAttrs) {
Map<String, byte[]> srcXAttrs = fileSystem.getXAttrs(fileStatus.getPath());
if (preserveXAttrs && preserveRawXAttrs) {
copyListingFileStatus.setXAttrs(srcXAttrs);
} else {
Map<String, byte[]> trgXAttrs = Maps.newHashMap();
final String rawNS =
StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
for (Map.Entry<String, byte[]> ent : srcXAttrs.entrySet()) {
final String xattrName = ent.getKey();
if (xattrName.startsWith(rawNS)) {
if (preserveRawXAttrs) {
trgXAttrs.put(xattrName, ent.getValue());
}
} else if (preserveXAttrs) {
trgXAttrs.put(xattrName, ent.getValue());
}
}
copyListingFileStatus.setXAttrs(trgXAttrs);
}
}
return copyListingFileStatus;
}
Project: hadoop
File: SliveReducer.java
@Override // Reducer
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
OperationOutput collector = null;
int reduceAm = 0;
int errorAm = 0;
logAndSetStatus(reporter, "Iterating over reduction values for key " + key);
while (values.hasNext()) {
Text value = values.next();
try {
OperationOutput val = new OperationOutput(key, value);
if (collector == null) {
collector = val;
} else {
collector = OperationOutput.merge(collector, val);
}
LOG.info("Combined " + val + " into/with " + collector);
++reduceAm;
} catch (Exception e) {
++errorAm;
logAndSetStatus(reporter, "Error iterating over reduction input "
+ value + " due to : " + StringUtils.stringifyException(e));
if (getConfig().shouldExitOnFirstError()) {
break;
}
}
}
logAndSetStatus(reporter, "Reduced " + reduceAm + " values with " + errorAm
+ " errors");
if (collector != null) {
logAndSetStatus(reporter, "Writing output " + collector.getKey() + " : "
+ collector.getOutputValue());
output.collect(collector.getKey(), collector.getOutputValue());
}
}
Project: hadoop-oss
File: FileSystem.java
Key(URI uri, Configuration conf, long unique) throws IOException {
scheme = uri.getScheme()==null ?
"" : StringUtils.toLowerCase(uri.getScheme());
authority = uri.getAuthority()==null ?
"" : StringUtils.toLowerCase(uri.getAuthority());
this.unique = unique;
this.ugi = UserGroupInformation.getCurrentUser();
}
Project: ditb
File: HBaseAdmin.java
private boolean isEncodedRegionName(byte[] regionName) throws IOException {
try {
HRegionInfo.parseRegionName(regionName);
return false;
} catch (IOException e) {
if (StringUtils.stringifyException(e)
.contains(HRegionInfo.INVALID_REGION_NAME_FORMAT_MESSAGE)) {
return true;
}
throw e;
}
}