Example source code for the Java class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix
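Every snippet on this page uses the static helper TraditionalBinaryPrefix.long2String(long, String, int), which renders a byte count with a traditional binary prefix for human-readable log messages, and several also use its inverse, string2long(String). As a quick orientation, here is a minimal, self-contained sketch of long2String; the demo class name and sample values are illustrative only, and the exact rendering (prefix casing, spacing, rounding) can vary between Hadoop versions, so the expected outputs in the comments are approximate.

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class Long2StringDemo {
  public static void main(String[] args) {
    // long2String(value, unit, decimalPlaces): format a raw byte count with a
    // binary prefix, as the HBase/YARN log statements below do.
    System.out.println(TraditionalBinaryPrefix.long2String(134217728L, "B", 1)); // roughly "128 MB"
    System.out.println(TraditionalBinaryPrefix.long2String(1536L, "", 1));       // roughly "1.5 K"
    // Values below 1024 are printed without a prefix.
    System.out.println(TraditionalBinaryPrefix.long2String(512L, "B", 1));       // roughly "512 B"
  }
}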
Project: ditb
File: StripeStoreFileManager.java
private void debugDumpState(String string) {
if (!LOG.isDebugEnabled()) return;
StringBuilder sb = new StringBuilder();
sb.append("\n" + string + "; current stripe state is as such:");
sb.append("\n level 0 with ")
.append(state.level0Files.size())
.append(
" files: "
+ TraditionalBinaryPrefix.long2String(
StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";");
for (int i = 0; i < state.stripeFiles.size(); ++i) {
String endRow = (i == state.stripeEndRows.length)
? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]";
sb.append("\n stripe ending in ")
.append(endRow)
.append(" with ")
.append(state.stripeFiles.get(i).size())
.append(
" files: "
+ TraditionalBinaryPrefix.long2String(
StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";");
}
sb.append("\n").append(state.stripeFiles.size()).append(" stripes total.");
sb.append("\n").append(getStorefileCount()).append(" files total.");
LOG.debug(sb.toString());
}
Project: ditb
File: MemStoreFlusher.java
/**
* @param conf the region server configuration
* @param server the region server this flusher belongs to
*/
public MemStoreFlusher(final Configuration conf,
final HRegionServer server) {
super();
this.conf = conf;
this.server = server;
this.threadWakeFrequency =
conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
float globalMemStorePercent = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, true);
this.globalMemStoreLimit = (long) (max * globalMemStorePercent);
this.globalMemStoreLimitLowMarkPercent =
HeapMemorySizeUtil.getGlobalMemStoreLowerMark(conf, globalMemStorePercent);
this.globalMemStoreLimitLowMark =
(long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent);
this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime",
90000);
int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
this.flushHandlers = new FlushHandler[handlerCount];
LOG.info("globalMemStoreLimit="
+ TraditionalBinaryPrefix.long2String(this.globalMemStoreLimit, "", 1)
+ ", globalMemStoreLimitLowMark="
+ TraditionalBinaryPrefix.long2String(this.globalMemStoreLimitLowMark, "", 1)
+ ", maxHeap=" + TraditionalBinaryPrefix.long2String(max, "", 1));
}
Project: ditb
File: HStore.java
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
throws IOException {
// Write-out finished successfully, move into the right spot
Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path, this, true);
status.setStatus("Flushing " + this + ": reopening flushed file");
StoreFile sf = createStoreFileAndReader(dstPath);
StoreFile.Reader r = sf.getReader();
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();
if (LOG.isInfoEnabled()) {
LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
+ ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
}
return sf;
}
Project: hbase
File: StripeStoreFileManager.java
private void debugDumpState(String string) {
if (!LOG.isDebugEnabled()) return;
StringBuilder sb = new StringBuilder();
sb.append("\n" + string + "; current stripe state is as such:");
sb.append("\n level 0 with ")
.append(state.level0Files.size())
.append(
" files: "
+ TraditionalBinaryPrefix.long2String(
StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";");
for (int i = 0; i < state.stripeFiles.size(); ++i) {
String endRow = (i == state.stripeEndRows.length)
? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]";
sb.append("\n stripe ending in ")
.append(endRow)
.append(" with ")
.append(state.stripeFiles.get(i).size())
.append(
" files: "
+ TraditionalBinaryPrefix.long2String(
StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";");
}
sb.append("\n").append(state.stripeFiles.size()).append(" stripes total.");
sb.append("\n").append(getStorefileCount()).append(" files total.");
LOG.debug(sb.toString());
}
Project: hbase
File: MemStoreFlusher.java
/**
* @param conf the region server configuration
* @param server the region server this flusher belongs to
*/
public MemStoreFlusher(final Configuration conf,
final HRegionServer server) {
super();
this.conf = conf;
this.server = server;
this.threadWakeFrequency =
conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime",
90000);
int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
this.flushHandlers = new FlushHandler[handlerCount];
LOG.info("globalMemStoreLimit="
+ TraditionalBinaryPrefix
.long2String(this.server.getRegionServerAccounting().getGlobalMemStoreLimit(), "", 1)
+ ", globalMemStoreLimitLowMark="
+ TraditionalBinaryPrefix.long2String(
this.server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 1)
+ ", Offheap="
+ (this.server.getRegionServerAccounting().isOffheap()));
}
Project: hbase
File: HStore.java
/**
* @param path The pathname of the tmp file into which the store was flushed
* @param logCacheFlushId sequence id of the cache flush being committed
* @param status task monitor used to report flush progress
* @return store file created.
* @throws IOException if the flushed file cannot be committed or reopened
*/
private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
throws IOException {
// Write-out finished successfully, move into the right spot
Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
status.setStatus("Flushing " + this + ": reopening flushed file");
HStoreFile sf = createStoreFileAndReader(dstPath);
StoreFileReader r = sf.getReader();
this.storeSize.addAndGet(r.length());
this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());
if (LOG.isInfoEnabled()) {
LOG.info("Added " + sf + ", entries=" + r.getEntries() +
", sequenceid=" + logCacheFlushId +
", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
}
return sf;
}
Project: hadoop
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hadoop
File: DistCpV1.java
private long parseLong(String[] args, int offset) {
if (offset == args.length) {
throw new IllegalArgumentException("<n> not specified in " + cmd);
}
long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
if (n <= 0) {
throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
}
return n;
}
Project: hadoop
File: DistCpV1.java
/**
* Copies single file to the path specified by tmpfile.
* @param srcstat src path and metadata
* @param tmpfile temporary file to which copy is to be done
* @param absdst actual destination path to which copy is to be done
* @param reporter MapReduce reporter used to update counters and task status
* @return Number of bytes copied
*/
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
Reporter reporter) throws IOException {
long bytesCopied = 0L;
Path srcPath = srcstat.getPath();
// open src file
try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
// open tmp file
try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
LOG.info("Copying file " + srcPath + " of size " +
srcstat.getLen() + " bytes...");
// copy file
for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
out.write(buffer, 0, bytesRead);
bytesCopied += bytesRead;
reporter.setStatus(
String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
+ absdst + " [ " +
TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
+ TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
+ " ]");
}
}
}
return bytesCopied;
}
Project: ditb
File: HStore.java
private void removeUnneededFiles() throws IOException {
if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
if (getFamily().getMinVersions() > 0) {
LOG.debug("Skipping expired store file removal due to min version being " + getFamily()
.getMinVersions());
return;
}
this.lock.readLock().lock();
Collection<StoreFile> delSfs = null;
try {
synchronized (filesCompacting) {
long cfTtl = getStoreFileTtl();
if (cfTtl != Long.MAX_VALUE) {
delSfs = storeEngine.getStoreFileManager()
.getUnneededFiles(EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
addToCompactingFiles(delSfs);
}
}
} finally {
this.lock.readLock().unlock();
}
if (delSfs == null || delSfs.isEmpty()) return;
Collection<StoreFile> newFiles = new ArrayList<StoreFile>(); // No new files.
writeCompactionWalRecord(delSfs, newFiles);
replaceStoreFiles(delSfs, newFiles);
completeCompaction(delSfs);
LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " + this
+ " of " + this.getRegionInfo().getRegionNameAsString() + "; total size for store is "
+ TraditionalBinaryPrefix.long2String(storeSize, "", 1));
}
Project: aliyun-oss-hadoop-fs
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: big-c
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: big-c
File: DistCpV1.java
private long parseLong(String[] args, int offset) {
if (offset == args.length) {
throw new IllegalArgumentException("<n> not specified in " + cmd);
}
long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
if (n <= 0) {
throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
}
return n;
}
Project: big-c
File: DistCpV1.java
/**
* Copies single file to the path specified by tmpfile.
* @param srcstat src path and metadata
* @param tmpfile temporary file to which copy is to be done
* @param absdst actual destination path to which copy is to be done
* @param reporter MapReduce reporter used to update counters and task status
* @return Number of bytes copied
*/
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
Reporter reporter) throws IOException {
long bytesCopied = 0L;
Path srcPath = srcstat.getPath();
// open src file
try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
// open tmp file
try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
LOG.info("Copying file " + srcPath + " of size " +
srcstat.getLen() + " bytes...");
// copy file
for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
out.write(buffer, 0, bytesRead);
bytesCopied += bytesRead;
reporter.setStatus(
String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
+ absdst + " [ " +
TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
+ TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
+ " ]");
}
}
}
return bytesCopied;
}
Project: hadoop-2.6.0-cdh5.4.3
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hadoop-plus
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hops
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hops
File: DistCpV1.java
private long parseLong(String[] args, int offset) {
if (offset == args.length) {
throw new IllegalArgumentException("<n> not specified in " + cmd);
}
long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
if (n <= 0) {
throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
}
return n;
}
Project: hops
File: DistCpV1.java
/**
* Copies single file to the path specified by tmpfile.
* @param srcstat src path and metadata
* @param tmpfile temporary file to which copy is to be done
* @param absdst actual destination path to which copy is to be done
* @param reporter MapReduce reporter used to update counters and task status
* @return Number of bytes copied
*/
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
Reporter reporter) throws IOException {
long bytesCopied = 0L;
Path srcPath = srcstat.getPath();
// open src file
try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
// open tmp file
try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
LOG.info("Copying file " + srcPath + " of size " +
srcstat.getLen() + " bytes...");
// copy file
for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
out.write(buffer, 0, bytesRead);
bytesCopied += bytesRead;
reporter.setStatus(
String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
+ absdst + " [ " +
TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
+ TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
+ " ]");
}
}
}
return bytesCopied;
}
Project: hbase
File: CompactionRequestImpl.java
@Override
public String toString() {
String fsList = filesToCompact.stream().filter(f -> f.getReader() != null)
.map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1))
.collect(Collectors.joining(", "));
return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" +
this.getFiles().size() + ", fileSize=" +
TraditionalBinaryPrefix.long2String(totalSize, "", 1) +
((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" +
selectionTime;
}
Project: hbase
File: HStore.java
/**
* Log a very elaborate compaction completion message.
* @param cr Request.
* @param sfs Resulting files.
* @param compactionStartTime Start time.
*/
private void logCompactionEndMessage(
CompactionRequestImpl cr, List<HStoreFile> sfs, long now, long compactionStartTime) {
StringBuilder message = new StringBuilder(
"Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
+ cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
+ this + " of " + this.getRegionInfo().getShortNameToLog() + " into ");
if (sfs.isEmpty()) {
message.append("none, ");
} else {
for (HStoreFile sf: sfs) {
message.append(sf.getPath().getName());
message.append("(size=");
message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1));
message.append("), ");
}
}
message.append("total size for store is ")
.append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
.append(". This selection was in queue for ")
.append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
.append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
.append(" to execute.");
LOG.info(message.toString());
if (LOG.isTraceEnabled()) {
int fileCount = storeEngine.getStoreFileManager().getStorefileCount();
long resultSize = getTotalSize(sfs);
String traceMessage = "COMPACTION start,end,size out,files in,files out,store size,"
+ "store files [" + compactionStartTime + "," + now + "," + resultSize + ","
+ cr.getFiles().size() + "," + sfs.size() + "," + storeSize + "," + fileCount + "]";
LOG.trace(traceMessage);
}
}
Project: hbase
File: HStore.java
private void removeUnneededFiles() throws IOException {
if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
if (getColumnFamilyDescriptor().getMinVersions() > 0) {
LOG.debug("Skipping expired store file removal due to min version being {}",
getColumnFamilyDescriptor().getMinVersions());
return;
}
this.lock.readLock().lock();
Collection<HStoreFile> delSfs = null;
try {
synchronized (filesCompacting) {
long cfTtl = getStoreFileTtl();
if (cfTtl != Long.MAX_VALUE) {
delSfs = storeEngine.getStoreFileManager().getUnneededFiles(
EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
addToCompactingFiles(delSfs);
}
}
} finally {
this.lock.readLock().unlock();
}
if (CollectionUtils.isEmpty(delSfs)) {
return;
}
Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files.
writeCompactionWalRecord(delSfs, newFiles);
replaceStoreFiles(delSfs, newFiles);
completeCompaction(delSfs);
LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
+ this + " of " + this.getRegionInfo().getRegionNameAsString()
+ "; total size for store is "
+ TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
}
Project: hbase
File: HStore.java
/**
* Similar to commit, but called in secondary region replicas for replaying the
* flush cache from primary region. Adds the new files to the store, and drops the
* snapshot depending on dropMemstoreSnapshot argument.
* @param fileNames names of the flushed files
* @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
* @throws IOException if the flushed files cannot be opened as store files
*/
@Override
public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
throws IOException {
List<HStoreFile> storeFiles = new ArrayList<>(fileNames.size());
for (String file : fileNames) {
// open the file as a store file (hfile link, etc)
StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
storeFiles.add(storeFile);
HStore.this.storeSize.addAndGet(storeFile.getReader().length());
HStore.this.totalUncompressedBytes
.addAndGet(storeFile.getReader().getTotalUncompressedBytes());
if (LOG.isInfoEnabled()) {
LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
" added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="
+ TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1));
}
}
long snapshotId = -1; // -1 means do not drop
if (dropMemstoreSnapshot && snapshot != null) {
snapshotId = snapshot.getId();
snapshot.close();
}
HStore.this.updateStorefiles(storeFiles, snapshotId);
}
Project: hadoop-TCP
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hardfs
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hadoop-on-lustre2
File: ContainersMonitorImpl.java
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
Project: hadoop
File: DistCpV1.java
static String bytesString(long b) {
return b + " bytes (" +
TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
Project: ditb
File: Compactor.java
/**
* Extracts some details about the files to compact that are commonly needed by compactors.
*
* @param filesToCompact Files.
* @param allFiles Whether all files are included for compaction
* @return The result.
*/
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
throws IOException {
FileDetails fd = new FileDetails();
long oldestHFileTimeStampToKeepMVCC =
System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
for (StoreFile file : filesToCompact) {
if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
// when isAllFiles is true, all files are compacted so we can calculate the smallest
// MVCC value to keep
if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
fd.minSeqIdToKeep = file.getMaxMemstoreTS();
}
}
long seqNum = file.getMaxSequenceId();
fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
StoreFile.Reader r = file.getReader();
if (r == null) {
LOG.warn("Null reader for " + file.getPath());
continue;
}
// NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
// blooms can cause progress to be miscalculated or if the user switches bloom
// type (e.g. from ROW to ROWCOL)
long keyCount = r.getEntries();
fd.maxKeyCount += keyCount;
// calculate the latest MVCC readpoint in any of the involved store files
Map<byte[], byte[]> fileInfo = r.loadFileInfo();
byte[] tmp = null;
// Get and set the real MVCCReadpoint for bulk loaded files, which is the
// SeqId number.
if (r.isBulkLoaded()) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
} else {
tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
if (tmp != null) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
}
}
tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
if (tmp != null) {
fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
}
// If required, calculate the earliest put timestamp of all involved storefiles.
// This is used to remove family delete marker during compaction.
long earliestPutTs = 0;
if (allFiles) {
tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
// assume we have very old puts
fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
} else {
earliestPutTs = Bytes.toLong(tmp);
fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype=" + r
.getBloomFilterType().toString() + ", size=" + TraditionalBinaryPrefix
.long2String(r.length(), "", 1) + ", encoding=" + r.getHFileReader()
.getDataBlockEncoding() + ", seqNum=" + seqNum + (allFiles ?
", earliestPutTs=" + earliestPutTs :
""));
}
}
return fd;
}
Project: ditb
File: MemStoreFlusher.java
/**
* Check if the regionserver's memstore memory usage is greater than the
* limit. If so, flush regions with the biggest memstores until we're down
* to the lower limit. This method blocks callers until we're down to a safe
* amount of memstore consumption.
*/
public void reclaimMemStoreMemory() {
TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
if (isAboveHighWaterMark()) {
if (Trace.isTracing()) {
scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
}
long start = EnvironmentEdgeManager.currentTime();
synchronized (this.blockSignal) {
boolean blocked = false;
long startTime = 0;
boolean interrupted = false;
try {
while (isAboveHighWaterMark() && !server.isStopped()) {
if (!blocked) {
startTime = EnvironmentEdgeManager.currentTime();
LOG.info("Blocking updates on "
+ server.toString()
+ ": the global memstore size "
+ TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting()
.getGlobalMemstoreSize(), "", 1) + " is >= than blocking "
+ TraditionalBinaryPrefix.long2String(globalMemStoreLimit, "", 1) + " size");
}
blocked = true;
wakeupFlushThread();
try {
// we should be able to wait forever, but we've seen a bug where
// we miss a notify, so put a 5 second bound on it at least.
blockSignal.wait(5 * 1000);
} catch (InterruptedException ie) {
LOG.warn("Interrupted while waiting");
interrupted = true;
}
long took = EnvironmentEdgeManager.currentTime() - start;
LOG.warn("Memstore is above high water mark and block " + took + "ms");
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if(blocked){
final long totalTime = EnvironmentEdgeManager.currentTime() - startTime;
if(totalTime > 0){
this.updatesBlockedMsHighWater.add(totalTime);
}
LOG.info("Unblocking updates for server " + server.toString());
}
}
} else if (isAboveLowWaterMark()) {
wakeupFlushThread();
}
scope.close();
}
Project: big-c
File: DistCpV1.java
static String bytesString(long b) {
return b + " bytes (" +
TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
Project: hadoop-plus
File: ContainersMonitorImpl.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.monitoringInterval =
conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
ResourceCalculatorPlugin.class);
this.resourceCalculatorPlugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
LOG.info(" Using ResourceCalculatorPlugin : "
+ this.resourceCalculatorPlugin);
processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
ResourceCalculatorProcessTree.class);
this.conf = conf;
LOG.info(" Using ResourceCalculatorProcessTree : "
+ this.processTreeClass);
long configuredPMemForContainers = conf.getLong(
YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;
// Setting these irrespective of whether checks are enabled. Required in
// the UI.
// ///////// Physical memory configuration //////
this.maxPmemAllottedForContainers = configuredPMemForContainers;
// ///////// Virtual memory configuration //////
float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
Preconditions.checkArgument(vmemRatio > 0.99f,
YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
this.maxVmemAllottedForContainers =
(long) (vmemRatio * configuredPMemForContainers);
pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
if (pmemCheckEnabled) {
// Logging if actual pmem cannot be determined.
long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
if (this.resourceCalculatorPlugin != null) {
totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
.getPhysicalMemorySize();
if (totalPhysicalMemoryOnNM <= 0) {
LOG.warn("NodeManager's totalPmem could not be calculated. "
+ "Setting it to " + UNKNOWN_MEMORY_LIMIT);
totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
}
}
if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with "
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
"", 1)
+ " physical memory allocated to containers, which is more than "
+ "80% of the total physical memory available ("
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
1) + "). Thrashing might happen.");
}
}
super.serviceInit(conf);
}
Project: hops
File: DistCpV1.java
static String bytesString(long b) {
return b + " bytes (" +
TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
Project: hbase
File: Compactor.java
/**
* Extracts some details about the files to compact that are commonly needed by compactors.
* @param filesToCompact Files.
* @param allFiles Whether all files are included for compaction
* @return The result.
*/
private FileDetails getFileDetails(
Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
FileDetails fd = new FileDetails();
long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() -
(1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
for (HStoreFile file : filesToCompact) {
if(allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
// when isAllFiles is true, all files are compacted so we can calculate the smallest
// MVCC value to keep
if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
fd.minSeqIdToKeep = file.getMaxMemStoreTS();
}
}
long seqNum = file.getMaxSequenceId();
fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
StoreFileReader r = file.getReader();
if (r == null) {
LOG.warn("Null reader for " + file.getPath());
continue;
}
// NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
// blooms can cause progress to be miscalculated or if the user switches bloom
// type (e.g. from ROW to ROWCOL)
long keyCount = r.getEntries();
fd.maxKeyCount += keyCount;
// calculate the latest MVCC readpoint in any of the involved store files
Map<byte[], byte[]> fileInfo = r.loadFileInfo();
byte[] tmp = null;
// Get and set the real MVCCReadpoint for bulk loaded files, which is the
// SeqId number.
if (r.isBulkLoaded()) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
}
else {
tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
if (tmp != null) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
}
}
tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
if (tmp != null) {
fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
}
// If required, calculate the earliest put timestamp of all involved storefiles.
// This is used to remove family delete marker during compaction.
long earliestPutTs = 0;
if (allFiles) {
tmp = fileInfo.get(EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
// assume we have very old puts
fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
} else {
earliestPutTs = Bytes.toLong(tmp);
fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
}
}
tmp = fileInfo.get(TIMERANGE_KEY);
fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax();
if (LOG.isDebugEnabled()) {
LOG.debug("Compacting " + file +
", keycount=" + keyCount +
", bloomtype=" + r.getBloomFilterType().toString() +
", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) +
", encoding=" + r.getHFileReader().getDataBlockEncoding() +
", seqNum=" + seqNum +
(allFiles ? ", earliestPutTs=" + earliestPutTs: ""));
}
}
return fd;
}
Project: hbase
File: MemStoreFlusher.java
private void logMsg(String string1, long val, long max) {
LOG.info("Blocking updates on " + server.toString() + ": " + string1 + " "
+ TraditionalBinaryPrefix.long2String(val, "", 1) + " is >= than blocking "
+ TraditionalBinaryPrefix.long2String(max, "", 1) + " size");
}
Project: hadoop-TCP
File: ContainersMonitorImpl.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.monitoringInterval =
conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
ResourceCalculatorPlugin.class);
this.resourceCalculatorPlugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
LOG.info(" Using ResourceCalculatorPlugin : "
+ this.resourceCalculatorPlugin);
processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
ResourceCalculatorProcessTree.class);
this.conf = conf;
LOG.info(" Using ResourceCalculatorProcessTree : "
+ this.processTreeClass);
long configuredPMemForContainers = conf.getLong(
YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;
// Setting these irrespective of whether checks are enabled. Required in
// the UI.
// ///////// Physical memory configuration //////
this.maxPmemAllottedForContainers = configuredPMemForContainers;
// ///////// Virtual memory configuration //////
float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
Preconditions.checkArgument(vmemRatio > 0.99f,
YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
this.maxVmemAllottedForContainers =
(long) (vmemRatio * configuredPMemForContainers);
pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
if (pmemCheckEnabled) {
// Logging if actual pmem cannot be determined.
long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
if (this.resourceCalculatorPlugin != null) {
totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
.getPhysicalMemorySize();
if (totalPhysicalMemoryOnNM <= 0) {
LOG.warn("NodeManager's totalPmem could not be calculated. "
+ "Setting it to " + UNKNOWN_MEMORY_LIMIT);
totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
}
}
if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with "
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
"", 1)
+ " physical memory allocated to containers, which is more than "
+ "80% of the total physical memory available ("
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
1) + "). Thrashing might happen.");
}
}
super.serviceInit(conf);
}
Project: hardfs
File: ContainersMonitorImpl.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.monitoringInterval =
conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
ResourceCalculatorPlugin.class);
this.resourceCalculatorPlugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
LOG.info(" Using ResourceCalculatorPlugin : "
+ this.resourceCalculatorPlugin);
processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
ResourceCalculatorProcessTree.class);
this.conf = conf;
LOG.info(" Using ResourceCalculatorProcessTree : "
+ this.processTreeClass);
long configuredPMemForContainers = conf.getLong(
YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;
// Setting these irrespective of whether checks are enabled. Required in
// the UI.
// ///////// Physical memory configuration //////
this.maxPmemAllottedForContainers = configuredPMemForContainers;
// ///////// Virtual memory configuration //////
float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
Preconditions.checkArgument(vmemRatio > 0.99f,
YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
this.maxVmemAllottedForContainers =
(long) (vmemRatio * configuredPMemForContainers);
pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
if (pmemCheckEnabled) {
// Logging if actual pmem cannot be determined.
long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
if (this.resourceCalculatorPlugin != null) {
totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
.getPhysicalMemorySize();
if (totalPhysicalMemoryOnNM <= 0) {
LOG.warn("NodeManager's totalPmem could not be calculated. "
+ "Setting it to " + UNKNOWN_MEMORY_LIMIT);
totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
}
}
if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with "
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
"", 1)
+ " physical memory allocated to containers, which is more than "
+ "80% of the total physical memory available ("
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
1) + "). Thrashing might happen.");
}
}
super.serviceInit(conf);
}
Project: hadoop-on-lustre2
File: ContainersMonitorImpl.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.monitoringInterval =
conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
ResourceCalculatorPlugin.class);
this.resourceCalculatorPlugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
LOG.info(" Using ResourceCalculatorPlugin : "
+ this.resourceCalculatorPlugin);
processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
ResourceCalculatorProcessTree.class);
this.conf = conf;
LOG.info(" Using ResourceCalculatorProcessTree : "
+ this.processTreeClass);
long configuredPMemForContainers = conf.getLong(
YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024L;
// Setting these irrespective of whether checks are enabled. Required in
// the UI.
// ///////// Physical memory configuration //////
this.maxPmemAllottedForContainers = configuredPMemForContainers;
// ///////// Virtual memory configuration //////
float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
Preconditions.checkArgument(vmemRatio > 0.99f,
YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
this.maxVmemAllottedForContainers =
(long) (vmemRatio * configuredPMemForContainers);
pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
if (pmemCheckEnabled) {
// Logging if actual pmem cannot be determined.
long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
if (this.resourceCalculatorPlugin != null) {
totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
.getPhysicalMemorySize();
if (totalPhysicalMemoryOnNM <= 0) {
LOG.warn("NodeManager's totalPmem could not be calculated. "
+ "Setting it to " + UNKNOWN_MEMORY_LIMIT);
totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
}
}
if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with "
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
"", 1)
+ " physical memory allocated to containers, which is more than "
+ "80% of the total physical memory available ("
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
1) + "). Thrashing might happen.");
}
}
super.serviceInit(conf);
}