Java Code Examples for android.os.Trace
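The following examples show how to use the Java class android.os.Trace. They are extracted from open source Android projects; each snippet is listed under the project and file it comes from.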
Project: FlickLauncher
File: LauncherProvider.java
/**
 * Overridden in tests
 */
protected synchronized void createDbIfNotExists() {
    if (mOpenHelper == null) {
        if (LauncherAppState.PROFILE_STARTUP) {
            Trace.beginSection("Opening workspace DB");
        }
        mOpenHelper = new DatabaseHelper(getContext(), mListenerHandler);

        if (RestoreDbTask.isPending(getContext())) {
            if (!RestoreDbTask.performRestore(mOpenHelper)) {
                mOpenHelper.createEmptyDB(mOpenHelper.getWritableDatabase());
            }
            // Set is pending to false irrespective of the result, so that it doesn't get
            // executed again.
            RestoreDbTask.setPending(getContext(), false);
        }

        if (LauncherAppState.PROFILE_STARTUP) {
            Trace.endSection();
        }
    }
}
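All of the snippets on this page rely on the same contract: Trace.beginSection() and Trace.endSection() must be balanced, must run on the same thread, and are only available from API level 18 (JELLY_BEAN_MR2). A minimal helper sketch of the guarded begin/end pattern, using try/finally so the section is closed even when the traced code throws (the class and method names here are illustrative, not from any project above):

import android.os.Build;
import android.os.Trace;

public final class TraceSections {
    // beginSection()/endSection() exist only on API 18+.
    private static final boolean TRACE_AVAILABLE =
            Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;

    public static void runTraced(String sectionName, Runnable work) {
        if (TRACE_AVAILABLE) {
            Trace.beginSection(sectionName); // shows up as a named slice in systrace
        }
        try {
            work.run();
        } finally {
            if (TRACE_AVAILABLE) {
                Trace.endSection(); // always balance the begin, even on exception
            }
        }
    }
}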
Project: BlockCanaryEx
File: ViewPerformanceSampler.java
static void install() {
    if (!installed) {
        installed = true;
        if (isSupported()) {
            try {
                Method traceBegin = Trace.class.getDeclaredMethod("traceBegin", long.class, String.class);
                Method traceEnd = Trace.class.getDeclaredMethod("traceEnd", long.class);
                Hook.hook(traceBegin, ViewPerformanceSampler.class.getDeclaredMethod("traceBegin", long.class, String.class));
                Hook.hook(traceEnd, ViewPerformanceSampler.class.getDeclaredMethod("traceEnd", long.class));
            } catch (NoSuchMethodException e) {
                e.printStackTrace();
            }
        }
    }
}
Project: FlickLauncher
File: Launcher.java
/**
 * Refreshes the shortcuts shown on the workspace.
 *
 * Implementation of the method from LauncherModel.Callbacks.
 */
public void startBinding() {
    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.beginSection("Starting page bind");
    }
    setWorkspaceLoading(true);

    // Clear the workspace because it's going to be rebound
    mWorkspace.clearDropTargets();
    mWorkspace.removeAllWorkspaceScreens();

    mWidgetsToAdvance.clear();
    if (mHotseat != null) {
        mHotseat.resetLayout();
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.endSection();
    }
}
Project: SimpleUILauncher
File: Launcher.java
/**
 * Refreshes the shortcuts shown on the workspace.
 *
 * Implementation of the method from LauncherModel.Callbacks.
 */
public void startBinding() {
    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.beginSection("Starting page bind");
    }
    setWorkspaceLoading(true);

    // Clear the workspace because it's going to be rebound
    mWorkspace.clearDropTargets();
    mWorkspace.removeAllWorkspaceScreens();

    mWidgetsToAdvance.clear();
    if (mHotseat != null) {
        mHotseat.resetLayout();
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.endSection();
    }
}
Project: SimpleUILauncher
File: LauncherProvider.java
/**
 * Overridden in tests
 */
protected synchronized void createDbIfNotExists() {
    if (mOpenHelper == null) {
        if (LauncherAppState.PROFILE_STARTUP) {
            Trace.beginSection("Opening workspace DB");
        }
        mOpenHelper = new DatabaseHelper(getContext(), mListenerHandler);

        if (RestoreDbTask.isPending(getContext())) {
            if (!RestoreDbTask.performRestore(mOpenHelper)) {
                mOpenHelper.createEmptyDB(mOpenHelper.getWritableDatabase());
            }
            // Set is pending to false irrespective of the result, so that it doesn't get
            // executed again.
            RestoreDbTask.setPending(getContext(), false);
        }

        if (LauncherAppState.PROFILE_STARTUP) {
            Trace.endSection();
        }
    }
}
Project: droidddle
File: MaterialColorMapUtils.java
/**
 * Return primary and secondary colors from the Material color palette that are similar to
 * {@param color}.
 */
public MaterialPalette calculatePrimaryAndSecondaryColor(int color) {
    Trace.beginSection("calculatePrimaryAndSecondaryColor");

    final float colorHue = hue(color);
    float minimumDistance = Float.MAX_VALUE;
    int indexBestMatch = 0;
    for (int i = 0; i < sPrimaryColors.length(); i++) {
        final int primaryColor = sPrimaryColors.getColor(i, 0);
        final float comparedHue = hue(primaryColor);
        // No need to be perceptually accurate when calculating color distances since
        // we are only mapping to 15 colors. Being slightly inaccurate isn't going to change
        // the mapping very often.
        final float distance = Math.abs(comparedHue - colorHue);
        if (distance < minimumDistance) {
            minimumDistance = distance;
            indexBestMatch = i;
        }
    }

    Trace.endSection();
    return new MaterialPalette(sPrimaryColors.getColor(indexBestMatch, 0),
            sSecondaryColors.getColor(indexBestMatch, 0));
}
Project: FMTech
File: aga.java
public final void b(VH paramVH, int paramInt)
{
    paramVH.b = paramInt;
    if (this.b) {
        paramVH.d = b(paramInt);
    }
    paramVH.a(1, 519);
    if (Build.VERSION.SDK_INT >= 18) {
        Trace.beginSection("RV OnBindView");
    }
    if ((0x400 & paramVH.i) == 0) {
        if ((paramVH.k != null) && (paramVH.k.size() != 0)) {}
    }
    for (;;)
    {
        a(paramVH, paramInt);
        paramVH.f();
        if (Build.VERSION.SDK_INT >= 18) {
            Trace.endSection();
        }
        return;
    }
}
Project: android-perf-testing
File: EnableLogcatDump.java
/**
 * Extract logcat buffer to a file after the test run.
 */
public void after() {
    try {
        if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
            Trace.beginSection("Taking logcat");
        }
        ProcessBuilder processBuilder = new ProcessBuilder();
        processBuilder.command("logcat", "-d",
                "-f", PerfTestingUtils.getTestFile(mTestClass, mTestName, "logcat.log")
                        .getAbsolutePath());
        processBuilder.redirectErrorStream();
        Process process = processBuilder.start();
        process.waitFor();
        if (process.exitValue() != 0) {
            Log.e(LOG_TAG, "Error exit value while extracting logcat, exitValue=" +
                    process.exitValue());
        }
    } catch (Exception ignored) {
        Log.e(LOG_TAG, "Error while extracting logcat", ignored);
    } finally {
        if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
            Trace.endSection();
        }
    }
}
Project: snu-artoon
File: TensorFlowInferenceInterface.java
private void loadGraph(InputStream is, Graph g) throws IOException {
    final long startMs = System.currentTimeMillis();

    Trace.beginSection("initializeTensorFlow");

    Trace.beginSection("readGraphDef");
    // TODO(ashankar): Can we somehow mmap the contents instead of copying them?
    byte[] graphDef = new byte[is.available()];
    final int numBytesRead = is.read(graphDef);
    if (numBytesRead != graphDef.length) {
        throw new IOException(
                "read error: read only "
                        + numBytesRead
                        + " of the graph, expected to read "
                        + graphDef.length);
    }
    Trace.endSection();

    Trace.beginSection("importGraphDef");
    try {
        g.importGraphDef(graphDef);
    } catch (IllegalArgumentException e) {
        throw new IOException("Not a valid TensorFlow Graph serialization: " + e.getMessage());
    }
    Trace.endSection();

    Trace.endSection(); // initializeTensorFlow.

    final long endMs = System.currentTimeMillis();
    Log.i(
            TAG,
            "Model load took " + (endMs - startMs) + "ms, TensorFlow version: " + TensorFlow.version());
}
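Note how the sections nest: "initializeTensorFlow" stays open while "readGraphDef" and "importGraphDef" begin and end inside it. Sections form a per-thread stack, so each Trace.endSection() closes the most recently begun section on that thread.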
Project: TensorflowAndroidDemo
File: TensorFlowInferenceInterface.java
private void loadGraph(InputStream is, Graph g) throws IOException {
    final long startMs = System.currentTimeMillis();

    Trace.beginSection("initializeTensorFlow");

    Trace.beginSection("readGraphDef");
    // TODO(ashankar): Can we somehow mmap the contents instead of copying them?
    byte[] graphDef = new byte[is.available()];
    final int numBytesRead = is.read(graphDef);
    if (numBytesRead != graphDef.length) {
        throw new IOException(
                "read error: read only "
                        + numBytesRead
                        + " of the graph, expected to read "
                        + graphDef.length);
    }
    Trace.endSection();

    Trace.beginSection("importGraphDef");
    try {
        g.importGraphDef(graphDef);
    } catch (IllegalArgumentException e) {
        throw new IOException("Not a valid TensorFlow Graph serialization: " + e.getMessage());
    }
    Trace.endSection();

    Trace.endSection(); // initializeTensorFlow.

    final long endMs = System.currentTimeMillis();
    Log.i(
            TAG,
            "Model load took " + (endMs - startMs) + "ms, TensorFlow version: " + TensorFlow.version());
}
Project: realtime-stopwatch
File: MainActivity.java
@Override
public void run() {
    Trace.beginSection("UPDATE");
    mTextView.setText(String.valueOf(System.currentTimeMillis() - mStartTime));
    mTextView.post(mUpdateTextView);
    Trace.endSection();
}
Project: Virtualview-Android
File: ViewBase.java
final public void setVData(Object data, boolean isAppend) {
    if (VERSION.SDK_INT >= 18) {
        Trace.beginSection("ViewBase.setVData");
    }
    mViewCache.setComponentData(data);
    if (data instanceof JSONObject) {
        boolean invalidate = false;
        if (((JSONObject) data).optBoolean(FLAG_INVALIDATE)) {
            invalidate = true;
        }
        List<ViewBase> cacheView = mViewCache.getCacheView();
        if (cacheView != null) {
            for (int i = 0, size = cacheView.size(); i < size; i++) {
                ViewBase viewBase = cacheView.get(i);
                List<Item> items = mViewCache.getCacheItem(viewBase);
                if (null != items) {
                    for (int j = 0, length = items.size(); j < length; j++) {
                        Item item = items.get(j);
                        if (invalidate) {
                            item.invalidate(data.hashCode());
                        }
                        item.bind(data, isAppend);
                    }
                    viewBase.onParseValueFinished();
                    if (!viewBase.isRoot() && viewBase.supportExposure()) {
                        mContext.getEventManager().emitEvent(EventManager.TYPE_Exposure,
                                EventData.obtainData(mContext, viewBase));
                    }
                }
            }
        }
        ((JSONObject) data).remove(FLAG_INVALIDATE);
    }
    if (VERSION.SDK_INT >= 18) {
        Trace.endSection();
    }
}
Project: grafika
File: MultiSurfaceActivity.java
/**
 * Clears the surface, then draws a filled circle with a shadow.
 * <p>
 * Similar to drawCircleSurface(), but the position changes based on the value of "i".
 */
private void drawBouncingCircle(Surface surface, int i) {
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(Color.WHITE);
    paint.setStyle(Paint.Style.FILL);

    Canvas canvas = surface.lockCanvas(null);
    try {
        Trace.beginSection("drawBouncingCircle");
        Trace.beginSection("drawColor");
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
        Trace.endSection(); // drawColor

        int width = canvas.getWidth();
        int height = canvas.getHeight();
        int radius, x, y;
        if (width < height) {
            // portrait
            radius = width / 4;
            x = width / 4 + ((width / 2 * i) / BOUNCE_STEPS);
            y = height * 3 / 4;
        } else {
            // landscape
            radius = height / 4;
            x = width * 3 / 4;
            y = height / 4 + ((height / 2 * i) / BOUNCE_STEPS);
        }
        paint.setShadowLayer(radius / 4 + 1, 0, 0, Color.RED);

        canvas.drawCircle(x, y, radius, paint);
        Trace.endSection(); // drawBouncingCircle
    } finally {
        surface.unlockCanvasAndPost(canvas);
    }
}
Project: tensorflow-on-android
File: TensorFlowTrainingInterface.java
private void loadGraph(InputStream is, Graph g) throws IOException {
    final long startMs = System.currentTimeMillis();

    Trace.beginSection("initializeTensorFlow");

    Trace.beginSection("readGraphDef");
    // TODO(ashankar): Can we somehow mmap the contents instead of copying them?
    byte[] graphDef = new byte[is.available()];
    final int numBytesRead = is.read(graphDef);
    if (numBytesRead != graphDef.length) {
        throw new IOException(
                "read error: read only "
                        + numBytesRead
                        + " of the graph, expected to read "
                        + graphDef.length);
    }
    Trace.endSection();

    Trace.beginSection("importGraphDef");
    try {
        g.importGraphDef(graphDef);
    } catch (IllegalArgumentException e) {
        throw new IOException("Not a valid TensorFlow Graph serialization: " + e.getMessage());
    }
    Trace.endSection();

    Trace.endSection(); // initializeTensorFlow.

    final long endMs = System.currentTimeMillis();
    Log.i(
            TAG,
            "Model load took " + (endMs - startMs) + "ms, TensorFlow version: " + TensorFlow.version());
}
Project: android-image-classification
File: ImageNetClassifier.java
public Classification[] classifyImage(Bitmap bitmap) {
    Trace.beginSection("create Image Buffer");
    ByteBuffer byteBuffer = ByteBuffer.allocate(bitmap.getByteCount());
    bitmap.copyPixelsToBuffer(byteBuffer);
    byte[] bytes = byteBuffer.array();
    Trace.endSection();

    Trace.beginSection("color adaption");
    float[] colors;
    if (modelNeedsMeanAdjust) {
        colors = subtractMean(bytes);
    } else {
        colors = extractRGBData(bytes);
    }
    Trace.endSection();

    Trace.beginSection("Model execution");
    final long startTime = SystemClock.uptimeMillis();
    mPredictor.forward("data", colors);
    mActivity.setLasProcessingTimeMs(SystemClock.uptimeMillis() - startTime);
    final float[] result = mPredictor.getOutput(0);
    Trace.endSection();

    Trace.beginSection("gather top results");
    Classification[] results = getTopKresults(result, 5);
    Trace.endSection();

    mActivity.requestRender();
    return results;
}
Project: FlickLauncher
File: Launcher.java
/**
 * Callback saying that there aren't any more items to bind.
 *
 * Implementation of the method from LauncherModel.Callbacks.
 */
public void finishBindingItems() {
    Runnable r = new Runnable() {
        public void run() {
            finishBindingItems();
        }
    };
    if (waitUntilResume(r)) {
        return;
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.beginSection("Page bind completed");
    }
    if (mSavedState != null) {
        if (!mWorkspace.hasFocus()) {
            mWorkspace.getChildAt(mWorkspace.getCurrentPage()).requestFocus();
        }
        mSavedState = null;
    }

    mWorkspace.restoreInstanceStateForRemainingPages();

    setWorkspaceLoading(false);

    if (mPendingActivityResult != null) {
        handleActivityResult(mPendingActivityResult.requestCode,
                mPendingActivityResult.resultCode, mPendingActivityResult.data);
        mPendingActivityResult = null;
    }

    InstallShortcutReceiver.disableAndFlushInstallQueue(this);

    if (mLauncherCallbacks != null) {
        mLauncherCallbacks.finishBindingItems(false);
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.endSection();
    }
}
Project: SimpleUILauncher
File: Launcher.java
/**
 * Callback saying that there aren't any more items to bind.
 *
 * Implementation of the method from LauncherModel.Callbacks.
 */
public void finishBindingItems() {
    Runnable r = new Runnable() {
        public void run() {
            finishBindingItems();
        }
    };
    if (waitUntilResume(r)) {
        return;
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.beginSection("Page bind completed");
    }
    if (mSavedState != null) {
        if (!mWorkspace.hasFocus()) {
            mWorkspace.getChildAt(mWorkspace.getCurrentPage()).requestFocus();
        }
        mSavedState = null;
    }

    mWorkspace.restoreInstanceStateForRemainingPages();

    setWorkspaceLoading(false);

    if (mPendingActivityResult != null) {
        handleActivityResult(mPendingActivityResult.requestCode,
                mPendingActivityResult.resultCode, mPendingActivityResult.data);
        mPendingActivityResult = null;
    }

    InstallShortcutReceiver.disableAndFlushInstallQueue(this);

    if (mLauncherCallbacks != null) {
        mLauncherCallbacks.finishBindingItems(false);
    }

    if (LauncherAppState.PROFILE_STARTUP) {
        Trace.endSection();
    }
}
Project: mediacodec
File: MultiSurfaceActivity.java
/**
 * Clears the surface, then draws a filled circle with a shadow.
 * <p>
 * Similar to drawCircleSurface(), but the position changes based on the value of "i".
 */
private void drawBouncingCircle(Surface surface, int i) {
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(Color.WHITE);
    paint.setStyle(Paint.Style.FILL);

    Canvas canvas = surface.lockCanvas(null);
    try {
        Trace.beginSection("drawBouncingCircle");
        Trace.beginSection("drawColor");
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
        Trace.endSection(); // drawColor

        int width = canvas.getWidth();
        int height = canvas.getHeight();
        int radius, x, y;
        if (width < height) {
            // portrait
            radius = width / 4;
            x = width / 4 + ((width / 2 * i) / BOUNCE_STEPS);
            y = height * 3 / 4;
        } else {
            // landscape
            radius = height / 4;
            x = width * 3 / 4;
            y = height / 4 + ((height / 2 * i) / BOUNCE_STEPS);
        }
        paint.setShadowLayer(radius / 4 + 1, 0, 0, Color.RED);

        canvas.drawCircle(x, y, radius, paint);
        Trace.endSection(); // drawBouncingCircle
    } finally {
        surface.unlockCanvasAndPost(canvas);
    }
}
Project: Paideia
File: TensorflowClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("Recognize");

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    for (final String result : classifyImageBmp(bitmap).split("\n")) {
        Log.i(TAG, "Parsing [" + result + "]");

        // Clean up the string as needed
        final StringTokenizer st = new StringTokenizer(result);
        if (!st.hasMoreTokens()) {
            continue;
        }

        final String id = st.nextToken();
        final String confidenceString = st.nextToken();
        final float confidence = Float.parseFloat(confidenceString);

        final String title =
                result.substring(id.length() + confidenceString.length() + 2, result.length());

        if (!title.isEmpty()) {
            recognitions.add(new Recognition(id, title, confidence, null));
        }
    }

    Trace.endSection();
    return recognitions;
}
Project: DialogUtil
File: TestTool.java
/**
 * Only opens a custom tag for systrace; it does not control whether systrace itself is
 * running (nor can it).
 * Must be used together with the command-line option -a, otherwise custom tags have no effect.
 * Requires a debug build.
 * Trace begin and end must execute on the same thread.
 * @param tag
 */
public static void startSysTraceSection(String tag) {
    if (DEBUG) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
            Trace.beginSection(tag);
        }
    }
}
Project: DialogUtil
File: TestTool.java
public static void stopSysTraceSection() {
    if (DEBUG) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
            Trace.endSection();
        }
    }
}
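A minimal usage sketch for these two helpers, pairing them in try/finally so begin and end stay balanced on the same thread even if the profiled code throws (the section name "bindData" and the profiled call are illustrative):

TestTool.startSysTraceSection("bindData"); // hypothetical section name
try {
    bindData(); // hypothetical work being profiled
} finally {
    TestTool.stopSysTraceSection();
}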
Project: pause-resume-video-recording
File: MultiSurfaceActivity.java
/**
 * Clears the surface, then draws a filled circle with a shadow.
 * <p>
 * Similar to drawCircleSurface(), but the position changes based on the value of "i".
 */
private void drawBouncingCircle(Surface surface, int i) {
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(Color.WHITE);
    paint.setStyle(Paint.Style.FILL);

    Canvas canvas = surface.lockCanvas(null);
    try {
        Trace.beginSection("drawBouncingCircle");
        Trace.beginSection("drawColor");
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
        Trace.endSection(); // drawColor

        int width = canvas.getWidth();
        int height = canvas.getHeight();
        int radius, x, y;
        if (width < height) {
            // portrait
            radius = width / 4;
            x = width / 4 + ((width / 2 * i) / BOUNCE_STEPS);
            y = height * 3 / 4;
        } else {
            // landscape
            radius = height / 4;
            x = width * 3 / 4;
            y = height / 4 + ((height / 2 * i) / BOUNCE_STEPS);
        }
        paint.setShadowLayer(radius / 4 + 1, 0, 0, Color.RED);

        canvas.drawCircle(x, y, radius, paint);
        Trace.endSection(); // drawBouncingCircle
    } finally {
        surface.unlockCanvasAndPost(canvas);
    }
}
Project: FMTech
File: RecyclerView.java
protected void onLayout(boolean paramBoolean, int paramInt1, int paramInt2, int paramInt3, int paramInt4)
{
    a();
    if (Build.VERSION.SDK_INT >= 18) {
        Trace.beginSection("RV OnLayout");
    }
    i();
    if (Build.VERSION.SDK_INT >= 18) {
        Trace.endSection();
    }
    a(false);
    this.l = true;
}
Project: HardwareEncodingTest
File: MultiSurfaceActivity.java
/**
 * Clears the surface, then draws a filled circle with a shadow.
 * <p>
 * Similar to drawCircleSurface(), but the position changes based on the value of "i".
 */
private void drawBouncingCircle(Surface surface, int i) {
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(Color.WHITE);
    paint.setStyle(Paint.Style.FILL);

    Canvas canvas = surface.lockCanvas(null);
    try {
        Trace.beginSection("drawBouncingCircle");
        Trace.beginSection("drawColor");
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
        Trace.endSection(); // drawColor

        int width = canvas.getWidth();
        int height = canvas.getHeight();
        int radius, x, y;
        if (width < height) {
            // portrait
            radius = width / 4;
            x = width / 4 + ((width / 2 * i) / BOUNCE_STEPS);
            y = height * 3 / 4;
        } else {
            // landscape
            radius = height / 4;
            x = width * 3 / 4;
            y = height / 4 + ((height / 2 * i) / BOUNCE_STEPS);
        }
        paint.setShadowLayer(radius / 4 + 1, 0, 0, Color.RED);

        canvas.drawCircle(x, y, radius, paint);
        Trace.endSection(); // drawBouncingCircle
    } finally {
        surface.unlockCanvasAndPost(canvas);
    }
}
Project: PerchBroadcast-Android-SDK
File: FFmpegMuxer.java
@Override
public void handleMessage(Message inputMessage) {
    int what = inputMessage.what;
    Object obj = inputMessage.obj;

    FFmpegMuxer muxer = mWeakMuxer.get();
    if (muxer == null) {
        Log.w(TAG, "FFmpegHandler.handleMessage: muxer is null");
        return;
    }

    switch (what) {
        case MSG_ADD_TRACK:
            if (TRACE) Trace.beginSection("addTrack");
            muxer.handleAddTrack((MediaFormat) obj);
            if (TRACE) Trace.endSection();
            break;
        case MSG_WRITE_FRAME:
            if (TRACE) Trace.beginSection("writeSampleData");
            WritePacketData data = (WritePacketData) obj;
            muxer.handleWriteSampleData(data.mEncoder,
                    data.mTrackIndex,
                    data.mBufferIndex,
                    data.mData,
                    data.getBufferInfo());
            if (TRACE) Trace.endSection();
            break;
        case MSG_FORCE_SHUTDOWN:
            muxer.handleForceStop();
            break;
        default:
            throw new RuntimeException("Unexpected msg what=" + what);
    }
}
Project: droidel
File: AbstractThreadedSyncAdapter.java
@Override
public void run() {
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);

    // Trace this sync instance. Note, conceptually this should be in
    // SyncStorageEngine.insertStartSyncEvent(), but the trace functions require unique
    // threads in order to track overlapping operations, so we'll do it here for now.
    Trace.traceBegin(Trace.TRACE_TAG_SYNC_MANAGER, mAuthority);

    SyncResult syncResult = new SyncResult();
    ContentProviderClient provider = null;
    try {
        if (isCanceled()) {
            return;
        }
        provider = mContext.getContentResolver().acquireContentProviderClient(mAuthority);
        if (provider != null) {
            AbstractThreadedSyncAdapter.this.onPerformSync(mAccount, mExtras,
                    mAuthority, provider, syncResult);
        } else {
            syncResult.databaseError = true;
        }
    } finally {
        Trace.traceEnd(Trace.TRACE_TAG_SYNC_MANAGER);

        if (provider != null) {
            provider.release();
        }
        if (!isCanceled()) {
            mSyncContext.onFinished(syncResult);
        }
        // synchronize so that the assignment will be seen by other threads
        // that also synchronize accesses to mSyncThreads
        synchronized (mSyncThreadLock) {
            mSyncThreads.remove(mThreadsKey);
        }
    }
}
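Unlike the public beginSection()/endSection() pair used elsewhere on this page, Trace.traceBegin(long, String) and Trace.traceEnd(long) take an explicit trace-tag constant (TRACE_TAG_SYNC_MANAGER here) and are hidden framework APIs. This snippet is framework source; ordinary application code cannot call these methods directly, which is why the BlockCanaryEx example above reaches them through reflection.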
Project: kickflip-android-sdk
File: FFmpegMuxer.java
@Override
public void handleMessage(Message inputMessage) {
    int what = inputMessage.what;
    Object obj = inputMessage.obj;

    FFmpegMuxer muxer = mWeakMuxer.get();
    if (muxer == null) {
        Log.w(TAG, "FFmpegHandler.handleMessage: muxer is null");
        return;
    }

    switch (what) {
        case MSG_ADD_TRACK:
            if (TRACE) Trace.beginSection("addTrack");
            muxer.handleAddTrack((MediaFormat) obj);
            if (TRACE) Trace.endSection();
            break;
        case MSG_WRITE_FRAME:
            if (TRACE) Trace.beginSection("writeSampleData");
            WritePacketData data = (WritePacketData) obj;
            muxer.handleWriteSampleData(data.mEncoder,
                    data.mTrackIndex,
                    data.mBufferIndex,
                    data.mData,
                    data.getBufferInfo());
            if (TRACE) Trace.endSection();
            break;
        case MSG_FORCE_SHUTDOWN:
            muxer.handleForceStop();
            break;
        default:
            throw new RuntimeException("Unexpected msg what=" + what);
    }
}
Project: hugo
File: Hugo.java
private static void enterMethod(JoinPoint joinPoint) {
    if (!enabled) return;

    CodeSignature codeSignature = (CodeSignature) joinPoint.getSignature();

    Class<?> cls = codeSignature.getDeclaringType();
    String methodName = codeSignature.getName();
    String[] parameterNames = codeSignature.getParameterNames();
    Object[] parameterValues = joinPoint.getArgs();

    StringBuilder builder = new StringBuilder("\u21E2 ");
    builder.append(methodName).append('(');
    for (int i = 0; i < parameterValues.length; i++) {
        if (i > 0) {
            builder.append(", ");
        }
        builder.append(parameterNames[i]).append('=');
        builder.append(Strings.toString(parameterValues[i]));
    }
    builder.append(')');

    if (Looper.myLooper() != Looper.getMainLooper()) {
        builder.append(" [Thread:\"").append(Thread.currentThread().getName()).append("\"]");
    }

    Log.v(asTag(cls), builder.toString());

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        final String section = builder.toString().substring(2);
        Trace.beginSection(section);
    }
}
Project: hugo
File: Hugo.java
private static void exitMethod(JoinPoint joinPoint, Object result, long lengthMillis) {
    if (!enabled) return;

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        Trace.endSection();
    }

    Signature signature = joinPoint.getSignature();

    Class<?> cls = signature.getDeclaringType();
    String methodName = signature.getName();
    boolean hasReturnType = signature instanceof MethodSignature
            && ((MethodSignature) signature).getReturnType() != void.class;

    StringBuilder builder = new StringBuilder("\u21E0 ")
            .append(methodName)
            .append(" [")
            .append(lengthMillis)
            .append("ms]");

    if (hasReturnType) {
        builder.append(" = ");
        builder.append(Strings.toString(result));
    }

    Log.v(asTag(cls), builder.toString());
}
Project: AndroidTensorFlowMachineLearningExample
File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection();

    // Copy the input data into TensorFlow.
    Trace.beginSection("fillNodeFloat");
    inferenceInterface.fillNodeFloat(
            inputName, new int[]{1, inputSize, inputSize, 3}, floatValues);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("runInference");
    inferenceInterface.runInference(outputNames);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("readNodeFloat");
    inferenceInterface.readNodeFloat(outputName, outputs);
    Trace.endSection();

    // Find the best classifications.
    PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    3,
                    new Comparator<Recognition>() {
                        @Override
                        public int compare(Recognition lhs, Recognition rhs) {
                            // Intentionally reversed to put high confidence at the head of the queue.
                            return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                        }
                    });
    for (int i = 0; i < outputs.length; ++i) {
        if (outputs[i] > THRESHOLD) {
            pq.add(
                    new Recognition(
                            "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
        }
    }

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
    for (int i = 0; i < recognitionsSize; ++i) {
        recognitions.add(pq.poll());
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}
Project: GitHub
File: VirtualLayoutManager.java
@Override
public void onLayoutChildren(RecyclerView.Recycler recycler, RecyclerView.State state) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        Trace.beginSection(TRACE_LAYOUT);
    }

    if (mNoScrolling && state.didStructureChange()) {
        mSpaceMeasured = false;
        mSpaceMeasuring = true;
    }

    runPreLayout(recycler, state);

    try {
        super.onLayoutChildren(recycler, state);
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    } finally {
        // MAX_VALUE means invalidate scrolling offset - no scroll
        runPostLayout(recycler, state, Integer.MAX_VALUE); // hack to indicate it's an initial layout
    }

    if ((mNestedScrolling || mNoScrolling) && mSpaceMeasuring) {
        // measure required, so do measure
        mSpaceMeasured = true;
        // get last child
        int childCount = getChildCount();
        View lastChild = getChildAt(childCount - 1);
        if (lastChild != null) {
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) lastChild.getLayoutParams();
            // found the end of the last child view
            mMeasuredFullSpace = getDecoratedBottom(lastChild) + params.bottomMargin
                    + computeAlignOffset(lastChild, true, false);

            if (mRecyclerView != null && mNestedScrolling) {
                ViewParent parent = mRecyclerView.getParent();
                if (parent instanceof View) {
                    // make sure the full space is at most the min of the measured space and the parent's height
                    mMeasuredFullSpace = Math.min(mMeasuredFullSpace, ((View) parent).getMeasuredHeight());
                }
            }
        } else {
            mSpaceMeasuring = false;
        }
        mSpaceMeasuring = false;

        if (mRecyclerView != null && getItemCount() > 0) {
            // relayout
            mRecyclerView.post(new Runnable() {
                @Override
                public void run() {
                    // post relayout
                    if (mRecyclerView != null)
                        mRecyclerView.requestLayout();
                }
            });
        }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        Trace.endSection();
    }
}
Project: GitHub
File: VirtualLayoutManager.java
/**
 * Entry method for scrolling
 * {@inheritDoc}
 */
@Override
protected int scrollInternalBy(int dy, RecyclerView.Recycler recycler, RecyclerView.State state) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        Trace.beginSection(TRACE_SCROLL);
    }

    runPreLayout(recycler, state);

    int scrolled = 0;
    try {
        if (!mNoScrolling) {
            scrolled = super.scrollInternalBy(dy, recycler, state);
        } else {
            if (getChildCount() == 0 || dy == 0) {
                return 0;
            }

            mLayoutState.mRecycle = true;
            ensureLayoutStateExpose();
            final int layoutDirection = dy > 0 ? LayoutState.LAYOUT_END : LayoutState.LAYOUT_START;
            final int absDy = Math.abs(dy);
            updateLayoutStateExpose(layoutDirection, absDy, true, state);
            final int freeScroll = mLayoutState.mScrollingOffset;

            final int consumed = freeScroll + fill(recycler, mLayoutState, state, false);
            if (consumed < 0) {
                return 0;
            }
            scrolled = absDy > consumed ? layoutDirection * consumed : dy;
        }
    } catch (Exception e) {
        Log.w(TAG, Log.getStackTraceString(e), e);
        if (sDebuggable)
            throw e;
    } finally {
        runPostLayout(recycler, state, scrolled);
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
        Trace.endSection();
    }

    return scrolled;
}
Project: FaceRecognition
File: TensorflowFaceClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection();

    // Copy the input data into TensorFlow.
    Trace.beginSection("fillNodeFloat");
    inferenceInterface.fillNodeFloat(
            inputName, new int[]{1, inputSize, inputSize, 3}, floatValues);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("runInference");
    inferenceInterface.runInference(outputNames);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("readNodeFloat");
    inferenceInterface.readNodeFloat(outputName, outputs);
    Trace.endSection();

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    for (int i = 0; i < outputs.length; ++i) {
        recognitions.add(
                new Recognition(
                        "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}
Project: TensorflowAndroidDemo
File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection();

    // Copy the input data into TensorFlow.
    Trace.beginSection("feed");
    inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("run");
    inferenceInterface.run(outputNames, logStats);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("fetch");
    inferenceInterface.fetch(outputName, outputs);
    Trace.endSection();

    // Find the best classifications.
    PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    3,
                    new Comparator<Recognition>() {
                        @Override
                        public int compare(Recognition lhs, Recognition rhs) {
                            // Intentionally reversed to put high confidence at the head of the queue.
                            return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                        }
                    });
    for (int i = 0; i < outputs.length; ++i) {
        if (outputs[i] > THRESHOLD) {
            pq.add(
                    new Recognition(
                            "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
        }
    }

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
    for (int i = 0; i < recognitionsSize; ++i) {
        recognitions.add(pq.poll());
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}
Project: TensorflowAndroidDemo
File: TensorFlowMultiBoxDetector.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        floatValues[i * 3 + 0] = ((intValues[i] & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((intValues[i] >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = (((intValues[i] >> 16) & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection(); // preprocessBitmap

    // Copy the input data into TensorFlow.
    Trace.beginSection("feed");
    inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("run");
    inferenceInterface.run(outputNames, logStats);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("fetch");
    final float[] outputScoresEncoding = new float[numLocations];
    final float[] outputLocationsEncoding = new float[numLocations * 4];
    inferenceInterface.fetch(outputNames[0], outputLocationsEncoding);
    inferenceInterface.fetch(outputNames[1], outputScoresEncoding);
    Trace.endSection();

    outputLocations = decodeLocationsEncoding(outputLocationsEncoding);
    outputScores = decodeScoresEncoding(outputScoresEncoding);

    // Find the best detections.
    final PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    1,
                    new Comparator<Recognition>() {
                        @Override
                        public int compare(final Recognition lhs, final Recognition rhs) {
                            // Intentionally reversed to put high confidence at the head of the queue.
                            return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                        }
                    });

    // Scale them back to the input size.
    for (int i = 0; i < outputScores.length; ++i) {
        final RectF detection =
                new RectF(
                        outputLocations[4 * i] * inputSize,
                        outputLocations[4 * i + 1] * inputSize,
                        outputLocations[4 * i + 2] * inputSize,
                        outputLocations[4 * i + 3] * inputSize);
        pq.add(new Recognition("" + i, null, outputScores[i], detection));
    }

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    for (int i = 0; i < Math.min(pq.size(), MAX_RESULTS); ++i) {
        recognitions.add(pq.poll());
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}
Project: TensorflowAndroidDemo
File: ClassifierActivity.java
@Override
public void onImageAvailable(final ImageReader reader) {
    Image image = null;

    try {
        image = reader.acquireLatestImage();

        if (image == null) {
            return;
        }

        if (computing) {
            image.close();
            return;
        }
        computing = true;

        Trace.beginSection("imageAvailable");

        final Plane[] planes = image.getPlanes();
        fillBytes(planes, yuvBytes);

        final int yRowStride = planes[0].getRowStride();
        final int uvRowStride = planes[1].getRowStride();
        final int uvPixelStride = planes[1].getPixelStride();
        ImageUtils.convertYUV420ToARGB8888(
                yuvBytes[0],
                yuvBytes[1],
                yuvBytes[2],
                rgbBytes,
                previewWidth,
                previewHeight,
                yRowStride,
                uvRowStride,
                uvPixelStride,
                false);

        image.close();
    } catch (final Exception e) {
        if (image != null) {
            image.close();
        }
        LOGGER.e(e, "Exception!");
        Trace.endSection();
        return;
    }

    rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
    final Canvas canvas = new Canvas(croppedBitmap);
    canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);

    // For examining the actual TF input.
    if (SAVE_PREVIEW_BITMAP) {
        ImageUtils.saveBitmap(croppedBitmap);
    }

    runInBackground(
            new Runnable() {
                @Override
                public void run() {
                    final long startTime = SystemClock.uptimeMillis();
                    final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
                    lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
                    cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
                    resultsView.setResults(results);
                    requestRender();
                    computing = false;
                }
            });

    Trace.endSection();
}
Project: TensorFlowDetector-App
File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection();

    // Copy the input data into TensorFlow.
    Trace.beginSection("fillNodeFloat");
    inferenceInterface.fillNodeFloat(
            inputName, new int[]{1, inputSize, inputSize, 3}, floatValues);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("runInference");
    inferenceInterface.runInference(outputNames);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("readNodeFloat");
    inferenceInterface.readNodeFloat(outputName, outputs);
    Trace.endSection();

    // Find the best classifications.
    PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    3,
                    new Comparator<Recognition>() {
                        @Override
                        public int compare(Recognition lhs, Recognition rhs) {
                            // Intentionally reversed to put high confidence at the head of the queue.
                            return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                        }
                    });
    for (int i = 0; i < outputs.length; ++i) {
        if (outputs[i] > THRESHOLD) {
            pq.add(
                    new Recognition(
                            "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
        }
    }

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
    for (int i = 0; i < recognitionsSize; ++i) {
        recognitions.add(pq.poll());
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}
Project: tensorflow-classifier-android
File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
    // Log this method so that it can be analyzed with systrace.
    Trace.beginSection("recognizeImage");

    Trace.beginSection("preprocessBitmap");
    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Trace.endSection();

    // Copy the input data into TensorFlow.
    Trace.beginSection("feed");
    inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
    Trace.endSection();

    // Run the inference call.
    Trace.beginSection("run");
    inferenceInterface.run(outputNames, logStats);
    Trace.endSection();

    // Copy the output Tensor back into the output array.
    Trace.beginSection("fetch");
    inferenceInterface.fetch(outputName, outputs);
    Trace.endSection();

    // Find the best classifications.
    PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    3,
                    new Comparator<Recognition>() {
                        @Override
                        public int compare(Recognition lhs, Recognition rhs) {
                            // Intentionally reversed to put high confidence at the head of the queue.
                            return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                        }
                    });
    for (int i = 0; i < outputs.length; ++i) {
        if (outputs[i] > THRESHOLD) {
            pq.add(
                    new Recognition(
                            "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
        }
    }

    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
    for (int i = 0; i < recognitionsSize; ++i) {
        recognitions.add(pq.poll());
    }
    Trace.endSection(); // "recognizeImage"
    return recognitions;
}