@Test
public void asyncCRUDShouldSucceed() throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    Somethingcomposite something = createSomething(0, 0);
    SomethingcompositeRecord somethingcompositeRecord = new SomethingcompositeRecord();
    somethingcompositeRecord.from(something);
    compositeDao.insertExecAsync(something)
        .thenCompose(v -> compositeDao.findByIdAsync(somethingcompositeRecord.key()))
        .thenCompose(fetchSomething -> {
            fetchSomething.getSomejsonobject().put("foo", "bar");
            return compositeDao.updateExecAsync(fetchSomething);
        })
        .thenCompose(v2 -> compositeDao.deleteExecAsync(somethingcompositeRecord.key()))
        .whenComplete(failOrCountDown(latch));
    await(latch);
}
/**
 * isTerminating is not true when running or when terminated
 */
public void testIsTerminating() throws InterruptedException {
    final ThreadPoolExecutor p =
        new ThreadPoolExecutor(1, 1,
                               LONG_DELAY_MS, MILLISECONDS,
                               new ArrayBlockingQueue<Runnable>(10));
    try (PoolCleaner cleaner = cleaner(p)) {
        final CountDownLatch threadStarted = new CountDownLatch(1);
        final CountDownLatch done = new CountDownLatch(1);
        assertFalse(p.isTerminating());
        p.execute(new CheckedRunnable() {
            public void realRun() throws InterruptedException {
                assertFalse(p.isTerminating());
                threadStarted.countDown();
                await(done);
            }});
        await(threadStarted);
        assertFalse(p.isTerminating());
        done.countDown();
        try { p.shutdown(); } catch (SecurityException ok) { return; }
        assertTrue(p.awaitTermination(LONG_DELAY_MS, MILLISECONDS));
        assertTrue(p.isTerminated());
        assertFalse(p.isTerminating());
    }
}
public void testNotifyOnShutdown() throws Exception {
    final CountDownLatch latch2 = new CountDownLatch(1);
    serviceA.registerRequestHandler("foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC,
        new TransportRequestHandler<StringMessageRequest>() {
            @Override
            public void messageReceived(StringMessageRequest request, TransportChannel channel) {
                try {
                    latch2.await();
                    logger.info("Stop ServiceB now");
                    serviceB.stop();
                } catch (Exception e) {
                    fail(e.getMessage());
                }
            }
        });
    TransportFuture<TransportResponse.Empty> foobar = serviceB.submitRequest(nodeA, "foobar",
        new StringMessageRequest(""), TransportRequestOptions.EMPTY, EmptyTransportResponseHandler.INSTANCE_SAME);
    latch2.countDown();
    try {
        foobar.txGet();
        fail("TransportException expected");
    } catch (TransportException ex) {
    }
}
@Test
public void testNodes3() throws IOException, InterruptedException, KeeperException {
    int testIterations = 3;
    final CountDownLatch latch = new CountDownLatch(testIterations);
    final AtomicInteger failureCounter = new AtomicInteger();

    for (int i = 0; i < testIterations; i++) {
        runElectionSupportThread(latch, failureCounter);
    }

    Assert.assertEquals(0, failureCounter.get());

    if (!latch.await(10, TimeUnit.SECONDS)) {
        logger.info("Waited for all threads to start, but timed out. We had {} failures.",
            failureCounter);
    }
}
/**
 * awaitAdvance continues waiting if interrupted before waiting
 */
public void testAwaitAdvanceAfterInterrupt() {
    final Phaser phaser = new Phaser();
    assertEquals(0, phaser.register());
    final CountDownLatch pleaseArrive = new CountDownLatch(1);

    Thread t = newStartedThread(new CheckedRunnable() {
        public void realRun() {
            Thread.currentThread().interrupt();
            assertEquals(0, phaser.register());
            assertEquals(0, phaser.arrive());
            pleaseArrive.countDown();
            assertTrue(Thread.currentThread().isInterrupted());
            assertEquals(1, phaser.awaitAdvance(0));
            assertTrue(Thread.interrupted());
        }});

    await(pleaseArrive);
    assertThreadBlocks(t, Thread.State.WAITING);
    assertEquals(0, phaser.arrive());
    awaitTermination(t);

    Thread.currentThread().interrupt();
    assertEquals(1, phaser.awaitAdvance(0));
    assertTrue(Thread.interrupted());
}
/**
 * This sample performs a count, inserts data and performs a count again using reactive operator chaining.
 */
@Test
public void shouldInsertAndCountData() throws Exception {
    CountDownLatch countDownLatch = new CountDownLatch(1);

    repository.count() //
        .doOnNext(System.out::println) //
        .thenMany(repository.save(Flux.just(new Person("Hank", "Schrader", 43), //
            new Person("Mike", "Ehrmantraut", 62)))) //
        .last() //
        .flatMap(v -> repository.count()) //
        .doOnNext(System.out::println) //
        .doOnComplete(countDownLatch::countDown) //
        .doOnError(throwable -> countDownLatch.countDown()) //
        .subscribe();

    countDownLatch.await();
}
@Test
public void dropHeadWhenFull_dropHeadStrategy() throws InterruptedException {
    SizeBoundedQueue queue = new SizeBoundedQueue(16, OverflowStrategy.dropHead);
    CountDownLatch countDown = new CountDownLatch(1);

    for (int i = 0; i < queue.maxSize; i++) {
        Message next = newMessage(i);
        Deferred<Object, MessageDroppedException, Integer> deferred = new DeferredObject<>();
        queue.offer(next, deferred);
        if (i == 0) {
            deferred.fail(ex -> {
                assertEquals(0, ((TestMessage) ex.dropped.get(0)).key);
                countDown.countDown();
            });
        }
    }

    Message overflow = newMessage(queue.maxSize);
    queue.offer(overflow, new DeferredObject<>());
    countDown.await();

    Object[] ids = collectKeys(queue);
    assertArrayEquals(new Object[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, ids);
}
/** No guarantees, but effective in practice. */
private static void forceFullGc() {
    CountDownLatch finalizeDone = new CountDownLatch(1);
    WeakReference<?> ref = new WeakReference<Object>(new Object() {
        protected void finalize() { finalizeDone.countDown(); }});
    try {
        for (int i = 0; i < 10; i++) {
            System.gc();
            if (finalizeDone.await(1L, SECONDS) && ref.get() == null) {
                System.runFinalization(); // try to pick up stragglers
                return;
            }
        }
    } catch (InterruptedException unexpected) {
        throw new AssertionError("unexpected InterruptedException");
    }
    throw new AssertionError("failed to do a \"full\" gc");
}
@Test
public void chainedDeferred() throws InterruptedException {
    reporter = AsyncReporter.builder(new JdbcTemplateSender(underlying))
        .messageTimeout(10, TimeUnit.MILLISECONDS)
        .build();
    batchJdbcTemplate = new BatchJdbcTemplate(underlying, reporter);

    CountDownLatch countDown = new CountDownLatch(1);
    batchJdbcTemplate.update(INSERTION, new Object[]{randomString(), new Date()}).done(d -> {
        assertEquals(new Integer(1), d);

        String expected = randomString();
        batchJdbcTemplate.update(MODIFICATION, new Object[]{expected}).done(dd -> {
            assertEquals(new Integer(1), dd);

            int rowCount = batchJdbcTemplate.queryForObject(ROW_COUNT, Integer.class);
            assertEquals(1, rowCount);

            Object data = batchJdbcTemplate
                .queryForObject("SELECT data FROM test LIMIT 1", String.class);
            assertEquals(expected, data);
            countDown.countDown();
        });
    });
    countDown.await();
}
@Test
public void should_create_job_with_unique_job_number() throws Throwable {
    // given:
    final Node flow = createRootFlow("flow-job-number", "yml/demo_flow2.yaml");
    final int numOfJob = 10;
    final CountDownLatch countDown = new CountDownLatch(numOfJob);

    // when:
    for (int i = 0; i < numOfJob; i++) {
        taskExecutor.execute(() -> {
            jobService.createFromFlowYml(flow.getPath(), JobCategory.MANUAL, null, mockUser);
            countDown.countDown();
        });
    }

    // then:
    countDown.await(30, TimeUnit.SECONDS);
    Assert.assertEquals(numOfJob, jobDao.numOfJob(flow.getPath()).intValue());
}
public void testRunnableRunsAtMostOnceAfterCancellation() throws Exception {
    final int iterations = scaledRandomIntBetween(1, 12);
    final AtomicInteger counter = new AtomicInteger();
    final CountDownLatch doneLatch = new CountDownLatch(iterations);
    final Runnable countingRunnable = () -> {
        counter.incrementAndGet();
        doneLatch.countDown();
    };

    final Cancellable cancellable =
        threadPool.scheduleWithFixedDelay(countingRunnable, TimeValue.timeValueMillis(10L), Names.GENERIC);
    doneLatch.await();
    cancellable.cancel();

    final int counterValue = counter.get();
    assertThat(counterValue, isOneOf(iterations, iterations + 1));

    if (rarely()) {
        awaitBusy(() -> {
            final int value = counter.get();
            return value == iterations || value == iterations + 1;
        }, 50L, TimeUnit.MILLISECONDS);
    }
}
@BeforeAll
public static void start() {
    port = getFreePort();
    httpDataProvider = mock(HttpDataProvider.class);
    logger.info("Starting embedded HTTP server on port: {}", port);

    vertx = Vertx.vertx();
    DeploymentOptions options = new DeploymentOptions()
        .setConfig(new JsonObject().put("http.port", port))
        .setInstances(1);

    CountDownLatch latch = new CountDownLatch(1);
    vertx.deployVerticle(
        new HttpServerSimulatorVerticle(httpDataProvider),
        options,
        result -> {
            logger.info("Started embedded HTTP server with result: {}", result);
            latch.countDown();
        });
    try {
        latch.await();
    } catch (InterruptedException e) {
        logger.warn("Failed to wait for the embedded HTTP server to start!");
    }
}
@Test
public void testRacingRegistrations() throws InterruptedException {
    for (int i = 0; i < ITERATIONS; i++) {
        startLatch = new CountDownLatch(THREAD_COUNT);
        registeredLatch = new CountDownLatch(THREAD_COUNT);
        canUnregisterLatch = new CountDownLatch(1);
        unregisteredLatch = new CountDownLatch(THREAD_COUNT);

        List<SubscriberThread> threads = startThreads();
        registeredLatch.await();
        eventBus.post("42");
        canUnregisterLatch.countDown();
        for (int t = 0; t < THREAD_COUNT; t++) {
            int eventCount = threads.get(t).eventCount;
            if (eventCount != 1) {
                fail("Failed in iteration " + i + ": thread #" + t + " has event count of " + eventCount);
            }
        }
        // Wait for threads to be done
        unregisteredLatch.await();
    }
}
public static JobFactory<?> getFactory(
    JobSubmitter submitter, Path scratch, int numJobs, Configuration conf,
    CountDownLatch startFlag, UserResolver resolver) throws IOException {
    GridmixJobSubmissionPolicy policy =
        GridmixJobSubmissionPolicy.getPolicy(conf, GridmixJobSubmissionPolicy.STRESS);
    if (policy == GridmixJobSubmissionPolicy.REPLAY) {
        return new DebugReplayJobFactory(
            submitter, scratch, numJobs, conf, startFlag, resolver);
    } else if (policy == GridmixJobSubmissionPolicy.STRESS) {
        return new DebugStressJobFactory(
            submitter, scratch, numJobs, conf, startFlag, resolver);
    } else if (policy == GridmixJobSubmissionPolicy.SERIAL) {
        return new DebugSerialJobFactory(
            submitter, scratch, numJobs, conf, startFlag, resolver);
    }
    return null;
}
@Test
public void testPhantomRead() throws Exception {
    logger.info("start phantom read");
    queryReadAccount.setFirstWaitTime(0);
    queryReadAccount.setSecondWaitTime(2000);
    queryWriteAccount.setAccountNumber(953);
    queryWriteAccount.setAmount(456.77);
    queryWriteAccount.setWaitTime(1000);

    latch = new CountDownLatch(1);
    defaultExecutor.submit(queryWriteAccount);
    defaultExecutor.submit(queryReadAccount);
    latch.await(3000, MILLISECONDS);

    assertEquals("the transaction B add a new account", 456.77, queryWriteAccount.getResult(), 0.0);
    assertEquals("the first query in the transaction A before the transaction ends", 8,
        queryReadAccount.getFirstResult());
    assertEquals("the second query in the transaction A after the transaction ends", 9,
        queryReadAccount.getSecondResult());
}
/**
 * isTerminated is false before termination, true after
 */
public void testIsTerminated() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final ThreadPoolExecutor p = new CustomExecutor(1);
    try (PoolCleaner cleaner = cleaner(p)) {
        final CountDownLatch threadStarted = new CountDownLatch(1);
        p.execute(new CheckedRunnable() {
            public void realRun() throws InterruptedException {
                assertFalse(p.isTerminated());
                threadStarted.countDown();
                await(done);
            }});
        await(threadStarted);
        assertFalse(p.isTerminated());
        assertFalse(p.isTerminating());
        done.countDown();
        try { p.shutdown(); } catch (SecurityException ok) { return; }
        assertTrue(p.awaitTermination(LONG_DELAY_MS, MILLISECONDS));
        assertTrue(p.isTerminated());
    }
}
/**
 * Note that only one thread may call await()! If the instance is already in use,
 * calling it from another thread may produce unwanted behavior (e.g. a dead-lock).
 *
 * @return value from the flag that raised the latch
 * @throws PogamutInterruptedException if the wait is interrupted
 */
public TYPE await() throws PogamutInterruptedException {
    synchronized (mutex) {
        synchronized (latchAccessMutex) {
            latch = new CountDownLatch(1);
        }
        // instantiation checks whether we already have the desired result;
        // if not, it adds itself as a listener to the flag
        Listener listener = new Listener(flag);
        if (isResult) return result;
        try {
            latch.await(); // the latch is raised whenever a listener receives a correct result
        } catch (InterruptedException e) {
            throw new PogamutInterruptedException(e, this);
        }
        synchronized (latchAccessMutex) {
            flag.removeListener(listener);
            latch = null;
        }
        return result;
    }
}
@Test
public void testNodes20() throws IOException, InterruptedException, KeeperException {
    int testIterations = 20;
    final CountDownLatch latch = new CountDownLatch(testIterations);
    final AtomicInteger failureCounter = new AtomicInteger();

    for (int i = 0; i < testIterations; i++) {
        runElectionSupportThread(latch, failureCounter);
    }

    Assert.assertEquals(0, failureCounter.get());

    if (!latch.await(10, TimeUnit.SECONDS)) {
        logger.info("Waited for all threads to start, but timed out. We had {} failures.",
            failureCounter);
    }
}
/**
 * Wait for the given CountDownLatch to count down or to exceed its timeout
 * (10000ms if no time is specified). The noFail argument stops JUnit.fail
 * from being called when the latch is not released.
 */
public static void waitForEvent(CountDownLatch latch, long timeout, Boolean noFail) throws InterruptedException {
    // We may get stuck if the consumer finishes processing faster than the test works through.
    // If so, we need to test for a non-empty status set with the last bean status equal to our expectation.
    // Once finished, check whether the latch was released or timed out.
    boolean released = latch.await(timeout, TimeUnit.MILLISECONDS);
    if (released) {
        System.out.println("~~~~~~~~~~~~~~~~~~~~~~~~~\n Final state reached\n~~~~~~~~~~~~~~~~~~~~~~~~~");
    } else {
        System.out.println("#########################\n No final state reported\n#########################");
        if (!noFail) {
            fail("No final state reported");
        }
    }
}
public void latch(ResponseWaiter waiter) throws EventException, InterruptedException {
    if (waiter == null) waiter = new ResponseWaiter.Dont();

    if (getResponseType() == ResponseType.ONE) {
        this.latch = new CountDownLatch(1);
        boolean ok = latch.await(timeout, timeUnit);
        while (!ok && waiter.waitAgain()) {
            ok = latch.await(timeout, timeUnit);
        }
        ok = latch.await(timeout, timeUnit); // This is because waitAgain() could be false leaving ok as false, we recheck it!
        if (!ok) throw new EventException("The timeout of " + timeout + " " + timeUnit + " was reached and no response occurred!");

    } else if (getResponseType() == ResponseType.ONE_OR_MORE) {
        somethingFound = false;
        Thread.sleep(timeUnit.toMillis(timeout));
        while (waiter.waitAgain()) {
            Thread.sleep(timeUnit.toMillis(timeout));
        }
        if (!somethingFound) throw new EventException("The timeout of " + timeout + " " + timeUnit + " was reached and no response occurred!");
    }
}
/**
 * Tests an event for a host becoming available that matches an intent.
 *
 * @throws InterruptedException if the latch wait fails.
 */
@Test
public void testEventHostAvailableMatch() throws Exception {
    // we will expect 2 delegate calls
    delegate.latch = new CountDownLatch(2);

    Device host = device("host1");
    DeviceEvent deviceEvent = new DeviceEvent(DeviceEvent.Type.DEVICE_ADDED, host);
    reasons.add(deviceEvent);

    Key key = Key.of(0x333L, APP_ID);
    Collection<NetworkResource> resources = ImmutableSet.of(host.id());
    tracker.addTrackedResources(key, resources);
    reasons.add(deviceEvent);

    TopologyEvent event = new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED, topology, reasons);
    listener.event(event);
    assertThat(delegate.latch.await(WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS), is(true));

    assertThat(delegate.intentIdsFromEvent, hasSize(1));
    assertThat(delegate.compileAllFailedFromEvent, is(true));
    assertThat(delegate.intentIdsFromEvent.get(0).toString(), equalTo("0x333"));
}
private synchronized void connectDelegateService(String processName) {
    if (mRemoteDelegate != null && mRemoteDelegate.asBinder().isBinderAlive()) {
        return;
    }
    mCountDownLatch = new CountDownLatch(1);
    if (targetIntent == null) {
        Intent service = new Intent();
        String delegateComponentName = BridgeUtil.getBridgeName(BridgeUtil.TYPE_SERVICEBRIDGE, processName);
        service.setClassName(RuntimeVariables.androidApplication, delegateComponentName);
        targetIntent = service;
    }
    RuntimeVariables.androidApplication.bindService(targetIntent, mDelegateConnection, Context.BIND_AUTO_CREATE);
    try {
        mCountDownLatch.await();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
@Test
public void testCreateDocument_Async() throws Exception {
    // create a document
    Document doc = new Document(String.format("{ 'id': 'doc%d', 'counter': '%d'}", 1, 1));
    Observable<ResourceResponse<Document>> createDocumentObservable =
        asyncClient.createDocument(createdCollection.getSelfLink(), doc, null, true);

    final CountDownLatch doneLatch = new CountDownLatch(1);

    // subscribe to events emitted by the observable
    createDocumentObservable
        .single() // we know there will be one response
        .subscribe(
            documentResourceResponse -> {
                System.out.println(documentResourceResponse.getActivityId());
                doneLatch.countDown();
            },
            error -> {
                System.err.println("an error happened in document creation: actual cause: " + error.getMessage());
            });

    // wait till document creation completes
    doneLatch.await();
}
private void enterSatisfyGuardAndLeaveInAnotherThread() {
    final CountDownLatch startedLatch = new CountDownLatch(1);
    startThread(new Runnable() {
        @Override
        public void run() {
            startedLatch.countDown();
            enterSatisfyGuardAndLeaveInCurrentThread();
        }
    });
    awaitUninterruptibly(startedLatch);
}
@Test
public void runTest() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    runner.run(new Function0<Unit>() {
        @Override
        public Unit invoke() {
            latch.countDown();
            return Unit.INSTANCE;
        }
    });
    latch.await(MEDIUM, TimeUnit.MILLISECONDS);
}
@Test
public void testThreadedProduce() throws Exception {
    int numThreads = 20;
    Map<String, String> overrides = getOverridesForEncryption();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(10000));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, String.valueOf(100));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());

    Executor executor = Executors.newFixedThreadPool(numThreads);
    final AtomicBoolean error = new AtomicBoolean(false);
    final CountDownLatch startLatch = new CountDownLatch(numThreads);
    final CountDownLatch stopLatch = new CountDownLatch(numThreads);
    final Set<String> in = Collections.synchronizedSet(new HashSet<String>());

    for (int i = 0; i < numThreads; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    startLatch.countDown();
                    startLatch.await();
                    in.addAll(putEvents(channel, "thread-produce", 10, 10000, true));
                } catch (Throwable t) {
                    error.set(true);
                    LOGGER.error("Error in put thread", t);
                } finally {
                    stopLatch.countDown();
                }
            }
        });
    }

    stopLatch.await();
    Set<String> out = consumeChannel(channel);
    Assert.assertFalse(error.get());
    compareInputAndOut(in, out);
}
DecodeThread(ScannerActivity activity, Collection<BarcodeFormat> decodeFormats, String characterSet) {
    this.activity = activity;
    handlerInitLatch = new CountDownLatch(1);

    hints = new EnumMap<>(DecodeHintType.class);
    hints.put(DecodeHintType.POSSIBLE_FORMATS, decodeFormats);
    if (characterSet != null) {
        hints.put(DecodeHintType.CHARACTER_SET, characterSet);
    }
    hints.put(DecodeHintType.TRY_HARDER, true);
}
public void testDisabledActionDoesNotCauseAnInfiniteLoop() {
    List<ProgressSupport.Action> actions = new ArrayList<ProgressSupport.Action>();
    final CountDownLatch sync = new CountDownLatch(1);
    final AtomicBoolean ran = new AtomicBoolean();
    actions.add(new ProgressSupport.EventThreadAction() {
        public void run(ProgressSupport.Context actionContext) {
            synchronized (sync) {
                ran.set(true);
                sync.countDown();
            }
        }
        public boolean isEnabled() {
            return false;
        }
    });
    RequestProcessor.getDefault().post(new Runnable() {
        public void run() {
            try {
                sync.await(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {}
            if (!ran.get()) {
                // hmm, anything better?
                System.exit(1);
            }
        }
    });
    ProgressSupport.invoke(actions);
}
/**
 * interrupt during wait for exchange throws InterruptedException
 */
public void testExchange_InterruptedException() {
    final Exchanger e = new Exchanger();
    final CountDownLatch threadStarted = new CountDownLatch(1);
    Thread t = newStartedThread(new CheckedInterruptedRunnable() {
        public void realRun() throws InterruptedException {
            threadStarted.countDown();
            e.exchange(one);
        }});

    await(threadStarted);
    t.interrupt();
    awaitTermination(t);
}
@Test
public void testQueue() throws Exception {
    int maxQueueSize = StatsdMetricsConfig.DEFAULT_QUEUE_SIZE;

    CountDownLatch resume = new CountDownLatch(1);

    writeDelegate = (db, points, original) -> {
        await(resume);
        original.run();
    };

    publisher.start("test-host", 9999);

    for (int i = 0; i < maxQueueSize * 2; i++) {
        publisher.publish(singleton(new TestMetric("test.metric" + i, i)));
    }

    resume.countDown();

    busyWait("empty queue", () -> publisher.queueSize() == 0);

    publisher.stop();

    for (int i = 0; i < maxQueueSize; i++) {
        assertEquals("test_host__9999.test.metric" + i + ":" + i + "|g", receiveNext());
    }
}
public void testDoesNotRescheduleUntilExecutionFinished() throws Exception {
    final TimeValue delay = TimeValue.timeValueMillis(100L);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch pauseLatch = new CountDownLatch(1);
    ThreadPool threadPool = mock(ThreadPool.class);
    final Runnable runnable = () -> {
        // notify that the runnable is started
        startLatch.countDown();
        try {
            // wait for other thread to un-pause
            pauseLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    };
    ReschedulingRunnable reschedulingRunnable = new ReschedulingRunnable(runnable, delay, Names.GENERIC, threadPool);
    // this call was made during construction of the runnable
    verify(threadPool, times(1)).schedule(delay, Names.GENERIC, reschedulingRunnable);

    // create a thread and start the runnable
    Thread runThread = new Thread() {
        @Override
        public void run() {
            reschedulingRunnable.run();
        }
    };
    runThread.start();

    // wait for the runnable to be started and ensure the runnable hasn't used the threadpool again
    startLatch.await();
    verifyNoMoreInteractions(threadPool);

    // un-pause the runnable and allow it to complete execution
    pauseLatch.countDown();
    runThread.join();

    // validate schedule was called again
    verify(threadPool, times(2)).schedule(delay, Names.GENERIC, reschedulingRunnable);
}
private void startCrop() {
    if (isFinishing()) {
        return;
    }
    imageView.setImageRotateBitmapResetBase(rotateBitmap, true);
    CropUtil.startBackgroundJob(this, null, getResources().getString(R.string.crop__wait),
        new Runnable() {
            public void run() {
                final CountDownLatch latch = new CountDownLatch(1);
                handler.post(new Runnable() {
                    public void run() {
                        if (imageView.getScale() == 1F) {
                            imageView.center();
                        }
                        latch.countDown();
                    }
                });
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                new Cropper().crop();
            }
        }, handler
    );
}
public void testNoLongerPrimaryShardException() throws InterruptedException {
    final String index = "test";
    setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

    ShardRouting failedShard = getRandomShardRouting(index);

    AtomicReference<Throwable> failure = new AtomicReference<>();
    CountDownLatch latch = new CountDownLatch(1);

    long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id());
    assertThat(primaryTerm, greaterThanOrEqualTo(1L));
    shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), primaryTerm + 1,
        "test", getSimulatedFailure(), new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                failure.set(null);
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                failure.set(e);
                latch.countDown();
            }
        });

    ShardStateAction.NoLongerPrimaryShardException catastrophicError =
        new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "dummy failure");
    CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear();
    transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError);

    latch.await();
    assertNotNull(failure.get());
    assertThat(failure.get(), instanceOf(ShardStateAction.NoLongerPrimaryShardException.class));
    assertThat(failure.get().getMessage(), equalTo(catastrophicError.getMessage()));
}
private Thread runElectionSupportThread(final CountDownLatch latch,
    final AtomicInteger failureCounter, final long sleepDuration) {
    final LeaderElectionSupport electionSupport = createLeaderElectionSupport();

    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                electionSupport.start();
                Thread.sleep(sleepDuration);
                electionSupport.stop();
                latch.countDown();
            } catch (Exception e) {
                logger.warn("Failed to run leader election due to: {}", e.getMessage());
                failureCounter.incrementAndGet();
            }
        }
    };
    t.start();
    return t;
}
@Test
public void testParallelNoThrottle() throws Exception {
    final int numThreads = 50;
    final LearnerSnapshotThrottler throttler = new LearnerSnapshotThrottler(numThreads);
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);

    final CountDownLatch threadStartLatch = new CountDownLatch(numThreads);
    final CountDownLatch snapshotProgressLatch = new CountDownLatch(numThreads);

    List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(numThreads);
    for (int i = 0; i < numThreads; i++) {
        results.add(threadPool.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                threadStartLatch.countDown();
                try {
                    threadStartLatch.await();

                    throttler.beginSnapshot(false);

                    snapshotProgressLatch.countDown();
                    snapshotProgressLatch.await();

                    throttler.endSnapshot();
                } catch (Exception e) {
                    return false;
                }
                return true;
            }
        }));
    }

    for (Future<Boolean> result : results) {
        Assert.assertTrue(result.get());
    }
}
@Override
public void run() {
    try {
        while (!handler.allFinished()) {
            if (failure) throw new RuntimeException("FAILURE DETECTED!");
            List<Thread> thrds = new LinkedList<Thread>();
            for (TeamedAgentId id : localWorldViews.keySet()) {
                if (handler.cyclesToRun(id) >= 0) {
                    thrds.add(new EventGenerator(eventsPerCycle, currentTime, localWorldViews.get(id)));
                }
            }
            latch2 = new CountDownLatch(thrds.size());
            for (Thread t : thrds) {
                t.start();
            }
            latch2.await(GLOBAL_TIMEOUT_IN_MINUTES, TimeUnit.MINUTES);
            if (latch2.getCount() > 0 || failure) {
                throw new RuntimeException("FAILURE DETECTED!");
            }
            ++currentTime;
            sleep(sleepTime);
        }
    } catch (Exception e) {
        e.printStackTrace();
        failure = true;
        totalCountDown();
        return;
    }
    latch.countDown();
}
@Override
public WatchKey register(final Watchable folder, final WatchEvent.Kind<?>[] events,
                         final WatchEvent.Modifier... modifiers) throws IOException {
    if (log.isInfoEnabled()) {
        log.info(String.format("Register file %s for events %s", folder, Arrays.toString(events)));
    }
    final Pointer[] values = {CFStringRef.toCFString(folder.toString()).getPointer()};
    final MacOSXWatchKey key = new MacOSXWatchKey(folder, this, events);

    final double latency = 1.0; // Latency in seconds

    final Map<File, Long> timestamps = createLastModifiedMap(new File(folder.toString()));
    final FSEvents.FSEventStreamCallback callback = new Callback(key, timestamps);
    final FSEventStreamRef stream = library.FSEventStreamCreate(
        Pointer.NULL, callback, Pointer.NULL,
        library.CFArrayCreate(null, values, CFIndex.valueOf(1), null),
        -1, latency, kFSEventStreamCreateFlagNoDefer);

    final CountDownLatch lock = new CountDownLatch(1);
    final CFRunLoop loop = new CFRunLoop(lock, stream);
    threadFactory.newThread(loop).start();
    try {
        lock.await();
    } catch (InterruptedException e) {
        throw new IOException(String.format("Failure registering for events in %s", folder), e);
    }
    loops.put(key, loop);
    callbacks.put(key, callback);
    return key;
}
public TestFuture() {
    resolved = false;
    getCalledLatch = new CountDownLatch(1);
    resolveOnGet = false;
    resolveOnGetResult = null;
    resolveOnGetException = null;
}
public void testObjectListPostRequest() throws InterruptedException {
    server.enqueue(new MockResponse().setBody("[{\"firstName\":\"Amit\", \"lastName\":\"Shekhar\"}]"));

    final AtomicReference<String> firstNameRef = new AtomicReference<>();
    final AtomicReference<String> lastNameRef = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);

    AndroidNetworking.post(server.url("/").toString())
        .addBodyParameter("firstName", "Amit")
        .addBodyParameter("lastName", "Shekhar")
        .build()
        .getAsObjectList(User.class, new ParsedRequestListener<List<User>>() {
            @Override
            public void onResponse(List<User> userList) {
                firstNameRef.set(userList.get(0).firstName);
                lastNameRef.set(userList.get(0).lastName);
                latch.countDown();
            }

            @Override
            public void onError(ANError anError) {
                assertTrue(false);
            }
        });

    assertTrue(latch.await(2, SECONDS));

    assertEquals("Amit", firstNameRef.get());
    assertEquals("Shekhar", lastNameRef.get());
}
@Test
public void testUnReadFile() throws Exception {
    Log.d(TAG, "testUnReadFile");
    final CountDownLatch signal = new CountDownLatch(1);
    FileUploader.sliceUpload(null, InstrumentationRegistry.getTargetContext(), WcsTestConfig.TOKEN,
        new File("sfdfo"), null, new SliceUploaderListener() {
            @Override
            public void onSliceUploadSucceed(JSONObject reponseJSON) {
                Log.d(TAG, "onSuccess: " + reponseJSON);
                signal.countDown();
                Assert.assertNull(reponseJSON);
            }

            @Override
            public void onSliceUploadFailured(HashSet<String> errorMessages) {
                StringBuilder sb = new StringBuilder();
                for (String string : errorMessages) {
                    sb.append(string + "\r\n");
                    Log.e(TAG, "errorMessage : " + string);
                }
                signal.countDown();
                Assert.assertTrue(sb.toString().contains("file no exists"));
            }
        });
    signal.await(WAIT_TIME, TimeUnit.MILLISECONDS);
}