@Test
public void testAddListener() throws Exception {
    Task<String> task = Tasks.forResult("test");
    ApiFuture<String> future = new TaskToApiFuture<>(task);
    final AtomicBoolean result = new AtomicBoolean(false);
    future.addListener(new Runnable() {
        @Override
        public void run() {
            result.set(true);
        }
    }, MoreExecutors.directExecutor());
    assertEquals("test", future.get());
    assertTrue(result.get());
    assertFalse(future.isCancelled());
    assertTrue(future.isDone());
}
/**
 * Broadcasts the list of signed transactions.
 *
 * @param transactionsRaw transactions in raw byte[] format
 */
public ArrayList<Transaction> broadcastTransactions(ArrayList<byte[]> transactionsRaw) {
    ArrayList<Transaction> transactions = new ArrayList<>();
    for (byte[] transactionRaw : transactionsRaw) {
        final Wallet.SendResult result = new Wallet.SendResult();
        result.tx = new Transaction(params, transactionRaw);
        result.broadcast = kit.peerGroup().broadcastTransaction(result.tx);
        result.broadcastComplete = result.broadcast.future();
        result.broadcastComplete.addListener(new Runnable() {
            @Override
            public void run() {
                System.out.println("Asset spent! txid: " + result.tx.getHashAsString());
            }
        }, MoreExecutors.directExecutor());
        transactions.add(result.tx);
    }
    return transactions;
}
public static void main(String... args) {
    MongoClient client = new MongoClient("localhost");
    RepositorySetup setup = RepositorySetup.builder()
            .database(client.getDatabase("test"))
            .executor(MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()))
            .gson(new GsonBuilder()
                    .registerTypeAdapterFactory(new GsonAdaptersEnt())
                    .create())
            .build();
    EntRepository repository = new EntRepository(setup);
    EntRepository.Criteria where = repository.criteria()
            .uuid("8b7a881c-6ccb-4ada-8f6a-60cc99e6aa20")
            .actionIn("BAN", "IPBAN");
    Criteria or = where.expiresAbsent()
            .or()
            .with(where)
            .expiresGreaterThan(TimeInstant.of(1467364749679L));
    System.out.println(or);
    repository.find(or).fetchAll().getUnchecked();
}
@Setup(Level.Trial)
@Override
public void setUp() throws Exception {
    ListeningExecutorService dsExec = MoreExecutors.newDirectExecutorService();
    executor = MoreExecutors.listeningDecorator(MoreExecutors.getExitingExecutorService(
            (ThreadPoolExecutor) Executors.newFixedThreadPool(1), 1L, TimeUnit.SECONDS));
    InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", dsExec);
    InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", dsExec);
    Map<LogicalDatastoreType, DOMStore> datastores = ImmutableMap.of(
            LogicalDatastoreType.OPERATIONAL, (DOMStore) operStore,
            LogicalDatastoreType.CONFIGURATION, configStore);
    domBroker = new SerializedDOMDataBroker(datastores, executor);
    schemaContext = BenchmarkModel.createTestContext();
    configStore.onGlobalContextUpdated(schemaContext);
    operStore.onGlobalContextUpdated(schemaContext);
    initTestNode();
}
@Test
public void testSValue() throws Exception {
    // Check that we never generate an S value that is larger than half the curve order. This avoids a
    // malleability issue that can allow someone to change a transaction [hash] without invalidating the signature.
    final int ITERATIONS = 10;
    ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(ITERATIONS));
    List<ListenableFuture<ECKey.ECDSASignature>> sigFutures = Lists.newArrayList();
    final ECKey key = new ECKey();
    for (byte i = 0; i < ITERATIONS; i++) {
        final byte[] hash = HashUtil.sha3(new byte[]{i});
        sigFutures.add(executor.submit(new Callable<ECKey.ECDSASignature>() {
            @Override
            public ECKey.ECDSASignature call() throws Exception {
                return key.doSign(hash);
            }
        }));
    }
    List<ECKey.ECDSASignature> sigs = Futures.allAsList(sigFutures).get();
    for (ECKey.ECDSASignature signature : sigs) {
        assertTrue(signature.s.compareTo(ECKey.HALF_CURVE_ORDER) <= 0);
    }
    final ECKey.ECDSASignature duplicate = new ECKey.ECDSASignature(sigs.get(0).r, sigs.get(0).s);
    assertEquals(sigs.get(0), duplicate);
    assertEquals(sigs.get(0).hashCode(), duplicate.hashCode());
}
public synchronized void start(CommandExecutor commandExecutor) {
    if (isStarted()) {
        return;
    }
    this.commandExecutor = requireNonNull(commandExecutor, "commandExecutor");
    scheduler = MoreExecutors.listeningDecorator(Executors.newSingleThreadScheduledExecutor(
            new DefaultThreadFactory("mirroring-scheduler", true)));
    worker = MoreExecutors.listeningDecorator(new ThreadPoolExecutor(
            0, numThreads, 1, TimeUnit.MINUTES, new SynchronousQueue<>(),
            new DefaultThreadFactory("mirroring-worker", true)));
    final ListenableScheduledFuture<?> future = scheduler.scheduleWithFixedDelay(
            this::schedulePendingMirrors, TICK.getSeconds(), TICK.getSeconds(), TimeUnit.SECONDS);
    FuturesExtra.addFailureCallback(
            future,
            cause -> logger.error("Git-to-CD mirroring scheduler stopped due to an unexpected exception:", cause));
}
@Override
public Future<RpcResult<Void>> backupDatastore(final BackupDatastoreInput input) {
    LOG.debug("backupDatastore: {}", input);
    if (Strings.isNullOrEmpty(input.getFilePath())) {
        return newFailedRpcResultFuture("A valid file path must be specified");
    }
    final SettableFuture<RpcResult<Void>> returnFuture = SettableFuture.create();
    ListenableFuture<List<DatastoreSnapshot>> future = sendMessageToShardManagers(GetSnapshot.INSTANCE);
    Futures.addCallback(future, new FutureCallback<List<DatastoreSnapshot>>() {
        @Override
        public void onSuccess(List<DatastoreSnapshot> snapshots) {
            saveSnapshotsToFile(new DatastoreSnapshotList(snapshots), input.getFilePath(), returnFuture);
        }

        @Override
        public void onFailure(Throwable failure) {
            onDatastoreBackupFailure(input.getFilePath(), returnFuture, failure);
        }
    }, MoreExecutors.directExecutor());
    return returnFuture;
}
@Override
public ListenableFuture<Void> prepare() {
    LOG.debug("Preparing transaction for shard {}", shardRoot);
    checkTransactionReadied();
    final List<ListenableFuture<Void>> futures =
            cohorts.stream().map(DOMStoreThreePhaseCommitCohort::preCommit).collect(Collectors.toList());
    final SettableFuture<Void> ret = SettableFuture.create();
    Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Void>>() {
        @Override
        public void onSuccess(final List<Void> result) {
            ret.set(null);
        }

        @Override
        public void onFailure(final Throwable throwable) {
            ret.setException(throwable);
        }
    }, MoreExecutors.directExecutor());
    return ret;
}
/**
 * @deprecated This method is only used from configuration modules and thus callers of it
 *             should use service injection to make the executor configurable.
 */
@Deprecated
public static synchronized ListeningExecutorService getDefaultCommitExecutor() {
    if (COMMIT_EXECUTOR == null) {
        final ThreadFactory factory =
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
        /*
         * FIXME: this used to be newCachedThreadPool(), but MD-SAL does not have transaction
         *        ordering guarantees, which means that using a concurrent threadpool results
         *        in application data being committed in random order, potentially resulting
         *        in inconsistent data being present. Once proper primitives are introduced,
         *        concurrency can be reintroduced.
         */
        final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
        COMMIT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
    }
    return COMMIT_EXECUTOR;
}
/**
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
    final BindingBrokerTestFactory testFactory = new BindingBrokerTestFactory();
    testFactory.setExecutor(MoreExecutors.newDirectExecutorService());
    testFactory.setStartWithParsedSchema(true);
    testContext = testFactory.getTestContext();
    testContext.start();

    domMountPointService = testContext.getDomMountProviderService();
    bindingMountPointService = testContext.getBindingMountPointService();
    assertNotNull(domMountPointService);

    final InputStream moduleStream = BindingReflections.getModuleInfo(OpendaylightTestRpcServiceService.class)
            .getModuleSourceStream();
    assertNotNull(moduleStream);
    final List<InputStream> rpcModels = Collections.singletonList(moduleStream);
    schemaContext = YangParserTestUtils.parseYangStreams(rpcModels);
}
private static void forwardCoins(Transaction tx) {
    try {
        Coin value = tx.getValueSentToMe(kit.wallet());
        System.out.println("Forwarding " + value.toFriendlyString());
        // Now send the coins back! Send with a small fee attached to ensure rapid confirmation.
        final Coin amountToSend = value.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE);
        final Wallet.SendResult sendResult =
                kit.wallet().sendCoins(kit.peerGroup(), forwardingAddress, amountToSend);
        checkNotNull(sendResult);  // We should never try to send more coins than we have!
        System.out.println("Sending ...");
        // Register a callback that is invoked when the transaction has propagated across the network.
        // This shows a second style of registering ListenableFuture callbacks, it works when you don't
        // need access to the object the future returns.
        sendResult.broadcastComplete.addListener(new Runnable() {
            @Override
            public void run() {
                // The wallet has changed now, it'll get auto saved shortly or when the app shuts down.
                System.out.println("Sent coins onwards! Transaction hash is " + sendResult.tx.getHashAsString());
            }
        }, MoreExecutors.sameThreadExecutor());
    } catch (KeyCrypterException | InsufficientMoneyException e) {
        // We don't use encrypted wallets in this example - can never happen.
        throw new RuntimeException(e);
    }
}
public static ExecutorService getDefaultChangeEventExecutor() {
    if (CHANGE_EVENT_EXECUTOR == null) {
        final ThreadFactory factory =
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-change-%d").build();
        /*
         * FIXME: this used to be newCachedThreadPool(), but MD-SAL does not have transaction
         *        ordering guarantees, which means that using a concurrent threadpool results
         *        in application data being committed in random order, potentially resulting
         *        in inconsistent data being present. Once proper primitives are introduced,
         *        concurrency can be reintroduced.
         */
        final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
        CHANGE_EVENT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
    }
    return CHANGE_EVENT_EXECUTOR;
}
@Test
public void testMuxedFileSizeCacheIsEmptyBeforeMuxing() throws Exception {
    // Given
    mux2fs = new MuxFs(mirrorRoot, tempDir, muxerFactory, sleeper, fileChannelCloser,
            MoreExecutors.newDirectExecutorService());
    fs = mux2fs;
    StatFiller stat = mock(StatFiller.class);
    Path mkv = mockPath("file.mkv", 700000000L);
    Path srt = mockPath("file.srt", 2000L);
    mockDirectoryStream(mirrorRoot, srt, mkv);
    when(stat.statWithSize(eq(mkv), sizeGetterCaptor.capture(), extraSizeGetterCaptor.capture()))
            .thenReturn(mock(UnixFileStat.class));
    mockAttributes(mkv, 234);
    FileInfo info = FileInfo.of(mkv);
    // When
    int result = fs.getattr("file.mkv", stat);
    // Then
    assertThat(result).isEqualTo(SUCCESS);
    verify(stat).statWithSize(eq(mkv), any(), any());
    verifyNoMoreInteractions(stat);
    assertThat(sizeGetterCaptor.getValue().apply(info)).isEmpty();
    assertThat(extraSizeGetterCaptor.getValue().get()).isEqualTo(2000L);
}
public void startNewDomDataBroker() {
    checkState(this.executor != null, "Executor needs to be set");
    final InMemoryDOMDataStore operStore =
            new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
    final InMemoryDOMDataStore configStore =
            new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
    this.newDatastores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
            .put(LogicalDatastoreType.OPERATIONAL, operStore)
            .put(LogicalDatastoreType.CONFIGURATION, configStore)
            .build();
    this.newDOMDataBroker = new SerializedDOMDataBroker(this.newDatastores, this.executor);
    this.mockSchemaService.registerSchemaContextListener(configStore);
    this.mockSchemaService.registerSchemaContextListener(operStore);
}
@Inject
public ShardCollectSource(Settings settings,
                          IndexNameExpressionResolver indexNameExpressionResolver,
                          IndicesService indicesService,
                          Functions functions,
                          ClusterService clusterService,
                          ThreadPool threadPool,
                          TransportActionProvider transportActionProvider,
                          BulkRetryCoordinatorPool bulkRetryCoordinatorPool,
                          RemoteCollectorFactory remoteCollectorFactory,
                          SystemCollectSource systemCollectSource,
                          NodeSysExpression nodeSysExpression) {
    this.settings = settings;
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.indicesService = indicesService;
    this.functions = functions;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    this.remoteCollectorFactory = remoteCollectorFactory;
    this.systemCollectSource = systemCollectSource;
    this.executor = MoreExecutors.listeningDecorator(
            (ExecutorService) threadPool.executor(ThreadPool.Names.SEARCH));
    this.transportActionProvider = transportActionProvider;
    this.bulkRetryCoordinatorPool = bulkRetryCoordinatorPool;
    this.nodeSysExpression = nodeSysExpression;
}
public IterableRowEmitter(RowReceiver rowReceiver,
                          final Iterable<? extends Row> rows,
                          Optional<Executor> executor) {
    this.rowReceiver = rowReceiver;
    topRowUpstream = new TopRowUpstream(
            executor.or(MoreExecutors.directExecutor()),
            this,
            new Runnable() {
                @Override
                public void run() {
                    rowsIt = rows.iterator();
                    IterableRowEmitter.this.run();
                }
            }
    );
    rowReceiver.setUpstream(topRowUpstream);
    this.rowsIt = rows.iterator();
}
@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
        final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
    checkNotFailed();
    checkNotClosed();
    final CheckedFuture<Void, TransactionCommitFailedException> ret = broker.submit(transaction, cohorts);
    COUNTER_UPDATER.incrementAndGet(this);
    Futures.addCallback(ret, new FutureCallback<Void>() {
        @Override
        public void onSuccess(final Void result) {
            transactionCompleted();
        }

        @Override
        public void onFailure(final Throwable failure) {
            transactionFailed(transaction, failure);
        }
    }, MoreExecutors.directExecutor());
    return ret;
}
/**
 * Run the job now.
 * The job must set its own state to DISABLED or PAUSED when failed, otherwise it is set to ACTIVE.
 * @param yadaJobId the id of the job to run
 */
public void runJob(Long yadaJobId) {
    log.debug("Running job id {}", yadaJobId);
    YadaJob toRun = yadaJobRepository.findOne(yadaJobId);
    if (toRun == null) {
        log.info("Job not found when trying to run it, id={}", yadaJobId);
        return;
    }
    yadaJobRepository.internalSetRunning(yadaJobId, YadaJobState.RUNNING.toId(), YadaJobState.ACTIVE.toId());
    // YadaJob instances can have @Autowire fields
    final YadaJob wiredYadaJob = (YadaJob) yadaUtil.autowire(toRun);
    ListenableFuture<Void> jobHandle = jobScheduler.submit(wiredYadaJob);
    jobHandles.put(yadaJobId, jobHandle);
    Futures.addCallback(jobHandle, new FutureCallback<Void>() {
        // The callback is run in the executor
        public void onSuccess(Void result) {
            // result is always null
            jobHandles.remove(yadaJobId);
            yadaJobSchedulerDao.internalJobSuccessful(wiredYadaJob);
        }

        public void onFailure(Throwable thrown) {
            jobHandles.remove(yadaJobId);
            yadaJobSchedulerDao.internalJobFailed(wiredYadaJob, thrown);
        }
    }, MoreExecutors.directExecutor());
}
@Before
public void setup() {
    BindingBrokerTestFactory testFactory = new BindingBrokerTestFactory();
    testFactory.setExecutor(MoreExecutors.newDirectExecutorService());
    testFactory.setStartWithParsedSchema(true);
    testContext = testFactory.getTestContext();
    testContext.start();

    providerRegistry = testContext.getBindingRpcRegistry();
    provisionRegistry = testContext.getDomRpcRegistry();
    biRpcInvoker = testContext.getDomRpcInvoker();
    assertNotNull(providerRegistry);
    assertNotNull(provisionRegistry);

    knockService = MessageCapturingFlowService.create(providerRegistry);
}
private QuorumJournalManager createSpyingQJM() throws IOException, URISyntaxException {
    AsyncLogger.Factory spyFactory = new AsyncLogger.Factory() {
        @Override
        public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
                String journalId, InetSocketAddress addr) {
            AsyncLogger logger = new IPCLoggerChannel(conf, nsInfo, journalId, addr) {
                protected ExecutorService createSingleThreadExecutor() {
                    // Don't parallelize calls to the quorum in the tests.
                    // This makes the tests more deterministic.
                    return MoreExecutors.sameThreadExecutor();
                }
            };
            return Mockito.spy(logger);
        }
    };
    return closeLater(new QuorumJournalManager(conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO, spyFactory));
}
private <V> ListenableFuture<V> addFutureCallback(final ListenableFuture<V> future) {
    Futures.addCallback(future, new FutureCallback<V>() {
        @Override
        public void onSuccess(final V result) {
            // no-op
        }

        @Override
        public void onFailure(final Throwable failure) {
            log.warn("Transaction {} failed with error \"{}\" - was allocated in the following context",
                    transactionId, failure, debugContext);
        }
    }, MoreExecutors.directExecutor());
    return future;
}
@Before
public void setupStore() {
    InMemoryDOMDataStore operStore =
            new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
    InMemoryDOMDataStore configStore =
            new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
    schemaContext = TestModel.createTestContext();

    operStore.onGlobalContextUpdated(schemaContext);
    configStore.onGlobalContextUpdated(schemaContext);

    ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
            .put(CONFIGURATION, configStore)
            .put(OPERATIONAL, operStore)
            .build();

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    domBroker = new SerializedDOMDataBroker(stores, executor);
}
@Override
public Embedder configuredEmbedder() {
    StepEventBus.getEventBus().setUniqueSession(true);
    Embedder embedder = super.configuredEmbedder();
    embedder.useExecutorService(MoreExecutors.sameThreadExecutor());
    embedder.configuration().useStepCollector(stepCollector);
    embedder.useStepsFactory(new ScanningStepsFactory(embedder.configuration(), this.getClass(), pages));
    embedder.configuration()
            .useParameterControls(new ParameterControls().useDelimiterNamedParameters(true));
    embedder.configuration()
            .storyReporterBuilder()
            .withReporters(new SatisfyStoryReporter(), new ConsoleOutput());
    embedder.useEmbedderMonitor(new NullEmbedderMonitor());
    embedder.useMetaFilters(getSatisfyProperties().getMetaFilters());
    return embedder;
}
@Before
public void setUp() {
    coreService = createMock(CoreService.class);
    expect(coreService.registerApplication(appId.name()))
            .andReturn(appId).anyTimes();
    replay(coreService);
    provider.cfgService = new ComponentConfigAdapter();
    provider.coreService = coreService;
    provider.providerRegistry = hostRegistry;
    provider.topologyService = topoService;
    provider.packetService = packetService;
    provider.deviceService = deviceService;
    provider.hostService = hostService;
    provider.activate(CTX_FOR_NO_REMOVE);
    provider.eventHandler = MoreExecutors.newDirectExecutorService();
}
@Activate
public void activate(ComponentContext context) {
    configService.registerProperties(getClass());
    setupCodecs();
    messageReceivingExecutor = receiveOnIOLoopThread
            ? MoreExecutors.directExecutor()
            : Executors.newFixedThreadPool(
                    totalReceiverThreads,
                    groupedThreads("onos/net-perf-test", "receiver-%d"));
    registerMessageHandlers();
    startTest();
    reporter.scheduleWithFixedDelay(this::reportPerformance,
            reportIntervalSeconds, reportIntervalSeconds, TimeUnit.SECONDS);
    logConfig("Started");
}
private void scheduleBalance() {
    if (isLeader.get() && nextTask.get() == null) {
        ListenableScheduledFuture task = executorService.schedule(
                mastershipAdminService::balanceRoles, SCHEDULE_PERIOD, TimeUnit.SECONDS);
        task.addListener(() -> {
            log.info("Completed balance roles");
            nextTask.set(null);
        }, MoreExecutors.directExecutor());
        if (!nextTask.compareAndSet(null, task)) {
            task.cancel(false);
        }
    }
}
@Before
public void setupStore() {
    InMemoryDOMDataStore operStore =
            new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
    InMemoryDOMDataStore configStore =
            new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
    schemaContext = TestModel.createTestContext();

    operStore.onGlobalContextUpdated(schemaContext);
    configStore.onGlobalContextUpdated(schemaContext);

    ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
            .put(CONFIGURATION, configStore)
            .put(OPERATIONAL, operStore)
            .build();

    commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
    futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
    executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
            TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
    domBroker = new SerializedDOMDataBroker(stores, executor);
}
public ChunkCache() {
    cache = Caffeine.newBuilder()
            .maximumWeight(cacheSize)
            .executor(MoreExecutors.directExecutor())
            .weigher((key, buffer) -> ((Buffer) buffer).buffer.capacity())
            .removalListener(this)
            .build(this);
    metrics = new CacheMissMetrics("ChunkCache", this);
}
@Test
public void testChainedTransactionFailureWithSingleShard() throws Exception {
    initDatastoresWithCars("testChainedTransactionFailureWithSingleShard");

    final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
            ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                    .put(LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
            MoreExecutors.directExecutor());

    final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class);
    final DOMTransactionChain txChain = broker.createTransactionChain(listener);

    final DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction();

    final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
            .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();

    writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);

    try {
        writeTx.submit().checkedGet(5, TimeUnit.SECONDS);
        fail("Expected TransactionCommitFailedException");
    } catch (final TransactionCommitFailedException e) {
        // Expected
    }

    verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));

    txChain.close();
    broker.close();
}
private static ListeningExecutorService newExecutor() {
    return MoreExecutors.listeningDecorator(
            MoreExecutors.getExitingExecutorService(
                    new ThreadPoolExecutor(
                            DEFAULT_THREAD_POOL_CORE_SIZE,
                            DEFAULT_THREAD_POOL_MAXIMUM_SIZE,
                            DEFAULT_THREAD_POOL_KEEP_ALIVE_MILLIS,
                            TimeUnit.MILLISECONDS,
                            new LinkedBlockingQueue<Runnable>(),
                            DEFAULT_THREAD_FACTORY)));
}
/**
 * Wraps a listenable future with a fluent future.
 *
 * @param <V> value type
 * @param future future
 * @return fluent instance
 */
public static <V> FluentFuture<V> from(ListenableFuture<V> future) {
    if (future instanceof FluentFuture<?>) {
        return (FluentFuture<V>) future;
    }
    return new WrapingFluentFuture<>(future, MoreExecutors.directExecutor());
}
private ListeningExecutorService createNamedExecutorService() {
    return MoreExecutors.listeningDecorator(Executors.newCachedThreadPool(
            r -> new Thread(Thread.currentThread().getThreadGroup(), "encryptionKeyWatcher") {
                @Override
                public void run() {
                    r.run();
                }
            }));
}
@Override
protected void onInit() throws Exception {
    RxNetty.useMetricListenersFactory(new ServoEventsListenerFactory());
    documentClient = new DocumentClient(cfg.getServiceEndpoint(), cfg.getMasterKey(),
            cfg.getConnectionPolicy(), cfg.getConsistencyLevel());
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(
            cfg.getConcurrency(), cfg.getConcurrency(), 10, TimeUnit.MINUTES,
            new ArrayBlockingQueue<Runnable>(cfg.getConcurrency(), true),
            new ThreadPoolExecutor.CallerRunsPolicy());
    this.executor = MoreExecutors.listeningDecorator(threadPoolExecutor);
}
/**
 * Starts the filters, the registration service, and the server.
 *
 * @throws Exception
 */
public void start() throws Exception {
    GuiceDI.getInstance(ServerRegistry.class).startAsync().addListener(new Listener() {
        @Override
        public void running() {
            logger.info("zk registry running");
        }
    }, MoreExecutors.directExecutor());
    GuiceDI.getInstance(RemotingNettyServer.class).startAsync().awaitRunning();
}
public void shutdown() {
    GuiceDI.getInstance(ServerRegistry.class).stopAsync();
    GuiceDI.getInstance(RemotingNettyServer.class).stopAsync().addListener(new Listener() {
        @Override
        public void terminated(State from) {
            super.terminated(from);
            logger.info("zk registry stopped");
        }
    }, MoreExecutors.directExecutor());
}
private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
        final DOMDataWriteTransaction transaction,
        final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {

    final long startTime = System.nanoTime();
    final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();

    // Not using Futures.allAsList here to avoid its internal overhead.
    FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
        @Override
        public void onSuccess(final Boolean result) {
            if (result == null || !result) {
                handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER,
                        new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
            } else if (!cohortIterator.hasNext()) {
                // All cohorts completed successfully - we can move on to the preCommit phase
                doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
            } else {
                Futures.addCallback(cohortIterator.next().canCommit(), this, MoreExecutors.directExecutor());
            }
        }

        @Override
        public void onFailure(final Throwable failure) {
            handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
        }
    };

    ListenableFuture<Boolean> canCommitFuture = cohortIterator.next().canCommit();
    Futures.addCallback(canCommitFuture, futureCallback, MoreExecutors.directExecutor());
}
@Test
public void testFailedRegistration() {
    new JavaTestKit(getSystem()) {
        {
            ActorSystem mockActorSystem = mock(ActorSystem.class);

            ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
            doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
            ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());

            ActorContext actorContext = mock(ActorContext.class);

            doReturn(executor).when(actorContext).getClientDispatcher();

            String shardName = "shard-1";
            final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
                    shardName, actorContext, mockListener);

            doReturn(mockActorSystem).when(actorContext).getActorSystem();
            doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
            doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
            doReturn(Futures.failed(new RuntimeException("mock"))).when(actorContext)
                    .executeOperationAsync(any(ActorRef.class), any(Object.class), any(Timeout.class));
            doReturn(mock(DatastoreContext.class)).when(actorContext).getDatastoreContext();

            proxy.init(YangInstanceIdentifier.of(TestModel.TEST_QNAME), AsyncDataBroker.DataChangeScope.ONE);

            Assert.assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());

            proxy.close();
        }
    };
}
@BeforeClass
public static void setupHikari() {
    ds = Hikari.createHikari("jdbc:hsqldb:mem:JdbcUtilsTest", "sa", "", new Properties());
    ex = MoreExecutors.listeningDecorator(Hikari.createExecutorFor(ds, false));
    dbAsync = new GuavaDbInterpreter(ds, ex);
}
@Test
public void testFailedRegistration() {
    new JavaTestKit(getSystem()) {
        {
            ActorSystem mockActorSystem = mock(ActorSystem.class);

            ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
            doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
            ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());

            ActorContext actorContext = mock(ActorContext.class);
            final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);

            doReturn(executor).when(actorContext).getClientDispatcher();
            doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
            doReturn(mockActorSystem).when(actorContext).getActorSystem();

            String shardName = "shard-1";
            final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
                    new DataTreeChangeListenerProxy<>(actorContext, mockListener, path);

            doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
            doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
            doReturn(Futures.failed(new RuntimeException("mock"))).when(actorContext)
                    .executeOperationAsync(any(ActorRef.class), any(Object.class), any(Timeout.class));
            doReturn(mock(DatastoreContext.class)).when(actorContext).getDatastoreContext();

            proxy.init("shard-1");

            Assert.assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());

            proxy.close();
        }
    };
}