@Override
protected void setUp() {
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    tearDownStack.addTearDown(new TearDown() {
        @Override
        public void tearDown() {
            executor.shutdownNow();
        }
    });
    sleeper = new SleepingRunnable(1000);
    delayedFuture = executor.submit(sleeper, true);
    tearDownStack.addTearDown(new TearDown() {
        @Override
        public void tearDown() {
            Thread.interrupted();
        }
    });
}
private static TServer getTHsHaServer(TProtocolFactory protocolFactory, TProcessor processor,
        TTransportFactory transportFactory, int workerThreads, InetSocketAddress inetSocketAddress,
        ThriftMetrics metrics) throws TTransportException {
    TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
    log.info("starting HBase HsHA Thrift server on " + inetSocketAddress.toString());
    THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
    if (workerThreads > 0) {
        // Could support different min & max thread counts; kept equal to preserve existing functionality.
        serverArgs.minWorkerThreads(workerThreads).maxWorkerThreads(workerThreads);
    }
    ExecutorService executorService = createExecutor(workerThreads, metrics);
    serverArgs.executorService(executorService);
    serverArgs.processor(processor);
    serverArgs.transportFactory(transportFactory);
    serverArgs.protocolFactory(protocolFactory);
    return new THsHaServer(serverArgs);
}
/**
 * Shuts down an ExecutorService in two phases; adapted from the Java SE 7
 * ExecutorService documentation.
 *
 * @param pool the pool to shut down
 */
private void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
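A minimal usage sketch for the two-phase shutdown helper above, assuming a caller that owns its own fixed pool; the pool size and the submitted task are illustrative, not part of the original snippet:

// Hypothetical caller: submit some work, then apply the two-phase shutdown.
ExecutorService workers = Executors.newFixedThreadPool(4);
workers.submit(() -> System.out.println("task ran"));
shutdownAndAwaitTermination(workers);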
/**
 * Gather ResourceSync Framework documents from a source in ResultIndexes.
 *
 * @param url the starting url to explore
 * @return List of resultIndexes of the exploration
 * @throws URISyntaxException if the url could not be converted to a URI.
 * @throws InterruptedException at Executor interrupts.
 */
public List<ResultIndex> explore(String url) throws URISyntaxException, InterruptedException {
    URI uri = new URI(url);
    ExecutorService executor = Executors.newWorkStealingPool();
    List<Callable<ResultIndex>> callables = new ArrayList<>();
    callables.add(() -> exploreWellKnown(uri));
    callables.add(() -> exploreLinks(uri));
    callables.add(() -> exploreRobotsTxt(uri));
    callables.add(() -> exploreRsDocumentUri(uri));
    return executor.invokeAll(callables)
            .stream()
            .map(future -> {
                try {
                    return future.get();
                } catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            })
            .collect(Collectors.toList());
}
/**
 * Constructs a new watcher for a copy operation, and then immediately submits
 * it to the thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this copy request.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartCopyCallable
 *            The callable responsible for processing the copy
 *            asynchronously
 * @param copyObjectRequest
 *            The original CopyObject request
 */
public static CopyMonitor create(
        TransferManager manager,
        CopyImpl transfer,
        ExecutorService threadPool,
        CopyCallable multipartCopyCallable,
        CopyObjectRequest copyObjectRequest,
        ProgressListenerChain progressListenerChain) {

    CopyMonitor copyMonitor = new CopyMonitor(manager, transfer, threadPool,
            multipartCopyCallable, copyObjectRequest, progressListenerChain);
    Future<CopyResult> thisFuture = threadPool.submit(copyMonitor);
    // Use an atomic compareAndSet to prevent a possible race between the
    // setting of the CopyMonitor's futureReference, and setting the
    // CompleteMultipartCopy's futureReference within the call() method.
    // We only want to set the futureReference to CopyMonitor's futureReference if the
    // current value is null, otherwise the futureReference that's set is
    // CompleteMultipartCopy's which is ultimately what we want.
    copyMonitor.futureReference.compareAndSet(null, thisFuture);
    return copyMonitor;
}
/**
 * Runs an operation in the background in a separate thread.
 * The execution is started immediately.
 *
 * @param operation The operation to execute.
 */
public BackgroundOperation(final Operation<T> operation) {
    ApfloatContext ctx = ApfloatContext.getContext();
    ExecutorService executorService = ctx.getExecutorService();
    Callable<T> callable = new Callable<T>() {
        public T call() {
            return operation.execute();
        }
    };
    this.future = executorService.submit(callable);
}
public static void main(String[] args) throws Exception {
    try (Server server = new Server()) {
        URI uri = new URI("http://127.0.0.1:" + server.getPort() + "/");

        // sanity
        success(uri, new StringRequestBody(STRING_BODY, 0));
        success(uri, new ByteArrayRequestBody(BYTE_ARRAY_BODY, 0));
        success(uri, new FileRequestBody(FILE_BODY, 0));

        for (int i = 1; i < BODY_OFFSETS.length; i++) {
            failureBlocking(uri, new StringRequestBody(STRING_BODY, BODY_OFFSETS[i]));
            failureBlocking(uri, new ByteArrayRequestBody(BYTE_ARRAY_BODY, BODY_OFFSETS[i]));
            failureBlocking(uri, new FileRequestBody(FILE_BODY, BODY_OFFSETS[i]));

            failureNonBlocking(uri, new StringRequestBody(STRING_BODY, BODY_OFFSETS[i]));
            failureNonBlocking(uri, new ByteArrayRequestBody(BYTE_ARRAY_BODY, BODY_OFFSETS[i]));
            failureNonBlocking(uri, new FileRequestBody(FILE_BODY, BODY_OFFSETS[i]));
        }
    } finally {
        Executor def = defaultClient().executor();
        if (def instanceof ExecutorService) {
            ((ExecutorService) def).shutdownNow();
        }
    }
}
public void runTest() {
    ScheduledExecutorService s = new ScheduledThreadPoolExecutor(2, new NamedThreadFactory("Scheduled"));
    ExecutorService es = Executors.newCachedThreadPool();
    es.execute(this::writeTest);
    es.execute(this::writeTest);
    es.execute(this::writeTest);
    es.execute(this::writeTest);
    es.execute(this::readTest);
    es.execute(this::readTest);
    es.execute(this::readTest);
    es.execute(this::readTest);
    es.execute(this::readTest);
    es.execute(this::readTest);
    s.scheduleAtFixedRate(this::printInfo, 5, 5, TimeUnit.SECONDS);
}
void start() {
    new Thread(
            () -> {
                ExecutorService executorService = Executors.newCachedThreadPool();
                try (ServerSocket serverSocket = new ServerSocket(port, 0, InetAddress.getLoopbackAddress())) {
                    while (true) {
                        Socket socket = serverSocket.accept();
                        executorService.execute(() -> process(socket));
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            })
            .start();
}
private void generateCMAX_SETs() throws AlgorithmExecutionException {
    if (this.optimize()) {
        this.cmaxSet = new CopyOnWriteArrayList<CMAX_SET>();
        ExecutorService exec = this.getExecuter();
        for (int i = 0; i < this.numberOfAttributes; ++i) {
            exec.execute(new CMAX_SET_JOB(i));
        }
        this.awaitExecuter(exec);
    } else {
        this.cmaxSet = new LinkedList<CMAX_SET>();
        for (int i = 0; i < this.numberOfAttributes; ++i) {
            executeCMAX_SET_Task(i);
        }
    }
}
public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(300);
    for (int i = 0; i < 10000; i++) {
        pool.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    executeHttp();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        });
    }
    Thread.sleep(1000000L);
}
public void testThreadPool2() {
    System.out.println();
    ExecutorService service = Executors.newFixedThreadPool(2);
    for (int i = 0; i < 10; i++) {
        service.submit(new SimpleThread2(i));
    }
    try {
        Thread.sleep(5 * 1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    service.shutdown();
}
public void close() {
    try {
        if (executor instanceof ExecutorService) {
            ((ExecutorService) executor).shutdown();
        }
    } catch (Throwable t) {
        logger.warn("fail to destroy thread pool of server: " + t.getMessage(), t);
    }
}
public AbstractServer(URL url, ChannelHandler handler) throws RemotingException {
    super(url, handler);
    localAddress = getUrl().toInetSocketAddress();
    String host = url.getParameter(Constants.ANYHOST_KEY, false)
            || NetUtils.isInvalidLocalHost(getUrl().getHost())
            ? NetUtils.ANYHOST : getUrl().getHost();
    bindAddress = new InetSocketAddress(host, getUrl().getPort());
    this.accepts = url.getParameter(Constants.ACCEPTS_KEY, Constants.DEFAULT_ACCEPTS);
    this.idleTimeout = url.getParameter(Constants.IDLE_TIMEOUT_KEY, Constants.DEFAULT_IDLE_TIMEOUT);
    try {
        doOpen();
        if (logger.isInfoEnabled()) {
            logger.info("Start " + getClass().getSimpleName() + " bind " + getBindAddress() + ", export " + getLocalAddress());
        }
    } catch (Throwable t) {
        throw new RemotingException(url.toInetSocketAddress(), null,
                "Failed to bind " + getClass().getSimpleName() + " on " + getLocalAddress()
                        + ", cause: " + t.getMessage(), t);
    }
    executor = (ExecutorService) ExtensionLoader.getExtensionLoader(DataStore.class)
            .getDefaultExtension().get(Constants.EXECUTOR_SERVICE_COMPONENT_KEY, Integer.toString(url.getPort()));
}
@Override
@Deprecated
public <R> void processBatchCallback(
        List<? extends Row> list,
        byte[] tableName,
        ExecutorService pool,
        Object[] results,
        Batch.Callback<R> callback) throws IOException, InterruptedException {
    processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback);
}
/**
 * Creates a thread pool and runs the Kafka consumers.
 */
public void go() {
    Constant constant = new Constant();
    kafkaProperties kafkaProperties = new kafkaProperties();
    ConsumerConfig config = new ConsumerConfig(kafkaProperties.properties());
    ExecutorService executorService = Executors.newFixedThreadPool(Integer.parseInt(constant.THREAD_POOL_SIZE));
    String topic = constant.TOPIC;
    // Task[] tasks = new Task[Integer.parseInt(constant.THREAD_NUM)];
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, new Integer(constant.THREAD_NUM));
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    for (KafkaStream stream : streams) {
        executorService.submit(new Task(stream));
    }
    executorService.shutdown();
}
/**
 * Main method of the class.
 *
 * @param args
 */
public static void main(String[] args) {
    // Create an executor
    ExecutorService executor = Executors.newCachedThreadPool();

    // Create five tasks
    ResultTask resultTasks[] = new ResultTask[5];
    for (int i = 0; i < 5; i++) {
        ExecutableTask executableTask = new ExecutableTask("Task " + i);
        resultTasks[i] = new ResultTask(executableTask);
        executor.submit(resultTasks[i]);
    }

    // Sleep the thread for five seconds
    try {
        TimeUnit.SECONDS.sleep(5);
    } catch (InterruptedException e1) {
        e1.printStackTrace();
    }

    // Cancel all the tasks. For tasks that have already finished by this moment,
    // the cancellation has no effect
    for (int i = 0; i < resultTasks.length; i++) {
        resultTasks[i].cancel(true);
    }

    // Write the results of those tasks that haven't been cancelled
    for (int i = 0; i < resultTasks.length; i++) {
        try {
            if (!resultTasks[i].isCancelled()) {
                System.out.printf("%s\n", resultTasks[i].get());
            }
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        }
    }

    // Finish the executor
    executor.shutdown();
}
/**
 * Test concurrent acquire/release semantics.
 */
@Test
public void verifyConcurrentAcquireAndRelease() throws Exception {
    final ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_SIZE);
    try {
        testConcurrency(executor, Arrays.asList(getConcurrentLocks("concurrent-new")));
    } catch (final Exception e) {
        LOGGER.debug("testConcurrentAcquireAndRelease produced an error", e);
        fail("testConcurrentAcquireAndRelease failed.");
    } finally {
        executor.shutdownNow();
    }
}
@Override
public Future<?> submit(Runnable task) {
    ExecutorService executor = getThreadPoolExecutor();
    try {
        return executor.submit(task);
    } catch (RejectedExecutionException ex) {
        throw new TaskRejectedException("Executor [" + executor + "] did not accept task: " + task, ex);
    }
}
@Override
protected void configure() {
    // We want a global binding for @ModelSync ExecutorService, but each plugin has
    // its own executors, so just use the API plugin's executor globally.
    bind(Key.get(ExecutorService.class, ModelSync.class))
            .to(Key.get(ExecutorService.class, Sync.immediate));

    final PluginFacetBinder facets = new PluginFacetBinder(binder());
    facets.register(ModelCommands.class);
    facets.register(ModelCommands.Parent.class);
}
@Test(timeout = 100000)
public void testRun() throws Exception {
    ArgumentCaptor<JSONObject> commentCaptor = ArgumentCaptor.forClass(JSONObject.class);
    ArgumentCaptor<JSONObject> postCaptor = ArgumentCaptor.forClass(JSONObject.class);

    ExecutorService service = Executors.newSingleThreadExecutor();
    service.execute(aggregatorRunnable);
    Thread.sleep(TIME_TO_RUN);
    aggregatorRunnable.stop();

    Mockito.verify(queueService, atLeastOnce()).enqueueComment(commentCaptor.capture());
    Mockito.verify(queueService, atLeastOnce()).enqueuePost(postCaptor.capture());

    List<JSONObject> capturedComments = commentCaptor.getAllValues();
    List<JSONObject> capturedPosts = postCaptor.getAllValues();
    Assert.assertTrue(capturedComments.size() > 100);
    Assert.assertTrue(capturedPosts.size() > 100);
}
/**
 * Fetches data for a collection of ids.
 *
 * @param ids the collection of ids
 */
public List<T> getList(List<Long> ids) {
    List<T> list = InstanceHelper.newArrayList();
    if (ids != null) {
        for (int i = 0; i < ids.size(); i++) {
            list.add(null);
        }
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        for (int i = 0; i < ids.size(); i++) {
            final int index = i;
            executorService.execute(() -> list.set(index, queryById(ids.get(index))));
        }
        executorService.shutdown();
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            logger.error("awaitTermination", "", e);
        }
    }
    return list;
}
public List<String> run(List<Path> classes)
        throws IOException, InterruptedException, ExecutionException, ConstantPoolException {
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);
    for (Path path : classes) {
        ClassFileReader reader = ClassFileReader.newInstance(path);
        for (ClassFile cf : reader.getClassFiles()) {
            String classFileName = cf.getName();
            // for each ClassFile
            //   parse constant pool to find matching method refs
            //   parse each method (caller)
            //   - visit and find method references matching the given method name
            pool.submit(getTask(cf));
        }
    }
    waitForCompletion();
    pool.shutdown();
    return csMethodsMissingAnnotation;
}
public static void main(String[] args) throws Exception {
    HttpServer server = HttpServer.create(new InetSocketAddress(0), 0);
    try {
        server.setExecutor(Executors.newFixedThreadPool(1));
        server.createContext(someContext, new HttpHandler() {
            @Override
            public void handle(HttpExchange msg) {
                try {
                    try {
                        msg.sendResponseHeaders(noMsgCode, -1);
                    } catch (IOException ioe) {
                        ioe.printStackTrace();
                    }
                } finally {
                    msg.close();
                }
            }
        });
        server.start();
        System.out.println("Server started at port " + server.getAddress().getPort());
        runRawSocketHttpClient("localhost", server.getAddress().getPort());
    } finally {
        ((ExecutorService) server.getExecutor()).shutdown();
        server.stop(0);
    }
    System.out.println("Server finished.");
}
@Inject
LifecycleManagerImpl(DatabaseComponent db, EventBus eventBus, CryptoComponent crypto,
        AuthorFactory authorFactory, IdentityManager identityManager) {
    this.db = db;
    this.eventBus = eventBus;
    this.crypto = crypto;
    this.authorFactory = authorFactory;
    this.identityManager = identityManager;
    services = new CopyOnWriteArrayList<Service>();
    clients = new CopyOnWriteArrayList<Client>();
    executors = new CopyOnWriteArrayList<ExecutorService>();
}
@Test
public void testGlobalThreadManagerWithMultipleApps() {
    MockThreadManager threadManager = new MockThreadManager();
    FirebaseOptions options = new FirebaseOptions.Builder()
            .setCredentials(new MockGoogleCredentials())
            .build();
    FirebaseApp defaultApp = FirebaseApp.initializeApp(options);
    FirebaseApp customApp = FirebaseApp.initializeApp(options, "customApp");
    assertEquals(0, threadManager.initCount);

    ExecutorService exec1 = threadManager.getExecutor(defaultApp);
    ExecutorService exec2 = threadManager.getExecutor(customApp);
    assertEquals(1, threadManager.initCount);
    assertFalse(exec1.isShutdown());
    // Should return the same executor for both invocations.
    assertSame(exec1, exec2);

    threadManager.releaseExecutor(defaultApp, exec1);
    assertFalse(exec1.isShutdown());
    threadManager.releaseExecutor(customApp, exec2);
    assertTrue(exec1.isShutdown());
}
public static void main(String[] args) throws Exception {
    ExecutorService exec = Executors.newCachedThreadPool();
    for (int i = 0; i < 5; i++) {
        exec.execute(new Accessor(i));
    }
    TimeUnit.SECONDS.sleep(3); // Run for a while
    exec.shutdownNow(); // All Accessors will quit
}
@Test
public void testCounter() throws Exception {
    final long countResetTimePeriodMs = 200L;
    final Counter c = new Counter(countResetTimePeriodMs);

    final int n = DFSUtil.getRandom().nextInt(512) + 512;
    final List<Future<Integer>> futures = new ArrayList<Future<Integer>>(n);
    final ExecutorService pool = Executors.newFixedThreadPool(32);
    try {
        // increment
        for (int i = 0; i < n; i++) {
            futures.add(pool.submit(new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    return (int) c.increment();
                }
            }));
        }

        // sort and wait for the futures
        Collections.sort(futures, CMP);
    } finally {
        pool.shutdown();
    }

    // check futures
    Assert.assertEquals(n, futures.size());
    for (int i = 0; i < n; i++) {
        Assert.assertEquals(i + 1, futures.get(i).get().intValue());
    }
    Assert.assertEquals(n, c.getCount());

    // test auto-reset
    Thread.sleep(countResetTimePeriodMs + 100);
    Assert.assertEquals(1, c.increment());
}
private static AsynchronousChannelGroup createAsynchronousChannelGroup() {
    // Need to do this with the right thread context class loader else the
    // first web app to call this will trigger a leak
    ClassLoader original = Thread.currentThread().getContextClassLoader();

    try {
        Thread.currentThread().setContextClassLoader(
                AsyncIOThreadFactory.class.getClassLoader());

        // These are the same settings as the default
        // AsynchronousChannelGroup
        int initialSize = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = new ThreadPoolExecutor(
                0, Integer.MAX_VALUE, Long.MAX_VALUE, TimeUnit.MILLISECONDS,
                new SynchronousQueue<Runnable>(),
                new AsyncIOThreadFactory());

        try {
            return AsynchronousChannelGroup.withCachedThreadPool(
                    executorService, initialSize);
        } catch (IOException e) {
            // No good reason for this to happen.
            throw new IllegalStateException(sm.getString("asyncChannelGroup.createFail"));
        }
    } finally {
        Thread.currentThread().setContextClassLoader(original);
    }
}
@Override
@Deprecated
public void processBatch(List<? extends Row> list, final TableName tableName,
        ExecutorService pool, Object[] results) throws IOException, InterruptedException {
    // This belongs in HTable!!! Not in here. St.Ack

    // results must be the same size as list
    if (results.length != list.size()) {
        throw new IllegalArgumentException(
                "argument results must be the same size as argument list");
    }
    processBatchCallback(list, tableName, pool, results, null);
}
/**
 * invokeAll(c) throws NPE if c has null elements
 */
public void testInvokeAll3() throws Exception {
    final ExecutorService e =
        new CustomTPE(2, 2,
                      LONG_DELAY_MS, MILLISECONDS,
                      new ArrayBlockingQueue<Runnable>(10));
    try (PoolCleaner cleaner = cleaner(e)) {
        List<Callable<String>> l = new ArrayList<>();
        l.add(new StringTask());
        l.add(null);
        try {
            e.invokeAll(l);
            shouldThrow();
        } catch (NullPointerException success) {}
    }
}
private CompletableSearchServer(SearchServer backend, Executor executor, boolean shutdownExecutorOnClose) {
    if (shutdownExecutorOnClose && !(executor instanceof ExecutorService)) {
        throw new IllegalArgumentException(
                "shutdownExecutorOnClose requires 'executor' being an 'ExecutorService', actually got: "
                        + executor.getClass());
    }
    this.backend = backend;
    this.executor = executor;
    this.shutdownExecutorOnClose = shutdownExecutorOnClose;
}
private ExecutorService createExecutor(Config config) {
    ThreadPoolExecutor service = new ThreadPoolExecutor(config.core, config.max, config.timeout,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(QUEUE_INIT_CAPACITY),
            new TaskThreadFactory(name), new ThreadPoolExecutor.DiscardPolicy());
    allowCoreThreadTimeOut(service, config.allowCoreTimeOut);
    return service;
}
private void parallelDrainQueue(int threadCount) {
    ExecutorService executor = Executors.newFixedThreadPool(threadCount);
    for (int i = 0; i < threadCount; i++) {
        executor.execute(new NamedRunnable("Crawler %s", i) {
            @Override
            protected void execute() {
                try {
                    drainQueue();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executor.shutdown();
}
/**
 * invokeAny(c) throws NullPointerException if c has null elements
 */
public void testInvokeAny4() throws Throwable {
    CountDownLatch latch = new CountDownLatch(1);
    ExecutorService e = new ForkJoinPool(1);
    try (PoolCleaner cleaner = cleaner(e)) {
        List<Callable<String>> l = new ArrayList<>();
        l.add(latchAwaitingStringTask(latch));
        l.add(null);
        try {
            e.invokeAny(l);
            shouldThrow();
        } catch (NullPointerException success) {}
        latch.countDown();
    }
}
@SuppressWarnings("resource") @Override public InputStream generate(Integration integration) throws IOException { final PipedInputStream is = new PipedInputStream(); final ExecutorService executor = Executors.newSingleThreadExecutor(); final PipedOutputStream os = new PipedOutputStream(is); executor.execute(generateAddProjectTarEntries(integration, os)); return is; }
@Test
public void testExecuteRunnable() throws InterruptedException {
    ExecutorService executorService = toTraced(Executors.newFixedThreadPool(NUMBER_OF_THREADS));

    MockSpan parentSpan = mockTracer.buildSpan("foo").startManual();
    mockTracer.scopeManager().activate(parentSpan, true);
    executorService.execute(new TestRunnable());

    countDownLatch.await();
    assertParentSpan(parentSpan);
    assertEquals(1, mockTracer.finishedSpans().size());
}
@SuppressWarnings("Finally") public static void exitOrThrow(ExecutorService executorService, CountDownLatch testFinishedSignal, LooperTest test) throws Throwable { // Waits for the signal indicating the test's use case is done. try { // Even if this fails we want to try as hard as possible to cleanup. If we fail to close all resources // properly, the `after()` method will most likely throw as well because it tries do delete any Realms // used. Any exception in the `after()` code will mask the original error. TestHelper.awaitOrFail(testFinishedSignal); } finally { Looper looper = test.getLooper(); if (looper != null) { // Failing to quit the looper will not execute the finally block responsible // of closing the Realm. looper.quit(); } // Waits for the finally block to execute and closes the Realm. TestHelper.awaitOrFail(test.getRealmClosedSignal()); // Closes the executor. // This needs to be called after waiting since it might interrupt waitRealmThreadExecutorFinish(). executorService.shutdownNow(); Throwable fault = test.getAssertionError(); if (fault != null) { // Throws any assertion errors happened in the background thread. throw fault; } } }
public void shutdown() {
    ExecutorService executor = null;
    synchronized (this) {
        // swap
        if (service != null) {
            executor = service;
            service = null;
        }
    }
    if (executor != null) {
        // shutdown
        if (!executor.isShutdown()) {
            executor.shutdown();
        }
        // recycle
        executor = null;
    }
}