/**
 * Submits the task and adds a listener that adds the future to {@code queue} when it completes.
 */
@GwtIncompatible // TODO
private static <T> ListenableFuture<T> submitAndAddQueueListener(
    ListeningExecutorService executorService, Callable<T> task,
    final BlockingQueue<Future<T>> queue) {
  final ListenableFuture<T> future = executorService.submit(task);
  future.addListener(
      new Runnable() {
        @Override
        public void run() {
          queue.add(future);
        }
      },
      directExecutor());
  return future;
}
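A minimal usage sketch (not from the original source) of how the helper above supports consuming futures in completion order: the listener enqueues each future as it finishes, so take() yields them in completion rather than submission order. The executor, task bodies, and method name below are illustrative assumptions.

// Illustrative sketch only: names and values here are assumptions, not the original code.
private static void consumeInCompletionOrderSketch() throws InterruptedException {
  ListeningExecutorService pool =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
  BlockingQueue<Future<String>> completed = new LinkedBlockingQueue<>();
  for (int i = 0; i < 4; i++) {
    final int taskNumber = i;
    submitAndAddQueueListener(pool, () -> "result-" + taskNumber, completed);
  }
  for (int i = 0; i < 4; i++) {
    // Futures arrive in the order they complete, not the order they were submitted.
    Future<String> next = completed.take();
    System.out.println(Futures.getUnchecked(next));
  }
  pool.shutdown();
}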
public ThreadPoolEventTarget(
    final ThreadFactory wrappedFactory, final ThreadInitializer threadInitializer) {
  int poolSize = 1;
  BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

  executor = new ThreadPoolExecutor(poolSize, poolSize, 3, TimeUnit.SECONDS, queue,
      new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
          Thread thread = wrappedFactory.newThread(r);
          threadInitializer.setName(thread, "FirebaseDatabaseEventTarget");
          threadInitializer.setDaemon(thread, true);
          threadInitializer.setUncaughtExceptionHandler(thread, ThreadPoolEventTarget.this);
          return thread;
        }
      });
}
private void processMessage(String taskId, SimpleMessage message, boolean request) {
  synchronized (stateMutex) {
    if (request) {
      Task task = runningTasks.get(taskId);
      if (task != null) {
        task.postMessage(message);
      }
    } else {
      BlockingQueue<SimpleMessage> waitingQueue =
          messageResponses.getIfPresent(message.getMessageId());
      if (waitingQueue != null) {
        messageResponses.invalidate(message.getMessageId());
        waitingQueue.add(message);
      }
    }
  }
}
/**
 * remainingCapacity decreases on add, increases on remove.
 */
public void testRemainingCapacity() {
  int size = ThreadLocalRandom.current().nextInt(1, SIZE);
  BlockingQueue q = populatedQueue(size, size, 2 * size, false);
  int spare = q.remainingCapacity();
  int capacity = spare + size;
  for (int i = 0; i < size; i++) {
    assertEquals(spare + i, q.remainingCapacity());
    assertEquals(capacity, q.size() + q.remainingCapacity());
    assertEquals(i, q.remove());
  }
  for (int i = 0; i < size; i++) {
    assertEquals(capacity - i, q.remainingCapacity());
    assertEquals(capacity, q.size() + q.remainingCapacity());
    assertTrue(q.add(i));
  }
}
private ThreadHelper() {
  mMainHandler = new Handler(Looper.getMainLooper());

  ThreadFactory threadFactory = new ThreadFactory() {
    private final AtomicInteger mCount = new AtomicInteger(1);

    @Override
    public Thread newThread(Runnable r) {
      return new Thread(r, "ThreadHelper #".concat(String.valueOf(mCount.getAndIncrement())));
    }
  };

  int cpuCount = Runtime.getRuntime().availableProcessors();
  int corePoolSize = cpuCount + 1;
  int maxPoolSize = cpuCount * 2 + 1;
  BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(128);
  mExecutorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 10, TimeUnit.SECONDS,
      queue, threadFactory);
}
void testDelayQueue(final BlockingQueue q) throws Throwable {
  System.err.println(q.getClass().getSimpleName());
  for (int i = 0; i < CAPACITY; i++)
    q.add(new PDelay(i));
  ArrayBlockingQueue q2 = new ArrayBlockingQueue(SMALL);
  try {
    q.drainTo(q2, SMALL + 3);
    fail("should throw");
  } catch (IllegalStateException success) {
    equal(SMALL, q2.size());
    equal(new PDelay(0), q2.poll());
    equal(new PDelay(1), q2.poll());
    check(q2.isEmpty());
    for (int i = SMALL; i < CAPACITY; i++)
      equal(new PDelay(i), q.poll());
    equal(0, q.size());
  }
}
public void testGet_concurrent() {
  assertTrue(ArbitraryInstances.get(BlockingDeque.class).isEmpty());
  assertTrue(ArbitraryInstances.get(BlockingQueue.class).isEmpty());
  assertTrue(ArbitraryInstances.get(DelayQueue.class).isEmpty());
  assertTrue(ArbitraryInstances.get(SynchronousQueue.class).isEmpty());
  assertTrue(ArbitraryInstances.get(PriorityBlockingQueue.class).isEmpty());
  assertTrue(ArbitraryInstances.get(ConcurrentMap.class).isEmpty());
  assertTrue(ArbitraryInstances.get(ConcurrentNavigableMap.class).isEmpty());
  ArbitraryInstances.get(Executor.class).execute(ArbitraryInstances.get(Runnable.class));
  assertNotNull(ArbitraryInstances.get(ThreadFactory.class));
  assertFreshInstanceReturned(
      BlockingQueue.class,
      BlockingDeque.class,
      PriorityBlockingQueue.class,
      DelayQueue.class,
      SynchronousQueue.class,
      ConcurrentMap.class,
      ConcurrentNavigableMap.class,
      AtomicReference.class,
      AtomicBoolean.class,
      AtomicInteger.class,
      AtomicLong.class,
      AtomicDouble.class);
}
/**
 * Invokes {@code queue.}{@link BlockingQueue#put(Object) put(element)} uninterruptibly.
 *
 * @throws ClassCastException if the class of the specified element prevents it from being added
 *     to the given queue
 * @throws IllegalArgumentException if some property of the specified element prevents it from
 *     being added to the given queue
 */
@GwtIncompatible // concurrency
public static <E> void putUninterruptibly(BlockingQueue<E> queue, E element) {
  boolean interrupted = false;
  try {
    while (true) {
      try {
        queue.put(element);
        return;
      } catch (InterruptedException e) {
        interrupted = true;
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
}
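A brief usage sketch (an assumption, not part of the surrounding class): a producer that must not lose an element even if it is interrupted while the bounded hand-off queue is full.

// Sketch under stated assumptions: the queue, payload, and method name are illustrative.
private static void handOffUninterruptibly(BlockingQueue<String> handoff, String payload) {
  // Blocks until space is available; an interrupt during the wait is remembered and
  // re-asserted on the current thread after the element has been enqueued.
  putUninterruptibly(handoff, payload);
}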
@Before
public void setUp() throws Exception {
  instanceConfigAuditUtil = new InstanceConfigAuditUtil();
  ReflectionTestUtils.setField(instanceConfigAuditUtil, "instanceService", instanceService);
  audits = (BlockingQueue<InstanceConfigAuditUtil.InstanceConfigAuditModel>)
      ReflectionTestUtils.getField(instanceConfigAuditUtil, "audits");

  someAppId = "someAppId";
  someClusterName = "someClusterName";
  someDataCenter = "someDataCenter";
  someIp = "someIp";
  someConfigAppId = "someConfigAppId";
  someConfigClusterName = "someConfigClusterName";
  someConfigNamespace = "someConfigNamespace";
  someReleaseKey = "someReleaseKey";

  someAuditModel = new InstanceConfigAuditUtil.InstanceConfigAuditModel(someAppId,
      someClusterName, someDataCenter, someIp, someConfigAppId, someConfigClusterName,
      someConfigNamespace, someReleaseKey);
}
static void putQ(BlockingQueue<String> q, String o) {
  try {
    q.put(o);
  } catch (InterruptedException e) {
    // can't happen
  }
}
@Override
public JobResult execute(BlockingQueue<String> jobPathArray) {
  String currentThreadName = Thread.currentThread().getName();
  logger.info("[cz88.net] thread [" + currentThreadName + "] crawler job started");

  JobResult jobResult = new JobResult();
  String url = null;
  try {
    while ((url = jobPathArray.poll()) != null) {
      BaseUtil.getInstance().getRedisClient().lpush(Constant.WEBSITE_CODE_QUEUE_CZ88, url);
    }
  } catch (Exception e) {
    logger.info("Redis connection error; check that the network is reachable and the service is running");
  }

  if (null == url) {
    logger.info("[cz88.net] thread [" + currentThreadName + "] found the queue empty, stopping");
    CrawlersTaskPool.sharedInstance().getExecutor().shutdown();
  }

  logger.info("[cz88.net] thread [" + currentThreadName + "] crawler job finished");
  return jobResult;
}
public static void main(String[] args) throws Exception {
  final int maxPairs = (args.length > 0) ? Integer.parseInt(args[0]) : 5;
  pool = Executors.newCachedThreadPool();
  for (int i = 1; i <= maxPairs; i += (i + 1) >>> 1) {
    final List<BlockingQueue<Integer>> queues = new ArrayList<>();
    queues.add(new ArrayBlockingQueue<Integer>(100));
    queues.add(new LinkedBlockingQueue<Integer>(100));
    queues.add(new LinkedBlockingDeque<Integer>(100));
    queues.add(new SynchronousQueue<Integer>());
    // unbounded queue implementations are prone to OOME:
    // PriorityBlockingQueue, LinkedTransferQueue
    for (BlockingQueue<Integer> queue : queues)
      new CancelledProducerConsumerLoops(i, queue).run();
  }
  pool.shutdown();
  if (! pool.awaitTermination(LONG_DELAY_MS, MILLISECONDS))
    throw new AssertionError("timed out");
  pool = null;
}
/**
 * Put a new message onto the queue. This method is blocking if the queue buffer is full.
 *
 * @param message - Message to be added to the queue.
 * @throws InterruptedException - thrown if a thread is interrupted while blocked adding to the queue.
 */
@Override
public void put(final Message message) throws InterruptedException {
  // Grab the source virtual spoutId
  final VirtualSpoutIdentifier virtualSpoutId = message.getMessageId().getSrcVirtualSpoutId();

  // Add to correct buffer
  BlockingQueue<Message> virtualSpoutQueue = messageBuffer.get(virtualSpoutId);

  // If our queue doesn't exist
  if (virtualSpoutQueue == null) {
    // Attempt to put it
    messageBuffer.putIfAbsent(virtualSpoutId, createNewEmptyQueue());

    // Grab a reference.
    virtualSpoutQueue = messageBuffer.get(virtualSpoutId);
  }
  // Put it.
  virtualSpoutQueue.put(message);
}
private BlockingQueue<Runnable> createBlockingQueue() {
  BlockingQueueTypeEnum queueType =
      BlockingQueueTypeEnum.fromString(txConfig.getBlockingQueueType());
  switch (queueType) {
    case LINKED_BLOCKING_QUEUE:
      return new LinkedBlockingQueue<>(1024);
    case ARRAY_BLOCKING_QUEUE:
      return new ArrayBlockingQueue<>(MAX_ARRAY_QUEUE);
    case SYNCHRONOUS_QUEUE:
      return new SynchronousQueue<>();
    default:
      return new LinkedBlockingQueue<>(1024);
  }
}
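A sketch (assumption only) of how the queue built above might back the transaction pool; the pool sizes, keep-alive, thread factory, and saturation policy below are example values, not settings from the original configuration.

// Illustrative wiring; sizes and policy are assumptions.
private ThreadPoolExecutor createTxExecutorSketch() {
  return new ThreadPoolExecutor(
      4, 8, 60, TimeUnit.SECONDS,
      createBlockingQueue(),                       // queue type driven by txConfig, as above
      Executors.defaultThreadFactory(),
      new ThreadPoolExecutor.CallerRunsPolicy());  // apply back-pressure when saturated
}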
private FlexibleThreadPoolWrapper(int minThreadCount, int maxThreadCount, long keepAlive,
    TimeUnit timeUnit, ThreadFactory threadFactory, BlockingQueue<Runnable> queue) {
  executor = new ThreadPoolExecutor(minThreadCount, maxThreadCount, keepAlive, timeUnit,
      queue, threadFactory, new FlexibleRejectionHandler());
  executor.prestartAllCoreThreads();
}
/** Creates default implementation of task executor */
public static Executor createExecutor(int threadPoolSize, int threadPriority,
    QueueProcessingType tasksProcessingType) {
  boolean lifo = tasksProcessingType == QueueProcessingType.LIFO;
  BlockingQueue<Runnable> taskQueue =
      lifo ? new LIFOLinkedBlockingDeque<Runnable>() : new LinkedBlockingQueue<Runnable>();
  return new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 0L, TimeUnit.MILLISECONDS,
      taskQueue, createThreadFactory(threadPriority, "uil-pool-"));
}
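A usage sketch (illustrative, not from the original source): requesting the LIFO variant from the factory above so that the most recently queued tasks are dispatched first. The pool size, priority, and submitted task are assumptions.

// Illustrative call; the argument values are examples only.
Executor displayTaskExecutor =
    createExecutor(3, Thread.NORM_PRIORITY, QueueProcessingType.LIFO);
displayTaskExecutor.execute(new Runnable() {
  @Override
  public void run() {
    // with the LIFO deque, the newest queued tasks run before older ones
  }
});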
public void shutdownWebDriver(WebDriverEx webDriver, Request request) {
  DriverType driverType = chooser.choose(request);
  BlockingQueue<WebDriverEx> queue = queueMap.get(driverType);
  if (queue != null) {
    webDriver.shutdown();
    if (queue instanceof LandlordBlockingQueue) {
      ((LandlordBlockingQueue) queue).resetOne();
    }
  }
}
SleepAndSpawn(Object id, AtomicInteger var, StateContext context,
    BlockingQueue<Continuation> workQueue) {
  this.id = id;
  this.var = var;
  this.context = Objects.requireNonNull(context);
  this.workQueue = Objects.requireNonNull(workQueue);
}
/**
 * Creates a consumer that sends messages taken from the given queue.
 *
 * @param messageQueue the queue from which messages to be sent are taken
 */
public MessageSenderConsumer(BlockingQueue<Message> messageQueue) {
  this();
  this.messageQueue = messageQueue;
}
public void assertCanPut(BlockingQueue<Schedulable> cq, int numberOfPuts, int putAttempts)
    throws InterruptedException {
  CountDownLatch latch = new CountDownLatch(numberOfPuts);
  Putter putter = new Putter(cq, putAttempts, null, latch);
  Thread t = new Thread(putter);
  t.start();
  latch.await();

  assertEquals(numberOfPuts, putter.callsAdded);
  t.interrupt();
}
public BaseWaySegmentProducerImpl(IQueuingGraphInputFormat<T> inputFormat, InputStream stream,
    BlockingQueue<T> segmentsQueue, BlockingQueue<IWayGraphVersionMetadata> metadataQueue) {
  super(inputFormat, stream, segmentsQueue);
  this.inputFormat = inputFormat;
  this.metadataQueue = metadataQueue;
}
private void zetOpdrachtenOpQueue(SelectieVerwerkTaakBericht selectieTaak, int verwerkerPoolSize,
    BlockingQueue<MaakPersoonslijstBatchOpdracht> persoonsBeeldTaakQueue)
    throws InterruptedException {
  // put the work orders on the queue
  final List<List<SelectiePersoonBericht>> bundelChunks =
      Lists.partition(selectieTaak.getPersonen(), verwerkerPoolSize);
  for (List<SelectiePersoonBericht> bundelChunk : bundelChunks) {
    final MaakPersoonslijstBatchOpdracht maakPersoonslijstBatchOpdracht =
        new MaakPersoonslijstBatchOpdracht();
    maakPersoonslijstBatchOpdracht.setCaches(bundelChunk);
    persoonsBeeldTaakQueue.put(maakPersoonslijstBatchOpdracht);
  }
}
@Override
protected void initInternal() throws LifecycleException {
  BlockingQueue<Runnable> startStopQueue = new LinkedBlockingQueue<Runnable>();
  startStopExecutor = new ThreadPoolExecutor(
      getStartStopThreadsInternal(),
      getStartStopThreadsInternal(), 10, TimeUnit.SECONDS,
      startStopQueue,
      new StartStopThreadFactory(getName() + "-startStop-"));
  startStopExecutor.allowCoreThreadTimeOut(true);
  super.initInternal();
}
/**
 * Queue transitions from empty to full when elements added.
 */
public void testEmptyFull() {
  BlockingQueue q = populatedQueue(0, 2, 2, false);
  assertTrue(q.isEmpty());
  assertEquals(2, q.remainingCapacity());
  q.add(one);
  assertFalse(q.isEmpty());
  assertTrue(q.offer(two));
  assertFalse(q.isEmpty());
  assertEquals(0, q.remainingCapacity());
  assertFalse(q.offer(three));
}
public LifecycleModule() {
  // The thread pool is unbounded, so use direct handoff
  BlockingQueue<Runnable> queue = new SynchronousQueue<Runnable>();
  // Discard tasks that are submitted during shutdown
  RejectedExecutionHandler policy = new ThreadPoolExecutor.DiscardPolicy();
  // Create threads as required and keep them in the pool for 60 seconds
  ioExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60, SECONDS, queue, policy);
}
@Override
public void rejectedExecution(Runnable runnable, ThreadPoolExecutor executor) {
  if (threadName != null) {
    LOG.error("MythTransaction Thread pool [{}] is exhausted, executor={}", threadName,
        executor.toString());
  }
  if (!executor.isShutdown()) {
    BlockingQueue<Runnable> queue = executor.getQueue();
    int discardSize = queue.size() >> 1;
    for (int i = 0; i < discardSize; i++) {
      queue.poll();
    }
    queue.offer(runnable);
  }
}
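A wiring sketch (illustrative): installing the handler above as the pool's RejectedExecutionHandler so that, when the bounded queue fills, the oldest half of the backlog is dropped and the rejected task is re-offered. The handler's class name, pool sizes, and queue capacity are assumptions, not values from the original project.

// Hypothetical handler class name and example sizes; only the discard-half behaviour
// comes from the rejectedExecution method above.
ThreadPoolExecutor txExecutor = new ThreadPoolExecutor(
    2, 4, 60, TimeUnit.SECONDS,
    new LinkedBlockingQueue<Runnable>(1024),
    Executors.defaultThreadFactory(),
    new DiscardOldestHalfPolicy("myth-tx"));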
/**
 * Drains the queue as {@linkplain #drain(BlockingQueue, Collection, int, long, TimeUnit)},
 * but with a different behavior in case it is interrupted while waiting. In that case, the
 * operation will continue as usual, and in the end the thread's interruption status will be set
 * (no {@code InterruptedException} is thrown).
 *
 * @param q the blocking queue to be drained
 * @param buffer where to add the transferred elements
 * @param numElements the number of elements to be waited for
 * @param timeout how long to wait before giving up, in units of {@code unit}
 * @param unit a {@code TimeUnit} determining how to interpret the timeout parameter
 * @return the number of elements transferred
 */
@Beta
public static <E> int drainUninterruptibly(BlockingQueue<E> q, Collection<? super E> buffer,
    int numElements, long timeout, TimeUnit unit) {
  Preconditions.checkNotNull(buffer);
  long deadline = System.nanoTime() + unit.toNanos(timeout);
  int added = 0;
  boolean interrupted = false;
  try {
    while (added < numElements) {
      // we could rely solely on #poll, but #drainTo might be more efficient when there are
      // multiple elements already available (e.g. LinkedBlockingQueue#drainTo locks only once)
      added += q.drainTo(buffer, numElements - added);
      if (added < numElements) { // not enough elements immediately available; will have to poll
        E e; // written exactly once, by a successful (uninterrupted) invocation of #poll
        while (true) {
          try {
            e = q.poll(deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
            break;
          } catch (InterruptedException ex) {
            interrupted = true; // note interruption and retry
          }
        }
        if (e == null) {
          break; // we already waited enough, and there are no more elements in sight
        }
        buffer.add(e);
        added++;
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
  return added;
}
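A short usage sketch (not from the original source): collecting a batch of up to 100 events, waiting at most one second, while deferring any interrupt until the batch has been gathered. The queue, element type, batch size, and method name are assumptions.

// Sketch: names and sizes are illustrative assumptions.
private static List<String> nextBatch(BlockingQueue<String> events) {
  List<String> batch = new ArrayList<>();
  // Returns as soon as 100 elements have been transferred, or after roughly one second
  // with whatever was available; an interrupt only re-sets the thread's interrupt flag.
  drainUninterruptibly(events, batch, 100, 1, TimeUnit.SECONDS);
  return batch;
}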
private void assertUninterruptibleDrained(BlockingQueue<Object> q) {
  assertEquals(0, Queues.drainUninterruptibly(q, ImmutableList.of(), 0, 10, MILLISECONDS));

  // but does the wait actually occur?
  threadPool.submit(new Interrupter(currentThread()));

  Stopwatch timer = Stopwatch.createStarted();
  Queues.drainUninterruptibly(q, newArrayList(), 1, 10, MILLISECONDS);
  assertThat(timer.elapsed(MILLISECONDS)).isAtLeast(10L);

  // wait for interrupted status and clear it
  while (!Thread.interrupted()) {
    Thread.yield();
  }
}
@Before
public void setUp() throws Exception {
  // We want to count additional events, so we reset here
  mockQueueConstructions = 0;
  mockQueuePuts = 0;

  int portRetries = 5;
  int nnPort;
  for (; portRetries > 0; --portRetries) {
    // Pick a random port in the range [30000,60000).
    nnPort = 30000 + rand.nextInt(30000);
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey, MockCallQueue.class, BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Retry with a different port number.
    }
  }

  if (portRetries == 0) {
    // Bail if we get very unlucky with our choice of ports.
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}
private DataFrame consume(BlockingQueue<DataFrame> bq) throws InterruptedException {
  DataFrame dfRet = null;
  // TODO make this a parameter
  int timeOverlap = WINDOW_SZ / 2;
  dfRet = bq.take();

  // send the collection process the last half second of collected data via the queue
  int row = dfRet.length() - 1;
  int rowFinal = row;
  long timestampFinal = (long) dfRet.get(rowFinal, 0);
  long timestampActual = (long) dfRet.get(row, 0);
  // TODO make these numbers parameters
  while ((timestampFinal - timestampActual) <= (timeOverlap - 15)) {
    row--;
    timestampActual = (long) dfRet.get(row, 0);
  }
  // TODO slice the DataFrame over [row, rowFinal] and put it on the queue for the
  // data-collection service to pick up
  DataFrame df = dfRet.slice(row, rowFinal + 1);
  this.queueProcRecog.put(df);

  return dfRet;
}
/**
 * Gets a result from the queue, waiting up to the given timeout.
 *
 * @param blockingQueue the queue holding the result
 * @param timeout how long to wait before giving up
 * @param unit the time unit of the timeout argument
 * @return the result, or {@code null} if none arrived in time
 * @throws InterruptedIOException if the calling thread is interrupted while waiting
 */
public static Object getResult(BlockingQueue<Object> blockingQueue, long timeout, TimeUnit unit)
    throws InterruptedIOException {
  Object result;
  try {
    result = blockingQueue.poll(timeout, unit);
    if (result == null) {
      // timed out: offer an empty-string sentinel; if the offer fails, a real result
      // arrived between the poll timing out and now, so take it instead
      if (!blockingQueue.offer("")) {
        result = blockingQueue.take();
      }
    }
  } catch (InterruptedException e) {
    throw ExceptionUtil.initCause(new InterruptedIOException(e.getMessage()), e);
  }
  return result;
}
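A caller-side sketch (an assumption, not from the original code): the requester parks on a single-slot response queue; on timeout, getResult leaves the empty-string sentinel behind, presumably so a late producer's offer fails and it can tell the waiter has gone. Queue capacity, timeout, and the registration/delivery side are illustrative only.

// Illustrative values; how responses are registered and delivered is not shown here.
BlockingQueue<Object> responseQueue = new ArrayBlockingQueue<>(1);
// ... register responseQueue under the request id and send the request ...
Object response = getResult(responseQueue, 5, TimeUnit.SECONDS);
if (response == null) {
  // no answer arrived within the timeout
}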
/**
 * Tests that a queue with one element and a capacity of one has its only element replaced.
 */
@Test
void testReplaceOnlyElementSmallCapacity() {
  final Runnable lastRunnable = mock(Runnable.class);
  final BlockingQueue<Runnable> queue =
      new ArrayBlockingQueue<>(1, true, Arrays.asList(lastRunnable));
  when(executor.getQueue()).thenReturn(queue);

  policy.rejectedExecution(newRunnable, executor);

  verify(executor).remove(lastRunnable);
  verify(executor).submit(newRunnable);
}
/**
 * purge removes cancelled tasks from the queue.
 */
public void testPurge() throws InterruptedException {
  final CountDownLatch threadStarted = new CountDownLatch(1);
  final CountDownLatch done = new CountDownLatch(1);
  final BlockingQueue<Runnable> q = new ArrayBlockingQueue<>(10);
  final ThreadPoolExecutor p =
      new ThreadPoolExecutor(1, 1, LONG_DELAY_MS, MILLISECONDS, q);
  try (PoolCleaner cleaner = cleaner(p, done)) {
    FutureTask[] tasks = new FutureTask[5];
    for (int i = 0; i < tasks.length; i++) {
      Callable task = new CheckedCallable<Boolean>() {
        public Boolean realCall() throws InterruptedException {
          threadStarted.countDown();
          await(done);
          return Boolean.TRUE;
        }};
      tasks[i] = new FutureTask(task);
      p.execute(tasks[i]);
    }
    await(threadStarted);
    assertEquals(tasks.length, p.getTaskCount());
    assertEquals(tasks.length - 1, q.size());
    assertEquals(1L, p.getActiveCount());
    assertEquals(0L, p.getCompletedTaskCount());
    tasks[4].cancel(true);
    tasks[3].cancel(false);
    p.purge();
    assertEquals(tasks.length - 3, q.size());
    assertEquals(tasks.length - 2, p.getTaskCount());
    p.purge();         // Nothing to do
    assertEquals(tasks.length - 3, q.size());
    assertEquals(tasks.length - 2, p.getTaskCount());
  }
}
/**
 * Peek, like poll, provides no strict consistency.
 */
@Override
public E peek() {
  BlockingQueue<E> q = this.getFirstNonEmptyQueue(0);
  if (q == null) {
    return null;
  } else {
    return q.peek();
  }
}
@Override
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
  int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();

  long nanos = unit.toNanos(timeout);
  takeLock.lockInterruptibly();
  try {
    for (;;) {
      BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
      if (q != null) {
        E e = q.poll();
        if (e != null) {
          // Escape condition: there might be something available
          return e;
        }
      }

      if (nanos <= 0) {
        // Wait has elapsed
        return null;
      }

      try {
        // Now wait on the condition for a bit. If we get
        // spuriously awoken we'll re-loop
        nanos = notEmpty.awaitNanos(nanos);
      } catch (InterruptedException ie) {
        notEmpty.signal(); // propagate to a non-interrupted thread
        throw ie;
      }
    }
  } finally {
    takeLock.unlock();
  }
}
CustomTPE(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
    BlockingQueue<Runnable> workQueue) {
  super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
}
@BeforeExperiment
@SuppressWarnings("unchecked")
void setUp() throws Exception {
  String prefix =
      (useMonitor ? "com.google.common.util.concurrent.MonitorBased" : "java.util.concurrent.");
  String className = prefix + queueType + "BlockingQueue";
  Constructor<?> constructor = Class.forName(className).getConstructor(int.class);
  queue = (BlockingQueue<String>) constructor.newInstance(capacity);

  strings = new String[capacity];
  for (int i = 0; i < capacity; i++) {
    strings[i] = String.valueOf(Math.random());
  }
}
/**
 * remove(task) removes queued task, and fails to remove active task.
 */
public void testRemove() throws InterruptedException {
  final CountDownLatch done = new CountDownLatch(1);
  final ScheduledThreadPoolExecutor p = new CustomExecutor(1);
  try (PoolCleaner cleaner = cleaner(p, done)) {
    ScheduledFuture[] tasks = new ScheduledFuture[5];
    final CountDownLatch threadStarted = new CountDownLatch(1);
    for (int i = 0; i < tasks.length; i++) {
      Runnable r = new CheckedRunnable() {
        public void realRun() throws InterruptedException {
          threadStarted.countDown();
          await(done);
        }};
      tasks[i] = p.schedule(r, 1, MILLISECONDS);
    }
    await(threadStarted);
    BlockingQueue<Runnable> q = p.getQueue();
    assertFalse(p.remove((Runnable) tasks[0]));
    assertTrue(q.contains((Runnable) tasks[4]));
    assertTrue(q.contains((Runnable) tasks[3]));
    assertTrue(p.remove((Runnable) tasks[4]));
    assertFalse(p.remove((Runnable) tasks[4]));
    assertFalse(q.contains((Runnable) tasks[4]));
    assertTrue(q.contains((Runnable) tasks[3]));
    assertTrue(p.remove((Runnable) tasks[3]));
    assertFalse(q.contains((Runnable) tasks[3]));
  }
}
/**
 * Pumps data from the queue, within the agent context.
 *
 * @param apmAgentContext the agent context
 * @param queue the data queue
 */
protected void pumpData(ApmAgentContext apmAgentContext, BlockingQueue<T> queue)
    throws IOException {
  final List<T> data = new ArrayList<>();
  queue.drainTo(data, apmAgentContext.getPumpBatchSize());
  if (data.isEmpty()) {
    logger.info("No data to pump");
  } else {
    logger.info("Pumping data");
    sendData(apmAgentContext, data);
  }
}