public IndexResultScanner(List<SingleScanner> scanners, byte[][] resultColumns,
        int resultBufferSize, float loadFactor) {
    this.scanners = scanners;
    this.resultColumns = resultColumns;
    this.resultBufferSize = resultBufferSize;
    this.loadFactor = loadFactor;
    this.minLoadSize = (int) (this.resultBufferSize * this.loadFactor);
    this.resultBuffer = new LinkedBlockingQueue<Result>(resultBufferSize);
    LOG.debug("IndexResultScanner is started!");
    this.scannerNum = this.scanners.size();
    this.finishedScanner = new ArrayList<String>();
    this.stoppedScanner = new ArrayList<String>();
    this.startTime = System.currentTimeMillis();
    int i = 0;
    for (SingleScanner scanner : this.scanners) {
        scanner.setName("Scanner" + i++);
        scanner.setIndexResultScanner(this);
        scanner.start();
    }
    this.restartTimes = 0;
    this.MAX_RESTART_TIMES = HBaseConfiguration.create().getInt("hbase.client.retries.number", 10);
}
/**
 * Create a cache which will reside in {@code directory}. This cache is lazily initialized on
 * first access and will be created if it does not exist.
 *
 * @param directory a writable directory
 * @param valueCount the number of values per cache entry. Must be positive.
 * @param maxSize the maximum number of bytes this cache should use to store
 */
public static DiskLruCache create(FileSystem fileSystem, File directory, int appVersion,
        int valueCount, long maxSize) {
    if (maxSize <= 0) {
        throw new IllegalArgumentException("maxSize <= 0");
    }
    if (valueCount <= 0) {
        throw new IllegalArgumentException("valueCount <= 0");
    }

    // Use a single background thread to evict entries.
    Executor executor = new ThreadPoolExecutor(0, 1, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), Util.threadFactory("OkHttp DiskLruCache", true));

    return new DiskLruCache(fileSystem, directory, appVersion, valueCount, maxSize, executor);
}
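// A minimal usage sketch for the factory above; the directory, version, and size values
// below are illustrative assumptions, not values from the source.
DiskLruCache cache = DiskLruCache.create(
        FileSystem.SYSTEM,            // assumes OkHttp's FileSystem.SYSTEM singleton
        new File("/tmp/http-cache"),  // hypothetical writable directory
        1,                            // appVersion
        1,                            // valueCount: one value per cache entry
        10L * 1024 * 1024);           // maxSize: 10 MiB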
@Test
public void rightBoundsOfProduceTasksTest() throws Exception {
    BlockingQueue<Future<Optional<byte[]>>> tasksQueue = new LinkedBlockingQueue<>();
    S3DataLoaderMocker.mockPrimitiveLoadFromTo(mockFactory, DATA_SIZE);
    new ParallelPartsLoader(
            S3DataLoaderMocker.FAKE_URI, 0, DATA_SIZE, mockFactory, tasksQueue
    );
    int numberOfFullChunks = 7;
    for (int i = 0; i < numberOfFullChunks; i++) {
        int ruleForIncreaseChunkSize = (i / 2 + 1) < 4 ? i / 2 + 1 : 3;
        checkRightSizeOfChunk(tasksQueue, (int) Math.pow(MIN_PART_SIZE, ruleForIncreaseChunkSize));
    }
    checkRightSizeOfChunk(tasksQueue, 1);
    Assert.assertFalse(tasksQueue.take().get().isPresent());
}
/**
 * Create a cache for a destination table.
 *
 * @param cxt the bireme context
 * @param tableName the name of the table to cache
 * @param pipeLine the pipeline this cache belongs to
 */
public RowCache(Context cxt, String tableName, PipeLine pipeLine) {
    this.cxt = cxt;
    this.tableName = tableName;
    this.pipeLine = pipeLine;
    this.lastMergeTime = new Date().getTime();
    this.mergeInterval = cxt.conf.merge_interval;
    this.batchSize = cxt.conf.batch_size;
    this.rows = new LinkedBlockingQueue<Row>(cxt.conf.batch_size * 2);
    this.commitCallback = new LinkedBlockingQueue<CommitCallback>();
    this.localMerger = new LinkedList<RowBatchMerger>();
    for (int i = 0; i < cxt.conf.loader_task_queue_size; i++) {
        localMerger.add(new RowBatchMerger());
    }
    this.mergeResult = new LinkedBlockingQueue<Future<LoadTask>>(cxt.conf.loader_task_queue_size);
    this.loader = new ChangeLoader(cxt, pipeLine, tableName, mergeResult);

    // add statistics
    pipeLine.stat.addGaugeForCache(tableName, this);
}
private ThreadPoolExecutor createIOPoolExecutor() {
    // Thread factory for IO threads
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(@NonNull Runnable runnable) {
            Thread thread = new Thread(runnable);
            thread.setName("dunit-io");
            return thread;
        }
    };
    // Rejection policy that silently discards newly submitted tasks
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.DiscardPolicy();
    // Create a thread pool with at most 3 threads
    ThreadPoolExecutor poolExecutor = new ThreadPoolExecutor(1, 3, 3, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(10), threadFactory, rejectedExecutionHandler);
    // Allow idle core threads to time out and be reclaimed
    poolExecutor.allowCoreThreadTimeOut(true);
    return poolExecutor;
}
@Test
public void taskProducerShouldTerminateWhenItIsCanceled() throws InterruptedException {
    BlockingQueue<Future<Optional<byte[]>>> tasksQueue = new LinkedBlockingQueue<>();
    ParallelPartsLoader taskProducer = new ParallelPartsLoader(
            S3DataLoaderMocker.FAKE_URI, 0, DATA_SIZE, mockFactory, tasksQueue
    );
    CompletableFuture.runAsync(taskProducer)
            .thenAccept(r -> taskProducer.cancelLoading())
            .thenAccept(r -> Assert.assertTrue(tasksQueue.isEmpty()))
            .join(); // block until the chain completes so assertion failures fail the test
}
static void putAllCollections(Map<Class<?>, IntFunction<?>> map,
        Map<Class<?>, Function<?, ?>> unmodMap) {
    safePut(map, ArrayList.class, ArrayList::new);
    safePut(map, HashSet.class, LinkedHashSet::new);
    safePut(map, Properties.class, x -> new Properties());
    safePut(map, Hashtable.class, Hashtable::new);
    safePut(map, Collection.class, ArrayList::new);
    safePut(map, Set.class, LinkedHashSet::new);
    safePut(map, List.class, ArrayList::new);
    safePut(map, SortedSet.class, x -> new TreeSet<>());
    safePut(map, Queue.class, x -> new ConcurrentLinkedQueue<>());
    safePut(map, Deque.class, x -> new ConcurrentLinkedDeque<>());
    safePut(map, BlockingQueue.class, x -> new LinkedBlockingQueue<>());
    safePut(map, BlockingDeque.class, x -> new LinkedBlockingDeque<>());
    safePut(map, HashMap.class, LinkedHashMap::new);
    safePut(map, LinkedHashMap.class, LinkedHashMap::new);
    safePut(map, ConcurrentHashMap.class, ConcurrentHashMap::new);
    safePut(map, Map.class, LinkedHashMap::new);
    safePut(map, ConcurrentMap.class, x -> new ConcurrentSkipListMap<>());
    safePut(map, ConcurrentNavigableMap.class, x -> new ConcurrentSkipListMap<>());
    safePut(map, SortedMap.class, i -> new TreeMap<>());
}
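// safePut is not shown in this snippet; a minimal sketch of what such a helper presumably
// does (the putIfAbsent guard is an assumption, not confirmed by the source):
static <T> void safePut(Map<Class<?>, IntFunction<?>> map, Class<T> key,
        IntFunction<? extends T> factory) {
    // Register the capacity-taking factory only if the type is not already mapped,
    // so earlier (more specific) registrations win.
    map.putIfAbsent(key, factory);
}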
/**
 * Check whether the route plan is congested.
 *
 * @param target the destination
 * @param routeModel the route plan
 * @param sb road condition info
 * @return true if congested
 */
private boolean setRoadTrafficNotInNavigate(String target, RouteModel routeModel, StringBuilder sb) {
    routeModel.refreshRoadCondition();
    Log.i(TAG, "conditionNodes.size=" + routeModel.getConditionNodes().size());
    Queue<Integer> rQueue = new LinkedBlockingQueue<Integer>();
    int congestionCount = 0;
    for (int i = 0; i < routeModel.getConditionNodes().size(); ++i) {
        if (routeModel.getConditionNodes().get(i).getRoadCondition() >= RouteModel.ROAD_CONDITION_TYPE_Slow) {
            rQueue.offer(i);
            if (routeModel.getConditionNodes().get(i).getRoadCondition() > RouteModel.ROAD_CONDITION_TYPE_Slow) {
                congestionCount++;
            }
        }
    }
    return setTrafficState(sb, routeModel, rQueue, congestionCount, 0);
}
@Override
public void init(FloodlightModuleContext context) throws FloodlightModuleException {
    linkDiscoveryService = context.getServiceImpl(ILinkDiscoveryService.class);
    threadPoolService = context.getServiceImpl(IThreadPoolService.class);
    floodlightProviderService = context.getServiceImpl(IFloodlightProviderService.class);
    switchService = context.getServiceImpl(IOFSwitchService.class);
    restApiService = context.getServiceImpl(IRestApiService.class);
    debugCounterService = context.getServiceImpl(IDebugCounterService.class);
    debugEventService = context.getServiceImpl(IDebugEventService.class);

    switchPorts = new HashMap<DatapathId, Set<OFPort>>();
    switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
    directLinks = new HashMap<NodePortTuple, Set<Link>>();
    portBroadcastDomainLinks = new HashMap<NodePortTuple, Set<Link>>();
    tunnelPorts = new HashSet<NodePortTuple>();
    topologyAware = new ArrayList<ITopologyListener>();
    ldUpdates = new LinkedBlockingQueue<LDUpdate>();
    haListener = new HAListenerDelegate();
    registerTopologyDebugCounters();
    registerTopologyDebugEvents();
}
public static void main(String[] args) {
    BlockingQueue<Integer> blockingQueue = new LinkedBlockingQueue<Integer>(3);
    Producer producer = new Producer(blockingQueue);
    Consumer consumer = new Consumer(blockingQueue);
    // Start 5 producer threads and 1 consumer thread
    for (int i = 0; i < 6; i++) {
        if (i < 5) {
            new Thread(producer, "producer" + i).start();
        } else {
            new Thread(consumer, "consumer" + (i - 5)).start();
        }
    }
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    producer.shutDown();
    consumer.shutDown();
}
protected View findScrollableViewInternal(View content, boolean selfable) {
    View scrollableView = null;
    Queue<View> views = new LinkedBlockingQueue<>(Collections.singletonList(content));
    while (!views.isEmpty() && scrollableView == null) {
        View view = views.poll();
        if (view != null) {
            if ((selfable || view != content) && isScrollableView(view)) {
                scrollableView = view;
            } else if (view instanceof ViewGroup) {
                ViewGroup group = (ViewGroup) view;
                for (int j = 0; j < group.getChildCount(); j++) {
                    views.add(group.getChildAt(j));
                }
            }
        }
    }
    return scrollableView == null ? content : scrollableView;
}
private void initDictionaryCaches(DictionaryDAOImpl dictionaryDAO, TenantService tenantService) {
    CompiledModelsCache compiledModelsCache = new CompiledModelsCache();
    compiledModelsCache.setDictionaryDAO(dictionaryDAO);
    compiledModelsCache.setTenantService(tenantService);
    compiledModelsCache.setRegistry(new DefaultAsynchronouslyRefreshedCacheRegistry());
    TraceableThreadFactory threadFactory = new TraceableThreadFactory();
    threadFactory.setThreadDaemon(true);
    threadFactory.setThreadPriority(Thread.NORM_PRIORITY);
    ThreadPoolExecutor threadPoolExecutor = new DynamicallySizedThreadPoolExecutor(20, 20, 90,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory,
            new ThreadPoolExecutor.CallerRunsPolicy());
    compiledModelsCache.setThreadPoolExecutor(threadPoolExecutor);
    dictionaryDAO.setDictionaryRegistryCache(compiledModelsCache);
    dictionaryDAO.init();
}
/**
 * UPDATE!
 * @param syncs the batch of calls to sync that arrived as this thread was starting; when done,
 *     we will put the result of the actual hdfs sync call as the result.
 * @param sequence The sequence number on the ring buffer when this thread was set running.
 *     If this actual writer sync completes then all appends up to this point have been
 *     flushed/synced/pushed to datanodes. If we fail, then the passed in <code>syncs</code>
 *     futures will return the exception to their clients; some of the edits may have made it out
 *     to data nodes but we will report all that were part of this session as failed.
 */
SyncRunner(final String name, final int maxHandlersCount) {
    super(name);
    // LinkedBlockingQueue because of
    // http://www.javacodegeeks.com/2010/09/java-best-practices-queue-battle-and.html
    // Could use other blocking queues here or concurrent queues.
    //
    // We could let the capacity be 'open' but bound it so we get alerted in the pathological
    // case where we cannot sync and we have a bunch of threads all backed up waiting on their
    // syncs to come in. LinkedBlockingQueue actually shrinks when you remove elements, so the
    // queue should stay neat and tidy in the usual case. Let the max size be three times the
    // maximum handler count: the passed-in maxHandlersCount is the user-level handler count,
    // which is most of what we put up, but HBase has other handlers running too -- opening-region
    // handlers which want to write the meta table when successful (i.e. sync), closing handlers,
    // etc. These are usually much fewer in number than the user-space handlers, so the queue
    // size should be the user handler count plus some space for these other handlers. Multiply
    // by 3 for good measure.
    this.syncFutures = new LinkedBlockingQueue<SyncFuture>(maxHandlersCount * 3);
}
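// The sizing comment above relies on LinkedBlockingQueue rejecting offers once its capacity
// bound is hit; a self-contained sketch of that behavior (names are illustrative):
LinkedBlockingQueue<String> bounded = new LinkedBlockingQueue<>(2);
bounded.offer("a");                     // true: accepted
bounded.offer("b");                     // true: accepted, queue now full
boolean accepted = bounded.offer("c");  // false: bound reached, the caller is alerted
                                        // instead of the queue growing silently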
/**
 * Output the items in the stream with a function.
 * This method will block until the result returns.
 * E.g. `output(StatisticOperators.count())` will output the number of items.
 *
 * @param itemsCollector the function used to output the current stream
 * @param <Tout> the type of the result
 * @return the result
 * @throws PSException if it fails to get the result.
 */
@PSAction(blocking = true)
public <Tout> Tout output(Function<List<Item>, Tout> itemsCollector) throws PSException {
    final BlockingQueue<Object> resultQueue = new LinkedBlockingQueue<>();
    Callback<Tout> resultHandler = new Callback<Tout>() {
        @Override
        protected void onInput(Tout input) {
            resultQueue.add(input);
        }

        @Override
        protected void onFail(PSException exception) {
            resultQueue.add(exception);
        }
    };
    this.output(itemsCollector, resultHandler);
    try {
        Object resultOrException = resultQueue.take();
        if (resultOrException instanceof PSException) {
            throw (PSException) resultOrException;
        }
        return (Tout) resultOrException;
    } catch (InterruptedException e) {
        throw PSException.INTERRUPTED(e.getMessage());
    }
}
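// A hypothetical call site for the blocking output() above, following the javadoc's own
// StatisticOperators.count() example; `stream` and the result type are assumptions.
int itemCount = stream.output(StatisticOperators.count()); // blocks until the count is computed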
/**
 * Converts geographic coordinates to pixel coordinates in parallel.
 *
 * @param coords the geographic coordinates to convert
 * @return the corresponding pixel coordinates
 * @throws InterruptedException
 * @throws ExecutionException
 */
public List<double[]> parallelPixelFromGeo(final Coordinate[] coords)
        throws InterruptedException, ExecutionException {
    int processors = Runtime.getRuntime().availableProcessors();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(2, processors, 2, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    try {
        final List<Future<double[]>> tasks = new ArrayList<Future<double[]>>();
        for (int i = 0; i < coords.length; i++) {
            tasks.add(executor.submit(new ParallelReverse(coords[i].x, coords[i].y)));
        }
        executor.shutdown();
        final List<double[]> points = new ArrayList<double[]>();
        for (Future<double[]> result : tasks) {
            List<double[]> l = Arrays.asList(result.get());
            points.addAll(l);
        }
        return points;
    } catch (Exception e) {
        if (!executor.isShutdown()) {
            executor.shutdown();
        }
        throw e;
    }
}
/**
 * Create an AsyncDiskService with a set of volumes (specified by their
 * root directories).
 *
 * The AsyncDiskService uses one ThreadPool per volume to do the async
 * disk operations.
 *
 * @param volumes The roots of the file system volumes.
 */
public AsyncDiskService(String[] volumes) {
    threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(threadGroup, r);
        }
    };

    // Create one ThreadPool per volume
    for (int v = 0; v < volumes.length; v++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
                THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory);

        // This can reduce the number of running threads
        executor.allowCoreThreadTimeOut(true);
        executors.put(volumes[v], executor);
    }
}
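// A hypothetical construction of the per-volume service; the volume roots are illustrative.
AsyncDiskService diskService = new AsyncDiskService(
        new String[] { "/data/volume1", "/data/volume2" });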
private void migrateData() throws SQLException {
    executor = new ThreadPoolExecutor(margs.getThreadCount(), margs.getThreadCount(), 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new ThreadPoolExecutor.CallerRunsPolicy());
    for (TableMigrateInfo table : migrateTables) {
        if (!table.isError()) { // skip sharded tables that have already failed
            List<DataNodeMigrateInfo> detailList = table.getDataNodesDetail();
            for (DataNodeMigrateInfo info : detailList) {
                executor.execute(new DataMigrateRunner(table, info.getSrc(), info.getTarget(),
                        table.getTableName(), info.getTempFile()));
            }
        }
    }
    executor.shutdown();
    while (true) {
        if (executor.isTerminated()) {
            break;
        }
        try {
            Thread.sleep(200);
        } catch (InterruptedException e) {
            LOGGER.error("error", e);
        }
    }
}
public WebSocketImpl(WebSocketListener listener, Draft draft) {
    this.flushandclosestate = false;
    this.readystate = READYSTATE.NOT_YET_CONNECTED;
    this.draft = null;
    this.current_continuous_frame_opcode = null;
    this.handshakerequest = null;
    this.closemessage = null;
    this.closecode = null;
    this.closedremotely = null;
    if (listener == null || (draft == null && this.role == Role.SERVER)) {
        throw new IllegalArgumentException("parameters must not be null");
    }
    this.outQueue = new LinkedBlockingQueue<ByteBuffer>();
    this.inQueue = new LinkedBlockingQueue<ByteBuffer>();
    this.wsl = listener;
    this.role = Role.CLIENT;
    if (draft != null) {
        this.draft = draft.copyInstance();
    }
}
public List<double[]> parallelGeoFromPixel(final Coordinate[] coords)
        throws InterruptedException, ExecutionException {
    int processors = Runtime.getRuntime().availableProcessors();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(2, processors, 5000,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    List<Callable<double[]>> tasks = new ArrayList<Callable<double[]>>();
    for (final Coordinate c : coords) {
        tasks.add(new ParallelForward(c.y, c.x));
    }
    List<Future<double[]>> results = executor.invokeAll(tasks);
    executor.shutdown();
    List<double[]> points = new ArrayList<double[]>();
    for (Future<double[]> result : results) {
        List<double[]> l = Arrays.asList(result.get());
        points.addAll(l);
    }
    return points;
}
private ThumbnailLoader(@Nullable Context context, @Nullable String googleApiKey) {
    String metaGoogleApiKey = googleApiKey;
    if (context != null) {
        try {
            final ApplicationInfo appInfo = context.getPackageManager()
                    .getApplicationInfo(context.getPackageName(), PackageManager.GET_META_DATA);
            if (appInfo.metaData != null) {
                metaGoogleApiKey = appInfo.metaData.getString("com.codewaves.youtubethumbnailview.ApiKey");
            }
        } catch (PackageManager.NameNotFoundException e) {
            // Ignore
        }
    }

    final BlockingQueue<Runnable> taskQueue = new LinkedBlockingQueue<>();
    executor = new ThreadPoolExecutor(DEFAULT_THREAD_POOL_SIZE, DEFAULT_THREAD_POOL_SIZE, 0L,
            TimeUnit.MILLISECONDS, taskQueue);
    requestMap = new WeakHashMap<>();
    defaultInfoDownloader = new ApiVideoInfoDownloader(metaGoogleApiKey);
    defaultImageLoader = new SimpleImageLoader();
}
private BoundedThreadPool(int numberOfThreads, ThreadFactory threadFactory) {
    super(numberOfThreads, numberOfThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(maxQueueSize), threadFactory, new CallerFeedbackPolicy());
    underHighLoad = ((CallerFeedbackPolicy) getRejectedExecutionHandler()).load();
}
public SimpleMonitorService() {
    queue = new LinkedBlockingQueue<URL>(
            Integer.parseInt(ConfigUtils.getProperty("dubbo.monitor.queue", "100000")));
    writeThread = new Thread(new Runnable() {
        public void run() {
            while (running) {
                try {
                    write(); // write the statistics log
                } catch (Throwable t) { // defensive fault tolerance
                    logger.error("Unexpected error occur at write stat log, cause: " + t.getMessage(), t);
                    try {
                        Thread.sleep(5000); // back off after a failure
                    } catch (Throwable t2) {
                    }
                }
            }
        }
    });
    writeThread.setDaemon(true);
    writeThread.setName("DubboMonitorAsyncWriteLogThread");
    writeThread.start();
    chartFuture = scheduledExecutorService.scheduleWithFixedDelay(new Runnable() {
        public void run() {
            try {
                draw(); // draw the statistics charts
            } catch (Throwable t) { // defensive fault tolerance
                logger.error("Unexpected error occur at draw stat chart, cause: " + t.getMessage(), t);
            }
        }
    }, 1, 300, TimeUnit.SECONDS);
    INSTANCE = this;
}
public void initCompensatePool() {
    synchronized (LOGGER) {
        QUEUE = new LinkedBlockingQueue<>(txConfig.getCompensationQueueMax());
        final int compensationThreadMax = txConfig.getCompensationThreadMax();
        final TransactionThreadPool threadPool =
                SpringBeanUtils.getInstance().getBean(TransactionThreadPool.class);
        final ExecutorService executorService =
                threadPool.newCustomFixedThreadPool(compensationThreadMax);
        LogUtil.info(LOGGER, "number of compensation worker threads started: {}",
                () -> compensationThreadMax);
        for (int i = 0; i < compensationThreadMax; i++) {
            executorService.execute(new Worker());
        }
    }
}
public BlockChainManager(Integer index, String uri_address) {
    index_ = index;
    draft_ = new Draft_17();
    uri_ = URI.create(uri_address);
    send_queue_ = new LinkedBlockingQueue<>();
    blockchain_manager_thread = new Thread(this);
    blockchain_manager_thread.start();
}
public ConnectionOrderedChannelHandler(ChannelHandler handler, URL url) {
    super(handler, url);
    String threadName = url.getParameter(Constants.THREAD_NAME_KEY, Constants.DEFAULT_THREAD_NAME);
    connectionExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(
                    url.getPositiveParameter(Constants.CONNECT_QUEUE_CAPACITY, Integer.MAX_VALUE)),
            new NamedThreadFactory(threadName, true),
            new AbortPolicyWithReport(threadName, url)
    ); // FIXME: connectionExecutor is never shut down anywhere!
    queuewarninglimit = url.getParameter(Constants.CONNECT_QUEUE_WARNING_SIZE,
            Constants.DEFAULT_CONNECT_QUEUE_WARNING_SIZE);
}
private NetworkCommunicator() {
    this.hosts = new LinkedBlockingQueue<>();
    this.ports = new LinkedBlockingQueue<>();
    this.messages = new LinkedBlockingQueue<>();
    this.receivedMessages = new LinkedBlockingQueue<>();
    this.needsResponse = new LinkedBlockingQueue<>();
    this.interval = 200;
}
public MobileCommProcessor(Context mContext, SystemVoiceMediator mediator, Handler handler) {
    super(mContext, mediator);
    this.mHandler = handler;
    mAppConfig = (AppConfig) ((Service) mContext).getApplication();
    tPools = new ThreadPoolExecutor(10, 20, 10, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    phoneCallListener();
    // Register a content observer for the SMS inbox
    mContext.getContentResolver().registerContentObserver(
            Uri.parse(PhoneContactUtils.SMS_URI_INBOX), true, new SmsObserver(handler));
    // Register a content observer for contacts
    mContext.getContentResolver().registerContentObserver(
            ContactsContract.Contacts.CONTENT_URI, true, new ContactObserver(handler));
}
@Override
protected void initInternal() throws LifecycleException {
    BlockingQueue<Runnable> startStopQueue = new LinkedBlockingQueue<Runnable>();
    startStopExecutor = new ThreadPoolExecutor(
            getStartStopThreadsInternal(),
            getStartStopThreadsInternal(), 10, TimeUnit.SECONDS,
            startStopQueue,
            new StartStopThreadFactory(getName() + "-startStop-"));
    startStopExecutor.allowCoreThreadTimeOut(true);
    super.initInternal();
}
private static void realMain(final String[] args) throws Throwable {
    testQueue(new SynchronousQueue<Object>());
    testQueue(new ArrayBlockingQueue<Object>(1, false));
    testQueue(new ArrayBlockingQueue<Object>(1, true));
    testQueue(new LinkedBlockingQueue<Object>(1));
    testQueue(new LinkedBlockingDeque<Object>(1));
}
protected ThreadPoolExecutor createPriorityTaskExecutor() {
    final ThreadPoolExecutor priorityTpe = new ThreadPoolExecutor(12, 12, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("TaskRunner.priorityPool"));
    priorityTpe.allowCoreThreadTimeOut(true);
    return priorityTpe;
}
final void implementQueues() {
    inputBuffer = new LinkedBlockingQueue();
    internalBuffer = new PriorityQueue();
    outputQueue = new MultiReaderQueue();
}
public DBCPool(PoolConfig config) {
    maxPoolSize = config.getMaxActive();
    minIdle = config.getMinIdle();
    maxIdle = config.getMaxIdle();
    keepAliveTime = config.getMaxWait();
    conpool = new ConcurrentLinkedQueue<Connection>();
    idleQueue = new LinkedBlockingQueue<Connection>();
    initQueue();
}
/**
 * Factory method to create a new {@code ConstrainedExecutorService} with a bounded
 * {@link LinkedBlockingQueue} work queue.
 *
 * @param name Friendly name to identify the executor in logging and reporting.
 * @param maxConcurrency Maximum number of tasks to execute in parallel on the delegate executor.
 * @param queueSize Number of items that can be queued before new submissions are rejected.
 * @param executor Delegate executor for actually running tasks.
 * @return new {@code ConstrainedExecutorService} instance.
 */
public static ConstrainedExecutorService newConstrainedExecutor(
        String name, int maxConcurrency, int queueSize, Executor executor) {
    return new ConstrainedExecutorService(
            name,
            maxConcurrency,
            executor,
            new LinkedBlockingQueue<Runnable>(queueSize));
}
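// A hypothetical call site; the name, concurrency, queue size, and delegate below are
// illustrative assumptions, not values from the source.
ConstrainedExecutorService decodeExecutor = ConstrainedExecutorService.newConstrainedExecutor(
        "decode",                          // friendly name for logging
        2,                                 // at most 2 tasks run in parallel on the delegate
        64,                                // up to 64 queued tasks before rejection
        Executors.newCachedThreadPool());  // delegate that actually runs tasks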
public void initTransactionEnv() {
    TransactionMQProducer producer = (TransactionMQProducer) this.defaultMQProducer;
    this.checkRequestQueue = new LinkedBlockingQueue<Runnable>(producer.getCheckRequestHoldMax());
    this.checkExecutor = new ThreadPoolExecutor(
            producer.getCheckThreadPoolMinSize(),
            producer.getCheckThreadPoolMaxSize(),
            1000 * 60,
            TimeUnit.MILLISECONDS,
            this.checkRequestQueue);
}
/**
 * Creates a proper cached thread pool. Tasks will reuse cached threads if available
 * or create new threads until the core pool is full; tasks are then queued. If a task
 * cannot be queued, a new thread is created unless this would exceed the max pool size,
 * in which case the task is rejected. (With the unbounded queue used here, tasks can
 * always be queued.) Threads will time out after 1 second.
 *
 * Core thread timeout is only available on android-9+.
 *
 * @param threadFactory the factory to use when creating new threads
 * @return the newly created thread pool
 */
public static ExecutorService newCachedThreadPool(ThreadFactory threadFactory) {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
            CORE_POOL_SIZE, MAX_POOL_SIZE,
            KEEP_ALIVE_TIME, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), threadFactory);
    allowCoreThreadTimeout(executor, true);
    return executor;
}
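// A hypothetical usage sketch; the thread naming and the submitted task are assumptions.
ExecutorService pool = newCachedThreadPool(runnable -> {
    Thread t = new Thread(runnable);
    t.setName("cached-pool-worker");  // illustrative name
    t.setDaemon(true);
    return t;
});
pool.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));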
/**
 * poll succeeds unless empty
 */
public void testPoll() {
    LinkedBlockingQueue q = populatedQueue(SIZE);
    for (int i = 0; i < SIZE; ++i) {
        assertEquals(i, q.poll());
    }
    assertNull(q.poll());
}
DbBackedQueueImpl(String queueName, boolean autoDelete, SharedMessageStore sharedMessageStore)
        throws BrokerException {
    super(queueName, true, autoDelete);
    this.sharedMessageStore = sharedMessageStore;
    this.memQueue = new LinkedBlockingQueue<>();
    Collection<Message> messages = sharedMessageStore.readStoredMessages(queueName);
    memQueue.addAll(messages);
}
/**
 * take retrieves elements in FIFO order
 */
public void testTake() throws InterruptedException {
    LinkedBlockingQueue q = populatedQueue(SIZE);
    for (int i = 0; i < SIZE; ++i) {
        assertEquals(i, q.take());
    }
}
private ProcessorManager(IMediaReader toProcess, AdvancedOptions options, String baseFileName,
        IProcessor... processors) {
    this.toProcess = toProcess;
    this.processors = new ArrayList<>(); // TODO: Thread-safe list here instead?
    this.processors.addAll(Arrays.asList(processors));
    this.queue = new LinkedBlockingQueue<>();
    this.status = new SimpleLongProperty(0L);
    this.options = options;
    this.baseFileName = baseFileName;
}
public void execute() throws Exception {
    synchronized (exeHandlers) {
        if (terminate.get()) {
            return;
        }
        for (BaseSelectHandler exeHandler : exeHandlers) {
            MySQLConnection exeConn = exeHandler.initConnection();
            if (exeConn != null) {
                exeConn.setComplexQuery(true);
                queues.put(exeConn, new LinkedBlockingQueue<HeapItem>(queueSize));
                exeHandler.execute(exeConn);
            }
        }
    }
}