@Test
public void doChannelRead_cancels_timeout_check_if_response_finishes_before_timeout_check_occurs() throws Exception {
    // given
    ScheduledFuture timeoutCheckMock = mock(ScheduledFuture.class);
    doReturn(timeoutCheckMock).when(eventLoopMock).schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class));

    handlerSpy.doChannelRead(ctxMock, msg);

    ArgumentCaptor<BiConsumer> timeoutCheckCancellationLogicArgumentCaptor = ArgumentCaptor.forClass(BiConsumer.class);
    // The 2nd whenComplete is for cancelling the timeout check if the response finishes before the timeout
    verify(futureThatWillBeAttachedToSpy, times(2)).whenComplete(timeoutCheckCancellationLogicArgumentCaptor.capture());
    BiConsumer<ResponseInfo<?>, Throwable> timeoutCheckCancellationLogic =
        timeoutCheckCancellationLogicArgumentCaptor.getAllValues().get(1);

    // when: the timeout check scheduled future is not yet complete when the response finishes
    doReturn(false).when(timeoutCheckMock).isDone();
    timeoutCheckCancellationLogic.accept(mock(ResponseInfo.class), null);

    // then: timeout check scheduled future should be cancelled
    verify(timeoutCheckMock).cancel(false);
}
@Test
public void doChannelRead_does_nothing_to_timeout_check_if_timeout_check_is_already_completed_when_response_completes() throws Exception {
    // given
    ScheduledFuture timeoutCheckMock = mock(ScheduledFuture.class);
    doReturn(timeoutCheckMock).when(eventLoopMock).schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class));

    handlerSpy.doChannelRead(ctxMock, msg);

    ArgumentCaptor<BiConsumer> timeoutCheckCancellationLogicArgumentCaptor = ArgumentCaptor.forClass(BiConsumer.class);
    // The 2nd whenComplete is for cancelling the timeout check if the response finishes before the timeout
    verify(futureThatWillBeAttachedToSpy, times(2)).whenComplete(timeoutCheckCancellationLogicArgumentCaptor.capture());
    BiConsumer<ResponseInfo<?>, Throwable> timeoutCheckCancellationLogic =
        timeoutCheckCancellationLogicArgumentCaptor.getAllValues().get(1);

    // when: the timeout check scheduled future is already done
    doReturn(true).when(timeoutCheckMock).isDone();
    timeoutCheckCancellationLogic.accept(mock(ResponseInfo.class), null);

    // then: nothing should be done
    verify(timeoutCheckMock).isDone();
    verify(timeoutCheckMock, times(0)).cancel(any(Boolean.class));
    verifyNoMoreInteractions(timeoutCheckMock);
}
private O limitedExecute(ClientRequestContext ctx, I req) throws Exception {
    final Deferred<O> deferred = defer(ctx, req);
    final PendingTask currentTask = new PendingTask(ctx, req, deferred);

    pendingRequests.add(currentTask);
    drain();

    if (!currentTask.isRun() && timeoutMillis != 0) {
        // Current request was not delegated. Schedule a timeout.
        final ScheduledFuture<?> timeoutFuture = ctx.eventLoop().schedule(
                () -> deferred.close(ResponseTimeoutException.get()),
                timeoutMillis, TimeUnit.MILLISECONDS);
        currentTask.set(timeoutFuture);
    }

    return deferred.response();
}
@Override
public void close() {
    isRunning.set(false);

    for (ScheduledFuture<?> f : scheduledFutures) {
        f.cancel(true);
    }

    // wait for scheduled futures to cancel
    while (scheduledFutures.stream().anyMatch((f) -> !f.isDone())) {
        Uninterruptibles.sleepUninterruptibly(250, TimeUnit.MILLISECONDS);
    }

    // handle remaining items in the queue
    while (counter.get() > 0) {
        drainMessageQ();
    }

    connectionPool.close();
}
public static void udpConfirm(Session session) {
    synchronized (session.getLocker()) {
        if (!session.isUdpAuthenticated()) {
            try {
                session.getLocker().wait(5000);
            } catch (InterruptedException e) {
                Output.println(e.getMessage(), Level.DEBUG);
            }
        }
    }

    boolean authenticated = session.isUdpAuthenticated();
    session.send(MessageBuilder.udpConfirm(authenticated));

    if (!authenticated) {
        session.close();
    } else if (session.getUdpPingFuture() == null) {
        ScheduledFuture<?> udpPingFuture = session.getChannel().eventLoop()
                .scheduleAtFixedRate(new UdpPing(session), 10, 10, TimeUnit.SECONDS);
        session.setUdpPingFuture(udpPingFuture);
    }
}
private void activateTopologyRefreshIfNeeded() {
    if (getOptions() instanceof ClusterClientOptions) {
        ClusterClientOptions options = (ClusterClientOptions) getOptions();
        ClusterTopologyRefreshOptions topologyRefreshOptions = options.getTopologyRefreshOptions();

        if (!topologyRefreshOptions.isPeriodicRefreshEnabled() || clusterTopologyRefreshActivated.get()) {
            return;
        }

        if (clusterTopologyRefreshActivated.compareAndSet(false, true)) {
            ScheduledFuture<?> scheduledFuture = genericWorkerPool.scheduleAtFixedRate(clusterTopologyRefreshScheduler,
                    options.getRefreshPeriod(), options.getRefreshPeriod(), options.getRefreshPeriodUnit());
            clusterTopologyRefreshFuture.set(scheduledFuture);
        }
    }
}
/**
 * Shutdown this client and close all open connections. The client should be discarded after calling shutdown.
 *
 * @param quietPeriod the quiet period as described in the documentation
 * @param timeout     the maximum amount of time to wait until the executor is shutdown, regardless of whether a task
 *                    was submitted during the quiet period
 * @param timeUnit    the unit of {@code quietPeriod} and {@code timeout}
 */
@Override
public void shutdown(long quietPeriod, long timeout, TimeUnit timeUnit) {
    if (clusterTopologyRefreshActivated.compareAndSet(true, false)) {
        ScheduledFuture<?> scheduledFuture = clusterTopologyRefreshFuture.get();

        try {
            scheduledFuture.cancel(false);
            clusterTopologyRefreshFuture.set(null);
        } catch (Exception e) {
            logger.debug("Could not unschedule Cluster topology refresh", e);
        }
    }

    super.shutdown(quietPeriod, timeout, timeUnit);
}
void createEventLoop() {
    EventLoop realEventLoop = super.eventLoop();
    if (realEventLoop == null) {
        return;
    }
    eventLoop = mock(EventLoop.class, delegatesTo(realEventLoop));

    doAnswer(
        new Answer<ScheduledFuture<Void>>() {
            @Override
            public ScheduledFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
                Runnable command = (Runnable) invocation.getArguments()[0];
                Long delay = (Long) invocation.getArguments()[1];
                TimeUnit timeUnit = (TimeUnit) invocation.getArguments()[2];
                return new FakeClockScheduledNettyFuture(eventLoop, command, delay, timeUnit);
            }
        }).when(eventLoop).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
}
public <T, R> RFuture<R> async(long timeout, Codec encoder, RedisCommand<T> command, Object... params) {
    final RPromise<R> promise = new RedissonPromise<R>();
    if (timeout == -1) {
        timeout = redisClient.getCommandTimeout();
    }

    if (redisClient.getEventLoopGroup().isShuttingDown()) {
        RedissonShutdownException cause = new RedissonShutdownException("Redisson is shutdown");
        return RedissonPromise.newFailedFuture(cause);
    }

    final ScheduledFuture<?> scheduledFuture = redisClient.getEventLoopGroup().schedule(new Runnable() {
        @Override
        public void run() {
            RedisTimeoutException ex = new RedisTimeoutException("Command execution timeout for " + redisClient.getAddr());
            promise.tryFailure(ex);
        }
    }, timeout, TimeUnit.MILLISECONDS);

    promise.addListener(new FutureListener<R>() {
        @Override
        public void operationComplete(Future<R> future) throws Exception {
            scheduledFuture.cancel(false);
        }
    });

    send(new CommandData<T, R>(promise, encoder, command, params));
    return promise;
}
private void cancelRequestTimeout() {
    ScheduledFuture<?> timeout = this.timeout.get();
    if (timeout != null) {
        timeout.cancel(false);
    }
}
private void handleConnect(ChannelHandlerContext ctx, HttpRequest req) {
    boolean isConnected = false;
    String sessionId = HttpSessionStore.getClientSessionId(req);
    HttpChannelEntity httpChannelEntity = null;

    if (!HttpSessionStore.checkJSessionId(sessionId)) {
        sessionId = HttpSessionStore.genJSessionId();
        httpChannelEntity = new HttpJsonpChannelEntity(sessionId, true);
        MemoryMetaPool.registerClienId(sessionId, httpChannelEntity);
    } else {
        isConnected = true;
        httpChannelEntity = (HttpChannelEntity) MemoryMetaPool.getChannelEntryByClientId(sessionId);
    }

    Map<String, Object> map = new HashMap<String, Object>(2);
    map.put("status", true);
    map.put("clientId", sessionId);
    String result = gson.toJson(map);

    ByteBuf content = ctx.alloc().directBuffer().writeBytes(result.getBytes());
    FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, OK, content);

    if (isConnected) {
        res.headers().add("Set-Cookie", ServerCookieEncoder.encode("JSESSIONID", sessionId));
    }

    res.headers().set(CONTENT_TYPE, HEADER_CONTENT_TYPE);
    setContentLength(res, content.readableBytes());

    ScheduledFuture<?> scheduleTask = ctx.executor().schedule(
            new SessionTimeoutTask(sessionId), 60, TimeUnit.SECONDS);
    httpChannelEntity.setScheduleTask(scheduleTask);

    sendHttpResponse(ctx, req, res);
}
@Override
public PipelineContinuationBehavior doChannelActive(ChannelHandlerContext ctx) throws Exception {
    // New channel opening. See if we have too many open channels.
    int actualOpenChannelsCount = openChannelsGroup.size();
    if (actualOpenChannelsCount >= maxOpenChannelsThreshold) {
        Channel channel = ctx.channel();

        // Mark this channel as needing to be closed.
        ctx.channel().attr(TOO_MANY_OPEN_CONNECTIONS_THIS_CHANNEL_SHOULD_CLOSE).set(actualOpenChannelsCount);

        // Schedule a double-check event to make sure the channel gets closed.
        ScheduledFuture doubleCheckScheduledFuture = ctx.channel().eventLoop().schedule(() -> {
            if (channel.isOpen())
                channel.close();
        }, 100, TimeUnit.MILLISECONDS);

        // Add a channel close future listener to cancel the double-check scheduled event immediately if the channel
        // is closed quickly. Even though the double-check event will execute in 100 milliseconds, that's 100
        // milliseconds of potential garbage accumulating when it shouldn't. Could be a lot for a high traffic
        // server (which this likely is if the open channels limit is being hit).
        channel.closeFuture().addListener(future -> {
            if (!doubleCheckScheduledFuture.isDone())
                doubleCheckScheduledFuture.cancel(false);
        });
    } else {
        // Not at the threshold. Add this channel to the open channel group.
        openChannelsGroup.add(ctx.channel());
    }

    return PipelineContinuationBehavior.CONTINUE;
}
@Before
public void beforeMethod() {
    channelMock = mock(Channel.class);
    ctxMock = mock(ChannelHandlerContext.class);
    tooManyOpenConnectionsAttributeMock = mock(Attribute.class);

    doReturn(channelMock).when(ctxMock).channel();
    doReturn(tooManyOpenConnectionsAttributeMock).when(channelMock)
        .attr(TOO_MANY_OPEN_CONNECTIONS_THIS_CHANNEL_SHOULD_CLOSE);
    doReturn(true).when(channelMock).isOpen();

    eventLoopMock = mock(EventLoop.class);
    closeFutureMock = mock(ChannelFuture.class);
    doReturn(eventLoopMock).when(channelMock).eventLoop();
    doReturn(closeFutureMock).when(channelMock).closeFuture();

    doubleCheckScheduledFutureMock = mock(ScheduledFuture.class);
    doubleCheckRunnableCaptor = ArgumentCaptor.forClass(Runnable.class);
    closeFutureListenerCaptor = ArgumentCaptor.forClass(GenericFutureListener.class);
    doReturn(doubleCheckScheduledFutureMock).when(eventLoopMock)
        .schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
    doReturn(false).when(doubleCheckScheduledFutureMock).isDone();

    channelGroupMock = mock(ChannelGroup.class);
    maxOpenChannelsThreshold = 42;

    handler = new OpenChannelLimitHandler(channelGroupMock, maxOpenChannelsThreshold);
}
@Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { return next().scheduleAtFixedRate(command, initialDelay, period, unit); }
@Override public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) { return next().scheduleWithFixedDelay(command, initialDelay, delay, unit); }
@Override
public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
    super.channelRegistered(ctx);
    Channel c = ctx.channel();
    channel.set(c);
    registeredFutureLatch.release(null); // release all futures waiting for channel registration to complete.
    log.info("Connection established {} ", ctx);
    c.write(new WireCommands.Hello(WireCommands.WIRE_VERSION, WireCommands.OLDEST_COMPATIBLE_VERSION), c.voidPromise());
    ScheduledFuture<?> old = keepAliveFuture.getAndSet(
            c.eventLoop().scheduleWithFixedDelay(new KeepAliveTask(), 20, 10, TimeUnit.SECONDS));
    if (old != null) {
        old.cancel(false);
    }
}
/**
 * Disconnected.
 * @see io.netty.channel.ChannelInboundHandler#channelUnregistered(io.netty.channel.ChannelHandlerContext)
 */
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
    registeredFutureLatch.reset();
    ScheduledFuture<?> future = keepAliveFuture.get();
    if (future != null) {
        future.cancel(false);
    }
    channel.set(null);
    processor.connectionDropped();
    super.channelUnregistered(ctx);
}
@Test
public void testScheduledCancelled() throws Exception {
    EmbeddedChannel ch = new EmbeddedChannel(new ChannelInboundHandlerAdapter());
    ScheduledFuture<?> future = ch.eventLoop().schedule(new Runnable() {
        @Override
        public void run() { }
    }, 1, TimeUnit.DAYS);
    ch.finish();
    Assert.assertTrue(future.isCancelled());
}
@Override
public void run() {
    isRun = true;

    ScheduledFuture<?> timeoutFuture = get();
    if (timeoutFuture != null) {
        if (timeoutFuture.isDone() || !timeoutFuture.cancel(false)) {
            // Timeout task ran already or is determined to run.
            numActiveRequests.decrementAndGet();
            return;
        }
    }

    try (SafeCloseable ignored = RequestContext.push(ctx)) {
        try {
            final O actualRes = delegate().execute(ctx, req);
            actualRes.completionFuture().whenCompleteAsync((unused, cause) -> {
                numActiveRequests.decrementAndGet();
                drain();
            }, ctx.eventLoop());
            deferred.delegate(actualRes);
        } catch (Throwable t) {
            numActiveRequests.decrementAndGet();
            deferred.close(t);
        }
    }
}
boolean cancelTimeout() {
    final ScheduledFuture<?> responseTimeoutFuture = this.responseTimeoutFuture;
    if (responseTimeoutFuture == null) {
        return true;
    }

    this.responseTimeoutFuture = null;
    return responseTimeoutFuture.cancel(false);
}
@Test
public void shouldUnsubscribeTopologyRefresh() throws Exception {
    ClusterTopologyRefreshOptions topologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
            .enablePeriodicRefresh(true) //
            .build();

    clusterClient.setOptions(ClusterClientOptions.builder().topologyRefreshOptions(topologyRefreshOptions).build());
    RedisAdvancedClusterAsyncCommands<String, String> clusterConnection = clusterClient.connect().async();

    AtomicBoolean clusterTopologyRefreshActivated = (AtomicBoolean) ReflectionTestUtils
            .getField(clusterClient, "clusterTopologyRefreshActivated");
    AtomicReference<ScheduledFuture<?>> clusterTopologyRefreshFuture = (AtomicReference) ReflectionTestUtils
            .getField(clusterClient, "clusterTopologyRefreshFuture");

    assertThat(clusterTopologyRefreshActivated.get()).isTrue();
    assertThat((Future) clusterTopologyRefreshFuture.get()).isNotNull();

    ScheduledFuture<?> scheduledFuture = clusterTopologyRefreshFuture.get();

    clusterConnection.close();
    FastShutdown.shutdown(clusterClient);

    assertThat(clusterTopologyRefreshActivated.get()).isFalse();
    assertThat((Future) clusterTopologyRefreshFuture.get()).isNull();
    assertThat(scheduledFuture.isCancelled()).isTrue();
}
/**
 * Send a message to the TxManager.
 *
 * @param heartBeat the data transfer object to send
 * @return Object
 */
public Object sendTxManagerMessage(HeartBeat heartBeat) {
    if (ctx != null && ctx.channel() != null && ctx.channel().isActive()) {
        final String sendKey = IdWorkerUtils.getInstance().createTaskKey();
        BlockTask sendTask = BlockTaskHelper.getInstance().getTask(sendKey);
        heartBeat.setKey(sendKey);
        ctx.writeAndFlush(heartBeat);
        final ScheduledFuture<?> schedule = ctx.executor()
                .schedule(() -> {
                    if (!sendTask.isNotify()) {
                        if (NettyMessageActionEnum.GET_TRANSACTION_GROUP_STATUS.getCode() == heartBeat.getAction()) {
                            sendTask.setAsyncCall(objects -> NettyResultEnum.TIME_OUT.getCode());
                        } else if (NettyMessageActionEnum.FIND_TRANSACTION_GROUP_INFO.getCode() == heartBeat.getAction()) {
                            sendTask.setAsyncCall(objects -> null);
                        } else {
                            sendTask.setAsyncCall(objects -> false);
                        }
                        sendTask.signal();
                    }
                }, txConfig.getDelayTime(), TimeUnit.SECONDS);

        // The sending thread waits here until the TxManager replies (a correct reply wakes it up);
        // an error or missing reply is handled by the scheduled task above, which also wakes it up.
        sendTask.await();

        // If the task has already been woken up, the scheduled timeout is no longer needed, so cancel it.
        if (!schedule.isDone()) {
            schedule.cancel(false);
        }
        try {
            return sendTask.getAsyncCall().callBack();
        } catch (Throwable throwable) {
            throwable.printStackTrace();
            return null;
        } finally {
            BlockTaskHelper.getInstance().removeByKey(sendKey);
        }
    } else {
        return null;
    }
}
public ScheduledFuture<?> getScheduleTask() { return scheduledTask; }
public void setScheduleTask(ScheduledFuture<?> scheduledTask) { this.scheduledTask = scheduledTask; }
@Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { return delegate.schedule(command, delay, unit); }
@Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { return delegate.schedule(callable, delay, unit); }
@Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { return delegate.scheduleAtFixedRate(command, initialDelay, period, unit); }
@Override public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) { return delegate.scheduleWithFixedDelay(command, initialDelay, delay, unit); }
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    Endpoint<?> endpoint = state.getEndpointForExecution();

    if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
        // We only do something when the last chunk of content has arrived.
        if (msg instanceof LastHttpContent) {
            NonblockingEndpoint nonblockingEndpoint = ((NonblockingEndpoint) endpoint);

            // We're supposed to execute the endpoint. There may be pre-endpoint-execution validation logic or
            // other work that needs to happen before the endpoint is executed, so set up the
            // CompletableFuture for the endpoint call to only execute if the pre-endpoint-execution
            // validation/work chain is successful.
            RequestInfo<?> requestInfo = state.getRequestInfo();
            @SuppressWarnings("unchecked")
            CompletableFuture<ResponseInfo<?>> responseFuture = state
                .getPreEndpointExecutionWorkChain()
                .thenCompose(functionWithTracingAndMdc(
                    aVoid -> (CompletableFuture<ResponseInfo<?>>) nonblockingEndpoint.execute(
                        requestInfo, longRunningTaskExecutor, ctx
                    ),
                    ctx)
                );

            // Register an on-completion callback so we can be notified when the CompletableFuture finishes.
            responseFuture.whenComplete((responseInfo, throwable) -> {
                if (throwable != null)
                    asyncErrorCallback(ctx, throwable);
                else
                    asyncCallback(ctx, responseInfo);
            });

            // Also schedule a timeout check with our Netty event loop to make sure we kill the
            // CompletableFuture if it goes on too long.
            long timeoutValueToUse = (nonblockingEndpoint.completableFutureTimeoutOverrideMillis() == null)
                                     ? defaultCompletableFutureTimeoutMillis
                                     : nonblockingEndpoint.completableFutureTimeoutOverrideMillis();

            ScheduledFuture<?> responseTimeoutScheduledFuture = ctx.channel().eventLoop().schedule(() -> {
                if (!responseFuture.isDone()) {
                    runnableWithTracingAndMdc(
                        () -> logger.error("A non-blocking endpoint's CompletableFuture did not finish within "
                                           + "the allotted timeout ({} milliseconds). Forcibly cancelling it.",
                                           timeoutValueToUse),
                        ctx
                    ).run();
                    @SuppressWarnings("unchecked")
                    Throwable errorToUse = nonblockingEndpoint.getCustomTimeoutExceptionCause(requestInfo, ctx);
                    if (errorToUse == null)
                        errorToUse = new NonblockingEndpointCompletableFutureTimedOut(timeoutValueToUse);
                    responseFuture.completeExceptionally(errorToUse);
                }
            }, timeoutValueToUse, TimeUnit.MILLISECONDS);

            /*
                The problem with the scheduled timeout check is that it holds on to the RequestInfo,
                ChannelHandlerContext, and a bunch of other stuff that *should* become garbage the instant the
                request finishes, but because of the timeout check it has to wait until the check executes before
                the garbage is collectable. In high volume servers the default 60 second timeout is way too long
                and acts like a memory leak and results in garbage collection thrashing if the available memory
                can be filled within the 60 second timeout. To combat this we cancel the timeout future when the
                endpoint future finishes. Netty will remove the cancelled timeout future from its scheduled list
                within a short time, thus letting the garbage be collected.
            */
            responseFuture.whenComplete((responseInfo, throwable) -> {
                if (!responseTimeoutScheduledFuture.isDone())
                    responseTimeoutScheduledFuture.cancel(false);
            });
        }

        // Whether it was the last chunk or not, we don't want the pipeline to continue since the endpoint was a
        // NonblockingEndpoint and we need to wait for the CompletableFuture to complete. When the
        // NonblockingEndpoint processes the request then the pipeline will continue when the CompletableFuture
        // completes (see asyncCallback() and asyncErrorCallback()).
        return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
    }

    // Not a NonblockingEndpoint, so continue the pipeline in case another endpoint handler is in the pipeline and
    // wants to deal with it. If no such endpoint handler exists then ExceptionHandlingHandler will cause an
    // error to be returned to the client.
    return PipelineContinuationBehavior.CONTINUE;
}
@Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { return next().schedule(command, delay, unit); }
@Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { return next().schedule(callable, delay, unit); }
/**
 * Schedule the next run of {@link #sendDiscoveryRequest()} if it hasn't been scheduled yet.
 *
 * @return the Future returned by {@link io.netty.channel.EventLoop#schedule(Callable, long, TimeUnit)}
 */
private synchronized ScheduledFuture<?> scheduleDiscoveryRetry() {
    Log.v(TAG, "scheduleDiscoveryRetry()");

    // don't schedule a second execution if one is already pending
    final boolean isExecutionPending = retryFuture != null && !retryFuture.isDone();
    if (isDiscoveryRunning && !isExecutionPending) {
        if (requireComponent(Client.KEY).isChannelOpen() && timeout == 0) {
            Log.d(TAG, "scheduleDiscoveryRetry() running indefinitely, but Client Channel is open. Was stopDiscovery called?");
        }
        retryFuture = requireComponent(ExecutionServiceComponent.KEY).schedule(new Runnable() {
            @Override
            public void run() {
                if (timeout > 0 && System.currentTimeMillis() > timeout) {
                    Log.i(TAG, "Stopping discovery after timeout");
                    stopDiscovery();
                }
                if (isDiscoveryRunning) {
                    sendDiscoveryRequest();
                    /* Mark this future as completed, so that the next discovery request will be scheduled.
                     * Otherwise retryFuture.isDone() would be false until this method terminates and the following
                     * recursive call wouldn't schedule the next execution. */
                    retryFuture = null;
                    scheduleDiscoveryRetry();
                }
            }
        }, CLIENT_MILLIS_BETWEEN_BROADCASTS, TimeUnit.MILLISECONDS);
        retryFuture.addListener(new FutureListener<Object>() {
            @Override
            public void operationComplete(Future future) throws Exception {
                if (!future.isSuccess()) {
                    Log.w(TAG, "Could not reschedule execution of UDP discovery", future.cause());
                }
            }
        });
    } else {
        Log.d(TAG, "not scheduling another retry because "
                + "isDiscoveryRunning = " + isDiscoveryRunning + ", retryFuture = " + retryFuture);
    }
    return retryFuture;
}
@Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { return delegate().schedule(context().makeContextAware(command), delay, unit); }
@Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { return delegate().schedule(context().makeContextAware(callable), delay, unit); }
@Override public ScheduledFuture<?> scheduleAtFixedRate( Runnable command, long initialDelay, long period, TimeUnit unit) { return delegate().scheduleAtFixedRate(context().makeContextAware(command), initialDelay, period, unit); }
@Override public ScheduledFuture<?> scheduleWithFixedDelay( Runnable command, long initialDelay, long delay, TimeUnit unit) { return delegate().scheduleWithFixedDelay( context().makeContextAware(command), initialDelay, delay, unit); }
private void schedule(Duration interval, Runnable runnable) { ScheduledFuture<?> f = workerLoop.scheduleAtFixedRate(runnable, 0, interval.toMillis(), TimeUnit.MILLISECONDS); scheduledFutures.add(f); }
public ScheduledFuture<?> getUdpPingFuture() { return udpPingFuture; }
public void setUdpPingFuture(ScheduledFuture<?> udpPingFuture) { this.udpPingFuture = udpPingFuture; }
public ScheduledFuture<?> getLoadingTimeoutFuture() { return loadingTimeoutFuture; }