/**
 * Queues a GetBlockHeaders request (by block number) to this peer and returns a future
 * that completes with the peer's header response.
 */
@Override
public synchronized ListenableFuture<List<BlockHeader>> sendGetBlockHeaders(long blockNumber, int maxBlocksAsk, boolean reverse) {
    // NOTE(review): this guard returns null when the status handshake SUCCEEDED and the peer
    // is busy — confirm it is not meant to be a "not ready" check (!= STATUS_SUCCEEDED || != IDLE).
    if (ethState == EthState.STATUS_SUCCEEDED && peerState != IDLE) return null;
    if(logger.isTraceEnabled()) logger.trace(
        "Peer {}: queue GetBlockHeaders, blockNumber [{}], maxBlocksAsk [{}]",
        channel.getPeerIdShort(),
        blockNumber,
        maxBlocksAsk
    );
    // Only one outstanding header request per peer is allowed; a second concurrent
    // request is a programming error.
    if (headerRequest != null) {
        throw new RuntimeException("The peer is waiting for headers response: " + this);
    }
    GetBlockHeadersMessage headersRequest = new GetBlockHeadersMessage(blockNumber, null, maxBlocksAsk, 0, reverse);
    GetBlockHeadersMessageWrapper messageWrapper = new GetBlockHeadersMessageWrapper(headersRequest);
    headerRequest = messageWrapper;
    sendNextHeaderRequest();
    return messageWrapper.getFutureHeaders();
}
/**
 * Asynchronously deletes the given attribute keys for an entity.
 * One delete row is built per key and the whole batch is handed to the executor.
 */
@Override
public ListenableFuture<List<Void>> removeAll(EntityId entityId, String attributeType, List<String> keys) {
    // Materialize one AttributeKvEntity per key to delete.
    List<AttributeKvEntity> batch = keys.stream().map(attributeKey -> {
        AttributeKvEntity row = new AttributeKvEntity();
        row.setEntityId(fromTimeUUID(entityId.getId()));
        row.setEntityType(entityId.getEntityType());
        row.setAttributeKey(attributeKey);
        row.setAttributeType(attributeType);
        return row;
    }).collect(Collectors.toList());
    // The repository call runs on the service executor; the future resolves to null on success.
    return service.submit(() -> {
        attributeKvRepository.delete(batch);
        return null;
    });
}
/**
 * Sends a RequestVote RPC to the given replica.
 * The (possibly blocking) client connect is performed on the network executor; the
 * response is then transferred back onto the raft executor so downstream callbacks
 * run on the raft thread.
 */
@Nonnull
public ListenableFuture<RequestVoteResponse> requestVote(@Nonnull final Replica replica, @Nonnull final RequestVote request) {
    checkNotNull(replica);
    checkNotNull(request);
    // Put a (possibly) blocking connect onto a different thread
    ListenableFuture<ListenableFuture<RequestVoteResponse>> response =
        networkCallExecutor.submit(new Callable<ListenableFuture<RequestVoteResponse>>() {
            @Override
            public ListenableFuture<RequestVoteResponse> call() throws Exception {
                RaftClient client = clientProvider.get(replica);
                return client.requestVote(request);
            }
        });
    // Transfer the response back onto the raft thread; Identity flattens the nested future.
    return transform(response, Identity.<RequestVoteResponse> identity(), raftExecutor);
}
/**
 * Asynchronously persists a single attribute value for an entity.
 * The typed value columns (str/double/long/boolean) are populated from the entry's
 * optionals; absent variants are stored as null.
 */
@Override
public ListenableFuture<Void> save(EntityId entityId, String attributeType, AttributeKvEntry attribute) {
    final AttributeKvEntity row = new AttributeKvEntity();
    // Composite key: entity identity + attribute type + attribute key.
    row.setEntityType(entityId.getEntityType());
    row.setEntityId(fromTimeUUID(entityId.getId()));
    row.setAttributeType(attributeType);
    row.setAttributeKey(attribute.getKey());
    row.setLastUpdateTs(attribute.getLastUpdateTs());
    // Exactly one of these is expected to be present; the rest stay null.
    row.setStrValue(attribute.getStrValue().orElse(null));
    row.setDoubleValue(attribute.getDoubleValue().orElse(null));
    row.setLongValue(attribute.getLongValue().orElse(null));
    row.setBooleanValue(attribute.getBooleanValue().orElse(null));
    return service.submit(() -> {
        attributeKvRepository.save(row);
        return null;
    });
}
@Test
public void sanity() {
    // A successful unit action resolves to its wrapped value.
    ListenableFuture<Integer> success = dbAsync.submit(DB.unit(42));
    SQLException ex = new SQLException("failed i have");
    // A DB action that throws must surface the exact same exception through the future.
    ListenableFuture<Integer> fail = dbAsync.submit(DB.db((Try1<Connection, Integer, SQLException>) c -> {
        assertThat(c, is(notNullValue())); // the action must receive a live connection
        throw ex;
    }));
    assertThat(awaitAndGet(success), is(42));
    assertThat(awaitAndGetFailure(fail), is(ex));
}
/** * Run the job now. * The job must set its own state to DISABLED or PAUSED when failed, otherwise it is set to ACTIVE. * @param yadaJob * @return */ public void runJob(Long yadaJobId) { log.debug("Running job id {}", yadaJobId); YadaJob toRun = yadaJobRepository.findOne(yadaJobId); if (toRun==null) { log.info("Job not found when trying to run it, id={}", toRun); return; } yadaJobRepository.internalSetRunning(yadaJobId, YadaJobState.RUNNING.toId(), YadaJobState.ACTIVE.toId()); final YadaJob wiredYadaJob = (YadaJob) yadaUtil.autowire(toRun); // YadaJob instances can have @Autowire fields ListenableFuture<Void> jobHandle = jobScheduler.submit(wiredYadaJob); jobHandles.put(yadaJobId, jobHandle); Futures.addCallback(jobHandle, new FutureCallback<Void>() { // The callback is run in executor public void onSuccess(Void result) { // result is always null jobHandles.remove(yadaJobId); yadaJobSchedulerDao.internalJobSuccessful(wiredYadaJob); } public void onFailure(Throwable thrown) { jobHandles.remove(yadaJobId); yadaJobSchedulerDao.internalJobFailed(wiredYadaJob, thrown); } }, MoreExecutors.directExecutor()); }
// Verifies the full request/response round-trip: the request is written out, tracked as
// pending, and the future completes once the matching reply (same xid) is delivered.
// NOTE(review): the sibling variant of this test runs eventLoop.runTasks() before checking
// the captured message list — confirm whether this variant relies on an immediate write.
@Test(timeout = 5000)
public void testWriteRequestSuccess() throws InterruptedException, ExecutionException {
    Capture<List<OFMessage>> cMsgList = prepareChannelForWriteList();
    OFEchoRequest echoRequest = factory.echoRequest(new byte[] {});
    ListenableFuture<OFEchoReply> future = conn.writeRequest(echoRequest);
    assertThat("Connection should have 1 pending request",
            conn.getPendingRequestIds().size(), equalTo(1));
    assertThat("Should have captured MsgList", cMsgList.getValue(),
            Matchers.<OFMessage> contains(echoRequest));
    assertThat("Future should not be complete yet", future.isDone(), equalTo(false));
    // The reply must carry the request's xid to be matched to the pending request.
    OFEchoReply echoReply = factory.buildEchoReply()
            .setXid(echoRequest.getXid())
            .build();
    assertThat("Connection should have accepted the response",
            conn.deliverResponse(echoReply), equalTo(true));
    assertThat("Future should be complete ", future.isDone(), equalTo(true));
    assertThat(future.get(), equalTo(echoReply));
    assertThat("Connection should have no pending requests",
            conn.getPendingRequestIds().isEmpty(), equalTo(true));
}
/**
 * Sends a message (built per shard by {@code messageSupplier}) to the shard manager of the
 * selected data store, one ask per configured shard name. Each resulting future is paired
 * with a pre-populated ShardResultBuilder and appended to {@code shardResultData} for the
 * caller to await/aggregate.
 */
private <T> void sendMessageToManagerForConfiguredShards(DataStoreType dataStoreType,
        List<Entry<ListenableFuture<T>, ShardResultBuilder>> shardResultData,
        Function<String, Object> messageSupplier) {
    // Pick the actor context of the requested data store (Config vs Operational).
    ActorContext actorContext = dataStoreType == DataStoreType.Config
            ? configDataStore.getActorContext() : operDataStore.getActorContext();
    Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();
    LOG.debug("Sending message to all shards {} for data store {}", allShardNames,
            actorContext.getDataStoreName());
    for (String shardName: allShardNames) {
        ListenableFuture<T> future = this.ask(actorContext.getShardManager(),
                messageSupplier.apply(shardName), SHARD_MGR_TIMEOUT);
        shardResultData.add(new SimpleEntry<>(future,
                new ShardResultBuilder().setShardName(shardName).setDataStoreType(dataStoreType)));
    }
}
// Verifies that loading the build type list registers both build types with the manager,
// dispatches the manager on the event bus, and completes the acknowledgement future.
@Test
public void loadBuildTypeList_callback_registers_project_to_BuildTypeManager_and_dispatch_it_on_event_bus( ) throws Exception {
    // Setup: the request controller answers immediately with two build types.
    final BuildTypeList buildTypelist = new BuildTypeList( );
    buildTypelist.addBuildType( new BuildType( "bt1", "btName", "btProjectName", "btProjectId" ) );
    buildTypelist.addBuildType( new BuildType( "bt2", "btName", "btProjectName", "btProjectId" ) );
    when( _mockRequestController.sendRequest( getApiVersion( ), "buildTypes", BuildTypeList.class ) )
            .thenReturn( Futures.immediateFuture( buildTypelist ) );
    // Exercise
    final ListenableFuture<Void> ackFuture = _apiController.loadBuildTypeList( );
    // Verify: build types registered in insertion order, manager dispatched, future done.
    assertThat( _buildTypeManager.getBuildTypes( ).size( ), is( 2 ) );
    assertThat( _buildTypeManager.getBuildTypes( ).get( 0 ).getId( ), is( "bt1" ) );
    assertThat( _buildTypeManager.getBuildTypes( ).get( 1 ).getId( ), is( "bt2" ) );
    assertThat( _dispatchedObjects, hasItem( _buildTypeManager ) );
    assertThat( ackFuture.isDone( ), is( true ) );
}
/**
 * Creates an index configured as a blob table (blobs enabled on top of the given settings).
 * The returned future completes with null once the create-index request is acknowledged,
 * or fails with the transport error.
 */
public ListenableFuture<Void> createBlobTable(String tableName, Settings indexSettings) {
    Settings.Builder builder = Settings.builder();
    builder.put(indexSettings);
    // Marks the index as a blob container on top of whatever the caller supplied.
    builder.put(SETTING_INDEX_BLOBS_ENABLED, true);
    final SettableFuture<Void> result = SettableFuture.create();
    transportCreateIndexActionProvider.get().execute(
            new CreateIndexRequest(fullIndexName(tableName), builder.build()),
            new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse createIndexResponse) {
            // Only checked when JVM assertions are enabled; no-op in production.
            assert createIndexResponse.isAcknowledged();
            result.set(null);
        }
        @Override
        public void onFailure(Throwable e) {
            result.setException(e);
        }
    });
    return result;
}
/**
 * <p>Returns a future that wraps a list of all transactions that the given transaction depends on, recursively.
 * Only transactions in peers memory pools are included; the recursion stops at transactions that are in the
 * current best chain. So it doesn't make much sense to provide a tx that was already in the best chain and
 * a precondition checks this.</p>
 *
 * <p>For example, if tx has 2 inputs that connect to transactions A and B, and transaction B is unconfirmed and
 * has one input connecting to transaction C that is unconfirmed, and transaction C connects to transaction D
 * that is in the chain, then this method will return either {B, C} or {C, B}. No ordering is guaranteed.</p>
 *
 * <p>This method is useful for apps that want to learn about how long an unconfirmed transaction might take
 * to confirm, by checking for unexpectedly time locked transactions, unusually deep dependency trees or fee-paying
 * transactions that depend on unconfirmed free transactions.</p>
 *
 * <p>Note that dependencies downloaded this way will not trigger the onTransaction method of event listeners.</p>
 */
public ListenableFuture<List<Transaction>> downloadDependencies(Transaction tx) {
    TransactionConfidence.ConfidenceType txConfidence = tx.getConfidence().getConfidenceType();
    // Precondition (see javadoc): the tx must not already be in the best chain.
    Preconditions.checkArgument(txConfidence != TransactionConfidence.ConfidenceType.BUILDING);
    log.info("{}: Downloading dependencies of {}", getAddress(), tx.getHashAsString());
    // Results are accumulated into this list as the dependency tree is walked.
    final LinkedList<Transaction> results = new LinkedList<>();
    // future will be invoked when the entire dependency tree has been walked and the results compiled.
    final ListenableFuture<Object> future =
            downloadDependenciesInternal(vDownloadTxDependencyDepth, 0, tx, new Object(), results);
    final SettableFuture<List<Transaction>> resultFuture = SettableFuture.create();
    // Adapt the internal Object-typed future into the typed result list future.
    Futures.addCallback(future, new FutureCallback<Object>() {
        @Override
        public void onSuccess(Object ignored) {
            resultFuture.set(results);
        }
        @Override
        public void onFailure(Throwable throwable) {
            resultFuture.setException(throwable);
        }
    });
    return resultFuture;
}
// Kicks off the two-step load: projects first, then build types, chaining the futures
// so build types are only requested once projects are loaded.
void requestLoadingBuilds( ) {
    _loading.setValue( true );
    // NOTE(review): failure flag is set to true while merely *starting* to connect —
    // confirm this is intentional (e.g. pessimistic default cleared on success).
    _loadingFailure.setValue( true );
    _loadingInformation.setValue( "Trying to connect..." );
    final ListenableFuture<Void> loadProjectsFuture = _apiController.loadProjectList( );
    final ListenableFuture<Void> loadBuildTypesfuture =
            transform( loadProjectsFuture, (AsyncFunction<Void, Void>) input -> _apiController.loadBuildTypeList( ) );
    addCallback( loadBuildTypesfuture, loadingSuccessfulCallback( ) );
}
/**
 * Removes the given attribute keys in the requested scope after validating the entity,
 * then notifies the callback. Device entities additionally trigger an
 * attributes-deleted notification.
 */
@Override
public void removeAttributes(final TenantId tenantId, final EntityId entityId, final String scope,
        final List<String> keys, final PluginCallback<Void> callback) {
    validate(entityId, new ValidationCallback(callback, ctx -> {
        ListenableFuture<List<Void>> futures = pluginCtx.attributesService.removeAll(entityId, scope, keys);
        Futures.addCallback(futures, getCallback(callback, v -> null), executor);
        // NOTE(review): this notification fires immediately after scheduling the removal,
        // not on completion of the future — confirm the ordering is acceptable.
        if (entityId.getEntityType() == ThingType.DEVICE) {
            onDeviceAttributesDeleted(tenantId, new DeviceId(entityId.getId()),
                    keys.stream().map(key -> new AttributeKey(scope, key)).collect(Collectors.toSet()));
        }
    }));
}
/**
 * Removes the subscription from the tracked set and returns a future that completes
 * once any remaining queued updates on the executor have drained.
 */
protected synchronized ListenableFuture<Void> disposeSubscription ( final DefaultSubscription subscription )
{
    this.subscriptions.remove ( subscription );
    this.numberOfSubscriptions = this.subscriptions.size ();
    if ( this.executor.isShutdown () )
    {
        // if we are already disposed
        return Futures.immediateFuture ( null );
    }
    // the completion will come from the executor, so the completion has to wait in line
    // with possible remaining updated
    return this.executor.submit ( () -> null );
}
/**
 * Uploads a built chunk's vertex data to the GPU. When already on the client (Minecraft)
 * thread, the upload happens synchronously and an immediate (null) future is returned;
 * otherwise the work is queued as a task re-invoking this method on the client thread.
 */
public ListenableFuture<Object> uploadChunk(final BlockRenderLayer p_188245_1_, final VertexBuffer p_188245_2_, final RenderChunk p_188245_3_, final CompiledChunk p_188245_4_, final double p_188245_5_)
{
    if (Minecraft.getMinecraft().isCallingFromMinecraftThread())
    {
        // Two upload paths depending on whether VBOs are available.
        if (OpenGlHelper.useVbo())
        {
            this.uploadVertexBuffer(p_188245_2_, p_188245_3_.getVertexBufferByLayer(p_188245_1_.ordinal()));
        }
        else
        {
            this.uploadDisplayList(p_188245_2_, ((ListedRenderChunk)p_188245_3_).getDisplayList(p_188245_1_, p_188245_4_), p_188245_3_);
        }
        // Reset translation so the buffer can be reused for the next chunk.
        p_188245_2_.setTranslation(0.0D, 0.0D, 0.0D);
        return Futures.<Object>immediateFuture((Object)null);
    }
    else
    {
        // Not on the render thread: wrap the upload in a task and queue it.
        ListenableFutureTask<Object> listenablefuturetask = ListenableFutureTask.<Object>create(new Runnable()
        {
            public void run()
            {
                ChunkRenderDispatcher.this.uploadChunk(p_188245_1_, p_188245_2_, p_188245_3_, p_188245_4_, p_188245_5_);
            }
        }, (Object)null);
        synchronized (this.queueChunkUploads)
        {
            // The double parameter is used as the upload's priority/distance ordering key.
            this.queueChunkUploads.add(new ChunkRenderDispatcher.PendingUpload(listenablefuturetask, p_188245_5_));
            return listenablefuturetask;
        }
    }
}
/**
 * Asynchronously loads a single entity by its primary key.
 */
@Override
public ListenableFuture<D> findByIdAsync(UUID key) {
    log.debug("Get entity by key {}", key);
    // Build SELECT ... FROM <cf> WHERE id = <key>.
    Select.Where stmt = select()
            .from(getColumnFamilyName())
            .where(eq(ModelConstants.ID_PROPERTY, key));
    log.trace("Execute query {}", stmt);
    return findOneByStatementAsync(stmt);
}
/**
 * Asks every journal logger to discard segments starting at the given transaction id
 * and bundles the per-logger futures into a single quorum call.
 */
public QuorumCall<AsyncLogger, Void> discardSegments(long startTxId) {
    Map<AsyncLogger, ListenableFuture<Void>> pending = Maps.newHashMap();
    for (AsyncLogger journal : loggers) {
        pending.put(journal, journal.discardSegments(startTxId));
    }
    return QuorumCall.create(pending);
}
/**
 * Asynchronously finds all relations originating from the given entity that match
 * the relation type within the given type group.
 */
@Override
public ListenableFuture<List<EntityRelation>> findAllByFromAndType(EntityId from, String relationType, RelationTypeGroup typeGroup) {
    // Bind positional parameters of the prepared statement.
    BoundStatement query = getFindAllByFromAndTypeStmt().bind();
    query.setUUID(0, from.getId());
    query.setString(1, from.getEntityType().name());
    query.set(2, typeGroup, relationTypeGroupCodec); // custom codec for the type group enum
    query.setString(3, relationType);
    return executeAsyncRead(from, query);
}
/**
 * Triggers a refresh of all entries and maps the raw response on the sync executor.
 * The Runnable::run executor makes the handler run on the completing thread.
 */
public ListenableFuture<FindMultiResponse<T>> refreshAll() {
    return FutureUtils.mapSync(
        sendRefreshAll(),
        response -> handleRefreshAll(response, Runnable::run),
        modelSync
    );
}
/**
 * Task that kills all running jobs on every node.
 * Exposes a single result future (wired to the kill action's listener, mapping the
 * transport response to a TaskResult).
 */
public KillTask(TransportKillAllNodeAction nodeAction, UUID jobId) {
    super(jobId);
    this.nodeAction = nodeAction;
    SettableFuture<TaskResult> result = SettableFuture.create();
    // This task produces exactly one result.
    results = ImmutableList.of((ListenableFuture<TaskResult>) result);
    actionListener = ActionListeners.wrap(result, RESPONSE_TO_TASK_RESULT);
}
public Optional<String> getToken() { final ListenableFuture<String> masterPassword = encyptionKeyProvider.getMasterPassword(); if (!masterPassword.isDone()) { return Optional.empty(); } final String key = encyptionKeyProvider.getImmediatePassword(); final String s = key + " meta"; final ECKey privKey = ECKey.fromPrivate(Sha256Hash.twiceOf(s.getBytes(Charsets.UTF_8)).getBytes()); /* @POST @Path("/token") @Produces(MediaType.APPLICATION_OCTET_STREAM) public Response createToken(@QueryParam("timestamp") Long nonce, @QueryParam("signature") String signature) { */ // } final long timeStamp = Instant.now().toEpochMilli(); try { final String url = rootPath + "auth/token"; final HttpResponse<String> token = Unirest.post(url) .queryString("timestamp", timeStamp) .queryString("signature", privKey.signMessage(String.valueOf(timeStamp))) .asString(); if (token.getStatus() != 200) { return Optional.empty(); } return Optional.of(token.getBody()); } catch (UnirestException e) { LOGGER.error("exception from remote service when trying to get token", e); return Optional.empty(); } }
/**
 * Downloads (or reuses a cached copy of) a server resource pack, verifying its SHA-1
 * hash, and applies it. Returns a future tracking either the immediate application of a
 * cached pack or the in-flight download. The whole operation runs under the repository lock.
 */
public ListenableFuture<Object> downloadResourcePack(String url, String hash)
{
    // Cache file name is the SHA-1 of the URL, not of the content.
    String s = DigestUtils.sha1Hex(url);
    // Only accept the expected hash if it is a well-formed SHA-1; otherwise skip verification.
    final String s1 = SHA1.matcher(hash).matches() ? hash : "";
    final File file1 = new File(this.dirServerResourcepacks, s);
    this.lock.lock();
    try
    {
        this.clearResourcePack();
        if (file1.exists())
        {
            if (this.checkHash(s1, file1))
            {
                // Cached copy matches: apply it directly, no download needed.
                ListenableFuture listenablefuture2 = this.setResourcePackInstance(file1);
                ListenableFuture listenablefuture3 = listenablefuture2;
                return listenablefuture3;
            }
            // Cached copy is stale/corrupt: delete and fall through to re-download.
            LOGGER.warn("Deleting file {}", new Object[] {file1});
            FileUtils.deleteQuietly(file1);
        }
        this.deleteOldServerResourcesPacks();
        final GuiScreenWorking guiscreenworking = new GuiScreenWorking();
        Map<String, String> map = getDownloadHeaders();
        final Minecraft minecraft = Minecraft.getMinecraft();
        // Show the progress screen on the client thread and wait until it is displayed.
        Futures.getUnchecked(minecraft.addScheduledTask(new Runnable()
        {
            public void run()
            {
                minecraft.displayGuiScreen(guiscreenworking);
            }
        }));
        final SettableFuture<Object> settablefuture = SettableFuture.<Object>create();
        // 52428800 = 50 MiB download size limit.
        this.downloadingPacks = HttpUtil.downloadResourcePack(file1, url, map, 52428800, guiscreenworking, minecraft.getProxy());
        Futures.addCallback(this.downloadingPacks, new FutureCallback<Object>()
        {
            public void onSuccess(@Nullable Object p_onSuccess_1_)
            {
                // Verify the downloaded file before applying it; delete it on mismatch.
                if (ResourcePackRepository.this.checkHash(s1, file1))
                {
                    ResourcePackRepository.this.setResourcePackInstance(file1);
                    settablefuture.set((Object)null);
                }
                else
                {
                    ResourcePackRepository.LOGGER.warn("Deleting file {}", new Object[] {file1});
                    FileUtils.deleteQuietly(file1);
                }
            }
            public void onFailure(Throwable p_onFailure_1_)
            {
                FileUtils.deleteQuietly(file1);
                settablefuture.setException(p_onFailure_1_);
            }
        });
        // NOTE: the download future (not settablefuture) is returned, so callers observe
        // download completion rather than hash verification/application.
        ListenableFuture listenablefuture = this.downloadingPacks;
        ListenableFuture listenablefuture11 = listenablefuture;
        return listenablefuture11;
    }
    finally
    {
        this.lock.unlock();
    }
}
/**
 * Uploads a built chunk's vertex data to the GPU. When already on the client (Minecraft)
 * thread, the upload happens synchronously and an immediate (null) future is returned;
 * otherwise the work is queued as a task re-invoking this method on the client thread.
 */
public ListenableFuture<Object> uploadChunk(final EnumWorldBlockLayer player, final WorldRenderer p_178503_2_, final RenderChunk chunkRenderer, final CompiledChunk compiledChunkIn)
{
    if (Minecraft.getMinecraft().isCallingFromMinecraftThread())
    {
        // Two upload paths depending on whether VBOs are available.
        if (OpenGlHelper.useVbo())
        {
            this.uploadVertexBuffer(p_178503_2_, chunkRenderer.getVertexBufferByLayer(player.ordinal()));
        }
        else
        {
            this.uploadDisplayList(p_178503_2_, ((ListedRenderChunk)chunkRenderer).getDisplayList(player, compiledChunkIn), chunkRenderer);
        }
        // Reset translation so the renderer can be reused for the next chunk.
        p_178503_2_.setTranslation(0.0D, 0.0D, 0.0D);
        return Futures.<Object>immediateFuture((Object)null);
    }
    else
    {
        // Not on the render thread: wrap the upload in a task and queue it.
        ListenableFutureTask<Object> listenablefuturetask = ListenableFutureTask.<Object>create(new Runnable()
        {
            public void run()
            {
                ChunkRenderDispatcher.this.uploadChunk(player, p_178503_2_, chunkRenderer, compiledChunkIn);
            }
        }, (Object)null);
        synchronized (this.queueChunkUploads)
        {
            this.queueChunkUploads.add(listenablefuturetask);
            return listenablefuturetask;
        }
    }
}
/**
 * Searches for a user by name. Resolves the user asynchronously, then maps the result
 * on the sync executor; an unknown user surfaces as a NotFound failure on the future.
 */
@Override
public ListenableFuture<UserSearchResponse> search(UserSearchRequest request) {
    return FutureUtils.mapSync(
        userFinder.findUserAsync(request.username),
        user -> {
            if(user.hasValidId()) {
                return new UserSearchResponse(new LocalUserDocument(user), user.isOnline(), false, null, null);
            }
            // Thrown inside the mapper: propagated as the failure of the returned future.
            throw new NotFound("No user named '" + request.username + "'");
        }
    );
}
/**
 * Requests that an update (AppendEntries) be sent to the follower.
 * If no round is currently running, an update is sent immediately; otherwise the
 * request piggybacks on the in-flight round and its pending response future is returned.
 */
@Nonnull
public ListenableFuture<AppendEntriesResponse> requestUpdate() {
    requested = true;
    if (!running) {
        return sendUpdate();
    }
    return nextResponse;
}
/**
 * Adapts a Cassandra ResultSetFuture into a typed ListenableFuture by applying the
 * given transformer on the results-processing executor.
 * NOTE(review): the anonymous Guava Function (rather than a lambda) is presumably
 * deliberate to disambiguate the Futures.transform overloads — confirm before simplifying.
 */
protected <T> ListenableFuture<T> getFuture(ResultSetFuture future, java.util.function.Function<ResultSet, T> transformer) {
    return Futures.transform(future, new Function<ResultSet, T>() {
        @Nullable
        @Override
        public T apply(@Nullable ResultSet input) {
            return transformer.apply(input);
        }
    }, readResultsProcessingExecutor);
}
/**
 * Asynchronously finds device types for a tenant, restricted to the given device ids.
 */
@Override
public ListenableFuture<List<DeviceType>> findDeviceTypesByTenantIdAndIdsAsync(UUID tenantId, List<UUID> deviceIds) {
    log.debug("Try to find devices by tenantId [{}] and device Ids [{}]", tenantId, deviceIds);
    // SELECT * FROM <cf> WHERE tenant_id = ? AND id IN (?...)
    Select.Where query = select().from(getColumnFamilyName()).where()
            .and(eq(DEVICE_TYPE_TENANT_ID_PROPERTY, tenantId))
            .and(in(ID_PROPERTY, deviceIds));
    return findListByStatementAsync(query);
}
/**
 * Loads the build type list from the server on a worker thread, registers the build
 * types with the build manager, links them to their projects, and posts updates on the
 * event bus. Returns an acknowledgement future completed when processing finishes
 * (or failed with the request error).
 */
@Override
public ListenableFuture<Void> loadBuildTypeList( ) {
    // Older API versions cannot report build type status: succeed immediately with nothing to do.
    if ( !getApiVersion( ).isSupported( ApiFeature.BUILD_TYPE_STATUS ) )
        return Futures.immediateFuture( null );
    final SettableFuture<Void> ackFuture = SettableFuture.create( );
    runInWorkerThread( ( ) -> {
        final ListenableFuture<BuildTypeList> buildListFuture =
                _apiRequestController.sendRequest( getApiVersion( ), "buildTypes", BuildTypeList.class );
        addCallback( buildListFuture, new FutureCallback<BuildTypeList>( ) {
            @Override
            public void onSuccess( final BuildTypeList result ) {
                // Convert API DTOs into domain build types and register them.
                final List<BuildTypeData> buildTypes = result.getBuildTypes( ).stream( )
                        .map( ( btype ) -> _buildTypeProvider.get( getApiVersion( ) ).apply( btype ) )
                        .collect( Collectors.toList( ) );
                _buildManager.registerBuildTypes( buildTypes );
                _eventBus.post( _buildManager );
                // Attach each build type to its project (if known) and broadcast the change.
                for ( final BuildTypeData buildType : _buildManager.getBuildTypes( ) ) {
                    final Optional<ProjectData> project = _projectManager.getProject( buildType.getProjectId( ) );
                    if ( project.isPresent( ) ) {
                        project.get( ).registerBuildType( buildType );
                        _eventBus.post( project.get( ) );
                    }
                    LOGGER.info( "Discovering build type " + buildType.getId( ) + " (" + buildType.getName( ) + ") on project " + buildType.getProjectId( ) + " (" + buildType.getProjectName( ) + ")" );
                }
                ackFuture.set( null );
            }
            @Override
            public void onFailure( final Throwable t ) {
                LOGGER.error( "Error during loading build type list:", t );
                ackFuture.setException( t );
            }
        } );
    } );
    return ackFuture;
}
/**
 * Asynchronously finds device types for a tenant and customer, restricted to the
 * given device ids.
 */
@Override
public ListenableFuture<List<DeviceType>> findDeviceTypesByTenantIdCustomerIdAndIdsAsync(UUID tenantId, UUID customerId, List<UUID> deviceIds) {
    log.debug("Try to find devices by tenantId [{}], customerId [{}] and device Ids [{}]", tenantId, customerId, deviceIds);
    // SELECT * FROM <cf> WHERE tenant_id = ? AND customer_id = ? AND id IN (?...)
    Select.Where query = select().from(getColumnFamilyName()).where()
            .and(eq(DEVICE_TYPE_TENANT_ID_PROPERTY, tenantId))
            .and(eq(DEVICE_TYPE_CUSTOMER_ID_PROPERTY, customerId))
            .and(in(ID_PROPERTY, deviceIds));
    return findListByStatementAsync(query);
}
/**
 * Executes a CREATE BLOB TABLE statement and reports an affected-row count of 1
 * once the table creation is acknowledged.
 */
@Override
public ListenableFuture<Long> visitCreateBlobTableStatement(
        CreateBlobTableAnalyzedStatement analysis, SingleJobTask jobId) {
    // Create the blob table, then map the acknowledgement onto the fixed row count.
    ListenableFuture<Void> created = blobIndices.createBlobTable(
            analysis.tableName(),
            analysis.tableParameter().settings());
    return wrapRowCountFuture(created, 1L);
}
// Verifies the full request/response round-trip: the request is written out (after the
// event loop runs its queued tasks), tracked as pending, and the future completes once
// the matching reply (same xid) is delivered.
@Test(timeout = 5000)
public void testWriteRequestSuccess() throws InterruptedException, ExecutionException {
    Capture<List<OFMessage>> cMsgList = prepareChannelForWriteList();
    OFEchoRequest echoRequest = factory.echoRequest(new byte[] {});
    ListenableFuture<OFEchoReply> future = conn.writeRequest(echoRequest);
    assertThat("Connection should have 1 pending request",
            conn.getPendingRequestIds().size(), equalTo(1));
    // Flush the write through the mocked event loop before inspecting the capture.
    eventLoop.runTasks();
    assertThat("Should have captured MsgList", cMsgList.getValue(),
            Matchers.<OFMessage> contains(echoRequest));
    assertThat("Future should not be complete yet", future.isDone(), equalTo(false));
    // The reply must carry the request's xid to be matched to the pending request.
    OFEchoReply echoReply = factory.buildEchoReply()
            .setXid(echoRequest.getXid())
            .build();
    assertThat("Connection should have accepted the response",
            conn.deliverResponse(echoReply), equalTo(true));
    assertThat("Future should be complete ", future.isDone(), equalTo(true));
    assertThat(future.get(), equalTo(echoReply));
    assertThat("Connection should have no pending requests",
            conn.getPendingRequestIds().isEmpty(), equalTo(true));
}
/**
 * Interrupt the job and make it ACTIVE
 * @param yadaJobId the id of the job to interrupt
 */
public void interruptJob(Long yadaJobId) {
    log.debug("Interrupting job id {}", yadaJobId);
    final ListenableFuture<?> handle = jobHandles.get(yadaJobId);
    if (handle == null) {
        // Nothing to cancel: the job is not (or no longer) tracked as running.
        log.debug("No job handle found for job id {} when interrupting", yadaJobId);
        return;
    }
    handle.cancel(true);
}
/**
 * Runs the synchronous invocation on the executor service. If submission itself fails,
 * the error is converted to a Drift exception and returned as a failed future instead
 * of being thrown at the caller.
 */
@Override
public ListenableFuture<Object> invoke(InvokeRequest request) {
    try {
        return executorService.submit(() -> invokeSynchronous(request));
    } catch (Exception e) {
        return immediateFailedFuture(toDriftException(e));
    }
}
/**
 * Synchronously loads the value for {@code key}: kicks off the loader via the
 * loading value reference and blocks (via getAndRecordStats) until the future
 * completes, recording load statistics.
 */
V loadSync(
    K key,
    int hash,
    LoadingValueReference<K, V> loadingValueReference,
    CacheLoader<? super K, V> loader)
    throws ExecutionException {
  ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
  return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
}
/**
 * Best-effort cancellation of every tracked future except {@code besidesThis}
 * (typically the one that already completed).
 */
private void cancelOthers(ListenableFuture besidesThis) {
    for (ListenableFuture future : futures) {
        if (future != besidesThis) {
            try {
                future.cancel(true);
            } catch (Exception ignored) {
                // Intentionally swallowed: failing to cancel one future must not
                // prevent cancelling the remaining ones.
            }
        }
    }
}
/**
 * Executes a MIGRATE TABLE statement by issuing a migrate-index-tenant transport request.
 * The future resolves to a fixed row count of ONE on success, or fails with the transport error.
 */
@Override
public ListenableFuture<Long> visitMigrateTableAnalyzedStatement(MigrateTableAnalyzedStatement analysis, SingleJobTask jobId) {
    MigrateIndexTenantRequest migrateIndexRequest =
            new MigrateIndexTenantRequest(analysis.destTenantName(), analysis.tableName());
    final SettableFuture<Long> future = SettableFuture.create();
    // Map any successful response to the constant row count ONE.
    ActionListener<MigrateIndexTenantResponse> listener =
            ActionListeners.wrap(future, Functions.<Long>constant(ONE));
    transportActionProvider.transportMigrateIndexTenantAction().execute(migrateIndexRequest, listener);
    return future;
}
/**
 * GraphQL mutation that creates a book by delegating directly to the gRPC future stub.
 */
@Mutation("createBook")
ListenableFuture<Book> createBook(
    CreateBookRequest request, BookServiceGrpc.BookServiceFutureStub client) {
  return client.createBook(request);
}
/**
 * Executes an ALTER TABLE ... ADD COLUMN statement by delegating to the
 * alter-table operation; the returned future carries the affected row count.
 */
@Override
public ListenableFuture<Long> visitAddColumnStatement(AddColumnAnalyzedStatement analysis, SingleJobTask context) {
    return alterTableOperation.executeAlterTableAddColumn(analysis);
}