@Override
public FdfsClient modify(ReadStream<Buffer> stream, long size, FdfsFileId fileId, long offset,
        Handler<AsyncResult<Void>> handler) {
    getTracker().setHandler(tracker -> {
        if (tracker.succeeded()) {
            tracker.result().getStoreStorage(fileId.group(), storage -> {
                if (storage.succeeded()) {
                    storage.result().modify(stream, size, fileId, offset, handler::handle);
                } else {
                    handler.handle(Future.failedFuture(storage.cause()));
                }
            });
        } else {
            handler.handle(Future.failedFuture(tracker.cause()));
        }
    });
    return this;
}
@Override
public BookDatabaseService getBooks(Book book, Handler<AsyncResult<JsonArray>> resultHandler) {
    DynamicQuery dynamicQuery = generateDynamicQuery(SQL_FIND_ALL_BOOKS, book);
    String preparedQuery = dynamicQuery.getPreparedQuery();
    Tuple params = dynamicQuery.getParams();
    pgConnectionPool.rxPreparedQuery(preparedQuery, params)
        .map(PgResult::getDelegate)
        .subscribe(
            pgResult -> {
                JsonArray jsonArray = transformPgResultToJson(pgResult);
                resultHandler.handle(Future.succeededFuture(jsonArray));
            },
            throwable -> {
                LOGGER.error("Failed to get the filtered books by the following conditions: " + params, throwable);
                resultHandler.handle(Future.failedFuture(throwable));
            });
    return this;
}
private void pageUpdateHandler(RoutingContext context) {
    String title = context.request().getParam("title");
    Handler<AsyncResult<Void>> handler = reply -> {
        if (reply.succeeded()) {
            context.response().setStatusCode(303);
            context.response().putHeader("Location", "/wiki/" + title);
            context.response().end();
        } else {
            context.fail(reply.cause());
        }
    };
    String markdown = context.request().getParam("markdown");
    if ("yes".equals(context.request().getParam("newPage"))) {
        dbService.createPage(title, markdown, handler);
    } else {
        dbService.savePage(Integer.valueOf(context.request().getParam("id")), markdown, handler);
    }
}
private Handler<Message<JsonObject>> handler(ServiceHandler serviceHandler) {
    return msg -> {
        vertx.executeBlocking(ar -> {
            try {
                ar.complete(serviceHandler.invokeService(msg.body()));
            } catch (Exception e) {
                ar.fail(e);
            }
        }, res -> {
            if (res.succeeded()) {
                msg.reply(res.result());
            } else {
                int failureCode = 500;
                String message = res.cause().getMessage();
                msg.fail(failureCode, message);
            }
        });
    };
}
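// A minimal usage sketch for the blocking-handler factory above (not from the
// original source): register the returned handler as an event-bus consumer.
// The address "service.invoke" and the myServiceHandler instance are
// hypothetical placeholders.
vertx.eventBus().<JsonObject>consumer("service.invoke", handler(myServiceHandler));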
/**
 * Dispatches incoming requests to their handlers.
 * The concrete methods below are named after, and map one-to-one onto,
 * the corresponding DAO-layer methods, so they are not documented here.
 *
 * @author Leibniz.Hu
 */
@Override
protected <T> void processMethods(Handler<T> replyMsg, JsonArray params, EventBusNamespace method) {
    switch (method) {
        case COMMAND_INSERT_ORDER:
            insertOrder(replyMsg, params);
            break;
        case COMMAND_GET_ORDER_BY_ALIPAY_ORDER_ID:
            getOrderByAlipayOrderId(replyMsg, params);
            break;
        case COMMAND_GET_ORDER_BY_WECHAT_ORDER_ID:
            getOrderByWechatOrderId(replyMsg, params);
            break;
        case COMMAND_UPDATE_PAID_ORDER:
            updateAfterPaid(replyMsg, params);
            break;
        default:
            log.error("Unhandled request method: {}", method);
    }
}
@Override
public FdfsClient modify(String fileFullPathName, FdfsFileId fileId, long offset,
        Handler<AsyncResult<Void>> handler) {
    getTracker().setHandler(tracker -> {
        if (tracker.succeeded()) {
            tracker.result().getStoreStorage(fileId.group(), storage -> {
                if (storage.succeeded()) {
                    storage.result().modify(fileFullPathName, fileId, offset, handler::handle);
                } else {
                    handler.handle(Future.failedFuture(storage.cause()));
                }
            });
        } else {
            handler.handle(Future.failedFuture(tracker.cause()));
        }
    });
    return this;
}
@Override
public WikiDatabaseService fetchAllPages(Handler<AsyncResult<JsonArray>> resultHandler) {
    dbClient.query(sqlQueries.get(SqlQuery.ALL_PAGES), res -> {
        if (res.succeeded()) {
            JsonArray pages = new JsonArray(res.result()
                .getResults()
                .stream()
                .map(json -> json.getString(0))
                .sorted()
                .collect(Collectors.toList()));
            resultHandler.handle(Future.succeededFuture(pages));
        } else {
            LOGGER.error("Database query error", res.cause());
            resultHandler.handle(Future.failedFuture(res.cause()));
        }
    });
    return this;
}
@Override
public WikiDatabaseService fetchPage(String name, Handler<AsyncResult<JsonObject>> resultHandler) {
    dbClient.queryWithParams(sqlQueries.get(SqlQuery.GET_PAGE), new JsonArray().add(name), fetch -> {
        if (fetch.succeeded()) {
            JsonObject response = new JsonObject();
            ResultSet resultSet = fetch.result();
            if (resultSet.getNumRows() == 0) {
                response.put("found", false);
            } else {
                response.put("found", true);
                JsonArray row = resultSet.getResults().get(0);
                response.put("id", row.getInteger(0));
                response.put("rawContent", row.getString(1));
            }
            resultHandler.handle(Future.succeededFuture(response));
        } else {
            LOGGER.error("Database query error", fetch.cause());
            resultHandler.handle(Future.failedFuture(fetch.cause()));
        }
    });
    return this;
}
@Test
public void connect_success(@Mocked NetSocketImpl netSocket) {
    FutureFactoryImpl futureFactory = new FutureFactoryImpl();
    new MockUp<NetClientWrapper>(netClientWrapper) {
        @Mock
        void connect(boolean ssl, int port, String host, Handler<AsyncResult<NetSocket>> connectHandler) {
            connectHandler.handle(futureFactory.succeededFuture(netSocket));
        }
    };
    tcpClientConnection.connect();
    Assert.assertSame(netSocket, tcpClientConnection.getNetSocket());
    Assert.assertEquals(Status.WORKING, Deencapsulation.getField(tcpClientConnection, "status"));
}
WikiDatabaseServiceImpl(io.vertx.ext.jdbc.JDBCClient dbClient, HashMap<SqlQuery, String> sqlQueries,
        Handler<AsyncResult<WikiDatabaseService>> readyHandler) {
    this.dbClient = new JDBCClient(dbClient);
    this.sqlQueries = sqlQueries;
    getConnection()
        .flatMap(conn -> conn.rxExecute(sqlQueries.get(SqlQuery.CREATE_PAGES_TABLE)))
        .map(v -> this)
        .subscribe(RxHelper.toSubscriber(readyHandler));
}
/**
 * Count all records of the underlying table asynchronously.
 *
 * @param resultHandler the resultHandler which succeeds when the blocking method of this type succeeds,
 * or fails with a <code>DataAccessException</code> if the blocking method throws an exception
 * @see #count()
 */
default void countAsync(Handler<AsyncResult<Long>> resultHandler) {
    VertxDAOHelper.countAsync(this, (query, mapper) -> {
        client().fetchOne(query, mapper, h -> {
            if (h.succeeded()) {
                resultHandler.handle(Future.succeededFuture((Long) h.result().get()));
            } else {
                resultHandler.handle(Future.failedFuture(h.cause()));
            }
        });
        return null;
    });
}
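// A minimal caller sketch for countAsync (assumed caller code, not from the
// original source; bookDao stands in for any DAO implementing this interface).
bookDao.countAsync(ar -> {
    if (ar.succeeded()) {
        System.out.println("row count: " + ar.result());
    } else {
        ar.cause().printStackTrace();
    }
});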
public static <A, B> Handler<AsyncResult<A>> ofSucceededVoid(Handler<AsyncResult<B>> handler,
        VoidConsumer alwaysDo, Consumer<A> consumer) {
    return e -> {
        if (alwaysDo != null) {
            alwaysDo.accept();
        }
        if (e.failed()) {
            handler.handle(failedFuture(e.cause()));
            return;
        }
        consumer.accept(e.result());
    };
}
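// A hypothetical usage sketch for ofSucceededVoid (assuming it lives in the
// same F utility class used elsewhere in this code): always release a pooled
// connection, propagate failures to the caller's handler, and consume the
// success value separately. resultHandler, connection, and process are
// illustrative placeholders.
Handler<AsyncResult<Buffer>> bodyHandler =
        F.ofSucceededVoid(resultHandler, connection::release, body -> process(body));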
@Override
public BinlogClientImpl connect(Handler<AsyncResult<Void>> startHandler) {
    if (connected) {
        throw new IllegalStateException("Client already connected.");
    }
    connected = true;
    vertx.<Void>executeBlocking(f -> {
        try {
            client.connect(connectTimeout);
            Runtime.getRuntime().addShutdownHook(shutdownHook);
            f.complete();
        } catch (Exception e) {
            f.fail(e);
        }
    }, true, ar -> {
        if (ar.succeeded()) {
            if (logger.isDebugEnabled()) {
                logger.debug("Binlog listener [" + host + ":" + port + "] started");
            }
        } else {
            connected = false;
            if (exceptionHandler != null) {
                exceptionHandler.handle(ar.cause());
            }
        }
        startHandler.handle(ar);
    });
    return this;
}
private Handler<HttpClientResponse> compareBodyHandler(String message, TestContext context, Async f) {
    return r -> {
        context.assertEquals(200, r.statusCode(), "Failed to call consumes test '" + message + "'");
        r.exceptionHandler(context::fail).bodyHandler(body -> {
            context.assertEquals(message, body.toString());
            f.complete();
        });
    };
}
private @NotNull HttpClientRequest request(@NotNull Handler<Buffer> handler, @NotNull Async latch) {
    return vertx.createHttpClient()
        .get(PORT, "localhost", "/metrics")
        .handler(response -> {
            context.assertEquals(HttpResponseStatus.OK.code(), response.statusCode());
            response.bodyHandler(body -> {
                handler.handle(body);
                latch.complete();
            });
        });
}
@Override
// tag::rx-data-flow[]
public WikiDatabaseService fetchAllPages(Handler<AsyncResult<JsonArray>> resultHandler) {
    dbClient.rxQuery(sqlQueries.get(SqlQuery.ALL_PAGES))
        .flatMapObservable(res -> {                  // <1>
            List<JsonArray> results = res.getResults();
            return Observable.from(results);         // <2>
        })
        .map(json -> json.getString(0))              // <3>
        .sorted()                                    // <4>
        .collect(JsonArray::new, JsonArray::add)     // <5>
        .subscribe(RxHelper.toSubscriber(resultHandler));
    return this;
}
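// A minimal caller-side sketch (assumed, not from the original source):
// consuming fetchAllPages through the service interface, e.g. from a verticle.
dbService.fetchAllPages(ar -> {
    if (ar.succeeded()) {
        ar.result().forEach(page -> System.out.println(page));
    } else {
        LOGGER.error("Failed to fetch pages", ar.cause());
    }
});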
private <T> Handler<AsyncResult<List<T>>> createListHandler(Message msg) {
    return res -> {
        if (res.failed()) {
            if (res.cause() instanceof ServiceException) {
                msg.reply(res.cause());
            } else {
                msg.reply(new ServiceException(-1, res.cause().getMessage()));
            }
        } else {
            msg.reply(new JsonArray(res.result()));
        }
    };
}
private Handler<Void> finishEndHandler(RoutingContext routingContext, Span span) {
    return handler -> {
        decorators.forEach(spanDecorator -> spanDecorator.onResponse(routingContext.request(), span));
        span.finish();
    };
}
@Override
public WikiDatabaseService deletePage(int id, Handler<AsyncResult<Void>> resultHandler) {
    JsonArray data = new JsonArray().add(id);
    dbClient.rxUpdateWithParams(sqlQueries.get(SqlQuery.DELETE_PAGE), data)
        .map(res -> (Void) null)
        .subscribe(RxHelper.toSubscriber(resultHandler));
    return this;
}
@Override
public FdfsClient uploadAppender(Buffer buffer, String ext, Handler<AsyncResult<FdfsFileId>> handler) {
    if (Buffer.buffer(ext, options.getCharset()).length() > FdfsProtocol.FDFS_FILE_EXT_NAME_MAX_LEN) {
        handler.handle(Future.failedFuture(
                "ext is too long (greater than " + FdfsProtocol.FDFS_FILE_EXT_NAME_MAX_LEN + ")"));
        return this;
    }
    getTracker().setHandler(tracker -> {
        if (tracker.succeeded()) {
            tracker.result().getStoreStorage(storage -> {
                if (storage.succeeded()) {
                    storage.result().uploadAppender(buffer, ext, handler::handle);
                } else {
                    handler.handle(Future.failedFuture(storage.cause()));
                }
            });
        } else {
            handler.handle(Future.failedFuture(tracker.cause()));
        }
    });
    return this;
}
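// A hypothetical end-to-end sketch combining the client methods above:
// upload an appender file, then modify it at offset 0 from a local file.
// fdfsClient, the payload, and the local path are illustrative placeholders.
fdfsClient.uploadAppender(Buffer.buffer("hello world"), "txt", upload -> {
    if (upload.succeeded()) {
        FdfsFileId fileId = upload.result();
        fdfsClient.modify("/tmp/patch.bin", fileId, 0, modify -> {
            if (modify.failed()) {
                modify.cause().printStackTrace();
            }
        });
    }
});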
@Override
public BookDatabaseService deleteBookById(int id, Handler<AsyncResult<Void>> resultHandler) {
    pgConnectionPool.rxPreparedQuery(SQL_DELETE_BOOK_BY_ID, Tuple.of(id))
        .subscribe(
            updateResult -> resultHandler.handle(Future.succeededFuture()),
            throwable -> {
                LOGGER.error("Failed to delete the book by id " + id, throwable);
                resultHandler.handle(Future.failedFuture(throwable));
            });
    return this;
}
private Handler<AsyncResult<HttpServer>> serverStartHandler(final Future<Void> startFuture) {
    return onComplete -> {
        if (onComplete.succeeded()) {
            startFuture.complete();
        } else {
            startFuture.fail(onComplete.cause());
            // Exit with a non-zero status so the startup failure is visible to the OS/supervisor.
            System.exit(1);
        }
    };
}
@Override
public WikiDatabaseService savePage(int id, String markdown, Handler<AsyncResult<Void>> resultHandler) {
    JsonArray data = new JsonArray().add(markdown).add(id);
    dbClient.updateWithParams(sqlQueries.get(SqlQuery.SAVE_PAGE), data, res -> {
        if (res.succeeded()) {
            resultHandler.handle(Future.succeededFuture());
        } else {
            LOGGER.error("Database query error", res.cause());
            resultHandler.handle(Future.failedFuture(res.cause()));
        }
    });
    return this;
}
@Override
public synchronized InputStreamToReadStream exceptionHandler(Handler<Throwable> handler) {
    check();
    this.exceptionHandler = handler;
    return this;
}
@Fluent
WikiDatabaseService fetchPage(String name, Handler<AsyncResult<JsonObject>> resultHandler);
private void lookupAll(Function<Record, Boolean> filter, boolean includeOutOfService,
        Handler<AsyncResult<List<Record>>> handler) {
    if (isNull(filter)) {
        throw new InvalidFilterException();
    }
    serviceDiscovery.getRecords(filter, includeOutOfService, handler);
}
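// A minimal usage sketch (assumed caller code): look up every record whose
// service type is "http-endpoint", including records that are out of service.
lookupAll(record -> "http-endpoint".equals(record.getType()), true, ar -> {
    if (ar.succeeded()) {
        ar.result().forEach(record -> System.out.println(record.getName()));
    }
});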
CloseStatementCommand(Handler<? super CommandResponse<Void>> handler) {
    super(handler);
}
@Override
public EntityManagerExt lock(Object entity, LockModeType lockMode, JsonObject properties,
        Handler<AsyncResult<Void>> handler) {
    // TODO Auto-generated method stub
    return null;
}
@Override
public EntityManagerExt isOpen(Handler<AsyncResult<Boolean>> result) {
    // TODO Auto-generated method stub
    return null;
}
@Override
public SSDBClient multiZdel(String setKey, List<String> itemKeys, Handler<AsyncResult<Void>> handler) {
    sendCommand(F.ofSucceeded(handler, this::voidValue), "multi_zdel", setKey, itemKeys.toArray());
    return this;
}
public void lookupAllByJson(JsonObject jsonFilter, Handler<AsyncResult<List<Record>>> handler) {
    serviceDiscovery.getRecords(jsonFilter, handler);
}
public void publish(DataSourceServiceConfiguration configuration, Handler<AsyncResult<Record>> handler) {
    serviceDiscovery.publish(createMongoRecord(configuration), handler);
}
@Override
public EntityManagerExt createNativeQuery(String sqlString, Handler<AsyncResult<io.vertx.ext.jpa.Query>> handler) {
    // TODO Auto-generated method stub
    return null;
}
@Override
public SSDBClient hscan(String hashKey, String fieldKeyStart, String fieldKeyEnd, int limit,
        Handler<AsyncResult<Map<String, String>>> handler) {
    sendCommand(F.ofSucceeded(handler, this::mapValue), "hscan", hashKey, fieldKeyStart, fieldKeyEnd, limit);
    return this;
}
@Override
public FdfsStorage setMetaData(FdfsFileId fileId, JsonObject metaData, byte flag,
        Handler<AsyncResult<Void>> handler) {
    Future<FdfsConnection> futureConn = getConnection();
    futureConn.compose(connection -> {
        // Start listening for the storage server's response before writing the request.
        Future<FdfsPacket> futureResponse = FdfsProtocol.recvPacket(vertx, options.getNetworkTimeout(),
                connection, FdfsProtocol.STORAGE_PROTO_CMD_RESP, 0, null);

        Buffer metaBuffer = FdfsProtocol.packMetaData(metaData, options.getCharset());
        Buffer nameBuffer = Buffer.buffer(fileId.name(), options.getCharset());

        // Body layout: name length (8) + meta length (8) + flag (1) + group name (fixed width)
        // + file name + packed metadata.
        long bodyLength = FdfsProtocol.FDFS_PROTO_PKG_LEN_SIZE + FdfsProtocol.FDFS_PROTO_PKG_LEN_SIZE + 1
                + FdfsProtocol.FDFS_GROUP_NAME_MAX_LEN + nameBuffer.length() + metaBuffer.length();

        Buffer headerBuffer = FdfsProtocol.packHeader(FdfsProtocol.STORAGE_PROTO_CMD_SET_METADATA, (byte) 0,
                bodyLength);
        connection.write(headerBuffer);
        if (connection.writeQueueFull()) {
            // Apply backpressure: pause until the socket's write queue drains.
            connection.pause();
            connection.drainHandler(v -> connection.resume());
        }

        Buffer groupBuffer = Buffer.buffer(fileId.group(), options.getCharset());
        Buffer bodyBuffer = FdfsUtils.newZero(bodyLength);
        int offset = 0;
        bodyBuffer.setLong(offset, nameBuffer.length());
        offset += FdfsProtocol.FDFS_PROTO_PKG_LEN_SIZE;
        bodyBuffer.setLong(offset, metaBuffer.length());
        offset += FdfsProtocol.FDFS_PROTO_PKG_LEN_SIZE;
        bodyBuffer.setByte(offset, flag);
        offset += 1;
        bodyBuffer.setBuffer(offset, groupBuffer);
        offset += FdfsProtocol.FDFS_GROUP_NAME_MAX_LEN;
        bodyBuffer.setBuffer(offset, nameBuffer);
        offset += nameBuffer.length();
        bodyBuffer.setBuffer(offset, metaBuffer);

        connection.write(bodyBuffer);
        if (connection.writeQueueFull()) {
            connection.pause();
            connection.drainHandler(v -> connection.resume());
        }

        return futureResponse;
    }).setHandler(ar -> {
        // Always release the pooled connection before reporting the outcome.
        if (futureConn.succeeded()) {
            futureConn.result().release();
        }
        if (ar.succeeded()) {
            handler.handle(Future.succeededFuture());
        } else {
            handler.handle(Future.failedFuture(ar.cause()));
        }
    });
    return this;
}
@Fluent
EntityManagerExt createQuery(String qlString, Handler<AsyncResult<Query>> handler);
@Fluent
WikiDatabaseService createPage(String title, String markdown, Handler<AsyncResult<Void>> resultHandler);
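// A hypothetical caller sketch for the two @Fluent service methods above:
// create a page, then fetch it back. dbService stands in for a concrete
// WikiDatabaseService instance or event-bus proxy; the "found" and
// "rawContent" keys match the fetchPage implementation shown earlier.
dbService.createPage("Home", "# Welcome", create -> {
    if (create.succeeded()) {
        dbService.fetchPage("Home", fetch -> {
            if (fetch.succeeded() && fetch.result().getBoolean("found")) {
                System.out.println(fetch.result().getString("rawContent"));
            }
        });
    }
});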
@Override
public WriteStream<JsonObject> exceptionHandler(Handler<Throwable> handler) {
    return this;
}