private ZooKeeperCommandExecutor(String replicaId, CommandExecutor delegate, CuratorFramework curator, String zkPath, boolean createPathIfNotExist, File revisionFile, int numWorkers, int maxLogCount, long minLogAgeMillis) { super(replicaId); this.delegate = delegate; this.revisionFile = revisionFile; this.curator = curator; this.zkPath = zkPath; this.createPathIfNotExist = createPathIfNotExist; this.maxLogCount = maxLogCount; this.minLogAgeMillis = minLogAgeMillis; final ThreadPoolExecutor executor = new ThreadPoolExecutor( numWorkers, numWorkers, 60, TimeUnit.SECONDS, new LinkedTransferQueue<>(), new DefaultThreadFactory("zookeeper-command-executor", true)); executor.allowCoreThreadTimeOut(true); this.executor = executor; logWatcher = new PathChildrenCache(curator, absolutePath(LOG_PATH), true); logWatcher.getListenable().addListener(this, MoreExecutors.directExecutor()); oldLogRemover = new OldLogRemover(); leaderSelector = new LeaderSelector(curator, absolutePath(LEADER_PATH), oldLogRemover); leaderSelector.autoRequeue(); }
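The constructor above sizes its worker pool with corePoolSize == maximumPoolSize and relies on allowCoreThreadTimeOut(true) so idle workers are reclaimed after 60 seconds. A minimal, self-contained sketch of that pattern, assuming an illustrative class name and pool size (not taken from the original class):

import io.netty.util.concurrent.DefaultThreadFactory;

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class ReclaimableExecutorSketch {
    public static void main(String[] args) throws InterruptedException {
        // Same pattern as above: corePoolSize == maximumPoolSize, an unbounded queue,
        // and allowCoreThreadTimeOut(true) so idle workers exit after the keep-alive period.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                4, 4, 60, TimeUnit.SECONDS,
                new LinkedTransferQueue<>(),
                new DefaultThreadFactory("zookeeper-command-executor", true)); // daemon threads
        executor.allowCoreThreadTimeOut(true);

        executor.execute(() ->
                // DefaultThreadFactory names threads "<poolName>-<poolId>-<threadId>".
                System.out.println("running on " + Thread.currentThread().getName()));

        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}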
public void start(SlaveNode slaveNode) {
    if (slaveNode == null) {
        throw new IllegalArgumentException("slaveNode is null");
    }
    EventLoopGroup bossGroup = new NioEventLoopGroup(CommonConstants.BOSS_GROUP_SIZE,
            new DefaultThreadFactory("boss", true));
    EventLoopGroup workerGroup = new NioEventLoopGroup(CommonConstants.WORKER_GROUP_SIZE,
            new DefaultThreadFactory("worker", true));
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .handler(new LoggingHandler(LogLevel.INFO))
         .childHandler(new SlaveServerInitializer());
        ChannelFuture future = b.bind(slaveNode.getPort()).sync();
        LOGGER.info("SlaveServer Startup at port:{}", slaveNode.getPort());
        // Wait until the server socket is closed.
        future.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        LOGGER.error("InterruptedException:", e);
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
public void start() {
    EventLoopGroup bossGroup = new NioEventLoopGroup(CommonConstants.BOSS_GROUP_SIZE,
            new DefaultThreadFactory("boss", true));
    EventLoopGroup workerGroup = new NioEventLoopGroup(CommonConstants.WORKER_GROUP_SIZE,
            new DefaultThreadFactory("worker", true));
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .handler(new LoggingHandler(LogLevel.INFO))
         .childHandler(new MasterServerInitializer());
        ChannelFuture future = b.bind(CommonConstants.SERVER_PORT).sync();
        LOGGER.info("MasterServer Startup at port:{}", CommonConstants.SERVER_PORT);
        // Wait until the server socket is closed.
        future.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        LOGGER.error("InterruptedException:", e);
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
public void start() {
    EventLoopGroup bossGroup = new NioEventLoopGroup(CommonConstants.BOSS_GROUP_SIZE,
            new DefaultThreadFactory("boss", true));
    EventLoopGroup workerGroup = new NioEventLoopGroup(CommonConstants.WORKER_GROUP_SIZE,
            new DefaultThreadFactory("worker", true));
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .handler(new LoggingHandler(LogLevel.INFO))
         .childHandler(new ServerInitializer());
        ChannelFuture future = b.bind(CommonConstants.SERVER_PORT).sync();
        logger.info("NettyServer Startup at port:{}", CommonConstants.SERVER_PORT);
        // Wait until the server socket is closed.
        future.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("InterruptedException:", e);
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
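All three start() methods above block the calling thread on closeFuture().sync() until the server socket closes. Where blocking is not desirable, the same shutdown hygiene can be attached as a listener instead; the following is a hedged sketch with illustrative class and port names, not code from any of the projects above:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.util.concurrent.DefaultThreadFactory;

public final class NonBlockingStartSketch {
    private final EventLoopGroup bossGroup = new NioEventLoopGroup(1, new DefaultThreadFactory("boss", true));
    private final EventLoopGroup workerGroup = new NioEventLoopGroup(0, new DefaultThreadFactory("worker", true));

    public ChannelFuture start(int port) {
        ServerBootstrap b = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // application handlers would be added here
                    }
                });
        ChannelFuture bindFuture = b.bind(port);
        // Release both groups when the server channel eventually closes, instead of
        // blocking the caller on closeFuture().sync().
        bindFuture.channel().closeFuture().addListener((ChannelFutureListener) f -> {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        });
        return bindFuture;
    }
}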
public synchronized void start(CommandExecutor commandExecutor) { if (isStarted()) { return; } this.commandExecutor = requireNonNull(commandExecutor, "commandExecutor"); scheduler = MoreExecutors.listeningDecorator(Executors.newSingleThreadScheduledExecutor( new DefaultThreadFactory("mirroring-scheduler", true))); worker = MoreExecutors.listeningDecorator( new ThreadPoolExecutor(0, numThreads, 1, TimeUnit.MINUTES, new SynchronousQueue<>(), new DefaultThreadFactory("mirroring-worker", true))); final ListenableScheduledFuture<?> future = scheduler.scheduleWithFixedDelay( this::schedulePendingMirrors, TICK.getSeconds(), TICK.getSeconds(), TimeUnit.SECONDS); FuturesExtra.addFailureCallback( future, cause -> logger.error("Git-to-CD mirroring scheduler stopped due to an unexpected exception:", cause)); }
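FuturesExtra.addFailureCallback comes from Spotify's futures-extra library. If only Guava is available, equivalent wiring can be done with Futures.addCallback; the sketch below is an assumed alternative (class name, delay, and log message are illustrative, not from the original scheduler):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableScheduledFuture;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import io.netty.util.concurrent.DefaultThreadFactory;

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class SchedulerFailureLoggingSketch {
    static void schedule(Runnable task) {
        ListeningScheduledExecutorService scheduler = MoreExecutors.listeningDecorator(
                Executors.newSingleThreadScheduledExecutor(
                        new DefaultThreadFactory("mirroring-scheduler", true)));
        ListenableScheduledFuture<?> future =
                scheduler.scheduleWithFixedDelay(task, 60, 60, TimeUnit.SECONDS);
        // A fixed-delay future only completes if a run throws, so the failure branch
        // is the interesting one; this mirrors FuturesExtra.addFailureCallback with plain Guava.
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override public void onSuccess(Object result) { }
            @Override public void onFailure(Throwable cause) {
                System.err.println("scheduler stopped unexpectedly: " + cause);
            }
        }, MoreExecutors.directExecutor());
    }
}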
public NettyClientImpl(URL url) { super(url); this.remoteAddress = new InetSocketAddress(url.getHost(), url.getPort()); this.timeout = url.getIntParameter(URLParam.requestTimeout.getName(), URLParam.requestTimeout.getIntValue()); this.scheduledExecutorService = Executors.newScheduledThreadPool(5, new DefaultThreadFactory(String.format("%s-%s", Constants.FRAMEWORK_NAME, "future"))); this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() { @Override public void run() { scanRpcFutureTable(); } }, 0, 5000, TimeUnit.MILLISECONDS); }
@Override protected void doStart(Listener listener) throws Throwable { workerGroup = new NioEventLoopGroup(http_work, new DefaultThreadFactory(ThreadNames.T_HTTP_CLIENT)); b = new Bootstrap(); b.group(workerGroup); b.channel(NioSocketChannel.class); b.option(ChannelOption.SO_KEEPALIVE, true); b.option(ChannelOption.TCP_NODELAY, true); b.option(ChannelOption.SO_REUSEADDR, true); b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 4000); b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); b.handler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast("decoder", new HttpResponseDecoder()); ch.pipeline().addLast("aggregator", new HttpObjectAggregator(maxContentLength)); ch.pipeline().addLast("encoder", new HttpRequestEncoder()); ch.pipeline().addLast("handler", new HttpClientHandler(NettyHttpClient.this)); } }); timer = new HashedWheelTimer(new NamedThreadFactory(T_HTTP_TIMER), 1, TimeUnit.SECONDS, 64); listener.onSuccess(); }
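The HashedWheelTimer created above (1-second ticks, 64 buckets) is typically used for cheap per-request timeouts. A small stand-alone sketch of that usage, with an illustrative thread-factory name and delay:

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.concurrent.DefaultThreadFactory;

import java.util.concurrent.TimeUnit;

public final class WheelTimerSketch {
    public static void main(String[] args) throws InterruptedException {
        // Same configuration as above: 1-second ticks, 64 buckets per wheel.
        HashedWheelTimer timer = new HashedWheelTimer(
                new DefaultThreadFactory("http-timer"), 1, TimeUnit.SECONDS, 64);

        // Schedule a request-timeout task; cancel it if the response arrives first.
        Timeout timeout = timer.newTimeout(
                t -> System.out.println("request timed out"), 5, TimeUnit.SECONDS);

        // Simulate a response arriving in time.
        Thread.sleep(1000);
        timeout.cancel();

        // stop() releases the worker thread and returns any timeouts that never fired.
        timer.stop();
    }
}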
public void startServers() throws Exception { int bossThreadCount = Settings.INSTANCE.getInt("netty.bossThreadCount", 0); int workerThreadCount = Settings.INSTANCE.getInt("netty.workerThreadCount", 0); ThreadFactory bossThreadFactory = new DefaultThreadFactory("lannister/boss"); ThreadFactory workerThreadFactory = new DefaultThreadFactory("lannister/worker"); if (Literals.NETTY_EPOLL.equals(Settings.INSTANCE.nettyTransportMode())) { bossGroup = new EpollEventLoopGroup(bossThreadCount, bossThreadFactory); workerGroup = new EpollEventLoopGroup(workerThreadCount, workerThreadFactory); } else { bossGroup = new NioEventLoopGroup(bossThreadCount, bossThreadFactory); workerGroup = new NioEventLoopGroup(workerThreadCount, workerThreadFactory); } mqttServer = new MqttServer(bossGroup, workerGroup); mqttServer.start(); webServer = new WebServer(bossGroup, workerGroup); webServer.start("net.anyflow"); }
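The snippet above switches between epoll and NIO purely on a configuration literal. A common complementary guard is to fall back to NIO when the native transport cannot be loaded; a sketch of such a helper (class and method names are assumptions):

import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

import java.util.concurrent.ThreadFactory;

final class TransportSelector {
    // Fall back to NIO automatically when native epoll is not loadable,
    // instead of relying only on a configuration flag.
    static EventLoopGroup newWorkerGroup(int threads, ThreadFactory tf) {
        return Epoll.isAvailable() ? new EpollEventLoopGroup(threads, tf) : new NioEventLoopGroup(threads, tf);
    }

    static Class<? extends ServerChannel> serverChannelType() {
        return Epoll.isAvailable() ? EpollServerSocketChannel.class : NioServerSocketChannel.class;
    }
}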
@Test
public void testDoubleIpAddress() throws Exception {
    String serviceUrl = "pulsar://non-existing-dns-name:" + BROKER_PORT;
    ClientConfiguration conf = new ClientConfiguration();
    EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, new DefaultThreadFactory("test"));
    ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop));
    PulsarClientImpl client = new PulsarClientImpl(serviceUrl, conf, eventLoop, pool);
    List<InetAddress> result = Lists.newArrayList();

    // Add a non-existent IP to the response to check that we're trying the 2nd address as well
    result.add(InetAddress.getByName("127.0.0.99"));
    result.add(InetAddress.getByName("127.0.0.1"));
    Mockito.when(pool.resolveName("non-existing-dns-name")).thenReturn(CompletableFuture.completedFuture(result));

    // Creating the producer should succeed by trying the 2nd IP
    client.createProducer("persistent://sample/standalone/ns/my-topic");
    client.close();
}
public PulsarClientImpl(String serviceUrl, ClientConfiguration conf, EventLoopGroup eventLoopGroup, ConnectionPool cnxPool) throws PulsarClientException { if (isBlank(serviceUrl) || conf == null || eventLoopGroup == null) { throw new PulsarClientException.InvalidConfigurationException("Invalid client configuration"); } this.eventLoopGroup = eventLoopGroup; this.conf = conf; conf.getAuthentication().start(); this.cnxPool = cnxPool; if (serviceUrl.startsWith("http")) { lookup = new HttpLookupService(serviceUrl, conf, eventLoopGroup); } else { lookup = new BinaryProtoLookupService(this, serviceUrl, conf.isUseTls()); } timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer"), 1, TimeUnit.MILLISECONDS); externalExecutorProvider = new ExecutorProvider(conf.getListenerThreads(), "pulsar-external-listener"); producers = Maps.newIdentityHashMap(); consumers = Maps.newIdentityHashMap(); state.set(State.Open); }
public NettyConnector(InetSocketAddress isa, final TransportConfig transportConfig) { workerGroup = new NioEventLoopGroup(1, new DefaultThreadFactory("N4C-Work")); clientBoot = new Bootstrap().group(workerGroup).channel(NioSocketChannel.class); clientBoot.option(ChannelOption.TCP_NODELAY, true); clientBoot.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, transportConfig.getConnectTimeout()); clientBoot.option(ChannelOption.SO_RCVBUF, 8 * 1024).option(ChannelOption.SO_SNDBUF, 8 * 1024); clientBoot.handler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { TransportProtocolDecoder decoder = new TransportProtocolDecoder(); decoder.setMaxObjectSize(transportConfig.getMaxSize()); TransportProtocolEncoder encoder = new TransportProtocolEncoder(); encoder.setMaxObjectSize(transportConfig.getMaxSize()); ch.pipeline().addLast("TransportProtocolDecoder", decoder); ch.pipeline().addLast("TransportProtocolEncoder", encoder); int intervalSeconds = transportConfig.getHeartbeatIntervalSeconds(); ch.pipeline().addLast("IdleStateHandler", new IdleStateHandler(0, intervalSeconds, 0)); ch.pipeline().addLast("NettyClientHandler", new NettyClientHandler()); } }); clientBoot.remoteAddress(isa); }
public void run() throws Exception { final ThreadFactory connectFactory = new DefaultThreadFactory("rendezvous"); final NioEventLoopGroup connectGroup = new NioEventLoopGroup(1, connectFactory, NioUdtProvider.BYTE_PROVIDER); try { final Bootstrap bootstrap = new Bootstrap(); bootstrap.group(connectGroup) .channelFactory(NioUdtProvider.BYTE_RENDEZVOUS) .handler(new ChannelInitializer<UdtChannel>() { @Override protected void initChannel(UdtChannel ch) throws Exception { ch.pipeline().addLast( new LoggingHandler(LogLevel.INFO), new ByteEchoPeerHandler(messageSize)); } }); final ChannelFuture future = bootstrap.connect(peerAddress, myAddress).sync(); future.channel().closeFuture().sync(); } finally { connectGroup.shutdownGracefully(); } }
/**
 * Private constructor.
 */
private NettyCenter() {
    int maybeThreadSize = Runtime.getRuntime().availableProcessors();
    if (maybeThreadSize == 1) maybeThreadSize += 2;
    else if (maybeThreadSize == 8) maybeThreadSize = 2;
    else if (maybeThreadSize > 8) maybeThreadSize /= 2;
    /**
     * Build the event loop group.
     */
    eventLoopGroup = new NioEventLoopGroup(maybeThreadSize, new DefaultThreadFactory("NettyNioLoopGroup"));
    /**
     * Build the timer.
     */
    hashedWheelTimer = new HashedWheelTimer(new DefaultThreadFactory("NettyHashedWheelTimer"));
    /**
     * Build the client-side SSL context.
     */
    try {
        SslContextBuilder sslContextBuilder = SslContextBuilder.forClient();
        sslContextBuilder.clientAuth(ClientAuth.OPTIONAL);
        simpleClientSslContext = sslContextBuilder.build();
    } catch (Throwable e) {
        log.error("NettyCenter :: initialize client sslcontext error!", e);
    }
}
@Inject
public ApiServerChannelInitializer(ObjectMapper objectMapper,
                                   ApiProtocolSwitcher apiProtocolSwitcher,
                                   // ObservableEncoder rxjavaHandler,
                                   GeneratedJaxRsModuleHandler jaxRsModuleHandler) {
    this.apiProtocolSwitcher = apiProtocolSwitcher;
    // this.rxjavaHandler = rxjavaHandler;
    this.jaxRsHandlers = jaxRsModuleHandler;

    SimpleModule nettyModule = new SimpleModule("Netty", PackageVersion.VERSION);
    nettyModule.addSerializer(new ByteBufSerializer());
    objectMapper.registerModule(nettyModule);

    // TODO: allow customization of the thread pool!
    // rxJavaGroup = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("rxjava"));
    jaxRsGroup = new DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors(),
            new DefaultThreadFactory("jax-rs"));
}
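The DefaultEventExecutorGroup created above only pays off when handlers are actually registered against it, which happens through the addLast overload that takes an executor group. A hedged sketch of that wiring with placeholder class and handler names:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.DefaultThreadFactory;

public class OffloadingInitializerSketch extends ChannelInitializer<SocketChannel> {
    // Handlers registered with this group run on "jax-rs-*" threads instead of the I/O
    // event loop, so blocking resource invocations do not stall reads and writes.
    private final DefaultEventExecutorGroup jaxRsGroup = new DefaultEventExecutorGroup(
            Runtime.getRuntime().availableProcessors(), new DefaultThreadFactory("jax-rs"));

    @Override
    protected void initChannel(SocketChannel ch) {
        // The addLast overload taking an EventExecutorGroup binds the handler to that group.
        ch.pipeline().addLast(jaxRsGroup, "jax-rs-handler", new BlockingHandler());
    }

    private static final class BlockingHandler extends ChannelInboundHandlerAdapter {
        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
            // potentially blocking work happens here, off the event loop
            ctx.fireChannelRead(msg);
        }
    }
}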
public void run() throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(0, new DefaultThreadFactory("lb-boss"));
    EventLoopGroup workerGroup = new NioEventLoopGroup(0, new DefaultThreadFactory("lb-worker"));
    ServerBootstrap b = new ServerBootstrap();
    try {
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .childHandler(proxyInitializer)
         .childOption(ChannelOption.AUTO_READ, false)
         .bind(lbPort).sync().channel().closeFuture().sync();
    } catch (Throwable e) {
        throw new Exception("Failed to start load balancer on port " + lbPort, e);
    } finally {
        // b.shutdown();
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
        proxyInitializer.close();
    }
}
public EventLoopGroup epollGroup() { EventLoopGroup epollEventLoopGroup = new EpollEventLoopGroup(0, new DefaultThreadFactory(threadPrefix)); ChannelFactory<EpollDomainSocketChannel> factory = new ChannelFactory<EpollDomainSocketChannel>() { @Override public EpollDomainSocketChannel newChannel() { return configure(new EpollDomainSocketChannel()); } }; bootstrap.group(epollEventLoopGroup).channelFactory(factory).handler(new ChannelInitializer<UnixChannel>() { @Override protected void initChannel(final UnixChannel channel) throws Exception { channel.pipeline().addLast(new HttpClientCodec()); } }); return epollEventLoopGroup; }
@Override public void start() throws Exception { int bossThreads = conf.getInt("netty.threads.Boss"); bossLoop = eventLoop(bossThreads, "boss"); int workerThreads = conf.getInt("netty.threads.Worker"); if (workerThreads > 0) { workerLoop = eventLoop(Math.max(4, workerThreads), "worker"); } else { workerLoop = bossLoop; } ThreadFactory threadFactory = new DefaultThreadFactory(conf.getString("netty.threads.Name")); this.executor = new DefaultEventExecutorGroup(conf.getInt("netty.threads.Max"), threadFactory); this.ch = bootstrap(executor, null, conf.getInt("application.port")); boolean securePort = conf.hasPath("application.securePort"); if (securePort) { bootstrap(executor, NettySslContext.build(conf), conf.getInt("application.securePort")); } }
@Override protected List<com.google.common.util.concurrent.Service> buildServices() { Injector injector = Guice.createInjector(new AppendLogModule(stateMachine), new WebModule()); List<com.google.common.util.concurrent.Service> services = Lists.newArrayList(); services.add(raft); JettyService jetty = injector.getInstance(JettyService.class); jetty.init(config.httpPort); services.add(jetty); if (config.gossip != null) { ScheduledExecutorService executor = Executors.newScheduledThreadPool(0, new DefaultThreadFactory( "pool-gossip-workers")); ClusterService cluster = new ClusterService(config.gossip, executor); services.add(cluster); services.add(new RepairService(raft, cluster, executor)); } return services; }
@Override
protected void doStart() {
    try {
        int nThreads = 1;
        group = new NioEventLoopGroup(nThreads, new DefaultThreadFactory("pool-redis"));
        ServerBootstrap b = buildBootstrap(group);
        // Start the server.
        Channel serverChannel = b.bind().sync().channel();
        this.serverChannel = serverChannel;
        // return f;
        this.notifyStarted();
    } catch (Exception e) {
        this.notifyFailed(e);
    }
}
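doStart() above follows Guava's AbstractService contract (notifyStarted/notifyFailed). The matching shutdown side usually closes the server channel and releases the event loop group before calling notifyStopped; a sketch under that assumption (class and field names are illustrative, not from the original service):

import com.google.common.util.concurrent.AbstractService;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;

public abstract class NettyBackedServiceSketch extends AbstractService {
    protected Channel serverChannel;
    protected EventLoopGroup group;

    @Override
    protected void doStop() {
        try {
            if (serverChannel != null) {
                // Stop accepting new connections first.
                serverChannel.close().syncUninterruptibly();
            }
            if (group != null) {
                // Then release the event loop threads.
                group.shutdownGracefully().syncUninterruptibly();
            }
            notifyStopped();
        } catch (Exception e) {
            notifyFailed(e);
        }
    }
}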
public DataSourceSupplier(final Config config) { this.config = config; encoding = new EncodingRegistry( config.encodingClasses() .map(l -> l.stream().map(this::loadEncoding).collect(Collectors.toList()))); channelSupplier = new ChannelSupplier(config.charset(), createMarshaller(), new Unmarshaller(), new NioEventLoopGroup(config.nioThreads().orElse(0), new DefaultThreadFactory("ndbc-netty4", true)), config.host(), config.port()); }
@BeforeClass public static void setupHikari() { ds = Hikari.createHikari("jdbc:hsqldb:mem:JdbcUtilsTest", "sa", "", new Properties()); ex = Hikari.createExecutorFor(ds, false, () -> new UnorderedThreadPoolEventExecutor(0, new DefaultThreadFactory("sanity-test")) ); dbAsync = new FutureInterpreter(ds, SanityTest.ex); }
public NettyServerImpl(URL url, MessageRouter router){ super(url); this.localAddress = new InetSocketAddress(url.getPort()); this.router = router; this.pool = new ThreadPoolExecutor(url.getIntParameter(URLParam.minWorkerThread.getName(), URLParam.minWorkerThread.getIntValue()), url.getIntParameter(URLParam.maxWorkerThread.getName(), URLParam.maxWorkerThread.getIntValue()), 120, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new DefaultThreadFactory(String.format("%s-%s", Constants.FRAMEWORK_NAME, "biz"))); }
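Because the business pool above uses an unbounded LinkedBlockingQueue, the executor never grows past minWorkerThread, so the maxWorkerThread setting is effectively unused. If the maximum is meant to matter, a bounded queue and an explicit rejection policy are needed; a sketch with illustrative names and sizes:

import io.netty.util.concurrent.DefaultThreadFactory;

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class BizPoolSketch {
    // With an unbounded queue a ThreadPoolExecutor only ever uses its core threads;
    // a bounded queue lets it grow toward max, and CallerRunsPolicy applies back-pressure
    // once both the queue and the pool are full (queue size is illustrative).
    static ThreadPoolExecutor newBizPool(int core, int max) {
        return new ThreadPoolExecutor(core, max, 120, TimeUnit.SECONDS,
                new ArrayBlockingQueue<>(1024),
                new DefaultThreadFactory("demo-biz"),
                new ThreadPoolExecutor.CallerRunsPolicy());
    }
}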
public static void main(String... args) throws Exception { System.out.println("starting server"); String host = System.getProperty("host", "0.0.0.0"); int port = Integer.getInteger("port", 8001); boolean useEpoll = Boolean.getBoolean("usePoll"); Class channel; if (useEpoll) { channel = EpollServerSocketChannel.class; } else { channel = NioServerSocketChannel.class; } ThreadFactory tf = new DefaultThreadFactory("server-elg-", true /*daemon */); NioEventLoopGroup boss = new NioEventLoopGroup(1, tf); NioEventLoopGroup worker = new NioEventLoopGroup(0, tf); NettyServerBuilder builder = NettyServerBuilder.forPort(port) .bossEventLoopGroup(boss) .workerEventLoopGroup(worker) .channelType(channel) .addService(new DefaultService()) .directExecutor() .maxConcurrentCallsPerConnection(Runtime.getRuntime().availableProcessors() * 256) .flowControlWindow(NettyChannelBuilder.DEFAULT_FLOW_CONTROL_WINDOW * 10); io.grpc.Server start = builder.build(); start.start(); System.out.println("server started"); start.awaitTermination(); }
private void createNioServer(Listener listener) {
    NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(
            1, new DefaultThreadFactory(ThreadNames.T_GATEWAY_WORKER)
    );
    eventLoopGroup.setIoRatio(100);
    // By default the channel type is chosen according to the machine; if the machine supports IPv6,
    // an IPv4 address cannot be used to join the multicast group, so force an IPv4 datagram channel.
    createServer(listener, eventLoopGroup, () -> new NioDatagramChannel(IPv4));
}
@SuppressWarnings("unused") private void createEpollServer(Listener listener) { EpollEventLoopGroup eventLoopGroup = new EpollEventLoopGroup( 1, new DefaultThreadFactory(ThreadNames.T_GATEWAY_WORKER) ); eventLoopGroup.setIoRatio(100); createServer(listener, eventLoopGroup, EpollDatagramChannel::new); }
private void createNioClient(Listener listener) { NioEventLoopGroup workerGroup = new NioEventLoopGroup( getWorkThreadNum(), new DefaultThreadFactory(ThreadNames.T_TCP_CLIENT), getSelectorProvider() ); workerGroup.setIoRatio(getIoRate()); createClient(listener, workerGroup, getChannelFactory()); }
private void createEpollClient(Listener listener) { EpollEventLoopGroup workerGroup = new EpollEventLoopGroup( getWorkThreadNum(), new DefaultThreadFactory(ThreadNames.T_TCP_CLIENT) ); workerGroup.setIoRatio(getIoRate()); createClient(listener, workerGroup, EpollSocketChannel::new); }
private ServerBootstrap configServer() { bossGroup = new NioEventLoopGroup(args.bossThreads, new DefaultThreadFactory("NettyBossGroup", true)); workerGroup = new NioEventLoopGroup(args.workerThreads, new DefaultThreadFactory("NettyWorkerGroup", true)); userThreadPool = Executors.newFixedThreadPool(args.userThreads, new DefaultThreadFactory("UserThreads", true)); final ThriftHandler thriftHandler = new ThriftHandler(this.processorFactory_, this.inputProtocolFactory_, this.outputProtocolFactory_, userThreadPool); ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .childOption(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.SO_KEEPALIVE, true) .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); if (args.socketTimeoutMills > 0) { b.childOption(ChannelOption.SO_TIMEOUT, args.socketTimeoutMills); } if (args.recvBuff > 0) { b.childOption(ChannelOption.SO_RCVBUF, args.recvBuff); } if (args.sendBuff > 0) { b.childOption(ChannelOption.SO_SNDBUF, args.sendBuff); } b.childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(createThriftFramedDecoder(), createThriftFramedEncoder(), thriftHandler); } }); return b; }
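Note that ChannelOption.SO_TIMEOUT applies to blocking (OIO) transports; on an NIO child channel the option is ignored and Netty logs an unknown-option warning. For NIO servers, read inactivity is normally handled in the pipeline instead, for example with a ReadTimeoutHandler; the following sketch uses an illustrative class name and 30-second timeout:

import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.timeout.ReadTimeoutHandler;

import java.util.concurrent.TimeUnit;

final class NioReadTimeoutSketch extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        // Close the connection when no data is read for 30 seconds.
        ch.pipeline().addLast(new ReadTimeoutHandler(30, TimeUnit.SECONDS));
        // ... the thrift framed decoder/encoder and handler would follow, as above
    }
}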
public void start() throws SailfishException { ServerBootstrap boot = newServerBootstrap(); EventLoopGroup accept = NettyPlatformIndependent.newEventLoopGroup(1, new DefaultThreadFactory(RemotingConstants.SERVER_ACCEPT_THREADNAME)); if (null != config.getEventLoopGroup()) { boot.group(accept, config.getEventLoopGroup()); } else { boot.group(accept, ServerEventGroup.INSTANCE.getLoopGroup()); } final EventExecutorGroup executor = (null != config.getEventExecutorGroup() ? config.getEventExecutorGroup() : ServerEventGroup.INSTANCE.getExecutorGroup()); boot.localAddress(config.address().host(), config.address().port()); boot.childHandler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { ChannelPipeline pipeline = ch.pipeline(); ch.attr(ChannelAttrKeys.OneTime.idleTimeout).set(config.idleTimeout()); ch.attr(ChannelAttrKeys.maxIdleTimeout).set(config.maxIdleTimeout()); ch.attr(ChannelAttrKeys.exchangeServer).set(DefaultServer.this); pipeline.addLast(executor, RemotingEncoder.INSTANCE, new RemotingDecoder(), new IdleStateHandler(config.idleTimeout(), 0, 0), HeartbeatChannelHandler.INSTANCE, NegotiateChannelHandler.INSTANCE, ConcreteRequestHandler.INSTANCE); } }); try { channel = boot.bind().syncUninterruptibly().channel(); } catch (Throwable cause) { throw new SailfishException(cause); } }
public ServerManager(ServiceConfig config) {
    this.webServiceExecutor = Executors.newFixedThreadPool(32, new DefaultThreadFactory("pulsar-external-web"));
    this.server = new Server(new ExecutorThreadPool(webServiceExecutor));
    this.externalServicePort = config.getWebServicePort();

    List<ServerConnector> connectors = Lists.newArrayList();

    ServerConnector connector = new ServerConnector(server, 1, 1);
    connector.setPort(externalServicePort);
    connectors.add(connector);

    if (config.isTlsEnabled()) {
        SslContextFactory sslCtxFactory = new SslContextFactory();
        try {
            SSLContext sslCtx = SecurityUtility.createSslContext(config.isTlsAllowInsecureConnection(),
                    config.getTlsTrustCertsFilePath(), config.getTlsCertificateFilePath(), config.getTlsKeyFilePath());
            sslCtxFactory.setSslContext(sslCtx);
        } catch (GeneralSecurityException e) {
            throw new RestException(e);
        }
        sslCtxFactory.setWantClientAuth(true);

        ServerConnector tlsConnector = new ServerConnector(server, 1, 1, sslCtxFactory);
        tlsConnector.setPort(config.getWebServicePortTls());
        connectors.add(tlsConnector);
    }

    // Limit the number of concurrent HTTP connections to avoid running out of file descriptors
    connectors.stream().forEach(c -> c.setAcceptQueueSize(1024 / connectors.size()));
    server.setConnectors(connectors.toArray(new ServerConnector[connectors.size()]));
}
public ProxyServer(WebSocketProxyConfiguration config)
        throws PulsarClientException, MalformedURLException, PulsarServerException {
    this.conf = config;
    executorService = Executors.newFixedThreadPool(WebSocketProxyConfiguration.PROXY_SERVER_EXECUTOR_THREADS,
            new DefaultThreadFactory("pulsar-websocket-web"));
    this.server = new Server(new ExecutorThreadPool(executorService));
    List<ServerConnector> connectors = new ArrayList<>();

    ServerConnector connector = new ServerConnector(server);
    connector.setPort(config.getWebServicePort());
    connectors.add(connector);

    // TLS enabled connector
    if (config.isTlsEnabled()) {
        SslContextFactory sslCtxFactory = new SslContextFactory(true);
        try {
            SSLContext sslCtx = SecurityUtility.createSslContext(false, config.getTlsTrustCertsFilePath(),
                    config.getTlsCertificateFilePath(), config.getTlsKeyFilePath());
            sslCtxFactory.setSslContext(sslCtx);
        } catch (GeneralSecurityException e) {
            throw new PulsarServerException(e);
        }
        sslCtxFactory.setWantClientAuth(true);

        ServerConnector tlsConnector = new ServerConnector(server, -1, -1, sslCtxFactory);
        tlsConnector.setPort(config.getWebServicePortTls());
        connectors.add(tlsConnector);
    }

    // Limit the number of concurrent HTTP connections to avoid running out of file descriptors
    connectors.stream().forEach(c -> c.setAcceptQueueSize(1024 / connectors.size()));
    server.setConnectors(connectors.toArray(new ServerConnector[connectors.size()]));
}
public WebService(PulsarService pulsar) throws PulsarServerException {
    this.handlers = Lists.newArrayList();
    this.pulsar = pulsar;
    this.webServiceExecutor = Executors.newFixedThreadPool(WebService.NUM_ACCEPTORS, new DefaultThreadFactory("pulsar-web"));
    this.server = new Server(new ExecutorThreadPool(webServiceExecutor));
    List<ServerConnector> connectors = new ArrayList<>();

    ServerConnector connector = new PulsarServerConnector(server, 1, 1);
    connector.setPort(pulsar.getConfiguration().getWebServicePort());
    connector.setHost(pulsar.getBindAddress());
    connectors.add(connector);

    if (pulsar.getConfiguration().isTlsEnabled()) {
        SslContextFactory sslCtxFactory = new SslContextFactory();
        try {
            sslCtxFactory.setSslContext(
                    SecurityUtility.createSslContext(
                            pulsar.getConfiguration().isTlsAllowInsecureConnection(),
                            pulsar.getConfiguration().getTlsTrustCertsFilePath(),
                            pulsar.getConfiguration().getTlsCertificateFilePath(),
                            pulsar.getConfiguration().getTlsKeyFilePath()));
        } catch (GeneralSecurityException e) {
            throw new PulsarServerException(e);
        }
        sslCtxFactory.setWantClientAuth(true);

        ServerConnector tlsConnector = new PulsarServerConnector(server, 1, 1, sslCtxFactory);
        tlsConnector.setPort(pulsar.getConfiguration().getWebServicePortTls());
        tlsConnector.setHost(pulsar.getBindAddress());
        connectors.add(tlsConnector);
    }

    // Limit the number of concurrent HTTP connections to avoid running out of file descriptors
    connectors.forEach(c -> c.setAcceptQueueSize(WebService.MAX_CONCURRENT_REQUESTS / connectors.size()));
    server.setConnectors(connectors.toArray(new ServerConnector[connectors.size()]));
}
public PulsarService(ServiceConfiguration config) { state = State.Init; this.bindAddress = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getBindAddress()); this.advertisedAddress = advertisedAddress(config); this.webServiceAddress = webAddress(config); this.webServiceAddressTls = webAddressTls(config); this.brokerServiceUrl = brokerUrl(config); this.brokerServiceUrlTls = brokerUrlTls(config); this.brokerVersion = PulsarBrokerVersionStringUtils.getNormalizedVersionString(); this.config = config; this.shutdownService = new MessagingServiceShutdownHook(this); this.loadManagerExecutor = Executors .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-load-manager")); }
/** * Initializes fields which do not depend on PulsarService. initialize(PulsarService) should subsequently be called. */ public ModularLoadManagerImpl() { brokerCandidateCache = new HashSet<>(); brokerToNamespaceToBundleRange = new HashMap<>(); defaultStats = new NamespaceBundleStats(); filterPipeline = new ArrayList<>(); loadData = new LoadData(); loadSheddingPipeline = new ArrayList<>(); loadSheddingPipeline.add(new OverloadShedder(conf)); preallocatedBundleToBroker = new ConcurrentHashMap<>(); scheduler = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-modular-load-manager")); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override public boolean isEnablePersistentTopics(String brokerUrl) { final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isPersistentTopicsEnabled(); } @Override public boolean isEnableNonPersistentTopics(String brokerUrl) { final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isNonPersistentTopicsEnabled(); } }; }
public SimpleLoadManagerImpl() { scheduler = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-simple-load-manager")); this.sortedRankings.set(new TreeMap<>()); this.currentLoadReports = new HashMap<>(); this.resourceUnitRankings = new HashMap<>(); this.loadBalancingMetrics.set(Lists.newArrayList()); this.realtimeResourceQuotas.set(new HashMap<>()); this.realtimeAvgResourceQuota = new ResourceQuota(); placementStrategy = new WRRPlacementStrategy(); bundleGainsCache = new HashSet<>(); bundleLossesCache = new HashSet<>(); brokerCandidateCache = new HashSet<>(); availableBrokersCache = new HashSet<>(); brokerToNamespaceToBundleRange = new HashMap<>(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override public boolean isEnablePersistentTopics(String brokerUrl) { ResourceUnit ru = new SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isPersistentTopicsEnabled(); } @Override public boolean isEnableNonPersistentTopics(String brokerUrl) { ResourceUnit ru = new SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isNonPersistentTopicsEnabled(); } }; }
@BeforeClass @Override protected void setup() throws Exception { super.internalSetup(); super.producerBaseSetup(); executor = Executors.newFixedThreadPool(1, new DefaultThreadFactory("PartitionedProducerConsumerTest")); }
@Test public void testSingleIpAddress() throws Exception { ClientConfiguration conf = new ClientConfiguration(); EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, new DefaultThreadFactory("test")); ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop)); PulsarClientImpl client = new PulsarClientImpl(serviceUrl, conf, eventLoop, pool); List<InetAddress> result = Lists.newArrayList(); result.add(InetAddress.getByName("127.0.0.1")); Mockito.when(pool.resolveName("non-existing-dns-name")).thenReturn(CompletableFuture.completedFuture(result)); client.createProducer("persistent://sample/standalone/ns/my-topic"); client.close(); }
public WebServer(ProxyConfiguration config) {
    this.webServiceExecutor = Executors.newFixedThreadPool(32, new DefaultThreadFactory("pulsar-external-web"));
    this.server = new Server(new ExecutorThreadPool(webServiceExecutor));
    this.externalServicePort = config.getWebServicePort();

    List<ServerConnector> connectors = Lists.newArrayList();

    ServerConnector connector = new ServerConnector(server, 1, 1);
    connector.setPort(externalServicePort);
    connectors.add(connector);

    if (config.isTlsEnabledInProxy()) {
        SslContextFactory sslCtxFactory = new SslContextFactory();
        try {
            SSLContext sslCtx = SecurityUtility.createSslContext(false, null,
                    config.getTlsCertificateFilePath(), config.getTlsKeyFilePath());
            sslCtxFactory.setSslContext(sslCtx);
        } catch (GeneralSecurityException e) {
            throw new RuntimeException(e);
        }
        sslCtxFactory.setWantClientAuth(false);

        ServerConnector tlsConnector = new ServerConnector(server, 1, 1, sslCtxFactory);
        tlsConnector.setPort(config.getWebServicePortTls());
        connectors.add(tlsConnector);
    }

    // Limit the number of concurrent HTTP connections to avoid running out of file descriptors
    connectors.stream().forEach(c -> c.setAcceptQueueSize(1024 / connectors.size()));
    server.setConnectors(connectors.toArray(new ServerConnector[connectors.size()]));
}