public RateLimiter initRateLimiter(String target, double rate) {
    RateLimiter limiter = requestRateLimiterMap.get(target);
    if (limiter == null) {
        limiter = RateLimiter.create(rate);
        requestRateLimiterMap.put(target, limiter);
        return limiter;
    }
    if (limiter.getRate() == rate) {
        return limiter;
    }
    limiter.setRate(rate);
    requestRateLimiterMap.put(target, limiter);
    return limiter;
}
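// A minimal thread-safe sketch of the same lookup-or-create logic, assuming the
// surrounding class can use a ConcurrentHashMap. computeIfAbsent removes the
// check-then-act race in the get/put sequence above, and the re-put after setRate()
// is dropped because the limiter instance is already in the map.
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.util.concurrent.RateLimiter;

class RateLimiterRegistry {
    private final ConcurrentHashMap<String, RateLimiter> requestRateLimiterMap = new ConcurrentHashMap<>();

    public RateLimiter initRateLimiter(String target, double rate) {
        // Atomically create the limiter on first use for this target.
        RateLimiter limiter = requestRateLimiterMap.computeIfAbsent(target, k -> RateLimiter.create(rate));
        // Adjust the rate in place if the caller asks for a different one.
        if (limiter.getRate() != rate) {
            limiter.setRate(rate);
        }
        return limiter;
    }
}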
RedisThrottler(
        RedisThrottlerConfig config,
        RedisAppender appender,
        boolean ignoreExceptions,
        boolean debugEnabled) {
    this.config = config;
    this.appender = appender;
    this.ignoreExceptions = ignoreExceptions;
    this.buffer = new ArrayBlockingQueue<>(config.getBufferSize());
    this.batch = new byte[config.getBatchSize()][];
    this.flushTrigger = createFlushTrigger();
    this.eventRateLimiter = config.getMaxEventCountPerSecond() > 0
            ? RateLimiter.create(config.getMaxEventCountPerSecond())
            : null;
    this.byteRateLimiter = config.getMaxByteCountPerSecond() > 0
            ? RateLimiter.create(config.getMaxByteCountPerSecond())
            : null;
    this.logger = new DebugLogger(RedisThrottler.class, debugEnabled);
    this.jmxBeanName = createJmxBeanName();
}
public StreamServer(Socket aClientSocket, RateLimiter rateLimiter, long startTime, int duration,
                    BufferedReader dataSource, AtomicInteger consumedTuples, int maxTuples) {
    try {
        _duration = duration;
        _sourceBuffer = dataSource;
        _rateLimiter = rateLimiter;
        _clientSocket = aClientSocket;
        _startTime = startTime;
        _cosumedTuples = consumedTuples;
        _maxTuples = maxTuples;
        _output = new BufferedOutputStream(_clientSocket.getOutputStream());
        this.start();
    } catch (IOException e) {
        System.out.println(e.getMessage());
    }
}
public static void main(String[] args) {
    try {
        RateLimiter limit = RateLimiter.create(100d);
        PropertyConfigurator.configure("conf/log4j.properties");
        while (true) {
            if (limit.tryAcquire()) {
                final HelloCommand command = new HelloCommand();
                // System.out.println("result: " + command.execute());
                // System.out.println("");
                Future<String> future = command.queue();
                System.out.println("result: " + future.get());
                System.out.println("");
            }
        }
        // Observable<String> observe = command.observe();
        // observe.asObservable().subscribe((result) -> {
        //     System.out.println(result);
        // });
    } catch (Exception e) {
        e.printStackTrace();
    }
}
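// A hedged alternative sketch: the tryAcquire() loop above busy-spins whenever no
// permit is available. Guava's blocking acquire() waits for the next permit instead,
// giving the same ~100 calls per second without burning CPU. HelloCommand is the
// Hystrix command type from the snippet above; everything else is standard Guava/JDK.
RateLimiter limit = RateLimiter.create(100d);
while (true) {
    limit.acquire(); // blocks until the next permit becomes available
    Future<String> future = new HelloCommand().queue();
    System.out.println("result: " + future.get());
}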
/**
 * @param parent the parent to set.
 */
@Override
public void setParent(Channel parent) {
    super.setParent(parent);
    UserQuotas quotas = this.getUserQuotas();
    if (quotas != null) {
        if (quotas.getMaxBandwidth() != null) {
            this.rateLimiter = RateLimiter.create(quotas.getMaxBandwidth().intValue());
        }
        if (quotas.getMaxSize() != null) {
            this.maxAllowedPermits = quotas.getMaxSize().longValue();
        }
    }
}
public void freshNetRate(int rate) {
    int downRate = rate - Constants.SYSTEM_NEED_RATE;
    if (downRate <= 0) {
        downRate = rate / 2;
    }
    if (downRate == 0) {
        logger.error("net rate:{} is illegal", rate);
        return;
    }
    long rateOnByte = downRate * 1024L * 1024L;
    boolean updated = false;
    try {
        for (RateLimiter rateLimiter : rateLimiters) {
            if (Math.abs(rateLimiter.getRate() - rateOnByte) >= 1024) {
                rateLimiter.setRate(rateOnByte);
                updated = true;
            }
        }
        if (updated) {
            logger.info("update net rate to {} MB", rate);
        }
    } catch (Exception e) {
        logger.error("E_freshNetRate", e);
    }
}
public static void main(String[] args) throws Exception {
    CliLoader cliLoader = new CliLoader();
    Configuration configuration = cliLoader.parseArgs(args);
    LOGGER.info("starting login client");
    RateLimiter rateLimiter;
    if (configuration.getRequestsPerMinute() == -1) {
        rateLimiter = null;
    } else {
        rateLimiter = RateLimiter.create((double) configuration.getRequestsPerMinute() / 60);
    }
    for (long i = 0; i < configuration.getLoops(); i++) {
        LOGGER.info("executing login loop {}", i);
        configuration = cliLoader.parseArgs(args);
        executeLogins(configuration, rateLimiter);
    }
}
@Inject
public RateLimitedExecutor(Config config) {

    // Check arguments.
    checkNotNull(config, "config");

    // Set class fields.
    this.executorService = createExecutorService(config.getThreadCount());
    this.rateLimiter = RateLimiter.create(
            config.getMaxRequestCountPerSecond(),
            config.getRampUpDurationSeconds(),
            TimeUnit.SECONDS);
    LOGGER.debug(
            "instantiated (threadCount={}, maxRequestCountPerSecond={}, rampUpDurationSeconds={})",
            config.getThreadCount(),
            config.getMaxRequestCountPerSecond(),
            config.getRampUpDurationSeconds());
}
/**
 * Constructor.
 *
 * @param namespace the namespace
 */
public RemoteConfigRepository(String namespace) {
    m_namespace = namespace;
    m_configCache = new AtomicReference<>();
    m_configUtil = ApolloInjector.getInstance(ConfigUtil.class);
    m_httpUtil = ApolloInjector.getInstance(HttpUtil.class);
    m_serviceLocator = ApolloInjector.getInstance(ConfigServiceLocator.class);
    remoteConfigLongPollService = ApolloInjector.getInstance(RemoteConfigLongPollService.class);
    m_longPollServiceDto = new AtomicReference<>();
    m_remoteMessages = new AtomicReference<>();
    m_loadConfigRateLimiter = RateLimiter.create(m_configUtil.getLoadConfigQPS());
    m_configNeedForceRefresh = new AtomicBoolean(true);
    m_loadConfigFailSchedulePolicy = new ExponentialSchedulePolicy(
            m_configUtil.getOnErrorRetryInterval(), m_configUtil.getOnErrorRetryInterval() * 8);
    gson = new Gson();
    this.trySync();
    this.schedulePeriodicRefresh();
    this.scheduleLongPollingRefresh();
}
/**
 * Constructor.
 */
public RemoteConfigLongPollService() {
    m_longPollFailSchedulePolicyInSecond = new ExponentialSchedulePolicy(1, 120); // in seconds
    m_longPollingStopped = new AtomicBoolean(false);
    m_longPollingService = Executors.newSingleThreadExecutor(
            ApolloThreadFactory.create("RemoteConfigLongPollService", true));
    m_longPollStarted = new AtomicBoolean(false);
    m_longPollNamespaces =
            Multimaps.synchronizedSetMultimap(HashMultimap.<String, RemoteConfigRepository>create());
    m_notifications = Maps.newConcurrentMap();
    m_remoteNotificationMessages = Maps.newConcurrentMap();
    m_responseType = new TypeToken<List<ApolloConfigNotification>>() {
    }.getType();
    gson = new Gson();
    m_configUtil = ApolloInjector.getInstance(ConfigUtil.class);
    m_httpUtil = ApolloInjector.getInstance(HttpUtil.class);
    m_serviceLocator = ApolloInjector.getInstance(ConfigServiceLocator.class);
    m_longPollRateLimiter = RateLimiter.create(m_configUtil.getLongPollQPS());
}
@Override
public BaseResponse<UserResVO> getUserByFeignBatch(@RequestBody UserReqVO userReqVO) {
    // Call the remote service.
    OrderNoReqVO vo = new OrderNoReqVO();
    vo.setReqNo(userReqVO.getReqNo());

    RateLimiter limiter = RateLimiter.create(2.0);
    // Invoke in a batch.
    for (int i = 0; i < COUNT; i++) {
        double acquire = limiter.acquire();
        logger.debug("Token acquired successfully! Waited " + acquire + " seconds.");
        BaseResponse<OrderNoResVO> orderNo = orderServiceClient.getOrderNo(vo);
        logger.debug("Remote call returned: " + JSON.toJSONString(orderNo));
    }

    UserRes userRes = new UserRes();
    userRes.setUserId(123);
    userRes.setUserName("Zhang San");
    userRes.setReqNo(userReqVO.getReqNo());
    userRes.setCode(StatusEnum.SUCCESS.getCode());
    userRes.setMessage("success");
    return userRes;
}
/**
 * Executes a page scan with scanExpression, limited by limiter, for objects of type T with the
 * class clazz, and calls action for each object found.
 *
 * @param clazz          class of the objects
 * @param scanExpression expression for the scan
 * @param limiter        the read limiter limiting the amount of requests
 * @param action         the function to call for each object
 * @param <T>            the type of the objects
 */
private static <T> void scanPages(Class<T> clazz, DynamoDBScanExpression scanExpression,
                                  RateLimiter limiter, Consumer<? super T> action) {
    // Define pageScan and request consumed-capacity reporting on the scan expression.
    ScanResultPage<T> pageScan;
    scanExpression.setReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    // Initialize counters, permits and the mapper.
    int permitsToConsume = 1;
    DynamoDBMapper dynamoDBMapper = DBConnector.getInstance().getDynamoDBMapper();
    int scanned = 0;
    int count = 0;
    do {
        // Acquire permits and scan.
        limiter.acquire(permitsToConsume);
        pageScan = dynamoDBMapper.scanPage(clazz, scanExpression);

        // Advance the page scan.
        scanExpression.setExclusiveStartKey(pageScan.getLastEvaluatedKey());

        // Update the stats variables.
        scanned += pageScan.getScannedCount();
        count += pageScan.getCount();

        // Call the action on each result.
        pageScan.getResults().forEach(action);

        // Calculate permits for the next scan from the capacity actually consumed.
        Double capacityUnits = pageScan.getConsumedCapacity().getCapacityUnits();
        permitsToConsume = (int) (capacityUnits - 1);
        if (permitsToConsume <= 0) {
            permitsToConsume = 1;
        }

        log.info(String.format(
                "Scanned a page for class %s. Results: %d/%d (%d/%d total). Capacity units consumed: %f",
                clazz.getSimpleName(), pageScan.getCount(), pageScan.getScannedCount(), count,
                scanned, capacityUnits));
    } while (pageScan.getLastEvaluatedKey() != null);
}
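// A usage sketch under stated assumptions: Book is a hypothetical @DynamoDBTable-annotated
// model class, and 25.0 approximates the table's provisioned read capacity units per second.
// The limiter then keeps the scan's consumed capacity near that budget.
scanPages(Book.class,
        new DynamoDBScanExpression(),
        RateLimiter.create(25.0), // cap consumption near the provisioned read throughput
        book -> System.out.println(book));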
@Override
public Collection<SSTableReader> getDataReaders(RateLimiter rateLimiter) {
    Collection<SSTableReader> readers = new ArrayList<>(sstables.size());
    for (org.apache.cassandra.io.sstable.format.SSTableReader sstable : sstables) {
        try {
            File dataFile = new File(sstable.descriptor.filenameFor(Component.DATA));
            readers.add(new DataReader(
                    new SSTableStatistics(
                            sstable.descriptor.generation,
                            dataFile.getName(),
                            sstable.uncompressedLength(),
                            sstable.getMinTimestamp(),
                            sstable.getMaxTimestamp(),
                            sstable.getSSTableLevel()),
                    sstable.getScanner(rateLimiter),
                    Util.NOW_SECONDS - sstable.metadata.params.gcGraceSeconds));
        } catch (Throwable t) {
            // Skip sstables that cannot be opened.
        }
    }
    return readers;
}
@Override
public Collection<SSTableReader> getDataReaders(RateLimiter rateLimiter) {
    Collection<SSTableReader> readers = new ArrayList<>(sstables.size());
    for (org.apache.cassandra.io.sstable.SSTableReader sstable : sstables) {
        try {
            File dataFile = new File(sstable.descriptor.filenameFor(Component.DATA));
            readers.add(new DataReader(
                    new SSTableStatistics(
                            sstable.descriptor.generation,
                            dataFile.getName(),
                            sstable.uncompressedLength(),
                            sstable.getMinTimestamp(),
                            sstable.getMaxTimestamp(),
                            sstable.getSSTableLevel()),
                    sstable.getScanner(rateLimiter),
                    Util.NOW_SECONDS - sstable.metadata.getGcGraceSeconds()));
        } catch (Throwable t) {
            // Skip sstables that cannot be opened.
        }
    }
    return readers;
}
@Override
public Collection<SSTableReader> getDataReaders(RateLimiter rateLimiter) {
    Collection<SSTableReader> readers = new ArrayList<>(sstables.size());
    for (org.apache.cassandra.io.sstable.format.SSTableReader sstable : sstables) {
        try {
            File dataFile = new File(sstable.descriptor.filenameFor(Component.DATA));
            readers.add(new DataReader(
                    new SSTableStatistics(
                            sstable.descriptor.generation,
                            dataFile.getName(),
                            sstable.uncompressedLength(),
                            sstable.getMinTimestamp(),
                            sstable.getMaxTimestamp(),
                            sstable.getSSTableLevel()),
                    sstable.getScanner(rateLimiter),
                    Util.NOW_SECONDS - sstable.metadata.getGcGraceSeconds()));
        } catch (Throwable t) {
            // Skip sstables that cannot be opened.
        }
    }
    return readers;
}
public TradeUnit(final TradeConfiguration tradeConf, final PulsarClient client,
                 final ProducerConfiguration producerConf, final ConsumerConfiguration consumerConf,
                 final Map<Integer, byte[]> payloadCache) throws Exception {
    consumerFuture = client.subscribeAsync(tradeConf.topic, "Subscriber-" + tradeConf.topic, consumerConf);
    this.payload = new AtomicReference<>();
    this.producerConf = producerConf;
    this.payloadCache = payloadCache;
    this.client = client;
    topic = tradeConf.topic;

    // Add a byte[] of the appropriate size if it is not already present in the cache.
    this.payload.set(payloadCache.computeIfAbsent(tradeConf.size, byte[]::new));
    rateLimiter = RateLimiter.create(tradeConf.rate);
    stop = new AtomicBoolean(false);
}
public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store,
        ManagedLedgerConfig config, ScheduledExecutorService scheduledExecutor,
        OrderedSafeExecutor orderedExecutor, final String name) {
    this.factory = factory;
    this.bookKeeper = bookKeeper;
    this.config = config;
    this.store = store;
    this.name = name;
    this.scheduledExecutor = scheduledExecutor;
    this.executor = orderedExecutor;
    TOTAL_SIZE_UPDATER.set(this, 0);
    NUMBER_OF_ENTRIES_UPDATER.set(this, 0);
    ENTRIES_ADDED_COUNTER_UPDATER.set(this, 0);
    STATE_UPDATER.set(this, State.None);
    this.ledgersStat = null;
    this.mbean = new ManagedLedgerMBeanImpl(this);
    this.entryCache = factory.getEntryCacheManager().getEntryCache(this);
    this.waitingCursors = Queues.newConcurrentLinkedQueue();
    this.uninitializedCursors = Maps.newHashMap();
    this.updateCursorRateLimit = RateLimiter.create(1);

    // Get the next rollover time. Add a random value of up to 5% to avoid rolling over
    // multiple ledgers at the same time.
    this.maximumRolloverTimeMs = (long) (config.getMaximumRolloverTimeMs() * (1 + random.nextDouble() * 5 / 100.0));
}
ManagedCursorImpl(BookKeeper bookkeeper, ManagedLedgerConfig config, ManagedLedgerImpl ledger,
        String cursorName) {
    this.bookkeeper = bookkeeper;
    this.config = config;
    this.ledger = ledger;
    this.name = cursorName;
    STATE_UPDATER.set(this, State.Uninitialized);
    PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.set(this, 0);
    PENDING_READ_OPS_UPDATER.set(this, 0);
    RESET_CURSOR_IN_PROGRESS_UPDATER.set(this, FALSE);
    WAITING_READ_OP_UPDATER.set(this, null);
    this.lastLedgerSwitchTimestamp = System.currentTimeMillis();

    if (config.getThrottleMarkDelete() > 0.0) {
        markDeleteLimiter = RateLimiter.create(config.getThrottleMarkDelete());
    } else {
        // Disable mark-delete rate limiter
        markDeleteLimiter = null;
    }
}
private void moveStreams(StreamChooser streamChooser, StreamMover streamMover, int concurrency,
                         Optional<RateLimiter> rateLimiter) {
    CountDownLatch doneLatch = new CountDownLatch(concurrency);
    RegionMover regionMover = new RegionMover(streamChooser, streamMover, rateLimiter, doneLatch);
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency);
    try {
        for (int i = 0; i < concurrency; i++) {
            executorService.submit(regionMover);
        }
        try {
            doneLatch.await();
        } catch (InterruptedException e) {
            logger.info("{} is interrupted. Stopping it ...", streamMover);
            regionMover.shutdown();
        }
    } finally {
        executorService.shutdown();
    }
}
protected void parseCommandLine(CommandLine cmdline) throws ParseException {
    if (cmdline.hasOption("rwm")) {
        this.rebalanceWaterMark = Integer.parseInt(cmdline.getOptionValue("rwm"));
    }
    if (cmdline.hasOption("rtp")) {
        this.rebalanceTolerancePercentage = Double.parseDouble(cmdline.getOptionValue("rtp"));
    }
    if (cmdline.hasOption("rc")) {
        this.rebalanceConcurrency = Integer.parseInt(cmdline.getOptionValue("rc"));
    }
    if (cmdline.hasOption("r")) {
        this.rate = Double.parseDouble(cmdline.getOptionValue("r"));
    }
    Preconditions.checkArgument(rebalanceWaterMark >= 0,
            "Rebalance Water Mark should be a non-negative number");
    Preconditions.checkArgument(rebalanceTolerancePercentage >= 0.0f,
            "Rebalance Tolerance Percentage should be a non-negative number");
    Preconditions.checkArgument(rebalanceConcurrency > 0,
            "Rebalance Concurrency should be a positive number");
    if (null == rate || rate <= 0.0f) {
        rateLimiter = Optional.absent();
    } else {
        rateLimiter = Optional.of(RateLimiter.create(rate));
    }
}
protected void balanceFromSource(DistributedLogClientBuilder clientBuilder,
                                 ClusterBalancer balancer,
                                 String source,
                                 Optional<RateLimiter> rateLimiter) throws Exception {
    InetSocketAddress sourceAddr = DLSocketAddress.parseSocketAddress(source);
    DistributedLogClientBuilder sourceClientBuilder = DistributedLogClientBuilder.newBuilder(clientBuilder)
            .host(sourceAddr);
    Pair<DistributedLogClient, MonitorServiceClient> clientPair =
            ClientUtils.buildClient(sourceClientBuilder);
    try {
        Await.result(clientPair.getRight().setAcceptNewStream(false));
        logger.info("Disabled accepting new streams on proxy {}.", source);
        balancer.balanceAll(source, rebalanceConcurrency, rateLimiter);
    } finally {
        clientPair.getLeft().close();
    }
}
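// A minimal sketch of how the Optional<RateLimiter> threaded through the two methods
// above is typically consumed (Guava's Optional, not java.util.Optional). The Runnable
// stands in for the balancer's actual per-stream move, which is an assumption here.
void rateLimitedMove(Optional<RateLimiter> rateLimiter, Runnable moveOneStream) {
    if (rateLimiter.isPresent()) {
        rateLimiter.get().acquire(); // block until the next move is permitted
    }
    moveOneStream.run();
}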
@Inject
CloudDnsWriter(
        Dns dnsConnection,
        @Config("projectId") String projectId,
        @DnsWriterZone String zoneName,
        @Config("dnsDefaultATtl") Duration defaultATtl,
        @Config("dnsDefaultNsTtl") Duration defaultNsTtl,
        @Config("dnsDefaultDsTtl") Duration defaultDsTtl,
        @Named("cloudDns") RateLimiter rateLimiter,
        @Named("cloudDnsNumThreads") int numThreads,
        Clock clock,
        Retrier retrier) {
    this.dnsConnection = dnsConnection;
    this.projectId = projectId;
    this.zoneName = zoneName.replace('.', '-');
    this.defaultATtl = defaultATtl;
    this.defaultNsTtl = defaultNsTtl;
    this.defaultDsTtl = defaultDsTtl;
    this.rateLimiter = rateLimiter;
    this.clock = clock;
    this.retrier = retrier;
    this.numThreads = numThreads;
}
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)
        throws Exception {
    if (handler instanceof HandlerMethod) {
        HandlerMethod handlerMethod = (HandlerMethod) handler;
        Method method = handlerMethod.getMethod();
        if (method.isAnnotationPresent(Limiter.class)) {
            String methodAbsKey = MethodUtil.getAsbMethodCacheKey(method);
            RateLimiter rateLimiter = RateLimiterCache.get(methodAbsKey);
            if (null == rateLimiter) {
                // No limiter registered for this method; let the request through.
                return true;
            }
            return rateLimiter.tryAcquire();
        }
    }
    return true;
}
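// A registration sketch, assuming a Spring MVC application. RateLimitInterceptor is a
// hypothetical name for the class containing the preHandle() above, and the "/**"
// pattern is illustrative; only methods annotated with @Limiter and present in
// RateLimiterCache are actually throttled.
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class WebConfig implements WebMvcConfigurer {
    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        // Route every handler invocation through the rate-limiting interceptor.
        registry.addInterceptor(new RateLimitInterceptor()).addPathPatterns("/**");
    }
}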
/**
 * Returns a list of KeyScanners given sstables and a range on which to scan.
 * The default implementation simply grabs one SSTableScanner per sstable, but overriding this method
 * allows for a more memory-efficient solution if we know the sstables don't overlap (see
 * LeveledCompactionStrategy for instance).
 */
public ScannerList getScanners(Collection<SSTableReader> sstables, Range<Token> range) {
    RateLimiter limiter = CompactionManager.instance.getRateLimiter();
    ArrayList<ISSTableScanner> scanners = new ArrayList<ISSTableScanner>();
    try {
        for (SSTableReader sstable : sstables)
            scanners.add(sstable.getScanner(range, limiter));
    } catch (Throwable t) {
        // Close any scanners we already opened before propagating the failure.
        try {
            new ScannerList(scanners).close();
        } catch (Throwable t2) {
            t.addSuppressed(t2);
        }
        throw t;
    }
    return new ScannerList(scanners);
}
/**
 * @param sstable SSTable to scan; must not be null
 * @param tokenRanges A set of token ranges to scan
 * @param limiter background i/o RateLimiter; may be null
 */
private SSTableScanner(SSTableReader sstable, Collection<Range<Token>> tokenRanges, RateLimiter limiter) {
    assert sstable != null;

    this.dfile = limiter == null ? sstable.openDataReader() : sstable.openDataReader(limiter);
    this.ifile = sstable.openIndexReader();
    this.sstable = sstable;
    this.dataRange = null;

    List<AbstractBounds<RowPosition>> boundsList = new ArrayList<>(tokenRanges.size());
    for (Range<Token> range : Range.normalize(tokenRanges))
        addRange(range.toRowBounds(), boundsList);
    this.rangeIterator = boundsList.iterator();
}
private void setupStore(final Configuration config, final Map<String, RateLimiter> readRateLimit,
                        final Map<String, RateLimiter> writeRateLimit, final String store) {
    final String dataModel = config.get(Constants.STORES_DATA_MODEL, store);
    final int scanLimit = config.get(Constants.STORES_SCAN_LIMIT, store);
    final long readCapacity = config.get(Constants.STORES_INITIAL_CAPACITY_READ, store);
    final long writeCapacity = config.get(Constants.STORES_INITIAL_CAPACITY_WRITE, store);
    final double readRate = config.get(Constants.STORES_READ_RATE_LIMIT, store);
    final double writeRate = config.get(Constants.STORES_WRITE_RATE_LIMIT, store);
    final String actualTableName = prefix + "_" + store;

    this.dataModelMap.put(store, BackendDataModel.valueOf(dataModel));
    this.capacityRead.put(actualTableName, readCapacity);
    this.capacityWrite.put(actualTableName, writeCapacity);
    readRateLimit.put(actualTableName,
            RateLimiterCreator.createBurstingLimiter(readRate, DEFAULT_BURST_BUCKET_SIZE_IN_SECONDS));
    writeRateLimit.put(actualTableName,
            RateLimiterCreator.createBurstingLimiter(writeRate, DEFAULT_BURST_BUCKET_SIZE_IN_SECONDS));
    this.scanLimitMap.put(actualTableName, scanLimit);
}
protected List<Resource> buildResourceList(RestApi api) {
    List<Resource> resourceList = new ArrayList<>();

    Resources resources = api.getResources();
    resourceList.addAll(resources.getItem());

    LOG.debug("Building list of resources. Stack trace: ", new Throwable());

    // Throttle the paging calls to two requests per second.
    final RateLimiter rl = RateLimiter.create(2);
    while (resources._isLinkAvailable("next")) {
        rl.acquire();
        resources = resources.getNext();
        resourceList.addAll(resources.getItem());
    }

    return resourceList;
}
/**
 * @param sstable SSTable to scan; must not be null
 * @param dataRange a single range to scan; must not be null
 * @param limiter background i/o RateLimiter; may be null
 */
SSTableScanner(SSTableReader sstable, DataRange dataRange, RateLimiter limiter) {
    assert sstable != null;

    this.dfile = limiter == null ? sstable.openDataReader() : sstable.openDataReader(limiter);
    this.ifile = sstable.openIndexReader();
    this.sstable = sstable;
    this.dataRange = dataRange;

    List<AbstractBounds<RowPosition>> boundsList = new ArrayList<>(2);
    if (dataRange.isWrapAround() && !dataRange.stopKey().isMinimum(sstable.partitioner)) {
        // split the wrapping range into two parts: 1) the part that starts at the beginning
        // of the sstable, and 2) the part that comes before the wrap-around
        boundsList.add(new Bounds<>(sstable.partitioner.getMinimumToken().minKeyBound(),
                dataRange.stopKey(), sstable.partitioner));
        boundsList.add(new Bounds<>(dataRange.startKey(),
                sstable.partitioner.getMinimumToken().maxKeyBound(), sstable.partitioner));
    } else {
        boundsList.add(new Bounds<>(dataRange.startKey(), dataRange.stopKey(), sstable.partitioner));
    }
    this.rangeIterator = boundsList.iterator();
}
/**
 * @param sstable SSTable to scan; must not be null
 * @param tokenRanges A set of token ranges to scan
 * @param limiter background i/o RateLimiter; may be null
 */
SSTableScanner(SSTableReader sstable, Collection<Range<Token>> tokenRanges, RateLimiter limiter) {
    assert sstable != null;

    this.dfile = limiter == null ? sstable.openDataReader() : sstable.openDataReader(limiter);
    this.ifile = sstable.openIndexReader();
    this.sstable = sstable;
    this.dataRange = null;

    List<Range<Token>> normalized = Range.normalize(tokenRanges);
    List<AbstractBounds<RowPosition>> boundsList = new ArrayList<>(normalized.size());
    for (Range<Token> range : normalized)
        boundsList.add(new Range<RowPosition>(range.left.maxKeyBound(sstable.partitioner),
                range.right.maxKeyBound(sstable.partitioner), sstable.partitioner));
    this.rangeIterator = boundsList.iterator();
}
public void run() {
    RateLimiter rl = rateLimit != 0 ? RateLimiter.create(rateLimit) : null;
    final Random rand = random != null ? random : ThreadLocalRandom.current();
    while (!stop) {
        if (rl != null)
            rl.acquire();
        ByteBuffer key = randomBytes(16, rand);

        UpdateBuilder builder = UpdateBuilder.create(
                Schema.instance.getCFMetaData("Keyspace1", "Standard1"), Util.dk(key));
        for (int ii = 0; ii < numCells; ii++) {
            int sz = randomSize ? rand.nextInt(cellSize) : cellSize;
            ByteBuffer bytes = randomBytes(sz, rand);
            builder.newRow("name" + ii).add("val", bytes);
            hash = hash(hash, bytes);
            ++cells;
            dataSize += sz;
        }

        rp = commitLog.add(new Mutation(builder.build()));
        counter.incrementAndGet();
    }
}
public static IFace create(double maxRequestsPerSecond) {
    if (maxRequestsPerSecond <= 0.0) {
        return ALLOW_NONE;
    }
    final RateLimiter rateLimiter = RateLimiter.create(maxRequestsPerSecond);
    return new IFace() {
        @Override
        public boolean allowDequeue(int numJobs) {
            return rateLimiter.tryAcquire(numJobs);
        }

        @Override
        public double getRate() {
            return rateLimiter.getRate();
        }
    };
}
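// Usage sketch for the factory above: a non-positive rate yields the ALLOW_NONE
// instance, otherwise dequeues are gated by tryAcquire(numJobs). The batch size
// of 10 is illustrative.
IFace limiter = create(100.0); // at most ~100 jobs per second
int batchSize = 10;
if (limiter.allowDequeue(batchSize)) {
    // ... dequeue and process batchSize jobs ...
}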