private static void doTest(int nThreads) throws InterruptedException {
    Thread[] aThreads = new Thread[nThreads];
    final AtomicLong atl = new AtomicLong();
    for (int i = 0; i < nThreads; i++) {
        aThreads[i] = new RunnerThread(atl, 1L << (8 * i));
    }
    for (int i = 0; i < nThreads; i++) {
        aThreads[i].start();
    }
    for (int i = 0; i < nThreads; i++) {
        aThreads[i].join();
    }
}
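The RunnerThread class is not shown above. The following is a minimal sketch of what it might look like, assuming each thread simply adds its per-thread delta (a distinct byte lane of the shared long) a fixed number of times; the class shape, field names, and iteration count are assumptions for illustration, not the original source.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of RunnerThread: repeatedly adds its delta to the shared counter.
class RunnerThread extends Thread {
    private final AtomicLong target;
    private final long delta;

    RunnerThread(AtomicLong target, long delta) {
        this.target = target;
        this.delta = delta;
    }

    @Override
    public void run() {
        for (int i = 0; i < 1_000; i++) {
            target.addAndGet(delta); // atomic read-modify-write, so no updates are lost across threads
        }
    }
}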
/**
 * Atomically adds the positive value n to the requested value in the AtomicLong,
 * capping the result at Long.MAX_VALUE and returning the previous value.
 * A current value of Long.MIN_VALUE is treated as a cancellation indication,
 * in which case no addition is performed.
 *
 * @param requested the AtomicLong holding the current requested value
 * @param n the value to add, must be positive (not verified)
 * @return the original value before the add
 */
public static long addCancel(AtomicLong requested, long n) {
    for (;;) {
        long r = requested.get();
        if (r == Long.MIN_VALUE) {
            return Long.MIN_VALUE;
        }
        if (r == Long.MAX_VALUE) {
            return Long.MAX_VALUE;
        }
        long u = addCap(r, n);
        if (requested.compareAndSet(r, u)) {
            return r;
        }
    }
}
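The helper addCap is referenced but not shown. A minimal sketch of such a cap-at-MAX_VALUE addition, assuming it only needs to guard against overflow of two non-negative operands (the exact implementation in the original library may differ):

// Sketch of a saturating add, assuming both operands are non-negative.
// Returns Long.MAX_VALUE instead of overflowing.
static long addCap(long a, long b) {
    long sum = a + b;
    return sum < 0 ? Long.MAX_VALUE : sum; // overflow of two non-negative longs yields a negative value
}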
public static <T extends ImmutableTrade> void runSimpleQuery(List<T> immutableTrades) {
    Comparator<ImmutableTrade> comparator = Comparator.comparing(ImmutableTrade::getExchangeRate);
    Predicate<ImmutableTrade> predicate = t -> t.getCurrency1().equalsIgnoreCase("GBP")
            && t.getCurrency2().equalsIgnoreCase("USD")
            && t.getBuySell().equalsIgnoreCase("Buy");
    final AtomicLong ignore = new AtomicLong(0);
    int n = 10;
    System.out.println("Running a filter and sort on the trades (" + n + " times)");
    long start = System.nanoTime();
    for (int i = 0; i < n; i++) {
        System.gc();
        immutableTrades.stream()
                .filter(predicate)
                .sorted(comparator)
                .limit(10)
                .forEach(p -> ignore.set(p.getId()));
    }
    System.out.println("ignore: " + ignore.get());
    System.out.printf("Query time = %.3f seconds%n%n", (System.nanoTime() - start) / 1e9);
}
private void doCommits(String tableName, final List<com.alicloud.tablestore.adaptor.struct.OPut> puts)
        throws IOException {
    boolean flushSuccessfully = false;
    try {
        otsProxy.putMultiple(tableName, puts);
        flushSuccessfully = true;
    } finally {
        if (!flushSuccessfully && !getClearBufferOnFail(tableName)) {
            ArrayList<com.alicloud.tablestore.adaptor.struct.OPut> tableWriteBuffer = getTableWriteBuffer(tableName);
            synchronized (tableWriteBuffer) {
                AtomicLong currentBufferSize = getTableCurrentBufferSize(tableName);
                for (com.alicloud.tablestore.adaptor.struct.OPut put : puts) {
                    tableWriteBuffer.add(put);
                    currentBufferSize.addAndGet(put.getWritableSize());
                }
            }
        }
    }
}
private CacheManager(final File cacheDir, final long sizeLimit, final int countLimit) {
    this.cacheDir = cacheDir;
    this.sizeLimit = sizeLimit;
    this.countLimit = countLimit;
    cacheSize = new AtomicLong();
    cacheCount = new AtomicInteger();
    mThread = new Thread(new Runnable() {
        @Override
        public void run() {
            int size = 0;
            int count = 0;
            final File[] cachedFiles = cacheDir.listFiles();
            if (cachedFiles != null) {
                for (File cachedFile : cachedFiles) {
                    size += cachedFile.length();
                    count += 1;
                    lastUsageDates.put(cachedFile, cachedFile.lastModified());
                }
                cacheSize.getAndAdd(size);
                cacheCount.getAndAdd(count);
            }
        }
    });
    mThread.start();
}
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
    final JSONLexer lexer = parser.getLexer();
    Long longObject;
    if (lexer.token() == JSONToken.LITERAL_INT) {
        long longValue = lexer.longValue();
        lexer.nextToken(JSONToken.COMMA);
        longObject = Long.valueOf(longValue);
    } else {
        Object value = parser.parse();
        if (value == null) {
            return null;
        }
        longObject = TypeUtils.castToLong(value);
    }
    if (clazz == AtomicLong.class) {
        return (T) new AtomicLong(longObject.longValue());
    }
    return (T) longObject;
}
private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval,
        long timems, boolean failIfTimeout) throws Exception {
    long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            return (ctr.get() >= newval);
        }
    });
    if (timeWaited > 0) {
        // when not timed out
        assertEquals(newval, ctr.get());
    }
    return true;
}
private void emitItem(GroupState<K, T> groupState, Object item) {
    Queue<Object> q = groupState.buffer;
    AtomicLong keyRequested = groupState.requested;
    REQUESTED.decrementAndGet(this);
    if (keyRequested == null || keyRequested.get() <= 0 || !(q == null || q.isEmpty())) {
        q.add(item);
        BUFFERED_COUNT.incrementAndGet(this);
        if (groupState.count.getAndIncrement() == 0) {
            pollQueue(groupState);
        }
    } else {
        nl.accept(groupState.getObserver(), item);
        if (keyRequested.get() != Long.MAX_VALUE) {
            keyRequested.decrementAndGet();
        }
    }
    requestMoreIfNecessary();
}
public synchronized void addUsage(Long acl) {
    if (acl == OPEN_UNSAFE_ACL_ID) {
        return;
    }
    if (!longKeyMap.containsKey(acl)) {
        LOG.info("Ignoring acl " + acl + " as it does not exist in the cache");
        return;
    }
    AtomicLong count = referenceCounter.get(acl);
    if (count == null) {
        referenceCounter.put(acl, new AtomicLongWithEquals(1));
    } else {
        count.incrementAndGet();
    }
}
@Test
public void waitForChange_runWithRealmThread() throws InterruptedException {
    final CountDownLatch bgRealmStarted = new CountDownLatch(1);
    final CountDownLatch bgRealmFinished = new CountDownLatch(1);
    final AtomicBoolean bgRealmChangeResult = new AtomicBoolean(false);
    final AtomicLong bgRealmResultSize = new AtomicLong(0);
    RealmThread thread = new RealmThread(realmConfig, new RealmThread.RealmRunnable() {
        @Override
        public void run(Realm realm) {
            bgRealmStarted.countDown();
            bgRealmChangeResult.set(realm.waitForChange());
            bgRealmResultSize.set(realm.where(AllTypes.class).count());
            realm.close();
            bgRealmFinished.countDown();
        }
    });
    thread.start();

    TestHelper.awaitOrFail(bgRealmStarted);
    populateTestRealm();
    TestHelper.awaitOrFail(bgRealmFinished);
    assertTrue(bgRealmChangeResult.get());
    assertEquals(TEST_DATA_SIZE, bgRealmResultSize.get());
}
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
    Long longObject;
    JSONLexer lexer = parser.getLexer();
    if (lexer.token() == JSONToken.LITERAL_INT) {
        long longValue = lexer.longValue();
        lexer.nextToken(JSONToken.COMMA);
        longObject = Long.valueOf(longValue);
    } else {
        Object value = parser.parse();
        if (value == null) {
            return null;
        }
        longObject = TypeUtils.castToLong(value);
    }
    if (clazz == AtomicLong.class) {
        return (T) new AtomicLong(longObject.longValue());
    }
    return (T) longObject;
}
/**
 * Creates and destroys lots of records in such a way that some may end up getting past
 * the eden space.
 */
@Test
@Ignore
public void testSimulateHighVolumeWithBoxedData() throws InterruptedException {
    BlockingQueue<Iterable<Record>> queue = new ArrayBlockingQueue<>(QUEUE_DEPTH);
    ExecutorService pool = Executors.newFixedThreadPool(POOL_SIZE);
    try {
        timerStart = new AtomicLong(System.currentTimeMillis());
        processed = new AtomicLong(0);
        IntStream.range(0, 10).forEach(i -> pool.execute(() -> produce(queue, this::randomiseObjects)));
        IntStream.range(0, 8).forEach(i -> pool.execute(() -> consume(queue, this::readObjects)));
    } finally {
        pool.shutdown();
        pool.awaitTermination(3, TimeUnit.MINUTES);
    }
}
public void ensureSyncComplete(int blockIndex) {
    final AtomicLong noSyncLength = noSyncLengthMap.get(blockIndex);
    if (noSyncLength != null && noSyncLength.get() > 0) {
        // sync to store
        if (syncRunning) {
            // wait for sync
            parkThreadList.add(Thread.currentThread());
            while (true) {
                LockSupport.parkNanos(WAIT_SYNC_NANO);
                if (!syncRunning) break;
            }
        }
        // sync once, make sure data has been synced.
        syncRunning = true;
        syncRunnable.run();
    }
}
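The method above parks the caller until a concurrently running sync pass finishes, but the counterpart that clears syncRunning and wakes the parked threads is not shown. A minimal sketch of that side of the protocol, assuming parkThreadList and syncRunning are the same fields used above and that parkThreadList is a thread-safe collection; names and structure are assumptions, not the library's actual code.

// Hypothetical counterpart: the sync pass ends by clearing the flag and
// unparking every thread that queued itself in parkThreadList above.
void onSyncFinished() {
    syncRunning = false;                 // allows the parkNanos loop above to exit
    for (Thread waiting : parkThreadList) {
        LockSupport.unpark(waiting);     // wake threads parked in ensureSyncComplete
    }
    parkThreadList.clear();
}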
@Test
public void write() throws IOException {
    final DownloadOutputStream outputStream = mock(DownloadOutputStream.class);
    doReturn(outputStream).when(multiPointOutputStream).outputStream(anyInt());
    multiPointOutputStream.syncRunning = true;

    final byte[] bytes = new byte[6];
    multiPointOutputStream.noSyncLengthMap.put(1, new AtomicLong());
    multiPointOutputStream.write(1, bytes, 6);
    verify(multiPointOutputStream).write(1, bytes, 6);

    multiPointOutputStream.noSyncLengthMap.put(2, new AtomicLong());
    multiPointOutputStream.write(2, bytes, 16);
    verify(multiPointOutputStream).write(2, bytes, 16);

    assertThat(multiPointOutputStream.allNoSyncLength.get()).isEqualTo(22);
    assertThat(multiPointOutputStream.noSyncLengthMap.get(1).get()).isEqualTo(6);
    assertThat(multiPointOutputStream.noSyncLengthMap.get(2).get()).isEqualTo(16);
}
public void initialize() {
    activeCompanyCount = companies.getCompanyCount();
    activeSecurityCount = SecurityHandler.getSecurityNum(myCustomerCount);
    industryCount = industries.getMaxKey();
    sectorCount = sectors.getMaxKey();
    startFromCompany = companies.generateCompId();

    maxActivePrePopulatedTradeID = (int) ((hoursOfInitialTrades * EGenDate.SecondsPerHour
            * (activeCustomerCount / scaleFactor)) * TPCEConstants.AbortTrade / 100);
    currentTradeID = new AtomicLong(maxActivePrePopulatedTradeID + 1);

    startTime = EGenDate.getDateFromTime(
            TPCEConstants.initialTradePopulationBaseYear,
            TPCEConstants.initialTradePopulationBaseMonth,
            TPCEConstants.initialTradePopulationBaseDay,
            TPCEConstants.initialTradePopulationBaseHour,
            TPCEConstants.initialTradePopulationBaseMinute,
            TPCEConstants.initialTradePopulationBaseSecond,
            TPCEConstants.initialTradePopulationBaseFraction);
}
@Test
public void testCompareNumbers() {
    Assert.assertTrue(GatherUtils.compareNumbers((byte) 32, 64d) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers((short) 32, 64d) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers((int) 32, 64d) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers((long) 32, 64d) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers((float) 32, 64d) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers((double) 32, new AtomicLong(64)) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new AtomicInteger(32), new AtomicLong(64)) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new AtomicLong(32), (short) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), new BigDecimal(64)) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (byte) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (short) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (int) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (long) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (float) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), (double) 64) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), new AtomicInteger(64)) < 0);
    Assert.assertTrue(GatherUtils.compareNumbers(new BigDecimal(32), new AtomicLong(64)) < 0);
}
@Test
public void testWrongOffsetResponds409() {
    // arrange
    when(request.getUuid()).thenReturn(uuid);
    when(request.getHeader("Upload-Offset")).thenReturn(Optional.of("10"));
    when(upload.getOffset()).thenReturn(new AtomicLong(50L));
    when(upload.uploadChunk(request))
            .thenReturn(Observable.just(1L, 2L, 3L).concatWith(Observable.error(new IOException())));

    // act
    Observable<TusResponse> response = handler.handle();

    // assert
    response.toBlocking().subscribe(tr -> {
        assertThat(tr.getStatusCode()).isEqualTo(409);
    }, x -> logger.info("", x));
}
@Override
public void testRunStarted(Description description) throws Exception {
    super.testRunStarted(description);
    Runtime.getRuntime().addShutdownHook(new Thread(() -> running.set(Boolean.FALSE)));

    new Thread(() -> {
        final RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
        final AtomicLong u = new AtomicLong(runtimeBean.getUptime());
        while (running.get()) {
            try {
                long upTime = runtimeBean.getUptime();
                if (upTime >= u.get() + 10000) {
                    u.set(upTime);
                    System.out.printf("Test Up Time = %.3f (s)%n", upTime / 1000d);
                    System.out.printf("Heap Usage = %.3f (MB)%n",
                            ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() / 1024d / 1024d);
                    System.out.printf("Non-Heap Usage = %.3f (MB)%n",
                            ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getUsed() / 1024d / 1024d);
                    System.out.println("=============================");
                }
                Thread.sleep(10000L);
            } catch (InterruptedException ex) {
                Logger.getLogger(RedissonTestRunListener.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }).start();
}
public ThreadFactory build(ThreadFactoryBuilder builder) {
    final String nameFormat = builder.nameFormat;
    final Boolean daemon = builder.daemon;
    final Integer priority = builder.priority;
    final AtomicLong count = nameFormat != null ? new AtomicLong(0L) : null;
    return (runnable) -> {
        Thread thread = Executors.defaultThreadFactory().newThread(runnable);
        if (nameFormat != null) {
            thread.setName(ThreadFactoryBuilder.format(nameFormat, count.getAndIncrement()));
        }
        if (daemon != null) {
            thread.setDaemon(daemon.booleanValue());
        }
        if (priority != null) {
            thread.setPriority(priority.intValue());
        }
        return thread;
    };
}
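A short usage sketch of the factory built above. The fluent setter names on ThreadFactoryBuilder are assumptions for illustration; only the numbering behavior follows from the code shown, where the shared AtomicLong fills the "%d" placeholder in the name format.

// Hypothetical usage: threads created by the factory are named "worker-0", "worker-1", ...
ThreadFactory factory = new ThreadFactoryBuilder()
        .setNameFormat("worker-%d")   // "%d" is filled from the AtomicLong counter
        .setDaemon(true)
        .build();

ExecutorService pool = Executors.newFixedThreadPool(4, factory);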
/**
 * Delete all files for a table.
 *
 * @param tableName
 */
public void deleteTable(String tableName) {
    if (!initialized) {
        return;
    }
    String[] filesTodelete = getAllFilesForTable(tableName);
    for (String file : filesTodelete) {
        deleteWALFile(file);
    }
    closeAllWALFilesForTable(tableName);

    /** remove the sequence-no for the table that is getting deleted **/
    Matcher matcher = Pattern.compile("^" + tableName + "_[0-9]+$").matcher("_dummy_");
    Iterator<Map.Entry<String, AtomicLong>> iterator = bucketSeqNoMap.entrySet().iterator();
    Map.Entry<String, AtomicLong> entry;
    while (iterator.hasNext()) {
        entry = iterator.next();
        if (matcher.reset(entry.getKey()).matches()) {
            iterator.remove();
        }
    }
}
/**
 * Create a circuit breaker that will break if the number of estimated
 * bytes grows above the limit. All estimations will be multiplied by
 * the given overheadConstant. Uses the given oldBreaker to initialize
 * the starting offset.
 *
 * @param settings settings to configure this breaker
 * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
 * @param logger the logger used for trace output
 * @param parent parent circuit breaker service to delegate tripped breakers to
 * @param name the name of the breaker
 */
public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
        ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
    this.name = name;
    this.settings = settings;
    this.memoryBytesLimit = settings.getLimit();
    this.overheadConstant = settings.getOverhead();
    if (oldBreaker == null) {
        this.used = new AtomicLong(0);
        this.trippedCount = new AtomicLong(0);
    } else {
        this.used = oldBreaker.used;
        this.trippedCount = oldBreaker.trippedCount;
    }
    this.logger = logger;
    if (logger.isTraceEnabled()) {
        logger.trace("creating ChildCircuitBreaker with settings {}", this.settings);
    }
    this.parent = parent;
}
@Test
public void testProgress() throws Exception {
    Path tempDir = Files.createTempDirectory("jaffree");
    Path outputPath = tempDir.resolve("test.mkv");

    final AtomicLong counter = new AtomicLong();
    ProgressListener listener = new ProgressListener() {
        @Override
        public void onProgress(FFmpegProgress progress) {
            counter.incrementAndGet();
        }
    };

    FFmpegResult result = FFmpeg.atPath(BIN)
            .addInput(UrlInput.fromPath(SMALL_FLV))
            .addOutput(UrlOutput.toPath(outputPath))
            .setProgressListener(listener)
            .execute();

    Assert.assertNotNull(result);
    Assert.assertTrue(counter.get() > 0);
}
@Test
public void testNewLinearId() {
    AtomicLong id = new AtomicLong(0);
    assertThat(IdGenerator.newLinearId(id)).isEqualTo(1);
    assertThat(IdGenerator.newLinearId(id)).isEqualTo(2);

    id.set(IdGenerator.MAX - 1L);
    assertThat(IdGenerator.newLinearId(id)).isEqualTo(IdGenerator.MAX);
    assertThat(IdGenerator.newLinearId(id)).isEqualTo(1L);
}
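IdGenerator.newLinearId itself is not shown, but the test above pins down its contract: IDs count up from 1 and wrap back to 1 after reaching IdGenerator.MAX. A minimal sketch consistent with that contract (an assumption for illustration, not the library's actual implementation):

// Hypothetical implementation matching the test: increments atomically and wraps
// from MAX back to 1, never returning 0.
static long newLinearId(AtomicLong id) {
    return id.updateAndGet(prev -> prev >= MAX ? 1L : prev + 1L);
}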
private String putMessageDistributeTimeToString() {
    final AtomicLong[] times = this.putMessageDistributeTime;
    if (null == times) {
        return null;
    }

    final StringBuilder sb = new StringBuilder();
    for (int i = 0; i < times.length; i++) {
        long value = times[i].get();
        sb.append(String.format("%s:%d", PUT_MESSAGE_ENTIRE_TIME_MAX_DESC[i], value));
        sb.append(" ");
    }
    return sb.toString();
}
@Test
public void testRandomLoadBalance_select() {
    int runs = 1000;
    Map<Invoker, AtomicLong> counter = getInvokeCounter(runs, RandomLoadBalance.NAME);
    for (Invoker minvoker : counter.keySet()) {
        Long count = counter.get(minvoker).get();
        // System.out.println(count);
        Assert.assertTrue("abs diff should < avg",
                Math.abs(count - runs / (0f + invokers.size())) < runs / (0f + invokers.size()));
    }
}
/**
 * getOpaque returns the last value set
 */
public void testGetOpaqueSet() {
    AtomicLong ai = new AtomicLong(1);
    assertEquals(1, ai.getOpaque());
    ai.set(2);
    assertEquals(2, ai.getOpaque());
    ai.set(-3);
    assertEquals(-3, ai.getOpaque());
}
/**
 * Restores a RestorableMeter from the last seen 15m and 2h rates.
 *
 * @param lastM15Rate the last seen 15m rate, in terms of events per second
 * @param lastM120Rate the last seen 2h rate, in terms of events per second
 */
public RestorableMeter(double lastM15Rate, double lastM120Rate) {
    this.m15Rate = new RestorableEWMA(lastM15Rate, TimeUnit.MINUTES.toSeconds(15));
    this.m120Rate = new RestorableEWMA(lastM120Rate, TimeUnit.MINUTES.toSeconds(120));
    this.startTime = this.clock.getTick();
    this.lastTick = new AtomicLong(startTime);
}
@Test
public void testMulti() throws IOException, KeeperException, InterruptedException {
    Op createTtl = Op.create("/a", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT_WITH_TTL, 100);
    Op createTtlSequential = Op.create("/b", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT_SEQUENTIAL_WITH_TTL, 200);
    Op createNonTtl = Op.create("/c", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);
    List<OpResult> results = zk.multi(Arrays.asList(createTtl, createTtlSequential, createNonTtl));
    String sequentialPath = ((OpResult.CreateResult) results.get(1)).getPath();

    final AtomicLong fakeElapsed = new AtomicLong(0);
    ContainerManager containerManager = newContainerManager(fakeElapsed);
    containerManager.checkContainers();
    Assert.assertNotNull("node should not have been deleted yet", zk.exists("/a", false));
    Assert.assertNotNull("node should not have been deleted yet", zk.exists(sequentialPath, false));
    Assert.assertNotNull("node should never be deleted", zk.exists("/c", false));

    fakeElapsed.set(110);
    containerManager.checkContainers();
    Assert.assertNull("node should have been deleted", zk.exists("/a", false));
    Assert.assertNotNull("node should not have been deleted yet", zk.exists(sequentialPath, false));
    Assert.assertNotNull("node should never be deleted", zk.exists("/c", false));

    fakeElapsed.set(210);
    containerManager.checkContainers();
    Assert.assertNull("node should have been deleted", zk.exists("/a", false));
    Assert.assertNull("node should have been deleted", zk.exists(sequentialPath, false));
    Assert.assertNotNull("node should never be deleted", zk.exists("/c", false));
}
private static ExecutorService createShardConsumersThreadPool(final String subtaskName) {
    return Executors.newCachedThreadPool(new ThreadFactory() {
        // the counter must be a field shared across newThread calls; creating it inside
        // newThread would restart the numbering at 0 for every thread
        private final AtomicLong threadCount = new AtomicLong(0);

        @Override
        public Thread newThread(Runnable runnable) {
            Thread thread = new Thread(runnable);
            thread.setName("shardConsumers-" + subtaskName + "-thread-" + threadCount.getAndIncrement());
            thread.setDaemon(true);
            return thread;
        }
    });
}
@Override
public Runnable createJob(SimpleConPool conPool2, long myCount, int batch, long startId,
        AtomicLong finshiedCount2, AtomicLong failedCount2) {
    return new TravelRecordGlobalSeqInsertJob(conPool2, myCount, batch, startId, finshiedCount, failedCount);
}
private void setMaxValue(AtomicLong field, long value) {
    for (long oldMax; value > (oldMax = field.get()); ) {
        if (field.compareAndSet(oldMax, value)) {
            return;
        }
    }
}
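On Java 8+ the same compare-and-set retry loop can be expressed with AtomicLong's built-in accumulator. A brief equivalent, shown only as an alternative sketch with the same observable behavior:

// Equivalent using the JDK's built-in CAS retry loop (Java 8+).
private void setMaxValue(AtomicLong field, long value) {
    field.accumulateAndGet(value, Math::max); // retries internally until the max is stored
}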
@Test
public void should_verify_an_actual_atomic_long_is_conform_to_an_expected_result() {
    assertThat(resultOf(() -> {
        gwtMock.whenAnEventHappensInRelationToAnActionOfTheConsumer();
        return new AtomicLong(123);
    }).hasPositiveValue()).hasSameClassAs(assertThat(new AtomicLong()));
}
private void runTestTook(boolean controlled) throws Exception {
    String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
    // translate Windows line endings (\r\n) to standard ones (\n)
    if (Constants.WINDOWS) {
        bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
    }
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null,
            XContentType.JSON);
    AtomicLong expected = new AtomicLong();
    TransportBulkAction action = createAction(controlled, expected);
    action.doExecute(null, bulkRequest, new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse bulkItemResponses) {
            if (controlled) {
                assertThat(
                        bulkItemResponses.getTook().getMillis(),
                        equalTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
            } else {
                assertThat(
                        bulkItemResponses.getTook().getMillis(),
                        greaterThanOrEqualTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
            }
        }

        @Override
        public void onFailure(Exception e) {
        }
    });
}
public Service(Collection<String> groups, int poolSize) {
    this.groups = groups;
    pendingRequests = new PendingRequests(groups, rwDMPersistenceSupraLock);
    threadPool = Executors.newFixedThreadPool(poolSize, new ThreadFactory() {
        private final AtomicLong counter = new AtomicLong();

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "TPWorker" + counter.incrementAndGet());
        }
    });
    messageProcessor = new AbstractProcessor<MessageDataModel>() {
        @Override
        protected void doProcess(MessageDataModel element) {
            pendingRequests.run(() -> processRequest(element), element.getGroup());
        }
    };
    recoveryMessageProcessor = new AbstractProcessor<RecoveryMessage>() {
        @Override
        protected void doProcess(RecoveryMessage element) {
            if (pendingRequests.requestExists(element.getGroup(), element.getId())) {
                Service.this.process(new MessageDataModel(element.data));
            }
        }
    };
    serviceStarted = new CountDownLatch(1);
    LOGGER.info("Created " + getClass().getSimpleName());
}
@EntryPoint
public Pcg64Random(final byte[] seed) {
    super(seed);
    if (seed.length != Long.BYTES) {
        throw new IllegalArgumentException("Pcg64Random requires an 8-byte seed");
    }
    internal = new AtomicLong(BinaryUtils.convertBytesToLong(seed));
}
public void reset() {
    startTimeMs = System.currentTimeMillis();
    numProcessed = new AtomicLong(0);
    lastPrintTimeMs = new AtomicLong(0);
    measureDeque.clear();
    measureDeque.add(new MeasurePoint(startTimeMs, 0));
}
public ByteBufferArena(int chunkSize, int pageSize, int chunkCount, int conReadBuferChunk) {
    this.chunkSize = chunkSize;
    this.pageSize = pageSize;
    this.chunkCount.set(chunkCount);
    this.conReadBuferChunk = conReadBuferChunk;

    q = new ByteBufferChunkList[6];
    q[5] = new ByteBufferChunkList(100, Integer.MAX_VALUE, chunkSize, pageSize, 0);
    q[4] = new ByteBufferChunkList(75, 100, chunkSize, pageSize, 0);
    q[3] = new ByteBufferChunkList(50, 100, chunkSize, pageSize, 0);
    q[2] = new ByteBufferChunkList(25, 75, chunkSize, pageSize, 0);
    q[1] = new ByteBufferChunkList(1, 50, chunkSize, pageSize, 0);
    q[0] = new ByteBufferChunkList(Integer.MIN_VALUE, 25, chunkSize, pageSize, chunkCount);

    q[0].nextList = q[1];
    q[1].nextList = q[2];
    q[2].nextList = q[3];
    q[3].nextList = q[4];
    q[4].nextList = q[5];
    q[5].nextList = null;

    q[5].prevList = q[4];
    q[4].prevList = q[3];
    q[3].prevList = q[2];
    q[2].prevList = q[1];
    q[1].prevList = q[0];
    q[0].prevList = null;

    capacity = new AtomicLong(6 * chunkCount * chunkSize);
    size = new AtomicLong(6 * chunkCount * chunkSize);
    sharedOptsCount = new ConcurrentHashMap<>();
    memoryUsage = new ConcurrentHashMap<>();
}
private CacheManager(File cacheDir, long sizeLimit, int countLimit) {
    this.cacheDir = cacheDir;
    this.sizeLimit = sizeLimit;
    this.countLimit = countLimit;
    cacheSize = new AtomicLong();
    cacheCount = new AtomicInteger();
    calculateCacheSizeAndCacheCount();
}
RegionServerTask(Connection connection, String serverName, HRegionInfo region,
        ExtendedSink sink, AtomicLong successes) {
    this.connection = connection;
    this.serverName = serverName;
    this.region = region;
    this.sink = sink;
    this.successes = successes;
}
public RegionReplicaReplayCallable(ClusterConnection connection, RpcControllerFactory rpcControllerFactory,
        TableName tableName, HRegionLocation location, HRegionInfo regionInfo, byte[] row,
        List<Entry> entries, AtomicLong skippedEntries) {
    super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId());
    this.entries = entries;
    this.skippedEntries = skippedEntries;
    this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
}